diff --git a/ckpts/universal/global_step40/zero/15.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/15.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..4df31d72e651d8bc863bb2c01398ed96074a9d75 --- /dev/null +++ b/ckpts/universal/global_step40/zero/15.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38dff829c2020c3e39d7ad2bde4962c23ecac7722359bffc74bbb7c0440f0e03 +size 16778317 diff --git a/ckpts/universal/global_step40/zero/15.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/15.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..9df6eaa59bd8a53b165596a4e8afebf29b61dc8c --- /dev/null +++ b/ckpts/universal/global_step40/zero/15.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa18500e1a868374d4589a520c6866e200434fb5ba2ce6d22dfe29059ec0a734 +size 9293 diff --git a/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..f9812a52aeba1d61d5460d596aaa03b32a64b7b7 --- /dev/null +++ b/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b45220ac44735a023fe49d4f5c55544da02a11b0a036d77f28001c1494beb604 +size 33555627 diff --git a/ckpts/universal/global_step40/zero/26.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/26.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..90caa201ee4df769099105408d8bcfc484fc2631 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:07974e68e74e5b4a0765c2cd3e34a0afa25d2ca6d1300111796fb0180f5fb441 +size 16778411 diff --git a/ckpts/universal/global_step40/zero/26.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/26.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..23e6fd1aa1d32024d2754a30a921d1ff3c0e4df6 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e86bc6874312288703ca761702337f1044fdccfd6897ece2957914192e9d066b +size 16778317 diff --git a/ckpts/universal/global_step40/zero/26.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/26.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..e01485585e20cfb0495593acafc58277e75e8c19 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0914774ec02ad1de3d4ed79fd71a5ed2f2a8de00080c8a34ac9ee915a525c8fe +size 50332828 diff --git a/ckpts/universal/global_step40/zero/26.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/26.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..76c63005ed938b3bc041ee9fd101bd954d787cca --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2678b103056818002b5276ddec83da875b705035299503ffe28a708027fd8c30 +size 50332749 diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__init__.py b/venv/lib/python3.10/site-packages/scipy/fft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d6c3c27febc0449e463afbb5aa10e5d3581c37e --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/scipy/fft/__init__.py @@ -0,0 +1,111 @@ +""" +============================================== +Discrete Fourier transforms (:mod:`scipy.fft`) +============================================== + +.. currentmodule:: scipy.fft + +Fast Fourier Transforms (FFTs) +============================== + +.. autosummary:: + :toctree: generated/ + + fft - Fast (discrete) Fourier Transform (FFT) + ifft - Inverse FFT + fft2 - 2-D FFT + ifft2 - 2-D inverse FFT + fftn - N-D FFT + ifftn - N-D inverse FFT + rfft - FFT of strictly real-valued sequence + irfft - Inverse of rfft + rfft2 - 2-D FFT of real sequence + irfft2 - Inverse of rfft2 + rfftn - N-D FFT of real sequence + irfftn - Inverse of rfftn + hfft - FFT of a Hermitian sequence (real spectrum) + ihfft - Inverse of hfft + hfft2 - 2-D FFT of a Hermitian sequence + ihfft2 - Inverse of hfft2 + hfftn - N-D FFT of a Hermitian sequence + ihfftn - Inverse of hfftn + +Discrete Sin and Cosine Transforms (DST and DCT) +================================================ + +.. autosummary:: + :toctree: generated/ + + dct - Discrete cosine transform + idct - Inverse discrete cosine transform + dctn - N-D Discrete cosine transform + idctn - N-D Inverse discrete cosine transform + dst - Discrete sine transform + idst - Inverse discrete sine transform + dstn - N-D Discrete sine transform + idstn - N-D Inverse discrete sine transform + +Fast Hankel Transforms +====================== + +.. autosummary:: + :toctree: generated/ + + fht - Fast Hankel transform + ifht - Inverse of fht + +Helper functions +================ + +.. 
autosummary:: + :toctree: generated/ + + fftshift - Shift the zero-frequency component to the center of the spectrum + ifftshift - The inverse of `fftshift` + fftfreq - Return the Discrete Fourier Transform sample frequencies + rfftfreq - DFT sample frequencies (for usage with rfft, irfft) + fhtoffset - Compute an optimal offset for the Fast Hankel Transform + next_fast_len - Find the optimal length to zero-pad an FFT for speed + set_workers - Context manager to set default number of workers + get_workers - Get the current default number of workers + +Backend control +=============== + +.. autosummary:: + :toctree: generated/ + + set_backend - Context manager to set the backend within a fixed scope + skip_backend - Context manager to skip a backend within a fixed scope + set_global_backend - Sets the global fft backend + register_backend - Register a backend for permanent use + +""" + +from ._basic import ( + fft, ifft, fft2, ifft2, fftn, ifftn, + rfft, irfft, rfft2, irfft2, rfftn, irfftn, + hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn) +from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn +from ._fftlog import fht, ifht, fhtoffset +from ._helper import next_fast_len, fftfreq, rfftfreq, fftshift, ifftshift +from ._backend import (set_backend, skip_backend, set_global_backend, + register_backend) +from ._pocketfft.helper import set_workers, get_workers + +__all__ = [ + 'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn', + 'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn', + 'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn', + 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift', + 'next_fast_len', + 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn', + 'fht', 'ifht', + 'fhtoffset', + 'set_backend', 'skip_backend', 'set_global_backend', 'register_backend', + 'get_workers', 'set_workers'] + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git 
a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d924898b79d326b4e1b1f869e3609b780d7aa46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..378fa9783766f716257ca5d0386ea3e3636c2331 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf8f03b6c1a5a756bbc7194879e20d7273a618e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95a1202b194277de1814bfeb468c19d5c54e7e3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_debug_backends.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_debug_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b12f490c3c6a21dac453a8bb4f3d66d33e20a6a Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_debug_backends.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..355cafa9bea688a1b769b95cab88f9cdc6b7f7dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dda6de9dd61478141b50ee11af00a4efd6372500 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_helper.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da15dab07e8124628941fbe7a45db96ea5156679 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_helper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23dc2dffaa6b36b38906974bb4f4d3ed77f741cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..e7ff4dedf9c2da4cb99dde4b8f6232edd3e3b018 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_backend.py b/venv/lib/python3.10/site-packages/scipy/fft/_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..c1e5cfcad5c4cbc43276e151d2da33039368630d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_backend.py @@ -0,0 +1,196 @@ +import scipy._lib.uarray as ua +from . import _basic_backend +from . import _realtransforms_backend +from . import _fftlog_backend + + +class _ScipyBackend: + """The default backend for fft calculations + + Notes + ----- + We use the domain ``numpy.scipy`` rather than ``scipy`` because ``uarray`` + treats the domain as a hierarchy. This means the user can install a single + backend for ``numpy`` and have it implement ``numpy.scipy.fft`` as well. + """ + __ua_domain__ = "numpy.scipy.fft" + + @staticmethod + def __ua_function__(method, args, kwargs): + + fn = getattr(_basic_backend, method.__name__, None) + if fn is None: + fn = getattr(_realtransforms_backend, method.__name__, None) + if fn is None: + fn = getattr(_fftlog_backend, method.__name__, None) + if fn is None: + return NotImplemented + return fn(*args, **kwargs) + + +_named_backends = { + 'scipy': _ScipyBackend, +} + + +def _backend_from_arg(backend): + """Maps strings to known backends and validates the backend""" + + if isinstance(backend, str): + try: + backend = _named_backends[backend] + except KeyError as e: + raise ValueError(f'Unknown backend {backend}') from e + + if backend.__ua_domain__ != 'numpy.scipy.fft': + raise ValueError('Backend does not implement "numpy.scipy.fft"') + + return backend + + +def set_global_backend(backend, coerce=False, only=False, try_last=False): + """Sets the global fft backend + + This utility method replaces the default backend for 
permanent use. It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. This will be the first tried + backend outside the :obj:`set_backend` context manager. + + Parameters + ---------- + backend : {object, 'scipy'} + The backend to use. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + coerce : bool + Whether to coerce input types when trying this backend. + only : bool + If ``True``, no more backends will be tried if this fails. + Implied by ``coerce=True``. + try_last : bool + If ``True``, the global backend is tried after registered backends. + + Raises + ------ + ValueError: If the backend does not implement ``numpy.scipy.fft``. + + Notes + ----- + This will overwrite the previously set global backend, which, by default, is + the SciPy implementation. + + Examples + -------- + We can set the global fft backend: + + >>> from scipy.fft import fft, set_global_backend + >>> set_global_backend("scipy") # Sets global backend (default is "scipy"). + >>> fft([1]) # Calls the global backend + array([1.+0.j]) + """ + backend = _backend_from_arg(backend) + ua.set_global_backend(backend, coerce=coerce, only=only, try_last=try_last) + + +def register_backend(backend): + """ + Register a backend for permanent use. + + Registered backends have the lowest priority and will be tried after the + global backend. + + Parameters + ---------- + backend : {object, 'scipy'} + The backend to use. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + + Raises + ------ + ValueError: If the backend does not implement ``numpy.scipy.fft``. + + Examples + -------- + We can register a new fft backend: + + >>> from scipy.fft import fft, register_backend, set_global_backend + >>> class NoopBackend: # Define an invalid Backend + ... __ua_domain__ = "numpy.scipy.fft" + ... 
def __ua_function__(self, func, args, kwargs): + ... return NotImplemented + >>> set_global_backend(NoopBackend()) # Set the invalid backend as global + >>> register_backend("scipy") # Register a new backend + # The registered backend is called because + # the global backend returns `NotImplemented` + >>> fft([1]) + array([1.+0.j]) + >>> set_global_backend("scipy") # Restore global backend to default + + """ + backend = _backend_from_arg(backend) + ua.register_backend(backend) + + +def set_backend(backend, coerce=False, only=False): + """Context manager to set the backend within a fixed scope. + + Upon entering the ``with`` statement, the given backend will be added to + the list of available backends with the highest priority. Upon exit, the + backend is reset to the state before entering the scope. + + Parameters + ---------- + backend : {object, 'scipy'} + The backend to use. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + coerce : bool, optional + Whether to allow expensive conversions for the ``x`` parameter. e.g., + copying a NumPy array to the GPU for a CuPy backend. Implies ``only``. + only : bool, optional + If only is ``True`` and this backend returns ``NotImplemented``, then a + BackendNotImplemented error will be raised immediately. Ignoring any + lower priority backends. + + Examples + -------- + >>> import scipy.fft as fft + >>> with fft.set_backend('scipy', only=True): + ... fft.fft([1]) # Always calls the scipy implementation + array([1.+0.j]) + """ + backend = _backend_from_arg(backend) + return ua.set_backend(backend, coerce=coerce, only=only) + + +def skip_backend(backend): + """Context manager to skip a backend within a fixed scope. + + Within the context of a ``with`` statement, the given backend will not be + called. This covers backends registered both locally and globally. Upon + exit, the backend will again be considered. 
+ + Parameters + ---------- + backend : {object, 'scipy'} + The backend to skip. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + + Examples + -------- + >>> import scipy.fft as fft + >>> fft.fft([1]) # Calls default SciPy backend + array([1.+0.j]) + >>> with fft.skip_backend('scipy'): # We explicitly skip the SciPy backend + ... fft.fft([1]) # leaving no implementation available + Traceback (most recent call last): + ... + BackendNotImplementedError: No selected backends had an implementation ... + """ + backend = _backend_from_arg(backend) + return ua.skip_backend(backend) + + +set_global_backend('scipy', try_last=True) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_basic.py b/venv/lib/python3.10/site-packages/scipy/fft/_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..a3fc021c9ef9b7c2a40bf7b5138158df8e276ae6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_basic.py @@ -0,0 +1,1630 @@ +from scipy._lib.uarray import generate_multimethod, Dispatchable +import numpy as np + + +def _x_replacer(args, kwargs, dispatchables): + """ + uarray argument replacer to replace the transform input array (``x``) + """ + if len(args) > 0: + return (dispatchables[0],) + args[1:], kwargs + kw = kwargs.copy() + kw['x'] = dispatchables[0] + return args, kw + + +def _dispatch(func): + """ + Function annotation that creates a uarray multimethod from the function + """ + return generate_multimethod(func, _x_replacer, domain="numpy.scipy.fft") + + +@_dispatch +def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 1-D discrete Fourier Transform. + + This function computes the 1-D *n*-point discrete Fourier + Transform (DFT) with the efficient Fast Fourier Transform (FFT) + algorithm [1]_. + + Parameters + ---------- + x : array_like + Input array, can be complex. 
+ n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode. Default is "backward", meaning no normalization on + the forward transforms and scaling by ``1/n`` on the `ifft`. + "forward" instead applies the ``1/n`` factor on the forward transform. + For ``norm="ortho"``, both directions are scaled by ``1/sqrt(n)``. + + .. versionadded:: 1.6.0 + ``norm={"forward", "backward"}`` options were added + + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See the notes below for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. See below for more + details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + if `axes` is larger than the last axis of `x`. + + See Also + -------- + ifft : The inverse of `fft`. + fft2 : The 2-D FFT. + fftn : The N-D FFT. + rfftn : The N-D FFT of real input. + fftfreq : Frequency bins for given FFT parameters. 
+ next_fast_len : Size to pad input to for most efficient transforms + + Notes + ----- + FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform + (DFT) can be calculated efficiently, by using symmetries in the calculated + terms. The symmetry is highest when `n` is a power of 2, and the transform + is therefore most efficient for these sizes. For poorly factorizable sizes, + `scipy.fft` uses Bluestein's algorithm [2]_ and so is never worse than + O(`n` log `n`). Further performance improvements may be seen by zero-padding + the input using `next_fast_len`. + + If ``x`` is a 1d array, then the `fft` is equivalent to :: + + y[k] = np.sum(x * np.exp(-2j * np.pi * k * np.arange(n)/n)) + + The frequency term ``f=k/n`` is found at ``y[k]``. At ``y[n/2]`` we reach + the Nyquist frequency and wrap around to the negative-frequency terms. So, + for an 8-point transform, the frequencies of the result are + [0, 1, 2, 3, -4, -3, -2, -1]. To rearrange the fft output so that the + zero-frequency component is centered, like [-4, -3, -2, -1, 0, 1, 2, 3], + use `fftshift`. + + Transforms can be done in single, double, or extended precision (long + double) floating point. Half precision inputs will be converted to single + precision and non-floating-point inputs will be converted to double + precision. + + If the data type of ``x`` is real, a "real FFT" algorithm is automatically + used, which roughly halves the computation time. To increase efficiency + a little further, use `rfft`, which does the same calculation, but only + outputs half of the symmetrical spectrum. If the data are both real and + symmetrical, the `dct` can again double the efficiency, by generating + half of the spectrum from half of the signal. + + When ``overwrite_x=True`` is specified, the memory referenced by ``x`` may + be used by the implementation in any way. This may include reusing the + memory for the result, but this is in no way guaranteed. 
You should not + rely on the contents of ``x`` after the transform as this may change in + future without warning. + + The ``workers`` argument specifies the maximum number of parallel jobs to + split the FFT computation into. This will execute independent 1-D + FFTs within ``x``. So, ``x`` must be at least 2-D and the + non-transformed axes must be large enough to split into chunks. If ``x`` is + too small, fewer jobs may be used than requested. + + References + ---------- + .. [1] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + .. [2] Bluestein, L., 1970, "A linear filtering approach to the + computation of discrete Fourier transform". *IEEE Transactions on + Audio and Electroacoustics.* 18 (4): 451-455. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> scipy.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) + array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, + 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, + -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, + 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) + + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part: + + >>> from scipy.fft import fft, fftfreq, fftshift + >>> import matplotlib.pyplot as plt + >>> t = np.arange(256) + >>> sp = fftshift(fft(np.sin(t))) + >>> freq = fftshift(fftfreq(t.shape[-1])) + >>> plt.plot(freq, sp.real, freq, sp.imag) + [, + ] + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 1-D inverse discrete Fourier Transform. + + This function computes the inverse of the 1-D *n*-point + discrete Fourier transform computed by `fft`. 
In other words, + ``ifft(fft(x)) == x`` to within numerical accuracy. + + The input should be ordered in the same way as is returned by `fft`, + i.e., + + * ``x[0]`` should contain the zero frequency term, + * ``x[1:n//2]`` should contain the positive-frequency terms, + * ``x[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``x[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `fft` for details. + + Parameters + ---------- + x : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axes` is larger than the last axis of `x`. 
+ + See Also + -------- + fft : The 1-D (forward) FFT, of which `ifft` is the inverse. + ifft2 : The 2-D inverse FFT. + ifftn : The N-D inverse FFT. + + Notes + ----- + If the input parameter `n` is larger than the size of the input, the input + is padded by appending zeros at the end. Even though this is the common + approach, it might lead to surprising results. If a different padding is + desired, it must be performed before calling `ifft`. + + If ``x`` is a 1-D array, then the `ifft` is equivalent to :: + + y[k] = np.sum(x * np.exp(2j * np.pi * k * np.arange(n)/n)) / len(x) + + As with `fft`, `ifft` has support for all floating point types and is + optimized for real input. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> scipy.fft.ifft([0, 4, 0, 0]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary + + Create and plot a band-limited signal with random phases: + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> t = np.arange(400) + >>> n = np.zeros((400,), dtype=complex) + >>> n[40:60] = np.exp(1j*rng.uniform(0, 2*np.pi, (20,))) + >>> s = scipy.fft.ifft(n) + >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') + [, ] + >>> plt.legend(('real', 'imaginary')) + + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 1-D discrete Fourier Transform for real input. + + This function computes the 1-D *n*-point discrete Fourier + Transform (DFT) of a real-valued array by means of an efficient algorithm + called the Fast Fourier Transform (FFT). + + Parameters + ---------- + x : array_like + Input array + n : int, optional + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. 
If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + irfft : The inverse of `rfft`. + fft : The 1-D FFT of general (complex) input. + fftn : The N-D FFT. + rfft2 : The 2-D FFT of real input. + rfftn : The N-D FFT of real input. + + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e., the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``X = rfft(x)`` and fs is the sampling frequency, ``X[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. 
+ + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> import scipy.fft + >>> scipy.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> scipy.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the 1-D *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(x), len(x)) == x`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e., the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + x : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. 
If `n` is not given, it is taken to be + ``2*(m-1)``, where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `x`. + + See Also + -------- + rfft : The 1-D FFT of real input, of which `irfft` is inverse. + fft : The 1-D FFT. + irfft2 : The inverse of the 2-D FFT of real input. + irfftn : The inverse of the N-D FFT of real input. + + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `x`, where `x` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. + + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. 
One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + The default value of `n` assumes an even output length. By the Hermitian + symmetry, the last imaginary component must be 0 and so is ignored. To + avoid losing information, the correct length of the real input *must* be + given. + + Examples + -------- + >>> import scipy.fft + >>> scipy.fft.ifft([1, -1j, -1, 1j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary + >>> scipy.fft.irfft([1, -1j, -1]) + array([0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the FFT of a signal that has Hermitian symmetry, i.e., a real + spectrum. + + Parameters + ---------- + x : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. For `n` output + points, ``n//2 + 1`` input points are necessary. If the input is + longer than this, it is cropped. If it is shorter than this, it is + padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``, + where ``m`` is the length of the input along the axis specified by + `axis`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See `fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. 
+ See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*m - 2``, where ``m`` is the length of the transformed axis of + the input. To get an odd number of output points, `n` must be + specified, for instance, as ``2*m - 1`` in the typical case. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + rfft : Compute the 1-D FFT for real input. + ihfft : The inverse of `hfft`. + hfftn : Compute the N-D FFT of a Hermitian signal. + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So, here, it's `hfft`, for + which you must supply the length of the result if it is to be odd. + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. 
+ + Examples + -------- + >>> from scipy.fft import fft, hfft + >>> import numpy as np + >>> a = 2 * np.pi * np.arange(10) / 10 + >>> signal = np.cos(a) + 3j * np.sin(3 * a) + >>> fft(signal).round(10) + array([ -0.+0.j, 5.+0.j, -0.+0.j, 15.-0.j, 0.+0.j, 0.+0.j, + -0.+0.j, -15.-0.j, 0.+0.j, 5.+0.j]) + >>> hfft(signal[:6]).round(10) # Input first half of signal + array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.]) + >>> hfft(signal, 10) # Input entire signal and truncate + array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.]) + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the inverse FFT of a signal that has Hermitian symmetry. + + Parameters + ---------- + x : array_like + Input array. + n : int, optional + Length of the inverse FFT, the number of points along + transformation axis in the input to use. If `n` is smaller than + the length of the input, the input is cropped. If it is larger, + the input is padded with zeros. If `n` is not given, the length of + the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See `fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. 
versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is ``n//2 + 1``. + + See Also + -------- + hfft, irfft + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here, the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So, here, it's `hfft`, for + which you must supply the length of the result if it is to be odd: + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + Examples + -------- + >>> from scipy.fft import ifft, ihfft + >>> import numpy as np + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> ifft(spectrum) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary + >>> ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D discrete Fourier Transform. + + This function computes the N-D discrete Fourier Transform over + any number of axes in an M-D array by means of the Fast Fourier + Transform (FFT). + + Parameters + ---------- + x : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. 
If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `x`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + ifftn : The inverse of `fftn`, the inverse N-D FFT. + fft : The 1-D FFT, with definitions and conventions used. + rfftn : The N-D FFT of real input. + fft2 : The 2-D FFT. + fftshift : Shifts zero-frequency terms to centre of array. + + Notes + ----- + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of all axes, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. 
+ + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.mgrid[:3, :3, :3][0] + >>> scipy.fft.fftn(x, axes=(1, 2)) + array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[ 9.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[18.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + >>> scipy.fft.fftn(x, (2, 2), axes=(0, 1)) + array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[-2.+0.j, -2.+0.j, -2.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, + ... 2 * np.pi * np.arange(200) / 34) + >>> S = np.sin(X) + np.cos(Y) + rng.uniform(0, 1, X.shape) + >>> FS = scipy.fft.fftn(S) + >>> plt.imshow(np.log(np.abs(scipy.fft.fftshift(FS))**2)) + + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D inverse discrete Fourier Transform. + + This function computes the inverse of the N-D discrete + Fourier Transform over any number of axes in an M-D array by + means of the Fast Fourier Transform (FFT). In other words, + ``ifftn(fftn(x)) == x`` to within numerical accuracy. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fftn`, i.e., it should have the term for zero frequency + in all axes in the low-order corner, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + Parameters + ---------- + x : array_like + Input array, can be complex. 
+ s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``ifft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + axes : sequence of ints, optional + Axes over which to compute the IFFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `x`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + fftn : The forward N-D FFT, of which `ifftn` is the inverse. + ifft : The 1-D inverse FFT. + ifft2 : The 2-D inverse FFT. + ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning + of array. 
+ + Notes + ----- + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifftn` is called. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.eye(4) + >>> scipy.fft.ifftn(scipy.fft.fftn(x, axes=(0,)), axes=(1,)) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) + + + Create and plot an image with band-limited frequency content: + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> n = np.zeros((200,200), dtype=complex) + >>> n[60:80, 20:40] = np.exp(1j*rng.uniform(0, 2*np.pi, (20, 20))) + >>> im = scipy.fft.ifftn(n).real + >>> plt.imshow(im) + + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D discrete Fourier Transform + + This function computes the N-D discrete Fourier Transform + over any axes in an M-D array by means of the + Fast Fourier Transform (FFT). By default, the transform is computed over + the last two axes of the input array, i.e., a 2-dimensional FFT. + + Parameters + ---------- + x : array_like + Input array, can be complex + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. 
+ axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two axes are + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + ifft2 : The inverse 2-D FFT. + fft : The 1-D FFT. + fftn : The N-D FFT. + fftshift : Shifts zero-frequency terms to the center of the array. + For 2-D input, swaps first and third quadrants, and second + and fourth quadrants. + + Notes + ----- + `fft2` is just `fftn` with a different default for `axes`. + + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of the transformed axes, the positive frequency terms + in the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + the axes, in order of decreasingly negative frequency. + + See `fftn` for details and a plotting example, and `fft` for + definitions and conventions used. 
+ + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.mgrid[:5, :5][0] + >>> scipy.fft.fft2(x) + array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary + 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D inverse discrete Fourier Transform. + + This function computes the inverse of the 2-D discrete Fourier + Transform over any number of axes in an M-D array by means of + the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(x)) == x`` + to within numerical accuracy. By default, the inverse transform is + computed over the last two axes of the input array. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fft2`, i.e., it should have the term for zero frequency + in the low-order corner of the two axes, the positive frequency terms in + the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + both axes, in order of decreasingly negative frequency. + + Parameters + ---------- + x : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each axis) of the output (``s[0]`` refers to axis 0, + ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. 
+ axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two + axes are used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + fft2 : The forward 2-D FFT, of which `ifft2` is the inverse. + ifftn : The inverse of the N-D FFT. + fft : The 1-D FFT. + ifft : The 1-D inverse FFT. + + Notes + ----- + `ifft2` is just `ifftn` with a different default for `axes`. + + See `ifftn` for details and a plotting example, and `fft` for + definition and conventions used. + + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifft2` is called. 
+ + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = 4 * np.eye(4) + >>> scipy.fft.ifft2(x) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D discrete Fourier Transform for real input. + + This function computes the N-D discrete Fourier Transform over + any number of axes in an M-D real array by means of the Fast + Fourier Transform (FFT). By default, all axes are transformed, with the + real transform performed over the last axis, while the remaining + transforms are complex. + + Parameters + ---------- + x : array_like + Input array, taken to be real. + s : sequence of ints, optional + Shape (length along each transformed axis) to use from the input. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + The final element of `s` corresponds to `n` for ``rfft(x, n)``, while + for the remaining axes, it corresponds to `n` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. 
+ See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `x`, + as explained in the parameters section above. + The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + irfftn : The inverse of `rfftn`, i.e., the inverse of the N-D FFT + of real input. + fft : The 1-D FFT, with definitions and conventions used. + rfft : The 1-D FFT of real input. + fftn : The N-D FFT. + rfft2 : The 2-D FFT of real input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `rfft`, then the transform over the remaining axes is + performed as by `fftn`. The order of the output is as for `rfft` for the + final transformation axis, and as for `fftn` for the remaining + transformation axes. + + See `fft` for details, definitions and conventions used. 
+ + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.ones((2, 2, 2)) + >>> scipy.fft.rfftn(x) + array([[[8.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + >>> scipy.fft.rfftn(x, axes=(2, 0)) + array([[[4.+0.j, 0.+0.j], # may vary + [4.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D FFT of a real array. + + Parameters + ---------- + x : array + Input array, taken to be real. + s : sequence of ints, optional + Shape of the FFT. + axes : sequence of ints, optional + Axes over which to compute the FFT. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The result of the real 2-D FFT. + + See Also + -------- + irfft2 : The inverse of the 2-D FFT of real input. + rfft : The 1-D FFT of real input. + rfftn : Compute the N-D discrete Fourier Transform for real + input. + + Notes + ----- + This is really just `rfftn` with different default behavior. + For more details see `rfftn`. 
+ + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Computes the inverse of `rfftn` + + This function computes the inverse of the N-D discrete + Fourier Transform for real input over any number of axes in an + M-D array by means of the Fast Fourier Transform (FFT). In + other words, ``irfftn(rfftn(x), x.shape) == x`` to within numerical + accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, + and for the same reason.) + + The input should be ordered in the same way as is returned by `rfftn`, + i.e., as for `irfft` for the final transformation axis, and as for `ifftn` + along all the other axes. + + Parameters + ---------- + x : array_like + Input array. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. If `s` is not given, the shape of the input along the axes + specified by axes is used. Except for the last axis which is taken to be + ``2*(m-1)``, where ``m`` is the length of the input along that axis. + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. 
If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `x`, + as explained in the parameters section above. + The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)``, where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + rfftn : The forward N-D FFT of real input, + of which `ifftn` is the inverse. + fft : The 1-D FFT, with definitions and conventions used. + irfft : The inverse of the 1-D FFT of real input. + irfft2 : The inverse of the 2-D FFT of real input. + + Notes + ----- + See `fft` for definitions and conventions used. + + See `rfft` for definitions and conventions used for real input. + + The default value of `s` assumes an even output length in the final + transformation axis. When performing the final complex to real + transformation, the Hermitian symmetry requires that the last imaginary + component along that axis must be 0 and so it is ignored. To avoid losing + information, the correct length of the real input *must* be given. 
+ + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.zeros((3, 2, 2)) + >>> x[0, 0, 0] = 3 * 2 * 2 + >>> scipy.fft.irfftn(x) + array([[[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Computes the inverse of `rfft2` + + Parameters + ---------- + x : array_like + The input array + s : sequence of ints, optional + Shape of the real output to the inverse FFT. + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default is the last two axes. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + rfft2 : The 2-D FFT of real input. + irfft : The inverse of the 1-D FFT of real input. + irfftn : The inverse of the N-D FFT of real input. + + Notes + ----- + This is really `irfftn` with different defaults. + For more details see `irfftn`. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D FFT of Hermitian symmetric complex input, i.e., a + signal with a real spectrum. 
+ + This function computes the N-D discrete Fourier Transform for a + Hermitian symmetric complex input over any number of axes in an + M-D array by means of the Fast Fourier Transform (FFT). In other + words, ``ihfftn(hfftn(x, s)) == x`` to within numerical accuracy. (``s`` + here is ``x.shape`` with ``s[-1] = x.shape[-1] * 2 - 1``, this is necessary + for the same reason ``x.shape`` would be necessary for `irfft`.) + + Parameters + ---------- + x : array_like + Input array. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. If `s` is not given, the shape of the input along the axes + specified by axes is used. Except for the last axis which is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along that axis. + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. 
versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `x`, + as explained in the parameters section above. + The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + ihfftn : The inverse N-D FFT with real spectrum. Inverse of `hfftn`. + fft : The 1-D FFT, with definitions and conventions used. + rfft : Forward FFT of real input. + + Notes + ----- + For a 1-D signal ``x`` to have a real spectrum, it must satisfy + the Hermitian property:: + + x[i] == np.conj(x[-i]) for all i + + This generalizes into higher dimensions by reflecting over each axis in + turn:: + + x[i, j, k, ...] == np.conj(x[-i, -j, -k, ...]) for all i, j, k, ... + + This should not be confused with a Hermitian matrix, for which the + transpose is its own conjugate:: + + x[i, j] == np.conj(x[j, i]) for all i, j + + + The default value of `s` assumes an even output length in the final + transformation axis. When performing the final complex to real + transformation, the Hermitian symmetry requires that the last imaginary + component along that axis must be 0 and so it is ignored. To avoid losing + information, the correct length of the real input *must* be given. 
+ + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.ones((3, 2, 2)) + >>> scipy.fft.hfftn(x) + array([[[12., 0.], + [ 0., 0.]], + [[ 0., 0.], + [ 0., 0.]], + [[ 0., 0.], + [ 0., 0.]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D FFT of a Hermitian complex array. + + Parameters + ---------- + x : array + Input array, taken to be Hermitian complex. + s : sequence of ints, optional + Shape of the real output. + axes : sequence of ints, optional + Axes over which to compute the FFT. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See `fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The real result of the 2-D Hermitian complex real FFT. + + See Also + -------- + hfftn : Compute the N-D discrete Fourier Transform for Hermitian + complex input. + + Notes + ----- + This is really just `hfftn` with different default behavior. + For more details see `hfftn`. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D inverse discrete Fourier Transform for a real + spectrum. 
+ + This function computes the N-D inverse discrete Fourier Transform + over any number of axes in an M-D real array by means of the Fast + Fourier Transform (FFT). By default, all axes are transformed, with the + real transform performed over the last axis, while the remaining transforms + are complex. + + Parameters + ---------- + x : array_like + Input array, taken to be real. + s : sequence of ints, optional + Shape (length along each transformed axis) to use from the input. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `x`, + as explained in the parameters section above. + The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. 
+ + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + hfftn : The forward N-D FFT of Hermitian input. + hfft : The 1-D FFT of Hermitian input. + fft : The 1-D FFT, with definitions and conventions used. + fftn : The N-D FFT. + hfft2 : The 2-D FFT of Hermitian input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `ihfft`, then the transform over the remaining axes is + performed as by `ifftn`. The order of the output is the positive part of + the Hermitian output signal, in the same format as `rfft`. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.ones((2, 2, 2)) + >>> scipy.fft.ihfftn(x) + array([[[1.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + >>> scipy.fft.ihfftn(x, axes=(2, 0)) + array([[[1.+0.j, 0.+0.j], # may vary + [1.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D inverse FFT of a real spectrum. + + Parameters + ---------- + x : array_like + The input array + s : sequence of ints, optional + Shape of the real input to the inverse FFT. + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default is the last two axes. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. 
+ plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + ihfftn : Compute the inverse of the N-D FFT of Hermitian input. + + Notes + ----- + This is really `ihfftn` with different defaults. + For more details see `ihfftn`. + + """ + return (Dispatchable(x, np.ndarray),) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_basic_backend.py b/venv/lib/python3.10/site-packages/scipy/fft/_basic_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..14b77661482d8b2284dd129d98b04a9b7947867e --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_basic_backend.py @@ -0,0 +1,176 @@ +from scipy._lib._array_api import ( + array_namespace, is_numpy, xp_unsupported_param_msg, is_complex +) +from . import _pocketfft +import numpy as np + + +def _validate_fft_args(workers, plan, norm): + if workers is not None: + raise ValueError(xp_unsupported_param_msg("workers")) + if plan is not None: + raise ValueError(xp_unsupported_param_msg("plan")) + if norm is None: + norm = 'backward' + return norm + + +# pocketfft is used whenever SCIPY_ARRAY_API is not set, +# or x is a NumPy array or array-like. +# When SCIPY_ARRAY_API is set, we try to use xp.fft for CuPy arrays, +# PyTorch arrays and other array API standard supporting objects. +# If xp.fft does not exist, we attempt to convert to np and back to use pocketfft. 
+ +def _execute_1D(func_str, pocketfft_func, x, n, axis, norm, overwrite_x, workers, plan): + xp = array_namespace(x) + + if is_numpy(xp): + return pocketfft_func(x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + norm = _validate_fft_args(workers, plan, norm) + if hasattr(xp, 'fft'): + xp_func = getattr(xp.fft, func_str) + return xp_func(x, n=n, axis=axis, norm=norm) + + x = np.asarray(x) + y = pocketfft_func(x, n=n, axis=axis, norm=norm) + return xp.asarray(y) + + +def _execute_nD(func_str, pocketfft_func, x, s, axes, norm, overwrite_x, workers, plan): + xp = array_namespace(x) + + if is_numpy(xp): + return pocketfft_func(x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + norm = _validate_fft_args(workers, plan, norm) + if hasattr(xp, 'fft'): + xp_func = getattr(xp.fft, func_str) + return xp_func(x, s=s, axes=axes, norm=norm) + + x = np.asarray(x) + y = pocketfft_func(x, s=s, axes=axes, norm=norm) + return xp.asarray(y) + + +def fft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('fft', _pocketfft.fft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + return _execute_1D('ifft', _pocketfft.ifft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def rfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('rfft', _pocketfft.rfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def irfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('irfft', _pocketfft.irfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def hfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, 
plan=None): + return _execute_1D('hfft', _pocketfft.hfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def ihfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('ihfft', _pocketfft.ihfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def fftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('fftn', _pocketfft.fftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + + +def ifftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('ifftn', _pocketfft.ifftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def fft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return fftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def ifft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return ifftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def rfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('rfftn', _pocketfft.rfftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def rfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return rfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def irfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('irfftn', _pocketfft.irfftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def irfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return irfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def 
_swap_direction(norm): + if norm in (None, 'backward'): + norm = 'forward' + elif norm == 'forward': + norm = 'backward' + elif norm != 'ortho': + raise ValueError('Invalid norm value %s; should be "backward", ' + '"ortho", or "forward".' % norm) + return norm + + +def hfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + xp = array_namespace(x) + if is_numpy(xp): + return _pocketfft.hfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + if is_complex(x, xp): + x = xp.conj(x) + return irfftn(x, s, axes, _swap_direction(norm), + overwrite_x, workers, plan=plan) + + +def hfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return hfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def ihfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + xp = array_namespace(x) + if is_numpy(xp): + return _pocketfft.ihfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + return xp.conj(rfftn(x, s, axes, _swap_direction(norm), + overwrite_x, workers, plan=plan)) + +def ihfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return ihfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_debug_backends.py b/venv/lib/python3.10/site-packages/scipy/fft/_debug_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..c9647c5d6ceddc73b97d95f562662ada02c1ae74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_debug_backends.py @@ -0,0 +1,22 @@ +import numpy as np + +class NumPyBackend: + """Backend that uses numpy.fft""" + __ua_domain__ = "numpy.scipy.fft" + + @staticmethod + def __ua_function__(method, args, kwargs): + kwargs.pop("overwrite_x", None) + + fn = getattr(np.fft, method.__name__, None) + return (NotImplemented if fn is None + else fn(*args, **kwargs)) + + +class EchoBackend: + """Backend that just prints the 
__ua_function__ arguments""" + __ua_domain__ = "numpy.scipy.fft" + + @staticmethod + def __ua_function__(method, args, kwargs): + print(method, args, kwargs, sep='\n') diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_fftlog.py b/venv/lib/python3.10/site-packages/scipy/fft/_fftlog.py new file mode 100644 index 0000000000000000000000000000000000000000..8960242989c7c1d062af4fe1960c2384abaab94f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_fftlog.py @@ -0,0 +1,223 @@ +"""Fast Hankel transforms using the FFTLog algorithm. + +The implementation closely follows the Fortran code of Hamilton (2000). + +added: 14/11/2020 Nicolas Tessore +""" + +from ._basic import _dispatch +from scipy._lib.uarray import Dispatchable +from ._fftlog_backend import fhtoffset +import numpy as np + +__all__ = ['fht', 'ifht', 'fhtoffset'] + + +@_dispatch +def fht(a, dln, mu, offset=0.0, bias=0.0): + r'''Compute the fast Hankel transform. + + Computes the discrete Hankel transform of a logarithmically spaced periodic + sequence using the FFTLog algorithm [1]_, [2]_. + + Parameters + ---------- + a : array_like (..., n) + Real periodic input array, uniformly logarithmically spaced. For + multidimensional input, the transform is performed over the last axis. + dln : float + Uniform logarithmic spacing of the input array. + mu : float + Order of the Hankel transform, any positive or negative real number. + offset : float, optional + Offset of the uniform logarithmic spacing of the output array. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + A : array_like (..., n) + The transformed output array, which is real, periodic, uniformly + logarithmically spaced, and of the same shape as the input array. + + See Also + -------- + ifht : The inverse of `fht`. + fhtoffset : Return an optimal offset for `fht`. + + Notes + ----- + This function computes a discrete version of the Hankel transform + + .. 
math:: + + A(k) = \int_{0}^{\infty} \! a(r) \, J_\mu(kr) \, k \, dr \;, + + where :math:`J_\mu` is the Bessel function of order :math:`\mu`. The index + :math:`\mu` may be any real number, positive or negative. Note that the + numerical Hankel transform uses an integrand of :math:`k \, dr`, while the + mathematical Hankel transform is commonly defined using :math:`r \, dr`. + + The input array `a` is a periodic sequence of length :math:`n`, uniformly + logarithmically spaced with spacing `dln`, + + .. math:: + + a_j = a(r_j) \;, \quad + r_j = r_c \exp[(j-j_c) \, \mathtt{dln}] + + centred about the point :math:`r_c`. Note that the central index + :math:`j_c = (n-1)/2` is half-integral if :math:`n` is even, so that + :math:`r_c` falls between two input elements. Similarly, the output + array `A` is a periodic sequence of length :math:`n`, also uniformly + logarithmically spaced with spacing `dln` + + .. math:: + + A_j = A(k_j) \;, \quad + k_j = k_c \exp[(j-j_c) \, \mathtt{dln}] + + centred about the point :math:`k_c`. + + The centre points :math:`r_c` and :math:`k_c` of the periodic intervals may + be chosen arbitrarily, but it would be usual to choose the product + :math:`k_c r_c = k_j r_{n-1-j} = k_{n-1-j} r_j` to be unity. This can be + changed using the `offset` parameter, which controls the logarithmic offset + :math:`\log(k_c) = \mathtt{offset} - \log(r_c)` of the output array. + Choosing an optimal value for `offset` may reduce ringing of the discrete + Hankel transform. + + If the `bias` parameter is nonzero, this function computes a discrete + version of the biased Hankel transform + + .. math:: + + A(k) = \int_{0}^{\infty} \! a_q(r) \, (kr)^q \, J_\mu(kr) \, k \, dr + + where :math:`q` is the value of `bias`, and a power law bias + :math:`a_q(r) = a(r) \, (kr)^{-q}` is applied to the input sequence. 
+ Biasing the transform can help approximate the continuous transform of + :math:`a(r)` if there is a value :math:`q` such that :math:`a_q(r)` is + close to a periodic sequence, in which case the resulting :math:`A(k)` will + be close to the continuous transform. + + References + ---------- + .. [1] Talman J. D., 1978, J. Comp. Phys., 29, 35 + .. [2] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191) + + Examples + -------- + + This example is the adapted version of ``fftlogtest.f`` which is provided + in [2]_. It evaluates the integral + + .. math:: + + \int^\infty_0 r^{\mu+1} \exp(-r^2/2) J_\mu(k, r) k dr + = k^{\mu+1} \exp(-k^2/2) . + + >>> import numpy as np + >>> from scipy import fft + >>> import matplotlib.pyplot as plt + + Parameters for the transform. + + >>> mu = 0.0 # Order mu of Bessel function + >>> r = np.logspace(-7, 1, 128) # Input evaluation points + >>> dln = np.log(r[1]/r[0]) # Step size + >>> offset = fft.fhtoffset(dln, initial=-6*np.log(10), mu=mu) + >>> k = np.exp(offset)/r[::-1] # Output evaluation points + + Define the analytical function. + + >>> def f(x, mu): + ... """Analytical function: x^(mu+1) exp(-x^2/2).""" + ... return x**(mu + 1)*np.exp(-x**2/2) + + Evaluate the function at ``r`` and compute the corresponding values at + ``k`` using FFTLog. + + >>> a_r = f(r, mu) + >>> fht = fft.fht(a_r, dln, mu=mu, offset=offset) + + For this example we can actually compute the analytical response (which in + this case is the same as the input function) for comparison and compute the + relative error. + + >>> a_k = f(k, mu) + >>> rel_err = abs((fht-a_k)/a_k) + + Plot the result. 
+ + >>> figargs = {'sharex': True, 'sharey': True, 'constrained_layout': True} + >>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), **figargs) + >>> ax1.set_title(r'$r^{\mu+1}\ \exp(-r^2/2)$') + >>> ax1.loglog(r, a_r, 'k', lw=2) + >>> ax1.set_xlabel('r') + >>> ax2.set_title(r'$k^{\mu+1} \exp(-k^2/2)$') + >>> ax2.loglog(k, a_k, 'k', lw=2, label='Analytical') + >>> ax2.loglog(k, fht, 'C3--', lw=2, label='FFTLog') + >>> ax2.set_xlabel('k') + >>> ax2.legend(loc=3, framealpha=1) + >>> ax2.set_ylim([1e-10, 1e1]) + >>> ax2b = ax2.twinx() + >>> ax2b.loglog(k, rel_err, 'C0', label='Rel. Error (-)') + >>> ax2b.set_ylabel('Rel. Error (-)', color='C0') + >>> ax2b.tick_params(axis='y', labelcolor='C0') + >>> ax2b.legend(loc=4, framealpha=1) + >>> ax2b.set_ylim([1e-9, 1e-3]) + >>> plt.show() + + ''' + return (Dispatchable(a, np.ndarray),) + + +@_dispatch +def ifht(A, dln, mu, offset=0.0, bias=0.0): + r"""Compute the inverse fast Hankel transform. + + Computes the discrete inverse Hankel transform of a logarithmically spaced + periodic sequence. This is the inverse operation to `fht`. + + Parameters + ---------- + A : array_like (..., n) + Real periodic input array, uniformly logarithmically spaced. For + multidimensional input, the transform is performed over the last axis. + dln : float + Uniform logarithmic spacing of the input array. + mu : float + Order of the Hankel transform, any positive or negative real number. + offset : float, optional + Offset of the uniform logarithmic spacing of the output array. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + a : array_like (..., n) + The transformed output array, which is real, periodic, uniformly + logarithmically spaced, and of the same shape as the input array. + + See Also + -------- + fht : Definition of the fast Hankel transform. + fhtoffset : Return an optimal offset for `ifht`. 
+ + Notes + ----- + This function computes a discrete version of the Hankel transform + + .. math:: + + a(r) = \int_{0}^{\infty} \! A(k) \, J_\mu(kr) \, r \, dk \;, + + where :math:`J_\mu` is the Bessel function of order :math:`\mu`. The index + :math:`\mu` may be any real number, positive or negative. Note that the + numerical inverse Hankel transform uses an integrand of :math:`r \, dk`, while the + mathematical inverse Hankel transform is commonly defined using :math:`k \, dk`. + + See `fht` for further details. + """ + return (Dispatchable(A, np.ndarray),) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py b/venv/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..0f262b22af74a888633069b96ab408cd4a936f32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py @@ -0,0 +1,197 @@ +import numpy as np +from warnings import warn +from ._basic import rfft, irfft +from ..special import loggamma, poch + +from scipy._lib._array_api import array_namespace, copy + +__all__ = ['fht', 'ifht', 'fhtoffset'] + +# constants +LN_2 = np.log(2) + + +def fht(a, dln, mu, offset=0.0, bias=0.0): + xp = array_namespace(a) + + # size of transform + n = a.shape[-1] + + # bias input array + if bias != 0: + # a_q(r) = a(r) (r/r_c)^{-q} + j_c = (n-1)/2 + j = xp.arange(n, dtype=xp.float64) + a = a * xp.exp(-bias*(j - j_c)*dln) + + # compute FHT coefficients + u = xp.asarray(fhtcoeff(n, dln, mu, offset=offset, bias=bias)) + + # transform + A = _fhtq(a, u, xp=xp) + + # bias output array + if bias != 0: + # A(k) = A_q(k) (k/k_c)^{-q} (k_c r_c)^{-q} + A *= xp.exp(-bias*((j - j_c)*dln + offset)) + + return A + + +def ifht(A, dln, mu, offset=0.0, bias=0.0): + xp = array_namespace(A) + + # size of transform + n = A.shape[-1] + + # bias input array + if bias != 0: + # A_q(k) = A(k) (k/k_c)^{q} (k_c r_c)^{q} + j_c = (n-1)/2 + j = xp.arange(n, dtype=xp.float64) + A = A * 
xp.exp(bias*((j - j_c)*dln + offset)) + + # compute FHT coefficients + u = xp.asarray(fhtcoeff(n, dln, mu, offset=offset, bias=bias, inverse=True)) + + # transform + a = _fhtq(A, u, inverse=True, xp=xp) + + # bias output array + if bias != 0: + # a(r) = a_q(r) (r/r_c)^{q} + a /= xp.exp(-bias*(j - j_c)*dln) + + return a + + +def fhtcoeff(n, dln, mu, offset=0.0, bias=0.0, inverse=False): + """Compute the coefficient array for a fast Hankel transform.""" + lnkr, q = offset, bias + + # Hankel transform coefficients + # u_m = (kr)^{-i 2m pi/(n dlnr)} U_mu(q + i 2m pi/(n dlnr)) + # with U_mu(x) = 2^x Gamma((mu+1+x)/2)/Gamma((mu+1-x)/2) + xp = (mu+1+q)/2 + xm = (mu+1-q)/2 + y = np.linspace(0, np.pi*(n//2)/(n*dln), n//2+1) + u = np.empty(n//2+1, dtype=complex) + v = np.empty(n//2+1, dtype=complex) + u.imag[:] = y + u.real[:] = xm + loggamma(u, out=v) + u.real[:] = xp + loggamma(u, out=u) + y *= 2*(LN_2 - lnkr) + u.real -= v.real + u.real += LN_2*q + u.imag += v.imag + u.imag += y + np.exp(u, out=u) + + # fix last coefficient to be real + u.imag[-1] = 0 + + # deal with special cases + if not np.isfinite(u[0]): + # write u_0 = 2^q Gamma(xp)/Gamma(xm) = 2^q poch(xm, xp-xm) + # poch() handles special cases for negative integers correctly + u[0] = 2**q * poch(xm, xp-xm) + # the coefficient may be inf or 0, meaning the transform or the + # inverse transform, respectively, is singular + + # check for singular transform or singular inverse transform + if np.isinf(u[0]) and not inverse: + warn('singular transform; consider changing the bias', stacklevel=3) + # fix coefficient to obtain (potentially correct) transform anyway + u = copy(u) + u[0] = 0 + elif u[0] == 0 and inverse: + warn('singular inverse transform; consider changing the bias', stacklevel=3) + # fix coefficient to obtain (potentially correct) inverse anyway + u = copy(u) + u[0] = np.inf + + return u + + +def fhtoffset(dln, mu, initial=0.0, bias=0.0): + """Return optimal offset for a fast Hankel transform. 
+ + Returns an offset close to `initial` that fulfils the low-ringing + condition of [1]_ for the fast Hankel transform `fht` with logarithmic + spacing `dln`, order `mu` and bias `bias`. + + Parameters + ---------- + dln : float + Uniform logarithmic spacing of the transform. + mu : float + Order of the Hankel transform, any positive or negative real number. + initial : float, optional + Initial value for the offset. Returns the closest value that fulfils + the low-ringing condition. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + offset : float + Optimal offset of the uniform logarithmic spacing of the transform that + fulfils a low-ringing condition. + + Examples + -------- + >>> from scipy.fft import fhtoffset + >>> dln = 0.1 + >>> mu = 2.0 + >>> initial = 0.5 + >>> bias = 0.0 + >>> offset = fhtoffset(dln, mu, initial, bias) + >>> offset + 0.5454581477676637 + + See Also + -------- + fht : Definition of the fast Hankel transform. + + References + ---------- + .. [1] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191) + + """ + + lnkr, q = initial, bias + + xp = (mu+1+q)/2 + xm = (mu+1-q)/2 + y = np.pi/(2*dln) + zp = loggamma(xp + 1j*y) + zm = loggamma(xm + 1j*y) + arg = (LN_2 - lnkr)/dln + (zp.imag + zm.imag)/np.pi + return lnkr + (arg - np.round(arg))*dln + + +def _fhtq(a, u, inverse=False, *, xp=None): + """Compute the biased fast Hankel transform. + + This is the basic FFTLog routine. 
+ """ + if xp is None: + xp = np + + # size of transform + n = a.shape[-1] + + # biased fast Hankel transform via real FFT + A = rfft(a, axis=-1) + if not inverse: + # forward transform + A *= u + else: + # backward transform + A /= xp.conj(u) + A = irfft(A, n, axis=-1) + A = xp.flip(A, axis=-1) + + return A diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_helper.py b/venv/lib/python3.10/site-packages/scipy/fft/_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa9a107be35f9efacf4b15f958ad57787723443 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_helper.py @@ -0,0 +1,313 @@ +from functools import update_wrapper, lru_cache +import inspect + +from ._pocketfft import helper as _helper + +import numpy as np +from scipy._lib._array_api import array_namespace + + +def next_fast_len(target, real=False): + """Find the next fast size of input data to ``fft``, for zero-padding, etc. + + SciPy's FFT algorithms gain their speed by a recursive divide and conquer + strategy. This relies on efficient functions for small prime factors of the + input length. Thus, the transforms are fastest when using composites of the + prime factors handled by the fft implementation. If there are efficient + functions for all radices <= `n`, then the result will be a number `x` + >= ``target`` with only prime factors < `n`. (Also known as `n`-smooth + numbers) + + Parameters + ---------- + target : int + Length to start searching from. Must be a positive integer. + real : bool, optional + True if the FFT involves real input or output (e.g., `rfft` or `hfft` + but not `fft`). Defaults to False. + + Returns + ------- + out : int + The smallest fast length greater than or equal to ``target``. + + Notes + ----- + The result of this function may change in future as performance + considerations change, for example, if new prime factors are added. + + Calling `fft` or `ifft` with real input data performs an ``'R2C'`` + transform internally. 
+ + Examples + -------- + On a particular machine, an FFT of prime length takes 11.4 ms: + + >>> from scipy import fft + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> min_len = 93059 # prime length is worst case for speed + >>> a = rng.standard_normal(min_len) + >>> b = fft.fft(a) + + Zero-padding to the next regular length reduces computation time to + 1.6 ms, a speedup of 7.3 times: + + >>> fft.next_fast_len(min_len, real=True) + 93312 + >>> b = fft.fft(a, 93312) + + Rounding up to the next power of 2 is not optimal, taking 3.0 ms to + compute; 1.9 times longer than the size given by ``next_fast_len``: + + >>> b = fft.fft(a, 131072) + + """ + pass + + +# Directly wrap the c-function good_size but take the docstring etc., from the +# next_fast_len function above +_sig = inspect.signature(next_fast_len) +next_fast_len = update_wrapper(lru_cache(_helper.good_size), next_fast_len) +next_fast_len.__wrapped__ = _helper.good_size +next_fast_len.__signature__ = _sig + + +def _init_nd_shape_and_axes(x, shape, axes): + """Handle shape and axes arguments for N-D transforms. + + Returns the shape and axes in a standard form, taking into account negative + values and checking for various potential errors. + + Parameters + ---------- + x : array_like + The input array. + shape : int or array_like of ints or None + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If `shape` is -1, the size of the corresponding dimension of `x` is + used. + axes : int or array_like of ints or None + Axes along which the calculation is computed. + The default is over all axes. + Negative indices are automatically converted to their positive + counterparts. + + Returns + ------- + shape : tuple + The shape of the result as a tuple of integers. + axes : list + Axes along which the calculation is computed, as a list of integers. 
+ + """ + x = np.asarray(x) + return _helper._init_nd_shape_and_axes(x, shape, axes) + + +def fftfreq(n, d=1.0, *, xp=None, device=None): + """Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + xp : array_namespace, optional + The namespace for the return array. Default is None, where NumPy is used. + device : device, optional + The device for the return array. + Only valid when `xp.fft.fftfreq` implements the device parameter. + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> import scipy.fft + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = scipy.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = scipy.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + xp = np if xp is None else xp + # numpy does not yet support the `device` keyword + # `xp.__name__ != 'numpy'` should be removed when numpy is compatible + if hasattr(xp, 'fft') and xp.__name__ != 'numpy': + return xp.fft.fftfreq(n, d=d, device=device) + if device is not None: + raise ValueError('device parameter is not supported for input array type') + return np.fft.fftfreq(n, d=d) + + +def rfftfreq(n, d=1.0, *, xp=None, device=None): + """Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). 
+ + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + xp : array_namespace, optional + The namespace for the return array. Default is None, where NumPy is used. + device : device, optional + The device for the return array. + Only valid when `xp.fft.rfftfreq` implements the device parameter. + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> import scipy.fft + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = scipy.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = scipy.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = scipy.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + xp = np if xp is None else xp + # numpy does not yet support the `device` keyword + # `xp.__name__ != 'numpy'` should be removed when numpy is compatible + if hasattr(xp, 'fft') and xp.__name__ != 'numpy': + return xp.fft.rfftfreq(n, d=d, device=device) + if device is not None: + raise ValueError('device parameter is not supported for input array type') + return np.fft.rfftfreq(n, d=d) + + +def fftshift(x, axes=None): + """Shift the zero-frequency component to the center of the spectrum. 
def ifftshift(x, axes=None):
    """Undo `fftshift`.

    Identical to `fftshift` for even-length axes; the two functions differ
    by one sample for odd-length axes.

    Parameters
    ----------
    x : array_like
        Input array.
    axes : int or shape tuple, optional
        Axes over which to calculate. Defaults to None, which shifts all axes.

    Returns
    -------
    y : ndarray
        The shifted array.

    See Also
    --------
    fftshift : Shift zero-frequency component to the center of the spectrum.
    """
    xp = array_namespace(x)
    # Prefer the array library's native implementation when it provides one.
    if hasattr(xp, 'fft'):
        return xp.fft.ifftshift(x, axes=axes)
    # Otherwise round-trip through NumPy and convert back to the caller's
    # array namespace.
    arr = np.asarray(x)
    shifted = np.fft.ifftshift(arr, axes=axes)
    return xp.asarray(shifted)
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__init__.py b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0671484c9a0780df353b9b783813b6fa7492d38d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__init__.py @@ -0,0 +1,9 @@ +""" FFT backend using pypocketfft """ + +from .basic import * +from .realtransforms import * +from .helper import * + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aaf52807901488ff83da19e753ffa3bc5e303f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d16baca1133fdd463b5a5c77b14ea690f6f54e7b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7256c1a57111ab32af7f08dce921377da3a3f5e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a04a077d11c58ddcf29f5d5006eaa797b1bea78 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/basic.py b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2d0d33958021c431171b72f72c37363ac98e03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/basic.py @@ -0,0 +1,251 @@ +""" +Discrete Fourier Transforms - basic.py +""" +import numpy as np +import functools +from . import pypocketfft as pfft +from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied, + _fix_shape, _fix_shape_1d, _normalization, + _workers) + +def c2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ Return discrete Fourier transform of real or complex sequence. 
""" + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + overwrite_x = overwrite_x or _datacopied(tmp, x) + norm = _normalization(norm, forward) + workers = _workers(workers) + + if n is not None: + tmp, copied = _fix_shape_1d(tmp, n, axis) + overwrite_x = overwrite_x or copied + elif tmp.shape[axis] < 1: + message = f"invalid number of data points ({tmp.shape[axis]}) specified" + raise ValueError(message) + + out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None) + + return pfft.c2c(tmp, (axis,), forward, norm, out, workers) + + +fft = functools.partial(c2c, True) +fft.__name__ = 'fft' +ifft = functools.partial(c2c, False) +ifft.__name__ = 'ifft' + + +def r2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ + Discrete Fourier transform of a real sequence. + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + norm = _normalization(norm, forward) + workers = _workers(workers) + + if not np.isrealobj(tmp): + raise TypeError("x must be a real sequence") + + if n is not None: + tmp, _ = _fix_shape_1d(tmp, n, axis) + elif tmp.shape[axis] < 1: + raise ValueError(f"invalid number of data points ({tmp.shape[axis]}) specified") + + # Note: overwrite_x is not utilised + return pfft.r2c(tmp, (axis,), forward, norm, None, workers) + + +rfft = functools.partial(r2c, True) +rfft.__name__ = 'rfft' +ihfft = functools.partial(r2c, False) +ihfft.__name__ = 'ihfft' + + +def c2r(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ + Return inverse discrete Fourier transform of real sequence x. 
+ """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + norm = _normalization(norm, forward) + workers = _workers(workers) + + # TODO: Optimize for hermitian and real? + if np.isrealobj(tmp): + tmp = tmp + 0.j + + # Last axis utilizes hermitian symmetry + if n is None: + n = (tmp.shape[axis] - 1) * 2 + if n < 1: + raise ValueError(f"Invalid number of data points ({n}) specified") + else: + tmp, _ = _fix_shape_1d(tmp, (n//2) + 1, axis) + + # Note: overwrite_x is not utilized + return pfft.c2r(tmp, (axis,), n, forward, norm, None, workers) + + +hfft = functools.partial(c2r, True) +hfft.__name__ = 'hfft' +irfft = functools.partial(c2r, False) +irfft.__name__ = 'irfft' + + +def hfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None, + *, plan=None): + """ + 2-D discrete Fourier transform of a Hermitian sequence + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + return hfftn(x, s, axes, norm, overwrite_x, workers) + + +def ihfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None, + *, plan=None): + """ + 2-D discrete inverse Fourier transform of a Hermitian sequence + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + return ihfftn(x, s, axes, norm, overwrite_x, workers) + + +def c2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ + Return multidimensional discrete Fourier transform. 
+ """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + + shape, axes = _init_nd_shape_and_axes(tmp, s, axes) + overwrite_x = overwrite_x or _datacopied(tmp, x) + workers = _workers(workers) + + if len(axes) == 0: + return x + + tmp, copied = _fix_shape(tmp, shape, axes) + overwrite_x = overwrite_x or copied + + norm = _normalization(norm, forward) + out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None) + + return pfft.c2c(tmp, axes, forward, norm, out, workers) + + +fftn = functools.partial(c2cn, True) +fftn.__name__ = 'fftn' +ifftn = functools.partial(c2cn, False) +ifftn.__name__ = 'ifftn' + +def r2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """Return multidimensional discrete Fourier transform of real input""" + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + + if not np.isrealobj(tmp): + raise TypeError("x must be a real sequence") + + shape, axes = _init_nd_shape_and_axes(tmp, s, axes) + tmp, _ = _fix_shape(tmp, shape, axes) + norm = _normalization(norm, forward) + workers = _workers(workers) + + if len(axes) == 0: + raise ValueError("at least 1 axis must be transformed") + + # Note: overwrite_x is not utilized + return pfft.r2c(tmp, axes, forward, norm, None, workers) + + +rfftn = functools.partial(r2cn, True) +rfftn.__name__ = 'rfftn' +ihfftn = functools.partial(r2cn, False) +ihfftn.__name__ = 'ihfftn' + + +def c2rn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """Multidimensional inverse discrete fourier transform with real output""" + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + + # TODO: Optimize for hermitian and real? 
+ if np.isrealobj(tmp): + tmp = tmp + 0.j + + noshape = s is None + shape, axes = _init_nd_shape_and_axes(tmp, s, axes) + + if len(axes) == 0: + raise ValueError("at least 1 axis must be transformed") + + shape = list(shape) + if noshape: + shape[-1] = (x.shape[axes[-1]] - 1) * 2 + + norm = _normalization(norm, forward) + workers = _workers(workers) + + # Last axis utilizes hermitian symmetry + lastsize = shape[-1] + shape[-1] = (shape[-1] // 2) + 1 + + tmp, _ = tuple(_fix_shape(tmp, shape, axes)) + + # Note: overwrite_x is not utilized + return pfft.c2r(tmp, axes, lastsize, forward, norm, None, workers) + + +hfftn = functools.partial(c2rn, True) +hfftn.__name__ = 'hfftn' +irfftn = functools.partial(c2rn, False) +irfftn.__name__ = 'irfftn' + + +def r2r_fftpack(forward, x, n=None, axis=-1, norm=None, overwrite_x=False): + """FFT of a real sequence, returning fftpack half complex format""" + tmp = _asfarray(x) + overwrite_x = overwrite_x or _datacopied(tmp, x) + norm = _normalization(norm, forward) + workers = _workers(None) + + if tmp.dtype.kind == 'c': + raise TypeError('x must be a real sequence') + + if n is not None: + tmp, copied = _fix_shape_1d(tmp, n, axis) + overwrite_x = overwrite_x or copied + elif tmp.shape[axis] < 1: + raise ValueError(f"invalid number of data points ({tmp.shape[axis]}) specified") + + out = (tmp if overwrite_x else None) + + return pfft.r2r_fftpack(tmp, (axis,), forward, forward, norm, out, workers) + + +rfft_fftpack = functools.partial(r2r_fftpack, True) +rfft_fftpack.__name__ = 'rfft_fftpack' +irfft_fftpack = functools.partial(r2r_fftpack, False) +irfft_fftpack.__name__ = 'irfft_fftpack' diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/helper.py b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..47e54063fe8e790dc4a92126a7ea0fc69e7b2bb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/helper.py @@ -0,0 
+1,221 @@ +from numbers import Number +import operator +import os +import threading +import contextlib + +import numpy as np + +from scipy._lib._util import copy_if_needed + +# good_size is exposed (and used) from this import +from .pypocketfft import good_size + + +__all__ = ['good_size', 'set_workers', 'get_workers'] + +_config = threading.local() +_cpu_count = os.cpu_count() + + +def _iterable_of_int(x, name=None): + """Convert ``x`` to an iterable sequence of int + + Parameters + ---------- + x : value, or sequence of values, convertible to int + name : str, optional + Name of the argument being converted, only used in the error message + + Returns + ------- + y : ``List[int]`` + """ + if isinstance(x, Number): + x = (x,) + + try: + x = [operator.index(a) for a in x] + except TypeError as e: + name = name or "value" + raise ValueError(f"{name} must be a scalar or iterable of integers") from e + + return x + + +def _init_nd_shape_and_axes(x, shape, axes): + """Handles shape and axes arguments for nd transforms""" + noshape = shape is None + noaxes = axes is None + + if not noaxes: + axes = _iterable_of_int(axes, 'axes') + axes = [a + x.ndim if a < 0 else a for a in axes] + + if any(a >= x.ndim or a < 0 for a in axes): + raise ValueError("axes exceeds dimensionality of input") + if len(set(axes)) != len(axes): + raise ValueError("all axes must be unique") + + if not noshape: + shape = _iterable_of_int(shape, 'shape') + + if axes and len(axes) != len(shape): + raise ValueError("when given, axes and shape arguments" + " have to be of the same length") + if noaxes: + if len(shape) > x.ndim: + raise ValueError("shape requires more axes than are present") + axes = range(x.ndim - len(shape), x.ndim) + + shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)] + elif noaxes: + shape = list(x.shape) + axes = range(x.ndim) + else: + shape = [x.shape[a] for a in axes] + + if any(s < 1 for s in shape): + raise ValueError( + f"invalid number of data points 
({shape}) specified") + + return tuple(shape), list(axes) + + +def _asfarray(x): + """ + Convert to array with floating or complex dtype. + + float16 values are also promoted to float32. + """ + if not hasattr(x, "dtype"): + x = np.asarray(x) + + if x.dtype == np.float16: + return np.asarray(x, np.float32) + elif x.dtype.kind not in 'fc': + return np.asarray(x, np.float64) + + # Require native byte order + dtype = x.dtype.newbyteorder('=') + # Always align input + copy = True if not x.flags['ALIGNED'] else copy_if_needed + return np.array(x, dtype=dtype, copy=copy) + +def _datacopied(arr, original): + """ + Strict check for `arr` not sharing any data with `original`, + under the assumption that arr = asarray(original) + """ + if arr is original: + return False + if not isinstance(original, np.ndarray) and hasattr(original, '__array__'): + return False + return arr.base is None + + +def _fix_shape(x, shape, axes): + """Internal auxiliary function for _raw_fft, _raw_fftnd.""" + must_copy = False + + # Build an nd slice with the dimensions to be read from x + index = [slice(None)]*x.ndim + for n, ax in zip(shape, axes): + if x.shape[ax] >= n: + index[ax] = slice(0, n) + else: + index[ax] = slice(0, x.shape[ax]) + must_copy = True + + index = tuple(index) + + if not must_copy: + return x[index], False + + s = list(x.shape) + for n, axis in zip(shape, axes): + s[axis] = n + + z = np.zeros(s, x.dtype) + z[index] = x[index] + return z, True + + +def _fix_shape_1d(x, n, axis): + if n < 1: + raise ValueError( + f"invalid number of data points ({n}) specified") + + return _fix_shape(x, (n,), (axis,)) + + +_NORM_MAP = {None: 0, 'backward': 0, 'ortho': 1, 'forward': 2} + + +def _normalization(norm, forward): + """Returns the pypocketfft normalization mode from the norm argument""" + try: + inorm = _NORM_MAP[norm] + return inorm if forward else (2 - inorm) + except KeyError: + raise ValueError( + f'Invalid norm value {norm!r}, should ' + 'be "backward", "ortho" or "forward"') 
def _workers(workers):
    """Resolve a ``workers`` argument into a positive worker count.

    None falls back to the per-thread default installed by `set_workers`;
    negative values count back from the CPU total (-1 means all CPUs);
    zero is rejected.
    """
    if workers is None:
        return getattr(_config, 'default_workers', 1)

    if workers == 0:
        raise ValueError("workers must not be zero")

    if workers > 0:
        return workers

    # Negative counts index back from the CPU total, like sequence indexing.
    if workers < -_cpu_count:
        raise ValueError(f"workers value out of range; got {workers}, must not be"
                         f" less than {-_cpu_count}")
    return workers + 1 + _cpu_count


@contextlib.contextmanager
def set_workers(workers):
    """Context manager for the default number of workers used in `scipy.fft`.

    Parameters
    ----------
    workers : int
        The default number of workers to use

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import fft, signal
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal((128, 64))
    >>> with fft.set_workers(4):
    ...     y = signal.fftconvolve(x, x)

    """
    previous = get_workers()
    _config.default_workers = _workers(operator.index(workers))
    try:
        yield
    finally:
        # Restore the previous default even if the body raised.
        _config.default_workers = previous


def get_workers():
    """Return the default number of workers within the current context.

    Examples
    --------
    >>> from scipy import fft
    >>> fft.get_workers()
    1
    >>> with fft.set_workers(4):
    ...     fft.get_workers()
    4
    """
    return getattr(_config, 'default_workers', 1)
def _r2r(forward, transform, x, type=2, n=None, axis=-1, norm=None,
         overwrite_x=False, workers=None, orthogonalize=None):
    """Forward or backward 1-D DCT/DST.

    Parameters
    ----------
    forward : bool
        Transform direction (determines type and normalisation)
    transform : {pypocketfft.dct, pypocketfft.dst}
        The transform to perform
    """
    tmp = _asfarray(x)
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    norm = _normalization(norm, forward)
    workers = _workers(workers)

    # The inverse of a type-2 transform is type-3 and vice versa; other
    # types are left unchanged.
    if not forward:
        type = 3 if type == 2 else 2 if type == 3 else type

    if n is not None:
        tmp, copied = _fix_shape_1d(tmp, n, axis)
        overwrite_x = overwrite_x or copied
    elif tmp.shape[axis] < 1:
        raise ValueError(f"invalid number of data points ({tmp.shape[axis]}) specified")

    out = tmp if overwrite_x else None

    if np.iscomplexobj(x):
        # Complex input: transform the real and imaginary parts separately.
        out = np.empty_like(tmp) if out is None else out
        transform(tmp.real, type, (axis,), norm, out.real, workers)
        transform(tmp.imag, type, (axis,), norm, out.imag, workers)
        return out

    return transform(tmp, type, (axis,), norm, out, workers, orthogonalize)


dct = functools.partial(_r2r, True, pfft.dct)
dct.__name__ = 'dct'
idct = functools.partial(_r2r, False, pfft.dct)
idct.__name__ = 'idct'

dst = functools.partial(_r2r, True, pfft.dst)
dst.__name__ = 'dst'
idst = functools.partial(_r2r, False, pfft.dst)
idst.__name__ = 'idst'


def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None,
          overwrite_x=False, workers=None, orthogonalize=None):
    """Forward or backward nd DCT/DST.

    Parameters
    ----------
    forward : bool
        Transform direction (determines type and normalisation)
    transform : {pypocketfft.dct, pypocketfft.dst}
        The transform to perform
    """
    tmp = _asfarray(x)

    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
    overwrite_x = overwrite_x or _datacopied(tmp, x)

    if len(axes) == 0:
        # Nothing to transform: hand the input back untouched.
        return x

    tmp, copied = _fix_shape(tmp, shape, axes)
    overwrite_x = overwrite_x or copied

    # Swap types 2 and 3 for the inverse direction; see `_r2r`.
    if not forward:
        type = 3 if type == 2 else 2 if type == 3 else type

    norm = _normalization(norm, forward)
    workers = _workers(workers)
    out = tmp if overwrite_x else None

    if np.iscomplexobj(x):
        # Complex input: transform the real and imaginary parts separately.
        out = np.empty_like(tmp) if out is None else out
        transform(tmp.real, type, axes, norm, out.real, workers)
        transform(tmp.imag, type, axes, norm, out.imag, workers)
        return out

    return transform(tmp, type, axes, norm, out, workers, orthogonalize)


dctn = functools.partial(_r2rn, True, pfft.dct)
dctn.__name__ = 'dctn'
idctn = functools.partial(_r2rn, False, pfft.dct)
idctn.__name__ = 'idctn'

dstn = functools.partial(_r2rn, True, pfft.dst)
dstn.__name__ = 'dstn'
idstn = functools.partial(_r2rn, False, pfft.dst)
idstn.__name__ = 'idstn'
b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aa36d33ffa2132ae6fa07aa693016fe1d4a168d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b180b8737f8fa1dad7fd3e9bcde3d587cf736f76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_basic.py b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..8960cace3e081368d00efbad77059f91cef4dbdd --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_basic.py @@ -0,0 +1,1005 @@ +# Created by Pearu Peterson, September 2002 + +from numpy.testing import (assert_, assert_equal, assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_array_less, + assert_allclose) +import pytest +from pytest import raises as assert_raises +from scipy.fft._pocketfft import (ifft, fft, fftn, ifftn, + rfft, irfft, rfftn, irfftn, + hfft, ihfft, hfftn, ihfftn) + +from numpy import (arange, array, asarray, zeros, dot, exp, pi, + swapaxes, cdouble) +import numpy as np +import numpy.fft +from numpy.random import rand + +# "large" composite numbers supported by FFT._PYPOCKETFFT +LARGE_COMPOSITE_SIZES = [ + 2**13, + 2**5 * 3**5, + 2**3 * 3**3 * 5**2, +] +SMALL_COMPOSITE_SIZES = [ + 2, + 2*3*5, + 2*2*3*3, +] +# prime +LARGE_PRIME_SIZES = [ + 2011 +] 
SMALL_PRIME_SIZES = [
    29
]


def _assert_close_in_norm(x, y, rtol, size, rdt):
    """Assert the relative error of x vs y in the 2-norm, labelled by case."""
    err_msg = f"size: {size} rdt: {rdt}"
    assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)


def random(size):
    """Uniform random array with the given shape tuple."""
    return rand(*size)


def swap_byteorder(arr):
    """Returns the same array with swapped byteorder"""
    return arr.astype(arr.dtype.newbyteorder('S'))


def direct_dft(x):
    """O(n^2) reference DFT used to validate the fast transforms."""
    x = asarray(x)
    n = len(x)
    phase = -arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * phase), x)
    return out


def direct_idft(x):
    """O(n^2) reference inverse DFT (includes the 1/n normalisation)."""
    x = asarray(x)
    n = len(x)
    phase = arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * phase), x) / n
    return out


def direct_dftn(x):
    """Reference n-D DFT: the 1-D fft applied over every axis in turn."""
    x = asarray(x)
    for axis in range(x.ndim):
        x = fft(x, axis=axis)
    return x


def direct_idftn(x):
    """Reference n-D inverse DFT: the 1-D ifft over every axis in turn."""
    x = asarray(x)
    for axis in range(x.ndim):
        x = ifft(x, axis=axis)
    return x


def direct_rdft(x):
    """Reference real-input DFT returning the n//2 + 1 half spectrum."""
    x = asarray(x)
    n = len(x)
    phase = -arange(n) * (2j * pi / n)
    out = zeros(n//2 + 1, dtype=cdouble)
    for k in range(n//2 + 1):
        out[k] = dot(exp(k * phase), x)
    return out


def direct_irdft(x, n):
    """Reference inverse of `direct_rdft`: rebuild the full hermitian
    spectrum from the half spectrum, then take the real inverse DFT."""
    x = asarray(x)
    full = zeros(n, dtype=cdouble)
    for k in range(n//2 + 1):
        full[k] = x[k]
        if k > 0 and 2*k < n:
            full[n - k] = np.conj(x[k])
    return direct_idft(full).real


def direct_rdftn(x):
    """Reference n-D real transform: rfft on the last axis, fft elsewhere."""
    return fftn(rfft(x), axes=range(x.ndim - 1))
self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def _test_n_argument_complex(self): + x1 = np.array([1,2,3,4+1j], dtype=self.cdt) + x2 = np.array([1,2,3,4+1j], dtype=self.cdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(n) + y = fft(x.astype(complex)) + y2 = numpy.fft.fft(x) + assert_array_almost_equal(y,y2) + y = fft(x) + assert_array_almost_equal(y,y2) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft, []) + assert_raises(ValueError, fft, [[1,1],[2,2]], -5) + + +class TestLongDoubleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.clongdouble + self.rdt = np.longdouble + + +class TestDoubleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.cdouble + self.rdt = np.float64 + + +class TestSingleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class TestFloat16FFT: + + def test_1_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft(x1, n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (4, )) + assert_array_almost_equal(y, direct_dft(x1.astype(np.float32))) + + def test_n_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + x2 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft([x1, x2], n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (2, 4)) + assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32))) + assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32))) + + +class _TestIFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt) + y = ifft(x) + y1 = direct_idft(x) + assert_equal(y.dtype, 
self.cdt) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4+0j,5], self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_definition_real(self): + x = np.array([1,2,3,4,1,2,3,4], self.rdt) + y = ifft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_idft(x) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4,5], dtype=self.rdt) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(n) + y = ifft(x.astype(self.cdt)) + y2 = numpy.fft.ifft(x.astype(self.cdt)) + assert_allclose(y,y2, rtol=self.rtol, atol=self.atol) + y = ifft(x) + assert_allclose(y,y2, rtol=self.rtol, atol=self.atol) + + def test_random_complex(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.cdt) + x = random([size]).astype(self.cdt) + 1j*x + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + + x = (x + 1j*np.random.rand(size)).astype(self.cdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + + def 
test_invalid_sizes(self): + assert_raises(ValueError, ifft, []) + assert_raises(ValueError, ifft, [[1,1],[2,2]], -5) + + +@pytest.mark.skipif(np.longdouble is np.float64, + reason="Long double is aliased to double") +class TestLongDoubleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.clongdouble + self.rdt = np.longdouble + self.rtol = 1e-10 + self.atol = 1e-10 + + +class TestDoubleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.rtol = 1e-10 + self.atol = 1e-10 + + +class TestSingleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + self.rtol = 1e-5 + self.atol = 1e-4 + + +class _TestRFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]: + x = np.array(t, dtype=self.rdt) + y = rfft(x) + y1 = direct_rdft(x) + assert_array_almost_equal(y,y1) + assert_equal(y.dtype, self.cdt) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(n) + y1 = np.fft.rfft(x) + y = rfft(x) + assert_array_almost_equal(y,y1) + + def test_invalid_sizes(self): + assert_raises(ValueError, rfft, []) + assert_raises(ValueError, rfft, [[1,1],[2,2]], -5) + + def test_complex_input(self): + x = np.zeros(10, dtype=self.cdt) + with assert_raises(TypeError, match="x must be a real sequence"): + rfft(x) + + # See gh-5790 + class MockSeries: + def __init__(self, data): + self.data = np.asarray(data) + + def __getattr__(self, item): + try: + return getattr(self.data, item) + except AttributeError as e: + raise AttributeError("'MockSeries' object " + f"has no attribute '{item}'") from e + + def test_non_ndarray_with_dtype(self): + x = np.array([1., 2., 3., 4., 5.]) + xs = _TestRFFTBase.MockSeries(x) + + expected = [1, 2, 3, 4, 5] + rfft(xs) + + # Data should not have been overwritten + assert_equal(x, expected) + assert_equal(xs.data, expected) + 
+@pytest.mark.skipif(np.longdouble is np.float64, + reason="Long double is aliased to double") +class TestRFFTLongDouble(_TestRFFTBase): + def setup_method(self): + self.cdt = np.clongdouble + self.rdt = np.longdouble + + +class TestRFFTDouble(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestRFFTSingle(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestIRFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x1 = [1,2+3j,4+1j,1+2j,3+4j] + x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j] + x1 = x1_1[:5] + x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j] + x2 = x2_1[:5] + + def _test(x, xr): + y = irfft(np.array(x, dtype=self.cdt), n=len(xr)) + y1 = direct_irdft(x, len(xr)) + assert_equal(y.dtype, self.rdt) + assert_array_almost_equal(y,y1, decimal=self.ndec) + assert_array_almost_equal(y,ifft(xr), decimal=self.ndec) + + _test(x1, x1_1) + _test(x2, x2_1) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2) + x[0] = 0 + if n % 2 == 0: + x[-1] = np.real(x[-1]) + y1 = np.fft.irfft(x) + y = irfft(x) + assert_array_almost_equal(y,y1) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = irfft(rfft(x), n=size) + y2 = rfft(irfft(x, n=(size*2-1))) + assert_equal(y1.dtype, self.rdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x, decimal=self.ndec, + err_msg="size=%d" % size) + assert_array_almost_equal(y2, x, decimal=self.ndec, + err_msg="size=%d" % size) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = 
irfft(rfft(x), len(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = rfft(irfft(x, 2 * len(x) - 1)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, irfft, []) + assert_raises(ValueError, irfft, [[1,1],[2,2]], -5) + + +# self.ndec is bogus; we should have a assert_array_approx_equal for number of +# significant digits + +@pytest.mark.skipif(np.longdouble is np.float64, + reason="Long double is aliased to double") +class TestIRFFTLongDouble(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.ndec = 14 + + +class TestIRFFTDouble(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.ndec = 14 + + +class TestIRFFTSingle(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + self.ndec = 5 + + +class TestFftnSingle: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float32)) + assert_(y.dtype == np.complex64, + msg="double precision output with single precision") + + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_size_accuracy_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_size_accuracy_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + 
assert_array_almost_equal_nulp(y1, y2, 2000) + + def test_definition_float16(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float16)) + assert_equal(y.dtype, np.complex64) + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_float16_input_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 5e5) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_float16_input_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2e6) + + +class TestFftn: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(x) + assert_array_almost_equal(y, direct_dftn(x)) + + x = random((20, 26)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + def test_axes_argument(self): + # plane == ji_plane, x== kji_space + plane1 = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + plane2 = [[10, 11, 12], + [13, 14, 15], + [16, 17, 18]] + plane3 = [[19, 20, 21], + [22, 23, 24], + [25, 26, 27]] + ki_plane1 = [[1, 2, 3], + [10, 11, 12], + [19, 20, 21]] + ki_plane2 = [[4, 5, 6], + [13, 14, 15], + [22, 23, 24]] + ki_plane3 = [[7, 8, 9], + [16, 17, 18], + [25, 26, 27]] + jk_plane1 = [[1, 10, 19], + [4, 13, 22], + [7, 16, 25]] + jk_plane2 = [[2, 11, 20], + [5, 14, 23], + [8, 17, 26]] + jk_plane3 = [[3, 12, 21], + [6, 15, 24], + [9, 18, 27]] + kj_plane1 = [[1, 4, 
7], + [10, 13, 16], [19, 22, 25]] + kj_plane2 = [[2, 5, 8], + [11, 14, 17], [20, 23, 26]] + kj_plane3 = [[3, 6, 9], + [12, 15, 18], [21, 24, 27]] + ij_plane1 = [[1, 4, 7], + [2, 5, 8], + [3, 6, 9]] + ij_plane2 = [[10, 13, 16], + [11, 14, 17], + [12, 15, 18]] + ij_plane3 = [[19, 22, 25], + [20, 23, 26], + [21, 24, 27]] + ik_plane1 = [[1, 10, 19], + [2, 11, 20], + [3, 12, 21]] + ik_plane2 = [[4, 13, 22], + [5, 14, 23], + [6, 15, 24]] + ik_plane3 = [[7, 16, 25], + [8, 17, 26], + [9, 18, 27]] + ijk_space = [jk_plane1, jk_plane2, jk_plane3] + ikj_space = [kj_plane1, kj_plane2, kj_plane3] + jik_space = [ik_plane1, ik_plane2, ik_plane3] + jki_space = [ki_plane1, ki_plane2, ki_plane3] + kij_space = [ij_plane1, ij_plane2, ij_plane3] + x = array([plane1, plane2, plane3]) + + assert_array_almost_equal(fftn(x), + fftn(x, axes=(-3, -2, -1))) # kji_space + assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2))) + assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1))) + y = fftn(x, axes=(2, 1, 0)) # ijk_space + assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space)) + y = fftn(x, axes=(2, 0, 1)) # ikj_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2), + fftn(ikj_space)) + y = fftn(x, axes=(1, 2, 0)) # jik_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2), + fftn(jik_space)) + y = fftn(x, axes=(1, 0, 2)) # jki_space + assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space)) + y = fftn(x, axes=(0, 2, 1)) # kij_space + assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space)) + + y = fftn(x, axes=(-2, -1)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(1, 2)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(-3, -2)) # kj_plane + 
assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0]) + assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1]) + assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2]) + + y = fftn(x, axes=(-3, -1)) # ki_plane + assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :]) + assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :]) + assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :]) + + y = fftn(x, axes=(-1, -2)) # ij_plane + assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1)) + assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1)) + assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1)) + + y = fftn(x, axes=(-1, -3)) # ik_plane + assert_array_almost_equal(fftn(ik_plane1), + swapaxes(y[:, 0, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane2), + swapaxes(y[:, 1, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane3), + swapaxes(y[:, 2, :], -1, -2)) + + y = fftn(x, axes=(-2, -3)) # jk_plane + assert_array_almost_equal(fftn(jk_plane1), + swapaxes(y[:, :, 0], -1, -2)) + assert_array_almost_equal(fftn(jk_plane2), + swapaxes(y[:, :, 1], -1, -2)) + assert_array_almost_equal(fftn(jk_plane3), + swapaxes(y[:, :, 2], -1, -2)) + + y = fftn(x, axes=(-1,)) # i_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :]) + y = fftn(x, axes=(-2,)) # j_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j]) + y = fftn(x, axes=(0,)) # k_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j]) + + y = fftn(x, axes=()) # point + assert_array_almost_equal(y, x) + + def test_shape_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6]] + large_x1 = [[1, 2, 3, 0], + [4, 5, 6, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + + y = fftn(small_x, s=(4, 4)) + assert_array_almost_equal(y, fftn(large_x1)) + + y = fftn(small_x, s=(3, 4)) + assert_array_almost_equal(y, fftn(large_x1[:-1])) + + def 
test_shape_axes_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + large_x1 = array([[1, 2, 3, 0], + [4, 5, 6, 0], + [7, 8, 9, 0], + [0, 0, 0, 0]]) + y = fftn(small_x, s=(4, 4), axes=(-2, -1)) + assert_array_almost_equal(y, fftn(large_x1)) + y = fftn(small_x, s=(4, 4), axes=(-1, -2)) + + assert_array_almost_equal(y, swapaxes( + fftn(swapaxes(large_x1, -1, -2)), -1, -2)) + + def test_shape_axes_argument2(self): + # Change shape of the last axis + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-1,), s=(8,)) + assert_array_almost_equal(y, fft(x, axis=-1, n=8)) + + # Change shape of an arbitrary axis which is not the last one + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-2,), s=(8,)) + assert_array_almost_equal(y, fft(x, axis=-2, n=8)) + + # Change shape of axes: cf #244, where shape and axes were mixed up + x = numpy.random.random((4, 4, 2)) + y = fftn(x, axes=(-3, -2), s=(8, 8)) + assert_array_almost_equal(y, + numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8))) + + def test_shape_argument_more(self): + x = zeros((4, 4, 2)) + with assert_raises(ValueError, + match="shape requires more axes than are present"): + fftn(x, s=(8, 8, 2, 1)) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + fftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + fftn([[1, 1], [2, 2]], (4, -3)) + + def test_no_axes(self): + x = numpy.random.random((2,2,2)) + assert_allclose(fftn(x, axes=[]), x, atol=1e-7) + + def test_regression_244(self): + """FFT returns wrong result with axes parameter.""" + # fftn (and hence fft2) used to break when both axes and shape were used + x = numpy.ones((4, 4, 2)) + y = fftn(x, s=(8, 8), axes=(-3, -2)) + y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2)) + assert_allclose(y, y_r) + + +class TestIfftn: + dtype = None + cdtype = None + + def setup_method(self): + 
np.random.seed(1234) + + @pytest.mark.parametrize('dtype,cdtype,maxnlp', + [(np.float64, np.complex128, 2000), + (np.float32, np.complex64, 3500)]) + def test_definition(self, dtype, cdtype, maxnlp): + x = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], dtype=dtype) + y = ifftn(x) + assert_equal(y.dtype, cdtype) + assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp) + + x = random((20, 26)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + @pytest.mark.parametrize('maxnlp', [2000, 3500]) + @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92]) + def test_random_complex(self, maxnlp, size): + x = random([size, size]) + 1j*random([size, size]) + assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp) + assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + ifftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + ifftn([[1, 1], [2, 2]], (4, -3)) + + def test_no_axes(self): + x = numpy.random.random((2,2,2)) + assert_allclose(ifftn(x, axes=[]), x, atol=1e-7) + +class TestRfftn: + dtype = None + cdtype = None + + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.parametrize('dtype,cdtype,maxnlp', + [(np.float64, np.complex128, 2000), + (np.float32, np.complex64, 3500)]) + def test_definition(self, dtype, cdtype, maxnlp): + x = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], dtype=dtype) + y = rfftn(x) + assert_equal(y.dtype, cdtype) + assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp) + + x = random((20, 26)) + assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp) + + @pytest.mark.parametrize('size', [1, 2, 
51, 32, 64, 92]) + def test_random(self, size): + x = random([size, size]) + assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10) + + @pytest.mark.parametrize('func', [rfftn, irfftn]) + def test_invalid_sizes(self, func): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + func([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + func([[1, 1], [2, 2]], (4, -3)) + + @pytest.mark.parametrize('func', [rfftn, irfftn]) + def test_no_axes(self, func): + with assert_raises(ValueError, + match="at least 1 axis must be transformed"): + func([], axes=[]) + + def test_complex_input(self): + with assert_raises(TypeError, match="x must be a real sequence"): + rfftn(np.zeros(10, dtype=np.complex64)) + + +class FakeArray: + def __init__(self, data): + self._data = data + self.__array_interface__ = data.__array_interface__ + + +class FakeArray2: + def __init__(self, data): + self._data = data + + def __array__(self, dtype=None, copy=None): + return self._data + +# TODO: Is this test actually valuable? 
The behavior it's testing shouldn't be +# relied upon by users except for overwrite_x = False +class TestOverwrite: + """Check input overwrite behavior of the FFT functions.""" + + real_dtypes = [np.float32, np.float64, np.longdouble] + dtypes = real_dtypes + [np.complex64, np.complex128, np.clongdouble] + fftsizes = [8, 16, 32] + + def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite): + x2 = x.copy() + for fake in [lambda x: x, FakeArray, FakeArray2]: + routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not should_overwrite: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes, + fftsize, overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + should_overwrite = (overwrite_x + and dtype in overwritable_dtypes + and fftsize <= shape[axis]) + self._check(data, routine, fftsize, axis, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = (np.clongdouble, np.complex128, np.complex64) + self._check_1d(fft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(ifft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + @pytest.mark.parametrize('dtype', real_dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', 
[((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = self.real_dtypes + self._check_1d(irfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(rfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes, + overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + def fftshape_iter(shp): + if len(shp) <= 0: + yield () + else: + for j in (shp[0]//2, shp[0], shp[0]*2): + for rest in fftshape_iter(shp[1:]): + yield (j,) + rest + + def part_shape(shape, axes): + if axes is None: + return shape + else: + return tuple(np.take(shape, axes)) + + def should_overwrite(data, shape, axes): + s = part_shape(data.shape, axes) + return (overwrite_x and + np.prod(shape) <= np.prod(s) + and dtype in overwritable_dtypes) + + for fftshape in fftshape_iter(part_shape(shape, axes)): + self._check(data, routine, fftshape, axes, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite(data, fftshape, axes)) + if data.ndim > 1: + # check fortran order + self._check(data.T, routine, fftshape, axes, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite( + data.T, fftshape, axes)) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), None), + ((16,), (0,)), + ((16, 2), (0,)), + ((2, 16), (1,)), + ((8, 16), None), + ((8, 16), (0, 1)), + ((8, 16, 2), (0, 1)), + ((8, 16, 2), (1, 2)), + ((8, 16, 2), (0,)), + ((8, 16, 2), (1,)), + ((8, 16, 2), (2,)), + ((8, 16, 2), None), + ((8, 16, 2), (0, 1, 2))]) + def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes): + overwritable = (np.clongdouble, np.complex128, np.complex64) + 
self._check_nd_one(fftn, dtype, shape, axes, overwritable, + overwrite_x) + self._check_nd_one(ifftn, dtype, shape, axes, overwritable, + overwrite_x) + + +@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn, + rfft, irfft, rfftn, irfftn]) +def test_invalid_norm(func): + x = np.arange(10, dtype=float) + with assert_raises(ValueError, + match='Invalid norm value \'o\', should be' + ' "backward", "ortho" or "forward"'): + func(x, norm='o') + + +@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn, + irfft, irfftn, hfft, hfftn]) +def test_swapped_byte_order_complex(func): + rng = np.random.RandomState(1234) + x = rng.rand(10) + 1j * rng.rand(10) + assert_allclose(func(swap_byteorder(x)), func(x)) + + +@pytest.mark.parametrize('func', [ihfft, ihfftn, rfft, rfftn]) +def test_swapped_byte_order_real(func): + rng = np.random.RandomState(1234) + x = rng.rand(10) + assert_allclose(func(swap_byteorder(x)), func(x)) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_real_transforms.py b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_real_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..2cb47f40c6bc0a251a79bb3660fcc9a0f1b10725 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_real_transforms.py @@ -0,0 +1,494 @@ +from os.path import join, dirname +from typing import Callable, Union + +import numpy as np +from numpy.testing import ( + assert_array_almost_equal, assert_equal, assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy.fft._pocketfft.realtransforms import ( + dct, idct, dst, idst, dctn, idctn, dstn, idstn) + +fftpack_test_dir = join(dirname(__file__), '..', '..', '..', 'fftpack', 'tests') + +MDATA_COUNT = 8 +FFTWDATA_COUNT = 14 + +def is_longdouble_binary_compatible(): + try: + one = np.frombuffer( + b'\x00\x00\x00\x00\x00\x00\x00\x80\xff\x3f\x00\x00\x00\x00\x00\x00', + dtype=' decimal +dec_map: DecMapType = { 
+ # DCT + (dct, np.float64, 1): 13, + (dct, np.float32, 1): 6, + + (dct, np.float64, 2): 14, + (dct, np.float32, 2): 5, + + (dct, np.float64, 3): 14, + (dct, np.float32, 3): 5, + + (dct, np.float64, 4): 13, + (dct, np.float32, 4): 6, + + # IDCT + (idct, np.float64, 1): 14, + (idct, np.float32, 1): 6, + + (idct, np.float64, 2): 14, + (idct, np.float32, 2): 5, + + (idct, np.float64, 3): 14, + (idct, np.float32, 3): 5, + + (idct, np.float64, 4): 14, + (idct, np.float32, 4): 6, + + # DST + (dst, np.float64, 1): 13, + (dst, np.float32, 1): 6, + + (dst, np.float64, 2): 14, + (dst, np.float32, 2): 6, + + (dst, np.float64, 3): 14, + (dst, np.float32, 3): 7, + + (dst, np.float64, 4): 13, + (dst, np.float32, 4): 5, + + # IDST + (idst, np.float64, 1): 14, + (idst, np.float32, 1): 6, + + (idst, np.float64, 2): 14, + (idst, np.float32, 2): 6, + + (idst, np.float64, 3): 14, + (idst, np.float32, 3): 6, + + (idst, np.float64, 4): 14, + (idst, np.float32, 4): 6, +} + +for k,v in dec_map.copy().items(): + if k[1] == np.float64: + dec_map[(k[0], np.longdouble, k[2])] = v + elif k[1] == np.float32: + dec_map[(k[0], int, k[2])] = v + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +class TestDCT: + def test_definition(self, rdt, type, fftwdata_size, reference_data): + x, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt, reference_data) + y = dct(x, type=type) + assert_equal(y.dtype, dt) + dec = dec_map[(dct, rdt, type)] + assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec)) + + @pytest.mark.parametrize('size', [7, 8, 9, 16, 32, 64]) + def test_axis(self, rdt, type, size): + nt = 2 + dec = dec_map[(dct, rdt, type)] + x = np.random.randn(nt, size) + y = dct(x, type=type) + for j in range(nt): + assert_array_almost_equal(y[j], dct(x[j], type=type), + decimal=dec) + + x = x.T + y = dct(x, axis=0, type=type) + for j in range(nt): + assert_array_almost_equal(y[:,j], dct(x[:,j], type=type), + decimal=dec) + 
+ +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct1_definition_ortho(rdt, mdata_x): + # Test orthornomal mode. + dec = dec_map[(dct, rdt, 1)] + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dct(x, norm='ortho', type=1) + y2 = naive_dct1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct2_definition_matlab(mdata_xy, rdt): + # Test correspondence with matlab (orthornomal mode). + dt = np.result_type(np.float32, rdt) + x = np.array(mdata_xy[0], dtype=dt) + + yr = mdata_xy[1] + y = dct(x, norm="ortho", type=2) + dec = dec_map[(dct, rdt, 2)] + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, yr, decimal=dec) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct3_definition_ortho(mdata_x, rdt): + # Test orthornomal mode. + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dct(x, norm='ortho', type=2) + xi = dct(y, norm="ortho", type=3) + dec = dec_map[(dct, rdt, 3)] + assert_equal(xi.dtype, dt) + assert_array_almost_equal(xi, x, decimal=dec) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct4_definition_ortho(mdata_x, rdt): + # Test orthornomal mode. 
+ x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dct(x, norm='ortho', type=4) + y2 = naive_dct4(x, norm='ortho') + dec = dec_map[(dct, rdt, 4)] + assert_equal(y.dtype, dt) + assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +def test_idct_definition(fftwdata_size, rdt, type, reference_data): + xr, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt, reference_data) + x = idct(yr, type=type) + dec = dec_map[(idct, rdt, type)] + assert_equal(x.dtype, dt) + assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +def test_definition(fftwdata_size, rdt, type, reference_data): + xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt, reference_data) + y = dst(xr, type=type) + dec = dec_map[(dst, rdt, type)] + assert_equal(y.dtype, dt) + assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dst1_definition_ortho(rdt, mdata_x): + # Test orthornomal mode. + dec = dec_map[(dst, rdt, 1)] + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dst(x, norm='ortho', type=1) + y2 = naive_dst1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dst4_definition_ortho(rdt, mdata_x): + # Test orthornomal mode. 
+ dec = dec_map[(dst, rdt, 4)] + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dst(x, norm='ortho', type=4) + y2 = naive_dst4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, y2, decimal=dec) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +def test_idst_definition(fftwdata_size, rdt, type, reference_data): + xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt, reference_data) + x = idst(yr, type=type) + dec = dec_map[(idst, rdt, type)] + assert_equal(x.dtype, dt) + assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec)) + + +@pytest.mark.parametrize('routine', [dct, dst, idct, idst]) +@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble]) +@pytest.mark.parametrize('shape, axis', [ + ((16,), -1), ((16, 2), 0), ((2, 16), 1) +]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +@pytest.mark.parametrize('overwrite_x', [True, False]) +@pytest.mark.parametrize('norm', [None, 'ortho']) +def test_overwrite(routine, dtype, shape, axis, type, norm, overwrite_x): + # Check input overwrite behavior + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + x = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + x = np.random.randn(*shape) + x = x.astype(dtype) + x2 = x.copy() + routine(x2, type, None, axis, norm, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, None, axis, overwrite_x) + if not overwrite_x: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + +class Test_DCTN_IDCTN: + dec = 14 + dct_type = [1, 2, 3, 4] + norms = [None, 'backward', 'ortho', 'forward'] + rstate = np.random.RandomState(1234) + shape = (32, 16) + data = rstate.randn(*shape) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [None, + 1, (1,), [1], + 0, 
(0,), [0], + (0, 1), [0, 1], + (-2, -1), [-2, -1]]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', ['ortho']) + def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm): + tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm) + tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm) + assert_array_almost_equal(self.data, tmp, decimal=12) + + @pytest.mark.parametrize('funcn,func', [(dctn, dct), (dstn, dst)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_dctn_vs_2d_reference(self, funcn, func, dct_type, norm): + y1 = funcn(self.data, type=dct_type, axes=None, norm=norm) + y2 = ref_2d(func, self.data, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('funcn,func', [(idctn, idct), (idstn, idst)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_idctn_vs_2d_reference(self, funcn, func, dct_type, norm): + fdata = dctn(self.data, type=dct_type, norm=norm) + y1 = funcn(fdata, type=dct_type, norm=norm) + y2 = ref_2d(func, fdata, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + def test_axes_and_shape(self, fforward, finverse): + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, s=self.data.shape[0], axes=(0, 1)) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, s=self.data.shape, axes=0) + + @pytest.mark.parametrize('fforward', [dctn, dstn]) + def test_shape(self, fforward): + tmp = fforward(self.data, s=(128, 128), axes=None) + assert_equal(tmp.shape, (128, 128)) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + 
@pytest.mark.parametrize('axes', [1, (1,), [1], + 0, (0,), [0]]) + def test_shape_is_none_with_axes(self, fforward, finverse, axes): + tmp = fforward(self.data, s=None, axes=axes, norm='ortho') + tmp = finverse(tmp, s=None, axes=axes, norm='ortho') + assert_array_almost_equal(self.data, tmp, decimal=self.dec) + + +@pytest.mark.parametrize('func', [dct, dctn, idct, idctn, + dst, dstn, idst, idstn]) +def test_swapped_byte_order(func): + rng = np.random.RandomState(1234) + x = rng.rand(10) + swapped_dt = x.dtype.newbyteorder('S') + assert_allclose(func(x.astype(swapped_dt)), func(x)) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms.py b/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..1c7a3d683dd78d3227a7de88f5c47569d2f4e17f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms.py @@ -0,0 +1,693 @@ +from ._basic import _dispatch +from scipy._lib.uarray import Dispatchable +import numpy as np + +__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] + + +@_dispatch +def dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, orthogonalize=None): + """ + Return multidimensional Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. If both `s` and `axes` (see below) are None, + `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is + ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. + If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. + If any element of `s` is -1, the size of the corresponding dimension of + `x` is used. 
+ axes : int or array_like of ints or None, optional + Axes over which the DCT is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DCT variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idctn : Inverse multidimensional DCT + + Notes + ----- + For full details of the DCT types and normalization modes, as well as + references, see `dct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return multidimensional Inverse Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. If both `s` and `axes` (see below) are + None, `s` is ``x.shape``; if `s` is None but `axes` is + not None, then `s` is ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. 
+ If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. + If any element of `s` is -1, the size of the corresponding dimension of + `x` is used. + axes : int or array_like of ints or None, optional + Axes over which the IDCT is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDCT variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dctn : multidimensional DCT + + Notes + ----- + For full details of the IDCT types and normalization modes, as well as + references, see `idct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return multidimensional Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. 
If both `s` and `axes` (see below) are None, + `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is + ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. + If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. + If any element of `shape` is -1, the size of the corresponding dimension + of `x` is used. + axes : int or array_like of ints or None, optional + Axes over which the DST is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idstn : Inverse multidimensional DST + + Notes + ----- + For full details of the DST types and normalization modes, as well as + references, see `dst`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return multidimensional Inverse Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. 
+ type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. If both `s` and `axes` (see below) are None, + `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is + ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. + If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. + If any element of `s` is -1, the size of the corresponding dimension of + `x` is used. + axes : int or array_like of ints or None, optional + Axes over which the IDST is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dstn : multidimensional DST + + Notes + ----- + For full details of the IDST types and normalization modes, as well as + references, see `idst`. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, + orthogonalize=None): + r"""Return the Discrete Cosine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dct is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DCT variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idct : Inverse DCT + + Notes + ----- + For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to + MATLAB ``dct(x)``. + + .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the direct Fourier transform. To recover + it you must specify ``orthogonalize=False``. 
+ + For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 1, 2 and 3 means the transform definition is + modified to give orthogonality of the DCT matrix (see below). + + For ``norm="backward"``, there is no scaling on `dct` and the `idct` is + scaled by ``1/N`` where ``N`` is the "logical" size of the DCT. For + ``norm="forward"`` the ``1/N`` normalization is applied to the forward + `dct` instead and the `idct` is unnormalized. + + There are, theoretically, 8 types of the DCT, only the first 4 types are + implemented in SciPy.'The' DCT generally refers to DCT type 2, and 'the' + Inverse DCT generally refers to DCT type 3. + + **Type I** + + There are several definitions of the DCT-I; we use the following + (for ``norm="backward"``) + + .. math:: + + y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left( + \frac{\pi k n}{N-1} \right) + + If ``orthogonalize=True``, ``x[0]`` and ``x[N-1]`` are multiplied by a + scaling factor of :math:`\sqrt{2}`, and ``y[0]`` and ``y[N-1]`` are divided + by :math:`\sqrt{2}`. When combined with ``norm="ortho"``, this makes the + corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``). + + .. note:: + The DCT-I is only supported for input size > 1. + + **Type II** + + There are several definitions of the DCT-II; we use the following + (for ``norm="backward"``) + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right) + + If ``orthogonalize=True``, ``y[0]`` is divided by :math:`\sqrt{2}` which, + when combined with ``norm="ortho"``, makes the corresponding matrix of + coefficients orthonormal (``O @ O.T = np.eye(N)``). + + **Type III** + + There are several definitions, we use the following (for + ``norm="backward"``) + + .. 
math:: + + y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right) + + If ``orthogonalize=True``, ``x[0]`` terms are multiplied by + :math:`\sqrt{2}` which, when combined with ``norm="ortho"``, makes the + corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``). + + The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up + to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of + the orthonormalized DCT-II. + + **Type IV** + + There are several definitions of the DCT-IV; we use the following + (for ``norm="backward"``) + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right) + + ``orthogonalize`` has no effect here, as the DCT-IV matrix is already + orthogonal up to a scale factor of ``2N``. + + References + ---------- + .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J. + Makhoul, `IEEE Transactions on acoustics, speech and signal + processing` vol. 28(1), pp. 27-34, + :doi:`10.1109/TASSP.1980.1163351` (1980). + .. [2] Wikipedia, "Discrete cosine transform", + https://en.wikipedia.org/wiki/Discrete_cosine_transform + + Examples + -------- + The Type 1 DCT is equivalent to the FFT (though faster) for real, + even-symmetrical inputs. The output is also real and even-symmetrical. + Half of the FFT input is used to generate half of the FFT output: + + >>> from scipy.fft import fft, dct + >>> import numpy as np + >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real + array([ 30., -8., 6., -2., 6., -8.]) + >>> dct(np.array([4., 3., 5., 10.]), 1) + array([ 30., -8., 6., -2.]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return the Inverse Discrete Cosine Transform of an arbitrary type sequence. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). 
Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the idct is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDCT variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + idct : ndarray of real + The transformed input array. + + See Also + -------- + dct : Forward DCT + + Notes + ----- + For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to + MATLAB ``idct(x)``. + + .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the inverse direct Fourier transform. To + recover it you must specify ``orthogonalize=False``. + + For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 1, 2 and 3 means the transform definition is + modified to give orthogonality of the IDCT matrix (see `dct` for the full + definitions). + + 'The' IDCT is the IDCT-II, which is the same as the normalized DCT-III. + + The IDCT is equivalent to a normal DCT except for the normalization and + type. DCT type 1 and 4 are their own inverse and DCTs 2 and 3 are each + other's inverses. 
+ + Examples + -------- + The Type 1 DCT is equivalent to the DFT for real, even-symmetrical + inputs. The output is also real and even-symmetrical. Half of the IFFT + input is used to generate half of the IFFT output: + + >>> from scipy.fft import ifft, idct + >>> import numpy as np + >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real + array([ 4., 3., 5., 10., 5., 3.]) + >>> idct(np.array([ 30., -8., 6., -2.]), 1) + array([ 4., 3., 5., 10.]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, + orthogonalize=None): + r""" + Return the Discrete Sine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dst is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + dst : ndarray of reals + The transformed input array. + + See Also + -------- + idst : Inverse DST + + Notes + ----- + .. 
warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the direct Fourier transform. To recover + it you must specify ``orthogonalize=False``. + + For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 2 and 3 means the transform definition is + modified to give orthogonality of the DST matrix (see below). + + For ``norm="backward"``, there is no scaling on the `dst` and the `idst` is + scaled by ``1/N`` where ``N`` is the "logical" size of the DST. + + There are, theoretically, 8 types of the DST for different combinations of + even/odd boundary conditions and boundary off sets [1]_, only the first + 4 types are implemented in SciPy. + + **Type I** + + There are several definitions of the DST-I; we use the following for + ``norm="backward"``. DST-I assumes the input is odd around :math:`n=-1` and + :math:`n=N`. + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right) + + Note that the DST-I is only supported for input size > 1. + The (unnormalized) DST-I is its own inverse, up to a factor :math:`2(N+1)`. + The orthonormalized DST-I is exactly its own inverse. + + ``orthogonalize`` has no effect here, as the DST-I matrix is already + orthogonal up to a scale factor of ``2N``. + + **Type II** + + There are several definitions of the DST-II; we use the following for + ``norm="backward"``. DST-II assumes the input is odd around :math:`n=-1/2` and + :math:`n=N-1/2`; the output is odd around :math:`k=-1` and even around :math:`k=N-1` + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right) + + If ``orthogonalize=True``, ``y[-1]`` is divided :math:`\sqrt{2}` which, when + combined with ``norm="ortho"``, makes the corresponding matrix of + coefficients orthonormal (``O @ O.T = np.eye(N)``). 
+ + **Type III** + + There are several definitions of the DST-III, we use the following (for + ``norm="backward"``). DST-III assumes the input is odd around :math:`n=-1` and + even around :math:`n=N-1` + + .. math:: + + y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left( + \frac{\pi(2k+1)(n+1)}{2N}\right) + + If ``orthogonalize=True``, ``x[-1]`` is multiplied by :math:`\sqrt{2}` + which, when combined with ``norm="ortho"``, makes the corresponding matrix + of coefficients orthonormal (``O @ O.T = np.eye(N)``). + + The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up + to a factor :math:`2N`. The orthonormalized DST-III is exactly the inverse of the + orthonormalized DST-II. + + **Type IV** + + There are several definitions of the DST-IV, we use the following (for + ``norm="backward"``). DST-IV assumes the input is odd around :math:`n=-0.5` and + even around :math:`n=N-0.5` + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right) + + ``orthogonalize`` has no effect here, as the DST-IV matrix is already + orthogonal up to a scale factor of ``2N``. + + The (unnormalized) DST-IV is its own inverse, up to a factor :math:`2N`. The + orthonormalized DST-IV is exactly its own inverse. + + References + ---------- + .. [1] Wikipedia, "Discrete sine transform", + https://en.wikipedia.org/wiki/Discrete_sine_transform + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return the Inverse Discrete Sine Transform of an arbitrary type sequence. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. 
+ axis : int, optional + Axis along which the idst is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + idst : ndarray of real + The transformed input array. + + See Also + -------- + dst : Forward DST + + Notes + ----- + .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the inverse direct Fourier transform. + + For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 2 and 3 means the transform definition is + modified to give orthogonality of the DST matrix (see `dst` for the full + definitions). + + 'The' IDST is the IDST-II, which is the same as the normalized DST-III. + + The IDST is equivalent to a normal DST except for the normalization and + type. DST type 1 and 4 are their own inverse and DSTs 2 and 3 are each + other's inverses. 
+ + """ + return (Dispatchable(x, np.ndarray),) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py b/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..2042453733bec54860974cc1e20ba908e8c9b94d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py @@ -0,0 +1,63 @@ +from scipy._lib._array_api import array_namespace +import numpy as np +from . import _pocketfft + +__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] + + +def _execute(pocketfft_func, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize): + xp = array_namespace(x) + x = np.asarray(x) + y = pocketfft_func(x, type, s, axes, norm, + overwrite_x=overwrite_x, workers=workers, + orthogonalize=orthogonalize) + return xp.asarray(y) + + +def dctn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, orthogonalize=None): + return _execute(_pocketfft.dctn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def idctn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, orthogonalize=None): + return _execute(_pocketfft.idctn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def dstn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.dstn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def idstn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, orthogonalize=None): + return _execute(_pocketfft.idstn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def dct(x, type=2, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.dct, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) + + +def idct(x, type=2, n=None, axis=-1, norm=None, + 
overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.idct, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) + + +def dst(x, type=2, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.dst, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) + + +def idst(x, type=2, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.idst, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7919634e7a63b515b55fc12ef8acd0a343255b47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..330c3d48c207fbe262f3e2738fd3eb77f1f77e44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f0f531e1d20034948ccc1ccdb92d7f55d5b9a00 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e47ed301efeab12af7666eb6f29d13b00485ed86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e80ee92bd6ae6be7dc9e76d1eecbc6700cf6a2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c9093002dc446afdface1ed636d44408ae15ecd Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..c57a88e0af291ffd68a2a1d62218e8c9459986d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py @@ -0,0 +1,92 @@ +import numpy as np +import scipy.fft + +class _MockFunction: + def __init__(self, return_value = None): + self.number_calls = 0 + self.return_value = return_value + self.last_args = 
([], {}) + + def __call__(self, *args, **kwargs): + self.number_calls += 1 + self.last_args = (args, kwargs) + return self.return_value + + +fft = _MockFunction(np.random.random(10)) +fft2 = _MockFunction(np.random.random(10)) +fftn = _MockFunction(np.random.random(10)) + +ifft = _MockFunction(np.random.random(10)) +ifft2 = _MockFunction(np.random.random(10)) +ifftn = _MockFunction(np.random.random(10)) + +rfft = _MockFunction(np.random.random(10)) +rfft2 = _MockFunction(np.random.random(10)) +rfftn = _MockFunction(np.random.random(10)) + +irfft = _MockFunction(np.random.random(10)) +irfft2 = _MockFunction(np.random.random(10)) +irfftn = _MockFunction(np.random.random(10)) + +hfft = _MockFunction(np.random.random(10)) +hfft2 = _MockFunction(np.random.random(10)) +hfftn = _MockFunction(np.random.random(10)) + +ihfft = _MockFunction(np.random.random(10)) +ihfft2 = _MockFunction(np.random.random(10)) +ihfftn = _MockFunction(np.random.random(10)) + +dct = _MockFunction(np.random.random(10)) +idct = _MockFunction(np.random.random(10)) +dctn = _MockFunction(np.random.random(10)) +idctn = _MockFunction(np.random.random(10)) + +dst = _MockFunction(np.random.random(10)) +idst = _MockFunction(np.random.random(10)) +dstn = _MockFunction(np.random.random(10)) +idstn = _MockFunction(np.random.random(10)) + +fht = _MockFunction(np.random.random(10)) +ifht = _MockFunction(np.random.random(10)) + + +__ua_domain__ = "numpy.scipy.fft" + + +_implements = { + scipy.fft.fft: fft, + scipy.fft.fft2: fft2, + scipy.fft.fftn: fftn, + scipy.fft.ifft: ifft, + scipy.fft.ifft2: ifft2, + scipy.fft.ifftn: ifftn, + scipy.fft.rfft: rfft, + scipy.fft.rfft2: rfft2, + scipy.fft.rfftn: rfftn, + scipy.fft.irfft: irfft, + scipy.fft.irfft2: irfft2, + scipy.fft.irfftn: irfftn, + scipy.fft.hfft: hfft, + scipy.fft.hfft2: hfft2, + scipy.fft.hfftn: hfftn, + scipy.fft.ihfft: ihfft, + scipy.fft.ihfft2: ihfft2, + scipy.fft.ihfftn: ihfftn, + scipy.fft.dct: dct, + scipy.fft.idct: idct, + scipy.fft.dctn: dctn, + 
scipy.fft.idctn: idctn, + scipy.fft.dst: dst, + scipy.fft.idst: idst, + scipy.fft.dstn: dstn, + scipy.fft.idstn: idstn, + scipy.fft.fht: fht, + scipy.fft.ifht: ifht +} + + +def __ua_function__(method, args, kwargs): + fn = _implements.get(method) + return (fn(*args, **kwargs) if fn is not None + else NotImplemented) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..352ca8ff2a7ab1181b0a3226663dad7ef36ac0fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py @@ -0,0 +1,98 @@ +from functools import partial + +import numpy as np +import scipy.fft +from scipy.fft import _fftlog, _pocketfft, set_backend +from scipy.fft.tests import mock_backend + +from numpy.testing import assert_allclose, assert_equal +import pytest + +fnames = ('fft', 'fft2', 'fftn', + 'ifft', 'ifft2', 'ifftn', + 'rfft', 'rfft2', 'rfftn', + 'irfft', 'irfft2', 'irfftn', + 'dct', 'idct', 'dctn', 'idctn', + 'dst', 'idst', 'dstn', 'idstn', + 'fht', 'ifht') + +np_funcs = (np.fft.fft, np.fft.fft2, np.fft.fftn, + np.fft.ifft, np.fft.ifft2, np.fft.ifftn, + np.fft.rfft, np.fft.rfft2, np.fft.rfftn, + np.fft.irfft, np.fft.irfft2, np.fft.irfftn, + np.fft.hfft, _pocketfft.hfft2, _pocketfft.hfftn, # np has no hfftn + np.fft.ihfft, _pocketfft.ihfft2, _pocketfft.ihfftn, + _pocketfft.dct, _pocketfft.idct, _pocketfft.dctn, _pocketfft.idctn, + _pocketfft.dst, _pocketfft.idst, _pocketfft.dstn, _pocketfft.idstn, + # must provide required kwargs for fht, ifht + partial(_fftlog.fht, dln=2, mu=0.5), + partial(_fftlog.ifht, dln=2, mu=0.5)) + +funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn, + scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn, + scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn, + scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn, + scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn, + scipy.fft.ihfft, 
scipy.fft.ihfft2, scipy.fft.ihfftn, + scipy.fft.dct, scipy.fft.idct, scipy.fft.dctn, scipy.fft.idctn, + scipy.fft.dst, scipy.fft.idst, scipy.fft.dstn, scipy.fft.idstn, + # must provide required kwargs for fht, ifht + partial(scipy.fft.fht, dln=2, mu=0.5), + partial(scipy.fft.ifht, dln=2, mu=0.5)) + +mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn, + mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn, + mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn, + mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn, + mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn, + mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn, + mock_backend.dct, mock_backend.idct, + mock_backend.dctn, mock_backend.idctn, + mock_backend.dst, mock_backend.idst, + mock_backend.dstn, mock_backend.idstn, + mock_backend.fht, mock_backend.ifht) + + +@pytest.mark.parametrize("func, np_func, mock", zip(funcs, np_funcs, mocks)) +def test_backend_call(func, np_func, mock): + x = np.arange(20).reshape((10,2)) + answer = np_func(x.astype(np.float64)) + assert_allclose(func(x), answer, atol=1e-10) + + with set_backend(mock_backend, only=True): + mock.number_calls = 0 + y = func(x) + assert_equal(y, mock.return_value) + assert_equal(mock.number_calls, 1) + + assert_allclose(func(x), answer, atol=1e-10) + + +plan_funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn, + scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn, + scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn, + scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn, + scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn, + scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn) + +plan_mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn, + mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn, + mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn, + mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn, + mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn, + 
mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn) + + +@pytest.mark.parametrize("func, mock", zip(plan_funcs, plan_mocks)) +def test_backend_plan(func, mock): + x = np.arange(20).reshape((10, 2)) + + with pytest.raises(NotImplementedError, match='precomputed plan'): + func(x, plan='foo') + + with set_backend(mock_backend, only=True): + mock.number_calls = 0 + y = func(x, plan='foo') + assert_equal(y, mock.return_value) + assert_equal(mock.number_calls, 1) + assert_equal(mock.last_args[1]['plan'], 'foo') diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/test_basic.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..55d0f84624704b4172e15e66bdc14faba26db0cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_basic.py @@ -0,0 +1,504 @@ +import queue +import threading +import multiprocessing +import numpy as np +import pytest +from numpy.random import random +from numpy.testing import assert_array_almost_equal, assert_allclose +from pytest import raises as assert_raises +import scipy.fft as fft +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import ( + array_namespace, size, xp_assert_close, xp_assert_equal +) + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + + +# Expected input dtypes. Note that `scipy.fft` is more flexible for numpy, +# but for C2C transforms like `fft.fft`, the array API standard only mandates +# that complex dtypes should work, float32/float64 aren't guaranteed to. 
+def get_expected_input_dtype(func, xp): + if func in [fft.fft, fft.fftn, fft.fft2, + fft.ifft, fft.ifftn, fft.ifft2, + fft.hfft, fft.hfftn, fft.hfft2, + fft.irfft, fft.irfftn, fft.irfft2]: + dtype = xp.complex128 + elif func in [fft.rfft, fft.rfftn, fft.rfft2, + fft.ihfft, fft.ihfftn, fft.ihfft2]: + dtype = xp.float64 + else: + raise ValueError(f'Unknown FFT function: {func}') + + return dtype + + +def fft1(x): + L = len(x) + phase = -2j*np.pi*(np.arange(L)/float(L)) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x*np.exp(phase), axis=1) + + +class TestFFTShift: + + def test_fft_n(self, xp): + x = xp.asarray([1, 2, 3], dtype=xp.complex128) + if xp.__name__ == 'torch': + assert_raises(RuntimeError, fft.fft, x, 0) + else: + assert_raises(ValueError, fft.fft, x, 0) + + +class TestFFT1D: + + def test_identity(self, xp): + maxlen = 512 + x = xp.asarray(random(maxlen) + 1j*random(maxlen)) + xr = xp.asarray(random(maxlen)) + for i in range(1, maxlen): + xp_assert_close(fft.ifft(fft.fft(x[0:i])), x[0:i], rtol=1e-9, atol=0) + xp_assert_close(fft.irfft(fft.rfft(xr[0:i]), i), xr[0:i], rtol=1e-9, atol=0) + + def test_fft(self, xp): + x = random(30) + 1j*random(30) + expect = xp.asarray(fft1(x)) + x = xp.asarray(x) + xp_assert_close(fft.fft(x), expect) + xp_assert_close(fft.fft(x, norm="backward"), expect) + xp_assert_close(fft.fft(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30, dtype=xp.float64)),) + xp_assert_close(fft.fft(x, norm="forward"), expect / 30) + + def test_ifft(self, xp): + x = xp.asarray(random(30) + 1j*random(30)) + xp_assert_close(fft.ifft(fft.fft(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.ifft(fft.fft(x, norm=norm), norm=norm), x) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_fft2(self, xp): + x = xp.asarray(random((30, 20)) + 1j*random((30, 20))) + expect = fft.fft(fft.fft(x, axis=1), axis=0) + xp_assert_close(fft.fft2(x), expect) + 
xp_assert_close(fft.fft2(x, norm="backward"), expect) + xp_assert_close(fft.fft2(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64))) + xp_assert_close(fft.fft2(x, norm="forward"), expect / (30 * 20)) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_ifft2(self, xp): + x = xp.asarray(random((30, 20)) + 1j*random((30, 20))) + expect = fft.ifft(fft.ifft(x, axis=1), axis=0) + xp_assert_close(fft.ifft2(x), expect) + xp_assert_close(fft.ifft2(x, norm="backward"), expect) + xp_assert_close(fft.ifft2(x, norm="ortho"), + expect * xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64))) + xp_assert_close(fft.ifft2(x, norm="forward"), expect * (30 * 20)) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_fftn(self, xp): + x = xp.asarray(random((30, 20, 10)) + 1j*random((30, 20, 10))) + expect = fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0) + xp_assert_close(fft.fftn(x), expect) + xp_assert_close(fft.fftn(x, norm="backward"), expect) + xp_assert_close(fft.fftn(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64))) + xp_assert_close(fft.fftn(x, norm="forward"), expect / (30 * 20 * 10)) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_ifftn(self, xp): + x = xp.asarray(random((30, 20, 10)) + 1j*random((30, 20, 10))) + expect = fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0) + xp_assert_close(fft.ifftn(x), expect) + xp_assert_close(fft.ifftn(x, norm="backward"), expect) + xp_assert_close( + fft.ifftn(x, norm="ortho"), + fft.ifftn(x) * xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64)) + ) + xp_assert_close(fft.ifftn(x, norm="forward"), expect * (30 * 20 * 10)) + + def test_rfft(self, xp): + x = xp.asarray(random(29), dtype=xp.float64) + for n in [size(x), 2*size(x)]: + for norm in [None, "backward", "ortho", "forward"]: + xp_assert_close(fft.rfft(x, 
n=n, norm=norm), + fft.fft(xp.asarray(x, dtype=xp.complex128), + n=n, norm=norm)[:(n//2 + 1)]) + xp_assert_close( + fft.rfft(x, n=n, norm="ortho"), + fft.rfft(x, n=n) / xp.sqrt(xp.asarray(n, dtype=xp.float64)) + ) + + def test_irfft(self, xp): + x = xp.asarray(random(30)) + xp_assert_close(fft.irfft(fft.rfft(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.irfft(fft.rfft(x, norm=norm), norm=norm), x) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_rfft2(self, xp): + x = xp.asarray(random((30, 20)), dtype=xp.float64) + expect = fft.fft2(xp.asarray(x, dtype=xp.complex128))[:, :11] + xp_assert_close(fft.rfft2(x), expect) + xp_assert_close(fft.rfft2(x, norm="backward"), expect) + xp_assert_close(fft.rfft2(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64))) + xp_assert_close(fft.rfft2(x, norm="forward"), expect / (30 * 20)) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_irfft2(self, xp): + x = xp.asarray(random((30, 20))) + xp_assert_close(fft.irfft2(fft.rfft2(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.irfft2(fft.rfft2(x, norm=norm), norm=norm), x) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_rfftn(self, xp): + x = xp.asarray(random((30, 20, 10)), dtype=xp.float64) + expect = fft.fftn(xp.asarray(x, dtype=xp.complex128))[:, :, :6] + xp_assert_close(fft.rfftn(x), expect) + xp_assert_close(fft.rfftn(x, norm="backward"), expect) + xp_assert_close(fft.rfftn(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64))) + xp_assert_close(fft.rfftn(x, norm="forward"), expect / (30 * 20 * 10)) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_irfftn(self, xp): + x = xp.asarray(random((30, 20, 10))) + 
xp_assert_close(fft.irfftn(fft.rfftn(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.irfftn(fft.rfftn(x, norm=norm), norm=norm), x) + + def test_hfft(self, xp): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + x = xp.asarray(x) + x_herm = xp.asarray(x_herm) + expect = xp.real(fft.fft(x)) + xp_assert_close(fft.hfft(x_herm), expect) + xp_assert_close(fft.hfft(x_herm, norm="backward"), expect) + xp_assert_close(fft.hfft(x_herm, norm="ortho"), + expect / xp.sqrt(xp.asarray(30, dtype=xp.float64))) + xp_assert_close(fft.hfft(x_herm, norm="forward"), expect / 30) + + def test_ihfft(self, xp): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + x = xp.asarray(x) + x_herm = xp.asarray(x_herm) + xp_assert_close(fft.ihfft(fft.hfft(x_herm)), x_herm) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.ihfft(fft.hfft(x_herm, norm=norm), norm=norm), x_herm) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_hfft2(self, xp): + x = xp.asarray(random((30, 20))) + xp_assert_close(fft.hfft2(fft.ihfft2(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.hfft2(fft.ihfft2(x, norm=norm), norm=norm), x) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_ihfft2(self, xp): + x = xp.asarray(random((30, 20)), dtype=xp.float64) + expect = fft.ifft2(xp.asarray(x, dtype=xp.complex128))[:, :11] + xp_assert_close(fft.ihfft2(x), expect) + xp_assert_close(fft.ihfft2(x, norm="backward"), expect) + xp_assert_close( + fft.ihfft2(x, norm="ortho"), + expect * xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64)) + ) + xp_assert_close(fft.ihfft2(x, norm="forward"), expect * (30 * 20)) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by 
array-api-compat']) + def test_hfftn(self, xp): + x = xp.asarray(random((30, 20, 10))) + xp_assert_close(fft.hfftn(fft.ihfftn(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.hfftn(fft.ihfftn(x, norm=norm), norm=norm), x) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + def test_ihfftn(self, xp): + x = xp.asarray(random((30, 20, 10)), dtype=xp.float64) + expect = fft.ifftn(xp.asarray(x, dtype=xp.complex128))[:, :, :6] + xp_assert_close(expect, fft.ihfftn(x)) + xp_assert_close(expect, fft.ihfftn(x, norm="backward")) + xp_assert_close( + fft.ihfftn(x, norm="ortho"), + expect * xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64)) + ) + xp_assert_close(fft.ihfftn(x, norm="forward"), expect * (30 * 20 * 10)) + + def _check_axes(self, op, xp): + dtype = get_expected_input_dtype(op, xp) + x = xp.asarray(random((30, 20, 10)), dtype=dtype) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + xp_test = array_namespace(x) + for a in axes: + op_tr = op(xp_test.permute_dims(x, axes=a)) + tr_op = xp_test.permute_dims(op(x, axes=a), axes=a) + xp_assert_close(op_tr, tr_op) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn, fft.rfftn, fft.irfftn]) + def test_axes_standard(self, op, xp): + self._check_axes(op, xp) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + @pytest.mark.parametrize("op", [fft.hfftn, fft.ihfftn]) + def test_axes_non_standard(self, op, xp): + self._check_axes(op, xp) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn, + fft.rfftn, fft.irfftn]) + def test_axes_subset_with_shape_standard(self, op, xp): + dtype = get_expected_input_dtype(op, xp) + x = xp.asarray(random((16, 8, 4)), dtype=dtype) + axes = [(0, 1, 2), (0, 
2, 1), (1, 2, 0)] + xp_test = array_namespace(x) + for a in axes: + # different shape on the first two axes + shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax] + for ax in range(x.ndim)]) + # transform only the first two axes + op_tr = op(xp_test.permute_dims(x, axes=a), + s=shape[:2], axes=(0, 1)) + tr_op = xp_test.permute_dims(op(x, s=shape[:2], axes=a[:2]), + axes=a) + xp_assert_close(op_tr, tr_op) + + @skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) + @pytest.mark.parametrize("op", [fft.fft2, fft.ifft2, + fft.rfft2, fft.irfft2, + fft.hfft2, fft.ihfft2, + fft.hfftn, fft.ihfftn]) + def test_axes_subset_with_shape_non_standard(self, op, xp): + dtype = get_expected_input_dtype(op, xp) + x = xp.asarray(random((16, 8, 4)), dtype=dtype) + axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)] + xp_test = array_namespace(x) + for a in axes: + # different shape on the first two axes + shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax] + for ax in range(x.ndim)]) + # transform only the first two axes + op_tr = op(xp_test.permute_dims(x, axes=a), s=shape[:2], axes=(0, 1)) + tr_op = xp_test.permute_dims(op(x, s=shape[:2], axes=a[:2]), axes=a) + xp_assert_close(op_tr, tr_op) + + def test_all_1d_norm_preserving(self, xp): + # verify that round-trip transforms are norm-preserving + x = xp.asarray(random(30), dtype=xp.float64) + xp_test = array_namespace(x) + x_norm = xp_test.linalg.vector_norm(x) + n = size(x) * 2 + func_pairs = [(fft.rfft, fft.irfft), + # hfft: order so the first function takes x.size samples + # (necessary for comparison to x_norm above) + (fft.ihfft, fft.hfft), + # functions that expect complex dtypes at the end + (fft.fft, fft.ifft), + ] + for forw, back in func_pairs: + if forw == fft.fft: + x = xp.asarray(x, dtype=xp.complex128) + x_norm = xp_test.linalg.vector_norm(x) + for n in [size(x), 2*size(x)]: + for norm in ['backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, 
norm=norm) + xp_assert_close(xp_test.linalg.vector_norm(tmp), x_norm) + + @skip_if_array_api(np_only=True) + @pytest.mark.parametrize("dtype", [np.float16, np.longdouble]) + def test_dtypes_nonstandard(self, dtype): + x = random(30).astype(dtype) + out_dtypes = {np.float16: np.complex64, np.longdouble: np.clongdouble} + x_complex = x.astype(out_dtypes[dtype]) + + res_fft = fft.ifft(fft.fft(x)) + res_rfft = fft.irfft(fft.rfft(x)) + res_hfft = fft.hfft(fft.ihfft(x), x.shape[0]) + # Check both numerical results and exact dtype matches + assert_array_almost_equal(res_fft, x_complex) + assert_array_almost_equal(res_rfft, x) + assert_array_almost_equal(res_hfft, x) + assert res_fft.dtype == x_complex.dtype + assert res_rfft.dtype == np.result_type(np.float32, x.dtype) + assert res_hfft.dtype == np.result_type(np.float32, x.dtype) + + @pytest.mark.parametrize("dtype", ["float32", "float64"]) + def test_dtypes_real(self, dtype, xp): + x = xp.asarray(random(30), dtype=getattr(xp, dtype)) + + res_rfft = fft.irfft(fft.rfft(x)) + res_hfft = fft.hfft(fft.ihfft(x), x.shape[0]) + # Check both numerical results and exact dtype matches + rtol = {"float32": 1.2e-4, "float64": 1e-8}[dtype] + xp_assert_close(res_rfft, x, rtol=rtol, atol=0) + xp_assert_close(res_hfft, x, rtol=rtol, atol=0) + + @pytest.mark.parametrize("dtype", ["complex64", "complex128"]) + def test_dtypes_complex(self, dtype, xp): + x = xp.asarray(random(30), dtype=getattr(xp, dtype)) + + res_fft = fft.ifft(fft.fft(x)) + # Check both numerical results and exact dtype matches + rtol = {"complex64": 1.2e-4, "complex128": 1e-8}[dtype] + xp_assert_close(res_fft, x, rtol=rtol, atol=0) + +@skip_if_array_api(np_only=True) +@pytest.mark.parametrize( + "dtype", + [np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble]) +@pytest.mark.parametrize("order", ["F", 'non-contiguous']) +@pytest.mark.parametrize( + "fft", + [fft.fft, fft.fft2, fft.fftn, + fft.ifft, fft.ifft2, fft.ifftn]) +def 
test_fft_with_order(dtype, order, fft): + # Check that FFT/IFFT produces identical results for C, Fortran and + # non contiguous arrays + rng = np.random.RandomState(42) + X = rng.rand(8, 7, 13).astype(dtype, copy=False) + if order == 'F': + Y = np.asfortranarray(X) + else: + # Make a non contiguous array + Y = X[::-1] + X = np.ascontiguousarray(X[::-1]) + + if fft.__name__.endswith('fft'): + for axis in range(3): + X_res = fft(X, axis=axis) + Y_res = fft(Y, axis=axis) + assert_array_almost_equal(X_res, Y_res) + elif fft.__name__.endswith(('fft2', 'fftn')): + axes = [(0, 1), (1, 2), (0, 2)] + if fft.__name__.endswith('fftn'): + axes.extend([(0,), (1,), (2,), None]) + for ax in axes: + X_res = fft(X, axes=ax) + Y_res = fft(Y, axes=ax) + assert_array_almost_equal(X_res, Y_res) + else: + raise ValueError + + +class TestFFTThreadSafe: + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args, xp=None): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + + # Make sure all threads returned the correct value + for i in range(self.threads): + xp_assert_equal( + q.get(timeout=5), expected, + err_msg='Function returned wrong value in multithreaded context' + ) + + def test_fft(self, xp): + a = xp.ones(self.input_shape, dtype=xp.complex128) + self._test_mtsame(fft.fft, a, xp=xp) + + def test_ifft(self, xp): + a = xp.full(self.input_shape, 1+0j) + self._test_mtsame(fft.ifft, a, xp=xp) + + def test_rfft(self, xp): + a = xp.ones(self.input_shape) + self._test_mtsame(fft.rfft, a, xp=xp) + + def test_irfft(self, xp): + a = xp.full(self.input_shape, 1+0j) + self._test_mtsame(fft.irfft, a, xp=xp) + + def test_hfft(self, xp): + a = xp.ones(self.input_shape, dtype=xp.complex64) + self._test_mtsame(fft.hfft, a, 
xp=xp) + + def test_ihfft(self, xp): + a = xp.ones(self.input_shape) + self._test_mtsame(fft.ihfft, a, xp=xp) + + +@skip_if_array_api(np_only=True) +@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft]) +def test_multiprocess(func): + # Test that fft still works after fork (gh-10422) + + with multiprocessing.Pool(2) as p: + res = p.map(func, [np.ones(100) for _ in range(4)]) + + expect = func(np.ones(100)) + for x in res: + assert_allclose(x, expect) + + +@skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) +class TestIRFFTN: + + def test_not_last_axis_success(self, xp): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j*ai + a = xp.asarray(a) + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) + + +@skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) +@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft, + fft.fftn, fft.ifftn, + fft.rfftn, fft.irfftn, fft.hfft, fft.ihfft]) +def test_non_standard_params(func, xp): + if func in [fft.rfft, fft.rfftn, fft.ihfft]: + dtype = xp.float64 + else: + dtype = xp.complex128 + + if xp.__name__ != 'numpy': + x = xp.asarray([1, 2, 3], dtype=dtype) + # func(x) should not raise an exception + func(x) + assert_raises(ValueError, func, x, workers=2) + # `plan` param is not tested since SciPy does not use it currently + # but should be tested if it comes into use diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py new file mode 100644 index 0000000000000000000000000000000000000000..d9652facb776e8c791f2b209c1aec69d24ffe6c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py @@ -0,0 +1,169 @@ +import warnings +import numpy as np +import pytest + +from scipy.fft._fftlog import fht, ifht, fhtoffset +from scipy.special import poch + +from scipy.conftest import 
array_api_compatible +from scipy._lib._array_api import xp_assert_close + +pytestmark = array_api_compatible + + +def test_fht_agrees_with_fftlog(xp): + # check that fht numerically agrees with the output from Fortran FFTLog, + # the results were generated with the provided `fftlogtest` program, + # after fixing how the k array is generated (divide range by n-1, not n) + + # test function, analytical Hankel transform is of the same form + def f(r, mu): + return r**(mu+1)*np.exp(-r**2/2) + + r = np.logspace(-4, 4, 16) + + dln = np.log(r[1]/r[0]) + mu = 0.3 + offset = 0.0 + bias = 0.0 + + a = xp.asarray(f(r, mu)) + + # test 1: compute as given + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02, + -0.1949518286432330E-02, +0.3789220182554077E-02, + +0.5093959119952945E-03, +0.2785387803618774E-01, + +0.9944952700848897E-01, +0.4599202164586588E+00, + +0.3157462160881342E+00, -0.8201236844404755E-03, + -0.7834031308271878E-03, +0.3931444945110708E-03, + -0.2697710625194777E-03, +0.3568398050238820E-03, + -0.5554454827797206E-03, +0.8286331026468585E-03] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 2: change to optimal offset + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05, + +0.3150140927838524E-03, +0.9149121960963704E-03, + +0.5808089753959363E-02, +0.2548065256377240E-01, + +0.1339477692089897E+00, +0.4821530509479356E+00, + +0.2659899781579785E+00, -0.1116475278448113E-01, + +0.1791441617592385E-02, -0.4181810476548056E-03, + +0.1314963536765343E-03, -0.5422057743066297E-04, + +0.3208681804170443E-04, -0.2696849476008234E-04] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 3: positive bias + bias = 0.8 + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = 
[-7.3436673558316850E+00, +0.1710271207817100E+00, + +0.1065374386206564E+00, -0.5121739602708132E-01, + +0.2636649319269470E-01, +0.1697209218849693E-01, + +0.1250215614723183E+00, +0.4739583261486729E+00, + +0.2841149874912028E+00, -0.8312764741645729E-02, + +0.1024233505508988E-02, -0.1644902767389120E-03, + +0.3305775476926270E-04, -0.7786993194882709E-05, + +0.1962258449520547E-05, -0.8977895734909250E-06] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 4: negative bias + bias = -0.8 + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04, + +0.2123969254700955E-03, +0.1009558244834628E-02, + +0.5131386375222176E-02, +0.2461678673516286E-01, + +0.1235812845384476E+00, +0.4719570096404403E+00, + +0.2893487490631317E+00, -0.1686570611318716E-01, + +0.2231398155172505E-01, -0.1480742256379873E-01, + +0.1692387813500801E+00, +0.3097490354365797E+00, + +2.7593607182401860E+00, 10.5251075070045800E+00] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + +@pytest.mark.parametrize('optimal', [True, False]) +@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0]) +@pytest.mark.parametrize('bias', [0, 0.1, -0.1]) +@pytest.mark.parametrize('n', [64, 63]) +def test_fht_identity(n, bias, offset, optimal, xp): + rng = np.random.RandomState(3491349965) + + a = xp.asarray(rng.standard_normal(n)) + dln = rng.uniform(-1, 1) + mu = rng.uniform(-2, 2) + + if optimal: + offset = fhtoffset(dln, mu, initial=offset, bias=bias) + + A = fht(a, dln, mu, offset=offset, bias=bias) + a_ = ifht(A, dln, mu, offset=offset, bias=bias) + + xp_assert_close(a_, a) + + +def test_fht_special_cases(xp): + rng = np.random.RandomState(3491349965) + + a = xp.asarray(rng.standard_normal(64)) + dln = rng.uniform(-1, 1) + + # let x = (mu+1+q)/2, y = (mu+1-q)/2, M = {0, -1, -2, ...} + + # case 1: x in M, y in M => well-defined transform + 
mu, bias = -4.0, 1.0 + with warnings.catch_warnings(record=True) as record: + fht(a, dln, mu, bias=bias) + assert not record, 'fht warned about a well-defined transform' + + # case 2: x not in M, y in M => well-defined transform + mu, bias = -2.5, 0.5 + with warnings.catch_warnings(record=True) as record: + fht(a, dln, mu, bias=bias) + assert not record, 'fht warned about a well-defined transform' + + # case 3: x in M, y not in M => singular transform + mu, bias = -3.5, 0.5 + with pytest.warns(Warning) as record: + fht(a, dln, mu, bias=bias) + assert record, 'fht did not warn about a singular transform' + + # case 4: x not in M, y in M => singular inverse transform + mu, bias = -2.5, 0.5 + with pytest.warns(Warning) as record: + ifht(a, dln, mu, bias=bias) + assert record, 'ifht did not warn about a singular transform' + + +@pytest.mark.parametrize('n', [64, 63]) +def test_fht_exact(n, xp): + rng = np.random.RandomState(3491349965) + + # for a(r) a power law r^\gamma, the fast Hankel transform produces the + # exact continuous Hankel transform if biased with q = \gamma + + mu = rng.uniform(0, 3) + + # convergence of HT: -1-mu < gamma < 1/2 + gamma = rng.uniform(-1-mu, 1/2) + + r = np.logspace(-2, 2, n) + a = xp.asarray(r**gamma) + + dln = np.log(r[1]/r[0]) + + offset = fhtoffset(dln, mu, initial=0.0, bias=gamma) + + A = fht(a, dln, mu, offset=offset, bias=gamma) + + k = np.exp(offset)/r[::-1] + + # analytical result + At = xp.asarray((2/k)**gamma * poch((mu+1-gamma)/2, gamma)) + + xp_assert_close(A, At) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..c99fea40f7f58480ba3532cb490c78cde48ce4a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py @@ -0,0 +1,445 @@ +"""Includes test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 
2005 +Modified for Array API, 2023 + +""" +from scipy.fft._helper import next_fast_len, _init_nd_shape_and_axes +from numpy.testing import assert_equal +from pytest import raises as assert_raises +import pytest +import numpy as np +import sys +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close, SCIPY_DEVICE +from scipy import fft + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + +_5_smooth_numbers = [ + 2, 3, 4, 5, 6, 8, 9, 10, + 2 * 3 * 5, + 2**3 * 3**5, + 2**3 * 3**3 * 5**2, +] + +def test_next_fast_len(): + for n in _5_smooth_numbers: + assert_equal(next_fast_len(n), n) + + +def _assert_n_smooth(x, n): + x_orig = x + if n < 2: + assert False + + while True: + q, r = divmod(x, 2) + if r != 0: + break + x = q + + for d in range(3, n+1, 2): + while True: + q, r = divmod(x, d) + if r != 0: + break + x = q + + assert x == 1, \ + f'x={x_orig} is not {n}-smooth, remainder={x}' + + +@skip_if_array_api(np_only=True) +class TestNextFastLen: + + def test_next_fast_len(self): + np.random.seed(1234) + + def nums(): + yield from range(1, 1000) + yield 2**5 * 3**5 * 4**5 + 1 + + for n in nums(): + m = next_fast_len(n) + _assert_n_smooth(m, 11) + assert m == next_fast_len(n, False) + + m = next_fast_len(n, True) + _assert_n_smooth(m, 5) + + def test_np_integers(self): + ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64] + for ityp in ITYPES: + x = ityp(12345) + testN = next_fast_len(x) + assert_equal(testN, next_fast_len(int(x))) + + def testnext_fast_len_small(self): + hams = { + 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15, + 16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000 + } + for x, y in hams.items(): + assert_equal(next_fast_len(x, True), y) + + @pytest.mark.xfail(sys.maxsize < 2**32, + reason="Hamming Numbers too large for 32-bit", + raises=ValueError, strict=True) + def 
testnext_fast_len_big(self): + hams = { + 510183360: 510183360, 510183360 + 1: 512000000, + 511000000: 512000000, + 854296875: 854296875, 854296875 + 1: 859963392, + 196608000000: 196608000000, 196608000000 + 1: 196830000000, + 8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208, + 206391214080000: 206391214080000, + 206391214080000 + 1: 206624260800000, + 470184984576000: 470184984576000, + 470184984576000 + 1: 470715894135000, + 7222041363087360: 7222041363087360, + 7222041363087360 + 1: 7230196133913600, + # power of 5 5**23 + 11920928955078125: 11920928955078125, + 11920928955078125 - 1: 11920928955078125, + # power of 3 3**34 + 16677181699666569: 16677181699666569, + 16677181699666569 - 1: 16677181699666569, + # power of 2 2**54 + 18014398509481984: 18014398509481984, + 18014398509481984 - 1: 18014398509481984, + # above this, int(ceil(n)) == int(ceil(n+1)) + 19200000000000000: 19200000000000000, + 19200000000000000 + 1: 19221679687500000, + 288230376151711744: 288230376151711744, + 288230376151711744 + 1: 288325195312500000, + 288325195312500000 - 1: 288325195312500000, + 288325195312500000: 288325195312500000, + 288325195312500000 + 1: 288555831593533440, + } + for x, y in hams.items(): + assert_equal(next_fast_len(x, True), y) + + def test_keyword_args(self): + assert next_fast_len(11, real=True) == 12 + assert next_fast_len(target=7, real=False) == 7 + + +@skip_if_array_api(cpu_only=True) +class Test_init_nd_shape_and_axes: + + def test_py_0d_defaults(self, xp): + x = xp.asarray(4) + shape = None + axes = None + + shape_expected = () + axes_expected = [] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_0d_defaults(self, xp): + x = xp.asarray(7.) 
+ shape = None + axes = None + + shape_expected = () + axes_expected = [] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_py_1d_defaults(self, xp): + x = xp.asarray([1, 2, 3]) + shape = None + axes = None + + shape_expected = (3,) + axes_expected = [0] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_1d_defaults(self, xp): + x = xp.arange(0, 1, .1) + shape = None + axes = None + + shape_expected = (10,) + axes_expected = [0] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_py_2d_defaults(self, xp): + x = xp.asarray([[1, 2, 3, 4], + [5, 6, 7, 8]]) + shape = None + axes = None + + shape_expected = (2, 4) + axes_expected = [0, 1] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_2d_defaults(self, xp): + x = xp.arange(0, 1, .1) + x = xp.reshape(x, (5, 2)) + shape = None + axes = None + + shape_expected = (5, 2) + axes_expected = [0, 1] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_defaults(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = None + axes = None + + shape_expected = (6, 2, 5, 3, 4) + axes_expected = [0, 1, 2, 3, 4] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_shape(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, -1, 1, 4] + axes = None + + shape_expected = (10, 2, 5, 1, 4) + axes_expected = [0, 1, 2, 3, 4] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == 
shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_axes(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = None + axes = [4, 1, 2] + + shape_expected = (4, 2, 5) + axes_expected = [4, 1, 2] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_shape_axes(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, 2] + axes = [1, 0, 3] + + shape_expected = (10, 6, 2) + axes_expected = [1, 0, 3] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_shape_axes_subset(self, xp): + x = xp.zeros((2, 3, 4, 5)) + shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None) + + assert shape == (5, 5, 5) + assert axes == [1, 2, 3] + + def test_errors(self, xp): + x = xp.zeros(1) + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes(x, shape=None, axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes(x, shape=None, axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + _init_nd_shape_and_axes(x, shape=None, axes=[0, 0]) + + with assert_raises(ValueError, match="shape must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None) + + with assert_raises(ValueError, match="shape must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None) + + with assert_raises(ValueError, + 
match="when given, axes and shape arguments" + " have to be of the same length"): + _init_nd_shape_and_axes(xp.zeros([1, 1, 1, 1]), + shape=[1, 2, 3], axes=[1]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[0\]\) specified"): + _init_nd_shape_and_axes(x, shape=[0], axes=None) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[-2\]\) specified"): + _init_nd_shape_and_axes(x, shape=-2, axes=None) + + +@skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) +class TestFFTShift: + + def test_definition(self, xp): + x = xp.asarray([0., 1, 2, 3, 4, -4, -3, -2, -1]) + y = xp.asarray([-4., -3, -2, -1, 0, 1, 2, 3, 4]) + xp_assert_close(fft.fftshift(x), y) + xp_assert_close(fft.ifftshift(y), x) + x = xp.asarray([0., 1, 2, 3, 4, -5, -4, -3, -2, -1]) + y = xp.asarray([-5., -4, -3, -2, -1, 0, 1, 2, 3, 4]) + xp_assert_close(fft.fftshift(x), y) + xp_assert_close(fft.ifftshift(y), x) + + def test_inverse(self, xp): + for n in [1, 4, 9, 100, 211]: + x = xp.asarray(np.random.random((n,))) + xp_assert_close(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self, xp): + freqs = xp.asarray([[0., 1, 2], [3, 4, -4], [-3, -2, -1]]) + shifted = xp.asarray([[-1., -3, -2], [2, 0, 1], [-4, 3, 4]]) + xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shifted) + xp_assert_close(fft.fftshift(freqs, axes=0), fft.fftshift(freqs, axes=(0,))) + xp_assert_close(fft.ifftshift(shifted, axes=(0, 1)), freqs) + xp_assert_close(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + xp_assert_close(fft.fftshift(freqs), shifted) + xp_assert_close(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self, xp): + """ Test 2D input, which has uneven dimension sizes """ + freqs = xp.asarray([ + [0, 1], + [2, 3], + [4, 5] + ], dtype=xp.float64) + + # shift in dimension 0 + shift_dim0 = xp.asarray([ + [4, 5], + [0, 1], + [2, 3] + ], dtype=xp.float64) + 
xp_assert_close(fft.fftshift(freqs, axes=0), shift_dim0) + xp_assert_close(fft.ifftshift(shift_dim0, axes=0), freqs) + xp_assert_close(fft.fftshift(freqs, axes=(0,)), shift_dim0) + xp_assert_close(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = xp.asarray([ + [1, 0], + [3, 2], + [5, 4] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=1), shift_dim1) + xp_assert_close(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = xp.asarray([ + [5, 4], + [1, 0], + [3, 2] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + xp_assert_close(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + xp_assert_close(fft.fftshift(freqs, axes=None), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=None), freqs) + xp_assert_close(fft.fftshift(freqs), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both), freqs) + + +@skip_if_array_api('array_api_strict', 'cupy', + reasons=['fft not yet implemented by array-api-strict', + 'cupy.fft not yet implemented by array-api-compat']) +class TestFFTFreq: + + def test_definition(self, xp): + device = SCIPY_DEVICE + try: + x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1], + dtype=xp.float64, device=device) + x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], + dtype=xp.float64, device=device) + except TypeError: + x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1], dtype=xp.float64) + x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], + dtype=xp.float64) + + y = xp.asarray(9 * fft.fftfreq(9, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + y = xp.asarray(9 * xp.pi * fft.fftfreq(9, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + + y = xp.asarray(10 * fft.fftfreq(10, xp=xp), dtype=xp.float64) + 
xp_assert_close(y, x2) + y = xp.asarray(10 * xp.pi * fft.fftfreq(10, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x2) + + +@skip_if_array_api('array_api_strict', 'cupy', + reasons=['fft not yet implemented by array-api-strict', + 'cupy.fft not yet implemented by array-api-compat']) +class TestRFFTFreq: + + def test_definition(self, xp): + device = SCIPY_DEVICE + try: + x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64, device=device) + x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64, device=device) + except TypeError: + # work around the `device` keyword not being implemented in numpy yet + x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64) + x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64) + + y = xp.asarray(9 * fft.rfftfreq(9, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + y = xp.asarray(9 * xp.pi * fft.rfftfreq(9, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + + y = xp.asarray(10 * fft.rfftfreq(10, xp=xp), dtype=xp.float64) + xp_assert_close(y, x2) + y = xp.asarray(10 * xp.pi * fft.rfftfreq(10, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x2) diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py new file mode 100644 index 0000000000000000000000000000000000000000..e771aff63b173d2e939913ccc26467e80ba670c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py @@ -0,0 +1,83 @@ +from scipy import fft +import numpy as np +import pytest +from numpy.testing import assert_allclose +import multiprocessing +import os + + +@pytest.fixture(scope='module') +def x(): + return np.random.randn(512, 128) # Must be large enough to qualify for mt + + +@pytest.mark.parametrize("func", [ + fft.fft, fft.ifft, fft.fft2, fft.ifft2, fft.fftn, fft.ifftn, + fft.rfft, fft.irfft, fft.rfft2, fft.irfft2, fft.rfftn, fft.irfftn, + fft.hfft, fft.ihfft, fft.hfft2, fft.ihfft2, fft.hfftn, fft.ihfftn, + fft.dct, 
fft.idct, fft.dctn, fft.idctn, + fft.dst, fft.idst, fft.dstn, fft.idstn, +]) +@pytest.mark.parametrize("workers", [2, -1]) +def test_threaded_same(x, func, workers): + expected = func(x, workers=1) + actual = func(x, workers=workers) + assert_allclose(actual, expected) + + +def _mt_fft(x): + return fft.fft(x, workers=2) + + +def test_mixed_threads_processes(x): + # Test that the fft threadpool is safe to use before & after fork + + expect = fft.fft(x, workers=2) + + with multiprocessing.Pool(2) as p: + res = p.map(_mt_fft, [x for _ in range(4)]) + + for r in res: + assert_allclose(r, expect) + + fft.fft(x, workers=2) + + +def test_invalid_workers(x): + cpus = os.cpu_count() + + fft.ifft([1], workers=-cpus) + + with pytest.raises(ValueError, match='workers must not be zero'): + fft.fft(x, workers=0) + + with pytest.raises(ValueError, match='workers value out of range'): + fft.ifft(x, workers=-cpus-1) + + +def test_set_get_workers(): + cpus = os.cpu_count() + assert fft.get_workers() == 1 + with fft.set_workers(4): + assert fft.get_workers() == 4 + + with fft.set_workers(-1): + assert fft.get_workers() == cpus + + assert fft.get_workers() == 4 + + assert fft.get_workers() == 1 + + with fft.set_workers(-cpus): + assert fft.get_workers() == 1 + + +def test_set_workers_invalid(): + + with pytest.raises(ValueError, match='workers must not be zero'): + with fft.set_workers(0): + pass + + with pytest.raises(ValueError, match='workers value out of range'): + with fft.set_workers(-os.cpu_count()-1): + pass diff --git a/venv/lib/python3.10/site-packages/scipy/fft/tests/test_real_transforms.py b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_real_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..9eb5b52d77067771d73cc4f9d418634618d8d469 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/fft/tests/test_real_transforms.py @@ -0,0 +1,234 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +import 
pytest +import math + +from scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn +import scipy.fft as fft +from scipy import fftpack +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import copy, xp_assert_close + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + +SQRT_2 = math.sqrt(2) + +# scipy.fft wraps the fftpack versions but with normalized inverse transforms. +# So, the forward transforms and definitions are already thoroughly tested in +# fftpack/test_real_transforms.py + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("n", [2, 3, 4, 5, 10, 16]) +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +@pytest.mark.parametrize("orthogonalize", [False, True]) +def test_identity_1d(forward, backward, type, n, axis, norm, orthogonalize, xp): + # Test the identity f^-1(f(x)) == x + x = xp.asarray(np.random.rand(n, n)) + + y = forward(x, type, axis=axis, norm=norm, orthogonalize=orthogonalize) + z = backward(y, type, axis=axis, norm=norm, orthogonalize=orthogonalize) + xp_assert_close(z, x) + + pad = [(0, 0)] * 2 + pad[axis] = (0, 4) + + y2 = xp.asarray(np.pad(np.asarray(y), pad, mode='edge')) + z2 = backward(y2, type, n, axis, norm, orthogonalize=orthogonalize) + xp_assert_close(z2, x) + + +@skip_if_array_api(np_only=True, + reasons=['`overwrite_x` only supported for NumPy backend.']) +@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64, + np.complex64, np.complex128]) +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) 
+@pytest.mark.parametrize("overwrite_x", [True, False]) +def test_identity_1d_overwrite(forward, backward, type, dtype, axis, norm, + overwrite_x): + # Test the identity f^-1(f(x)) == x + x = np.random.rand(7, 8).astype(dtype) + x_orig = x.copy() + + y = forward(x, type, axis=axis, norm=norm, overwrite_x=overwrite_x) + y_orig = y.copy() + z = backward(y, type, axis=axis, norm=norm, overwrite_x=overwrite_x) + if not overwrite_x: + assert_allclose(z, x, rtol=1e-6, atol=1e-6) + assert_array_equal(x, x_orig) + assert_array_equal(y, y_orig) + else: + assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6) + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("shape, axes", + [ + ((4, 4), 0), + ((4, 4), 1), + ((4, 4), None), + ((4, 4), (0, 1)), + ((10, 12), None), + ((10, 12), (0, 1)), + ((4, 5, 6), None), + ((4, 5, 6), 1), + ((4, 5, 6), (0, 2)), + ]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +@pytest.mark.parametrize("orthogonalize", [False, True]) +def test_identity_nd(forward, backward, type, shape, axes, norm, + orthogonalize, xp): + # Test the identity f^-1(f(x)) == x + + x = xp.asarray(np.random.random(shape)) + + if axes is not None: + shape = np.take(shape, axes) + + y = forward(x, type, axes=axes, norm=norm, orthogonalize=orthogonalize) + z = backward(y, type, axes=axes, norm=norm, orthogonalize=orthogonalize) + xp_assert_close(z, x) + + if axes is None: + pad = [(0, 4)] * x.ndim + elif isinstance(axes, int): + pad = [(0, 0)] * x.ndim + pad[axes] = (0, 4) + else: + pad = [(0, 0)] * x.ndim + + for a in axes: + pad[a] = (0, 4) + + # TODO write an array-agnostic pad + y2 = xp.asarray(np.pad(np.asarray(y), pad, mode='edge')) + z2 = backward(y2, type, shape, axes, norm, orthogonalize=orthogonalize) + xp_assert_close(z2, x) + + +@skip_if_array_api(np_only=True, + reasons=['`overwrite_x` only supported for 
NumPy backend.']) +@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("shape, axes", + [ + ((4, 5), 0), + ((4, 5), 1), + ((4, 5), None), + ]) +@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64, + np.complex64, np.complex128]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +@pytest.mark.parametrize("overwrite_x", [False, True]) +def test_identity_nd_overwrite(forward, backward, type, shape, axes, dtype, + norm, overwrite_x): + # Test the identity f^-1(f(x)) == x + + x = np.random.random(shape).astype(dtype) + x_orig = x.copy() + + if axes is not None: + shape = np.take(shape, axes) + + y = forward(x, type, axes=axes, norm=norm) + y_orig = y.copy() + z = backward(y, type, axes=axes, norm=norm) + if overwrite_x: + assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6) + else: + assert_allclose(z, x, rtol=1e-6, atol=1e-6) + assert_array_equal(x, x_orig) + assert_array_equal(y, y_orig) + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("func", ['dct', 'dst', 'dctn', 'dstn']) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +def test_fftpack_equivalience(func, type, norm, xp): + x = np.random.rand(8, 16) + fftpack_res = xp.asarray(getattr(fftpack, func)(x, type, norm=norm)) + x = xp.asarray(x) + fft_res = getattr(fft, func)(x, type, norm=norm) + + xp_assert_close(fft_res, fftpack_res) + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("func", [dct, dst, dctn, dstn]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +def test_orthogonalize_default(func, type, xp): + # Test orthogonalize is the default when norm="ortho", but not otherwise + x = xp.asarray(np.random.rand(100)) + + for norm, ortho in [ + ("forward", False), + ("backward", False), + ("ortho", True), + ]: + a = func(x, type=type, norm=norm, orthogonalize=ortho) + b = 
func(x, type=type, norm=norm) + xp_assert_close(a, b) + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"]) +@pytest.mark.parametrize("func, type", [ + (dct, 4), (dst, 1), (dst, 4)]) +def test_orthogonalize_noop(func, type, norm, xp): + # Transforms where orthogonalize is a no-op + x = xp.asarray(np.random.rand(100)) + y1 = func(x, type=type, norm=norm, orthogonalize=True) + y2 = func(x, type=type, norm=norm, orthogonalize=False) + xp_assert_close(y1, y2) + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"]) +def test_orthogonalize_dct1(norm, xp): + x = xp.asarray(np.random.rand(100)) + + x2 = copy(x, xp=xp) + x2[0] *= SQRT_2 + x2[-1] *= SQRT_2 + + y1 = dct(x, type=1, norm=norm, orthogonalize=True) + y2 = dct(x2, type=1, norm=norm, orthogonalize=False) + + y2[0] /= SQRT_2 + y2[-1] /= SQRT_2 + xp_assert_close(y1, y2) + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"]) +@pytest.mark.parametrize("func", [dct, dst]) +def test_orthogonalize_dcst2(func, norm, xp): + x = xp.asarray(np.random.rand(100)) + y1 = func(x, type=2, norm=norm, orthogonalize=True) + y2 = func(x, type=2, norm=norm, orthogonalize=False) + + y2[0 if func == dct else -1] /= SQRT_2 + xp_assert_close(y1, y2) + + +@skip_if_array_api(cpu_only=True) +@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"]) +@pytest.mark.parametrize("func", [dct, dst]) +def test_orthogonalize_dcst3(func, norm, xp): + x = xp.asarray(np.random.rand(100)) + x2 = copy(x, xp=xp) + x2[0 if func == dct else -1] *= SQRT_2 + + y1 = func(x, type=3, norm=norm, orthogonalize=True) + y2 = func(x2, type=3, norm=norm, orthogonalize=False) + xp_assert_close(y1, y2) diff --git a/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..e5a993788b4a0cbab93d07e0ae4781f00eba81a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46ec7e99dfd9064e245d3acb37b06c05226115ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62e0608c976a560dea431d361e14c89c89ab39c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/io/_netcdf.py b/venv/lib/python3.10/site-packages/scipy/io/_netcdf.py new file mode 100644 index 0000000000000000000000000000000000000000..4b8b576b8612334169834f58ec2ae7e068f653cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/io/_netcdf.py @@ -0,0 +1,1095 @@ +""" +NetCDF reader/writer module. + +This module is used to read and create NetCDF files. NetCDF files are +accessed through the `netcdf_file` object. Data written to and from NetCDF +files are contained in `netcdf_variable` objects. Attributes are given +as member variables of the `netcdf_file` and `netcdf_variable` objects. + +This module implements the Scientific.IO.NetCDF API to read and create +NetCDF files. 
The same API is also used in the PyNIO and pynetcdf +modules, allowing these modules to be used interchangeably when working +with NetCDF files. + +Only NetCDF3 is supported here; for NetCDF4 see +`netCDF4-python `__, +which has a similar API. + +""" + +# TODO: +# * properly implement ``_FillValue``. +# * fix character variables. +# * implement PAGESIZE for Python 2.6? + +# The Scientific.IO.NetCDF API allows attributes to be added directly to +# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate +# between user-set attributes and instance attributes, user-set attributes +# are automatically stored in the ``_attributes`` attribute by overloading +#``__setattr__``. This is the reason why the code sometimes uses +#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; +# otherwise the key would be inserted into userspace attributes. + + +__all__ = ['netcdf_file', 'netcdf_variable'] + + +import warnings +import weakref +from operator import mul +from platform import python_implementation + +import mmap as mm + +import numpy as np +from numpy import frombuffer, dtype, empty, array, asarray +from numpy import little_endian as LITTLE_ENDIAN +from functools import reduce + + +IS_PYPY = python_implementation() == 'PyPy' + +ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' +ZERO = b'\x00\x00\x00\x00' +NC_BYTE = b'\x00\x00\x00\x01' +NC_CHAR = b'\x00\x00\x00\x02' +NC_SHORT = b'\x00\x00\x00\x03' +NC_INT = b'\x00\x00\x00\x04' +NC_FLOAT = b'\x00\x00\x00\x05' +NC_DOUBLE = b'\x00\x00\x00\x06' +NC_DIMENSION = b'\x00\x00\x00\n' +NC_VARIABLE = b'\x00\x00\x00\x0b' +NC_ATTRIBUTE = b'\x00\x00\x00\x0c' +FILL_BYTE = b'\x81' +FILL_CHAR = b'\x00' +FILL_SHORT = b'\x80\x01' +FILL_INT = b'\x80\x00\x00\x01' +FILL_FLOAT = b'\x7C\xF0\x00\x00' +FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00' + +TYPEMAP = {NC_BYTE: ('b', 1), + NC_CHAR: ('c', 1), + NC_SHORT: ('h', 2), + NC_INT: ('i', 4), + NC_FLOAT: ('f', 4), + NC_DOUBLE: ('d', 8)} + +FILLMAP = {NC_BYTE: FILL_BYTE, 
+ NC_CHAR: FILL_CHAR, + NC_SHORT: FILL_SHORT, + NC_INT: FILL_INT, + NC_FLOAT: FILL_FLOAT, + NC_DOUBLE: FILL_DOUBLE} + +REVERSE = {('b', 1): NC_BYTE, + ('B', 1): NC_CHAR, + ('c', 1): NC_CHAR, + ('h', 2): NC_SHORT, + ('i', 4): NC_INT, + ('f', 4): NC_FLOAT, + ('d', 8): NC_DOUBLE, + + # these come from asarray(1).dtype.char and asarray('foo').dtype.char, + # used when getting the types from generic attributes. + ('l', 4): NC_INT, + ('S', 1): NC_CHAR} + + +class netcdf_file: + """ + A file object for NetCDF data. + + A `netcdf_file` object has two standard attributes: `dimensions` and + `variables`. The values of both are dictionaries, mapping dimension + names to their associated lengths and variable names to variables, + respectively. Application programs should never modify these + dictionaries. + + All other attributes correspond to global attributes defined in the + NetCDF file. Global file attributes are created by assigning to an + attribute of the `netcdf_file` object. + + Parameters + ---------- + filename : string or file-like + string -> filename + mode : {'r', 'w', 'a'}, optional + read-write-append mode, default is 'r' + mmap : None or bool, optional + Whether to mmap `filename` when reading. Default is True + when `filename` is a file name, False when `filename` is a + file-like object. Note that when mmap is in use, data arrays + returned refer directly to the mmapped data on disk, and the + file cannot be closed as long as references to it exist. + version : {1, 2}, optional + version of netcdf to read / write, where 1 means *Classic + format* and 2 means *64-bit offset format*. Default is 1. See + `here `__ + for more info. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + Notes + ----- + The major advantage of this module over other modules is that it doesn't + require the code to be linked to the NetCDF libraries. This module is + derived from `pupynere `_. 
+ + NetCDF files are a self-describing binary data format. The file contains + metadata that describes the dimensions and variables in the file. More + details about NetCDF files can be found `here + `__. There + are three main sections to a NetCDF data structure: + + 1. Dimensions + 2. Variables + 3. Attributes + + The dimensions section records the name and length of each dimension used + by the variables. The variables would then indicate which dimensions it + uses and any attributes such as data units, along with containing the data + values for the variable. It is good practice to include a + variable that is the same name as a dimension to provide the values for + that axes. Lastly, the attributes section would contain additional + information such as the name of the file creator or the instrument used to + collect the data. + + When writing data to a NetCDF file, there is often the need to indicate the + 'record dimension'. A record dimension is the unbounded dimension for a + variable. For example, a temperature variable may have dimensions of + latitude, longitude and time. If one wants to add more temperature data to + the NetCDF file as time progresses, then the temperature variable should + have the time dimension flagged as the record dimension. + + In addition, the NetCDF file header contains the position of the data in + the file, so access can be done in an efficient manner without loading + unnecessary data into memory. It uses the ``mmap`` module to create + Numpy arrays mapped to the data on disk, for the same purpose. + + Note that when `netcdf_file` is used to open a file with mmap=True + (default for read-only), arrays returned by it refer to data + directly on the disk. The file should not be closed, and cannot be cleanly + closed when asked, if such arrays are alive. You may want to copy data arrays + obtained from mmapped Netcdf file if they are to be processed after the file + is closed, see the example below. 
+ + Examples + -------- + To create a NetCDF file: + + >>> from scipy.io import netcdf_file + >>> import numpy as np + >>> f = netcdf_file('simple.nc', 'w') + >>> f.history = 'Created for a test' + >>> f.createDimension('time', 10) + >>> time = f.createVariable('time', 'i', ('time',)) + >>> time[:] = np.arange(10) + >>> time.units = 'days since 2008-01-01' + >>> f.close() + + Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice + of the time variable allows for the data to be set in the object, rather + than letting ``arange(10)`` overwrite the ``time`` variable. + + To read the NetCDF file we just created: + + >>> from scipy.io import netcdf_file + >>> f = netcdf_file('simple.nc', 'r') + >>> print(f.history) + b'Created for a test' + >>> time = f.variables['time'] + >>> print(time.units) + b'days since 2008-01-01' + >>> print(time.shape) + (10,) + >>> print(time[-1]) + 9 + + NetCDF files, when opened read-only, return arrays that refer + directly to memory-mapped data on disk: + + >>> data = time[:] + + If the data is to be processed after the file is closed, it needs + to be copied to main memory: + + >>> data = time[:].copy() + >>> del time + >>> f.close() + >>> data.mean() + 4.5 + + A NetCDF file can also be used as context manager: + + >>> from scipy.io import netcdf_file + >>> with netcdf_file('simple.nc', 'r') as f: + ... 
print(f.history) + b'Created for a test' + + """ + def __init__(self, filename, mode='r', mmap=None, version=1, + maskandscale=False): + """Initialize netcdf_file from fileobj (str or file-like).""" + if mode not in 'rwa': + raise ValueError("Mode must be either 'r', 'w' or 'a'.") + + if hasattr(filename, 'seek'): # file-like + self.fp = filename + self.filename = 'None' + if mmap is None: + mmap = False + elif mmap and not hasattr(filename, 'fileno'): + raise ValueError('Cannot use file object for mmap') + else: # maybe it's a string + self.filename = filename + omode = 'r+' if mode == 'a' else mode + self.fp = open(self.filename, '%sb' % omode) + if mmap is None: + # Mmapped files on PyPy cannot be usually closed + # before the GC runs, so it's better to use mmap=False + # as the default. + mmap = (not IS_PYPY) + + if mode != 'r': + # Cannot read write-only files + mmap = False + + self.use_mmap = mmap + self.mode = mode + self.version_byte = version + self.maskandscale = maskandscale + + self.dimensions = {} + self.variables = {} + + self._dims = [] + self._recs = 0 + self._recsize = 0 + + self._mm = None + self._mm_buf = None + if self.use_mmap: + self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) + self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) + + self._attributes = {} + + if mode in 'ra': + self._read() + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. 
+ try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def close(self): + """Closes the NetCDF file.""" + if hasattr(self, 'fp') and not self.fp.closed: + try: + self.flush() + finally: + self.variables = {} + if self._mm_buf is not None: + ref = weakref.ref(self._mm_buf) + self._mm_buf = None + if ref() is None: + # self._mm_buf is gc'd, and we can close the mmap + self._mm.close() + else: + # we cannot close self._mm, since self._mm_buf is + # alive and there may still be arrays referring to it + warnings.warn( + "Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still " + "exist. All data arrays obtained from such files refer " + "directly to data on disk, and must be copied before the " + "file can be cleanly closed. " + "(See netcdf_file docstring for more information on mmap.)", + category=RuntimeWarning, stacklevel=2, + ) + self._mm = None + self.fp.close() + __del__ = close + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def createDimension(self, name, length): + """ + Adds a dimension to the Dimension section of the NetCDF data structure. + + Note that this function merely adds a new dimension that the variables can + reference. The values for the dimension, if desired, should be added as + a variable using `createVariable`, referring to this dimension. + + Parameters + ---------- + name : str + Name of the dimension (Eg, 'lat' or 'time'). + length : int + Length of the dimension. + + See Also + -------- + createVariable + + """ + if length is None and self._dims: + raise ValueError("Only first dimension may be unlimited!") + + self.dimensions[name] = length + self._dims.append(name) + + def createVariable(self, name, type, dimensions): + """ + Create an empty variable for the `netcdf_file` object, specifying its data + type and the dimensions it uses. 
+ + Parameters + ---------- + name : str + Name of the new variable. + type : dtype or str + Data type of the variable. + dimensions : sequence of str + List of the dimension names used by the variable, in the desired order. + + Returns + ------- + variable : netcdf_variable + The newly created ``netcdf_variable`` object. + This object has also been added to the `netcdf_file` object as well. + + See Also + -------- + createDimension + + Notes + ----- + Any dimensions to be used by the variable should already exist in the + NetCDF data structure or should be created by `createDimension` prior to + creating the NetCDF variable. + + """ + shape = tuple([self.dimensions[dim] for dim in dimensions]) + shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for NumPy + + type = dtype(type) + typecode, size = type.char, type.itemsize + if (typecode, size) not in REVERSE: + raise ValueError("NetCDF 3 does not support type %s" % type) + + # convert to big endian always for NetCDF 3 + data = empty(shape_, dtype=type.newbyteorder("B")) + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, + maskandscale=self.maskandscale) + return self.variables[name] + + def flush(self): + """ + Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. + + See Also + -------- + sync : Identical function + + """ + if hasattr(self, 'mode') and self.mode in 'wa': + self._write() + sync = flush + + def _write(self): + self.fp.seek(0) + self.fp.write(b'CDF') + self.fp.write(array(self.version_byte, '>b').tobytes()) + + # Write headers and data. + self._write_numrecs() + self._write_dim_array() + self._write_gatt_array() + self._write_var_array() + + def _write_numrecs(self): + # Get highest record count from all record variables. 
+ for var in self.variables.values(): + if var.isrec and len(var.data) > self._recs: + self.__dict__['_recs'] = len(var.data) + self._pack_int(self._recs) + + def _write_dim_array(self): + if self.dimensions: + self.fp.write(NC_DIMENSION) + self._pack_int(len(self.dimensions)) + for name in self._dims: + self._pack_string(name) + length = self.dimensions[name] + self._pack_int(length or 0) # replace None with 0 for record dimension + else: + self.fp.write(ABSENT) + + def _write_gatt_array(self): + self._write_att_array(self._attributes) + + def _write_att_array(self, attributes): + if attributes: + self.fp.write(NC_ATTRIBUTE) + self._pack_int(len(attributes)) + for name, values in attributes.items(): + self._pack_string(name) + self._write_att_values(values) + else: + self.fp.write(ABSENT) + + def _write_var_array(self): + if self.variables: + self.fp.write(NC_VARIABLE) + self._pack_int(len(self.variables)) + + # Sort variable names non-recs first, then recs. + def sortkey(n): + v = self.variables[n] + if v.isrec: + return (-1,) + return v._shape + variables = sorted(self.variables, key=sortkey, reverse=True) + + # Set the metadata for all variables. + for name in variables: + self._write_var_metadata(name) + # Now that we have the metadata, we know the vsize of + # each record variable, so we can calculate recsize. + self.__dict__['_recsize'] = sum([ + var._vsize for var in self.variables.values() + if var.isrec]) + # Set the data for all variables. 
            for name in variables:
                self._write_var_data(name)
        else:
            self.fp.write(ABSENT)

    def _write_var_metadata(self, name):
        # Write one variable's header entry: name, dimension ids,
        # attributes, type, vsize, and a placeholder begin offset.
        var = self.variables[name]

        self._pack_string(name)
        self._pack_int(len(var.dimensions))
        for dimname in var.dimensions:
            # Dimensions are referenced by their index in the dim array.
            dimid = self._dims.index(dimname)
            self._pack_int(dimid)

        self._write_att_array(var._attributes)

        nc_type = REVERSE[var.typecode(), var.itemsize()]
        self.fp.write(nc_type)

        if not var.isrec:
            vsize = var.data.size * var.data.itemsize
            # Round up to the next 4-byte boundary.
            vsize += -vsize % 4
        else:  # record variable
            try:
                vsize = var.data[0].size * var.data.itemsize
            except IndexError:
                # No records yet.
                vsize = 0
            rec_vars = len([v for v in self.variables.values()
                            if v.isrec])
            # Padding is only applied when records are interleaved,
            # i.e. when there is more than one record variable.
            if rec_vars > 1:
                vsize += -vsize % 4
        # Assign through __dict__ so __setattr__ does not record these
        # bookkeeping fields as NetCDF attributes.
        self.variables[name].__dict__['_vsize'] = vsize
        self._pack_int(vsize)

        # Pack a bogus begin, and set the real value later.
        self.variables[name].__dict__['_begin'] = self.fp.tell()
        self._pack_begin(0)

    def _write_var_data(self, name):
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tobytes())
            count = var.data.size * var.data.itemsize
            self._write_var_padding(var, var._vsize - count)
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    var.data.resize(shape)
                except ValueError:
                    dtype = var.data.dtype
                    var.__dict__['data'] = np.resize(var.data, shape).astype(dtype)

            # Records of different variables are interleaved: each record
            # of this variable is _recsize bytes after the previous one.
            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                        (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    # Scalar in native little-endian order: swap by hand.
                    rec = rec.byteswap()
                self.fp.write(rec.tobytes())
                # Padding
                count = rec.size * rec.itemsize
                self._write_var_padding(var, var._vsize - count)
                # Jump to where this variable's next record lives.
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize)

    def _write_var_padding(self, var, size):
        # Fill the trailing gap up to vsize with the variable's fill value.
        encoded_fill_value = var._get_encoded_fill_value()
        num_fills = size // len(encoded_fill_value)
        self.fp.write(encoded_fill_value * num_fills)

    def _write_att_values(self, values):
        # Map the attribute value to an on-disk NetCDF type, either from
        # its array dtype or by sampling a plain Python value.
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)]

            # bytes index into scalars in py3k. Check for "string" types
            if isinstance(values, (str, bytes)):
                sample = values
            else:
                try:
                    sample = values[0]  # subscriptable?
                except TypeError:
                    sample = values  # scalar

            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        # asarray() dies with bytes and '>c' in py3k. Change to 'S'
        dtype_ = 'S' if dtype_ == '>c' else dtype_

        values = asarray(values, dtype=dtype_)

        self.fp.write(nc_type)

        if values.dtype.char == 'S':
            # For strings, nelems is the byte length, not the array size.
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        if not values.shape and (values.dtype.byteorder == '<' or
                (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            # Same scalar byte-order workaround as in _write_var_data.
            values = values.byteswap()
        self.fp.write(values.tobytes())
        count = values.size * values.itemsize
        self.fp.write(b'\x00' * (-count % 4))  # pad

    def _read(self):
        # Check magic bytes and version
        magic = self.fp.read(3)
        if not magic == b'CDF':
            raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                            self.filename)
        self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]

        # Read file headers and set data.
        self._read_numrecs()
        self._read_dim_array()
        self._read_gatt_array()
        self._read_var_array()

    def _read_numrecs(self):
        # Bypass __setattr__: '_recs' is bookkeeping, not a user attribute.
        self.__dict__['_recs'] = self._unpack_int()

    def _read_dim_array(self):
        # Dimension section: NC_DIMENSION tag (or ZERO when absent),
        # count, then (name, length) pairs.
        header = self.fp.read(4)
        if header not in [ZERO, NC_DIMENSION]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        for dim in range(count):
            name = self._unpack_string().decode('latin1')
            length = self._unpack_int() or None  # None for record dimension
            self.dimensions[name] = length
            self._dims.append(name)  # preserve order

    def _read_gatt_array(self):
        # Global attributes become attributes of this netcdf_file object.
        for k, v in self._read_att_array().items():
            self.__setattr__(k, v)

    def _read_att_array(self):
        # Attribute section: NC_ATTRIBUTE tag (or ZERO when absent),
        # count, then (name, values) pairs.
        header = self.fp.read(4)
        if header not in [ZERO, NC_ATTRIBUTE]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        attributes = {}
        for attr in range(count):
            name = self._unpack_string().decode('latin1')
            attributes[name] = self._read_att_values()
        return attributes

    def _read_var_array(self):
        header = self.fp.read(4)
        if header not in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        begin = 0
        # Structured-dtype spec accumulated for the interleaved record block.
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            (name, dimensions, shape, attributes,
             typecode, size, dtype_, begin_, vsize) = self._read_var()
            # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header.
            # The 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                # All record variables share one interleaved block that
                # starts at the first record variable's offset.
                if begin == 0:
                    begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * size
                if self.use_mmap:
                    # Zero-copy view into the mmapped file.
                    data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
                    data.shape = shape
                else:
                    # Read the bytes, then restore the file position.
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    data = frombuffer(self.fp.read(a_size), dtype=dtype_
                                      ).copy()
                    data.shape = shape
                    self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(
                data, typecode, size, shape, dimensions, attributes,
                maskandscale=self.maskandscale)

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
+ if self.use_mmap: + buf = self._mm_buf[begin:begin+self._recs*self._recsize] + rec_array = buf.view(dtype=dtypes) + rec_array.shape = (self._recs,) + else: + pos = self.fp.tell() + self.fp.seek(begin) + rec_array = frombuffer(self.fp.read(self._recs*self._recsize), + dtype=dtypes).copy() + rec_array.shape = (self._recs,) + self.fp.seek(pos) + + for var in rec_vars: + self.variables[var].__dict__['data'] = rec_array[var] + + def _read_var(self): + name = self._unpack_string().decode('latin1') + dimensions = [] + shape = [] + dims = self._unpack_int() + + for i in range(dims): + dimid = self._unpack_int() + dimname = self._dims[dimid] + dimensions.append(dimname) + dim = self.dimensions[dimname] + shape.append(dim) + dimensions = tuple(dimensions) + shape = tuple(shape) + + attributes = self._read_att_array() + nc_type = self.fp.read(4) + vsize = self._unpack_int() + begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() + + typecode, size = TYPEMAP[nc_type] + dtype_ = '>%s' % typecode + + return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize + + def _read_att_values(self): + nc_type = self.fp.read(4) + n = self._unpack_int() + + typecode, size = TYPEMAP[nc_type] + + count = n*size + values = self.fp.read(int(count)) + self.fp.read(-count % 4) # read padding + + if typecode != 'c': + values = frombuffer(values, dtype='>%s' % typecode).copy() + if values.shape == (1,): + values = values[0] + else: + values = values.rstrip(b'\x00') + return values + + def _pack_begin(self, begin): + if self.version_byte == 1: + self._pack_int(begin) + elif self.version_byte == 2: + self._pack_int64(begin) + + def _pack_int(self, value): + self.fp.write(array(value, '>i').tobytes()) + _pack_int32 = _pack_int + + def _unpack_int(self): + return int(frombuffer(self.fp.read(4), '>i')[0]) + _unpack_int32 = _unpack_int + + def _pack_int64(self, value): + self.fp.write(array(value, '>q').tobytes()) + + def _unpack_int64(self): + return 
frombuffer(self.fp.read(8), '>q')[0] + + def _pack_string(self, s): + count = len(s) + self._pack_int(count) + self.fp.write(s.encode('latin1')) + self.fp.write(b'\x00' * (-count % 4)) # pad + + def _unpack_string(self): + count = self._unpack_int() + s = self.fp.read(count).rstrip(b'\x00') + self.fp.read(-count % 4) # read padding + return s + + +class netcdf_variable: + """ + A data object for netcdf files. + + `netcdf_variable` objects are constructed by calling the method + `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` + objects behave much like array objects defined in numpy, except that their + data resides in a file. Data is read by indexing and written by assigning + to an indexed subset; the entire array can be accessed by the index ``[:]`` + or (for scalars) by using the methods `getValue` and `assignValue`. + `netcdf_variable` objects also have attribute `shape` with the same meaning + as for arrays, but the shape cannot be modified. There is another read-only + attribute `dimensions`, whose value is the tuple of dimension names. + + All other attributes correspond to variable attributes defined in + the NetCDF file. Variable attributes are created by assigning to an + attribute of the `netcdf_variable` object. + + Parameters + ---------- + data : array_like + The data array that holds the values for the variable. + Typically, this is initialized as empty, but with the proper shape. + typecode : dtype character code + Desired data-type for the data array. + size : int + Desired element size for the data array. + shape : sequence of ints + The shape of the array. This should match the lengths of the + variable's dimensions. + dimensions : sequence of strings + The names of the dimensions used by the variable. Must be in the + same order of the dimension lengths given by `shape`. + attributes : dict, optional + Attribute values (any type) keyed by string names. 
These attributes + become attributes for the netcdf_variable object. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + + Attributes + ---------- + dimensions : list of str + List of names of dimensions used by the variable object. + isrec, shape + Properties + + See also + -------- + isrec, shape + + """ + def __init__(self, data, typecode, size, shape, dimensions, + attributes=None, + maskandscale=False): + self.data = data + self._typecode = typecode + self._size = size + self._shape = shape + self.dimensions = dimensions + self.maskandscale = maskandscale + + self._attributes = attributes or {} + for k, v in self._attributes.items(): + self.__dict__[k] = v + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. + try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def isrec(self): + """Returns whether the variable has a record dimension or not. + + A record dimension is a dimension along which additional data could be + easily appended in the netcdf data structure without much rewriting of + the data file. This attribute is a read-only property of the + `netcdf_variable`. + + """ + return bool(self.data.shape) and not self._shape[0] + isrec = property(isrec) + + def shape(self): + """Returns the shape tuple of the data variable. + + This is a read-only attribute and can not be modified in the + same manner of other numpy arrays. + """ + return self.data.shape + shape = property(shape) + + def getValue(self): + """ + Retrieve a scalar value from a `netcdf_variable` of length one. + + Raises + ------ + ValueError + If the netcdf variable is an array of length greater than one, + this exception will be raised. + + """ + return self.data.item() + + def assignValue(self, value): + """ + Assign a scalar value to a `netcdf_variable` of length one. 

        Parameters
        ----------
        value : scalar
            Scalar value (of compatible type) to assign to a length-one netcdf
            variable. This value will be written to file.

        Raises
        ------
        ValueError
            If the input is not a scalar, or if the destination is not a length-one
            netcdf variable.

        """
        if not self.data.flags.writeable:
            # Work-around for a bug in NumPy. Calling itemset() on a read-only
            # memory-mapped array causes a seg. fault.
            # See NumPy ticket #1622, and SciPy ticket #1202.
            # This check for `writeable` can be removed when the oldest version
            # of NumPy still supported by scipy contains the fix for #1622.
            raise RuntimeError("variable is not writeable")

        # Assign through a slice so the value lands in the backing array.
        self.data[:] = value

    def typecode(self):
        """
        Return the typecode of the variable.

        Returns
        -------
        typecode : char
            The character typecode of the variable (e.g., 'i' for int).

        """
        return self._typecode

    def itemsize(self):
        """
        Return the itemsize of the variable.

        Returns
        -------
        itemsize : int
            The element size of the variable (e.g., 8 for float64).
+ + """ + return self._size + + def __getitem__(self, index): + if not self.maskandscale: + return self.data[index] + + data = self.data[index].copy() + missing_value = self._get_missing_value() + data = self._apply_missing_value(data, missing_value) + scale_factor = self._attributes.get('scale_factor') + add_offset = self._attributes.get('add_offset') + if add_offset is not None or scale_factor is not None: + data = data.astype(np.float64) + if scale_factor is not None: + data = data * scale_factor + if add_offset is not None: + data += add_offset + + return data + + def __setitem__(self, index, data): + if self.maskandscale: + missing_value = ( + self._get_missing_value() or + getattr(data, 'fill_value', 999999)) + self._attributes.setdefault('missing_value', missing_value) + self._attributes.setdefault('_FillValue', missing_value) + data = ((data - self._attributes.get('add_offset', 0.0)) / + self._attributes.get('scale_factor', 1.0)) + data = np.ma.asarray(data).filled(missing_value) + if self._typecode not in 'fd' and data.dtype.kind == 'f': + data = np.round(data) + + # Expand data for record vars? + if self.isrec: + if isinstance(index, tuple): + rec_index = index[0] + else: + rec_index = index + if isinstance(rec_index, slice): + recs = (rec_index.start or 0) + len(data) + else: + recs = rec_index + 1 + if recs > len(self.data): + shape = (recs,) + self._shape[1:] + # Resize in-place does not always work since + # the array might not be single-segment + try: + self.data.resize(shape) + except ValueError: + dtype = self.data.dtype + self.__dict__['data'] = np.resize(self.data, shape).astype(dtype) + self.data[index] = data + + def _default_encoded_fill_value(self): + """ + The default encoded fill-value for this Variable's data type. + """ + nc_type = REVERSE[self.typecode(), self.itemsize()] + return FILLMAP[nc_type] + + def _get_encoded_fill_value(self): + """ + Returns the encoded fill value for this variable as bytes. 
+ + This is taken from either the _FillValue attribute, or the default fill + value for this variable's data type. + """ + if '_FillValue' in self._attributes: + fill_value = np.array(self._attributes['_FillValue'], + dtype=self.data.dtype).tobytes() + if len(fill_value) == self.itemsize(): + return fill_value + else: + return self._default_encoded_fill_value() + else: + return self._default_encoded_fill_value() + + def _get_missing_value(self): + """ + Returns the value denoting "no data" for this variable. + + If this variable does not have a missing/fill value, returns None. + + If both _FillValue and missing_value are given, give precedence to + _FillValue. The netCDF standard gives special meaning to _FillValue; + missing_value is just used for compatibility with old datasets. + """ + + if '_FillValue' in self._attributes: + missing_value = self._attributes['_FillValue'] + elif 'missing_value' in self._attributes: + missing_value = self._attributes['missing_value'] + else: + missing_value = None + + return missing_value + + @staticmethod + def _apply_missing_value(data, missing_value): + """ + Applies the given missing value to the data array. + + Returns a numpy.ma array, with any value equal to missing_value masked + out (unless missing_value is None, in which case the original array is + returned). 
+ """ + + if missing_value is None: + newdata = data + else: + try: + missing_value_isnan = np.isnan(missing_value) + except (TypeError, NotImplementedError): + # some data types (e.g., characters) cannot be tested for NaN + missing_value_isnan = False + + if missing_value_isnan: + mymask = np.isnan(data) + else: + mymask = (data == missing_value) + + newdata = np.ma.masked_where(mymask, data) + + return newdata + + +NetCDFFile = netcdf_file +NetCDFVariable = netcdf_variable diff --git a/venv/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..34fc29c089d790546d6d3932752543dcc9cb95c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py b/venv/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py new file mode 100644 index 0000000000000000000000000000000000000000..905140764411ea0fabf760f2288c546f636f232a --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py @@ -0,0 +1,236 @@ +''' Tests for fortran sequential files ''' + +import tempfile +import shutil +from os import path +from glob import iglob +import re + +from numpy.testing import assert_equal, assert_allclose +import numpy as np +import pytest + +from scipy.io import (FortranFile, + _test_fortran, + FortranEOFError, + FortranFormattingError) + + +DATA_PATH = path.join(path.dirname(__file__), 'data') + + +def test_fortranfiles_read(): + for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")): + m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I) + if not m: + raise RuntimeError("Couldn't match %s filename to regex" % filename) + + dims = (int(m.group(2)), int(m.group(3)), int(m.group(4))) + + dtype = 
m.group(1).replace('s', '<') + + f = FortranFile(filename, 'r', '= (16+2): + ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0] + bytes_read += 2 + if ext_chunk_size >= 22: + extensible_chunk_data = fid.read(22) + bytes_read += 22 + raw_guid = extensible_chunk_data[2+4:2+4+16] + # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361) + # MS GUID byte order: first three groups are native byte order, + # rest is Big Endian + if is_big_endian: + tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71' + else: + tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71' + if raw_guid.endswith(tail): + format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0] + else: + raise ValueError("Binary structure of wave file is not compliant") + + if format_tag not in KNOWN_WAVE_FORMATS: + _raise_bad_format(format_tag) + + # move file pointer to next chunk + if size > bytes_read: + fid.read(size - bytes_read) + + # fmt should always be 16, 18 or 40, but handle it just in case + _handle_pad_byte(fid, size) + + if format_tag == WAVE_FORMAT.PCM: + if bytes_per_second != fs * block_align: + raise ValueError("WAV header is invalid: nAvgBytesPerSec must" + " equal product of nSamplesPerSec and" + " nBlockAlign, but file has nSamplesPerSec =" + f" {fs}, nBlockAlign = {block_align}, and" + f" nAvgBytesPerSec = {bytes_per_second}") + + return (size, format_tag, channels, fs, bytes_per_second, block_align, + bit_depth) + + +def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, + block_align, mmap=False): + """ + Notes + ----- + Assumes file pointer is immediately after the 'data' id + + It's possible to not use all available bits in a container, or to store + samples in a container bigger than necessary, so bytes_per_sample uses + the actual reported container size (nBlockAlign / nChannels). 
    Real-world
    examples:

    Adobe Audition's "24-bit packed int (type 1, 20-bit)"

        nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20

    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav
    is:

        nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12

    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf
    gives an example of:

        nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20
    """
    if is_big_endian:
        fmt = '>'
    else:
        fmt = '<'

    # Size of the data subchunk in bytes
    size = struct.unpack(fmt+'I', fid.read(4))[0]

    # Number of bytes per sample (sample container size)
    bytes_per_sample = block_align // channels
    n_samples = size // bytes_per_sample

    # Pick a numpy dtype for the sample container, or fall back to raw
    # bytes ('V1') for odd container sizes that have no native dtype.
    if format_tag == WAVE_FORMAT.PCM:
        if 1 <= bit_depth <= 8:
            dtype = 'u1'  # WAV of 8-bit integer or less are unsigned
        elif bytes_per_sample in {3, 5, 6, 7}:
            # No compatible dtype. Load as raw bytes for reshaping later.
            dtype = 'V1'
        elif bit_depth <= 64:
            # Remaining bit depths can map directly to signed numpy dtypes
            dtype = f'{fmt}i{bytes_per_sample}'
        else:
            raise ValueError("Unsupported bit depth: the WAV file "
                             f"has {bit_depth}-bit integer data.")
    elif format_tag == WAVE_FORMAT.IEEE_FLOAT:
        if bit_depth in {32, 64}:
            dtype = f'{fmt}f{bytes_per_sample}'
        else:
            raise ValueError("Unsupported bit depth: the WAV file "
                             f"has {bit_depth}-bit floating-point data.")
    else:
        _raise_bad_format(format_tag)

    start = fid.tell()
    if not mmap:
        try:
            count = size if dtype == 'V1' else n_samples
            data = numpy.fromfile(fid, dtype=dtype, count=count)
        except io.UnsupportedOperation:  # not a C-like file
            fid.seek(start, 0)  # just in case it seeked, though it shouldn't
            data = numpy.frombuffer(fid.read(size), dtype=dtype)

        if dtype == 'V1':
            # Rearrange raw bytes into smallest compatible numpy dtype
            dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8'
            a = numpy.zeros((len(data) // bytes_per_sample,
numpy.dtype(dt).itemsize), + dtype='V1') + if is_big_endian: + a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample)) + else: + a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample)) + data = a.view(dt).reshape(a.shape[:-1]) + else: + if bytes_per_sample in {1, 2, 4, 8}: + start = fid.tell() + data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start, + shape=(n_samples,)) + fid.seek(start + size) + else: + raise ValueError("mmap=True not compatible with " + f"{bytes_per_sample}-byte container size.") + + _handle_pad_byte(fid, size) + + if channels > 1: + data = data.reshape(-1, channels) + return data + + +def _skip_unknown_chunk(fid, is_big_endian): + if is_big_endian: + fmt = '>I' + else: + fmt = '>> from os.path import dirname, join as pjoin + >>> from scipy.io import wavfile + >>> import scipy.io + + Get the filename for an example .wav file from the tests/data directory. + + >>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data') + >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav') + + Load the .wav file contents. + + >>> samplerate, data = wavfile.read(wav_fname) + >>> print(f"number of channels = {data.shape[1]}") + number of channels = 2 + >>> length = data.shape[0] / samplerate + >>> print(f"length = {length}s") + length = 0.01s + + Plot the waveform. 

    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> time = np.linspace(0., length, data.shape[0])
    >>> plt.plot(time, data[:, 0], label="Left channel")
    >>> plt.plot(time, data[:, 1], label="Right channel")
    >>> plt.legend()
    >>> plt.xlabel("Time [s]")
    >>> plt.ylabel("Amplitude")
    >>> plt.show()

    """
    if hasattr(filename, 'read'):
        # Caller passed an already-open file object; mmap requires a real
        # file opened by us, so it is disabled in this case.
        fid = filename
        mmap = False
    else:
        fid = open(filename, 'rb')

    try:
        file_size, is_big_endian = _read_riff_chunk(fid)
        fmt_chunk_received = False
        data_chunk_received = False
        # Walk the RIFF chunks until the end of file reported by the header.
        while fid.tell() < file_size:
            # read the next chunk
            chunk_id = fid.read(4)

            if not chunk_id:
                if data_chunk_received:
                    # End of file but data successfully read
                    warnings.warn(
                        f"Reached EOF prematurely; finished at {fid.tell():d} bytes, "
                        f"expected {file_size:d} bytes from header.",
                        WavFileWarning, stacklevel=2)
                    break
                else:
                    raise ValueError("Unexpected end of file.")
            elif len(chunk_id) < 4:
                msg = f"Incomplete chunk ID: {repr(chunk_id)}"
                # If we have the data, ignore the broken chunk
                if fmt_chunk_received and data_chunk_received:
                    warnings.warn(msg + ", ignoring it.", WavFileWarning,
                                  stacklevel=2)
                else:
                    raise ValueError(msg)

            if chunk_id == b'fmt ':
                fmt_chunk_received = True
                fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
                format_tag, channels, fs = fmt_chunk[1:4]
                bit_depth = fmt_chunk[6]
                block_align = fmt_chunk[5]
            elif chunk_id == b'fact':
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id == b'data':
                data_chunk_received = True
                if not fmt_chunk_received:
                    raise ValueError("No fmt chunk before data")
                data = _read_data_chunk(fid, format_tag, channels, bit_depth,
                                        is_big_endian, block_align, mmap)
            elif chunk_id == b'LIST':
                # Someday this could be handled properly but for now skip it
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id in {b'JUNK', b'Fake'}:
                # Skip alignment chunks without warning
                _skip_unknown_chunk(fid, is_big_endian)
            else:
warnings.warn("Chunk (non-data) not understood, skipping it.", + WavFileWarning, stacklevel=2) + _skip_unknown_chunk(fid, is_big_endian) + finally: + if not hasattr(filename, 'read'): + fid.close() + else: + fid.seek(0) + + return fs, data + + +def write(filename, rate, data): + """ + Write a NumPy array as a WAV file. + + Parameters + ---------- + filename : string or open file handle + Output wav file. + rate : int + The sample rate (in samples/sec). + data : ndarray + A 1-D or 2-D NumPy array of either integer or float data-type. + + Notes + ----- + * Writes a simple uncompressed WAV file. + * To write multiple-channels, use a 2-D array of shape + (Nsamples, Nchannels). + * The bits-per-sample and PCM/float will be determined by the data-type. + + Common data types: [1]_ + + ===================== =========== =========== ============= + WAV format Min Max NumPy dtype + ===================== =========== =========== ============= + 32-bit floating-point -1.0 +1.0 float32 + 32-bit PCM -2147483648 +2147483647 int32 + 16-bit PCM -32768 +32767 int16 + 8-bit PCM 0 255 uint8 + ===================== =========== =========== ============= + + Note that 8-bit PCM is unsigned. + + References + ---------- + .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming + Interface and Data Specifications 1.0", section "Data Format of the + Samples", August 1991 + http://www.tactilemedia.com/info/MCI_Control_Info.html + + Examples + -------- + Create a 100Hz sine wave, sampled at 44100Hz. + Write to 16-bit PCM, Mono. + + >>> from scipy.io.wavfile import write + >>> import numpy as np + >>> samplerate = 44100; fs = 100 + >>> t = np.linspace(0., 1., samplerate) + >>> amplitude = np.iinfo(np.int16).max + >>> data = amplitude * np.sin(2. 
* np.pi * fs * t) + >>> write("example.wav", samplerate, data.astype(np.int16)) + + """ + if hasattr(filename, 'write'): + fid = filename + else: + fid = open(filename, 'wb') + + fs = rate + + try: + dkind = data.dtype.kind + allowed_dtypes = ['float32', 'float64', + 'uint8', 'int16', 'int32', 'int64'] + if data.dtype.name not in allowed_dtypes: + raise ValueError("Unsupported data type '%s'" % data.dtype) + + header_data = b'' + + header_data += b'RIFF' + header_data += b'\x00\x00\x00\x00' + header_data += b'WAVE' + + # fmt chunk + header_data += b'fmt ' + if dkind == 'f': + format_tag = WAVE_FORMAT.IEEE_FLOAT + else: + format_tag = WAVE_FORMAT.PCM + if data.ndim == 1: + channels = 1 + else: + channels = data.shape[1] + bit_depth = data.dtype.itemsize * 8 + bytes_per_second = fs*(bit_depth // 8)*channels + block_align = channels * (bit_depth // 8) + + fmt_chunk_data = struct.pack(' 0xFFFFFFFF: + raise ValueError("Data exceeds wave file size limit") + + fid.write(header_data) + + # data chunk + fid.write(b'data') + fid.write(struct.pack('' or (data.dtype.byteorder == '=' and + sys.byteorder == 'big'): + data = data.byteswap() + _array_tofile(fid, data) + + # Determine file size and place it in correct + # position at start of the file. + size = fid.tell() + fid.seek(4) + fid.write(struct.pack('`_, reading +which is recommended. + +Basic usage +----------- + +1. Define the function you want to fit against.:: + + def f(B, x): + '''Linear function y = m*x + b''' + # B is a vector of the parameters. + # x is an array of the current x values. + # x is in the same format as the x passed to Data or RealData. + # + # Return an array in the same format as y passed to Data or RealData. + return B[0]*x + B[1] + +2. Create a Model.:: + + linear = Model(f) + +3. Create a Data or RealData instance.:: + + mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) + + or, when the actual covariances are known:: + + mydata = RealData(x, y, sx=sx, sy=sy) + +4. 
Instantiate ODR with your data, model and initial parameter estimate.:: + + myodr = ODR(mydata, linear, beta0=[1., 2.]) + +5. Run the fit.:: + + myoutput = myodr.run() + +6. Examine output.:: + + myoutput.pprint() + + +References +---------- +.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression," + in "Statistical analysis of measurement error models and + applications: proceedings of the AMS-IMS-SIAM joint summer research + conference held June 10-16, 1989," Contemporary Mathematics, + vol. 112, pg. 186, 1990. + +""" +# version: 0.7 +# author: Robert Kern +# date: 2006-09-21 + +from ._odrpack import * +from ._models import * +from . import _add_newdocs + +# Deprecated namespaces, to be removed in v2.0.0 +from . import models, odrpack + +__all__ = [s for s in dir() + if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..93914c367f5872dbd6780187906f5b5c22f29682 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c43e7b34d310972317cd09aa49ffa8a5b55553b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..456b6e128f332ba7f4c75ff0225c3e473bd9e100 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcfbb2e1e2f2762223313a6e7b526363434d877f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..274ce7ec4e3c144414950ce0697d63d354c18236 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edfbc4bea5ae28ae448f45135830f0d7c8c5ff4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..858566cb86ebd5b95b629138a3db92024aa78b95 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py b/venv/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py new file mode 100644 index 
0000000000000000000000000000000000000000..e09fb6cc8c5f1523dfbeaef466a5b76bd22c01bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py @@ -0,0 +1,34 @@ +from numpy.lib import add_newdoc + +add_newdoc('scipy.odr', 'odr', + """ + odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, + ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, + taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, + scld=None, work=None, iwork=None, full_output=0) + + Low-level function for ODR. + + See Also + -------- + ODR : The ODR class gathers all information and coordinates the running of the + main fitting routine. + Model : The Model class stores information about the function you wish to fit. + Data : The data to fit. + RealData : Data with weights as actual std. dev.s and/or covariances. + + Notes + ----- + This is a function performing the same operation as the `ODR`, + `Model`, and `Data` classes together. The parameters of this + function are explained in the class documentation. + + """) + +add_newdoc('scipy.odr.__odrpack', '_set_exceptions', + """ + _set_exceptions(odr_error, odr_stop) + + Internal function: set exception classes. + + """) diff --git a/venv/lib/python3.10/site-packages/scipy/odr/_models.py b/venv/lib/python3.10/site-packages/scipy/odr/_models.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a8d2275dcc4698a9ea61be5871d62069be2599 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/odr/_models.py @@ -0,0 +1,315 @@ +""" Collection of Model instances for use with the odrpack fitting package. 
+""" +import numpy as np +from scipy.odr._odrpack import Model + +__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic', + 'polynomial'] + + +def _lin_fcn(B, x): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + (x*b).sum(axis=0) + + +def _lin_fjb(B, x): + a = np.ones(x.shape[-1], float) + res = np.concatenate((a, x.ravel())) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _lin_fjd(B, x): + b = B[1:] + b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0) + b.shape = x.shape + return b + + +def _lin_est(data): + # Eh. The answer is analytical, so just return all ones. + # Don't return zeros since that will interfere with + # ODRPACK's auto-scaling procedures. + + if len(data.x.shape) == 2: + m = data.x.shape[0] + else: + m = 1 + + return np.ones((m + 1,), float) + + +def _poly_fcn(B, x, powers): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + np.sum(b * np.power(x, powers), axis=0) + + +def _poly_fjacb(B, x, powers): + res = np.concatenate((np.ones(x.shape[-1], float), + np.power(x, powers).flat)) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _poly_fjacd(B, x, powers): + b = B[1:] + b.shape = (b.shape[0], 1) + + b = b * powers + + return np.sum(b * np.power(x, powers-1), axis=0) + + +def _exp_fcn(B, x): + return B[0] + np.exp(B[1] * x) + + +def _exp_fjd(B, x): + return B[1] * np.exp(B[1] * x) + + +def _exp_fjb(B, x): + res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x))) + res.shape = (2, x.shape[-1]) + return res + + +def _exp_est(data): + # Eh. 
+ return np.array([1., 1.]) + + +class _MultilinearModel(Model): + r""" + Arbitrary-dimensional linear model + + This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i` + + Examples + -------- + We can calculate orthogonal distance regression with an arbitrary + dimensional linear model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 10.0 + 5.0 * x + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.multilinear) + >>> output = odr_obj.run() + >>> print(output.beta) + [10. 5.] + + """ + + def __init__(self): + super().__init__( + _lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est, + meta={'name': 'Arbitrary-dimensional Linear', + 'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]', + 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'}) + + +multilinear = _MultilinearModel() + + +def polynomial(order): + """ + Factory function for a general polynomial model. + + Parameters + ---------- + order : int or sequence + If an integer, it becomes the order of the polynomial to fit. If + a sequence of numbers, then these are the explicit powers in the + polynomial. + A constant term (power 0) is always included, so don't include 0. + Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)). + + Returns + ------- + polynomial : Model instance + Model instance. 
+ + Examples + -------- + We can fit an input data using orthogonal distance regression (ODR) with + a polynomial model: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import odr + >>> x = np.linspace(0.0, 5.0) + >>> y = np.sin(x) + >>> poly_model = odr.polynomial(3) # using third order polynomial model + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, poly_model) + >>> output = odr_obj.run() # running ODR fitting + >>> poly = np.poly1d(output.beta[::-1]) + >>> poly_y = poly(x) + >>> plt.plot(x, y, label="input data") + >>> plt.plot(x, poly_y, label="polynomial ODR") + >>> plt.legend() + >>> plt.show() + + """ + + powers = np.asarray(order) + if powers.shape == (): + # Scalar. + powers = np.arange(1, powers + 1) + + powers.shape = (len(powers), 1) + len_beta = len(powers) + 1 + + def _poly_est(data, len_beta=len_beta): + # Eh. Ignore data and return all ones. + return np.ones((len_beta,), float) + + return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb, + estimate=_poly_est, extra_args=(powers,), + meta={'name': 'Sorta-general Polynomial', + 'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1), + 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' % + (len_beta-1)}) + + +class _ExponentialModel(Model): + r""" + Exponential model + + This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}` + + Examples + -------- + We can calculate orthogonal distance regression with an exponential model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = -10.0 + np.exp(0.5*x) + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.exponential) + >>> output = odr_obj.run() + >>> print(output.beta) + [-10. 
0.5] + + """ + + def __init__(self): + super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb, + estimate=_exp_est, + meta={'name': 'Exponential', + 'equ': 'y= B_0 + exp(B_1 * x)', + 'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'}) + + +exponential = _ExponentialModel() + + +def _unilin(B, x): + return x*B[0] + B[1] + + +def _unilin_fjd(B, x): + return np.ones(x.shape, float) * B[0] + + +def _unilin_fjb(B, x): + _ret = np.concatenate((x, np.ones(x.shape, float))) + _ret.shape = (2,) + x.shape + + return _ret + + +def _unilin_est(data): + return (1., 1.) + + +def _quadratic(B, x): + return x*(x*B[0] + B[1]) + B[2] + + +def _quad_fjd(B, x): + return 2*x*B[0] + B[1] + + +def _quad_fjb(B, x): + _ret = np.concatenate((x*x, x, np.ones(x.shape, float))) + _ret.shape = (3,) + x.shape + + return _ret + + +def _quad_est(data): + return (1.,1.,1.) + + +class _UnilinearModel(Model): + r""" + Univariate linear model + + This model is defined by :math:`y = \beta_0 x + \beta_1` + + Examples + -------- + We can calculate orthogonal distance regression with an unilinear model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 1.0 * x + 2.0 + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.unilinear) + >>> output = odr_obj.run() + >>> print(output.beta) + [1. 2.] 
+ + """ + + def __init__(self): + super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, + estimate=_unilin_est, + meta={'name': 'Univariate Linear', + 'equ': 'y = B_0 * x + B_1', + 'TeXequ': '$y = \\beta_0 x + \\beta_1$'}) + + +unilinear = _UnilinearModel() + + +class _QuadraticModel(Model): + r""" + Quadratic model + + This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2` + + Examples + -------- + We can calculate orthogonal distance regression with a quadratic model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0 + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.quadratic) + >>> output = odr_obj.run() + >>> print(output.beta) + [1. 2. 3.] + + """ + + def __init__(self): + super().__init__( + _quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est, + meta={'name': 'Quadratic', + 'equ': 'y = B_0*x**2 + B_1*x + B_2', + 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'}) + + +quadratic = _QuadraticModel() diff --git a/venv/lib/python3.10/site-packages/scipy/odr/_odrpack.py b/venv/lib/python3.10/site-packages/scipy/odr/_odrpack.py new file mode 100644 index 0000000000000000000000000000000000000000..47d21b6b452ea84e0bec3f2ffb803f5912eee5cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/odr/_odrpack.py @@ -0,0 +1,1150 @@ +""" +Python wrappers for Orthogonal Distance Regression (ODRPACK). + +Notes +===== + +* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an + array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently, + NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For + efficiency and convenience, the input and output arrays of the fitting + function (and its Jacobians) are passed to FORTRAN without transposition. 
+ Therefore, where the ODRPACK documentation says that the X array is of shape + (N, M), it will be passed to the Python function as an array of shape (M, N). + If M==1, the 1-D case, then nothing matters; if M>1, then your + Python functions will be dealing with arrays that are indexed in reverse of + the ODRPACK documentation. No real issue, but watch out for your indexing of + the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth + observation will be returned as jacd[j, i, n]. Except for the Jacobians, it + really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course, + you can always use the transpose() function from SciPy explicitly. + +* Examples -- See the accompanying file test/test.py for examples of how to set + up fits of your own. Some are taken from the User's Guide; some are from + other sources. + +* Models -- Some common models are instantiated in the accompanying module + models.py . Contributions are welcome. + +Credits +======= + +* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs. + +Robert Kern +robert.kern@gmail.com + +""" +import os + +import numpy +from warnings import warn +from scipy.odr import __odrpack + +__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop'] + +odr = __odrpack.odr + + +class OdrWarning(UserWarning): + """ + Warning indicating that the data passed into + ODR will cause problems when passed into 'odr' + that the user should be aware of. + """ + pass + + +class OdrError(Exception): + """ + Exception indicating an error in fitting. + + This is raised by `~scipy.odr.odr` if an error occurs during fitting. + """ + pass + + +class OdrStop(Exception): + """ + Exception stopping fitting. + + You can raise this exception in your objective function to tell + `~scipy.odr.odr` to stop fitting. 
+ """ + pass + + +# Backwards compatibility +odr_error = OdrError +odr_stop = OdrStop + +__odrpack._set_exceptions(OdrError, OdrStop) + + +def _conv(obj, dtype=None): + """ Convert an object to the preferred form for input to the odr routine. + """ + + if obj is None: + return obj + else: + if dtype is None: + obj = numpy.asarray(obj) + else: + obj = numpy.asarray(obj, dtype) + if obj.shape == (): + # Scalar. + return obj.dtype.type(obj) + else: + return obj + + +def _report_error(info): + """ Interprets the return code of the odr routine. + + Parameters + ---------- + info : int + The return code of the odr routine. + + Returns + ------- + problems : list(str) + A list of messages about why the odr() routine stopped. + """ + + stopreason = ('Blank', + 'Sum of squares convergence', + 'Parameter convergence', + 'Both sum of squares and parameter convergence', + 'Iteration limit reached')[info % 5] + + if info >= 5: + # questionable results or fatal error + + I = (info//10000 % 10, + info//1000 % 10, + info//100 % 10, + info//10 % 10, + info % 10) + problems = [] + + if I[0] == 0: + if I[1] != 0: + problems.append('Derivatives possibly not correct') + if I[2] != 0: + problems.append('Error occurred in callback') + if I[3] != 0: + problems.append('Problem is not full rank at solution') + problems.append(stopreason) + elif I[0] == 1: + if I[1] != 0: + problems.append('N < 1') + if I[2] != 0: + problems.append('M < 1') + if I[3] != 0: + problems.append('NP < 1 or NP > N') + if I[4] != 0: + problems.append('NQ < 1') + elif I[0] == 2: + if I[1] != 0: + problems.append('LDY and/or LDX incorrect') + if I[2] != 0: + problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect') + if I[3] != 0: + problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect') + if I[4] != 0: + problems.append('LWORK and/or LIWORK too small') + elif I[0] == 3: + if I[1] != 0: + problems.append('STPB and/or STPD incorrect') + if I[2] != 0: + problems.append('SCLB and/or SCLD incorrect') + if I[3] != 0: 
+ problems.append('WE incorrect') + if I[4] != 0: + problems.append('WD incorrect') + elif I[0] == 4: + problems.append('Error in derivatives') + elif I[0] == 5: + problems.append('Error occurred in callback') + elif I[0] == 6: + problems.append('Numerical error detected') + + return problems + + else: + return [stopreason] + + +class Data: + """ + The data to fit. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. + we : array_like, optional + If `we` is a scalar, then that value is used for all data points (and + all dimensions of the response variable). + If `we` is a rank-1 array of length q (the dimensionality of the + response variable), then this vector is the diagonal of the covariant + weighting matrix for all data points. + If `we` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the i'th response variable + observation (single-dimensional only). + If `we` is a rank-2 array of shape (q, q), then this is the full + covariant weighting matrix broadcast to each observation. + If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the + diagonal of the covariant weighting matrix for the i'th observation. + If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + If the fit is implicit, then only a positive scalar value is used. + wd : array_like, optional + If `wd` is a scalar, then that value is used for all data points + (and all dimensions of the input variable). If `wd` = 0, then the + covariant weighting matrix for each observation is set to the identity + matrix (so each dimension of each observation has the same weight). 
+ If `wd` is a rank-1 array of length m (the dimensionality of the input + variable), then this vector is the diagonal of the covariant weighting + matrix for all data points. + If `wd` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the ith input variable observation + (single-dimensional only). + If `wd` is a rank-2 array of shape (m, m), then this is the full + covariant weighting matrix broadcast to each observation. + If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the + diagonal of the covariant weighting matrix for the ith observation. + If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + fix : array_like of ints, optional + The `fix` argument is the same as ifixx in the class ODR. It is an + array of integers with the same shape as data.x that determines which + input observations are treated as fixed. One can use a sequence of + length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + Each argument is attached to the member of the instance of the same name. + The structures of `x` and `y` are described in the Model class docstring. + If `y` is an integer, then the Data instance can only be used to fit with + implicit models where the dimensionality of the response is equal to the + specified value of `y`. + + The `we` argument weights the effect a deviation in the response variable + has on the fit. The `wd` argument weights the effect a deviation in the + input variable has on the fit. To handle multidimensional inputs and + responses easily, the structure of these arguments has the n'th + dimensional axis first. 
These arguments heavily use the structured + arguments feature of ODRPACK to conveniently and flexibly support all + options. See the ODRPACK User's Guide for a full explanation of how these + weights are used in the algorithm. Basically, a higher value of the weight + for a particular data point makes a deviation at that point more + detrimental to the fit. + + """ + + def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None): + self.x = _conv(x) + + if not isinstance(self.x, numpy.ndarray): + raise ValueError("Expected an 'ndarray' of data for 'x', " + f"but instead got data of type '{type(self.x).__name__}'") + + self.y = _conv(y) + self.we = _conv(we) + self.wd = _conv(wd) + self.fix = _conv(fix) + self.meta = {} if meta is None else meta + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + by keywords. + + Examples + -------- + :: + + data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata dictionary. + """ + if attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + + +class RealData(Data): + """ + The data, with weightings as actual standard deviations and/or + covariances. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. + sx : array_like, optional + Standard deviations of `x`. + `sx` are standard deviations of `x` and are converted to weights by + dividing 1.0 by their squares. + sy : array_like, optional + Standard deviations of `y`. + `sy` are standard deviations of `y` and are converted to weights by + dividing 1.0 by their squares. 
+ covx : array_like, optional + Covariance of `x` + `covx` is an array of covariance matrices of `x` and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. + covy : array_like, optional + Covariance of `y` + `covy` is an array of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. + fix : array_like, optional + The argument and member fix is the same as Data.fix and ODR.ifixx: + It is an array of integers with the same shape as `x` that + determines which input observations are treated as fixed. One can + use a sequence of length m (the dimensionality of the input + observations) to fix some dimensions for all observations. A value + of 0 fixes the observation, a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + The weights `wd` and `we` are computed from provided values as follows: + + `sx` and `sy` are converted to weights by dividing 1.0 by their squares. + For example, ``wd = 1./numpy.power(`sx`, 2)``. + + `covx` and `covy` are arrays of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's covariance + matrix. For example, ``we[i] = numpy.linalg.inv(covy[i])``. + + These arguments follow the same structured argument conventions as wd and + we only restricted by their natures: `sx` and `sy` can't be rank-3, but + `covx` and `covy` can be. + + Only set *either* `sx` or `covx` (not both). Setting both will raise an + exception. Same with `sy` and `covy`. 
+ + """ + + def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None, + fix=None, meta=None): + if (sx is not None) and (covx is not None): + raise ValueError("cannot set both sx and covx") + if (sy is not None) and (covy is not None): + raise ValueError("cannot set both sy and covy") + + # Set flags for __getattr__ + self._ga_flags = {} + if sx is not None: + self._ga_flags['wd'] = 'sx' + else: + self._ga_flags['wd'] = 'covx' + if sy is not None: + self._ga_flags['we'] = 'sy' + else: + self._ga_flags['we'] = 'covy' + + self.x = _conv(x) + + if not isinstance(self.x, numpy.ndarray): + raise ValueError("Expected an 'ndarray' of data for 'x', " + f"but instead got data of type '{type(self.x).__name__}'") + + self.y = _conv(y) + self.sx = _conv(sx) + self.sy = _conv(sy) + self.covx = _conv(covx) + self.covy = _conv(covy) + self.fix = _conv(fix) + self.meta = {} if meta is None else meta + + def _sd2wt(self, sd): + """ Convert standard deviation to weights. + """ + + return 1./numpy.power(sd, 2) + + def _cov2wt(self, cov): + """ Convert covariance matrix(-ices) to weights. + """ + + from scipy.linalg import inv + + if len(cov.shape) == 2: + return inv(cov) + else: + weights = numpy.zeros(cov.shape, float) + + for i in range(cov.shape[-1]): # n + weights[:,:,i] = inv(cov[:,:,i]) + + return weights + + def __getattr__(self, attr): + lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx), + ('wd', 'covx'): (self._cov2wt, self.covx), + ('we', 'sy'): (self._sd2wt, self.sy), + ('we', 'covy'): (self._cov2wt, self.covy)} + + if attr not in ('wd', 'we'): + if attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + else: + func, arg = lookup_tbl[(attr, self._ga_flags[attr])] + + if arg is not None: + return func(*(arg,)) + else: + return None + + +class Model: + """ + The Model class stores information about the function you wish to fit. 
+ + It stores the function itself, at the least, and optionally stores + functions which compute the Jacobians used during fitting. Also, one + can provide a function that will provide reasonable starting values + for the fit parameters possibly given the set of data. + + Parameters + ---------- + fcn : function + fcn(beta, x) --> y + fjacb : function + Jacobian of fcn wrt the fit parameters beta. + + fjacb(beta, x) --> @f_i(x,B)/@B_j + fjacd : function + Jacobian of fcn wrt the (possibly multidimensional) input + variable. + + fjacd(beta, x) --> @f_i(x,B)/@x_j + extra_args : tuple, optional + If specified, `extra_args` should be a tuple of extra + arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called + by `apply(fcn, (beta, x) + extra_args)` + estimate : array_like of rank-1 + Provides estimates of the fit parameters from the data + + estimate(data) --> estbeta + implicit : boolean + If TRUE, specifies that the model + is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit + against + meta : dict, optional + freeform dictionary of metadata for the model + + Notes + ----- + Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and + return a NumPy array. The `estimate` object takes an instance of the + Data class. + + Here are the rules for the shapes of the argument and return + arrays of the callback functions: + + `x` + if the input data is single-dimensional, then `x` is rank-1 + array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)`` + If the input data is multi-dimensional, then `x` is a rank-2 array; + i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``. + In all cases, it has the same shape as the input data array passed to + `~scipy.odr.odr`. `m` is the dimensionality of the input data, + `n` is the number of observations. + `y` + if the response variable is single-dimensional, then `y` is a + rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``. 
+ If the response variable is multi-dimensional, then `y` is a rank-2 + array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape = + (q, n)`` where `q` is the dimensionality of the response variable. + `beta` + rank-1 array of length `p` where `p` is the number of parameters; + i.e. ``beta = array([B_1, B_2, ..., B_p])`` + `fjacb` + if the response variable is multi-dimensional, then the + return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] = + d f_l(X,B)/d B_k`` evaluated at the ith data point. If `q == 1`, then + the return array is only rank-2 and with shape `(p, n)`. + `fjacd` + as with fjacb, only the return array's shape is `(q, m, n)` + such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data + point. If `q == 1`, then the return array's shape is `(m, n)`. If + `m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`. + + """ + + def __init__(self, fcn, fjacb=None, fjacd=None, + extra_args=None, estimate=None, implicit=0, meta=None): + + self.fcn = fcn + self.fjacb = fjacb + self.fjacd = fjacd + + if extra_args is not None: + extra_args = tuple(extra_args) + + self.extra_args = extra_args + self.estimate = estimate + self.implicit = implicit + self.meta = meta if meta is not None else {} + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + here. + + Examples + -------- + set_meta(name="Exponential", equation="y = a exp(b x) + c") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata. + """ + + if attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + + +class Output: + """ + The Output class stores the output of an ODR run. + + Attributes + ---------- + beta : ndarray + Estimated parameter values, of shape (q,). + sd_beta : ndarray + Standard deviations of the estimated parameters, of shape (p,). 
+ cov_beta : ndarray + Covariance matrix of the estimated parameters, of shape (p,p). + Note that this `cov_beta` is not scaled by the residual variance + `res_var`, whereas `sd_beta` is. This means + ``np.sqrt(np.diag(output.cov_beta * output.res_var))`` is the same + result as `output.sd_beta`. + delta : ndarray, optional + Array of estimated errors in input variables, of same shape as `x`. + eps : ndarray, optional + Array of estimated errors in response variables, of same shape as `y`. + xplus : ndarray, optional + Array of ``x + delta``. + y : ndarray, optional + Array ``y = fcn(x + delta)``. + res_var : float, optional + Residual variance. + sum_square : float, optional + Sum of squares error. + sum_square_delta : float, optional + Sum of squares of delta error. + sum_square_eps : float, optional + Sum of squares of eps error. + inv_condnum : float, optional + Inverse condition number (cf. ODRPACK UG p. 77). + rel_error : float, optional + Relative error in function values computed within fcn. + work : ndarray, optional + Final work array. + work_ind : dict, optional + Indices into work for drawing out values (cf. ODRPACK UG p. 83). + info : int, optional + Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38). + stopreason : list of str, optional + `info` interpreted into English. + + Notes + ----- + Takes one argument for initialization, the return value from the + function `~scipy.odr.odr`. The attributes listed as "optional" above are + only present if `~scipy.odr.odr` was run with ``full_output=1``. + + """ + + def __init__(self, output): + self.beta = output[0] + self.sd_beta = output[1] + self.cov_beta = output[2] + + if len(output) == 4: + # full output + self.__dict__.update(output[3]) + self.stopreason = _report_error(self.info) + + def pprint(self): + """ Pretty-print important results. 
+ """ + + print('Beta:', self.beta) + print('Beta Std Error:', self.sd_beta) + print('Beta Covariance:', self.cov_beta) + if hasattr(self, 'info'): + print('Residual Variance:',self.res_var) + print('Inverse Condition #:', self.inv_condnum) + print('Reason(s) for Halting:') + for r in self.stopreason: + print(' %s' % r) + + +class ODR: + """ + The ODR class gathers all information and coordinates the running of the + main fitting routine. + + Members of instances of the ODR class have the same names as the arguments + to the initialization routine. + + Parameters + ---------- + data : Data class instance + instance of the Data class + model : Model class instance + instance of the Model class + + Other Parameters + ---------------- + beta0 : array_like of rank-1 + a rank-1 sequence of initial parameter values. Optional if + model provides an "estimate" function to estimate these values. + delta0 : array_like of floats of rank-1, optional + a (double-precision) float array to hold the initial values of + the errors in the input variables. Must be same shape as data.x + ifixb : array_like of ints of rank-1, optional + sequence of integers with the same length as beta0 that determines + which parameters are held fixed. A value of 0 fixes the parameter, + a value > 0 makes the parameter free. + ifixx : array_like of ints with same shape as data.x, optional + an array of integers with the same shape as data.x that determines + which input observations are treated as fixed. One can use a sequence + of length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + job : int, optional + an integer telling ODRPACK what tasks to perform. See p. 31 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_job post-initialization for a more readable interface. + iprint : int, optional + an integer telling ODRPACK what to print. See pp. 
33-34 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_iprint post-initialization for a more readable interface. + errfile : str, optional + string with the filename to print ODRPACK errors to. If the file already + exists, an error will be thrown. The `overwrite` argument can be used to + prevent this. *Do Not Open This File Yourself!* + rptfile : str, optional + string with the filename to print ODRPACK summaries to. If the file + already exists, an error will be thrown. The `overwrite` argument can be + used to prevent this. *Do Not Open This File Yourself!* + ndigit : int, optional + integer specifying the number of reliable digits in the computation + of the function. + taufac : float, optional + float specifying the initial trust region. The default value is 1. + The initial trust region is equal to taufac times the length of the + first computed Gauss-Newton step. taufac must be less than 1. + sstol : float, optional + float specifying the tolerance for convergence based on the relative + change in the sum-of-squares. The default value is eps**(1/2) where eps + is the smallest value such that 1 + eps > 1 for double precision + computation on the machine. sstol must be less than 1. + partol : float, optional + float specifying the tolerance for convergence based on the relative + change in the estimated parameters. The default value is eps**(2/3) for + explicit models and ``eps**(1/3)`` for implicit models. partol must be less + than 1. + maxit : int, optional + integer specifying the maximum number of iterations to perform. For + first runs, maxit is the total number of iterations performed and + defaults to 50. For restarts, maxit is the number of additional + iterations to perform and defaults to 10. + stpb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute + finite difference derivatives wrt the parameters. 
+ stpd : optional + array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative + step sizes to compute finite difference derivatives wrt the input + variable errors. If stpd is a rank-1 array with length m (the + dimensionality of the input variable), then the values are broadcast to + all observations. + sclb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of scaling factors for the + parameters. The purpose of these scaling factors are to scale all of + the parameters to around unity. Normally appropriate scaling factors + are computed if this argument is not specified. Specify them yourself + if the automatic procedure goes awry. + scld : array_like, optional + array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling + factors for the *errors* in the input variables. Again, these factors + are automatically computed if you do not provide them. If scld.shape == + (m,), then the scaling factors are broadcast to all observations. + work : ndarray, optional + array to hold the double-valued working data for ODRPACK. When + restarting, takes the value of self.output.work. + iwork : ndarray, optional + array to hold the integer-valued working data for ODRPACK. When + restarting, takes the value of self.output.iwork. + overwrite : bool, optional + If it is True, output files defined by `errfile` and `rptfile` are + overwritten. The default is False. 
+ + Attributes + ---------- + data : Data + The data for this fit + model : Model + The model used in fit + output : Output + An instance if the Output class containing all of the returned + data from an invocation of ODR.run() or ODR.restart() + + """ + + def __init__(self, data, model, beta0=None, delta0=None, ifixb=None, + ifixx=None, job=None, iprint=None, errfile=None, rptfile=None, + ndigit=None, taufac=None, sstol=None, partol=None, maxit=None, + stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, + overwrite=False): + + self.data = data + self.model = model + + if beta0 is None: + if self.model.estimate is not None: + self.beta0 = _conv(self.model.estimate(self.data)) + else: + raise ValueError( + "must specify beta0 or provide an estimator with the model" + ) + else: + self.beta0 = _conv(beta0) + + if ifixx is None and data.fix is not None: + ifixx = data.fix + + if overwrite: + # remove output files for overwriting. + if rptfile is not None and os.path.exists(rptfile): + os.remove(rptfile) + if errfile is not None and os.path.exists(errfile): + os.remove(errfile) + + self.delta0 = _conv(delta0) + # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit + # platforms. + # XXX: some other FORTRAN compilers may not agree. + self.ifixx = _conv(ifixx, dtype=numpy.int32) + self.ifixb = _conv(ifixb, dtype=numpy.int32) + self.job = job + self.iprint = iprint + self.errfile = errfile + self.rptfile = rptfile + self.ndigit = ndigit + self.taufac = taufac + self.sstol = sstol + self.partol = partol + self.maxit = maxit + self.stpb = _conv(stpb) + self.stpd = _conv(stpd) + self.sclb = _conv(sclb) + self.scld = _conv(scld) + self.work = _conv(work) + self.iwork = _conv(iwork) + + self.output = None + + self._check() + + def _check(self): + """ Check the inputs for consistency, but don't bother checking things + that the builtin function odr will check. 
+ """ + + x_s = list(self.data.x.shape) + + if isinstance(self.data.y, numpy.ndarray): + y_s = list(self.data.y.shape) + if self.model.implicit: + raise OdrError("an implicit model cannot use response data") + else: + # implicit model with q == self.data.y + y_s = [self.data.y, x_s[-1]] + if not self.model.implicit: + raise OdrError("an explicit model needs response data") + self.set_job(fit_type=1) + + if x_s[-1] != y_s[-1]: + raise OdrError("number of observations do not match") + + n = x_s[-1] + + if len(x_s) == 2: + m = x_s[0] + else: + m = 1 + if len(y_s) == 2: + q = y_s[0] + else: + q = 1 + + p = len(self.beta0) + + # permissible output array shapes + + fcn_perms = [(q, n)] + fjacd_perms = [(q, m, n)] + fjacb_perms = [(q, p, n)] + + if q == 1: + fcn_perms.append((n,)) + fjacd_perms.append((m, n)) + fjacb_perms.append((p, n)) + if m == 1: + fjacd_perms.append((q, n)) + if p == 1: + fjacb_perms.append((q, n)) + if m == q == 1: + fjacd_perms.append((n,)) + if p == q == 1: + fjacb_perms.append((n,)) + + # try evaluating the supplied functions to make sure they provide + # sensible outputs + + arglist = (self.beta0, self.data.x) + if self.model.extra_args is not None: + arglist = arglist + self.model.extra_args + res = self.model.fcn(*arglist) + + if res.shape not in fcn_perms: + print(res.shape) + print(fcn_perms) + raise OdrError("fcn does not output %s-shaped array" % y_s) + + if self.model.fjacd is not None: + res = self.model.fjacd(*arglist) + if res.shape not in fjacd_perms: + raise OdrError( + "fjacd does not output %s-shaped array" % repr((q, m, n))) + if self.model.fjacb is not None: + res = self.model.fjacb(*arglist) + if res.shape not in fjacb_perms: + raise OdrError( + "fjacb does not output %s-shaped array" % repr((q, p, n))) + + # check shape of delta0 + + if self.delta0 is not None and self.delta0.shape != self.data.x.shape: + raise OdrError( + "delta0 is not a %s-shaped array" % repr(self.data.x.shape)) + + if self.data.x.size == 0: + warn("Empty 
data detected for ODR instance. " + "Do not expect any fitting to occur", + OdrWarning, stacklevel=3) + + def _gen_work(self): + """ Generate a suitable work array if one does not already exist. + """ + + n = self.data.x.shape[-1] + p = self.beta0.shape[0] + + if len(self.data.x.shape) == 2: + m = self.data.x.shape[0] + else: + m = 1 + + if self.model.implicit: + q = self.data.y + elif len(self.data.y.shape) == 2: + q = self.data.y.shape[0] + else: + q = 1 + + if self.data.we is None: + ldwe = ld2we = 1 + elif len(self.data.we.shape) == 3: + ld2we, ldwe = self.data.we.shape[1:] + else: + we = self.data.we + ldwe = 1 + ld2we = 1 + if we.ndim == 1 and q == 1: + ldwe = n + elif we.ndim == 2: + if we.shape == (q, q): + ld2we = q + elif we.shape == (q, n): + ldwe = n + + if self.job % 10 < 2: + # ODR not OLS + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + + 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) + else: + # OLS not ODR + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + + 5*q + q*(p+m) + ldwe*ld2we*q) + + if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\ + and self.work.dtype.str.endswith('f8'): + # the existing array is fine + return + else: + self.work = numpy.zeros((lwork,), float) + + def set_job(self, fit_type=None, deriv=None, var_calc=None, + del_init=None, restart=None): + """ + Sets the "job" parameter is a hopefully comprehensible way. + + If an argument is not specified, then the value is left as is. The + default value from class initialization is for all of these options set + to 0. 
+ + Parameters + ---------- + fit_type : {0, 1, 2} int + 0 -> explicit ODR + + 1 -> implicit ODR + + 2 -> ordinary least-squares + deriv : {0, 1, 2, 3} int + 0 -> forward finite differences + + 1 -> central finite differences + + 2 -> user-supplied derivatives (Jacobians) with results + checked by ODRPACK + + 3 -> user-supplied derivatives, no checking + var_calc : {0, 1, 2} int + 0 -> calculate asymptotic covariance matrix and fit + parameter uncertainties (V_B, s_B) using derivatives + recomputed at the final solution + + 1 -> calculate V_B and s_B using derivatives from last iteration + + 2 -> do not calculate V_B and s_B + del_init : {0, 1} int + 0 -> initial input variable offsets set to 0 + + 1 -> initial offsets provided by user in variable "work" + restart : {0, 1} int + 0 -> fit is not a restart + + 1 -> fit is a restart + + Notes + ----- + The permissible values are different from those given on pg. 31 of the + ODRPACK User's Guide only in that one cannot specify numbers greater than + the last value for each variable. + + If one does not supply functions to compute the Jacobians, the fitting + procedure will change deriv to 0, finite differences, as a default. To + initialize the input variable offsets by yourself, set del_init to 1 and + put the offsets into the "work" variable correctly. 
+ + """ + + if self.job is None: + job_l = [0, 0, 0, 0, 0] + else: + job_l = [self.job // 10000 % 10, + self.job // 1000 % 10, + self.job // 100 % 10, + self.job // 10 % 10, + self.job % 10] + + if fit_type in (0, 1, 2): + job_l[4] = fit_type + if deriv in (0, 1, 2, 3): + job_l[3] = deriv + if var_calc in (0, 1, 2): + job_l[2] = var_calc + if del_init in (0, 1): + job_l[1] = del_init + if restart in (0, 1): + job_l[0] = restart + + self.job = (job_l[0]*10000 + job_l[1]*1000 + + job_l[2]*100 + job_l[3]*10 + job_l[4]) + + def set_iprint(self, init=None, so_init=None, + iter=None, so_iter=None, iter_step=None, final=None, so_final=None): + """ Set the iprint parameter for the printing of computation reports. + + If any of the arguments are specified here, then they are set in the + iprint member. If iprint is not set manually or with this method, then + ODRPACK defaults to no printing. If no filename is specified with the + member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to + print to stdout in addition to the specified filename by setting the + so_* arguments to this function, but one cannot specify to print to + stdout but not a file since one can do that by not specifying a rptfile + filename. + + There are three reports: initialization, iteration, and final reports. + They are represented by the arguments init, iter, and final + respectively. The permissible values are 0, 1, and 2 representing "no + report", "short report", and "long report" respectively. + + The argument iter_step (0 <= iter_step <= 9) specifies how often to make + the iteration report; the report will be made for every iter_step'th + iteration starting with iteration one. If iter_step == 0, then no + iteration report is made, regardless of the other arguments. + + If the rptfile is None, then any so_* arguments supplied will raise an + exception. 
+ """ + if self.iprint is None: + self.iprint = 0 + + ip = [self.iprint // 1000 % 10, + self.iprint // 100 % 10, + self.iprint // 10 % 10, + self.iprint % 10] + + # make a list to convert iprint digits to/from argument inputs + # rptfile, stdout + ip2arg = [[0, 0], # none, none + [1, 0], # short, none + [2, 0], # long, none + [1, 1], # short, short + [2, 1], # long, short + [1, 2], # short, long + [2, 2]] # long, long + + if (self.rptfile is None and + (so_init is not None or + so_iter is not None or + so_final is not None)): + raise OdrError( + "no rptfile specified, cannot output to stdout twice") + + iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]] + + if init is not None: + iprint_l[0] = init + if so_init is not None: + iprint_l[1] = so_init + if iter is not None: + iprint_l[2] = iter + if so_iter is not None: + iprint_l[3] = so_iter + if final is not None: + iprint_l[4] = final + if so_final is not None: + iprint_l[5] = so_final + + if iter_step in range(10): + # 0..9 + ip[2] = iter_step + + ip[0] = ip2arg.index(iprint_l[0:2]) + ip[1] = ip2arg.index(iprint_l[2:4]) + ip[3] = ip2arg.index(iprint_l[4:6]) + + self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3] + + def run(self): + """ Run the fitting routine with all of the information given and with ``full_output=1``. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . 
+ """ # noqa: E501 + + args = (self.model.fcn, self.beta0, self.data.y, self.data.x) + kwds = {'full_output': 1} + kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', + 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', + 'stpd', 'sclb', 'scld', 'work', 'iwork'] + + if self.delta0 is not None and (self.job // 10000) % 10 == 0: + # delta0 provided and fit is not a restart + self._gen_work() + + d0 = numpy.ravel(self.delta0) + + self.work[:len(d0)] = d0 + + # set the kwds from other objects explicitly + if self.model.fjacb is not None: + kwds['fjacb'] = self.model.fjacb + if self.model.fjacd is not None: + kwds['fjacd'] = self.model.fjacd + if self.data.we is not None: + kwds['we'] = self.data.we + if self.data.wd is not None: + kwds['wd'] = self.data.wd + if self.model.extra_args is not None: + kwds['extra_args'] = self.model.extra_args + + # implicitly set kwds from self's members + for attr in kwd_l: + obj = getattr(self, attr) + if obj is not None: + kwds[attr] = obj + + self.output = Output(odr(*args, **kwds)) + + return self.output + + def restart(self, iter=None): + """ Restarts the run with iter more iterations. + + Parameters + ---------- + iter : int, optional + ODRPACK's default for the number of new iterations is 10. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . 
+ """ + + if self.output is None: + raise OdrError("cannot restart: run() has not been called before") + + self.set_job(restart=1) + self.work = self.output.work + self.iwork = self.output.iwork + + self.maxit = iter + + return self.run() diff --git a/venv/lib/python3.10/site-packages/scipy/odr/models.py b/venv/lib/python3.10/site-packages/scipy/odr/models.py new file mode 100644 index 0000000000000000000000000000000000000000..0289b59747bb68a4954e58732ac69d7df144f5f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/odr/models.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.odr` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'Model', 'exponential', 'multilinear', 'unilinear', + 'quadratic', 'polynomial' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="models", + private_modules=["_models"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/odr/odrpack.py b/venv/lib/python3.10/site-packages/scipy/odr/odrpack.py new file mode 100644 index 0000000000000000000000000000000000000000..192fb3342b7957703996957c882d44656706e41b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/odr/odrpack.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.odr` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="odrpack", + private_modules=["_odrpack"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/odr/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/odr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2ac813857fc77bccb20079e7f03f7b4b5c9c7bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65aefdb9fa4a7d53390b440ceed71995cbf549cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py b/venv/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py new file mode 100644 index 0000000000000000000000000000000000000000..3b30d46f1e8f0935fc9fb2116118292679d8941b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py @@ -0,0 +1,565 @@ +import tempfile +import shutil +import os + +import numpy as np +from numpy import pi +from numpy.testing import (assert_array_almost_equal, + 
assert_equal, assert_warns, + assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning, + multilinear, exponential, unilinear, quadratic, + polynomial) + + +class TestODR: + + # Bad Data for 'x' + + def test_bad_data(self): + assert_raises(ValueError, Data, 2, 1) + assert_raises(ValueError, RealData, 2, 1) + + # Empty Data for 'x' + def empty_data_func(self, B, x): + return B[0]*x + B[1] + + def test_empty_data(self): + beta0 = [0.02, 0.0] + linear = Model(self.empty_data_func) + + empty_dat = Data([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + empty_dat = RealData([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + # Explicit Example + + def explicit_fcn(self, B, x): + ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) + return ret + + def explicit_fjd(self, B, x): + eBx = np.exp(B[2]*x) + ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx + return ret + + def explicit_fjb(self, B, x): + eBx = np.exp(B[2]*x) + res = np.vstack([np.ones(x.shape[-1]), + np.power(eBx-1.0, 2), + B[1]*2.0*(eBx-1.0)*eBx*x]) + return res + + def test_explicit(self): + explicit_mod = Model( + self.explicit_fcn, + fjacb=self.explicit_fjb, + fjacd=self.explicit_fjd, + meta=dict(name='Sample Explicit Model', + ref='ODRPACK UG, pg. 
39'), + ) + explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], + [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, + 1213.8,1215.5,1212.]) + explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], + ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) + explicit_odr.set_job(deriv=2) + explicit_odr.set_iprint(init=0, iter=0, final=0) + + out = explicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.2646548050648876e+03, -5.4018409956678255e+01, + -8.7849712165253724e-02]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[4.4949592379003039e-01, -3.7421976890364739e-01, + -8.0978217468468912e-04], + [-3.7421976890364739e-01, 1.0529686462751804e+00, + -1.9453521827942002e-03], + [-8.0978217468468912e-04, -1.9453521827942002e-03, + 1.6827336938454476e-05]]), + ) + + # Implicit Example + + def implicit_fcn(self, B, x): + return (B[2]*np.power(x[0]-B[0], 2) + + 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + + B[4]*np.power(x[1]-B[1], 2) - 1.0) + + def test_implicit(self): + implicit_mod = Model( + self.implicit_fcn, + implicit=1, + meta=dict(name='Sample Implicit Model', + ref='ODRPACK UG, pg. 
49'), + ) + implicit_dat = Data([ + [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, + -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], + [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, + -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], + 1, + ) + implicit_odr = ODR(implicit_dat, implicit_mod, + beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) + + out = implicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, + 0.0162299708984738, 0.0797537982976416]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314, + 0.0027500347539902, 0.0034962501532468]), + ) + assert_allclose( + out.cov_beta, + np.array([[2.1089274602333052e+00, -1.9437686411979040e+00, + 7.0263550868344446e-02, -4.7175267373474862e-02, + 5.2515575927380355e-02], + [-1.9437686411979040e+00, 2.0481509222414456e+00, + -6.1600515853057307e-02, 4.6268827806232933e-02, + -5.8822307501391467e-02], + [7.0263550868344446e-02, -6.1600515853057307e-02, + 2.8659542561579308e-03, -1.4628662260014491e-03, + 1.4528860663055824e-03], + [-4.7175267373474862e-02, 4.6268827806232933e-02, + -1.4628662260014491e-03, 1.2855592885514335e-03, + -1.2692942951415293e-03], + [5.2515575927380355e-02, -5.8822307501391467e-02, + 1.4528860663055824e-03, -1.2692942951415293e-03, + 2.0778813389755596e-03]]), + rtol=1e-6, atol=2e-6, + ) + + # Multi-variable Example + + def multi_fcn(self, B, x): + if (x < 0.0).any(): + raise OdrStop + theta = pi*B[3]/2. 
        # NOTE(review): this chunk begins mid-way through `multi_fcn` (the
        # multi-response model of ODRPACK UG pg. 56); its `def` line and the
        # definitions of `theta`/`pi`/`B`/`x` precede this excerpt — confirm
        # against the full file.
        ctheta = np.cos(theta)
        stheta = np.sin(theta)
        omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
        phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
        r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
             np.power(omega*stheta, 2)), -B[4])
        # Two response rows: real and imaginary-like components of the model.
        ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
                         r*np.sin(B[4]*phi)])
        return ret

    def test_multi(self):
        """Multi-response fit from the ODRPACK User's Guide (pg. 56).

        Exercises per-observation weight matrices (`we`), fixed-x flags
        (`ifixx`) and user-supplied initial deltas (`delta0`), then checks
        beta, sd_beta and cov_beta against the ODRPACK reference output.
        """
        multi_mod = Model(
            self.multi_fcn,
            meta=dict(name='Sample Multi-Response Model',
                      ref='ODRPACK UG, pg. 56'),
        )

        multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0,
            700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0,
            15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
        multi_y = np.array([
            [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
             3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
             2.934, 2.876, 2.838, 2.798, 2.759],
            [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
             0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
             0.202, 0.182, 0.168, 0.153, 0.139],
        ])
        n = len(multi_x)
        multi_we = np.zeros((2, 2, n), dtype=float)
        multi_ifixx = np.ones(n, dtype=int)
        multi_delta = np.zeros(n, dtype=float)

        # 2x2 response-weight matrix, identical for every observation
        # (values from the ODRPACK example).
        multi_we[0,0,:] = 559.6
        multi_we[1,0,:] = multi_we[0,1,:] = -1634.0
        multi_we[1,1,:] = 8397.0

        for i in range(n):
            # Per-observation tuning, graded by the magnitude of x[i].
            if multi_x[i] < 100.0:
                multi_ifixx[i] = 0
            elif multi_x[i] <= 150.0:
                pass  # defaults are fine
            elif multi_x[i] <= 1000.0:
                multi_delta[i] = 25.0
            elif multi_x[i] <= 10000.0:
                multi_delta[i] = 560.0
            elif multi_x[i] <= 100000.0:
                multi_delta[i] = 9500.0
            else:
                multi_delta[i] = 144000.0
            # These two observations are excluded from the fit via zero weight.
            if multi_x[i] == 100.0 or multi_x[i] == 150.0:
                multi_we[:,:,i] = 0.0

        multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
                         we=multi_we)
        multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5],
                        delta0=multi_delta, ifixx=multi_ifixx)
        # deriv=1: forward finite differences; del_init=1: use delta0 above.
        multi_odr.set_job(deriv=1, del_init=1)

        out = multi_odr.run()
        # Reference values below reproduce the ODRPACK UG example output.
        assert_array_almost_equal(
            out.beta,
            np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978,
                0.5101147161764654, 0.5173902330489161]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757,
                0.0132642749596149, 0.0288529201353984]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406,
                -0.0058700836512467, 0.011281212888768],
               [0.0036159705923791, 0.0064793789429006, 0.0517610978353126,
                -0.0051181304940204, 0.0130726943624117],
               [0.0438637051470406, 0.0517610978353126, 0.5182263323095322,
                -0.0563083340093696, 0.1269490939468611],
               [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696,
                 0.0066939246261263, -0.0140184391377962],
               [0.011281212888768, 0.0130726943624117, 0.1269490939468611,
                -0.0140184391377962, 0.0316733013820852]]),
        )

    # Pearson's Data
    # K. Pearson, Philosophical Magazine, 2, 559 (1901)

    def pearson_fcn(self, B, x):
        """Straight-line model y = B[0] + B[1]*x for the Pearson data."""
        return B[0] + B[1]*x

    def test_pearson(self):
        """Fit Pearson's 1901 data with errors in both variables.

        Also fits the data with x and y (and their errors) swapped to
        exercise ODR's invariance under exchanging the roles of the axes.
        """
        p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4])
        p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5])
        p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.])
        p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04])

        p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)

        # Reverse the data to test invariance of results
        pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)

        p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))

        p_odr = ODR(p_dat, p_mod, beta0=[1.,1.])
        pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.])

        out = p_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([5.4767400299231674, -0.4796082367610305]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.3590121690702467, 0.0706291186037444]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0854275622946333, -0.0161807025443155],
               [-0.0161807025443155, 0.003306337993922]]),
        )

        rout = pr_odr.run()
        assert_array_almost_equal(
            rout.beta,
            np.array([11.4192022410781231, -2.0850374506165474]),
        )
        assert_array_almost_equal(
            rout.sd_beta,
            np.array([0.9820231665657161, 0.3070515616198911]),
        )
        assert_array_almost_equal(
            rout.cov_beta,
            np.array([[0.6391799462548782, -0.1955657291119177],
               [-0.1955657291119177, 0.0624888159223392]]),
        )

    # Lorentz Peak
    # The data is taken from one of the undergraduate physics labs I performed.

    def lorentz(self, beta, x):
        """Lorentzian peak: amplitude beta[0], width beta[1], center beta[2]."""
        return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x -
            beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0)))

    def test_lorentz(self):
        """Fit a Lorentzian peak to lab data with per-point x/y errors."""
        l_sy = np.array([.29]*18)
        l_sx = np.array([.000972971,.000948268,.000707632,.000706679,
            .000706074, .000703918,.000698955,.000456856,
            .000455207,.000662717,.000654619,.000652694,
            .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839])

        l_dat = RealData(
            [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
             3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
             3.6562, 3.62498, 3.55525, 3.41886],
            [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
             957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
            sx=l_sx,
            sy=l_sy,
        )
        l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
        l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))

        out = l_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
                 3.7798193600109009e+00]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
                 2.4451209281408992e-04]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
                 -3.1236953270424990e-05],
                [-6.9067261911110836e-05, 5.6077531517333009e-08,
                  3.6133261832722601e-08],
                [-3.1236953270424990e-05, 3.6133261832722601e-08,
                  2.7261220025171730e-08]]),
        )

    def test_ticket_1253(self):
        """Regression test: scalar wd/we weights must be accepted (ticket 1253)."""
        def linear(c, x):
            return c[0]*x+c[1]

        c = [2.0, 3.0]
        x = np.linspace(0, 10)
        y = linear(c, x)

        model = Model(linear)
        data = Data(x, y, wd=1.0, we=1.0)
        job = ODR(data, model, beta0=[1.0, 1.0])
        result = job.run()
        # info == 2 signals convergence on parameters per ODRPACK's stopping codes.
        assert_equal(result.info, 2)

    # Verify fix for gh-9140

    def test_ifixx(self):
        """Passing `fix` via Data and `ifixx` via ODR must give the same fit."""
        x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
        x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
        fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
        data = Data(np.vstack((x1, x2)), y=1, fix=fix)
        # Implicit model: residual x[1] - beta[0]*x[0]**2 should be ~0.
        model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)

        odr1 = ODR(data, model, beta0=np.array([1.]))
        sol1 = odr1.run()
        odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
        sol2 = odr2.run()
        assert_equal(sol1.beta, sol2.beta)

    # verify bugfix for #11800 in #11802
    def test_ticket_11800(self):
        """Regression test for gh-11800: multi-response fit with analytic Jacobians."""
        # parameters
        beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5])
        nr_measurements = 10

        # Pre-generated noise arrays keep the test deterministic.
        std_dev_x = 0.01
        x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866,
            -0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301],
            [-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829,
            0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]])

        std_dev_y = 0.05
        y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642,
            0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929],
            [0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536,
            -0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]])

        beta_solution = np.array([
            2.62920235756665876536e+00, -1.26608484996299608838e+02,
            1.29703572775403074502e+02, -1.88560985401185465804e+00,
            7.83834160771274923718e+01, -7.64124076838087091801e+01])

        # model's function and Jacobians
        def func(beta, x):
            # Two linear responses in the two components of x.
            y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :]
            y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :]

            return np.vstack((y0, y1))

        def df_dbeta_odr(beta, x):
            # Jacobian of the responses w.r.t. beta.
            nr_meas = np.shape(x)[1]
            zeros = np.zeros(nr_meas)
            ones = np.ones(nr_meas)

            dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros])
            dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]])

            return np.stack((dy0, dy1))

        def df_dx_odr(beta, x):
            # Jacobian of the responses w.r.t. x.
            nr_meas = np.shape(x)[1]
            ones = np.ones(nr_meas)

            dy0 = np.array([beta[1] * ones, beta[2] * ones])
            dy1 = np.array([beta[4] * ones, beta[5] * ones])
            return np.stack((dy0, dy1))

        # do measurements with errors in independent and dependent variables
        x0_true = np.linspace(1, 10, nr_measurements)
        x1_true = np.linspace(1, 10, nr_measurements)
        x_true = np.array([x0_true, x1_true])

        y_true = func(beta_true, x_true)

        x_meas = x_true + x_error
        y_meas = y_true + y_error

        # estimate model's parameters
        model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr)

        data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y)

        odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100)
        #odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1)
        # deriv=3: use the user-supplied analytic Jacobians above.
        odr_obj.set_job(deriv=3)

        odr_out = odr_obj.run()

        # check results
        assert_equal(odr_out.info, 1)
        assert_array_almost_equal(odr_out.beta, beta_solution)

    def test_multilinear_model(self):
        """The built-in `multilinear` model recovers intercept and slope."""
        x = np.linspace(0.0, 5.0)
        y = 10.0 + 5.0 * x
        data = Data(x, y)
        odr_obj = ODR(data, multilinear)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [10.0, 5.0])

    def test_exponential_model(self):
        """The built-in `exponential` model recovers offset and rate."""
        x = np.linspace(0.0, 5.0)
        y = -10.0 + np.exp(0.5*x)
        data = Data(x, y)
        odr_obj = ODR(data, exponential)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [-10.0, 0.5])

    def test_polynomial_model(self):
        """The built-in `polynomial` factory fits an exact cubic."""
        x = np.linspace(0.0, 5.0)
        y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3
        poly_model = polynomial(3)
        data = Data(x, y)
        odr_obj = ODR(data, poly_model)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])

    def test_unilinear_model(self):
        """The built-in `unilinear` model recovers slope and intercept."""
        x = np.linspace(0.0, 5.0)
        y = 1.0 * x + 2.0
        data = Data(x, y)
        odr_obj = ODR(data, unilinear)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [1.0, 2.0])

    def test_quadratic_model(self):
        """The built-in `quadratic` model recovers all three coefficients."""
        x = np.linspace(0.0, 5.0)
        y = 1.0 * x ** 2 + 2.0 * x + 3.0
        data = Data(x, y)
        odr_obj = ODR(data, quadratic)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])

    def test_work_ind(self):
        """`Output.work_ind['sd']` must index sd_beta inside the work array."""

        def func(par, x):
            b0, b1 = par
            return b0 + b1 * x

        # generate some data
        n_data = 4
        x = np.arange(n_data)
        y = np.where(x % 2, x + 0.1, x - 0.1)
        x_err = np.full(n_data, 0.1)
        y_err = np.full(n_data, 0.1)

        # do the fitting
        linear_model = Model(func)
        real_data = RealData(x, y, sx=x_err, sy=y_err)
        odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4])
        # fit_type=0: full explicit ODR fit.
        odr_obj.set_job(fit_type=0)
        out = odr_obj.run()

        sd_ind = out.work_ind['sd']
        assert_array_almost_equal(out.sd_beta,
                                  out.work[sd_ind:sd_ind + len(out.sd_beta)])

    @pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better "
                                     "not to run this test, see gh-13127")
    def test_output_file_overwrite(self):
        """
        Verify fix for gh-1892
        """
        def func(b, x):
            return b[0] + b[1] * x

        p = Model(func)
        data = Data(np.arange(10), 12 * np.arange(10))
        tmp_dir = tempfile.mkdtemp()
        error_file_path = os.path.join(tmp_dir, "error.dat")
        report_file_path = os.path.join(tmp_dir, "report.dat")
        try:
            # First run creates the files; second must succeed with overwrite=True.
            ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
                rptfile=report_file_path).run()
            ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
                rptfile=report_file_path, overwrite=True).run()
        finally:
            # remove output files for clean up
            shutil.rmtree(tmp_dir)

    def test_odr_model_default_meta(self):
        """`Model.set_meta` must populate the meta dict in place."""
        def func(b, x):
            return b[0] + b[1] * x

        p = Model(func)
        p.set_meta(name='Sample Model Meta', ref='ODRPACK')
        assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'})

    def test_work_array_del_init(self):
        """
        Verify fix for gh-18739 where del_init=1 fails.
        """
        def func(b, x):
            return b[0] + b[1] * x

        # generate some data
        n_data = 4
        x = np.arange(n_data)
        y = np.where(x % 2, x + 0.1, x - 0.1)
        x_err = np.full(n_data, 0.1)
        y_err = np.full(n_data, 0.1)

        linear_model = Model(func)
        # Try various shapes of the `we` array from various `sy` and `covy`
        rd0 = RealData(x, y, sx=x_err, sy=y_err)
        rd1 = RealData(x, y, sx=x_err, sy=0.1)
        rd2 = RealData(x, y, sx=x_err, sy=[0.1])
        rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1))
        rd4 = RealData(x, y, sx=x_err, covy=[[0.01]])
        rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01))
        for rd in [rd0, rd1, rd2, rd3, rd4, rd5]:
            odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4],
                          delta0=np.full(n_data, -0.1))
            odr_obj.set_job(fit_type=0, del_init=1)
            # Just make sure that it runs without raising an exception.
            odr_obj.run()