diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..887cf31003cd8de9f6cdc837b7ce56c692dccb74 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__init__.py @@ -0,0 +1,594 @@ +# Copyright (C) 2022-2023 Adam Lugowski. All rights reserved. +# Use of this source code is governed by the BSD 2-clause license found in +# the LICENSE.txt file. +# SPDX-License-Identifier: BSD-2-Clause +""" +Matrix Market I/O with a C++ backend. +See http://math.nist.gov/MatrixMarket/formats.html +for information about the Matrix Market format. + +.. versionadded:: 1.12.0 +""" +import io +import os + +import numpy as np +import scipy.sparse +from scipy.io import _mmio + +__all__ = ['mminfo', 'mmread', 'mmwrite'] + +PARALLELISM = 0 +""" +Number of threads that `mmread()` and `mmwrite()` use. +0 means number of CPUs in the system. +Use `threadpoolctl` to set this value. +""" + +ALWAYS_FIND_SYMMETRY = False +""" +Whether mmwrite() with symmetry='AUTO' will always search for symmetry +inside the matrix. This is scipy.io._mmio.mmwrite()'s default behavior, +but has a significant performance cost on large matrices. +""" + +_field_to_dtype = { + "integer": "int64", + "unsigned-integer": "uint64", + "real": "float64", + "complex": "complex", + "pattern": "float64", +} + + +def _fmm_version(): + from . 
import _fmm_core + return _fmm_core.__version__ + + +# Register with threadpoolctl, if available +try: + import threadpoolctl + + class _FMMThreadPoolCtlController(threadpoolctl.LibController): + user_api = "scipy" + internal_api = "scipy_mmio" + + filename_prefixes = ("_fmm_core",) + + def get_num_threads(self): + global PARALLELISM + return PARALLELISM + + def set_num_threads(self, num_threads): + global PARALLELISM + PARALLELISM = num_threads + + def get_version(self): + return _fmm_version + + def set_additional_attributes(self): + pass + + threadpoolctl.register(_FMMThreadPoolCtlController) +except (ImportError, AttributeError): + # threadpoolctl not installed or version too old + pass + + +class _TextToBytesWrapper(io.BufferedReader): + """ + Convert a TextIOBase string stream to a byte stream. + """ + + def __init__(self, text_io_buffer, encoding=None, errors=None, **kwargs): + super().__init__(text_io_buffer, **kwargs) + self.encoding = encoding or text_io_buffer.encoding or 'utf-8' + self.errors = errors or text_io_buffer.errors or 'strict' + + def __del__(self): + # do not close the wrapped stream + self.detach() + + def _encoding_call(self, method_name, *args, **kwargs): + raw_method = getattr(self.raw, method_name) + val = raw_method(*args, **kwargs) + return val.encode(self.encoding, errors=self.errors) + + def read(self, size=-1): + return self._encoding_call('read', size) + + def read1(self, size=-1): + return self._encoding_call('read1', size) + + def peek(self, size=-1): + return self._encoding_call('peek', size) + + def seek(self, offset, whence=0): + # Random seeks are not allowed because of non-trivial conversion + # between byte and character offsets, + # with the possibility of a byte offset landing within a character. 
+ if offset == 0 and whence == 0 or \ + offset == 0 and whence == 2: + # seek to start or end is ok + super().seek(offset, whence) + else: + # Drop any other seek + # In this application this may happen when pystreambuf seeks during sync(), + # which can happen when closing a partially-read stream. + # Ex. when mminfo() only reads the header then exits. + pass + + +def _read_body_array(cursor): + """ + Read MatrixMarket array body + """ + from . import _fmm_core + + vals = np.zeros(cursor.header.shape, dtype=_field_to_dtype.get(cursor.header.field)) + _fmm_core.read_body_array(cursor, vals) + return vals + + +def _read_body_coo(cursor, generalize_symmetry=True): + """ + Read MatrixMarket coordinate body + """ + from . import _fmm_core + + index_dtype = "int32" + if cursor.header.nrows >= 2**31 or cursor.header.ncols >= 2**31: + # Dimensions are too large to fit in int32 + index_dtype = "int64" + + i = np.zeros(cursor.header.nnz, dtype=index_dtype) + j = np.zeros(cursor.header.nnz, dtype=index_dtype) + data = np.zeros(cursor.header.nnz, dtype=_field_to_dtype.get(cursor.header.field)) + + _fmm_core.read_body_coo(cursor, i, j, data) + + if generalize_symmetry and cursor.header.symmetry != "general": + off_diagonal_mask = (i != j) + off_diagonal_rows = i[off_diagonal_mask] + off_diagonal_cols = j[off_diagonal_mask] + off_diagonal_data = data[off_diagonal_mask] + + if cursor.header.symmetry == "skew-symmetric": + off_diagonal_data *= -1 + elif cursor.header.symmetry == "hermitian": + off_diagonal_data = off_diagonal_data.conjugate() + + i = np.concatenate((i, off_diagonal_cols)) + j = np.concatenate((j, off_diagonal_rows)) + data = np.concatenate((data, off_diagonal_data)) + + return (data, (i, j)), cursor.header.shape + + +def _get_read_cursor(source, parallelism=None): + """ + Open file for reading. + """ + from . 
import _fmm_core + + ret_stream_to_close = None + if parallelism is None: + parallelism = PARALLELISM + + try: + source = os.fspath(source) + # It's a file path + is_path = True + except TypeError: + is_path = False + + if is_path: + path = str(source) + if path.endswith('.gz'): + import gzip + source = gzip.GzipFile(path, 'r') + ret_stream_to_close = source + elif path.endswith('.bz2'): + import bz2 + source = bz2.BZ2File(path, 'rb') + ret_stream_to_close = source + else: + return _fmm_core.open_read_file(path, parallelism), ret_stream_to_close + + # Stream object. + if hasattr(source, "read"): + if isinstance(source, io.TextIOBase): + source = _TextToBytesWrapper(source) + return _fmm_core.open_read_stream(source, parallelism), ret_stream_to_close + else: + raise TypeError("Unknown source type") + + +def _get_write_cursor(target, h=None, comment=None, parallelism=None, + symmetry="general", precision=None): + """ + Open file for writing. + """ + from . import _fmm_core + + if parallelism is None: + parallelism = PARALLELISM + if comment is None: + comment = '' + if symmetry is None: + symmetry = "general" + if precision is None: + precision = -1 + + if not h: + h = _fmm_core.header(comment=comment, symmetry=symmetry) + + try: + target = os.fspath(target) + # It's a file path + return _fmm_core.open_write_file(str(target), h, parallelism, precision) + except TypeError: + pass + + if hasattr(target, "write"): + # Stream object. + if isinstance(target, io.TextIOBase): + raise TypeError("target stream must be open in binary mode.") + return _fmm_core.open_write_stream(target, h, parallelism, precision) + else: + raise TypeError("Unknown source object") + + +def _apply_field(data, field, no_pattern=False): + """ + Ensure that ``data.dtype`` is compatible with the specified MatrixMarket field type. + + Parameters + ---------- + data : ndarray + Input array. + + field : str + Matrix Market field, such as 'real', 'complex', 'integer', 'pattern'. 
+ + no_pattern : bool, optional + Whether an empty array may be returned for a 'pattern' field. + + Returns + ------- + data : ndarray + Input data if no conversion necessary, or a converted version + """ + + if field is None: + return data + if field == "pattern": + if no_pattern: + return data + else: + return np.zeros(0) + + dtype = _field_to_dtype.get(field, None) + if dtype is None: + raise ValueError("Invalid field.") + + return np.asarray(data, dtype=dtype) + + +def _validate_symmetry(symmetry): + """ + Check that the symmetry parameter is one that MatrixMarket allows.. + """ + if symmetry is None: + return "general" + + symmetry = str(symmetry).lower() + symmetries = ["general", "symmetric", "skew-symmetric", "hermitian"] + if symmetry not in symmetries: + raise ValueError("Invalid symmetry. Must be one of: " + ", ".join(symmetries)) + + return symmetry + + +def mmread(source): + """ + Reads the contents of a Matrix Market file-like 'source' into a matrix. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extensions .mtx, .mtz.gz) + or open file-like object. + + Returns + ------- + a : ndarray or coo_matrix + Dense or sparse matrix depending on the matrix format in the + Matrix Market file. + + Notes + ----- + .. versionchanged:: 1.12.0 + C++ implementation. + + Examples + -------- + >>> from io import StringIO + >>> from scipy.io import mmread + + >>> text = '''%%MatrixMarket matrix coordinate real general + ... 5 5 7 + ... 2 3 1.0 + ... 3 4 2.0 + ... 3 5 3.0 + ... 4 1 4.0 + ... 4 2 5.0 + ... 4 3 6.0 + ... 4 4 7.0 + ... ''' + + ``mmread(source)`` returns the data as sparse matrix in COO format. + + >>> m = mmread(StringIO(text)) + >>> m + <5x5 sparse matrix of type '' + with 7 stored elements in COOrdinate format> + >>> m.A + array([[0., 0., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 2., 3.], + [4., 5., 6., 7., 0.], + [0., 0., 0., 0., 0.]]) + + This method is threaded. 
+ The default number of threads is equal to the number of CPUs in the system. + Use `threadpoolctl `_ to override: + + >>> import threadpoolctl + >>> + >>> with threadpoolctl.threadpool_limits(limits=2): + ... m = mmread(StringIO(text)) + + """ + cursor, stream_to_close = _get_read_cursor(source) + + if cursor.header.format == "array": + mat = _read_body_array(cursor) + if stream_to_close: + stream_to_close.close() + return mat + else: + from scipy.sparse import coo_matrix + triplet, shape = _read_body_coo(cursor, generalize_symmetry=True) + if stream_to_close: + stream_to_close.close() + return coo_matrix(triplet, shape=shape) + + +def mmwrite(target, a, comment=None, field=None, precision=None, symmetry="AUTO"): + r""" + Writes the sparse or dense array `a` to Matrix Market file-like `target`. + + Parameters + ---------- + target : str or file-like + Matrix Market filename (extension .mtx) or open file-like object. + a : array like + Sparse or dense 2-D array. + comment : str, optional + Comments to be prepended to the Matrix Market file. + field : None or str, optional + Either 'real', 'complex', 'pattern', or 'integer'. + precision : None or int, optional + Number of digits to display for real or complex values. + symmetry : None or str, optional + Either 'AUTO', 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + If symmetry is None the symmetry type of 'a' is determined by its + values. If symmetry is 'AUTO' the symmetry type of 'a' is either + determined or set to 'general', at mmwrite's discretion. + + Returns + ------- + None + + Notes + ----- + .. versionchanged:: 1.12.0 + C++ implementation. + + Examples + -------- + >>> from io import BytesIO + >>> import numpy as np + >>> from scipy.sparse import coo_matrix + >>> from scipy.io import mmwrite + + Write a small NumPy array to a matrix market file. The file will be + written in the ``'array'`` format. 
+ + >>> a = np.array([[1.0, 0, 0, 0], [0, 2.5, 0, 6.25]]) + >>> target = BytesIO() + >>> mmwrite(target, a) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array real general + % + 2 4 + 1 + 0 + 0 + 2.5 + 0 + 0 + 0 + 6.25 + + Add a comment to the output file, and set the precision to 3. + + >>> target = BytesIO() + >>> mmwrite(target, a, comment='\n Some test data.\n', precision=3) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array real general + % + % Some test data. + % + 2 4 + 1.00e+00 + 0.00e+00 + 0.00e+00 + 2.50e+00 + 0.00e+00 + 0.00e+00 + 0.00e+00 + 6.25e+00 + + Convert to a sparse matrix before calling ``mmwrite``. This will + result in the output format being ``'coordinate'`` rather than + ``'array'``. + + >>> target = BytesIO() + >>> mmwrite(target, coo_matrix(a), precision=3) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix coordinate real general + % + 2 4 3 + 1 1 1.00e+00 + 2 2 2.50e+00 + 2 4 6.25e+00 + + Write a complex Hermitian array to a matrix market file. Note that + only six values are actually written to the file; the other values + are implied by the symmetry. + + >>> z = np.array([[3, 1+2j, 4-3j], [1-2j, 1, -5j], [4+3j, 5j, 2.5]]) + >>> z + array([[ 3. +0.j, 1. +2.j, 4. -3.j], + [ 1. -2.j, 1. +0.j, -0. -5.j], + [ 4. +3.j, 0. +5.j, 2.5+0.j]]) + + >>> target = BytesIO() + >>> mmwrite(target, z, precision=2) + >>> print(target.getvalue().decode('latin1')) + %%MatrixMarket matrix array complex hermitian + % + 3 3 + 3.0e+00 0.0e+00 + 1.0e+00 -2.0e+00 + 4.0e+00 3.0e+00 + 1.0e+00 0.0e+00 + 0.0e+00 5.0e+00 + 2.5e+00 0.0e+00 + + This method is threaded. + The default number of threads is equal to the number of CPUs in the system. + Use `threadpoolctl `_ to override: + + >>> import threadpoolctl + >>> + >>> target = BytesIO() + >>> with threadpoolctl.threadpool_limits(limits=2): + ... mmwrite(target, a) + + """ + from . 
import _fmm_core + + if isinstance(a, list) or isinstance(a, tuple) or hasattr(a, "__array__"): + a = np.asarray(a) + + if symmetry == "AUTO": + if ALWAYS_FIND_SYMMETRY or (hasattr(a, "shape") and max(a.shape) < 100): + symmetry = None + else: + symmetry = "general" + + if symmetry is None: + symmetry = _mmio.MMFile()._get_symmetry(a) + + symmetry = _validate_symmetry(symmetry) + cursor = _get_write_cursor(target, comment=comment, + precision=precision, symmetry=symmetry) + + if isinstance(a, np.ndarray): + # Write dense numpy arrays + a = _apply_field(a, field, no_pattern=True) + _fmm_core.write_body_array(cursor, a) + + elif scipy.sparse.issparse(a): + # Write sparse scipy matrices + a = a.tocoo() + + if symmetry is not None and symmetry != "general": + # A symmetric matrix only specifies the elements below the diagonal. + # Ensure that the matrix satisfies this requirement. + from scipy.sparse import coo_array + lower_triangle_mask = a.row >= a.col + a = coo_array((a.data[lower_triangle_mask], + (a.row[lower_triangle_mask], + a.col[lower_triangle_mask])), shape=a.shape) + + data = _apply_field(a.data, field) + _fmm_core.write_body_coo(cursor, a.shape, a.row, a.col, data) + + else: + raise ValueError("unknown matrix type: %s" % type(a)) + + +def mminfo(source): + """ + Return size and storage parameters from Matrix Market file-like 'source'. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extension .mtx) or open file-like object + + Returns + ------- + rows : int + Number of matrix rows. + cols : int + Number of matrix columns. + entries : int + Number of non-zero entries of a sparse matrix + or rows*cols for a dense matrix. + format : str + Either 'coordinate' or 'array'. + field : str + Either 'real', 'complex', 'pattern', or 'integer'. + symmetry : str + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + + Notes + ----- + .. versionchanged:: 1.12.0 + C++ implementation. 
+ + Examples + -------- + >>> from io import StringIO + >>> from scipy.io import mminfo + + >>> text = '''%%MatrixMarket matrix coordinate real general + ... 5 5 7 + ... 2 3 1.0 + ... 3 4 2.0 + ... 3 5 3.0 + ... 4 1 4.0 + ... 4 2 5.0 + ... 4 3 6.0 + ... 4 4 7.0 + ... ''' + + + ``mminfo(source)`` returns the number of rows, number of columns, + format, field type and symmetry attribute of the source file. + + >>> mminfo(StringIO(text)) + (5, 5, 7, 'coordinate', 'real', 'general') + """ + cursor, stream_to_close = _get_read_cursor(source, 1) + h = cursor.header + cursor.close() + if stream_to_close: + stream_to_close.close() + return h.nrows, h.ncols, h.nnz, h.format, h.field, h.symmetry diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdcbf2cbd6787d8f8a1c158c271ec1407362f708 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f3257cca30b6b2d7fff4e3f27ac0c9bce135a0f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__init__.py @@ -0,0 +1,63 @@ +""" +MATLAB® file utilities (:mod:`scipy.io.matlab`) +=============================================== + +.. currentmodule:: scipy.io.matlab + +This submodule is meant to provide lower-level file utilities related to reading +and writing MATLAB files. + +.. 
autosummary:: + :toctree: generated/ + + matfile_version - Get the MATLAB file version + MatReadError - Exception indicating a read issue + MatReadWarning - Warning class for read issues + MatWriteError - Exception indicating a write issue + mat_struct - Class used when ``struct_as_record=False`` + +.. autosummary:: + :toctree: generated/ + :template: autosummary/ndarray_subclass.rst + :nosignatures: + + MatlabObject - Class for a MATLAB object + MatlabOpaque - Class for a MATLAB opaque matrix + MatlabFunction - Class for a MATLAB function object + +The following utilities that live in the :mod:`scipy.io` +namespace also exist in this namespace: + +.. autosummary:: + :toctree: generated/ + + loadmat - Read a MATLAB style mat file (version 4 through 7.1) + savemat - Write a MATLAB style mat file (version 4 through 7.1) + whosmat - List contents of a MATLAB style mat file (version 4 through 7.1) + +Notes +----- +MATLAB(R) is a registered trademark of The MathWorks, Inc., 3 Apple Hill +Drive, Natick, MA 01760-2098, USA. 
+ +""" +# Matlab file read and write utilities +from ._mio import loadmat, savemat, whosmat +from ._mio5 import MatlabFunction +from ._mio5_params import MatlabObject, MatlabOpaque, mat_struct +from ._miobase import (matfile_version, MatReadError, MatReadWarning, + MatWriteError) + +# Deprecated namespaces, to be removed in v2.0.0 +from .import (mio, mio5, mio5_params, mio4, byteordercodes, + miobase, mio_utils, streams, mio5_utils) + +__all__ = [ + 'loadmat', 'savemat', 'whosmat', 'MatlabObject', + 'matfile_version', 'MatReadError', 'MatReadWarning', + 'MatWriteError', 'mat_struct', 'MatlabOpaque', 'MatlabFunction' +] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6492a0177df41d1e2c8678beffe29ea65b46baaa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_byteordercodes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_byteordercodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0cc368a4017d5929155fda1a0d3fbee94b601c0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_byteordercodes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6af24de5c5b53577754197f30febbbe1f9525bc Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio4.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..169dc50597f1be29e791d2c884be8e814cb8534b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio4.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio5.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83a8a100667de1bace1aff8aee0f46cae475e28b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio5.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio5_params.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio5_params.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e508df8f3bc32aa2da77c1c7cc5eb91abd4cf09a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_mio5_params.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_miobase.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_miobase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16c3cf830cbb88bb133582d8df1f8b4cbc8829b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/_miobase.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/byteordercodes.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/byteordercodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..152746c4e383b333602e11bd592cfe6e2e9adc1a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/byteordercodes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf486c52c564480abbbfce9a93283a3f70f6ae9b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio4.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..246edb0197621a1f1cdc667881edf215cc4715fb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio4.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8e57682fd81263f95472f56f840652907f4f5d0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5_params.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5_params.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ec9bf3943100ad061dd606fe910d80a09fa87c4 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5_params.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7258931154379905da604c538e6bb23a2853c4cf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio5_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83f176b4aa9e074fd213e446a249b782869ff744 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/mio_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/miobase.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/miobase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0029b1d47d7f47a35a4751c6c6bc5472d87461e9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/miobase.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/streams.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5a3cf031712ca0ad104e1c0b44fda62fb86e375 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/__pycache__/streams.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_byteordercodes.py 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_byteordercodes.py new file mode 100644 index 0000000000000000000000000000000000000000..f9c02a0e01306dae327079b3c9b0e5bc6a74b30e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_byteordercodes.py @@ -0,0 +1,75 @@ +''' Byteorder utilities for system - numpy byteorder encoding + +Converts a variety of string codes for little endian, big endian, +native byte order and swapped byte order to explicit NumPy endian +codes - one of '<' (little endian) or '>' (big endian) + +''' +import sys + +__all__ = [ + 'aliases', 'native_code', 'swapped_code', + 'sys_is_le', 'to_numpy_code' +] + +sys_is_le = sys.byteorder == 'little' +native_code = sys_is_le and '<' or '>' +swapped_code = sys_is_le and '>' or '<' + +aliases = {'little': ('little', '<', 'l', 'le'), + 'big': ('big', '>', 'b', 'be'), + 'native': ('native', '='), + 'swapped': ('swapped', 'S')} + + +def to_numpy_code(code): + """ + Convert various order codings to NumPy format. + + Parameters + ---------- + code : str + The code to convert. It is converted to lower case before parsing. + Legal values are: + 'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=', + 'swapped', 's'. + + Returns + ------- + out_code : {'<', '>'} + Here '<' is the numpy dtype code for little endian, + and '>' is the code for big endian. 
+ + Examples + -------- + >>> import sys + >>> from scipy.io.matlab._byteordercodes import to_numpy_code + >>> sys_is_le = (sys.byteorder == 'little') + >>> sys_is_le + True + >>> to_numpy_code('big') + '>' + >>> to_numpy_code('little') + '<' + >>> nc = to_numpy_code('native') + >>> nc == '<' if sys_is_le else nc == '>' + True + >>> sc = to_numpy_code('swapped') + >>> sc == '>' if sys_is_le else sc == '<' + True + + """ + code = code.lower() + if code is None: + return native_code + if code in aliases['little']: + return '<' + elif code in aliases['big']: + return '>' + elif code in aliases['native']: + return native_code + elif code in aliases['swapped']: + return swapped_code + else: + raise ValueError( + 'We cannot handle byte order %s' % code) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio.py new file mode 100644 index 0000000000000000000000000000000000000000..5be5301da81d28b0e5a62f5df867f4bb7e0a6a94 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio.py @@ -0,0 +1,359 @@ +""" +Module for reading and writing matlab (TM) .mat files +""" +# Authors: Travis Oliphant, Matthew Brett + +from contextlib import contextmanager + +from ._miobase import _get_matfile_version, docfiller +from ._mio4 import MatFile4Reader, MatFile4Writer +from ._mio5 import MatFile5Reader, MatFile5Writer + +__all__ = ['mat_reader_factory', 'loadmat', 'savemat', 'whosmat'] + + +@contextmanager +def _open_file_context(file_like, appendmat, mode='rb'): + f, opened = _open_file(file_like, appendmat, mode) + try: + yield f + finally: + if opened: + f.close() + + +def _open_file(file_like, appendmat, mode='rb'): + """ + Open `file_like` and return as file-like object. First, check if object is + already file-like; if so, return it as-is. Otherwise, try to pass it + to open(). If that fails, and `file_like` is a string, and `appendmat` is true, + append '.mat' and try again. 
    """
    # Duck-type check: treat `file_like` as an already-open stream if it
    # exposes the methods the requested mode needs; otherwise open by name.
    reqs = {'read'} if set(mode) & set('r+') else set()
    if set(mode) & set('wax+'):
        reqs.add('write')
    if reqs.issubset(dir(file_like)):
        return file_like, False

    try:
        return open(file_like, mode), True
    except OSError as e:
        # Probably "not found"
        if isinstance(file_like, str):
            # Retry once with the conventional .mat suffix appended.
            if appendmat and not file_like.endswith('.mat'):
                file_like += '.mat'
            return open(file_like, mode), True
        else:
            raise OSError(
                'Reader needs file name or open file-like object'
            ) from e


@docfiller
def mat_reader_factory(file_name, appendmat=True, **kwargs):
    """
    Create reader for matlab .mat format files.

    Parameters
    ----------
    %(file_arg)s
    %(append_arg)s
    %(load_args)s
    %(struct_arg)s

    Returns
    -------
    matreader : MatFileReader object
       Initialized instance of MatFileReader class matching the mat file
       type detected in `filename`.
    file_opened : bool
       Whether the file was opened by this routine.

    """
    byte_stream, file_opened = _open_file(file_name, appendmat)
    # Sniff the on-disk major/minor version to dispatch to the right reader.
    mjv, mnv = _get_matfile_version(byte_stream)
    if mjv == 0:
        return MatFile4Reader(byte_stream, **kwargs), file_opened
    elif mjv == 1:
        return MatFile5Reader(byte_stream, **kwargs), file_opened
    elif mjv == 2:
        raise NotImplementedError('Please use HDF reader for matlab v7.3 '
                                  'files, e.g. h5py')
    else:
        raise TypeError('Did not recognize version %s' % mjv)


@docfiller
def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
    """
    Load MATLAB file.

    Parameters
    ----------
    file_name : str
       Name of the mat file (do not need .mat extension if
       appendmat==True). Can also pass open file-like object.
    mdict : dict, optional
        Dictionary in which to insert matfile variables.
    appendmat : bool, optional
       True to append the .mat extension to the end of the given
       filename, if not already present. Default is True.
    byte_order : str or None, optional
       None by default, implying byte order guessed from mat
       file. Otherwise can be one of ('native', '=', 'little', '<',
       'BIG', '>').
    mat_dtype : bool, optional
       If True, return arrays in same dtype as would be loaded into
       MATLAB (instead of the dtype with which they are saved).
    squeeze_me : bool, optional
       Whether to squeeze unit matrix dimensions or not.
    chars_as_strings : bool, optional
       Whether to convert char arrays to string arrays.
    matlab_compatible : bool, optional
       Returns matrices as would be loaded by MATLAB (implies
       squeeze_me=False, chars_as_strings=False, mat_dtype=True,
       struct_as_record=True).
    struct_as_record : bool, optional
       Whether to load MATLAB structs as NumPy record arrays, or as
       old-style NumPy arrays with dtype=object. Setting this flag to
       False replicates the behavior of scipy version 0.7.x (returning
       NumPy object arrays). The default setting is True, because it
       allows easier round-trip load and save of MATLAB files.
    verify_compressed_data_integrity : bool, optional
        Whether the length of compressed sequences in the MATLAB file
        should be checked, to ensure that they are not longer than we expect.
        It is advisable to enable this (the default) because overlong
        compressed sequences in MATLAB files generally indicate that the
        files have experienced some sort of corruption.
    variable_names : None or sequence
        If None (the default) - read all variables in file. Otherwise,
        `variable_names` should be a sequence of strings, giving names of the
        MATLAB variables to read from the file. The reader will skip any
        variable with a name not in this sequence, possibly saving some read
        processing.
    simplify_cells : False, optional
        If True, return a simplified dict structure (which is useful if the mat
        file contains cell arrays). Note that this only affects the structure
        of the result and not its contents (which is identical for both output
        structures). If True, this automatically sets `struct_as_record` to
        False and `squeeze_me` to True, which is required to simplify cells.

    Returns
    -------
    mat_dict : dict
       dictionary with variable names as keys, and loaded matrices as
       values.

    Notes
    -----
    v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.

    You will need an HDF5 Python library to read MATLAB 7.3 format mat
    files. Because SciPy does not supply one, we do not implement the
    HDF5 / 7.3 interface here.

    Examples
    --------
    >>> from os.path import dirname, join as pjoin
    >>> import scipy.io as sio

    Get the filename for an example .mat file from the tests/data directory.

    >>> data_dir = pjoin(dirname(sio.__file__), 'matlab', 'tests', 'data')
    >>> mat_fname = pjoin(data_dir, 'testdouble_7.4_GLNX86.mat')

    Load the .mat file contents.

    >>> mat_contents = sio.loadmat(mat_fname)

    The result is a dictionary, one key/value pair for each variable:

    >>> sorted(mat_contents.keys())
    ['__globals__', '__header__', '__version__', 'testdouble']
    >>> mat_contents['testdouble']
    array([[0.        , 0.78539816, 1.57079633, 2.35619449, 3.14159265,
            3.92699082, 4.71238898, 5.49778714, 6.28318531]])

    By default SciPy reads MATLAB structs as structured NumPy arrays where the
    dtype fields are of type `object` and the names correspond to the MATLAB
    struct field names. This can be disabled by setting the optional argument
    `struct_as_record=False`.

    Get the filename for an example .mat file that contains a MATLAB struct
    called `teststruct` and load the contents.

    >>> matstruct_fname = pjoin(data_dir, 'teststruct_7.4_GLNX86.mat')
    >>> matstruct_contents = sio.loadmat(matstruct_fname)
    >>> teststruct = matstruct_contents['teststruct']
    >>> teststruct.dtype
    dtype([('stringfield', 'O'), ('doublefield', 'O'), ('complexfield', 'O')])

    The size of the structured array is the size of the MATLAB struct, not the
    number of elements in any particular field. The shape defaults to 2-D
    unless the optional argument `squeeze_me=True`, in which case all length 1
    dimensions are removed.

    >>> teststruct.size
    1
    >>> teststruct.shape
    (1, 1)

    Get the 'stringfield' of the first element in the MATLAB struct.

    >>> teststruct[0, 0]['stringfield']
    array(['Rats live on no evil star.'],
          dtype='<U26')

    Get the 'doublefield' of the first element in the MATLAB struct.

    >>> teststruct['doublefield'][0, 0]
    array([[ 1.41421356,  2.71828183,  3.14159265]])

    Load the MATLAB struct, squeezing out length 1 dimensions, and get the item
    from the 'complexfield'.

    >>> matstruct_squeezed = sio.loadmat(matstruct_fname, squeeze_me=True)
    >>> matstruct_squeezed['teststruct'].shape
    ()
    >>> matstruct_squeezed['teststruct']['complexfield'].shape
    ()
    >>> matstruct_squeezed['teststruct']['complexfield'].item()
    array([ 1.41421356+1.41421356j,  2.71828183+2.71828183j,
        3.14159265+3.14159265j])
    """
    # `variable_names` is consumed here rather than passed to the reader
    # constructor: it is a per-read option, not a reader option.
    variable_names = kwargs.pop('variable_names', None)
    with _open_file_context(file_name, appendmat) as f:
        MR, _ = mat_reader_factory(f, **kwargs)
        matfile_dict = MR.get_variables(variable_names)

    if mdict is not None:
        # Caller supplied a dict: update it in place and also return it.
        mdict.update(matfile_dict)
    else:
        mdict = matfile_dict

    return mdict


@docfiller
def savemat(file_name, mdict,
            appendmat=True,
            format='5',
            long_field_names=False,
            do_compression=False,
            oned_as='row'):
    """
    Save a dictionary of names and arrays into a MATLAB-style .mat file.

    This saves the array objects in the given dictionary to a MATLAB-
    style .mat file.

    Parameters
    ----------
    file_name : str or file-like object
        Name of the .mat file (.mat extension not needed if ``appendmat ==
        True``).
        Can also pass open file_like object.
    mdict : dict
        Dictionary from which to save matfile variables.
    appendmat : bool, optional
        True (the default) to append the .mat extension to the end of the
        given filename, if not already present.
    format : {'5', '4'}, string, optional
        '5' (the default) for MATLAB 5 and up (to 7.2),
        '4' for MATLAB 4 .mat files.
    long_field_names : bool, optional
        False (the default) - maximum field name length in a structure is
        31 characters which is the documented maximum length.
        True - maximum field name length in a structure is 63 characters
        which works for MATLAB 7.6+.
    do_compression : bool, optional
        Whether or not to compress matrices on write. Default is False.
    oned_as : {'row', 'column'}, optional
        If 'column', write 1-D NumPy arrays as column vectors.
        If 'row', write 1-D NumPy arrays as row vectors.

    Examples
    --------
    >>> from scipy.io import savemat
    >>> import numpy as np
    >>> a = np.arange(20)
    >>> mdic = {"a": a, "label": "experiment"}
    >>> mdic
    {'a': array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
        17, 18, 19]),
    'label': 'experiment'}
    >>> savemat("matlab_matrix.mat", mdic)
    """
    with _open_file_context(file_name, appendmat, 'wb') as file_stream:
        if format == '4':
            if long_field_names:
                # MAT4 has no long-name extension; fail early and loudly.
                message = "Long field names are not available for version 4 files"
                raise ValueError(message)
            MW = MatFile4Writer(file_stream, oned_as)
        elif format == '5':
            MW = MatFile5Writer(file_stream,
                                do_compression=do_compression,
                                unicode_strings=True,
                                long_field_names=long_field_names,
                                oned_as=oned_as)
        else:
            raise ValueError("Format should be '4' or '5'")
        MW.put_variables(mdict)


@docfiller
def whosmat(file_name, appendmat=True, **kwargs):
    """
    List variables inside a MATLAB file.

    Parameters
    ----------
    %(file_arg)s
    %(append_arg)s
    %(load_args)s
    %(struct_arg)s

    Returns
    -------
    variables : list of tuples
        A list of tuples, where each tuple holds the matrix name (a string),
        its shape (tuple of ints), and its data class (a string).
        Possible data classes are: int8, uint8, int16, uint16, int32, uint32,
        int64, uint64, single, double, cell, struct, object, char, sparse,
        function, opaque, logical, unknown.

    Notes
    -----
    v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.

    You will need an HDF5 python library to read matlab 7.3 format mat
    files (e.g. h5py). Because SciPy does not supply one, we do not implement the
    HDF5 / 7.3 interface here.

    .. versionadded:: 0.12.0

    Examples
    --------
    >>> from io import BytesIO
    >>> import numpy as np
    >>> from scipy.io import savemat, whosmat

    Create some arrays, and use `savemat` to write them to a ``BytesIO``
    instance.

    >>> a = np.array([[10, 20, 30], [11, 21, 31]], dtype=np.int32)
    >>> b = np.geomspace(1, 10, 5)
    >>> f = BytesIO()
    >>> savemat(f, {'a': a, 'b': b})

    Use `whosmat` to inspect ``f``.  Each tuple in the output list gives
    the name, shape and data type of the array in ``f``.

    >>> whosmat(f)
    [('a', (2, 3), 'int32'), ('b', (1, 5), 'double')]

    """
    with _open_file_context(file_name, appendmat) as f:
        ML, file_opened = mat_reader_factory(f, **kwargs)
        # list_variables() reads only headers, so this never materializes
        # the matrix data.
        variables = ML.list_variables()
    return variables

# --- new file in this patch: scipy/io/matlab/_mio4.py ---
''' Classes for read / write of matlab (TM) 4 files
'''
import sys
import warnings

import numpy as np

import scipy.sparse

from ._miobase import (MatFileReader, docfiller, matdims, read_dtype,
                       convert_dtypes, arr_to_chars, arr_dtype_number)

from ._mio_utils import squeeze_element, chars_to_strings
from functools import reduce


__all__ = [
    'MatFile4Reader', 'MatFile4Writer', 'SYS_LITTLE_ENDIAN',
    'VarHeader4',
    'VarReader4', 'VarWriter4', 'arr_to_2d', 'mclass_info',
    'mdtypes_template', 'miDOUBLE', 'miINT16', 'miINT32', 'miSINGLE',
    'miUINT16', 'miUINT8', 'mxCHAR_CLASS', 'mxFULL_CLASS', 'mxSPARSE_CLASS',
    'np_to_mtypes', 'order_codes'
]


SYS_LITTLE_ENDIAN = sys.byteorder == 'little'

# MAT4 data type codes: the "P" digit of the MOPT header integer.
miDOUBLE = 0
miSINGLE = 1
miINT32 = 2
miINT16 = 3
miUINT16 = 4
miUINT8 = 5

# Map from MAT4 type code to numpy dtype string, plus the fixed layout of
# the 20-byte MAT4 variable header (five little/big-endian int32 fields).
mdtypes_template = {
    miDOUBLE: 'f8',
    miSINGLE: 'f4',
    miINT32: 'i4',
    miINT16: 'i2',
    miUINT16: 'u2',
    miUINT8: 'u1',
    'header': [('mopt', 'i4'),
               ('mrows', 'i4'),
               ('ncols', 'i4'),
               ('imagf', 'i4'),
               ('namlen', 'i4')],
    'U1': 'U1',
    }

# Inverse map used on write: numpy dtype string -> MAT4 type code.
# Complex dtypes map to the real type of their components (imaginary parts
# are written as a second array).
np_to_mtypes = {
    'f8': miDOUBLE,
    'c32': miDOUBLE,
    'c24': miDOUBLE,
    'c16': miDOUBLE,
    'f4': miSINGLE,
    'c8': miSINGLE,
    'i4': miINT32,
    'i2': miINT16,
    'u2': miUINT16,
    'u1': miUINT8,
    'S1': miUINT8,
    }

# matrix classes (the "T" digit of the MOPT integer)
mxFULL_CLASS = 0
mxCHAR_CLASS = 1
mxSPARSE_CLASS = 2

# Byte-order codes (the "M" digit of the MOPT integer). Only 0 and 1
# (little/big endian) are supported; VAX and Cray orders are historical.
order_codes = {
    0: '<',
    1: '>',
    2: 'VAX D-float',  # !
    3: 'VAX G-float',
    4: 'Cray',  # !!
    }

mclass_info = {
    mxFULL_CLASS: 'double',
    mxCHAR_CLASS: 'char',
    mxSPARSE_CLASS: 'sparse',
    }


class VarHeader4:
    '''Lightweight record describing one MAT4 variable header.'''
    # Mat4 variables never logical or global
    is_logical = False
    is_global = False

    def __init__(self,
                 name,
                 dtype,
                 mclass,
                 dims,
                 is_complex):
        self.name = name            # variable name (bytes, NUL-stripped)
        self.dtype = dtype          # numpy dtype of stored data
        self.mclass = mclass        # matrix class code (mxFULL/CHAR/SPARSE)
        self.dims = dims            # (mrows, ncols) from the header
        self.is_complex = is_complex  # True when imagf flag == 1


class VarReader4:
    ''' Class to read matlab 4 variables '''

    def __init__(self, file_reader):
        # Borrow the stream and decode parameters from the owning
        # MatFile4Reader so both objects stay in sync.
        self.file_reader = file_reader
        self.mat_stream = file_reader.mat_stream
        self.dtypes = file_reader.dtypes
        self.chars_as_strings = file_reader.chars_as_strings
        self.squeeze_me = file_reader.squeeze_me

    def read_header(self):
        ''' Read and return header for variable '''
        data = read_dtype(self.mat_stream, self.dtypes['header'])
        name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00')
        if data['mopt'] < 0 or data['mopt'] > 5000:
            raise ValueError('Mat 4 mopt wrong format, byteswapping problem?')
        # Decompose the MOPT integer digit by digit: M=byte order,
        # O=reserved (must be 0), P=data type, T=matrix class.
        M, rest = divmod(data['mopt'], 1000)  # order code
        if M not in (0, 1):
            warnings.warn("We do not support byte ordering '%s'; returned "
                          "data may be corrupt" % order_codes[M],
                          UserWarning, stacklevel=3)
        O, rest = divmod(rest, 100)  # unused, should be 0
        if O != 0:
            raise ValueError('O in MOPT integer should be 0, wrong format?')
        P, rest = divmod(rest, 10)  # data type code e.g miDOUBLE (see above)
        T = rest  # matrix type code e.g., mxFULL_CLASS (see above)
        dims = (data['mrows'], data['ncols'])
        is_complex = data['imagf'] == 1
        dtype = self.dtypes[P]
        return VarHeader4(
            name,
            dtype,
            T,
            dims,
            is_complex)

    def array_from_header(self, hdr, process=True):
        '''Read the variable body described by `hdr`, optionally applying
        chars_as_strings / squeeze_me post-processing.'''
        mclass = hdr.mclass
        if mclass == mxFULL_CLASS:
            arr = self.read_full_array(hdr)
        elif mclass == mxCHAR_CLASS:
            arr = self.read_char_array(hdr)
            if process and self.chars_as_strings:
                arr = chars_to_strings(arr)
        elif mclass == mxSPARSE_CLASS:
            # no current processing (below) makes sense for sparse
            return self.read_sparse_array(hdr)
        else:
            raise TypeError('No reader for class code %s' % mclass)
        if process and self.squeeze_me:
            return squeeze_element(arr)
        return arr

    def read_sub_array(self, hdr, copy=True):
        ''' Mat4 read using header `hdr` dtype and dims

        Parameters
        ----------
        hdr : object
           object with attributes ``dtype``, ``dims``. dtype is assumed to be
           the correct endianness
        copy : bool, optional
           copies array before return if True (default True)
           (buffer is usually read only)

        Returns
        -------
        arr : ndarray
            of dtype given by `hdr` ``dtype`` and shape given by `hdr` ``dims``
        '''
        dt = hdr.dtype
        dims = hdr.dims
        num_bytes = dt.itemsize
        for d in dims:
            num_bytes *= d
        buffer = self.mat_stream.read(int(num_bytes))
        if len(buffer) != num_bytes:
            # Short read means a truncated or corrupt file.
            raise ValueError("Not enough bytes to read matrix '%s'; is this "
                             "a badly-formed file? Consider listing matrices "
                             "with `whosmat` and loading named matrices with "
                             "`variable_names` kwarg to `loadmat`" % hdr.name)
        # MAT files store column-major (Fortran-order) data.
        arr = np.ndarray(shape=dims,
                         dtype=dt,
                         buffer=buffer,
                         order='F')
        if copy:
            arr = arr.copy()
        return arr

    def read_full_array(self, hdr):
        ''' Full (rather than sparse) matrix getter

        Read matrix (array) can be real or complex

        Parameters
        ----------
        hdr : ``VarHeader4`` instance

        Returns
        -------
        arr : ndarray
            complex array if ``hdr.is_complex`` is True, otherwise a real
            numeric array
        '''
        if hdr.is_complex:
            # avoid array copy to save memory; real part precedes the
            # imaginary part in the file.
            res = self.read_sub_array(hdr, copy=False)
            res_j = self.read_sub_array(hdr, copy=False)
            return res + (res_j * 1j)
        return self.read_sub_array(hdr)

    def read_char_array(self, hdr):
        ''' latin-1 text matrix (char matrix) reader

        Parameters
        ----------
        hdr : ``VarHeader4`` instance

        Returns
        -------
        arr : ndarray
            with dtype 'U1', shape given by `hdr` ``dims``
        '''
        # Stored as one byte per char; decode latin-1 then re-expose as a
        # U1 array of the original shape.
        arr = self.read_sub_array(hdr).astype(np.uint8)
        S = arr.tobytes().decode('latin-1')
        return np.ndarray(shape=hdr.dims,
                          dtype=np.dtype('U1'),
                          buffer=np.array(S)).copy()

    def read_sparse_array(self, hdr):
        ''' Read and return sparse matrix type

        Parameters
        ----------
        hdr : ``VarHeader4`` instance

        Returns
        -------
        arr : ``scipy.sparse.coo_matrix``
            with dtype ``float`` and shape read from the sparse matrix data

        Notes
        -----
        MATLAB 4 real sparse arrays are saved in a N+1 by 3 array format, where
        N is the number of non-zero values. Column 1 values [0:N] are the
        (1-based) row indices of the each non-zero value, column 2 [0:N] are the
        column indices, column 3 [0:N] are the (real) values. The last values
        [-1,0:2] of the rows, column indices are shape[0] and shape[1]
        respectively of the output matrix. The last value for the values column
        is a padding 0. mrows and ncols values from the header give the shape of
        the stored matrix, here [N+1, 3]. Complex data are saved as a 4 column
        matrix, where the fourth column contains the imaginary component; the
        last value is again 0. Complex sparse data do *not* have the header
        ``imagf`` field set to True; the fact that the data are complex is only
        detectable because there are 4 storage columns.
        '''
        res = self.read_sub_array(hdr)
        tmp = res[:-1,:]
        # All numbers are float64 in Matlab, but SciPy sparse expects int shape
        dims = (int(res[-1,0]), int(res[-1,1]))
        I = np.ascontiguousarray(tmp[:,0],dtype='intc')  # fixes byte order also
        J = np.ascontiguousarray(tmp[:,1],dtype='intc')
        I -= 1  # for 1-based indexing
        J -= 1
        if res.shape[1] == 3:
            V = np.ascontiguousarray(tmp[:,2],dtype='float')
        else:
            # 4 columns => complex values (see docstring above).
            V = np.ascontiguousarray(tmp[:,2],dtype='complex')
            V.imag = tmp[:,3]
        return scipy.sparse.coo_matrix((V,(I,J)), dims)

    def shape_from_header(self, hdr):
        '''Read the shape of the array described by the header.
        The file position after this call is unspecified.
        '''
        mclass = hdr.mclass
        if mclass == mxFULL_CLASS:
            shape = tuple(map(int, hdr.dims))
        elif mclass == mxCHAR_CLASS:
            shape = tuple(map(int, hdr.dims))
            if self.chars_as_strings:
                # Last axis collapses into the string length.
                shape = shape[:-1]
        elif mclass == mxSPARSE_CLASS:
            dt = hdr.dtype
            dims = hdr.dims

            if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1):
                return ()

            # Read only the row and column counts: these live in the last
            # element of columns 0 and 1 of the stored (N+1, 3) array, so
            # skip forward within the stream instead of reading the data.
            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
            rows = np.ndarray(shape=(), dtype=dt,
                              buffer=self.mat_stream.read(dt.itemsize))
            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
            cols = np.ndarray(shape=(), dtype=dt,
                              buffer=self.mat_stream.read(dt.itemsize))

            shape = (int(rows), int(cols))
        else:
            raise TypeError('No reader for class code %s' % mclass)

        if self.squeeze_me:
            shape = tuple([x for x in shape if x != 1])
        return shape


class MatFile4Reader(MatFileReader):
    ''' Reader for Mat4 files '''
    @docfiller
    def __init__(self, mat_stream, *args, **kwargs):
        ''' Initialize matlab 4 file reader

        %(matstream_arg)s
        %(load_args)s
        '''
        super().__init__(mat_stream, *args, **kwargs)
        self._matrix_reader = None

    def guess_byte_order(self):
        '''Infer byte order from the first header int (the MOPT value).'''
        self.mat_stream.seek(0)
        mopt = read_dtype(self.mat_stream, np.dtype('i4'))
        self.mat_stream.seek(0)
        if mopt == 0:
            return '<'
        if mopt < 0 or mopt > 5000:
            # Number must have been byteswapped
            return SYS_LITTLE_ENDIAN and '>' or '<'
        # Not byteswapped
        return SYS_LITTLE_ENDIAN and '<' or '>'

    def initialize_read(self):
        ''' Run when beginning read of variables

        Sets up readers from parameters in `self`
        '''
        self.dtypes = convert_dtypes(mdtypes_template, self.byte_order)
        self._matrix_reader = VarReader4(self)

    def read_var_header(self):
        ''' Read and return header, next position

        Parameters
        ----------
        None

        Returns
        -------
        header : object
           object that can be passed to self.read_var_array, and that
           has attributes ``name`` and ``is_global``
        next_position : int
           position in stream of next variable
        '''
        hdr = self._matrix_reader.read_header()
        n = reduce(lambda x, y: x*y, hdr.dims, 1)  # fast product
        remaining_bytes = hdr.dtype.itemsize * n
        # Complex full matrices store real and imaginary parts back to back;
        # sparse complex data is instead encoded as a 4th column.
        if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS:
            remaining_bytes *= 2
        next_position = self.mat_stream.tell() + remaining_bytes
        return hdr, next_position

    def read_var_array(self, header, process=True):
        ''' Read array, given `header`

        Parameters
        ----------
        header : header object
           object with fields defining variable header
        process : {True, False}, optional
           If True, apply recursive post-processing during loading of array.

        Returns
        -------
        arr : array
           array with post-processing applied or not according to
           `process`.
        '''
        return self._matrix_reader.array_from_header(header, process)

    def get_variables(self, variable_names=None):
        ''' get variables from stream as dictionary

        Parameters
        ----------
        variable_names : None or str or sequence of str, optional
            variable name, or sequence of variable names to get from Mat file /
            file stream. If None, then get all variables in file.
        '''
        if isinstance(variable_names, str):
            variable_names = [variable_names]
        elif variable_names is not None:
            # Copy so removal below does not mutate the caller's sequence.
            variable_names = list(variable_names)
        self.mat_stream.seek(0)
        # set up variable reader
        self.initialize_read()
        mdict = {}
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
            if variable_names is not None and name not in variable_names:
                # Skip the body of unwanted variables without reading it.
                self.mat_stream.seek(next_position)
                continue
            mdict[name] = self.read_var_array(hdr)
            self.mat_stream.seek(next_position)
            if variable_names is not None:
                variable_names.remove(name)
                if len(variable_names) == 0:
                    break
        return mdict

    def list_variables(self):
        ''' list variables from stream '''
        self.mat_stream.seek(0)
        # set up variable reader
        self.initialize_read()
        vars = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
            shape = self._matrix_reader.shape_from_header(hdr)
            info = mclass_info.get(hdr.mclass, 'unknown')
            vars.append((name, shape, info))

            self.mat_stream.seek(next_position)
        return vars


def arr_to_2d(arr, oned_as='row'):
    ''' Make ``arr`` exactly two dimensional

    If `arr` has more than 2 dimensions, raise a ValueError

    Parameters
    ----------
    arr : array
    oned_as : {'row', 'column'}, optional
       Whether to reshape 1-D vectors as row vectors or column vectors.
       See documentation for ``matdims`` for more detail

    Returns
    -------
    arr2d : array
       2-D version of the array
    '''
    dims = matdims(arr, oned_as)
    if len(dims) > 2:
        raise ValueError('Matlab 4 files cannot save arrays with more than '
                         '2 dimensions')
    return arr.reshape(dims)


class VarWriter4:
    '''Writer for individual MAT4 variables.'''
    def __init__(self, file_writer):
        self.file_stream = file_writer.file_stream
        self.oned_as = file_writer.oned_as

    def write_bytes(self, arr):
        # MAT files are column-major on disk.
        self.file_stream.write(arr.tobytes(order='F'))

    def write_string(self, s):
        self.file_stream.write(s)

    def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0):
        ''' Write header for given data options

        Parameters
        ----------
        name : str
            name of variable
        shape : sequence
           Shape of array as it will be read in matlab
        P : int, optional
            code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32,
            miINT16, miUINT16, miUINT8``
        T : int, optional
            code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS,
            mxSPARSE_CLASS``
        imagf : int, optional
            flag indicating complex
        '''
        header = np.empty((), mdtypes_template['header'])
        # Pack the MOPT digits: M=byte order (native), O=0, P=dtype, T=class.
        M = not SYS_LITTLE_ENDIAN
        O = 0
        header['mopt'] = (M * 1000 +
                          O * 100 +
                          P * 10 +
                          T)
        header['mrows'] = shape[0]
        header['ncols'] = shape[1]
        header['imagf'] = imagf
        header['namlen'] = len(name) + 1
        self.write_bytes(header)
        data = name + '\0'
        self.write_string(data.encode('latin1'))

    def write(self, arr, name):
        ''' Write matrix `arr`, with name `name`

        Parameters
        ----------
        arr : array_like
           array to write
        name : str
           name in matlab workspace
        '''
        # we need to catch sparse first, because np.asarray returns an
        # object array for scipy.sparse
        if scipy.sparse.issparse(arr):
            self.write_sparse(arr, name)
            return
        arr = np.asarray(arr)
        dt = arr.dtype
        if not dt.isnative:
            # Normalize to native byte order before writing.
            arr = arr.astype(dt.newbyteorder('='))
        dtt = dt.type
        if dtt is np.object_:
            raise TypeError('Cannot save object arrays in Mat4')
        elif dtt is np.void:
            raise TypeError('Cannot save void type arrays')
        elif dtt in (np.str_, np.bytes_):
            self.write_char(arr, name)
            return
        self.write_numeric(arr, name)

    def write_numeric(self, arr, name):
        '''Write a numeric (real or complex) array as mxFULL_CLASS.'''
        arr = arr_to_2d(arr, self.oned_as)
        imagf = arr.dtype.kind == 'c'
        try:
            P = np_to_mtypes[arr.dtype.str[1:]]
        except KeyError:
            # Unknown dtype: fall back to double precision.
            if imagf:
                arr = arr.astype('c128')
            else:
                arr = arr.astype('f8')
            P = miDOUBLE
        self.write_header(name,
                          arr.shape,
                          P=P,
                          T=mxFULL_CLASS,
                          imagf=imagf)
        if imagf:
            # Real part first, then imaginary part, per the MAT4 layout.
            self.write_bytes(arr.real)
            self.write_bytes(arr.imag)
        else:
            self.write_bytes(arr)

    def write_char(self, arr, name):
        '''Write a string/char array as mxCHAR_CLASS (latin-1 bytes).'''
        if arr.dtype.type == np.str_ and arr.dtype.itemsize != np.dtype('U1').itemsize:
            arr = arr_to_chars(arr)
        arr = arr_to_2d(arr, self.oned_as)
        dims = arr.shape
        self.write_header(
            name,
            dims,
            P=miUINT8,
            T=mxCHAR_CLASS)
        if arr.dtype.kind == 'U':
            # Recode unicode to latin1
            n_chars = np.prod(dims)
            st_arr = np.ndarray(shape=(),
                                dtype=arr_dtype_number(arr, n_chars),
                                buffer=arr)
            st = st_arr.item().encode('latin-1')
            arr = np.ndarray(shape=dims, dtype='S1', buffer=st)
        self.write_bytes(arr)

    def write_sparse(self, arr, name):
        ''' Sparse matrices are 2-D

        See docstring for VarReader4.read_sparse_array
        '''
        A = arr.tocoo()  # convert to sparse COO format (ijv)
        imagf = A.dtype.kind == 'c'
        # (nnz + 1) rows: last row carries the matrix shape (and a pad 0).
        ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8')
        ijv[:-1,0] = A.row
        ijv[:-1,1] = A.col
        ijv[:-1,0:2] += 1  # 1 based indexing
        if imagf:
            ijv[:-1,2] = A.data.real
            ijv[:-1,3] = A.data.imag
        else:
            ijv[:-1,2] = A.data
        ijv[-1,0:2] = A.shape
        self.write_header(
            name,
            ijv.shape,
            P=miDOUBLE,
            T=mxSPARSE_CLASS)
        self.write_bytes(ijv)


class MatFile4Writer:
    ''' Class for writing matlab 4 format files '''
    def __init__(self, file_stream, oned_as=None):
        self.file_stream = file_stream
        if oned_as is None:
            oned_as = 'row'
        self.oned_as = oned_as
        self._matrix_writer = None

    def put_variables(self, mdict, write_header=None):
        ''' Write variables in `mdict` to stream

        Parameters
        ----------
        mdict : mapping
           mapping with method ``items`` return name, contents pairs
           where ``name`` which will appear in the matlab workspace in
           file load, and ``contents`` is something writeable to a
           matlab file, such as a NumPy array.
        write_header : {None, True, False}
           If True, then write the matlab file header before writing the
           variables. If None (the default) then write the file header
           if we are at position 0 in the stream. By setting False
           here, and setting the stream position to the end of the file,
           you can append variables to a matlab file
        '''
        # there is no header for a matlab 4 mat file, so we ignore the
        # ``write_header`` input argument. It's there for compatibility
        # with the matlab 5 version of this method
        self._matrix_writer = VarWriter4(self)
        for name, var in mdict.items():
            self._matrix_writer.write(var, name)

# --- new file in this patch: scipy/io/matlab/_mio5.py ---
''' Classes for read / write of matlab (TM) 5 files

The matfile specification last found here:

https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf

(as of December 5 2008)

=================================
 Note on functions and mat files
=================================

The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had, therefore, to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.

``mxFUNCTION_CLASS`` stores all types of matlab functions.
It seems to +contain a struct matrix with a set pattern of fields. For anonymous +functions, a sub-fields of one of these fields seems to contain the +well-named ``mxOPAQUE_CLASS``. This seems to contain: + +* array flags as for any matlab matrix +* 3 int8 strings +* a matrix + +It seems that whenever the mat file contains a ``mxOPAQUE_CLASS`` +instance, there is also an un-named matrix (name == '') at the end of +the mat file. I'll call this the ``__function_workspace__`` matrix. + +When I saved two anonymous functions in a mat file, or appended another +anonymous function to the mat file, there was still only one +``__function_workspace__`` un-named matrix at the end, but larger than +that for a mat file with a single anonymous function, suggesting that +the workspaces for the two functions had been merged. + +The ``__function_workspace__`` matrix appears to be of double class +(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in +the format of a mini .mat file, without the first 124 bytes of the file +header (the description and the subsystem_offset), but with the version +U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes, +presumably for 8 byte padding, and then a series of ``miMATRIX`` +entries, as in a standard mat file. The ``miMATRIX`` entries appear to +be series of un-named (name == '') matrices, and may also contain arrays +of this same mini-mat format. + +I guess that: + +* saving an anonymous function back to a mat file will need the + associated ``__function_workspace__`` matrix saved as well for the + anonymous function to work correctly. +* appending to a mat file that has a ``__function_workspace__`` would + involve first pulling off this workspace, appending, checking whether + there were any more anonymous functions appended, and then somehow + merging the relevant workspaces, and saving at the end of the mat + file. 
+ +The mat files I was playing with are in ``tests/data``: + +* sqr.mat +* parabola.mat +* some_functions.mat + +See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging +script I was working with. + +Small fragments of current code adapted from matfile.py by Heiko +Henkelmann; parts of the code for simplify_cells=True adapted from +http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/. +''' + +import os +import time +import sys +import zlib + +from io import BytesIO + +import warnings + +import numpy as np + +import scipy.sparse + +from ._byteordercodes import native_code, swapped_code + +from ._miobase import (MatFileReader, docfiller, matdims, read_dtype, + arr_to_chars, arr_dtype_number, MatWriteError, + MatReadError, MatReadWarning) + +# Reader object for matlab 5 format variables +from ._mio5_utils import VarReader5 + +# Constants and helper objects +from ._mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES, + NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8, + miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS, + mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS, + mxDOUBLE_CLASS, mclass_info, mat_struct) + +from ._streams import ZlibInputStream + + +def _has_struct(elem): + """Determine if elem is an array and if first array item is a struct.""" + return (isinstance(elem, np.ndarray) and (elem.size > 0) and (elem.ndim > 0) and + isinstance(elem[0], mat_struct)) + + +def _inspect_cell_array(ndarray): + """Construct lists from cell arrays (loaded as numpy ndarrays), recursing + into items if they contain mat_struct objects.""" + elem_list = [] + for sub_elem in ndarray: + if isinstance(sub_elem, mat_struct): + elem_list.append(_matstruct_to_dict(sub_elem)) + elif _has_struct(sub_elem): + elem_list.append(_inspect_cell_array(sub_elem)) + else: + elem_list.append(sub_elem) + return elem_list + + +def _matstruct_to_dict(matobj): + """Construct nested dicts from mat_struct objects.""" + d = {} + for f in matobj._fieldnames: + elem = 
        # Recurse into nested structs and struct-bearing cell arrays.
        if isinstance(elem, mat_struct):
            d[f] = _matstruct_to_dict(elem)
        elif _has_struct(elem):
            d[f] = _inspect_cell_array(elem)
        else:
            d[f] = elem
    return d


def _simplify_cells(d):
    """Convert mat objects in dict to nested dicts."""
    for key in d:
        if isinstance(d[key], mat_struct):
            d[key] = _matstruct_to_dict(d[key])
        elif _has_struct(d[key]):
            d[key] = _inspect_cell_array(d[key])
    return d


class MatFile5Reader(MatFileReader):
    ''' Reader for Mat 5 mat files
    Adds the following attribute to base class

    uint16_codec - char codec to use for uint16 char arrays
        (defaults to system default codec)

    Uses variable reader that has the following standard interface (see
    abstract class in ``miobase``::

        __init__(self, file_reader)
        read_header(self)
        array_from_header(self)

    and added interface::

        set_stream(self, stream)
        read_full_tag(self)

    '''
    @docfiller
    def __init__(self,
                 mat_stream,
                 byte_order=None,
                 mat_dtype=False,
                 squeeze_me=False,
                 chars_as_strings=True,
                 matlab_compatible=False,
                 struct_as_record=True,
                 verify_compressed_data_integrity=True,
                 uint16_codec=None,
                 simplify_cells=False):
        '''Initializer for matlab 5 file format reader

        %(matstream_arg)s
        %(load_args)s
        %(struct_arg)s
        uint16_codec : {None, string}
            Set codec to use for uint16 char arrays (e.g., 'utf-8').
            Use system default codec if None
        '''
        super().__init__(
            mat_stream,
            byte_order,
            mat_dtype,
            squeeze_me,
            chars_as_strings,
            matlab_compatible,
            struct_as_record,
            verify_compressed_data_integrity,
            simplify_cells)
        # Set uint16 codec
        if not uint16_codec:
            uint16_codec = sys.getdefaultencoding()
        self.uint16_codec = uint16_codec
        # placeholders for readers - see initialize_read method
        self._file_reader = None
        self._matrix_reader = None

    def guess_byte_order(self):
        ''' Guess byte order.
        Sets stream pointer to 0'''
        # The endian indicator lives at bytes 126-127 of the file header:
        # b'IM' means little-endian, b'MI' big-endian.
        self.mat_stream.seek(126)
        mi = self.mat_stream.read(2)
        self.mat_stream.seek(0)
        return mi == b'IM' and '<' or '>'

    def read_file_header(self):
        ''' Read in mat 5 file header '''
        hdict = {}
        hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
        hdr = read_dtype(self.mat_stream, hdr_dtype)
        hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
        v_major = hdr['version'] >> 8
        v_minor = hdr['version'] & 0xFF
        hdict['__version__'] = '%d.%d' % (v_major, v_minor)
        return hdict

    def initialize_read(self):
        ''' Run when beginning read of variables

        Sets up readers from parameters in `self`
        '''
        # reader for top level stream. We need this extra top-level
        # reader because we use the matrix_reader object to contain
        # compressed matrices (so they have their own stream)
        self._file_reader = VarReader5(self)
        # reader for matrix streams
        self._matrix_reader = VarReader5(self)

    def read_var_header(self):
        ''' Read header, return header, next position

        Header has to define at least .name and .is_global

        Parameters
        ----------
        None

        Returns
        -------
        header : object
           object that can be passed to self.read_var_array, and that
           has attributes .name and .is_global
        next_position : int
           position in stream of next variable
        '''
        mdtype, byte_count = self._file_reader.read_full_tag()
        if not byte_count > 0:
            raise ValueError("Did not read any bytes")
        next_pos = self.mat_stream.tell() + byte_count
        if mdtype == miCOMPRESSED:
            # Make new stream from compressed data
            stream = ZlibInputStream(self.mat_stream, byte_count)
            self._matrix_reader.set_stream(stream)
            check_stream_limit = self.verify_compressed_data_integrity
            mdtype, byte_count = self._matrix_reader.read_full_tag()
        else:
            check_stream_limit = False
            self._matrix_reader.set_stream(self.mat_stream)
        if not mdtype == miMATRIX:
            raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
        header = self._matrix_reader.read_header(check_stream_limit)
        return header, next_pos

    def read_var_array(self, header, process=True):
        ''' Read array, given `header`

        Parameters
        ----------
        header : header object
           object with fields defining variable header
        process : {True, False} bool, optional
           If True, apply recursive post-processing during loading of
           array.

        Returns
        -------
        arr : array
           array with post-processing applied or not according to
           `process`.
        '''
        return self._matrix_reader.array_from_header(header, process)

    def get_variables(self, variable_names=None):
        ''' get variables from stream as dictionary

        variable_names - optional list of variable names to get

        If variable_names is None, then get all variables in file
        '''
        if isinstance(variable_names, str):
            variable_names = [variable_names]
        elif variable_names is not None:
            # Copy so removal below does not mutate the caller's sequence.
            variable_names = list(variable_names)

        self.mat_stream.seek(0)
        # Here we pass all the parameters in self to the reading objects
        self.initialize_read()
        mdict = self.read_file_header()
        mdict['__globals__'] = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
            if name in mdict:
                warnings.warn('Duplicate variable name "%s" in stream'
                              ' - replacing previous with new\n'
                              'Consider mio5.varmats_from_mat to split '
                              'file into single variable files' % name,
                              MatReadWarning, stacklevel=2)
            if name == '':
                # can only be a matlab 7 function workspace
                name = '__function_workspace__'
                # We want to keep this raw because mat_dtype processing
                # will break the format (uint8 as mxDOUBLE_CLASS)
                process = False
            else:
                process = True
            if variable_names is not None and name not in variable_names:
                self.mat_stream.seek(next_position)
                continue
            try:
                res = self.read_var_array(hdr, process)
            except MatReadError as err:
                warnings.warn(
                    f'Unreadable variable "{name}", because "{err}"',
Warning, stacklevel=2) + res = "Read error: %s" % err + self.mat_stream.seek(next_position) + mdict[name] = res + if hdr.is_global: + mdict['__globals__'].append(name) + if variable_names is not None: + variable_names.remove(name) + if len(variable_names) == 0: + break + if self.simplify_cells: + return _simplify_cells(mdict) + else: + return mdict + + def list_variables(self): + ''' list variables from stream ''' + self.mat_stream.seek(0) + # Here we pass all the parameters in self to the reading objects + self.initialize_read() + self.read_file_header() + vars = [] + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = 'None' if hdr.name is None else hdr.name.decode('latin1') + if name == '': + # can only be a matlab 7 function workspace + name = '__function_workspace__' + + shape = self._matrix_reader.shape_from_header(hdr) + if hdr.is_logical: + info = 'logical' + else: + info = mclass_info.get(hdr.mclass, 'unknown') + vars.append((name, shape, info)) + + self.mat_stream.seek(next_position) + return vars + + +def varmats_from_mat(file_obj): + """ Pull variables out of mat 5 file as a sequence of mat file objects + + This can be useful with a difficult mat file, containing unreadable + variables. This routine pulls the variables out in raw form and puts them, + unread, back into a file stream for saving or reading. Another use is the + pathological case where there is more than one variable of the same name in + the file; this routine returns the duplicates, whereas the standard reader + will overwrite duplicates in the returned dictionary. + + The file pointer in `file_obj` will be undefined. File pointers for the + returned file-like objects are set at 0. + + Parameters + ---------- + file_obj : file-like + file object containing mat file + + Returns + ------- + named_mats : list + list contains tuples of (name, BytesIO) where BytesIO is a file-like + object containing mat file contents as for a single variable. 
The + BytesIO contains a string with the original header and a single var. If + ``var_file_obj`` is an individual BytesIO instance, then save as a mat + file with something like ``open('test.mat', + 'wb').write(var_file_obj.read())`` + + Examples + -------- + >>> import scipy.io + >>> import numpy as np + >>> from io import BytesIO + >>> from scipy.io.matlab._mio5 import varmats_from_mat + >>> mat_fileobj = BytesIO() + >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'}) + >>> varmats = varmats_from_mat(mat_fileobj) + >>> sorted([name for name, str_obj in varmats]) + ['a', 'b'] + """ + rdr = MatFile5Reader(file_obj) + file_obj.seek(0) + # Raw read of top-level file header + hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize + raw_hdr = file_obj.read(hdr_len) + # Initialize variable reading + file_obj.seek(0) + rdr.initialize_read() + rdr.read_file_header() + next_position = file_obj.tell() + named_mats = [] + while not rdr.end_of_stream(): + start_position = next_position + hdr, next_position = rdr.read_var_header() + name = 'None' if hdr.name is None else hdr.name.decode('latin1') + # Read raw variable string + file_obj.seek(start_position) + byte_count = next_position - start_position + var_str = file_obj.read(byte_count) + # write to stringio object + out_obj = BytesIO() + out_obj.write(raw_hdr) + out_obj.write(var_str) + out_obj.seek(0) + named_mats.append((name, out_obj)) + return named_mats + + +class EmptyStructMarker: + """ Class to indicate presence of empty matlab struct on output """ + + +def to_writeable(source): + ''' Convert input object ``source`` to something we can write + + Parameters + ---------- + source : object + + Returns + ------- + arr : None or ndarray or EmptyStructMarker + If `source` cannot be converted to something we can write to a matfile, + return None. If `source` is equivalent to an empty dictionary, return + ``EmptyStructMarker``. 
Otherwise return `source` converted to an + ndarray with contents for writing to matfile. + ''' + if isinstance(source, np.ndarray): + return source + if source is None: + return None + if hasattr(source, "__array__"): + return np.asarray(source) + # Objects that implement mappings + is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and + hasattr(source, 'items')) + # Objects that don't implement mappings, but do have dicts + if isinstance(source, np.generic): + # NumPy scalars are never mappings (PyPy issue workaround) + pass + elif not is_mapping and hasattr(source, '__dict__'): + source = {key: value for key, value in source.__dict__.items() + if not key.startswith('_')} + is_mapping = True + if is_mapping: + dtype = [] + values = [] + for field, value in source.items(): + if (isinstance(field, str) and + field[0] not in '_0123456789'): + dtype.append((str(field), object)) + values.append(value) + if dtype: + return np.array([tuple(values)], dtype) + else: + return EmptyStructMarker + # Next try and convert to an array + try: + narr = np.asanyarray(source) + except ValueError: + narr = np.asanyarray(source, dtype=object) + if narr.dtype.type in (object, np.object_) and \ + narr.shape == () and narr == source: + # No interesting conversion possible + return None + return narr + + +# Native byte ordered dtypes for convenience for writers +NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header'] +NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full'] +NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata'] +NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags'] + + +class VarWriter5: + ''' Generic matlab matrix writing class ''' + mat_tag = np.zeros((), NDT_TAG_FULL) + mat_tag['mdtype'] = miMATRIX + + def __init__(self, file_writer): + self.file_stream = file_writer.file_stream + self.unicode_strings = file_writer.unicode_strings + self.long_field_names = file_writer.long_field_names + self.oned_as = file_writer.oned_as + # 
These are used for top level writes, and unset after + self._var_name = None + self._var_is_global = False + + def write_bytes(self, arr): + self.file_stream.write(arr.tobytes(order='F')) + + def write_string(self, s): + self.file_stream.write(s) + + def write_element(self, arr, mdtype=None): + ''' write tag and data ''' + if mdtype is None: + mdtype = NP_TO_MTYPES[arr.dtype.str[1:]] + # Array needs to be in native byte order + if arr.dtype.byteorder == swapped_code: + arr = arr.byteswap().view(arr.dtype.newbyteorder()) + byte_count = arr.size*arr.itemsize + if byte_count <= 4: + self.write_smalldata_element(arr, mdtype, byte_count) + else: + self.write_regular_element(arr, mdtype, byte_count) + + def write_smalldata_element(self, arr, mdtype, byte_count): + # write tag with embedded data + tag = np.zeros((), NDT_TAG_SMALL) + tag['byte_count_mdtype'] = (byte_count << 16) + mdtype + # if arr.tobytes is < 4, the element will be zero-padded as needed. + tag['data'] = arr.tobytes(order='F') + self.write_bytes(tag) + + def write_regular_element(self, arr, mdtype, byte_count): + # write tag, data + tag = np.zeros((), NDT_TAG_FULL) + tag['mdtype'] = mdtype + tag['byte_count'] = byte_count + self.write_bytes(tag) + self.write_bytes(arr) + # pad to next 64-bit boundary + bc_mod_8 = byte_count % 8 + if bc_mod_8: + self.file_stream.write(b'\x00' * (8-bc_mod_8)) + + def write_header(self, + shape, + mclass, + is_complex=False, + is_logical=False, + nzmax=0): + ''' Write header for given data options + shape : sequence + array shape + mclass - mat5 matrix class + is_complex - True if matrix is complex + is_logical - True if matrix is logical + nzmax - max non zero elements for sparse arrays + + We get the name and the global flag from the object, and reset + them to defaults after we've used them + ''' + # get name and is_global from one-shot object store + name = self._var_name + is_global = self._var_is_global + # initialize the top-level matrix tag, store position + 
self._mat_tag_pos = self.file_stream.tell() + self.write_bytes(self.mat_tag) + # write array flags (complex, global, logical, class, nzmax) + af = np.zeros((), NDT_ARRAY_FLAGS) + af['data_type'] = miUINT32 + af['byte_count'] = 8 + flags = is_complex << 3 | is_global << 2 | is_logical << 1 + af['flags_class'] = mclass | flags << 8 + af['nzmax'] = nzmax + self.write_bytes(af) + # shape + self.write_element(np.array(shape, dtype='i4')) + # write name + name = np.asarray(name) + if name == '': # empty string zero-terminated + self.write_smalldata_element(name, miINT8, 0) + else: + self.write_element(name, miINT8) + # reset the one-shot store to defaults + self._var_name = '' + self._var_is_global = False + + def update_matrix_tag(self, start_pos): + curr_pos = self.file_stream.tell() + self.file_stream.seek(start_pos) + byte_count = curr_pos - start_pos - 8 + if byte_count >= 2**32: + raise MatWriteError("Matrix too large to save with Matlab " + "5 format") + self.mat_tag['byte_count'] = byte_count + self.write_bytes(self.mat_tag) + self.file_stream.seek(curr_pos) + + def write_top(self, arr, name, is_global): + """ Write variable at top level of mat file + + Parameters + ---------- + arr : array_like + array-like object to create writer for + name : str, optional + name as it will appear in matlab workspace + default is empty string + is_global : {False, True}, optional + whether variable will be global on load into matlab + """ + # these are set before the top-level header write, and unset at + # the end of the same write, because they do not apply for lower levels + self._var_is_global = is_global + self._var_name = name + # write the header and data + self.write(arr) + + def write(self, arr): + ''' Write `arr` to stream at top and sub levels + + Parameters + ---------- + arr : array_like + array-like object to create writer for + ''' + # store position, so we can update the matrix tag + mat_tag_pos = self.file_stream.tell() + # First check if these are sparse + if 
scipy.sparse.issparse(arr): + self.write_sparse(arr) + self.update_matrix_tag(mat_tag_pos) + return + # Try to convert things that aren't arrays + narr = to_writeable(arr) + if narr is None: + raise TypeError(f'Could not convert {arr} (type {type(arr)}) to array') + if isinstance(narr, MatlabObject): + self.write_object(narr) + elif isinstance(narr, MatlabFunction): + raise MatWriteError('Cannot write matlab functions') + elif narr is EmptyStructMarker: # empty struct array + self.write_empty_struct() + elif narr.dtype.fields: # struct array + self.write_struct(narr) + elif narr.dtype.hasobject: # cell array + self.write_cells(narr) + elif narr.dtype.kind in ('U', 'S'): + if self.unicode_strings: + codec = 'UTF8' + else: + codec = 'ascii' + self.write_char(narr, codec) + else: + self.write_numeric(narr) + self.update_matrix_tag(mat_tag_pos) + + def write_numeric(self, arr): + imagf = arr.dtype.kind == 'c' + logif = arr.dtype.kind == 'b' + try: + mclass = NP_TO_MXTYPES[arr.dtype.str[1:]] + except KeyError: + # No matching matlab type, probably complex256 / float128 / float96 + # Cast data to complex128 / float64. + if imagf: + arr = arr.astype('c128') + elif logif: + arr = arr.astype('i1') # Should only contain 0/1 + else: + arr = arr.astype('f8') + mclass = mxDOUBLE_CLASS + self.write_header(matdims(arr, self.oned_as), + mclass, + is_complex=imagf, + is_logical=logif) + if imagf: + self.write_element(arr.real) + self.write_element(arr.imag) + else: + self.write_element(arr) + + def write_char(self, arr, codec='ascii'): + ''' Write string array `arr` with given `codec` + ''' + if arr.size == 0 or np.all(arr == ''): + # This an empty string array or a string array containing + # only empty strings. Matlab cannot distinguish between a + # string array that is empty, and a string array containing + # only empty strings, because it stores strings as arrays of + # char. There is no way of having an array of char that is + # not empty, but contains an empty string. 
We have to + # special-case the array-with-empty-strings because even + # empty strings have zero padding, which would otherwise + # appear in matlab as a string with a space. + shape = (0,) * np.max([arr.ndim, 2]) + self.write_header(shape, mxCHAR_CLASS) + self.write_smalldata_element(arr, miUTF8, 0) + return + # non-empty string. + # + # Convert to char array + arr = arr_to_chars(arr) + # We have to write the shape directly, because we are going + # recode the characters, and the resulting stream of chars + # may have a different length + shape = arr.shape + self.write_header(shape, mxCHAR_CLASS) + if arr.dtype.kind == 'U' and arr.size: + # Make one long string from all the characters. We need to + # transpose here, because we're flattening the array, before + # we write the bytes. The bytes have to be written in + # Fortran order. + n_chars = np.prod(shape) + st_arr = np.ndarray(shape=(), + dtype=arr_dtype_number(arr, n_chars), + buffer=arr.T.copy()) # Fortran order + # Recode with codec to give byte string + st = st_arr.item().encode(codec) + # Reconstruct as 1-D byte array + arr = np.ndarray(shape=(len(st),), + dtype='S1', + buffer=st) + self.write_element(arr, mdtype=miUTF8) + + def write_sparse(self, arr): + ''' Sparse matrices are 2D + ''' + A = arr.tocsc() # convert to sparse CSC format + A.sort_indices() # MATLAB expects sorted row indices + is_complex = (A.dtype.kind == 'c') + is_logical = (A.dtype.kind == 'b') + nz = A.nnz + self.write_header(matdims(arr, self.oned_as), + mxSPARSE_CLASS, + is_complex=is_complex, + is_logical=is_logical, + # matlab won't load file with 0 nzmax + nzmax=1 if nz == 0 else nz) + self.write_element(A.indices.astype('i4')) + self.write_element(A.indptr.astype('i4')) + self.write_element(A.data.real) + if is_complex: + self.write_element(A.data.imag) + + def write_cells(self, arr): + self.write_header(matdims(arr, self.oned_as), + mxCELL_CLASS) + # loop over data, column major + A = np.atleast_2d(arr).flatten('F') + for el in 
A: + self.write(el) + + def write_empty_struct(self): + self.write_header((1, 1), mxSTRUCT_CLASS) + # max field name length set to 1 in an example matlab struct + self.write_element(np.array(1, dtype=np.int32)) + # Field names element is empty + self.write_element(np.array([], dtype=np.int8)) + + def write_struct(self, arr): + self.write_header(matdims(arr, self.oned_as), + mxSTRUCT_CLASS) + self._write_items(arr) + + def _write_items(self, arr): + # write fieldnames + fieldnames = [f[0] for f in arr.dtype.descr] + length = max([len(fieldname) for fieldname in fieldnames])+1 + max_length = (self.long_field_names and 64) or 32 + if length > max_length: + raise ValueError("Field names are restricted to %d characters" % + (max_length-1)) + self.write_element(np.array([length], dtype='i4')) + self.write_element( + np.array(fieldnames, dtype='S%d' % (length)), + mdtype=miINT8) + A = np.atleast_2d(arr).flatten('F') + for el in A: + for f in fieldnames: + self.write(el[f]) + + def write_object(self, arr): + '''Same as writing structs, except different mx class, and extra + classname element after header + ''' + self.write_header(matdims(arr, self.oned_as), + mxOBJECT_CLASS) + self.write_element(np.array(arr.classname, dtype='S'), + mdtype=miINT8) + self._write_items(arr) + + +class MatFile5Writer: + ''' Class for writing mat5 files ''' + + @docfiller + def __init__(self, file_stream, + do_compression=False, + unicode_strings=False, + global_vars=None, + long_field_names=False, + oned_as='row'): + ''' Initialize writer for matlab 5 format files + + Parameters + ---------- + %(do_compression)s + %(unicode_strings)s + global_vars : None or sequence of strings, optional + Names of variables to be marked as global for matlab + %(long_fields)s + %(oned_as)s + ''' + self.file_stream = file_stream + self.do_compression = do_compression + self.unicode_strings = unicode_strings + if global_vars: + self.global_vars = global_vars + else: + self.global_vars = [] + 
self.long_field_names = long_field_names + self.oned_as = oned_as + self._matrix_writer = None + + def write_file_header(self): + # write header + hdr = np.zeros((), NDT_FILE_HDR) + hdr['description'] = (f'MATLAB 5.0 MAT-file Platform: {os.name}, ' + f'Created on: {time.asctime()}') + hdr['version'] = 0x0100 + hdr['endian_test'] = np.ndarray(shape=(), + dtype='S2', + buffer=np.uint16(0x4d49)) + self.file_stream.write(hdr.tobytes()) + + def put_variables(self, mdict, write_header=None): + ''' Write variables in `mdict` to stream + + Parameters + ---------- + mdict : mapping + mapping with method ``items`` returns name, contents pairs where + ``name`` which will appear in the matlab workspace in file load, and + ``contents`` is something writeable to a matlab file, such as a NumPy + array. + write_header : {None, True, False}, optional + If True, then write the matlab file header before writing the + variables. If None (the default) then write the file header + if we are at position 0 in the stream. 
By setting False + here, and setting the stream position to the end of the file, + you can append variables to a matlab file + ''' + # write header if requested, or None and start of file + if write_header is None: + write_header = self.file_stream.tell() == 0 + if write_header: + self.write_file_header() + self._matrix_writer = VarWriter5(self) + for name, var in mdict.items(): + if name[0] == '_': + continue + is_global = name in self.global_vars + if self.do_compression: + stream = BytesIO() + self._matrix_writer.file_stream = stream + self._matrix_writer.write_top(var, name.encode('latin1'), is_global) + out_str = zlib.compress(stream.getvalue()) + tag = np.empty((), NDT_TAG_FULL) + tag['mdtype'] = miCOMPRESSED + tag['byte_count'] = len(out_str) + self.file_stream.write(tag.tobytes()) + self.file_stream.write(out_str) + else: # not compressing + self._matrix_writer.write_top(var, name.encode('latin1'), is_global) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio5_params.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio5_params.py new file mode 100644 index 0000000000000000000000000000000000000000..0d60b8e7a4a2dd1e6a336139f67ce984743e27bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio5_params.py @@ -0,0 +1,281 @@ +''' Constants and classes for matlab 5 read and write + +See also mio5_utils.pyx where these same constants arise as c enums. 
+ +If you make changes in this file, don't forget to change mio5_utils.pyx +''' +import numpy as np + +from ._miobase import convert_dtypes + + +__all__ = [ + 'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque', + 'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template', + 'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template', + 'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8', + 'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8', + 'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS', + 'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS', + 'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS', + 'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS', + 'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS', + 'mxUINT64_CLASS', 'mxUINT8_CLASS' +] +miINT8 = 1 +miUINT8 = 2 +miINT16 = 3 +miUINT16 = 4 +miINT32 = 5 +miUINT32 = 6 +miSINGLE = 7 +miDOUBLE = 9 +miINT64 = 12 +miUINT64 = 13 +miMATRIX = 14 +miCOMPRESSED = 15 +miUTF8 = 16 +miUTF16 = 17 +miUTF32 = 18 + +mxCELL_CLASS = 1 +mxSTRUCT_CLASS = 2 +# The March 2008 edition of "Matlab 7 MAT-File Format" says that +# mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3. +# Matlab 2008a appears to save logicals as type 9, so we assume that +# the document is correct. See type 18, below. +mxOBJECT_CLASS = 3 +mxCHAR_CLASS = 4 +mxSPARSE_CLASS = 5 +mxDOUBLE_CLASS = 6 +mxSINGLE_CLASS = 7 +mxINT8_CLASS = 8 +mxUINT8_CLASS = 9 +mxINT16_CLASS = 10 +mxUINT16_CLASS = 11 +mxINT32_CLASS = 12 +mxUINT32_CLASS = 13 +# The following are not in the March 2008 edition of "Matlab 7 +# MAT-File Format," but were guessed from matrix.h. +mxINT64_CLASS = 14 +mxUINT64_CLASS = 15 +mxFUNCTION_CLASS = 16 +# Not doing anything with these at the moment. 
+mxOPAQUE_CLASS = 17 # This appears to be a function workspace +# Thread 'saving/loading symbol table of annymous functions', +# octave-maintainers, April-May 2007 +# https://lists.gnu.org/archive/html/octave-maintainers/2007-04/msg00031.html +# https://lists.gnu.org/archive/html/octave-maintainers/2007-05/msg00032.html +# (Was/Deprecated: https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html) +mxOBJECT_CLASS_FROM_MATRIX_H = 18 + +mdtypes_template = { + miINT8: 'i1', + miUINT8: 'u1', + miINT16: 'i2', + miUINT16: 'u2', + miINT32: 'i4', + miUINT32: 'u4', + miSINGLE: 'f4', + miDOUBLE: 'f8', + miINT64: 'i8', + miUINT64: 'u8', + miUTF8: 'u1', + miUTF16: 'u2', + miUTF32: 'u4', + 'file_header': [('description', 'S116'), + ('subsystem_offset', 'i8'), + ('version', 'u2'), + ('endian_test', 'S2')], + 'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')], + 'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')], + 'array_flags': [('data_type', 'u4'), + ('byte_count', 'u4'), + ('flags_class','u4'), + ('nzmax', 'u4')], + 'U1': 'U1', + } + +mclass_dtypes_template = { + mxINT8_CLASS: 'i1', + mxUINT8_CLASS: 'u1', + mxINT16_CLASS: 'i2', + mxUINT16_CLASS: 'u2', + mxINT32_CLASS: 'i4', + mxUINT32_CLASS: 'u4', + mxINT64_CLASS: 'i8', + mxUINT64_CLASS: 'u8', + mxSINGLE_CLASS: 'f4', + mxDOUBLE_CLASS: 'f8', + } + +mclass_info = { + mxINT8_CLASS: 'int8', + mxUINT8_CLASS: 'uint8', + mxINT16_CLASS: 'int16', + mxUINT16_CLASS: 'uint16', + mxINT32_CLASS: 'int32', + mxUINT32_CLASS: 'uint32', + mxINT64_CLASS: 'int64', + mxUINT64_CLASS: 'uint64', + mxSINGLE_CLASS: 'single', + mxDOUBLE_CLASS: 'double', + mxCELL_CLASS: 'cell', + mxSTRUCT_CLASS: 'struct', + mxOBJECT_CLASS: 'object', + mxCHAR_CLASS: 'char', + mxSPARSE_CLASS: 'sparse', + mxFUNCTION_CLASS: 'function', + mxOPAQUE_CLASS: 'opaque', + } + +NP_TO_MTYPES = { + 'f8': miDOUBLE, + 'c32': miDOUBLE, + 'c24': miDOUBLE, + 'c16': miDOUBLE, + 'f4': miSINGLE, + 'c8': miSINGLE, + 'i8': miINT64, + 'i4': miINT32, + 'i2': 
miINT16, + 'i1': miINT8, + 'u8': miUINT64, + 'u4': miUINT32, + 'u2': miUINT16, + 'u1': miUINT8, + 'S1': miUINT8, + 'U1': miUTF16, + 'b1': miUINT8, # not standard but seems MATLAB uses this (gh-4022) + } + + +NP_TO_MXTYPES = { + 'f8': mxDOUBLE_CLASS, + 'c32': mxDOUBLE_CLASS, + 'c24': mxDOUBLE_CLASS, + 'c16': mxDOUBLE_CLASS, + 'f4': mxSINGLE_CLASS, + 'c8': mxSINGLE_CLASS, + 'i8': mxINT64_CLASS, + 'i4': mxINT32_CLASS, + 'i2': mxINT16_CLASS, + 'i1': mxINT8_CLASS, + 'u8': mxUINT64_CLASS, + 'u4': mxUINT32_CLASS, + 'u2': mxUINT16_CLASS, + 'u1': mxUINT8_CLASS, + 'S1': mxUINT8_CLASS, + 'b1': mxUINT8_CLASS, # not standard but seems MATLAB uses this + } + +''' Before release v7.1 (release 14) matlab (TM) used the system +default character encoding scheme padded out to 16-bits. Release 14 +and later use Unicode. When saving character data, R14 checks if it +can be encoded in 7-bit ascii, and saves in that format if so.''' + +codecs_template = { + miUTF8: {'codec': 'utf_8', 'width': 1}, + miUTF16: {'codec': 'utf_16', 'width': 2}, + miUTF32: {'codec': 'utf_32','width': 4}, + } + + +def _convert_codecs(template, byte_order): + ''' Convert codec template mapping to byte order + + Set codecs not on this system to None + + Parameters + ---------- + template : mapping + key, value are respectively codec name, and root name for codec + (without byte order suffix) + byte_order : {'<', '>'} + code for little or big endian + + Returns + ------- + codecs : dict + key, value are name, codec (as in .encode(codec)) + ''' + codecs = {} + postfix = byte_order == '<' and '_le' or '_be' + for k, v in template.items(): + codec = v['codec'] + try: + " ".encode(codec) + except LookupError: + codecs[k] = None + continue + if v['width'] > 1: + codec += postfix + codecs[k] = codec + return codecs.copy() + + +MDTYPES = {} +for _bytecode in '<>': + _def = {'dtypes': convert_dtypes(mdtypes_template, _bytecode), + 'classes': convert_dtypes(mclass_dtypes_template, _bytecode), + 'codecs': 
_convert_codecs(codecs_template, _bytecode)} + MDTYPES[_bytecode] = _def + + +class mat_struct: + """Placeholder for holding read data from structs. + + We use instances of this class when the user passes False as a value to the + ``struct_as_record`` parameter of the :func:`scipy.io.loadmat` function. + """ + pass + + +class MatlabObject(np.ndarray): + """Subclass of ndarray to signal this is a matlab object. + + This is a simple subclass of :class:`numpy.ndarray` meant to be used + by :func:`scipy.io.loadmat` and should not be instantiated directly. + """ + + def __new__(cls, input_array, classname=None): + # Input array is an already formed ndarray instance + # We first cast to be our class type + obj = np.asarray(input_array).view(cls) + # add the new attribute to the created instance + obj.classname = classname + # Finally, we must return the newly created object: + return obj + + def __array_finalize__(self,obj): + # reset the attribute from passed original object + self.classname = getattr(obj, 'classname', None) + # We do not need to return anything + + +class MatlabFunction(np.ndarray): + """Subclass for a MATLAB function. + + This is a simple subclass of :class:`numpy.ndarray` meant to be used + by :func:`scipy.io.loadmat` and should not be directly instantiated. + """ + + def __new__(cls, input_array): + obj = np.asarray(input_array).view(cls) + return obj + + +class MatlabOpaque(np.ndarray): + """Subclass for a MATLAB opaque matrix. + + This is a simple subclass of :class:`numpy.ndarray` meant to be used + by :func:`scipy.io.loadmat` and should not be directly instantiated. 
+ """ + + def __new__(cls, input_array): + obj = np.asarray(input_array).view(cls) + return obj + + +OPAQUE_DTYPE = np.dtype( + [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')]) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio5_utils.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio5_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a64dd7f74f74dcf45c35cdb99f88500e7e63a1cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio5_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ab6f31e9e558c760b6664ef2594159dabbd1c47d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_miobase.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_miobase.py new file mode 100644 index 0000000000000000000000000000000000000000..a6a21c33aa3eddef8345c1df94ecd787aa64830d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_miobase.py @@ -0,0 +1,429 @@ +# Authors: Travis Oliphant, Matthew Brett + +""" +Base classes for MATLAB file stream reading. + +MATLAB is a registered trademark of the Mathworks inc. +""" + +import numpy as np +from scipy._lib import doccer + +from . 
import _byteordercodes as boc + +__all__ = [ + 'MatFileReader', 'MatReadError', 'MatReadWarning', + 'MatVarReader', 'MatWriteError', 'arr_dtype_number', + 'arr_to_chars', 'convert_dtypes', 'doc_dict', + 'docfiller', 'get_matfile_version', + 'matdims', 'read_dtype' +] + +class MatReadError(Exception): + """Exception indicating a read issue.""" + + +class MatWriteError(Exception): + """Exception indicating a write issue.""" + + +class MatReadWarning(UserWarning): + """Warning class for read issues.""" + + +doc_dict = \ + {'file_arg': + '''file_name : str + Name of the mat file (do not need .mat extension if + appendmat==True) Can also pass open file-like object.''', + 'append_arg': + '''appendmat : bool, optional + True to append the .mat extension to the end of the given + filename, if not already present. Default is True.''', + 'load_args': + '''byte_order : str or None, optional + None by default, implying byte order guessed from mat + file. Otherwise can be one of ('native', '=', 'little', '<', + 'BIG', '>'). +mat_dtype : bool, optional + If True, return arrays in same dtype as would be loaded into + MATLAB (instead of the dtype with which they are saved). +squeeze_me : bool, optional + Whether to squeeze unit matrix dimensions or not. +chars_as_strings : bool, optional + Whether to convert char arrays to string arrays. +matlab_compatible : bool, optional + Returns matrices as would be loaded by MATLAB (implies + squeeze_me=False, chars_as_strings=False, mat_dtype=True, + struct_as_record=True).''', + 'struct_arg': + '''struct_as_record : bool, optional + Whether to load MATLAB structs as NumPy record arrays, or as + old-style NumPy arrays with dtype=object. Setting this flag to + False replicates the behavior of SciPy version 0.7.x (returning + numpy object arrays). 
The default setting is True, because it + allows easier round-trip load and save of MATLAB files.''', + 'matstream_arg': + '''mat_stream : file-like + Object with file API, open for reading.''', + 'long_fields': + '''long_field_names : bool, optional + * False - maximum field name length in a structure is 31 characters + which is the documented maximum length. This is the default. + * True - maximum field name length in a structure is 63 characters + which works for MATLAB 7.6''', + 'do_compression': + '''do_compression : bool, optional + Whether to compress matrices on write. Default is False.''', + 'oned_as': + '''oned_as : {'row', 'column'}, optional + If 'column', write 1-D NumPy arrays as column vectors. + If 'row', write 1D NumPy arrays as row vectors.''', + 'unicode_strings': + '''unicode_strings : bool, optional + If True, write strings as Unicode, else MATLAB usual encoding.'''} + +docfiller = doccer.filldoc(doc_dict) + +''' + + Note on architecture +====================== + +There are three sets of parameters relevant for reading files. The +first are *file read parameters* - containing options that are common +for reading the whole file, and therefore every variable within that +file. At the moment these are: + +* mat_stream +* dtypes (derived from byte code) +* byte_order +* chars_as_strings +* squeeze_me +* struct_as_record (MATLAB 5 files) +* class_dtypes (derived from order code, MATLAB 5 files) +* codecs (MATLAB 5 files) +* uint16_codec (MATLAB 5 files) + +Another set of parameters are those that apply only to the current +variable being read - the *header*: + +* header related variables (different for v4 and v5 mat files) +* is_complex +* mclass +* var_stream + +With the header, we need ``next_position`` to tell us where the next +variable in the stream is. + +Then, for each element in a matrix, there can be *element read +parameters*. An element is, for example, one element in a MATLAB cell +array. 
At the moment, these are: + +* mat_dtype + +The file-reading object contains the *file read parameters*. The +*header* is passed around as a data object, or may be read and discarded +in a single function. The *element read parameters* - the mat_dtype in +this instance, is passed into a general post-processing function - see +``mio_utils`` for details. +''' + + +def convert_dtypes(dtype_template, order_code): + ''' Convert dtypes in mapping to given order + + Parameters + ---------- + dtype_template : mapping + mapping with values returning numpy dtype from ``np.dtype(val)`` + order_code : str + an order code suitable for using in ``dtype.newbyteorder()`` + + Returns + ------- + dtypes : mapping + mapping where values have been replaced by + ``np.dtype(val).newbyteorder(order_code)`` + + ''' + dtypes = dtype_template.copy() + for k in dtypes: + dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code) + return dtypes + + +def read_dtype(mat_stream, a_dtype): + """ + Generic get of byte stream data of known type + + Parameters + ---------- + mat_stream : file_like object + MATLAB (tm) mat file stream + a_dtype : dtype + dtype of array to read. `a_dtype` is assumed to be correct + endianness. + + Returns + ------- + arr : ndarray + Array of dtype `a_dtype` read from stream. + + """ + num_bytes = a_dtype.itemsize + arr = np.ndarray(shape=(), + dtype=a_dtype, + buffer=mat_stream.read(num_bytes), + order='F') + return arr + + +def matfile_version(file_name, *, appendmat=True): + """ + Return major, minor tuple depending on apparent mat file type + + Where: + + #. 0,x -> version 4 format mat files + #. 1,x -> version 5 format mat files + #. 2,x -> version 7.3 format mat files (HDF format) + + Parameters + ---------- + file_name : str + Name of the mat file (do not need .mat extension if + appendmat==True). Can also pass open file-like object. + appendmat : bool, optional + True to append the .mat extension to the end of the given + filename, if not already present. 
Default is True. + + Returns + ------- + major_version : {0, 1, 2} + major MATLAB File format version + minor_version : int + minor MATLAB file format version + + Raises + ------ + MatReadError + If the file is empty. + ValueError + The matfile version is unknown. + + Notes + ----- + Has the side effect of setting the file read pointer to 0 + """ + from ._mio import _open_file_context + with _open_file_context(file_name, appendmat=appendmat) as fileobj: + return _get_matfile_version(fileobj) + + +get_matfile_version = matfile_version + + +def _get_matfile_version(fileobj): + # Mat4 files have a zero somewhere in first 4 bytes + fileobj.seek(0) + mopt_bytes = fileobj.read(4) + if len(mopt_bytes) == 0: + raise MatReadError("Mat file appears to be empty") + mopt_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=mopt_bytes) + if 0 in mopt_ints: + fileobj.seek(0) + return (0,0) + # For 5 format or 7.3 format we need to read an integer in the + # header. Bytes 124 through 128 contain a version integer and an + # endian test string + fileobj.seek(124) + tst_str = fileobj.read(4) + fileobj.seek(0) + maj_ind = int(tst_str[2] == b'I'[0]) + maj_val = int(tst_str[maj_ind]) + min_val = int(tst_str[1 - maj_ind]) + ret = (maj_val, min_val) + if maj_val in (1, 2): + return ret + raise ValueError('Unknown mat file type, version {}, {}'.format(*ret)) + + +def matdims(arr, oned_as='column'): + """ + Determine equivalent MATLAB dimensions for given array + + Parameters + ---------- + arr : ndarray + Input array + oned_as : {'column', 'row'}, optional + Whether 1-D arrays are returned as MATLAB row or column matrices. + Default is 'column'. + + Returns + ------- + dims : tuple + Shape tuple, in the form MATLAB expects it. + + Notes + ----- + We had to decide what shape a 1 dimensional array would be by + default. ``np.atleast_2d`` thinks it is a row vector. The + default for a vector in MATLAB (e.g., ``>> 1:12``) is a row vector. 
+ + Versions of scipy up to and including 0.11 resulted (accidentally) + in 1-D arrays being read as column vectors. For the moment, we + maintain the same tradition here. + + Examples + -------- + >>> import numpy as np + >>> from scipy.io.matlab._miobase import matdims + >>> matdims(np.array(1)) # NumPy scalar + (1, 1) + >>> matdims(np.array([1])) # 1-D array, 1 element + (1, 1) + >>> matdims(np.array([1,2])) # 1-D array, 2 elements + (2, 1) + >>> matdims(np.array([[2],[3]])) # 2-D array, column vector + (2, 1) + >>> matdims(np.array([[2,3]])) # 2-D array, row vector + (1, 2) + >>> matdims(np.array([[[2,3]]])) # 3-D array, rowish vector + (1, 1, 2) + >>> matdims(np.array([])) # empty 1-D array + (0, 0) + >>> matdims(np.array([[]])) # empty 2-D array + (0, 0) + >>> matdims(np.array([[[]]])) # empty 3-D array + (0, 0, 0) + + Optional argument flips 1-D shape behavior. + + >>> matdims(np.array([1,2]), 'row') # 1-D array, 2 elements + (1, 2) + + The argument has to make sense though + + >>> matdims(np.array([1,2]), 'bizarre') + Traceback (most recent call last): + ... 
+ ValueError: 1-D option "bizarre" is strange + + """ + shape = arr.shape + if shape == (): # scalar + return (1, 1) + if len(shape) == 1: # 1D + if shape[0] == 0: + return (0, 0) + elif oned_as == 'column': + return shape + (1,) + elif oned_as == 'row': + return (1,) + shape + else: + raise ValueError('1-D option "%s" is strange' + % oned_as) + return shape + + +class MatVarReader: + ''' Abstract class defining required interface for var readers''' + def __init__(self, file_reader): + pass + + def read_header(self): + ''' Returns header ''' + pass + + def array_from_header(self, header): + ''' Reads array given header ''' + pass + + +class MatFileReader: + """ Base object for reading mat files + + To make this class functional, you will need to override the + following methods: + + matrix_getter_factory - gives object to fetch next matrix from stream + guess_byte_order - guesses file byte order from file + """ + + @docfiller + def __init__(self, mat_stream, + byte_order=None, + mat_dtype=False, + squeeze_me=False, + chars_as_strings=True, + matlab_compatible=False, + struct_as_record=True, + verify_compressed_data_integrity=True, + simplify_cells=False): + ''' + Initializer for mat file reader + + mat_stream : file-like + object with file API, open for reading + %(load_args)s + ''' + # Initialize stream + self.mat_stream = mat_stream + self.dtypes = {} + if not byte_order: + byte_order = self.guess_byte_order() + else: + byte_order = boc.to_numpy_code(byte_order) + self.byte_order = byte_order + self.struct_as_record = struct_as_record + if matlab_compatible: + self.set_matlab_compatible() + else: + self.squeeze_me = squeeze_me + self.chars_as_strings = chars_as_strings + self.mat_dtype = mat_dtype + self.verify_compressed_data_integrity = verify_compressed_data_integrity + self.simplify_cells = simplify_cells + if simplify_cells: + self.squeeze_me = True + self.struct_as_record = False + + def set_matlab_compatible(self): + ''' Sets options to return arrays as 
MATLAB loads them ''' + self.mat_dtype = True + self.squeeze_me = False + self.chars_as_strings = False + + def guess_byte_order(self): + ''' As we do not know what file type we have, assume native ''' + return boc.native_code + + def end_of_stream(self): + b = self.mat_stream.read(1) + curpos = self.mat_stream.tell() + self.mat_stream.seek(curpos-1) + return len(b) == 0 + + +def arr_dtype_number(arr, num): + ''' Return dtype for given number of items per element''' + return np.dtype(arr.dtype.str[:2] + str(num)) + + +def arr_to_chars(arr): + ''' Convert string array to char array ''' + dims = list(arr.shape) + if not dims: + dims = [1] + dims.append(int(arr.dtype.str[2:])) + arr = np.ndarray(shape=dims, + dtype=arr_dtype_number(arr, 1), + buffer=arr) + empties = [arr == np.array('', dtype=arr.dtype)] + if not np.any(empties): + return arr + arr = arr.copy() + arr[tuple(empties)] = ' ' + return arr diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6b48d5b9a1416012c0a4cf5b8452f70ef21c90ec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio.py new file mode 100644 index 0000000000000000000000000000000000000000..492d62cb70006fbafe73064831a03aa58ab4c9e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'mat_reader_factory', 'loadmat', 'savemat', 'whosmat', + 'contextmanager', 'docfiller', + 'MatFile4Reader', 'MatFile4Writer', 'MatFile5Reader', 'MatFile5Writer' +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio", + private_modules=["_mio"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio4.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio4.py new file mode 100644 index 0000000000000000000000000000000000000000..6807397b34d8c6e699626d2a736905be75861f77 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio4.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'MatFile4Reader', 'MatFile4Writer', 'SYS_LITTLE_ENDIAN', + 'VarHeader4', 'VarReader4', 'VarWriter4', 'arr_to_2d', 'mclass_info', + 'mdtypes_template', 'miDOUBLE', 'miINT16', 'miINT32', 'miSINGLE', + 'miUINT16', 'miUINT8', 'mxCHAR_CLASS', 'mxFULL_CLASS', 'mxSPARSE_CLASS', + 'np_to_mtypes', 'order_codes', 'MatFileReader', 'docfiller', + 'matdims', 'read_dtype', 'convert_dtypes', 'arr_to_chars', + 'arr_dtype_number', 'squeeze_element', 'chars_to_strings' +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio4", + private_modules=["_mio4"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5.py new file mode 100644 index 0000000000000000000000000000000000000000..65f541d3a2a1a1230c9cb621f8850eb30454daa6 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5.py @@ -0,0 +1,28 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'mclass_info', 'mxCHAR_CLASS', 'mxSPARSE_CLASS', + 'BytesIO', 'native_code', + 'swapped_code', 'MatFileReader', 'docfiller', 'matdims', + 'read_dtype', 'arr_to_chars', 'arr_dtype_number', 'MatWriteError', + 'MatReadError', 'MatReadWarning', 'VarReader5', 'MatlabObject', + 'MatlabFunction', 'MDTYPES', 'NP_TO_MTYPES', 'NP_TO_MXTYPES', + 'miCOMPRESSED', 'miMATRIX', 'miINT8', 'miUTF8', 'miUINT32', + 'mxCELL_CLASS', 'mxSTRUCT_CLASS', 'mxOBJECT_CLASS', 'mxDOUBLE_CLASS', + 'mat_struct', 'ZlibInputStream', 'MatFile5Reader', 'varmats_from_mat', + 'EmptyStructMarker', 'to_writeable', 'NDT_FILE_HDR', 'NDT_TAG_FULL', + 'NDT_TAG_SMALL', 'NDT_ARRAY_FLAGS', 'VarWriter5', 'MatFile5Writer' +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio5", + private_modules=["_mio5"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_utils.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1f0eeab6e6e6a3976c86f3446356572522e45dbe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_utils.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'VarHeader5', 'VarReader5', 'byteswap_u4', 'chars_to_strings', + 'csc_matrix', 'mio5p', 'pycopy', 'swapped_code', 'squeeze_element' +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio5_utils", + private_modules=["_mio5_utils"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio_utils.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..62d32965e0197f60d7c34efbc6f39c6c904b6f49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio_utils.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['squeeze_element', 'chars_to_strings'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio_utils", + private_modules=["_mio_utils"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/miobase.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/miobase.py new file mode 100644 index 0000000000000000000000000000000000000000..034229d14dddf279b85b77ef649a601bac1d2ec6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/miobase.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'MatFileReader', 'MatReadError', 'MatReadWarning', + 'MatVarReader', 'MatWriteError', 'arr_dtype_number', + 'arr_to_chars', 'convert_dtypes', 'doc_dict', + 'docfiller', 'get_matfile_version', + 'matdims', 'read_dtype', 'doccer', 'boc' +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="miobase", + private_modules=["_miobase"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/bad_miuint32.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/bad_miuint32.mat new file mode 100644 index 0000000000000000000000000000000000000000..c9ab357ec85972cf0014752a1e0ccb08ff284af9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/bad_miuint32.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/big_endian.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/big_endian.mat new file mode 100644 index 0000000000000000000000000000000000000000..2a0c982c298fba9df96fd5a927a9c08ee12b09df Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/big_endian.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat new file mode 100644 index 0000000000000000000000000000000000000000..45a2ef4e39755ea1f41aab045f18a035af58ea07 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat new file mode 100644 index 0000000000000000000000000000000000000000..a60ad5b605a9dc6b0d85eb0a0e3e655c4955dd34 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat new file mode 100644 index 0000000000000000000000000000000000000000..fd2c4994578edbf31431902ecfcb601b11f60b0b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat new file mode 100644 index 0000000000000000000000000000000000000000..ccfdaa8adb7879ba852eab9ce55b602e11dad06d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat new file mode 100644 index 0000000000000000000000000000000000000000..35dcb715bca4cb7f4b0dca287648ef8ee797cd73 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/parabola.mat 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/parabola.mat new file mode 100644 index 0000000000000000000000000000000000000000..66350532a7737c475a3ae6ef1b1d8406543d890e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/parabola.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/some_functions.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/some_functions.mat new file mode 100644 index 0000000000000000000000000000000000000000..cc818593b48dd8d29a40a827210b54373e5acf50 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/some_functions.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..232a051c774105176c28c9718c2cd46f1a1ee1af Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat new file mode 100644 index 0000000000000000000000000000000000000000..efbe3fec64ee54c9f8b3998e5035ccfa251e74ff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..512f7d889420a016094a903585f27acaa50bc658 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..4198a4f2aeb8effcccf94a9c0114539f98124179 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..2c7826eeacdb456e5290cafba343703c7596d191 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b3b086cc31dce2de1e300a1d018b0bf5661b69f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..8e36c0c8ce62d7559b60fde454a96e8eefcbcb92 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..e7dec3b81abdae8769e0ae0329948548f4038adf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..8b244044cf3028df9a019a259d8fc533b80f7fb7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..adb6c28ee95d1cf8bf3bfeb72295d1a7848020f8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6066c1e30f69b76afdb8d251ecefd8cd9e1acde5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..eb537ab1042b0f989d49711b1a36cc508946fe55 Binary files /dev/null 
and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..59d243c4de4fbb3fa653753e40651a6d0a4f4967 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..cdb4191c7d2eb0ac66d4f6add250e1f6a604d892 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..55cbd3c1b3d65630beae47832ffbcc7a6fd43354 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a8a615a320f9c8db068a9120c1ceb2e49bb0ea6d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..15424266a3bd4aa1e7525a8fdc4945b51d2b5ad6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..63059b84476749119f44ebefda795f85f6ab27d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..954e39beb8156b460ca904ff66261d8f2fc338cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6feb6e42375ebebf6dd9440ee09312204cbf1a33 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat 
new file mode 100644 index 0000000000000000000000000000000000000000..028841f9d3aae42d6cf782db14634cbe375f0a05 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..c6dccc00289f61787b235f4299aa5a14ab4f6d07 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..faf9221b776eee67cd5d2971da5ba77732ef8016 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_byteordercodes.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_byteordercodes.py new file mode 100644 index 0000000000000000000000000000000000000000..535434d188ff575029cc7a0de807b0daa7348f73 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_byteordercodes.py @@ -0,0 +1,29 @@ +''' Tests for byteorder module ''' + +import sys + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +import scipy.io.matlab._byteordercodes as sibc + + +def test_native(): + native_is_le = sys.byteorder == 'little' + assert_(sibc.sys_is_le == native_is_le) + + +def test_to_numpy(): + if sys.byteorder == 'little': + assert_(sibc.to_numpy_code('native') == '<') + 
assert_(sibc.to_numpy_code('swapped') == '>') + else: + assert_(sibc.to_numpy_code('native') == '>') + assert_(sibc.to_numpy_code('swapped') == '<') + assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('=')) + assert_(sibc.to_numpy_code('big') == '>') + for code in ('little', '<', 'l', 'L', 'le'): + assert_(sibc.to_numpy_code(code) == '<') + for code in ('big', '>', 'b', 'B', 'be'): + assert_(sibc.to_numpy_code(code) == '>') + assert_raises(ValueError, sibc.to_numpy_code, 'silly string') diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio.py new file mode 100644 index 0000000000000000000000000000000000000000..cd3ee2fb35c381c0a12c7aee15e4e160ef74ac1b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio.py @@ -0,0 +1,1339 @@ +''' Nose test generators + +Need function load / save / roundtrip tests + +''' +import os +from collections import OrderedDict +from os.path import join as pjoin, dirname +from glob import glob +from io import BytesIO +import re +from tempfile import mkdtemp + +import warnings +import shutil +import gzip + +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_equal, assert_, assert_warns, assert_allclose) +import pytest +from pytest import raises as assert_raises + +import numpy as np +from numpy import array +import scipy.sparse as SP + +import scipy.io +from scipy.io.matlab import MatlabOpaque, MatlabFunction, MatlabObject +import scipy.io.matlab._byteordercodes as boc +from scipy.io.matlab._miobase import ( + matdims, MatWriteError, MatReadError, matfile_version) +from scipy.io.matlab._mio import mat_reader_factory, loadmat, savemat, whosmat +from scipy.io.matlab._mio5 import ( + MatFile5Writer, MatFile5Reader, varmats_from_mat, to_writeable, + EmptyStructMarker) +import scipy.io.matlab._mio5_params as mio5p +from scipy._lib._util import 
VisibleDeprecationWarning + + +test_data_path = pjoin(dirname(__file__), 'data') + + +def mlarr(*args, **kwargs): + """Convenience function to return matlab-compatible 2-D array.""" + arr = np.array(*args, **kwargs) + arr.shape = matdims(arr) + return arr + + +# Define cases to test +theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9) +case_table4 = [ + {'name': 'double', + 'classes': {'testdouble': 'double'}, + 'expected': {'testdouble': theta} + }] +case_table4.append( + {'name': 'string', + 'classes': {'teststring': 'char'}, + 'expected': {'teststring': + array(['"Do nine men interpret?" "Nine men," I nod.'])} + }) +case_table4.append( + {'name': 'complex', + 'classes': {'testcomplex': 'double'}, + 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} + }) +A = np.zeros((3,5)) +A[0] = list(range(1,6)) +A[:,0] = list(range(1,4)) +case_table4.append( + {'name': 'matrix', + 'classes': {'testmatrix': 'double'}, + 'expected': {'testmatrix': A}, + }) +case_table4.append( + {'name': 'sparse', + 'classes': {'testsparse': 'sparse'}, + 'expected': {'testsparse': SP.coo_matrix(A)}, + }) +B = A.astype(complex) +B[0,0] += 1j +case_table4.append( + {'name': 'sparsecomplex', + 'classes': {'testsparsecomplex': 'sparse'}, + 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, + }) +case_table4.append( + {'name': 'multi', + 'classes': {'theta': 'double', 'a': 'double'}, + 'expected': {'theta': theta, 'a': A}, + }) +case_table4.append( + {'name': 'minus', + 'classes': {'testminus': 'double'}, + 'expected': {'testminus': mlarr(-1)}, + }) +case_table4.append( + {'name': 'onechar', + 'classes': {'testonechar': 'char'}, + 'expected': {'testonechar': array(['r'])}, + }) +# Cell arrays stored as object arrays +CA = mlarr(( # tuple for object array creation + [], + mlarr([1]), + mlarr([[1,2]]), + mlarr([[1,2,3]])), dtype=object).reshape(1,-1) +CA[0,0] = array( + ['This cell contains this string and 3 arrays of increasing length']) +case_table5 = [ + {'name': 'cell', + 
'classes': {'testcell': 'cell'}, + 'expected': {'testcell': CA}}] +CAE = mlarr(( # tuple for object array creation + mlarr(1), + mlarr(2), + mlarr([]), + mlarr([]), + mlarr(3)), dtype=object).reshape(1,-1) +objarr = np.empty((1,1),dtype=object) +objarr[0,0] = mlarr(1) +case_table5.append( + {'name': 'scalarcell', + 'classes': {'testscalarcell': 'cell'}, + 'expected': {'testscalarcell': objarr} + }) +case_table5.append( + {'name': 'emptycell', + 'classes': {'testemptycell': 'cell'}, + 'expected': {'testemptycell': CAE}}) +case_table5.append( + {'name': 'stringarray', + 'classes': {'teststringarray': 'char'}, + 'expected': {'teststringarray': array( + ['one ', 'two ', 'three'])}, + }) +case_table5.append( + {'name': '3dmatrix', + 'classes': {'test3dmatrix': 'double'}, + 'expected': { + 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))} + }) +st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) +dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] +st1 = np.zeros((1,1), dtype) +st1['stringfield'][0,0] = array(['Rats live on no evil star.']) +st1['doublefield'][0,0] = st_sub_arr +st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) +case_table5.append( + {'name': 'struct', + 'classes': {'teststruct': 'struct'}, + 'expected': {'teststruct': st1} + }) +CN = np.zeros((1,2), dtype=object) +CN[0,0] = mlarr(1) +CN[0,1] = np.zeros((1,3), dtype=object) +CN[0,1][0,0] = mlarr(2, dtype=np.uint8) +CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8) +CN[0,1][0,2] = np.zeros((1,2), dtype=object) +CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8) +CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8) +case_table5.append( + {'name': 'cellnest', + 'classes': {'testcellnest': 'cell'}, + 'expected': {'testcellnest': CN}, + }) +st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) +st2[0,0]['one'] = mlarr(1) +st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) +st2[0,0]['two'][0,0]['three'] = array(['number 3']) +case_table5.append( + 
{'name': 'structnest', + 'classes': {'teststructnest': 'struct'}, + 'expected': {'teststructnest': st2} + }) +a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']]) +a[0,0]['one'] = mlarr(1) +a[0,0]['two'] = mlarr(2) +a[0,1]['one'] = array(['number 1']) +a[0,1]['two'] = array(['number 2']) +case_table5.append( + {'name': 'structarr', + 'classes': {'teststructarr': 'struct'}, + 'expected': {'teststructarr': a} + }) +ODT = np.dtype([(n, object) for n in + ['expr', 'inputExpr', 'args', + 'isEmpty', 'numArgs', 'version']]) +MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') +m0 = MO[0,0] +m0['expr'] = array(['x']) +m0['inputExpr'] = array([' x = INLINE_INPUTS_{1};']) +m0['args'] = array(['x']) +m0['isEmpty'] = mlarr(0) +m0['numArgs'] = mlarr(1) +m0['version'] = mlarr(1) +case_table5.append( + {'name': 'object', + 'classes': {'testobject': 'object'}, + 'expected': {'testobject': MO} + }) +fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb') +u_str = fp_u_str.read().decode('utf-8') +fp_u_str.close() +case_table5.append( + {'name': 'unicode', + 'classes': {'testunicode': 'char'}, + 'expected': {'testunicode': array([u_str])} + }) +case_table5.append( + {'name': 'sparse', + 'classes': {'testsparse': 'sparse'}, + 'expected': {'testsparse': SP.coo_matrix(A)}, + }) +case_table5.append( + {'name': 'sparsecomplex', + 'classes': {'testsparsecomplex': 'sparse'}, + 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, + }) +case_table5.append( + {'name': 'bool', + 'classes': {'testbools': 'logical'}, + 'expected': {'testbools': + array([[True], [False]])}, + }) + +case_table5_rt = case_table5[:] +# Inline functions can't be concatenated in matlab, so RT only +case_table5_rt.append( + {'name': 'objectarray', + 'classes': {'testobjectarray': 'object'}, + 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}}) + + +def types_compatible(var1, var2): + """Check if types are same or compatible. 
+ + 0-D numpy scalars are compatible with bare python scalars. + """ + type1 = type(var1) + type2 = type(var2) + if type1 is type2: + return True + if type1 is np.ndarray and var1.shape == (): + return type(var1.item()) is type2 + if type2 is np.ndarray and var2.shape == (): + return type(var2.item()) is type1 + return False + + +def _check_level(label, expected, actual): + """ Check one level of a potentially nested array """ + if SP.issparse(expected): # allow different types of sparse matrices + assert_(SP.issparse(actual)) + assert_array_almost_equal(actual.toarray(), + expected.toarray(), + err_msg=label, + decimal=5) + return + # Check types are as expected + assert_(types_compatible(expected, actual), + f"Expected type {type(expected)}, got {type(actual)} at {label}") + # A field in a record array may not be an ndarray + # A scalar from a record array will be type np.void + if not isinstance(expected, + (np.void, np.ndarray, MatlabObject)): + assert_equal(expected, actual) + return + # This is an ndarray-like thing + assert_(expected.shape == actual.shape, + msg=f'Expected shape {expected.shape}, got {actual.shape} at {label}') + ex_dtype = expected.dtype + if ex_dtype.hasobject: # array of objects + if isinstance(expected, MatlabObject): + assert_equal(expected.classname, actual.classname) + for i, ev in enumerate(expected): + level_label = "%s, [%d], " % (label, i) + _check_level(level_label, ev, actual[i]) + return + if ex_dtype.fields: # probably recarray + for fn in ex_dtype.fields: + level_label = f"{label}, field {fn}, " + _check_level(level_label, + expected[fn], actual[fn]) + return + if ex_dtype.type in (str, # string or bool + np.str_, + np.bool_): + assert_equal(actual, expected, err_msg=label) + return + # Something numeric + assert_array_almost_equal(actual, expected, err_msg=label, decimal=5) + + +def _load_check_case(name, files, case): + for file_name in files: + matdict = loadmat(file_name, struct_as_record=True) + label = f"test {name}; 
file {file_name}" + for k, expected in case.items(): + k_label = f"{label}, variable {k}" + assert_(k in matdict, "Missing key at %s" % k_label) + _check_level(k_label, expected, matdict[k]) + + +def _whos_check_case(name, files, case, classes): + for file_name in files: + label = f"test {name}; file {file_name}" + + whos = whosmat(file_name) + + expected_whos = [ + (k, expected.shape, classes[k]) for k, expected in case.items()] + + whos.sort() + expected_whos.sort() + assert_equal(whos, expected_whos, + f"{label}: {whos!r} != {expected_whos!r}" + ) + + +# Round trip tests +def _rt_check_case(name, expected, format): + mat_stream = BytesIO() + savemat(mat_stream, expected, format=format) + mat_stream.seek(0) + _load_check_case(name, [mat_stream], expected) + + +# generator for tests +def _cases(version, filt='test%(name)s_*.mat'): + if version == '4': + cases = case_table4 + elif version == '5': + cases = case_table5 + else: + assert version == '5_rt' + cases = case_table5_rt + for case in cases: + name = case['name'] + expected = case['expected'] + if filt is None: + files = None + else: + use_filt = pjoin(test_data_path, filt % dict(name=name)) + files = glob(use_filt) + assert len(files) > 0, \ + f"No files for test {name} using filter {filt}" + classes = case['classes'] + yield name, files, expected, classes + + +@pytest.mark.parametrize('version', ('4', '5')) +def test_load(version): + for case in _cases(version): + _load_check_case(*case[:3]) + + +@pytest.mark.parametrize('version', ('4', '5')) +def test_whos(version): + for case in _cases(version): + _whos_check_case(*case) + + +# generator for round trip tests +@pytest.mark.parametrize('version, fmts', [ + ('4', ['4', '5']), + ('5_rt', ['5']), +]) +def test_round_trip(version, fmts): + for case in _cases(version, filt=None): + for fmt in fmts: + _rt_check_case(case[0], case[2], fmt) + + +def test_gzip_simple(): + xdense = np.zeros((20,20)) + xdense[2,3] = 2.3 + xdense[4,5] = 4.5 + x = SP.csc_matrix(xdense) 
+ + name = 'gzip_test' + expected = {'x':x} + format = '4' + + tmpdir = mkdtemp() + try: + fname = pjoin(tmpdir,name) + mat_stream = gzip.open(fname, mode='wb') + savemat(mat_stream, expected, format=format) + mat_stream.close() + + mat_stream = gzip.open(fname, mode='rb') + actual = loadmat(mat_stream, struct_as_record=True) + mat_stream.close() + finally: + shutil.rmtree(tmpdir) + + assert_array_almost_equal(actual['x'].toarray(), + expected['x'].toarray(), + err_msg=repr(actual)) + + +def test_multiple_open(): + # Ticket #1039, on Windows: check that files are not left open + tmpdir = mkdtemp() + try: + x = dict(x=np.zeros((2, 2))) + + fname = pjoin(tmpdir, "a.mat") + + # Check that file is not left open + savemat(fname, x) + os.unlink(fname) + savemat(fname, x) + loadmat(fname) + os.unlink(fname) + + # Check that stream is left open + f = open(fname, 'wb') + savemat(f, x) + f.seek(0) + f.close() + + f = open(fname, 'rb') + loadmat(f) + f.seek(0) + f.close() + finally: + shutil.rmtree(tmpdir) + + +def test_mat73(): + # Check any hdf5 files raise an error + filenames = glob( + pjoin(test_data_path, 'testhdf5*.mat')) + assert_(len(filenames) > 0) + for filename in filenames: + fp = open(filename, 'rb') + assert_raises(NotImplementedError, + loadmat, + fp, + struct_as_record=True) + fp.close() + + +def test_warnings(): + # This test is an echo of the previous behavior, which was to raise a + # warning if the user triggered a search for mat files on the Python system + # path. We can remove the test in the next version after upcoming (0.13). + fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat') + with warnings.catch_warnings(): + warnings.simplefilter('error') + # This should not generate a warning + loadmat(fname, struct_as_record=True) + # This neither + loadmat(fname, struct_as_record=False) + + +def test_regression_653(): + # Saving a dictionary with only invalid keys used to raise an error. Now we + # save this as an empty struct in matlab space. 
+ sio = BytesIO() + savemat(sio, {'d':{1:2}}, format='5') + back = loadmat(sio)['d'] + # Check we got an empty struct equivalent + assert_equal(back.shape, (1,1)) + assert_equal(back.dtype, np.dtype(object)) + assert_(back[0,0] is None) + + +def test_structname_len(): + # Test limit for length of field names in structs + lim = 31 + fldname = 'a' * lim + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + savemat(BytesIO(), {'longstruct': st1}, format='5') + fldname = 'a' * (lim+1) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': st1}, format='5') + + +def test_4_and_long_field_names_incompatible(): + # Long field names option not supported in 4 + my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'my_struct':my_struct}, format='4', long_field_names=True) + + +def test_long_field_names(): + # Test limit for length of field names in structs + lim = 63 + fldname = 'a' * lim + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) + fldname = 'a' * (lim+1) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': st1}, format='5',long_field_names=True) + + +def test_long_field_names_in_struct(): + # Regression test - long_field_names was erased if you passed a struct + # within a struct + lim = 63 + fldname = 'a' * lim + cell = np.ndarray((1,2),dtype=object) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + cell[0,0] = st1 + cell[0,1] = st1 + savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True) + # + # Check to make sure it fails with long field names off + # + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': cell}, format='5', long_field_names=False) + + +def test_cell_with_one_thing_in_it(): + # Regression test - make a cell array that's 1 x 2 and put two + # strings in it. 
It works. Make a cell array that's 1 x 1 and put + # a string in it. It should work but, in the old days, it didn't. + cells = np.ndarray((1,2),dtype=object) + cells[0,0] = 'Hello' + cells[0,1] = 'World' + savemat(BytesIO(), {'x': cells}, format='5') + + cells = np.ndarray((1,1),dtype=object) + cells[0,0] = 'Hello, world' + savemat(BytesIO(), {'x': cells}, format='5') + + +def test_writer_properties(): + # Tests getting, setting of properties of matrix writer + mfw = MatFile5Writer(BytesIO()) + assert_equal(mfw.global_vars, []) + mfw.global_vars = ['avar'] + assert_equal(mfw.global_vars, ['avar']) + assert_equal(mfw.unicode_strings, False) + mfw.unicode_strings = True + assert_equal(mfw.unicode_strings, True) + assert_equal(mfw.long_field_names, False) + mfw.long_field_names = True + assert_equal(mfw.long_field_names, True) + + +def test_use_small_element(): + # Test whether we're using small data element or not + sio = BytesIO() + wtr = MatFile5Writer(sio) + # First check size for no sde for name + arr = np.zeros(10) + wtr.put_variables({'aaaaa': arr}) + w_sz = len(sio.getvalue()) + # Check small name results in largish difference in size + sio.truncate(0) + sio.seek(0) + wtr.put_variables({'aaaa': arr}) + assert_(w_sz - len(sio.getvalue()) > 4) + # Whereas increasing name size makes less difference + sio.truncate(0) + sio.seek(0) + wtr.put_variables({'aaaaaa': arr}) + assert_(len(sio.getvalue()) - w_sz < 4) + + +def test_save_dict(): + # Test that both dict and OrderedDict can be saved (as recarray), + # loaded as matstruct, and preserve order + ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)]) + for dict_type in (dict, OrderedDict): + # Initialize with tuples to keep order + d = dict_type([('a', 1), ('b', 2)]) + stream = BytesIO() + savemat(stream, {'dict': d}) + stream.seek(0) + vals = loadmat(stream)['dict'] + assert_equal(vals.dtype.names, ('a', 'b')) + assert_array_equal(vals, ab_exp) + + +def test_1d_shape(): + # New 5 behavior is 1D -> 
row vector + arr = np.arange(5) + for format in ('4', '5'): + # Column is the default + stream = BytesIO() + savemat(stream, {'oned': arr}, format=format) + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (1, 5)) + # can be explicitly 'column' for oned_as + stream = BytesIO() + savemat(stream, {'oned':arr}, + format=format, + oned_as='column') + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (5,1)) + # but different from 'row' + stream = BytesIO() + savemat(stream, {'oned':arr}, + format=format, + oned_as='row') + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (1,5)) + + +def test_compression(): + arr = np.zeros(100).reshape((5,20)) + arr[2,10] = 1 + stream = BytesIO() + savemat(stream, {'arr':arr}) + raw_len = len(stream.getvalue()) + vals = loadmat(stream) + assert_array_equal(vals['arr'], arr) + stream = BytesIO() + savemat(stream, {'arr':arr}, do_compression=True) + compressed_len = len(stream.getvalue()) + vals = loadmat(stream) + assert_array_equal(vals['arr'], arr) + assert_(raw_len > compressed_len) + # Concatenate, test later + arr2 = arr.copy() + arr2[0,0] = 1 + stream = BytesIO() + savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False) + vals = loadmat(stream) + assert_array_equal(vals['arr2'], arr2) + stream = BytesIO() + savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True) + vals = loadmat(stream) + assert_array_equal(vals['arr2'], arr2) + + +def test_single_object(): + stream = BytesIO() + savemat(stream, {'A':np.array(1, dtype=object)}) + + +def test_skip_variable(): + # Test skipping over the first of two variables in a MAT file + # using mat_reader_factory and put_variables to read them in. + # + # This is a regression test of a problem that's caused by + # using the compressed file reader seek instead of the raw file + # I/O seek when skipping over a compressed chunk. + # + # The problem arises when the chunk is large: this file has + # a 256x256 array of random (uncompressible) doubles. 
+ # + filename = pjoin(test_data_path,'test_skip_variable.mat') + # + # Prove that it loads with loadmat + # + d = loadmat(filename, struct_as_record=True) + assert_('first' in d) + assert_('second' in d) + # + # Make the factory + # + factory, file_opened = mat_reader_factory(filename, struct_as_record=True) + # + # This is where the factory breaks with an error in MatMatrixGetter.to_next + # + d = factory.get_variables('second') + assert_('second' in d) + factory.mat_stream.close() + + +def test_empty_struct(): + # ticket 885 + filename = pjoin(test_data_path,'test_empty_struct.mat') + # before ticket fix, this would crash with ValueError, empty data + # type + d = loadmat(filename, struct_as_record=True) + a = d['a'] + assert_equal(a.shape, (1,1)) + assert_equal(a.dtype, np.dtype(object)) + assert_(a[0,0] is None) + stream = BytesIO() + arr = np.array((), dtype='U') + # before ticket fix, this used to give data type not understood + savemat(stream, {'arr':arr}) + d = loadmat(stream) + a2 = d['arr'] + assert_array_equal(a2, arr) + + +def test_save_empty_dict(): + # saving empty dict also gives empty struct + stream = BytesIO() + savemat(stream, {'arr': {}}) + d = loadmat(stream) + a = d['arr'] + assert_equal(a.shape, (1,1)) + assert_equal(a.dtype, np.dtype(object)) + assert_(a[0,0] is None) + + +def assert_any_equal(output, alternatives): + """ Assert `output` is equal to at least one element in `alternatives` + """ + one_equal = False + for expected in alternatives: + if np.all(output == expected): + one_equal = True + break + assert_(one_equal) + + +def test_to_writeable(): + # Test to_writeable function + res = to_writeable(np.array([1])) # pass through ndarrays + assert_equal(res.shape, (1,)) + assert_array_equal(res, 1) + # Dict fields can be written in any order + expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')]) + expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')]) + alternatives = (expected1, expected2) + 
assert_any_equal(to_writeable({'a':1,'b':2}), alternatives) + # Fields with underscores discarded + assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives) + # Not-string fields discarded + assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives) + # String fields that are valid Python identifiers discarded + assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives) + # Object with field names is equivalent + + class klass: + pass + + c = klass + c.a = 1 + c.b = 2 + assert_any_equal(to_writeable(c), alternatives) + # empty list and tuple go to empty array + res = to_writeable([]) + assert_equal(res.shape, (0,)) + assert_equal(res.dtype.type, np.float64) + res = to_writeable(()) + assert_equal(res.shape, (0,)) + assert_equal(res.dtype.type, np.float64) + # None -> None + assert_(to_writeable(None) is None) + # String to strings + assert_equal(to_writeable('a string').dtype.type, np.str_) + # Scalars to numpy to NumPy scalars + res = to_writeable(1) + assert_equal(res.shape, ()) + assert_equal(res.dtype.type, np.array(1).dtype.type) + assert_array_equal(res, 1) + # Empty dict returns EmptyStructMarker + assert_(to_writeable({}) is EmptyStructMarker) + # Object does not have (even empty) __dict__ + assert_(to_writeable(object()) is None) + # Custom object does have empty __dict__, returns EmptyStructMarker + + class C: + pass + + assert_(to_writeable(c()) is EmptyStructMarker) + # dict keys with legal characters are convertible + res = to_writeable({'a': 1})['a'] + assert_equal(res.shape, (1,)) + assert_equal(res.dtype.type, np.object_) + # Only fields with illegal characters, falls back to EmptyStruct + assert_(to_writeable({'1':1}) is EmptyStructMarker) + assert_(to_writeable({'_a':1}) is EmptyStructMarker) + # Unless there are valid fields, in which case structured array + assert_equal(to_writeable({'1':1, 'f': 2}), + np.array([(2,)], dtype=[('f', '|O8')])) + + +def test_recarray(): + # check roundtrip of structured array + dt = 
[('f1', 'f8'), + ('f2', 'S10')] + arr = np.zeros((2,), dtype=dt) + arr[0]['f1'] = 0.5 + arr[0]['f2'] = 'python' + arr[1]['f1'] = 99 + arr[1]['f2'] = 'not perl' + stream = BytesIO() + savemat(stream, {'arr': arr}) + d = loadmat(stream, struct_as_record=False) + a20 = d['arr'][0,0] + assert_equal(a20.f1, 0.5) + assert_equal(a20.f2, 'python') + d = loadmat(stream, struct_as_record=True) + a20 = d['arr'][0,0] + assert_equal(a20['f1'], 0.5) + assert_equal(a20['f2'], 'python') + # structs always come back as object types + assert_equal(a20.dtype, np.dtype([('f1', 'O'), + ('f2', 'O')])) + a21 = d['arr'].flat[1] + assert_equal(a21['f1'], 99) + assert_equal(a21['f2'], 'not perl') + + +def test_save_object(): + class C: + pass + c = C() + c.field1 = 1 + c.field2 = 'a string' + stream = BytesIO() + savemat(stream, {'c': c}) + d = loadmat(stream, struct_as_record=False) + c2 = d['c'][0,0] + assert_equal(c2.field1, 1) + assert_equal(c2.field2, 'a string') + d = loadmat(stream, struct_as_record=True) + c2 = d['c'][0,0] + assert_equal(c2['field1'], 1) + assert_equal(c2['field2'], 'a string') + + +def test_read_opts(): + # tests if read is seeing option sets, at initialization and after + # initialization + arr = np.arange(6).reshape(1,6) + stream = BytesIO() + savemat(stream, {'a': arr}) + rdr = MatFile5Reader(stream) + back_dict = rdr.get_variables() + rarr = back_dict['a'] + assert_array_equal(rarr, arr) + rdr = MatFile5Reader(stream, squeeze_me=True) + assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,))) + rdr.squeeze_me = False + assert_array_equal(rarr, arr) + rdr = MatFile5Reader(stream, byte_order=boc.native_code) + assert_array_equal(rdr.get_variables()['a'], arr) + # inverted byte code leads to error on read because of swapped + # header etc. 
+ rdr = MatFile5Reader(stream, byte_order=boc.swapped_code) + assert_raises(Exception, rdr.get_variables) + rdr.byte_order = boc.native_code + assert_array_equal(rdr.get_variables()['a'], arr) + arr = np.array(['a string']) + stream.truncate(0) + stream.seek(0) + savemat(stream, {'a': arr}) + rdr = MatFile5Reader(stream) + assert_array_equal(rdr.get_variables()['a'], arr) + rdr = MatFile5Reader(stream, chars_as_strings=False) + carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1')) + assert_array_equal(rdr.get_variables()['a'], carr) + rdr.chars_as_strings = True + assert_array_equal(rdr.get_variables()['a'], arr) + + +def test_empty_string(): + # make sure reading empty string does not raise error + estring_fname = pjoin(test_data_path, 'single_empty_string.mat') + fp = open(estring_fname, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_array_equal(d['a'], np.array([], dtype='U1')) + # Empty string round trip. Matlab cannot distinguish + # between a string array that is empty, and a string array + # containing a single empty string, because it stores strings as + # arrays of char. There is no way of having an array of char that + # is not empty, but contains an empty string. 
+ stream = BytesIO() + savemat(stream, {'a': np.array([''])}) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['a'], np.array([], dtype='U1')) + stream.truncate(0) + stream.seek(0) + savemat(stream, {'a': np.array([], dtype='U1')}) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['a'], np.array([], dtype='U1')) + stream.close() + + +def test_corrupted_data(): + import zlib + for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'), + (zlib.error, 'corrupted_zlib_checksum.mat')]: + with open(pjoin(test_data_path, fname), 'rb') as fp: + rdr = MatFile5Reader(fp) + assert_raises(exc, rdr.get_variables) + + +def test_corrupted_data_check_can_be_disabled(): + with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp: + rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False) + rdr.get_variables() + + +def test_read_both_endian(): + # make sure big- and little- endian data is read correctly + for fname in ('big_endian.mat', 'little_endian.mat'): + fp = open(pjoin(test_data_path, fname), 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_array_equal(d['strings'], + np.array([['hello'], + ['world']], dtype=object)) + assert_array_equal(d['floats'], + np.array([[2., 3.], + [3., 4.]], dtype=np.float32)) + + +def test_write_opposite_endian(): + # We don't support writing opposite endian .mat files, but we need to behave + # correctly if the user supplies an other-endian NumPy array to write out. 
+ float_arr = np.array([[2., 3.], + [3., 4.]]) + int_arr = np.arange(6).reshape((2, 3)) + uni_arr = np.array(['hello', 'world'], dtype='U') + stream = BytesIO() + savemat(stream, { + 'floats': float_arr.byteswap().view(float_arr.dtype.newbyteorder()), + 'ints': int_arr.byteswap().view(int_arr.dtype.newbyteorder()), + 'uni_arr': uni_arr.byteswap().view(uni_arr.dtype.newbyteorder()), + }) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['floats'], float_arr) + assert_array_equal(d['ints'], int_arr) + assert_array_equal(d['uni_arr'], uni_arr) + stream.close() + + +def test_logical_array(): + # The roundtrip test doesn't verify that we load the data up with the + # correct (bool) dtype + with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj: + rdr = MatFile5Reader(fobj, mat_dtype=True) + d = rdr.get_variables() + x = np.array([[True], [False]], dtype=np.bool_) + assert_array_equal(d['testbools'], x) + assert_equal(d['testbools'].dtype, x.dtype) + + +def test_logical_out_type(): + # Confirm that bool type written as uint8, uint8 class + # See gh-4022 + stream = BytesIO() + barr = np.array([False, True, False]) + savemat(stream, {'barray': barr}) + stream.seek(0) + reader = MatFile5Reader(stream) + reader.initialize_read() + reader.read_file_header() + hdr, _ = reader.read_var_header() + assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS) + assert_equal(hdr.is_logical, True) + var = reader.read_var_array(hdr, False) + assert_equal(var.dtype.type, np.uint8) + + +def test_roundtrip_zero_dimensions(): + stream = BytesIO() + savemat(stream, {'d':np.empty((10, 0))}) + d = loadmat(stream) + assert d['d'].shape == (10, 0) + + +def test_mat4_3d(): + # test behavior when writing 3-D arrays to matlab 4 files + stream = BytesIO() + arr = np.arange(24).reshape((2,3,4)) + assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4') + + +def test_func_read(): + func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat') + fp = 
open(func_eg, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert isinstance(d['testfunc'], MatlabFunction) + stream = BytesIO() + wtr = MatFile5Writer(stream) + assert_raises(MatWriteError, wtr.put_variables, d) + + +def test_mat_dtype(): + double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat') + fp = open(double_eg, 'rb') + rdr = MatFile5Reader(fp, mat_dtype=False) + d = rdr.get_variables() + fp.close() + assert_equal(d['testmatrix'].dtype.kind, 'u') + + fp = open(double_eg, 'rb') + rdr = MatFile5Reader(fp, mat_dtype=True) + d = rdr.get_variables() + fp.close() + assert_equal(d['testmatrix'].dtype.kind, 'f') + + +def test_sparse_in_struct(): + # reproduces bug found by DC where Cython code was insisting on + # ndarray return type, but getting sparse matrix + st = {'sparsefield': SP.coo_matrix(np.eye(4))} + stream = BytesIO() + savemat(stream, {'a':st}) + d = loadmat(stream, struct_as_record=True) + assert_array_equal(d['a'][0, 0]['sparsefield'].toarray(), np.eye(4)) + + +def test_mat_struct_squeeze(): + stream = BytesIO() + in_d = {'st':{'one':1, 'two':2}} + savemat(stream, in_d) + # no error without squeeze + loadmat(stream, struct_as_record=False) + # previous error was with squeeze, with mat_struct + loadmat(stream, struct_as_record=False, squeeze_me=True) + + +def test_scalar_squeeze(): + stream = BytesIO() + in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}} + savemat(stream, in_d) + out_d = loadmat(stream, squeeze_me=True) + assert_(isinstance(out_d['scalar'], float)) + assert_(isinstance(out_d['string'], str)) + assert_(isinstance(out_d['st'], np.ndarray)) + + +def test_str_round(): + # from report by Angus McMorland on mailing list 3 May 2010 + stream = BytesIO() + in_arr = np.array(['Hello', 'Foob']) + out_arr = np.array(['Hello', 'Foob ']) + savemat(stream, dict(a=in_arr)) + res = loadmat(stream) + # resulted in ['HloolFoa', 'elWrdobr'] + assert_array_equal(res['a'], out_arr) + stream.truncate(0) 
+ stream.seek(0) + # Make Fortran ordered version of string + in_str = in_arr.tobytes(order='F') + in_from_str = np.ndarray(shape=a.shape, + dtype=in_arr.dtype, + order='F', + buffer=in_str) + savemat(stream, dict(a=in_from_str)) + assert_array_equal(res['a'], out_arr) + # unicode save did lead to buffer too small error + stream.truncate(0) + stream.seek(0) + in_arr_u = in_arr.astype('U') + out_arr_u = out_arr.astype('U') + savemat(stream, {'a': in_arr_u}) + res = loadmat(stream) + assert_array_equal(res['a'], out_arr_u) + + +def test_fieldnames(): + # Check that field names are as expected + stream = BytesIO() + savemat(stream, {'a': {'a':1, 'b':2}}) + res = loadmat(stream) + field_names = res['a'].dtype.names + assert_equal(set(field_names), {'a', 'b'}) + + +def test_loadmat_varnames(): + # Test that we can get just one variable from a mat file using loadmat + mat5_sys_names = ['__globals__', + '__header__', + '__version__'] + for eg_file, sys_v_names in ( + (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin( + test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)): + vars = loadmat(eg_file) + assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names='a') + assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=['a']) + assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=['theta']) + assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=('theta',)) + assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=[]) + assert_equal(set(vars.keys()), set(sys_v_names)) + vnames = ['theta'] + vars = loadmat(eg_file, variable_names=vnames) + assert_equal(vnames, ['theta']) + + +def test_round_types(): + # Check that saving, loading preserves dtype in most cases + arr = np.arange(10) + stream = BytesIO() + for dts in 
('f8','f4','i8','i4','i2','i1', + 'u8','u4','u2','u1','c16','c8'): + stream.truncate(0) + stream.seek(0) # needed for BytesIO in Python 3 + savemat(stream, {'arr': arr.astype(dts)}) + vars = loadmat(stream) + assert_equal(np.dtype(dts), vars['arr'].dtype) + + +def test_varmats_from_mat(): + # Make a mat file with several variables, write it, read it back + names_vars = (('arr', mlarr(np.arange(10))), + ('mystr', mlarr('a string')), + ('mynum', mlarr(10))) + + # Dict like thing to give variables in defined order + class C: + def items(self): + return names_vars + stream = BytesIO() + savemat(stream, C()) + varmats = varmats_from_mat(stream) + assert_equal(len(varmats), 3) + for i in range(3): + name, var_stream = varmats[i] + exp_name, exp_res = names_vars[i] + assert_equal(name, exp_name) + res = loadmat(var_stream) + assert_array_equal(res[name], exp_res) + + +def test_one_by_zero(): + # Test 1x0 chars get read correctly + func_eg = pjoin(test_data_path, 'one_by_zero_char.mat') + fp = open(func_eg, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_equal(d['var'].shape, (0,)) + + +def test_load_mat4_le(): + # We were getting byte order wrong when reading little-endian floa64 dense + # matrices on big-endian platforms + mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat') + vars = loadmat(mat4_fname) + assert_array_equal(vars['a'], [[0.1, 1.2]]) + + +def test_unicode_mat4(): + # Mat4 should save unicode as latin1 + bio = BytesIO() + var = {'second_cat': 'Schrödinger'} + savemat(bio, var, format='4') + var_back = loadmat(bio) + assert_equal(var_back['second_cat'], var['second_cat']) + + +def test_logical_sparse(): + # Test we can read logical sparse stored in mat file as bytes. + # See https://github.com/scipy/scipy/issues/3539. + # In some files saved by MATLAB, the sparse data elements (Real Part + # Subelement in MATLAB speak) are stored with apparent type double + # (miDOUBLE) but are in fact single bytes. 
+ filename = pjoin(test_data_path,'logical_sparse.mat') + # Before fix, this would crash with: + # ValueError: indices and data should have the same size + d = loadmat(filename, struct_as_record=True) + log_sp = d['sp_log_5_4'] + assert_(isinstance(log_sp, SP.csc_matrix)) + assert_equal(log_sp.dtype.type, np.bool_) + assert_array_equal(log_sp.toarray(), + [[True, True, True, False], + [False, False, True, False], + [False, False, True, False], + [False, False, False, False], + [False, False, False, False]]) + + +def test_empty_sparse(): + # Can we read empty sparse matrices? + sio = BytesIO() + import scipy.sparse + empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]]) + savemat(sio, dict(x=empty_sparse)) + sio.seek(0) + res = loadmat(sio) + assert_array_equal(res['x'].shape, empty_sparse.shape) + assert_array_equal(res['x'].toarray(), 0) + # Do empty sparse matrices get written with max nnz 1? + # See https://github.com/scipy/scipy/issues/4208 + sio.seek(0) + reader = MatFile5Reader(sio) + reader.initialize_read() + reader.read_file_header() + hdr, _ = reader.read_var_header() + assert_equal(hdr.nzmax, 1) + + +def test_empty_mat_error(): + # Test we get a specific warning for an empty mat file + sio = BytesIO() + assert_raises(MatReadError, loadmat, sio) + + +def test_miuint32_compromise(): + # Reader should accept miUINT32 for miINT32, but check signs + # mat file with miUINT32 for miINT32, but OK values + filename = pjoin(test_data_path, 'miuint32_for_miint32.mat') + res = loadmat(filename) + assert_equal(res['an_array'], np.arange(10)[None, :]) + # mat file with miUINT32 for miINT32, with negative value + filename = pjoin(test_data_path, 'bad_miuint32.mat') + with assert_raises(ValueError): + loadmat(filename) + + +def test_miutf8_for_miint8_compromise(): + # Check reader accepts ascii as miUTF8 for array names + filename = pjoin(test_data_path, 'miutf8_array_name.mat') + res = loadmat(filename) + assert_equal(res['array_name'], [[1]]) + # mat file with 
non-ascii utf8 name raises error + filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat') + with assert_raises(ValueError): + loadmat(filename) + + +def test_bad_utf8(): + # Check that reader reads bad UTF with 'replace' option + filename = pjoin(test_data_path,'broken_utf8.mat') + res = loadmat(filename) + assert_equal(res['bad_string'], + b'\x80 am broken'.decode('utf8', 'replace')) + + +def test_save_unicode_field(tmpdir): + filename = os.path.join(str(tmpdir), 'test.mat') + test_dict = {'a':{'b':1,'c':'test_str'}} + savemat(filename, test_dict) + + +def test_save_custom_array_type(tmpdir): + class CustomArray: + def __array__(self, dtype=None, copy=None): + return np.arange(6.0).reshape(2, 3) + a = CustomArray() + filename = os.path.join(str(tmpdir), 'test.mat') + savemat(filename, {'a': a}) + out = loadmat(filename) + assert_array_equal(out['a'], np.array(a)) + + +def test_filenotfound(): + # Check the correct error is thrown + assert_raises(OSError, loadmat, "NotExistentFile00.mat") + assert_raises(OSError, loadmat, "NotExistentFile00") + + +def test_simplify_cells(): + # Test output when simplify_cells=True + filename = pjoin(test_data_path, 'testsimplecell.mat') + res1 = loadmat(filename, simplify_cells=True) + res2 = loadmat(filename, simplify_cells=False) + assert_(isinstance(res1["s"], dict)) + assert_(isinstance(res2["s"], np.ndarray)) + assert_array_equal(res1["s"]["mycell"], np.array(["a", "b", "c"])) + + +@pytest.mark.parametrize('version, filt, regex', [ + (0, '_4*_*', None), + (1, '_5*_*', None), + (1, '_6*_*', None), + (1, '_7*_*', '^((?!hdf5).)*$'), # not containing hdf5 + (2, '_7*_*', '.*hdf5.*'), + (1, '8*_*', None), +]) +def test_matfile_version(version, filt, regex): + use_filt = pjoin(test_data_path, 'test*%s.mat' % filt) + files = glob(use_filt) + if regex is not None: + files = [file for file in files if re.match(regex, file) is not None] + assert len(files) > 0, \ + f"No files for version {version} using filter {filt}" + for file 
in files: + got_version = matfile_version(file) + assert got_version[0] == version + + +def test_opaque(): + """Test that we can read a MatlabOpaque object.""" + data = loadmat(pjoin(test_data_path, 'parabola.mat')) + assert isinstance(data['parabola'], MatlabFunction) + assert isinstance(data['parabola'].item()[3].item()[3], MatlabOpaque) + + +def test_opaque_simplify(): + """Test that we can read a MatlabOpaque object when simplify_cells=True.""" + data = loadmat(pjoin(test_data_path, 'parabola.mat'), simplify_cells=True) + assert isinstance(data['parabola'], MatlabFunction) + + +def test_deprecation(): + """Test that access to previous attributes still works.""" + # This should be accessible immediately from scipy.io import + with assert_warns(DeprecationWarning): + scipy.io.matlab.mio5_params.MatlabOpaque + + # These should be importable but warn as well + with assert_warns(DeprecationWarning): + from scipy.io.matlab.miobase import MatReadError # noqa: F401 + + +def test_gh_17992(tmp_path): + rng = np.random.default_rng(12345) + outfile = tmp_path / "lists.mat" + array_one = rng.random((5,3)) + array_two = rng.random((6,3)) + list_of_arrays = [array_one, array_two] + # warning suppression only needed for NumPy < 1.24.0 + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning) + savemat(outfile, + {'data': list_of_arrays}, + long_field_names=True, + do_compression=True) + # round trip check + new_dict = {} + loadmat(outfile, + new_dict) + assert_allclose(new_dict["data"][0][0], array_one) + assert_allclose(new_dict["data"][0][1], array_two) + + +def test_gh_19659(tmp_path): + d = { + "char_array": np.array([list("char"), list("char")], dtype="U1"), + "string_array": np.array(["string", "string"]), + } + outfile = tmp_path / "tmp.mat" + # should not error: + savemat(outfile, d, format="4") diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio5_utils.py 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b3f27114c4a4ed10c1a2526058f4d0dbbd0e5638 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio5_utils.py @@ -0,0 +1,179 @@ +""" Testing mio5_utils Cython module + +""" +import sys + +from io import BytesIO + +import numpy as np + +from numpy.testing import assert_array_equal, assert_equal, assert_ +from pytest import raises as assert_raises + +import scipy.io.matlab._byteordercodes as boc +import scipy.io.matlab._streams as streams +import scipy.io.matlab._mio5_params as mio5p +import scipy.io.matlab._mio5_utils as m5u + + +def test_byteswap(): + for val in ( + 1, + 0x100, + 0x10000): + a = np.array(val, dtype=np.uint32) + b = a.byteswap() + c = m5u.byteswap_u4(a) + assert_equal(b.item(), c) + d = m5u.byteswap_u4(c) + assert_equal(a.item(), d) + + +def _make_tag(base_dt, val, mdtype, sde=False): + ''' Makes a simple matlab tag, full or sde ''' + base_dt = np.dtype(base_dt) + bo = boc.to_numpy_code(base_dt.byteorder) + byte_count = base_dt.itemsize + if not sde: + udt = bo + 'u4' + padding = 8 - (byte_count % 8) + all_dt = [('mdtype', udt), + ('byte_count', udt), + ('val', base_dt)] + if padding: + all_dt.append(('padding', 'u1', padding)) + else: # is sde + udt = bo + 'u2' + padding = 4-byte_count + if bo == '<': # little endian + all_dt = [('mdtype', udt), + ('byte_count', udt), + ('val', base_dt)] + else: # big endian + all_dt = [('byte_count', udt), + ('mdtype', udt), + ('val', base_dt)] + if padding: + all_dt.append(('padding', 'u1', padding)) + tag = np.zeros((1,), dtype=all_dt) + tag['mdtype'] = mdtype + tag['byte_count'] = byte_count + tag['val'] = val + return tag + + +def _write_stream(stream, *strings): + stream.truncate(0) + stream.seek(0) + for s in strings: + stream.write(s) + stream.seek(0) + + +def _make_readerlike(stream, byte_order=boc.native_code): + class R: 
+ pass + r = R() + r.mat_stream = stream + r.byte_order = byte_order + r.struct_as_record = True + r.uint16_codec = sys.getdefaultencoding() + r.chars_as_strings = False + r.mat_dtype = False + r.squeeze_me = False + return r + + +def test_read_tag(): + # mainly to test errors + # make reader-like thing + str_io = BytesIO() + r = _make_readerlike(str_io) + c_reader = m5u.VarReader5(r) + # This works for StringIO but _not_ BytesIO + assert_raises(OSError, c_reader.read_tag) + # bad SDE + tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) + tag['byte_count'] = 5 + _write_stream(str_io, tag.tobytes()) + assert_raises(ValueError, c_reader.read_tag) + + +def test_read_stream(): + tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) + tag_str = tag.tobytes() + str_io = BytesIO(tag_str) + st = streams.make_stream(str_io) + s = streams._read_into(st, tag.itemsize) + assert_equal(s, tag.tobytes()) + + +def test_read_numeric(): + # make reader-like thing + str_io = BytesIO() + r = _make_readerlike(str_io) + # check simplest of tags + for base_dt, val, mdtype in (('u2', 30, mio5p.miUINT16), + ('i4', 1, mio5p.miINT32), + ('i2', -1, mio5p.miINT16)): + for byte_code in ('<', '>'): + r.byte_order = byte_code + c_reader = m5u.VarReader5(r) + assert_equal(c_reader.little_endian, byte_code == '<') + assert_equal(c_reader.is_swapped, byte_code != boc.native_code) + for sde_f in (False, True): + dt = np.dtype(base_dt).newbyteorder(byte_code) + a = _make_tag(dt, val, mdtype, sde_f) + a_str = a.tobytes() + _write_stream(str_io, a_str) + el = c_reader.read_numeric() + assert_equal(el, val) + # two sequential reads + _write_stream(str_io, a_str, a_str) + el = c_reader.read_numeric() + assert_equal(el, val) + el = c_reader.read_numeric() + assert_equal(el, val) + + +def test_read_numeric_writeable(): + # make reader-like thing + str_io = BytesIO() + r = _make_readerlike(str_io, '<') + c_reader = m5u.VarReader5(r) + dt = np.dtype('' + rdr.mat_stream.read(4) # presumably byte padding + mdict = 
read_minimat_vars(rdr) + fp.close() + return mdict + + +def test_jottings(): + # example + fname = os.path.join(test_data_path, 'parabola.mat') + read_workspace_vars(fname) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio_utils.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1d19a9797faa2221307a7330b69fffa26410f624 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_mio_utils.py @@ -0,0 +1,45 @@ +""" Testing + +""" + +import numpy as np + +from numpy.testing import assert_array_equal, assert_ + +from scipy.io.matlab._mio_utils import squeeze_element, chars_to_strings + + +def test_squeeze_element(): + a = np.zeros((1,3)) + assert_array_equal(np.squeeze(a), squeeze_element(a)) + # 0-D output from squeeze gives scalar + sq_int = squeeze_element(np.zeros((1,1), dtype=float)) + assert_(isinstance(sq_int, float)) + # Unless it's a structured array + sq_sa = squeeze_element(np.zeros((1,1),dtype=[('f1', 'f')])) + assert_(isinstance(sq_sa, np.ndarray)) + # Squeezing empty arrays maintain their dtypes. 
+ sq_empty = squeeze_element(np.empty(0, np.uint8)) + assert sq_empty.dtype == np.uint8 + + +def test_chars_strings(): + # chars as strings + strings = ['learn ', 'python', 'fast ', 'here '] + str_arr = np.array(strings, dtype='U6') # shape (4,) + chars = [list(s) for s in strings] + char_arr = np.array(chars, dtype='U1') # shape (4,6) + assert_array_equal(chars_to_strings(char_arr), str_arr) + ca2d = char_arr.reshape((2,2,6)) + sa2d = str_arr.reshape((2,2)) + assert_array_equal(chars_to_strings(ca2d), sa2d) + ca3d = char_arr.reshape((1,2,2,6)) + sa3d = str_arr.reshape((1,2,2)) + assert_array_equal(chars_to_strings(ca3d), sa3d) + # Fortran ordered arrays + char_arrf = np.array(chars, dtype='U1', order='F') # shape (4,6) + assert_array_equal(chars_to_strings(char_arrf), str_arr) + # empty array + arr = np.array([['']], dtype='U1') + out_arr = np.array([''], dtype='U1') + assert_array_equal(chars_to_strings(arr), out_arr) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_miobase.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_miobase.py new file mode 100644 index 0000000000000000000000000000000000000000..e07024f980823ce54ef7aa9ad08586f426d94b25 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_miobase.py @@ -0,0 +1,32 @@ +""" Testing miobase module +""" + +import numpy as np + +from numpy.testing import assert_equal +from pytest import raises as assert_raises + +from scipy.io.matlab._miobase import matdims + + +def test_matdims(): + # Test matdims dimension finder + assert_equal(matdims(np.array(1)), (1, 1)) # NumPy scalar + assert_equal(matdims(np.array([1])), (1, 1)) # 1-D array, 1 element + assert_equal(matdims(np.array([1,2])), (2, 1)) # 1-D array, 2 elements + assert_equal(matdims(np.array([[2],[3]])), (2, 1)) # 2-D array, column vector + assert_equal(matdims(np.array([[2,3]])), (1, 2)) # 2-D array, row vector + # 3d array, rowish vector + 
assert_equal(matdims(np.array([[[2,3]]])), (1, 1, 2)) + assert_equal(matdims(np.array([])), (0, 0)) # empty 1-D array + assert_equal(matdims(np.array([[]])), (1, 0)) # empty 2-D array + assert_equal(matdims(np.array([[[]]])), (1, 1, 0)) # empty 3-D array + assert_equal(matdims(np.empty((1, 0, 1))), (1, 0, 1)) # empty 3-D array + # Optional argument flips 1-D shape behavior. + assert_equal(matdims(np.array([1,2]), 'row'), (1, 2)) # 1-D array, 2 elements + # The argument has to make sense though + assert_raises(ValueError, matdims, np.array([1,2]), 'bizarre') + # Check empty sparse matrices get their own shape + from scipy.sparse import csr_matrix, csc_matrix + assert_equal(matdims(csr_matrix(np.zeros((3, 3)))), (3, 3)) + assert_equal(matdims(csc_matrix(np.zeros((2, 2)))), (2, 2)) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_pathological.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_pathological.py new file mode 100644 index 0000000000000000000000000000000000000000..c5c86decb7e90f69f293e90eba74fb47dd4f1277 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_pathological.py @@ -0,0 +1,33 @@ +""" Test reading of files not conforming to matlab specification + +We try and read any file that matlab reads, these files included +""" +from os.path import dirname, join as pjoin + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +from scipy.io.matlab._mio import loadmat + +TEST_DATA_PATH = pjoin(dirname(__file__), 'data') + + +def test_multiple_fieldnames(): + # Example provided by Dharhas Pothina + # Extracted using mio5.varmats_from_mat + multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat') + vars = loadmat(multi_fname) + funny_names = vars['Summary'].dtype.names + assert_({'_1_Station_Q', '_2_Station_Q', + '_3_Station_Q'}.issubset(funny_names)) + + +def test_malformed1(): + # Example from gh-6072 + # Contains malformed header 
data, which previously resulted into a + # buffer overflow. + # + # Should raise an exception, not segfault + fname = pjoin(TEST_DATA_PATH, 'malformed1.mat') + with open(fname, 'rb') as f: + assert_raises(ValueError, loadmat, f) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_streams.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_streams.py new file mode 100644 index 0000000000000000000000000000000000000000..d8768d8e9251c6e47debeb65dff3ec056d38ee56 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/tests/test_streams.py @@ -0,0 +1,232 @@ +""" Testing + +""" + +import os +import zlib + +from io import BytesIO + + +from tempfile import mkstemp +from contextlib import contextmanager + +import numpy as np + +from numpy.testing import assert_, assert_equal +from pytest import raises as assert_raises + +from scipy.io.matlab._streams import (make_stream, + GenericStream, ZlibInputStream, + _read_into, _read_string, BLOCK_SIZE) + + +@contextmanager +def setup_test_file(): + val = b'a\x00string' + fd, fname = mkstemp() + + with os.fdopen(fd, 'wb') as fs: + fs.write(val) + with open(fname, 'rb') as fs: + gs = BytesIO(val) + cs = BytesIO(val) + yield fs, gs, cs + os.unlink(fname) + + +def test_make_stream(): + with setup_test_file() as (fs, gs, cs): + # test stream initialization + assert_(isinstance(make_stream(gs), GenericStream)) + + +def test_tell_seek(): + with setup_test_file() as (fs, gs, cs): + for s in (fs, gs, cs): + st = make_stream(s) + res = st.seek(0) + assert_equal(res, 0) + assert_equal(st.tell(), 0) + res = st.seek(5) + assert_equal(res, 0) + assert_equal(st.tell(), 5) + res = st.seek(2, 1) + assert_equal(res, 0) + assert_equal(st.tell(), 7) + res = st.seek(-2, 2) + assert_equal(res, 0) + assert_equal(st.tell(), 6) + + +def test_read(): + with setup_test_file() as (fs, gs, cs): + for s in (fs, gs, cs): + st = make_stream(s) + st.seek(0) + res = st.read(-1) + assert_equal(res, 
b'a\x00string') + st.seek(0) + res = st.read(4) + assert_equal(res, b'a\x00st') + # read into + st.seek(0) + res = _read_into(st, 4) + assert_equal(res, b'a\x00st') + res = _read_into(st, 4) + assert_equal(res, b'ring') + assert_raises(OSError, _read_into, st, 2) + # read alloc + st.seek(0) + res = _read_string(st, 4) + assert_equal(res, b'a\x00st') + res = _read_string(st, 4) + assert_equal(res, b'ring') + assert_raises(OSError, _read_string, st, 2) + + +class TestZlibInputStream: + def _get_data(self, size): + data = np.random.randint(0, 256, size).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + stream = BytesIO(compressed_data) + return stream, len(compressed_data), data + + def test_read(self): + SIZES = [0, 1, 10, BLOCK_SIZE//2, BLOCK_SIZE-1, + BLOCK_SIZE, BLOCK_SIZE+1, 2*BLOCK_SIZE-1] + + READ_SIZES = [BLOCK_SIZE//2, BLOCK_SIZE-1, + BLOCK_SIZE, BLOCK_SIZE+1] + + def check(size, read_size): + compressed_stream, compressed_data_len, data = self._get_data(size) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + data2 = b'' + so_far = 0 + while True: + block = stream.read(min(read_size, + size - so_far)) + if not block: + break + so_far += len(block) + data2 += block + assert_equal(data, data2) + + for size in SIZES: + for read_size in READ_SIZES: + check(size, read_size) + + def test_read_max_length(self): + size = 1234 + data = np.random.randint(0, 256, size).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + compressed_stream = BytesIO(compressed_data + b"abbacaca") + stream = ZlibInputStream(compressed_stream, len(compressed_data)) + + stream.read(len(data)) + assert_equal(compressed_stream.tell(), len(compressed_data)) + + assert_raises(OSError, stream.read, 1) + + def test_read_bad_checksum(self): + data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + + # break checksum + compressed_data = (compressed_data[:-1] + + bytes([(compressed_data[-1] + 1) & 
255])) + + compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, len(compressed_data)) + + assert_raises(zlib.error, stream.read, len(data)) + + def test_seek(self): + compressed_stream, compressed_data_len, data = self._get_data(1024) + + stream = ZlibInputStream(compressed_stream, compressed_data_len) + + stream.seek(123) + p = 123 + assert_equal(stream.tell(), p) + d1 = stream.read(11) + assert_equal(d1, data[p:p+11]) + + stream.seek(321, 1) + p = 123+11+321 + assert_equal(stream.tell(), p) + d2 = stream.read(21) + assert_equal(d2, data[p:p+21]) + + stream.seek(641, 0) + p = 641 + assert_equal(stream.tell(), p) + d3 = stream.read(11) + assert_equal(d3, data[p:p+11]) + + assert_raises(OSError, stream.seek, 10, 2) + assert_raises(OSError, stream.seek, -1, 1) + assert_raises(ValueError, stream.seek, 1, 123) + + stream.seek(10000, 1) + assert_raises(OSError, stream.read, 12) + + def test_seek_bad_checksum(self): + data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data) + + # break checksum + compressed_data = (compressed_data[:-1] + + bytes([(compressed_data[-1] + 1) & 255])) + + compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, len(compressed_data)) + + assert_raises(zlib.error, stream.seek, len(data)) + + def test_all_data_read(self): + compressed_stream, compressed_data_len, data = self._get_data(1024) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + assert_(not stream.all_data_read()) + stream.seek(512) + assert_(not stream.all_data_read()) + stream.seek(1024) + assert_(stream.all_data_read()) + + def test_all_data_read_overlap(self): + COMPRESSION_LEVEL = 6 + + data = np.arange(33707000).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data, COMPRESSION_LEVEL) + compressed_data_len = len(compressed_data) + + # check that part of the checksum overlaps + assert_(compressed_data_len == BLOCK_SIZE + 2) + + 
compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + assert_(not stream.all_data_read()) + stream.seek(len(data)) + assert_(stream.all_data_read()) + + def test_all_data_read_bad_checksum(self): + COMPRESSION_LEVEL = 6 + + data = np.arange(33707000).astype(np.uint8).tobytes() + compressed_data = zlib.compress(data, COMPRESSION_LEVEL) + compressed_data_len = len(compressed_data) + + # check that part of the checksum overlaps + assert_(compressed_data_len == BLOCK_SIZE + 2) + + # break checksum + compressed_data = (compressed_data[:-1] + + bytes([(compressed_data[-1] + 1) & 255])) + + compressed_stream = BytesIO(compressed_data) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + assert_(not stream.all_data_read()) + stream.seek(len(data)) + + assert_raises(zlib.error, stream.all_data_read) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav new file mode 100644 index 0000000000000000000000000000000000000000..20b9264eeb61d4575b9c0484cc0b7cd59f2efe0a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:823bfffe783dc47fec9b7e21cb109b0a03b800ffbe3d901f0ce02b1f9a269233 +size 3586 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav new file mode 100644 index 0000000000000000000000000000000000000000..aa79a3ae4097ba68e9a0071f273abd466d0dcc48 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:1f42cbcafda573682ecd81a7c780d65d4eaf079ec9ad15fe1bd0dde2a1a1d213 +size 3586 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav new file mode 100644 index 0000000000000000000000000000000000000000..8c227b753036941bf9b2488c1064a89c7608c66c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:617d60f2a7423801b5eaf5fd1baab84ac7c28f6655935f7d8f30d0f12d335982 +size 72 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav new file mode 100644 index 0000000000000000000000000000000000000000..b3c27ca654cd8fb73621c6ac226eb340fe808ed1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0685020dcb771a263f24957f1e88211779b301e6d3d7c8f4d9cf8c3a7d7d2b15 +size 70 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav new file mode 100644 index 0000000000000000000000000000000000000000..fdd1849f98e5883c46e6c2889e7a32f484b1e847 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b76320ae2de1e892d00de92bc0884304e686e3a394cc7ca7533d2929bbcea4d5 +size 90 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..23be5c7b67cc6c3209b9d66d2897e1daf2fb54e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c82bf4ba1faec7fb2426cc5e3a3ce88346019376503bf930e4724732ab55c88d +size 90 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..2e9dc76040faaea314a81b66b024a01b63d1ff73 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bded7a0facf1890c887c9ceea68a2fff562639c95966b783a73d0a03375763b +size 134 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..09ea515215b4f92f881bf97900ac2b56ee0c89ca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52ee503d071bb671659f139de33146c698930b8c20769e893a8609d95319214d +size 164 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..d2fa9b3a9e9ba86eefbd1cee5378ba56a083cf3c --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c9bc653b1a9817742addae3ccc943b6989e0cc9c32be95630caa55a34f6dc16 +size 89