diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_double_ref.npz b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_double_ref.npz new file mode 100644 index 0000000000000000000000000000000000000000..e1e3d620400746177b560b9193efce03c2841e99 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_double_ref.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a60c649415b645223924d8342ccc5c097801c86901287a369e53fc9259f5ec4e +size 162120 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_longdouble_ref.npz b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_longdouble_ref.npz new file mode 100644 index 0000000000000000000000000000000000000000..b1a646889c9889541e8d368c8c2d96520d183dc4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_longdouble_ref.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a406cbd4dad04d0c59dd38f54416fb49424c82229c1a074b6a44ec0cde2000e3 +size 296072 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz new file mode 100644 index 0000000000000000000000000000000000000000..a42748dba14b7ff0d2f53ce4cd5a86a4f08e5d93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:276a9141318e6fc36e4ab6ff54a61b64054ef8849b660f17359e5f541b43c526 +size 95144 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test.npz b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test.npz new file mode 100644 index 0000000000000000000000000000000000000000..1e5a4e06615c6bcc58f0feff20f73e83439a937d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test.npz @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:36de804a22d8fdea054590ce49ddf3c859838b7d89193c56b3bcb660cbf43797 +size 11968 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1417abe144142078b38c45794f73178cec486b8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/__init__.py @@ -0,0 +1,116 @@ +""" +================================== +Input and output (:mod:`scipy.io`) +================================== + +.. currentmodule:: scipy.io + +SciPy has many modules, classes, and functions available to read data +from and write data to a variety of file formats. + +.. seealso:: `NumPy IO routines `__ + +MATLAB® files +============= + +.. autosummary:: + :toctree: generated/ + + loadmat - Read a MATLAB style mat file (version 4 through 7.1) + savemat - Write a MATLAB style mat file (version 4 through 7.1) + whosmat - List contents of a MATLAB style mat file (version 4 through 7.1) + +For low-level MATLAB reading and writing utilities, see `scipy.io.matlab`. + +IDL® files +========== + +.. autosummary:: + :toctree: generated/ + + readsav - Read an IDL 'save' file + +Matrix Market files +=================== + +.. autosummary:: + :toctree: generated/ + + mminfo - Query matrix info from Matrix Market formatted file + mmread - Read matrix from Matrix Market formatted file + mmwrite - Write matrix to Matrix Market formatted file + +Unformatted Fortran files +=============================== + +.. autosummary:: + :toctree: generated/ + + FortranFile - A file object for unformatted sequential Fortran files + FortranEOFError - Exception indicating the end of a well-formed file + FortranFormattingError - Exception indicating an inappropriate end + +Netcdf +====== + +.. 
autosummary:: + :toctree: generated/ + + netcdf_file - A file object for NetCDF data + netcdf_variable - A data object for the netcdf module + +Harwell-Boeing files +==================== + +.. autosummary:: + :toctree: generated/ + + hb_read -- read H-B file + hb_write -- write H-B file + +Wav sound files (:mod:`scipy.io.wavfile`) +========================================= + +.. module:: scipy.io.wavfile + +.. autosummary:: + :toctree: generated/ + + read + write + WavFileWarning + +Arff files (:mod:`scipy.io.arff`) +================================= + +.. module:: scipy.io.arff + +.. autosummary:: + :toctree: generated/ + + loadarff + MetaData + ArffError + ParseArffError +""" +# matfile read and write +from .matlab import loadmat, savemat, whosmat + +# netCDF file support +from ._netcdf import netcdf_file, netcdf_variable + +# Fortran file support +from ._fortran import FortranFile, FortranEOFError, FortranFormattingError + +from ._fast_matrix_market import mminfo, mmread, mmwrite +from ._idl import readsav +from ._harwell_boeing import hb_read, hb_write + +# Deprecated namespaces, to be removed in v2.0.0 +from . import arff, harwell_boeing, idl, mmio, netcdf, wavfile + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/_fortran.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/_fortran.py new file mode 100644 index 0000000000000000000000000000000000000000..a4c93c57bcdab3b5a40437a147a384167772c306 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/_fortran.py @@ -0,0 +1,354 @@ +""" +Module to read / write Fortran unformatted sequential files. + +This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz. 
+ +""" +import warnings +import numpy as np + +__all__ = ['FortranFile', 'FortranEOFError', 'FortranFormattingError'] + + +class FortranEOFError(TypeError, OSError): + """Indicates that the file ended properly. + + This error descends from TypeError because the code used to raise + TypeError (and this was the only way to know that the file had + ended) so users might have ``except TypeError:``. + + """ + pass + + +class FortranFormattingError(TypeError, OSError): + """Indicates that the file ended mid-record. + + Descends from TypeError for backward compatibility. + + """ + pass + + +class FortranFile: + """ + A file object for unformatted sequential files from Fortran code. + + Parameters + ---------- + filename : file or str + Open file object or filename. + mode : {'r', 'w'}, optional + Read-write mode, default is 'r'. + header_dtype : dtype, optional + Data type of the header. Size and endianness must match the input/output file. + + Notes + ----- + These files are broken up into records of unspecified types. The size of + each record is given at the start (although the size of this header is not + standard) and the data is written onto disk without any formatting. Fortran + compilers supporting the BACKSPACE statement will write a second copy of + the size to facilitate backwards seeking. + + This class only supports files written with both sizes for the record. + It also does not support the subrecords used in Intel and gfortran compilers + for records which are greater than 2GB with a 4-byte header. + + An example of an unformatted sequential file in Fortran would be written as:: + + OPEN(1, FILE=myfilename, FORM='unformatted') + + WRITE(1) myvariable + + Since this is a non-standard file format, whose contents depend on the + compiler and the endianness of the machine, caution is advised. Files from + gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work. 
+ + Consider using Fortran direct-access files or files from the newer Stream + I/O, which can be easily read by `numpy.fromfile`. + + Examples + -------- + To create an unformatted sequential Fortran file: + + >>> from scipy.io import FortranFile + >>> import numpy as np + >>> f = FortranFile('test.unf', 'w') + >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32)) + >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T) + >>> f.close() + + To read this file: + + >>> f = FortranFile('test.unf', 'r') + >>> print(f.read_ints(np.int32)) + [1 2 3 4 5] + >>> print(f.read_reals(float).reshape((5,4), order="F")) + [[0. 0.05263158 0.10526316 0.15789474] + [0.21052632 0.26315789 0.31578947 0.36842105] + [0.42105263 0.47368421 0.52631579 0.57894737] + [0.63157895 0.68421053 0.73684211 0.78947368] + [0.84210526 0.89473684 0.94736842 1. ]] + >>> f.close() + + Or, in Fortran:: + + integer :: a(5), i + double precision :: b(5,4) + open(1, file='test.unf', form='unformatted') + read(1) a + read(1) b + close(1) + write(*,*) a + do i = 1, 5 + write(*,*) b(i,:) + end do + + """ + def __init__(self, filename, mode='r', header_dtype=np.uint32): + if header_dtype is None: + raise ValueError('Must specify dtype') + + header_dtype = np.dtype(header_dtype) + if header_dtype.kind != 'u': + warnings.warn("Given a dtype which is not unsigned.", stacklevel=2) + + if mode not in 'rw' or len(mode) != 1: + raise ValueError('mode must be either r or w') + + if hasattr(filename, 'seek'): + self._fp = filename + else: + self._fp = open(filename, '%sb' % mode) + + self._header_dtype = header_dtype + + def _read_size(self, eof_ok=False): + n = self._header_dtype.itemsize + b = self._fp.read(n) + if (not b) and eof_ok: + raise FortranEOFError("End of file occurred at end of record") + elif len(b) < n: + raise FortranFormattingError( + "End of file in the middle of the record size") + return int(np.frombuffer(b, dtype=self._header_dtype, count=1)[0]) + + def write_record(self, *items): + """ + 
Write a record (including sizes) to the file. + + Parameters + ---------- + *items : array_like + The data arrays to write. + + Notes + ----- + Writes data items to a file:: + + write_record(a.T, b.T, c.T, ...) + + write(1) a, b, c, ... + + Note that data in multidimensional arrays is written in + row-major order --- to make them read correctly by Fortran + programs, you need to transpose the arrays yourself when + writing them. + + """ + items = tuple(np.asarray(item) for item in items) + total_size = sum(item.nbytes for item in items) + + nb = np.array([total_size], dtype=self._header_dtype) + + nb.tofile(self._fp) + for item in items: + item.tofile(self._fp) + nb.tofile(self._fp) + + def read_record(self, *dtypes, **kwargs): + """ + Reads a record of a given type from the file. + + Parameters + ---------- + *dtypes : dtypes, optional + Data type(s) specifying the size and endianness of the data. + + Returns + ------- + data : ndarray + A 1-D array object. + + Raises + ------ + FortranEOFError + To signal that no further records are available + FortranFormattingError + To signal that the end of the file was encountered + part-way through a record + + Notes + ----- + If the record contains a multidimensional array, you can specify + the size in the dtype. For example:: + + INTEGER var(5,4) + + can be read with:: + + read_record('(4,5)i4').T + + Note that this function does **not** assume the file data is in Fortran + column major order, so you need to (i) swap the order of dimensions + when reading and (ii) transpose the resulting array. + + Alternatively, you can read the data as a 1-D array and handle the + ordering yourself. 
For example:: + + read_record('i4').reshape(5, 4, order='F') + + For records that contain several variables or mixed types (as opposed + to single scalar or array types), give them as separate arguments:: + + double precision :: a + integer :: b + write(1) a, b + + record = f.read_record('u1', + 2: '>i2', + 3: '>i4', + 4: '>f4', + 5: '>f8', + 6: '>c8', + 7: '|O', + 8: '|O', + 9: '>c16', + 10: '|O', + 11: '|O', + 12: '>u2', + 13: '>u4', + 14: '>i8', + 15: '>u8'} + +# Define the different record types that can be found in an IDL save file +RECTYPE_DICT = {0: "START_MARKER", + 1: "COMMON_VARIABLE", + 2: "VARIABLE", + 3: "SYSTEM_VARIABLE", + 6: "END_MARKER", + 10: "TIMESTAMP", + 12: "COMPILED", + 13: "IDENTIFICATION", + 14: "VERSION", + 15: "HEAP_HEADER", + 16: "HEAP_DATA", + 17: "PROMOTE64", + 19: "NOTICE", + 20: "DESCRIPTION"} + +# Define a dictionary to contain structure definitions +STRUCT_DICT = {} + + +def _align_32(f): + '''Align to the next 32-bit position in a file''' + + pos = f.tell() + if pos % 4 != 0: + f.seek(pos + 4 - pos % 4) + return + + +def _skip_bytes(f, n): + '''Skip `n` bytes''' + f.read(n) + return + + +def _read_bytes(f, n): + '''Read the next `n` bytes''' + return f.read(n) + + +def _read_byte(f): + '''Read a single byte''' + return np.uint8(struct.unpack('>B', f.read(4)[:1])[0]) + + +def _read_long(f): + '''Read a signed 32-bit integer''' + return np.int32(struct.unpack('>l', f.read(4))[0]) + + +def _read_int16(f): + '''Read a signed 16-bit integer''' + return np.int16(struct.unpack('>h', f.read(4)[2:4])[0]) + + +def _read_int32(f): + '''Read a signed 32-bit integer''' + return np.int32(struct.unpack('>i', f.read(4))[0]) + + +def _read_int64(f): + '''Read a signed 64-bit integer''' + return np.int64(struct.unpack('>q', f.read(8))[0]) + + +def _read_uint16(f): + '''Read an unsigned 16-bit integer''' + return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0]) + + +def _read_uint32(f): + '''Read an unsigned 32-bit integer''' + return 
np.uint32(struct.unpack('>I', f.read(4))[0]) + + +def _read_uint64(f): + '''Read an unsigned 64-bit integer''' + return np.uint64(struct.unpack('>Q', f.read(8))[0]) + + +def _read_float32(f): + '''Read a 32-bit float''' + return np.float32(struct.unpack('>f', f.read(4))[0]) + + +def _read_float64(f): + '''Read a 64-bit float''' + return np.float64(struct.unpack('>d', f.read(8))[0]) + + +class Pointer: + '''Class used to define pointers''' + + def __init__(self, index): + self.index = index + return + + +class ObjectPointer(Pointer): + '''Class used to define object pointers''' + pass + + +def _read_string(f): + '''Read a string''' + length = _read_long(f) + if length > 0: + chars = _read_bytes(f, length).decode('latin1') + _align_32(f) + else: + chars = '' + return chars + + +def _read_string_data(f): + '''Read a data string (length is specified twice)''' + length = _read_long(f) + if length > 0: + length = _read_long(f) + string_data = _read_bytes(f, length) + _align_32(f) + else: + string_data = '' + return string_data + + +def _read_data(f, dtype): + '''Read a variable with a specified data type''' + if dtype == 1: + if _read_int32(f) != 1: + raise Exception("Error occurred while reading byte variable") + return _read_byte(f) + elif dtype == 2: + return _read_int16(f) + elif dtype == 3: + return _read_int32(f) + elif dtype == 4: + return _read_float32(f) + elif dtype == 5: + return _read_float64(f) + elif dtype == 6: + real = _read_float32(f) + imag = _read_float32(f) + return np.complex64(real + imag * 1j) + elif dtype == 7: + return _read_string_data(f) + elif dtype == 8: + raise Exception("Should not be here - please report this") + elif dtype == 9: + real = _read_float64(f) + imag = _read_float64(f) + return np.complex128(real + imag * 1j) + elif dtype == 10: + return Pointer(_read_int32(f)) + elif dtype == 11: + return ObjectPointer(_read_int32(f)) + elif dtype == 12: + return _read_uint16(f) + elif dtype == 13: + return _read_uint32(f) + elif dtype == 14: 
+ return _read_int64(f) + elif dtype == 15: + return _read_uint64(f) + else: + raise Exception("Unknown IDL type: %i - please report this" % dtype) + + +def _read_structure(f, array_desc, struct_desc): + ''' + Read a structure, with the array and structure descriptors given as + `array_desc` and `structure_desc` respectively. + ''' + + nrows = array_desc['nelements'] + columns = struct_desc['tagtable'] + + dtype = [] + for col in columns: + if col['structure'] or col['array']: + dtype.append(((col['name'].lower(), col['name']), np.object_)) + else: + if col['typecode'] in DTYPE_DICT: + dtype.append(((col['name'].lower(), col['name']), + DTYPE_DICT[col['typecode']])) + else: + raise Exception("Variable type %i not implemented" % + col['typecode']) + + structure = np.rec.recarray((nrows, ), dtype=dtype) + + for i in range(nrows): + for col in columns: + dtype = col['typecode'] + if col['structure']: + structure[col['name']][i] = _read_structure(f, + struct_desc['arrtable'][col['name']], + struct_desc['structtable'][col['name']]) + elif col['array']: + structure[col['name']][i] = _read_array(f, dtype, + struct_desc['arrtable'][col['name']]) + else: + structure[col['name']][i] = _read_data(f, dtype) + + # Reshape structure if needed + if array_desc['ndims'] > 1: + dims = array_desc['dims'][:int(array_desc['ndims'])] + dims.reverse() + structure = structure.reshape(dims) + + return structure + + +def _read_array(f, typecode, array_desc): + ''' + Read an array of type `typecode`, with the array descriptor given as + `array_desc`. 
+ ''' + + if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]: + + if typecode == 1: + nbytes = _read_int32(f) + if nbytes != array_desc['nbytes']: + warnings.warn("Not able to verify number of bytes from header", + stacklevel=3) + + # Read bytes as numpy array + array = np.frombuffer(f.read(array_desc['nbytes']), + dtype=DTYPE_DICT[typecode]) + + elif typecode in [2, 12]: + + # These are 2 byte types, need to skip every two as they are not packed + + array = np.frombuffer(f.read(array_desc['nbytes']*2), + dtype=DTYPE_DICT[typecode])[1::2] + + else: + + # Read bytes into list + array = [] + for i in range(array_desc['nelements']): + dtype = typecode + data = _read_data(f, dtype) + array.append(data) + + array = np.array(array, dtype=np.object_) + + # Reshape array if needed + if array_desc['ndims'] > 1: + dims = array_desc['dims'][:int(array_desc['ndims'])] + dims.reverse() + array = array.reshape(dims) + + # Go to next alignment position + _align_32(f) + + return array + + +def _read_record(f): + '''Function to read in a full record''' + + record = {'rectype': _read_long(f)} + + nextrec = _read_uint32(f) + nextrec += _read_uint32(f).astype(np.int64) * 2**32 + + _skip_bytes(f, 4) + + if record['rectype'] not in RECTYPE_DICT: + raise Exception("Unknown RECTYPE: %i" % record['rectype']) + + record['rectype'] = RECTYPE_DICT[record['rectype']] + + if record['rectype'] in ["VARIABLE", "HEAP_DATA"]: + + if record['rectype'] == "VARIABLE": + record['varname'] = _read_string(f) + else: + record['heap_index'] = _read_long(f) + _skip_bytes(f, 4) + + rectypedesc = _read_typedesc(f) + + if rectypedesc['typecode'] == 0: + + if nextrec == f.tell(): + record['data'] = None # Indicates NULL value + else: + raise ValueError("Unexpected type code: 0") + + else: + + varstart = _read_long(f) + if varstart != 7: + raise Exception("VARSTART is not 7") + + if rectypedesc['structure']: + record['data'] = _read_structure(f, rectypedesc['array_desc'], + rectypedesc['struct_desc']) + elif 
rectypedesc['array']: + record['data'] = _read_array(f, rectypedesc['typecode'], + rectypedesc['array_desc']) + else: + dtype = rectypedesc['typecode'] + record['data'] = _read_data(f, dtype) + + elif record['rectype'] == "TIMESTAMP": + + _skip_bytes(f, 4*256) + record['date'] = _read_string(f) + record['user'] = _read_string(f) + record['host'] = _read_string(f) + + elif record['rectype'] == "VERSION": + + record['format'] = _read_long(f) + record['arch'] = _read_string(f) + record['os'] = _read_string(f) + record['release'] = _read_string(f) + + elif record['rectype'] == "IDENTIFICATON": + + record['author'] = _read_string(f) + record['title'] = _read_string(f) + record['idcode'] = _read_string(f) + + elif record['rectype'] == "NOTICE": + + record['notice'] = _read_string(f) + + elif record['rectype'] == "DESCRIPTION": + + record['description'] = _read_string_data(f) + + elif record['rectype'] == "HEAP_HEADER": + + record['nvalues'] = _read_long(f) + record['indices'] = [_read_long(f) for _ in range(record['nvalues'])] + + elif record['rectype'] == "COMMONBLOCK": + + record['nvars'] = _read_long(f) + record['name'] = _read_string(f) + record['varnames'] = [_read_string(f) for _ in range(record['nvars'])] + + elif record['rectype'] == "END_MARKER": + + record['end'] = True + + elif record['rectype'] == "UNKNOWN": + + warnings.warn("Skipping UNKNOWN record", stacklevel=3) + + elif record['rectype'] == "SYSTEM_VARIABLE": + + warnings.warn("Skipping SYSTEM_VARIABLE record", stacklevel=3) + + else: + + raise Exception(f"record['rectype']={record['rectype']} not implemented") + + f.seek(nextrec) + + return record + + +def _read_typedesc(f): + '''Function to read in a type descriptor''' + + typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)} + + if typedesc['varflags'] & 2 == 2: + raise Exception("System variables not implemented") + + typedesc['array'] = typedesc['varflags'] & 4 == 4 + typedesc['structure'] = typedesc['varflags'] & 32 == 32 + + if 
typedesc['structure']: + typedesc['array_desc'] = _read_arraydesc(f) + typedesc['struct_desc'] = _read_structdesc(f) + elif typedesc['array']: + typedesc['array_desc'] = _read_arraydesc(f) + + return typedesc + + +def _read_arraydesc(f): + '''Function to read in an array descriptor''' + + arraydesc = {'arrstart': _read_long(f)} + + if arraydesc['arrstart'] == 8: + + _skip_bytes(f, 4) + + arraydesc['nbytes'] = _read_long(f) + arraydesc['nelements'] = _read_long(f) + arraydesc['ndims'] = _read_long(f) + + _skip_bytes(f, 8) + + arraydesc['nmax'] = _read_long(f) + + arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])] + + elif arraydesc['arrstart'] == 18: + + warnings.warn("Using experimental 64-bit array read", stacklevel=3) + + _skip_bytes(f, 8) + + arraydesc['nbytes'] = _read_uint64(f) + arraydesc['nelements'] = _read_uint64(f) + arraydesc['ndims'] = _read_long(f) + + _skip_bytes(f, 8) + + arraydesc['nmax'] = 8 + + arraydesc['dims'] = [] + for d in range(arraydesc['nmax']): + v = _read_long(f) + if v != 0: + raise Exception("Expected a zero in ARRAY_DESC") + arraydesc['dims'].append(_read_long(f)) + + else: + + raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart']) + + return arraydesc + + +def _read_structdesc(f): + '''Function to read in a structure descriptor''' + + structdesc = {} + + structstart = _read_long(f) + if structstart != 9: + raise Exception("STRUCTSTART should be 9") + + structdesc['name'] = _read_string(f) + predef = _read_long(f) + structdesc['ntags'] = _read_long(f) + structdesc['nbytes'] = _read_long(f) + + structdesc['predef'] = predef & 1 + structdesc['inherits'] = predef & 2 + structdesc['is_super'] = predef & 4 + + if not structdesc['predef']: + + structdesc['tagtable'] = [_read_tagdesc(f) + for _ in range(structdesc['ntags'])] + + for tag in structdesc['tagtable']: + tag['name'] = _read_string(f) + + structdesc['arrtable'] = {tag['name']: _read_arraydesc(f) + for tag in structdesc['tagtable'] + if tag['array']} + + 
structdesc['structtable'] = {tag['name']: _read_structdesc(f) + for tag in structdesc['tagtable'] + if tag['structure']} + + if structdesc['inherits'] or structdesc['is_super']: + structdesc['classname'] = _read_string(f) + structdesc['nsupclasses'] = _read_long(f) + structdesc['supclassnames'] = [ + _read_string(f) for _ in range(structdesc['nsupclasses'])] + structdesc['supclasstable'] = [ + _read_structdesc(f) for _ in range(structdesc['nsupclasses'])] + + STRUCT_DICT[structdesc['name']] = structdesc + + else: + + if structdesc['name'] not in STRUCT_DICT: + raise Exception("PREDEF=1 but can't find definition") + + structdesc = STRUCT_DICT[structdesc['name']] + + return structdesc + + +def _read_tagdesc(f): + '''Function to read in a tag descriptor''' + + tagdesc = {'offset': _read_long(f)} + + if tagdesc['offset'] == -1: + tagdesc['offset'] = _read_uint64(f) + + tagdesc['typecode'] = _read_long(f) + tagflags = _read_long(f) + + tagdesc['array'] = tagflags & 4 == 4 + tagdesc['structure'] = tagflags & 32 == 32 + tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT + # Assume '10'x is scalar + + return tagdesc + + +def _replace_heap(variable, heap): + + if isinstance(variable, Pointer): + + while isinstance(variable, Pointer): + + if variable.index == 0: + variable = None + else: + if variable.index in heap: + variable = heap[variable.index] + else: + warnings.warn("Variable referenced by pointer not found " + "in heap: variable will be set to None", + stacklevel=3) + variable = None + + replace, new = _replace_heap(variable, heap) + + if replace: + variable = new + + return True, variable + + elif isinstance(variable, np.rec.recarray): + + # Loop over records + for ir, record in enumerate(variable): + + replace, new = _replace_heap(record, heap) + + if replace: + variable[ir] = new + + return False, variable + + elif isinstance(variable, np.record): + + # Loop over values + for iv, value in enumerate(variable): + + replace, new = _replace_heap(value, heap) + + if 
replace: + variable[iv] = new + + return False, variable + + elif isinstance(variable, np.ndarray): + + # Loop over values if type is np.object_ + if variable.dtype.type is np.object_: + + for iv in range(variable.size): + + replace, new = _replace_heap(variable.item(iv), heap) + + if replace: + variable.reshape(-1)[iv] = new + + return False, variable + + else: + + return False, variable + + +class AttrDict(dict): + ''' + A case-insensitive dictionary with access via item, attribute, and call + notations: + + >>> from scipy.io._idl import AttrDict + >>> d = AttrDict() + >>> d['Variable'] = 123 + >>> d['Variable'] + 123 + >>> d.Variable + 123 + >>> d.variable + 123 + >>> d('VARIABLE') + 123 + >>> d['missing'] + Traceback (most recent error last): + ... + KeyError: 'missing' + >>> d.missing + Traceback (most recent error last): + ... + AttributeError: 'AttrDict' object has no attribute 'missing' + ''' + + def __init__(self, init={}): + dict.__init__(self, init) + + def __getitem__(self, name): + return super().__getitem__(name.lower()) + + def __setitem__(self, key, value): + return super().__setitem__(key.lower(), value) + + def __getattr__(self, name): + try: + return self.__getitem__(name) + except KeyError: + raise AttributeError( + f"'{type(self)}' object has no attribute '{name}'") from None + + __setattr__ = __setitem__ + __call__ = __getitem__ + + +def readsav(file_name, idict=None, python_dict=False, + uncompressed_file_name=None, verbose=False): + """ + Read an IDL .sav file. + + Parameters + ---------- + file_name : str + Name of the IDL save file. + idict : dict, optional + Dictionary in which to insert .sav file variables. + python_dict : bool, optional + By default, the object return is not a Python dictionary, but a + case-insensitive dictionary with item, attribute, and call access + to variables. To get a standard Python dictionary, set this option + to True. 
+ uncompressed_file_name : str, optional + This option only has an effect for .sav files written with the + /compress option. If a file name is specified, compressed .sav + files are uncompressed to this file. Otherwise, readsav will use + the `tempfile` module to determine a temporary filename + automatically, and will remove the temporary file upon successfully + reading it in. + verbose : bool, optional + Whether to print out information about the save file, including + the records read, and available variables. + + Returns + ------- + idl_dict : AttrDict or dict + If `python_dict` is set to False (default), this function returns a + case-insensitive dictionary with item, attribute, and call access + to variables. If `python_dict` is set to True, this function + returns a Python dictionary with all variable names in lowercase. + If `idict` was specified, then variables are written to the + dictionary specified, and the updated dictionary is returned. + + Examples + -------- + >>> from os.path import dirname, join as pjoin + >>> import scipy.io as sio + >>> from scipy.io import readsav + + Get the filename for an example .sav file from the tests/data directory. + + >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data') + >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav') + + Load the .sav file contents. + + >>> sav_data = readsav(sav_fname) + + Get keys of the .sav file contents. + + >>> print(sav_data.keys()) + dict_keys(['array1d']) + + Access a content with a key. + + >>> print(sav_data['array1d']) + [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0.] 
+ + """ + + # Initialize record and variable holders + records = [] + if python_dict or idict: + variables = {} + else: + variables = AttrDict() + + # Open the IDL file + f = open(file_name, 'rb') + + # Read the signature, which should be 'SR' + signature = _read_bytes(f, 2) + if signature != b'SR': + raise Exception("Invalid SIGNATURE: %s" % signature) + + # Next, the record format, which is '\x00\x04' for normal .sav + # files, and '\x00\x06' for compressed .sav files. + recfmt = _read_bytes(f, 2) + + if recfmt == b'\x00\x04': + pass + + elif recfmt == b'\x00\x06': + + if verbose: + print("IDL Save file is compressed") + + if uncompressed_file_name: + fout = open(uncompressed_file_name, 'w+b') + else: + fout = tempfile.NamedTemporaryFile(suffix='.sav') + + if verbose: + print(" -> expanding to %s" % fout.name) + + # Write header + fout.write(b'SR\x00\x04') + + # Cycle through records + while True: + + # Read record type + rectype = _read_long(f) + fout.write(struct.pack('>l', int(rectype))) + + # Read position of next record and return as int + nextrec = _read_uint32(f) + nextrec += _read_uint32(f).astype(np.int64) * 2**32 + + # Read the unknown 4 bytes + unknown = f.read(4) + + # Check if the end of the file has been reached + if RECTYPE_DICT[rectype] == 'END_MARKER': + modval = np.int64(2**32) + fout.write(struct.pack('>I', int(nextrec) % modval)) + fout.write( + struct.pack('>I', int((nextrec - (nextrec % modval)) / modval)) + ) + fout.write(unknown) + break + + # Find current position + pos = f.tell() + + # Decompress record + rec_string = zlib.decompress(f.read(nextrec-pos)) + + # Find new position of next record + nextrec = fout.tell() + len(rec_string) + 12 + + # Write out record + fout.write(struct.pack('>I', int(nextrec % 2**32))) + fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) + fout.write(unknown) + fout.write(rec_string) + + # Close the original compressed file + f.close() + + # Set f to be the decompressed file, and skip 
the first four bytes + f = fout + f.seek(4) + + else: + raise Exception("Invalid RECFMT: %s" % recfmt) + + # Loop through records, and add them to the list + while True: + r = _read_record(f) + records.append(r) + if 'end' in r: + if r['end']: + break + + # Close the file + f.close() + + # Find heap data variables + heap = {} + for r in records: + if r['rectype'] == "HEAP_DATA": + heap[r['heap_index']] = r['data'] + + # Find all variables + for r in records: + if r['rectype'] == "VARIABLE": + replace, new = _replace_heap(r['data'], heap) + if replace: + r['data'] = new + variables[r['varname'].lower()] = r['data'] + + if verbose: + + # Print out timestamp info about the file + for record in records: + if record['rectype'] == "TIMESTAMP": + print("-"*50) + print("Date: %s" % record['date']) + print("User: %s" % record['user']) + print("Host: %s" % record['host']) + break + + # Print out version info about the file + for record in records: + if record['rectype'] == "VERSION": + print("-"*50) + print("Format: %s" % record['format']) + print("Architecture: %s" % record['arch']) + print("Operating System: %s" % record['os']) + print("IDL Version: %s" % record['release']) + break + + # Print out identification info about the file + for record in records: + if record['rectype'] == "IDENTIFICATON": + print("-"*50) + print("Author: %s" % record['author']) + print("Title: %s" % record['title']) + print("ID Code: %s" % record['idcode']) + break + + # Print out descriptions saved with the file + for record in records: + if record['rectype'] == "DESCRIPTION": + print("-"*50) + print("Description: %s" % record['description']) + break + + print("-"*50) + print("Successfully read %i records of which:" % + (len(records))) + + # Create convenience list of record types + rectypes = [r['rectype'] for r in records] + + for rt in set(rectypes): + if rt != 'END_MARKER': + print(" - %i are of type %s" % (rectypes.count(rt), rt)) + print("-"*50) + + if 'VARIABLE' in rectypes: + 
def asstr(s):
    """Return `s` as text: decode bytes as latin-1, stringify anything else."""
    return s.decode('latin1') if isinstance(s, bytes) else str(s)
def mmread(source):
    """
    Read the contents of a Matrix Market file-like `source` into a matrix.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extensions .mtx, .mtz.gz)
        or open file-like object.

    Returns
    -------
    a : ndarray or coo_matrix
        Dense or sparse matrix, depending on the storage format declared
        in the Matrix Market header ('array' or 'coordinate').
    """
    # All header parsing and body decoding is delegated to a throwaway
    # MMFile instance.
    return MMFile().read(source)

# -----------------------------------------------------------------------------


def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
    """
    Write the sparse or dense array `a` to Matrix Market file-like `target`.

    Parameters
    ----------
    target : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.
    a : array like
        Sparse or dense 2-D array.
    comment : str, optional
        Comments to be prepended to the Matrix Market file.
    field : None or str, optional
        Either 'real', 'complex', 'pattern', or 'integer'.
    precision : None or int, optional
        Number of digits to display for real or complex values.
    symmetry : None or str, optional
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
        If symmetry is None the symmetry type of 'a' is determined by its
        values.

    Returns
    -------
    None
    """
    # Format selection (array vs coordinate), symmetry detection and the
    # actual encoding all happen inside MMFile.write.
    MMFile().write(target, a, comment, field, precision, symmetry)
+ FIELD_UNSIGNED = 'unsigned-integer' + FIELD_REAL = 'real' + FIELD_COMPLEX = 'complex' + FIELD_PATTERN = 'pattern' + FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, + FIELD_PATTERN) + + @classmethod + def _validate_field(self, field): + if field not in self.FIELD_VALUES: + msg = f'unknown field type {field}, must be one of {self.FIELD_VALUES}' + raise ValueError(msg) + + # symmetry values + SYMMETRY_GENERAL = 'general' + SYMMETRY_SYMMETRIC = 'symmetric' + SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric' + SYMMETRY_HERMITIAN = 'hermitian' + SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, + SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN) + + @classmethod + def _validate_symmetry(self, symmetry): + if symmetry not in self.SYMMETRY_VALUES: + raise ValueError(f'unknown symmetry type {symmetry}, ' + f'must be one of {self.SYMMETRY_VALUES}') + + DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp', + FIELD_UNSIGNED: 'uint64', + FIELD_REAL: 'd', + FIELD_COMPLEX: 'D', + FIELD_PATTERN: 'd'} + + # ------------------------------------------------------------------------- + @staticmethod + def reader(): + pass + + # ------------------------------------------------------------------------- + @staticmethod + def writer(): + pass + + # ------------------------------------------------------------------------- + @classmethod + def info(self, source): + """ + Return size, storage parameters from Matrix Market file-like 'source'. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extension .mtx) or open file-like object + + Returns + ------- + rows : int + Number of matrix rows. + cols : int + Number of matrix columns. + entries : int + Number of non-zero entries of a sparse matrix + or rows*cols for a dense matrix. + format : str + Either 'coordinate' or 'array'. + field : str + Either 'real', 'complex', 'pattern', or 'integer'. + symmetry : str + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. 
+ """ + + stream, close_it = self._open(source) + + try: + + # read and validate header line + line = stream.readline() + mmid, matrix, format, field, symmetry = \ + (asstr(part.strip()) for part in line.split()) + if not mmid.startswith('%%MatrixMarket'): + raise ValueError('source is not in Matrix Market format') + if not matrix.lower() == 'matrix': + raise ValueError("Problem reading file header: " + line) + + # http://math.nist.gov/MatrixMarket/formats.html + if format.lower() == 'array': + format = self.FORMAT_ARRAY + elif format.lower() == 'coordinate': + format = self.FORMAT_COORDINATE + + # skip comments + # line.startswith('%') + while line: + if line.lstrip() and line.lstrip()[0] in ['%', 37]: + line = stream.readline() + else: + break + + # skip empty lines + while not line.strip(): + line = stream.readline() + + split_line = line.split() + if format == self.FORMAT_ARRAY: + if not len(split_line) == 2: + raise ValueError("Header line not of length 2: " + + line.decode('ascii')) + rows, cols = map(int, split_line) + entries = rows * cols + else: + if not len(split_line) == 3: + raise ValueError("Header line not of length 3: " + + line.decode('ascii')) + rows, cols, entries = map(int, split_line) + + return (rows, cols, entries, format, field.lower(), + symmetry.lower()) + + finally: + if close_it: + stream.close() + + # ------------------------------------------------------------------------- + @staticmethod + def _open(filespec, mode='rb'): + """ Return an open file stream for reading based on source. + + If source is a file name, open it (after trying to find it with mtx and + gzipped mtx extensions). Otherwise, just return source. + + Parameters + ---------- + filespec : str or file-like + String giving file name or file-like object + mode : str, optional + Mode with which to open file, if `filespec` is a file name. + + Returns + ------- + fobj : file-like + Open file-like object. 
+ close_it : bool + True if the calling function should close this file when done, + false otherwise. + """ + # If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class + # implementing a '__fspath__' method), try to convert it to str. If this + # fails by throwing a 'TypeError', assume it's an open file handle and + # return it as-is. + try: + filespec = os.fspath(filespec) + except TypeError: + return filespec, False + + # 'filespec' is definitely a str now + + # open for reading + if mode[0] == 'r': + + # determine filename plus extension + if not os.path.isfile(filespec): + if os.path.isfile(filespec+'.mtx'): + filespec = filespec + '.mtx' + elif os.path.isfile(filespec+'.mtx.gz'): + filespec = filespec + '.mtx.gz' + elif os.path.isfile(filespec+'.mtx.bz2'): + filespec = filespec + '.mtx.bz2' + # open filename + if filespec.endswith('.gz'): + import gzip + stream = gzip.open(filespec, mode) + elif filespec.endswith('.bz2'): + import bz2 + stream = bz2.BZ2File(filespec, 'rb') + else: + stream = open(filespec, mode) + + # open for writing + else: + if filespec[-4:] != '.mtx': + filespec = filespec + '.mtx' + stream = open(filespec, mode) + + return stream, True + + # ------------------------------------------------------------------------- + @staticmethod + def _get_symmetry(a): + m, n = a.shape + if m != n: + return MMFile.SYMMETRY_GENERAL + issymm = True + isskew = True + isherm = a.dtype.char in 'FD' + + # sparse input + if issparse(a): + # check if number of nonzero entries of lower and upper triangle + # matrix are equal + a = a.tocoo() + (row, col) = a.nonzero() + if (row < col).sum() != (row > col).sum(): + return MMFile.SYMMETRY_GENERAL + + # define iterator over symmetric pair entries + a = a.todok() + + def symm_iterator(): + for ((i, j), aij) in a.items(): + if i > j: + aji = a[j, i] + yield (aij, aji, False) + elif i == j: + yield (aij, aij, True) + + # non-sparse input + else: + # define iterator over symmetric pair entries + def 
symm_iterator(): + for j in range(n): + for i in range(j, n): + aij, aji = a[i][j], a[j][i] + yield (aij, aji, i == j) + + # check for symmetry + # yields aij, aji, is_diagonal + for (aij, aji, is_diagonal) in symm_iterator(): + if isskew and is_diagonal and aij != 0: + isskew = False + else: + if issymm and aij != aji: + issymm = False + with np.errstate(over="ignore"): + # This can give a warning for uint dtypes, so silence that + if isskew and aij != -aji: + isskew = False + if isherm and aij != conj(aji): + isherm = False + if not (issymm or isskew or isherm): + break + + # return symmetry value + if issymm: + return MMFile.SYMMETRY_SYMMETRIC + if isskew: + return MMFile.SYMMETRY_SKEW_SYMMETRIC + if isherm: + return MMFile.SYMMETRY_HERMITIAN + return MMFile.SYMMETRY_GENERAL + + # ------------------------------------------------------------------------- + @staticmethod + def _field_template(field, precision): + return {MMFile.FIELD_REAL: '%%.%ie\n' % precision, + MMFile.FIELD_INTEGER: '%i\n', + MMFile.FIELD_UNSIGNED: '%u\n', + MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % + (precision, precision) + }.get(field, None) + + # ------------------------------------------------------------------------- + def __init__(self, **kwargs): + self._init_attrs(**kwargs) + + # ------------------------------------------------------------------------- + def read(self, source): + """ + Reads the contents of a Matrix Market file-like 'source' into a matrix. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extensions .mtx, .mtz.gz) + or open file object. + + Returns + ------- + a : ndarray or coo_matrix + Dense or sparse matrix depending on the matrix format in the + Matrix Market file. 
+ """ + stream, close_it = self._open(source) + + try: + self._parse_header(stream) + return self._parse_body(stream) + + finally: + if close_it: + stream.close() + + # ------------------------------------------------------------------------- + def write(self, target, a, comment='', field=None, precision=None, + symmetry=None): + """ + Writes sparse or dense array `a` to Matrix Market file-like `target`. + + Parameters + ---------- + target : str or file-like + Matrix Market filename (extension .mtx) or open file-like object. + a : array like + Sparse or dense 2-D array. + comment : str, optional + Comments to be prepended to the Matrix Market file. + field : None or str, optional + Either 'real', 'complex', 'pattern', or 'integer'. + precision : None or int, optional + Number of digits to display for real or complex values. + symmetry : None or str, optional + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + If symmetry is None the symmetry type of 'a' is determined by its + values. 
+ """ + + stream, close_it = self._open(target, 'wb') + + try: + self._write(stream, a, comment, field, precision, symmetry) + + finally: + if close_it: + stream.close() + else: + stream.flush() + + # ------------------------------------------------------------------------- + def _init_attrs(self, **kwargs): + """ + Initialize each attributes with the corresponding keyword arg value + or a default of None + """ + + attrs = self.__class__.__slots__ + public_attrs = [attr[1:] for attr in attrs] + invalid_keys = set(kwargs.keys()) - set(public_attrs) + + if invalid_keys: + raise ValueError('''found {} invalid keyword arguments, please only + use {}'''.format(tuple(invalid_keys), + public_attrs)) + + for attr in attrs: + setattr(self, attr, kwargs.get(attr[1:], None)) + + # ------------------------------------------------------------------------- + def _parse_header(self, stream): + rows, cols, entries, format, field, symmetry = \ + self.__class__.info(stream) + self._init_attrs(rows=rows, cols=cols, entries=entries, format=format, + field=field, symmetry=symmetry) + + # ------------------------------------------------------------------------- + def _parse_body(self, stream): + rows, cols, entries, format, field, symm = (self.rows, self.cols, + self.entries, self.format, + self.field, self.symmetry) + + dtype = self.DTYPES_BY_FIELD.get(field, None) + + has_symmetry = self.has_symmetry + is_integer = field == self.FIELD_INTEGER + is_unsigned_integer = field == self.FIELD_UNSIGNED + is_complex = field == self.FIELD_COMPLEX + is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC + is_herm = symm == self.SYMMETRY_HERMITIAN + is_pattern = field == self.FIELD_PATTERN + + if format == self.FORMAT_ARRAY: + a = zeros((rows, cols), dtype=dtype) + line = 1 + i, j = 0, 0 + if is_skew: + a[i, j] = 0 + if i < rows - 1: + i += 1 + while line: + line = stream.readline() + # line.startswith('%') + if not line or line[0] in ['%', 37] or not line.strip(): + continue + if is_integer: + aij = 
int(line) + elif is_unsigned_integer: + aij = int(line) + elif is_complex: + aij = complex(*map(float, line.split())) + else: + aij = float(line) + a[i, j] = aij + if has_symmetry and i != j: + if is_skew: + a[j, i] = -aij + elif is_herm: + a[j, i] = conj(aij) + else: + a[j, i] = aij + if i < rows-1: + i = i + 1 + else: + j = j + 1 + if not has_symmetry: + i = 0 + else: + i = j + if is_skew: + a[i, j] = 0 + if i < rows-1: + i += 1 + + if is_skew: + if not (i in [0, j] and j == cols - 1): + raise ValueError("Parse error, did not read all lines.") + else: + if not (i in [0, j] and j == cols): + raise ValueError("Parse error, did not read all lines.") + + elif format == self.FORMAT_COORDINATE: + # Read sparse COOrdinate format + + if entries == 0: + # empty matrix + return coo_matrix((rows, cols), dtype=dtype) + + I = zeros(entries, dtype='intc') + J = zeros(entries, dtype='intc') + if is_pattern: + V = ones(entries, dtype='int8') + elif is_integer: + V = zeros(entries, dtype='intp') + elif is_unsigned_integer: + V = zeros(entries, dtype='uint64') + elif is_complex: + V = zeros(entries, dtype='complex') + else: + V = zeros(entries, dtype='float') + + entry_number = 0 + for line in stream: + # line.startswith('%') + if not line or line[0] in ['%', 37] or not line.strip(): + continue + + if entry_number+1 > entries: + raise ValueError("'entries' in header is smaller than " + "number of entries") + l = line.split() + I[entry_number], J[entry_number] = map(int, l[:2]) + + if not is_pattern: + if is_integer: + V[entry_number] = int(l[2]) + elif is_unsigned_integer: + V[entry_number] = int(l[2]) + elif is_complex: + V[entry_number] = complex(*map(float, l[2:])) + else: + V[entry_number] = float(l[2]) + entry_number += 1 + if entry_number < entries: + raise ValueError("'entries' in header is larger than " + "number of entries") + + I -= 1 # adjust indices (base 1 -> base 0) + J -= 1 + + if has_symmetry: + mask = (I != J) # off diagonal mask + od_I = I[mask] + od_J = J[mask] 
+ od_V = V[mask] + + I = concatenate((I, od_J)) + J = concatenate((J, od_I)) + + if is_skew: + od_V *= -1 + elif is_herm: + od_V = od_V.conjugate() + + V = concatenate((V, od_V)) + + a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype) + else: + raise NotImplementedError(format) + + return a + + # ------------------------------------------------------------------------ + def _write(self, stream, a, comment='', field=None, precision=None, + symmetry=None): + if isinstance(a, list) or isinstance(a, ndarray) or \ + isinstance(a, tuple) or hasattr(a, '__array__'): + rep = self.FORMAT_ARRAY + a = asarray(a) + if len(a.shape) != 2: + raise ValueError('Expected 2 dimensional array') + rows, cols = a.shape + + if field is not None: + + if field == self.FIELD_INTEGER: + if not can_cast(a.dtype, 'intp'): + raise OverflowError("mmwrite does not support integer " + "dtypes larger than native 'intp'.") + a = a.astype('intp') + elif field == self.FIELD_REAL: + if a.dtype.char not in 'fd': + a = a.astype('d') + elif field == self.FIELD_COMPLEX: + if a.dtype.char not in 'FD': + a = a.astype('D') + + else: + if not issparse(a): + raise ValueError('unknown matrix type: %s' % type(a)) + + rep = 'coordinate' + rows, cols = a.shape + + typecode = a.dtype.char + + if precision is None: + if typecode in 'fF': + precision = 8 + else: + precision = 16 + if field is None: + kind = a.dtype.kind + if kind == 'i': + if not can_cast(a.dtype, 'intp'): + raise OverflowError("mmwrite does not support integer " + "dtypes larger than native 'intp'.") + field = 'integer' + elif kind == 'f': + field = 'real' + elif kind == 'c': + field = 'complex' + elif kind == 'u': + field = 'unsigned-integer' + else: + raise TypeError('unexpected dtype kind ' + kind) + + if symmetry is None: + symmetry = self._get_symmetry(a) + + # validate rep, field, and symmetry + self.__class__._validate_format(rep) + self.__class__._validate_field(field) + self.__class__._validate_symmetry(symmetry) + + # write initial 
header line + data = f'%%MatrixMarket matrix {rep} {field} {symmetry}\n' + stream.write(data.encode('latin1')) + + # write comments + for line in comment.split('\n'): + data = '%%%s\n' % (line) + stream.write(data.encode('latin1')) + + template = self._field_template(field, precision) + # write dense format + if rep == self.FORMAT_ARRAY: + # write shape spec + data = '%i %i\n' % (rows, cols) + stream.write(data.encode('latin1')) + + if field in (self.FIELD_INTEGER, self.FIELD_REAL, + self.FIELD_UNSIGNED): + if symmetry == self.SYMMETRY_GENERAL: + for j in range(cols): + for i in range(rows): + data = template % a[i, j] + stream.write(data.encode('latin1')) + + elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC: + for j in range(cols): + for i in range(j + 1, rows): + data = template % a[i, j] + stream.write(data.encode('latin1')) + + else: + for j in range(cols): + for i in range(j, rows): + data = template % a[i, j] + stream.write(data.encode('latin1')) + + elif field == self.FIELD_COMPLEX: + + if symmetry == self.SYMMETRY_GENERAL: + for j in range(cols): + for i in range(rows): + aij = a[i, j] + data = template % (real(aij), imag(aij)) + stream.write(data.encode('latin1')) + else: + for j in range(cols): + for i in range(j, rows): + aij = a[i, j] + data = template % (real(aij), imag(aij)) + stream.write(data.encode('latin1')) + + elif field == self.FIELD_PATTERN: + raise ValueError('pattern type inconsisted with dense format') + + else: + raise TypeError('Unknown field type %s' % field) + + # write sparse format + else: + coo = a.tocoo() # convert to COOrdinate format + + # if symmetry format used, remove values above main diagonal + if symmetry != self.SYMMETRY_GENERAL: + lower_triangle_mask = coo.row >= coo.col + coo = coo_matrix((coo.data[lower_triangle_mask], + (coo.row[lower_triangle_mask], + coo.col[lower_triangle_mask])), + shape=coo.shape) + + # write shape spec + data = '%i %i %i\n' % (rows, cols, coo.nnz) + stream.write(data.encode('latin1')) + + template = 
self._field_template(field, precision-1) + + if field == self.FIELD_PATTERN: + for r, c in zip(coo.row+1, coo.col+1): + data = "%i %i\n" % (r, c) + stream.write(data.encode('latin1')) + elif field in (self.FIELD_INTEGER, self.FIELD_REAL, + self.FIELD_UNSIGNED): + for r, c, d in zip(coo.row+1, coo.col+1, coo.data): + data = ("%i %i " % (r, c)) + (template % d) + stream.write(data.encode('latin1')) + elif field == self.FIELD_COMPLEX: + for r, c, d in zip(coo.row+1, coo.col+1, coo.data): + data = ("%i %i " % (r, c)) + (template % (d.real, d.imag)) + stream.write(data.encode('latin1')) + else: + raise TypeError('Unknown field type %s' % field) + + +def _is_fromfile_compatible(stream): + """ + Check whether `stream` is compatible with numpy.fromfile. + + Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with + Python 3. + """ + + bad_cls = [] + try: + import gzip + bad_cls.append(gzip.GzipFile) + except ImportError: + pass + try: + import bz2 + bad_cls.append(bz2.BZ2File) + except ImportError: + pass + + bad_cls = tuple(bad_cls) + return not isinstance(stream, bad_cls) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/_netcdf.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/_netcdf.py new file mode 100644 index 0000000000000000000000000000000000000000..4b8b576b8612334169834f58ec2ae7e068f653cd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/_netcdf.py @@ -0,0 +1,1095 @@ +""" +NetCDF reader/writer module. + +This module is used to read and create NetCDF files. NetCDF files are +accessed through the `netcdf_file` object. Data written to and from NetCDF +files are contained in `netcdf_variable` objects. Attributes are given +as member variables of the `netcdf_file` and `netcdf_variable` objects. + +This module implements the Scientific.IO.NetCDF API to read and create +NetCDF files. 
The same API is also used in the PyNIO and pynetcdf +modules, allowing these modules to be used interchangeably when working +with NetCDF files. + +Only NetCDF3 is supported here; for NetCDF4 see +`netCDF4-python `__, +which has a similar API. + +""" + +# TODO: +# * properly implement ``_FillValue``. +# * fix character variables. +# * implement PAGESIZE for Python 2.6? + +# The Scientific.IO.NetCDF API allows attributes to be added directly to +# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate +# between user-set attributes and instance attributes, user-set attributes +# are automatically stored in the ``_attributes`` attribute by overloading +#``__setattr__``. This is the reason why the code sometimes uses +#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; +# otherwise the key would be inserted into userspace attributes. + + +__all__ = ['netcdf_file', 'netcdf_variable'] + + +import warnings +import weakref +from operator import mul +from platform import python_implementation + +import mmap as mm + +import numpy as np +from numpy import frombuffer, dtype, empty, array, asarray +from numpy import little_endian as LITTLE_ENDIAN +from functools import reduce + + +IS_PYPY = python_implementation() == 'PyPy' + +ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' +ZERO = b'\x00\x00\x00\x00' +NC_BYTE = b'\x00\x00\x00\x01' +NC_CHAR = b'\x00\x00\x00\x02' +NC_SHORT = b'\x00\x00\x00\x03' +NC_INT = b'\x00\x00\x00\x04' +NC_FLOAT = b'\x00\x00\x00\x05' +NC_DOUBLE = b'\x00\x00\x00\x06' +NC_DIMENSION = b'\x00\x00\x00\n' +NC_VARIABLE = b'\x00\x00\x00\x0b' +NC_ATTRIBUTE = b'\x00\x00\x00\x0c' +FILL_BYTE = b'\x81' +FILL_CHAR = b'\x00' +FILL_SHORT = b'\x80\x01' +FILL_INT = b'\x80\x00\x00\x01' +FILL_FLOAT = b'\x7C\xF0\x00\x00' +FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00' + +TYPEMAP = {NC_BYTE: ('b', 1), + NC_CHAR: ('c', 1), + NC_SHORT: ('h', 2), + NC_INT: ('i', 4), + NC_FLOAT: ('f', 4), + NC_DOUBLE: ('d', 8)} + +FILLMAP = {NC_BYTE: FILL_BYTE, 
+ NC_CHAR: FILL_CHAR, + NC_SHORT: FILL_SHORT, + NC_INT: FILL_INT, + NC_FLOAT: FILL_FLOAT, + NC_DOUBLE: FILL_DOUBLE} + +REVERSE = {('b', 1): NC_BYTE, + ('B', 1): NC_CHAR, + ('c', 1): NC_CHAR, + ('h', 2): NC_SHORT, + ('i', 4): NC_INT, + ('f', 4): NC_FLOAT, + ('d', 8): NC_DOUBLE, + + # these come from asarray(1).dtype.char and asarray('foo').dtype.char, + # used when getting the types from generic attributes. + ('l', 4): NC_INT, + ('S', 1): NC_CHAR} + + +class netcdf_file: + """ + A file object for NetCDF data. + + A `netcdf_file` object has two standard attributes: `dimensions` and + `variables`. The values of both are dictionaries, mapping dimension + names to their associated lengths and variable names to variables, + respectively. Application programs should never modify these + dictionaries. + + All other attributes correspond to global attributes defined in the + NetCDF file. Global file attributes are created by assigning to an + attribute of the `netcdf_file` object. + + Parameters + ---------- + filename : string or file-like + string -> filename + mode : {'r', 'w', 'a'}, optional + read-write-append mode, default is 'r' + mmap : None or bool, optional + Whether to mmap `filename` when reading. Default is True + when `filename` is a file name, False when `filename` is a + file-like object. Note that when mmap is in use, data arrays + returned refer directly to the mmapped data on disk, and the + file cannot be closed as long as references to it exist. + version : {1, 2}, optional + version of netcdf to read / write, where 1 means *Classic + format* and 2 means *64-bit offset format*. Default is 1. See + `here `__ + for more info. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + Notes + ----- + The major advantage of this module over other modules is that it doesn't + require the code to be linked to the NetCDF libraries. This module is + derived from `pupynere `_. 
+ + NetCDF files are a self-describing binary data format. The file contains + metadata that describes the dimensions and variables in the file. More + details about NetCDF files can be found `here + `__. There + are three main sections to a NetCDF data structure: + + 1. Dimensions + 2. Variables + 3. Attributes + + The dimensions section records the name and length of each dimension used + by the variables. The variables would then indicate which dimensions it + uses and any attributes such as data units, along with containing the data + values for the variable. It is good practice to include a + variable that is the same name as a dimension to provide the values for + that axes. Lastly, the attributes section would contain additional + information such as the name of the file creator or the instrument used to + collect the data. + + When writing data to a NetCDF file, there is often the need to indicate the + 'record dimension'. A record dimension is the unbounded dimension for a + variable. For example, a temperature variable may have dimensions of + latitude, longitude and time. If one wants to add more temperature data to + the NetCDF file as time progresses, then the temperature variable should + have the time dimension flagged as the record dimension. + + In addition, the NetCDF file header contains the position of the data in + the file, so access can be done in an efficient manner without loading + unnecessary data into memory. It uses the ``mmap`` module to create + Numpy arrays mapped to the data on disk, for the same purpose. + + Note that when `netcdf_file` is used to open a file with mmap=True + (default for read-only), arrays returned by it refer to data + directly on the disk. The file should not be closed, and cannot be cleanly + closed when asked, if such arrays are alive. You may want to copy data arrays + obtained from mmapped Netcdf file if they are to be processed after the file + is closed, see the example below. 
+ + Examples + -------- + To create a NetCDF file: + + >>> from scipy.io import netcdf_file + >>> import numpy as np + >>> f = netcdf_file('simple.nc', 'w') + >>> f.history = 'Created for a test' + >>> f.createDimension('time', 10) + >>> time = f.createVariable('time', 'i', ('time',)) + >>> time[:] = np.arange(10) + >>> time.units = 'days since 2008-01-01' + >>> f.close() + + Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice + of the time variable allows for the data to be set in the object, rather + than letting ``arange(10)`` overwrite the ``time`` variable. + + To read the NetCDF file we just created: + + >>> from scipy.io import netcdf_file + >>> f = netcdf_file('simple.nc', 'r') + >>> print(f.history) + b'Created for a test' + >>> time = f.variables['time'] + >>> print(time.units) + b'days since 2008-01-01' + >>> print(time.shape) + (10,) + >>> print(time[-1]) + 9 + + NetCDF files, when opened read-only, return arrays that refer + directly to memory-mapped data on disk: + + >>> data = time[:] + + If the data is to be processed after the file is closed, it needs + to be copied to main memory: + + >>> data = time[:].copy() + >>> del time + >>> f.close() + >>> data.mean() + 4.5 + + A NetCDF file can also be used as context manager: + + >>> from scipy.io import netcdf_file + >>> with netcdf_file('simple.nc', 'r') as f: + ... 
print(f.history) + b'Created for a test' + + """ + def __init__(self, filename, mode='r', mmap=None, version=1, + maskandscale=False): + """Initialize netcdf_file from fileobj (str or file-like).""" + if mode not in 'rwa': + raise ValueError("Mode must be either 'r', 'w' or 'a'.") + + if hasattr(filename, 'seek'): # file-like + self.fp = filename + self.filename = 'None' + if mmap is None: + mmap = False + elif mmap and not hasattr(filename, 'fileno'): + raise ValueError('Cannot use file object for mmap') + else: # maybe it's a string + self.filename = filename + omode = 'r+' if mode == 'a' else mode + self.fp = open(self.filename, '%sb' % omode) + if mmap is None: + # Mmapped files on PyPy cannot be usually closed + # before the GC runs, so it's better to use mmap=False + # as the default. + mmap = (not IS_PYPY) + + if mode != 'r': + # Cannot read write-only files + mmap = False + + self.use_mmap = mmap + self.mode = mode + self.version_byte = version + self.maskandscale = maskandscale + + self.dimensions = {} + self.variables = {} + + self._dims = [] + self._recs = 0 + self._recsize = 0 + + self._mm = None + self._mm_buf = None + if self.use_mmap: + self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) + self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) + + self._attributes = {} + + if mode in 'ra': + self._read() + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. 
+ try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def close(self): + """Closes the NetCDF file.""" + if hasattr(self, 'fp') and not self.fp.closed: + try: + self.flush() + finally: + self.variables = {} + if self._mm_buf is not None: + ref = weakref.ref(self._mm_buf) + self._mm_buf = None + if ref() is None: + # self._mm_buf is gc'd, and we can close the mmap + self._mm.close() + else: + # we cannot close self._mm, since self._mm_buf is + # alive and there may still be arrays referring to it + warnings.warn( + "Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still " + "exist. All data arrays obtained from such files refer " + "directly to data on disk, and must be copied before the " + "file can be cleanly closed. " + "(See netcdf_file docstring for more information on mmap.)", + category=RuntimeWarning, stacklevel=2, + ) + self._mm = None + self.fp.close() + __del__ = close + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def createDimension(self, name, length): + """ + Adds a dimension to the Dimension section of the NetCDF data structure. + + Note that this function merely adds a new dimension that the variables can + reference. The values for the dimension, if desired, should be added as + a variable using `createVariable`, referring to this dimension. + + Parameters + ---------- + name : str + Name of the dimension (Eg, 'lat' or 'time'). + length : int + Length of the dimension. + + See Also + -------- + createVariable + + """ + if length is None and self._dims: + raise ValueError("Only first dimension may be unlimited!") + + self.dimensions[name] = length + self._dims.append(name) + + def createVariable(self, name, type, dimensions): + """ + Create an empty variable for the `netcdf_file` object, specifying its data + type and the dimensions it uses. 
+ + Parameters + ---------- + name : str + Name of the new variable. + type : dtype or str + Data type of the variable. + dimensions : sequence of str + List of the dimension names used by the variable, in the desired order. + + Returns + ------- + variable : netcdf_variable + The newly created ``netcdf_variable`` object. + This object has also been added to the `netcdf_file` object as well. + + See Also + -------- + createDimension + + Notes + ----- + Any dimensions to be used by the variable should already exist in the + NetCDF data structure or should be created by `createDimension` prior to + creating the NetCDF variable. + + """ + shape = tuple([self.dimensions[dim] for dim in dimensions]) + shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for NumPy + + type = dtype(type) + typecode, size = type.char, type.itemsize + if (typecode, size) not in REVERSE: + raise ValueError("NetCDF 3 does not support type %s" % type) + + # convert to big endian always for NetCDF 3 + data = empty(shape_, dtype=type.newbyteorder("B")) + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, + maskandscale=self.maskandscale) + return self.variables[name] + + def flush(self): + """ + Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. + + See Also + -------- + sync : Identical function + + """ + if hasattr(self, 'mode') and self.mode in 'wa': + self._write() + sync = flush + + def _write(self): + self.fp.seek(0) + self.fp.write(b'CDF') + self.fp.write(array(self.version_byte, '>b').tobytes()) + + # Write headers and data. + self._write_numrecs() + self._write_dim_array() + self._write_gatt_array() + self._write_var_array() + + def _write_numrecs(self): + # Get highest record count from all record variables. 
+ for var in self.variables.values(): + if var.isrec and len(var.data) > self._recs: + self.__dict__['_recs'] = len(var.data) + self._pack_int(self._recs) + + def _write_dim_array(self): + if self.dimensions: + self.fp.write(NC_DIMENSION) + self._pack_int(len(self.dimensions)) + for name in self._dims: + self._pack_string(name) + length = self.dimensions[name] + self._pack_int(length or 0) # replace None with 0 for record dimension + else: + self.fp.write(ABSENT) + + def _write_gatt_array(self): + self._write_att_array(self._attributes) + + def _write_att_array(self, attributes): + if attributes: + self.fp.write(NC_ATTRIBUTE) + self._pack_int(len(attributes)) + for name, values in attributes.items(): + self._pack_string(name) + self._write_att_values(values) + else: + self.fp.write(ABSENT) + + def _write_var_array(self): + if self.variables: + self.fp.write(NC_VARIABLE) + self._pack_int(len(self.variables)) + + # Sort variable names non-recs first, then recs. + def sortkey(n): + v = self.variables[n] + if v.isrec: + return (-1,) + return v._shape + variables = sorted(self.variables, key=sortkey, reverse=True) + + # Set the metadata for all variables. + for name in variables: + self._write_var_metadata(name) + # Now that we have the metadata, we know the vsize of + # each record variable, so we can calculate recsize. + self.__dict__['_recsize'] = sum([ + var._vsize for var in self.variables.values() + if var.isrec]) + # Set the data for all variables. 
+ for name in variables: + self._write_var_data(name) + else: + self.fp.write(ABSENT) + + def _write_var_metadata(self, name): + var = self.variables[name] + + self._pack_string(name) + self._pack_int(len(var.dimensions)) + for dimname in var.dimensions: + dimid = self._dims.index(dimname) + self._pack_int(dimid) + + self._write_att_array(var._attributes) + + nc_type = REVERSE[var.typecode(), var.itemsize()] + self.fp.write(nc_type) + + if not var.isrec: + vsize = var.data.size * var.data.itemsize + vsize += -vsize % 4 + else: # record variable + try: + vsize = var.data[0].size * var.data.itemsize + except IndexError: + vsize = 0 + rec_vars = len([v for v in self.variables.values() + if v.isrec]) + if rec_vars > 1: + vsize += -vsize % 4 + self.variables[name].__dict__['_vsize'] = vsize + self._pack_int(vsize) + + # Pack a bogus begin, and set the real value later. + self.variables[name].__dict__['_begin'] = self.fp.tell() + self._pack_begin(0) + + def _write_var_data(self, name): + var = self.variables[name] + + # Set begin in file header. + the_beguine = self.fp.tell() + self.fp.seek(var._begin) + self._pack_begin(the_beguine) + self.fp.seek(the_beguine) + + # Write data. + if not var.isrec: + self.fp.write(var.data.tobytes()) + count = var.data.size * var.data.itemsize + self._write_var_padding(var, var._vsize - count) + else: # record variable + # Handle rec vars with shape[0] < nrecs. + if self._recs > len(var.data): + shape = (self._recs,) + var.data.shape[1:] + # Resize in-place does not always work since + # the array might not be single-segment + try: + var.data.resize(shape) + except ValueError: + dtype = var.data.dtype + var.__dict__['data'] = np.resize(var.data, shape).astype(dtype) + + pos0 = pos = self.fp.tell() + for rec in var.data: + # Apparently scalars cannot be converted to big endian. If we + # try to convert a ``=i4`` scalar to, say, '>i4' the dtype + # will remain as ``=i4``. 
+ if not rec.shape and (rec.dtype.byteorder == '<' or + (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)): + rec = rec.byteswap() + self.fp.write(rec.tobytes()) + # Padding + count = rec.size * rec.itemsize + self._write_var_padding(var, var._vsize - count) + pos += self._recsize + self.fp.seek(pos) + self.fp.seek(pos0 + var._vsize) + + def _write_var_padding(self, var, size): + encoded_fill_value = var._get_encoded_fill_value() + num_fills = size // len(encoded_fill_value) + self.fp.write(encoded_fill_value * num_fills) + + def _write_att_values(self, values): + if hasattr(values, 'dtype'): + nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] + else: + types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)] + + # bytes index into scalars in py3k. Check for "string" types + if isinstance(values, (str, bytes)): + sample = values + else: + try: + sample = values[0] # subscriptable? + except TypeError: + sample = values # scalar + + for class_, nc_type in types: + if isinstance(sample, class_): + break + + typecode, size = TYPEMAP[nc_type] + dtype_ = '>%s' % typecode + # asarray() dies with bytes and '>c' in py3k. Change to 'S' + dtype_ = 'S' if dtype_ == '>c' else dtype_ + + values = asarray(values, dtype=dtype_) + + self.fp.write(nc_type) + + if values.dtype.char == 'S': + nelems = values.itemsize + else: + nelems = values.size + self._pack_int(nelems) + + if not values.shape and (values.dtype.byteorder == '<' or + (values.dtype.byteorder == '=' and LITTLE_ENDIAN)): + values = values.byteswap() + self.fp.write(values.tobytes()) + count = values.size * values.itemsize + self.fp.write(b'\x00' * (-count % 4)) # pad + + def _read(self): + # Check magic bytes and version + magic = self.fp.read(3) + if not magic == b'CDF': + raise TypeError("Error: %s is not a valid NetCDF 3 file" % + self.filename) + self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0] + + # Read file headers and set data. 
+ self._read_numrecs() + self._read_dim_array() + self._read_gatt_array() + self._read_var_array() + + def _read_numrecs(self): + self.__dict__['_recs'] = self._unpack_int() + + def _read_dim_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_DIMENSION]: + raise ValueError("Unexpected header.") + count = self._unpack_int() + + for dim in range(count): + name = self._unpack_string().decode('latin1') + length = self._unpack_int() or None # None for record dimension + self.dimensions[name] = length + self._dims.append(name) # preserve order + + def _read_gatt_array(self): + for k, v in self._read_att_array().items(): + self.__setattr__(k, v) + + def _read_att_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_ATTRIBUTE]: + raise ValueError("Unexpected header.") + count = self._unpack_int() + + attributes = {} + for attr in range(count): + name = self._unpack_string().decode('latin1') + attributes[name] = self._read_att_values() + return attributes + + def _read_var_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_VARIABLE]: + raise ValueError("Unexpected header.") + + begin = 0 + dtypes = {'names': [], 'formats': []} + rec_vars = [] + count = self._unpack_int() + for var in range(count): + (name, dimensions, shape, attributes, + typecode, size, dtype_, begin_, vsize) = self._read_var() + # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html + # Note that vsize is the product of the dimension lengths + # (omitting the record dimension) and the number of bytes + # per value (determined from the type), increased to the + # next multiple of 4, for each variable. If a record + # variable, this is the amount of space per record. The + # netCDF "record size" is calculated as the sum of the + # vsize's of all the record variables. + # + # The vsize field is actually redundant, because its value + # may be computed from other information in the header. 
The + # 32-bit vsize field is not large enough to contain the size + # of variables that require more than 2^32 - 4 bytes, so + # 2^32 - 1 is used in the vsize field for such variables. + if shape and shape[0] is None: # record variable + rec_vars.append(name) + # The netCDF "record size" is calculated as the sum of + # the vsize's of all the record variables. + self.__dict__['_recsize'] += vsize + if begin == 0: + begin = begin_ + dtypes['names'].append(name) + dtypes['formats'].append(str(shape[1:]) + dtype_) + + # Handle padding with a virtual variable. + if typecode in 'bch': + actual_size = reduce(mul, (1,) + shape[1:]) * size + padding = -actual_size % 4 + if padding: + dtypes['names'].append('_padding_%d' % var) + dtypes['formats'].append('(%d,)>b' % padding) + + # Data will be set later. + data = None + else: # not a record variable + # Calculate size to avoid problems with vsize (above) + a_size = reduce(mul, shape, 1) * size + if self.use_mmap: + data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_) + data.shape = shape + else: + pos = self.fp.tell() + self.fp.seek(begin_) + data = frombuffer(self.fp.read(a_size), dtype=dtype_ + ).copy() + data.shape = shape + self.fp.seek(pos) + + # Add variable. + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, attributes, + maskandscale=self.maskandscale) + + if rec_vars: + # Remove padding when only one record variable. + if len(rec_vars) == 1: + dtypes['names'] = dtypes['names'][:1] + dtypes['formats'] = dtypes['formats'][:1] + + # Build rec array. 
+ if self.use_mmap: + buf = self._mm_buf[begin:begin+self._recs*self._recsize] + rec_array = buf.view(dtype=dtypes) + rec_array.shape = (self._recs,) + else: + pos = self.fp.tell() + self.fp.seek(begin) + rec_array = frombuffer(self.fp.read(self._recs*self._recsize), + dtype=dtypes).copy() + rec_array.shape = (self._recs,) + self.fp.seek(pos) + + for var in rec_vars: + self.variables[var].__dict__['data'] = rec_array[var] + + def _read_var(self): + name = self._unpack_string().decode('latin1') + dimensions = [] + shape = [] + dims = self._unpack_int() + + for i in range(dims): + dimid = self._unpack_int() + dimname = self._dims[dimid] + dimensions.append(dimname) + dim = self.dimensions[dimname] + shape.append(dim) + dimensions = tuple(dimensions) + shape = tuple(shape) + + attributes = self._read_att_array() + nc_type = self.fp.read(4) + vsize = self._unpack_int() + begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() + + typecode, size = TYPEMAP[nc_type] + dtype_ = '>%s' % typecode + + return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize + + def _read_att_values(self): + nc_type = self.fp.read(4) + n = self._unpack_int() + + typecode, size = TYPEMAP[nc_type] + + count = n*size + values = self.fp.read(int(count)) + self.fp.read(-count % 4) # read padding + + if typecode != 'c': + values = frombuffer(values, dtype='>%s' % typecode).copy() + if values.shape == (1,): + values = values[0] + else: + values = values.rstrip(b'\x00') + return values + + def _pack_begin(self, begin): + if self.version_byte == 1: + self._pack_int(begin) + elif self.version_byte == 2: + self._pack_int64(begin) + + def _pack_int(self, value): + self.fp.write(array(value, '>i').tobytes()) + _pack_int32 = _pack_int + + def _unpack_int(self): + return int(frombuffer(self.fp.read(4), '>i')[0]) + _unpack_int32 = _unpack_int + + def _pack_int64(self, value): + self.fp.write(array(value, '>q').tobytes()) + + def _unpack_int64(self): + return 
frombuffer(self.fp.read(8), '>q')[0] + + def _pack_string(self, s): + count = len(s) + self._pack_int(count) + self.fp.write(s.encode('latin1')) + self.fp.write(b'\x00' * (-count % 4)) # pad + + def _unpack_string(self): + count = self._unpack_int() + s = self.fp.read(count).rstrip(b'\x00') + self.fp.read(-count % 4) # read padding + return s + + +class netcdf_variable: + """ + A data object for netcdf files. + + `netcdf_variable` objects are constructed by calling the method + `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` + objects behave much like array objects defined in numpy, except that their + data resides in a file. Data is read by indexing and written by assigning + to an indexed subset; the entire array can be accessed by the index ``[:]`` + or (for scalars) by using the methods `getValue` and `assignValue`. + `netcdf_variable` objects also have attribute `shape` with the same meaning + as for arrays, but the shape cannot be modified. There is another read-only + attribute `dimensions`, whose value is the tuple of dimension names. + + All other attributes correspond to variable attributes defined in + the NetCDF file. Variable attributes are created by assigning to an + attribute of the `netcdf_variable` object. + + Parameters + ---------- + data : array_like + The data array that holds the values for the variable. + Typically, this is initialized as empty, but with the proper shape. + typecode : dtype character code + Desired data-type for the data array. + size : int + Desired element size for the data array. + shape : sequence of ints + The shape of the array. This should match the lengths of the + variable's dimensions. + dimensions : sequence of strings + The names of the dimensions used by the variable. Must be in the + same order of the dimension lengths given by `shape`. + attributes : dict, optional + Attribute values (any type) keyed by string names. 
These attributes + become attributes for the netcdf_variable object. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + + Attributes + ---------- + dimensions : list of str + List of names of dimensions used by the variable object. + isrec, shape + Properties + + See also + -------- + isrec, shape + + """ + def __init__(self, data, typecode, size, shape, dimensions, + attributes=None, + maskandscale=False): + self.data = data + self._typecode = typecode + self._size = size + self._shape = shape + self.dimensions = dimensions + self.maskandscale = maskandscale + + self._attributes = attributes or {} + for k, v in self._attributes.items(): + self.__dict__[k] = v + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. + try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def isrec(self): + """Returns whether the variable has a record dimension or not. + + A record dimension is a dimension along which additional data could be + easily appended in the netcdf data structure without much rewriting of + the data file. This attribute is a read-only property of the + `netcdf_variable`. + + """ + return bool(self.data.shape) and not self._shape[0] + isrec = property(isrec) + + def shape(self): + """Returns the shape tuple of the data variable. + + This is a read-only attribute and can not be modified in the + same manner of other numpy arrays. + """ + return self.data.shape + shape = property(shape) + + def getValue(self): + """ + Retrieve a scalar value from a `netcdf_variable` of length one. + + Raises + ------ + ValueError + If the netcdf variable is an array of length greater than one, + this exception will be raised. + + """ + return self.data.item() + + def assignValue(self, value): + """ + Assign a scalar value to a `netcdf_variable` of length one. 
+ + Parameters + ---------- + value : scalar + Scalar value (of compatible type) to assign to a length-one netcdf + variable. This value will be written to file. + + Raises + ------ + ValueError + If the input is not a scalar, or if the destination is not a length-one + netcdf variable. + + """ + if not self.data.flags.writeable: + # Work-around for a bug in NumPy. Calling itemset() on a read-only + # memory-mapped array causes a seg. fault. + # See NumPy ticket #1622, and SciPy ticket #1202. + # This check for `writeable` can be removed when the oldest version + # of NumPy still supported by scipy contains the fix for #1622. + raise RuntimeError("variable is not writeable") + + self.data[:] = value + + def typecode(self): + """ + Return the typecode of the variable. + + Returns + ------- + typecode : char + The character typecode of the variable (e.g., 'i' for int). + + """ + return self._typecode + + def itemsize(self): + """ + Return the itemsize of the variable. + + Returns + ------- + itemsize : int + The element size of the variable (e.g., 8 for float64). 
+ + """ + return self._size + + def __getitem__(self, index): + if not self.maskandscale: + return self.data[index] + + data = self.data[index].copy() + missing_value = self._get_missing_value() + data = self._apply_missing_value(data, missing_value) + scale_factor = self._attributes.get('scale_factor') + add_offset = self._attributes.get('add_offset') + if add_offset is not None or scale_factor is not None: + data = data.astype(np.float64) + if scale_factor is not None: + data = data * scale_factor + if add_offset is not None: + data += add_offset + + return data + + def __setitem__(self, index, data): + if self.maskandscale: + missing_value = ( + self._get_missing_value() or + getattr(data, 'fill_value', 999999)) + self._attributes.setdefault('missing_value', missing_value) + self._attributes.setdefault('_FillValue', missing_value) + data = ((data - self._attributes.get('add_offset', 0.0)) / + self._attributes.get('scale_factor', 1.0)) + data = np.ma.asarray(data).filled(missing_value) + if self._typecode not in 'fd' and data.dtype.kind == 'f': + data = np.round(data) + + # Expand data for record vars? + if self.isrec: + if isinstance(index, tuple): + rec_index = index[0] + else: + rec_index = index + if isinstance(rec_index, slice): + recs = (rec_index.start or 0) + len(data) + else: + recs = rec_index + 1 + if recs > len(self.data): + shape = (recs,) + self._shape[1:] + # Resize in-place does not always work since + # the array might not be single-segment + try: + self.data.resize(shape) + except ValueError: + dtype = self.data.dtype + self.__dict__['data'] = np.resize(self.data, shape).astype(dtype) + self.data[index] = data + + def _default_encoded_fill_value(self): + """ + The default encoded fill-value for this Variable's data type. + """ + nc_type = REVERSE[self.typecode(), self.itemsize()] + return FILLMAP[nc_type] + + def _get_encoded_fill_value(self): + """ + Returns the encoded fill value for this variable as bytes. 
+ + This is taken from either the _FillValue attribute, or the default fill + value for this variable's data type. + """ + if '_FillValue' in self._attributes: + fill_value = np.array(self._attributes['_FillValue'], + dtype=self.data.dtype).tobytes() + if len(fill_value) == self.itemsize(): + return fill_value + else: + return self._default_encoded_fill_value() + else: + return self._default_encoded_fill_value() + + def _get_missing_value(self): + """ + Returns the value denoting "no data" for this variable. + + If this variable does not have a missing/fill value, returns None. + + If both _FillValue and missing_value are given, give precedence to + _FillValue. The netCDF standard gives special meaning to _FillValue; + missing_value is just used for compatibility with old datasets. + """ + + if '_FillValue' in self._attributes: + missing_value = self._attributes['_FillValue'] + elif 'missing_value' in self._attributes: + missing_value = self._attributes['missing_value'] + else: + missing_value = None + + return missing_value + + @staticmethod + def _apply_missing_value(data, missing_value): + """ + Applies the given missing value to the data array. + + Returns a numpy.ma array, with any value equal to missing_value masked + out (unless missing_value is None, in which case the original array is + returned). 
+ """ + + if missing_value is None: + newdata = data + else: + try: + missing_value_isnan = np.isnan(missing_value) + except (TypeError, NotImplementedError): + # some data types (e.g., characters) cannot be tested for NaN + missing_value_isnan = False + + if missing_value_isnan: + mymask = np.isnan(data) + else: + mymask = (data == missing_value) + + newdata = np.ma.masked_where(mymask, data) + + return newdata + + +NetCDFFile = netcdf_file +NetCDFVariable = netcdf_variable diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..34fc29c089d790546d6d3932752543dcc9cb95c0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/harwell_boeing.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/harwell_boeing.py new file mode 100644 index 0000000000000000000000000000000000000000..d4d17fe7d522a4a6ad41cbfb0ef5f05a221d06da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/harwell_boeing.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'MalformedHeader', 'hb_read', 'hb_write', 'HBInfo', + 'HBFile', 'HBMatrixType', 'FortranFormatParser', 'IntFormat', + 'ExpFormat', 'BadFortranFormat', 'hb' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io", module="harwell_boeing", + private_modules=["_harwell_boeing"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/idl.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/idl.py new file mode 100644 index 0000000000000000000000000000000000000000..1168022eeaa03a21c59d13f84469b7e24b95b785 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/idl.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'readsav', 'DTYPE_DICT', 'RECTYPE_DICT', 'STRUCT_DICT', + 'Pointer', 'ObjectPointer', 'AttrDict' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io", module="idl", + private_modules=["_idl"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py new file mode 100644 index 0000000000000000000000000000000000000000..27aaf69dbb78da4d48be411d803cd145b3bb425c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'aliases', 'native_code', 'swapped_code', + 'sys_is_le', 'to_numpy_code' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="byteordercodes", + private_modules=["_byteordercodes"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py new file mode 100644 index 0000000000000000000000000000000000000000..6d771a1993f0cd75ee1917fdc04ab4e486bee16f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py @@ -0,0 +1,28 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque', + 'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template', + 'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template', + 'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8', + 'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8', + 'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS', + 'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS', + 'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS', + 'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS', + 'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS', + 'mxUINT64_CLASS', 'mxUINT8_CLASS', 'convert_dtypes' +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="mio5_params", + private_modules=["_mio5_params"], all=__all__, + attribute=name) diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/streams.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..9f0e8c1ecc1713750e591ba1028ea4f2d440600b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/streams.py @@ -0,0 +1,18 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io.matlab` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'BLOCK_SIZE', 'GenericStream', 'ZlibInputStream', 'make_stream' +] + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io.matlab", module="streams", + private_modules=["_streams"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/netcdf.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/netcdf.py new file mode 100644 index 0000000000000000000000000000000000000000..f469c5a3472417b0c97e848a74672e1d119d3eb4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/netcdf.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'netcdf_file', 'netcdf_variable', + 'array', 'LITTLE_ENDIAN', 'IS_PYPY', 'ABSENT', 'ZERO', + 'NC_BYTE', 'NC_CHAR', 'NC_SHORT', 'NC_INT', 'NC_FLOAT', + 'NC_DOUBLE', 'NC_DIMENSION', 'NC_VARIABLE', 'NC_ATTRIBUTE', + 'FILL_BYTE', 'FILL_CHAR', 'FILL_SHORT', 'FILL_INT', 'FILL_FLOAT', + 'FILL_DOUBLE', 'TYPEMAP', 'FILLMAP', 'REVERSE', 'NetCDFFile', + 'NetCDFVariable' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io", module="netcdf", + private_modules=["_netcdf"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e578fb0d49dadf1b3b057a57e3b5b42aef7cbc9b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40fad9f61d1c32d71cd9c5ce240bbe63316568c0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_idl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_idl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32d61de68ce62241ff886ff54bef55ff258344cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_idl.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16536c9b7e44a3ba934743911b02397299f4ec13 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94c7ad6a3369cf306adb1dc3b65c0c35bcacee29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_paths.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_paths.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..243e6f05d97fbc832cfd83393a069e9414d80dc6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_paths.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c33fc5cf13237340bcc5edfa8b456472313b890a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani new file mode 100644 index 
0000000000000000000000000000000000000000..3be500032786398c3efdbd9f873f705b6c1636bd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav new file mode 100644 index 0000000000000000000000000000000000000000..619a1259670a361ac76ffa86c481a813dbaec07a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..3fa56c450eaa916d9c91b492ba17e7e843df2d53 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav new file mode 100644 index 0000000000000000000000000000000000000000..2854dbc8b1e53f298ac3b135eac1f06e73940152 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav new file mode 100644 index 0000000000000000000000000000000000000000..3e978fad540a8979435d4561de151573696affd8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav new file mode 100644 index 0000000000000000000000000000000000000000..f699fe2427dfe876283de0fcade2c2325a262061 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav new file mode 100644 index 0000000000000000000000000000000000000000..8e3a402c60a515149811e2ca21628e97180c4956 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav new file mode 100644 index 0000000000000000000000000000000000000000..dd3504f0ecfaed178ace02e1a8a84650111c3936 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..285da7f78ffbbf2155fd2e4e648f19a1d3a42ac3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav new file mode 100644 index 0000000000000000000000000000000000000000..d99fa48f0a43ec06c3101560f9cade829c8b1940 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav new file mode 100644 index 0000000000000000000000000000000000000000..de5e984e49f507ae550b1ae2fd54b799e742a195 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav new file mode 100644 index 0000000000000000000000000000000000000000..bb76671a65be41fd2a426146c6c366f1e7fb07c3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav new file mode 100644 index 0000000000000000000000000000000000000000..4249ec62119e264d55a81d3faf9c87dcaed1c7c8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc new file mode 100644 index 0000000000000000000000000000000000000000..07db1cd986a4c3b9929c01c1f22bcc3f562b1c16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc new file mode 100644 index 
0000000000000000000000000000000000000000..57f8bf9da3bca295c15508963c77a870222af0bc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat new file mode 100644 index 0000000000000000000000000000000000000000..87731eb9d4b1f2ac827a212436fe6de175431e11 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat new file mode 100644 index 0000000000000000000000000000000000000000..a165a7a30424b20af9a3a0636c5e655239ea6fa5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat new file mode 100644 index 0000000000000000000000000000000000000000..c3bb9dcbe50ef784ce3282b28e53f4c40beb48ce Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat new file mode 100644 index 0000000000000000000000000000000000000000..351801fd47a2e3e48d9b63034fbae28f8318c9f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat new file mode 100644 index 0000000000000000000000000000000000000000..64bf92f74a457d2f4bc42798493db15cc3ab1008 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..3d3f27f88eef4e02451d18204cdcfd51f96f6d15 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..25269ff9ea4f6dd3f8a9ca0c8ad27d399e4248f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat new file mode 100644 index 0000000000000000000000000000000000000000..9850de37cf86af622b759625c15e6b1a9477ce47 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat new file mode 100644 index 0000000000000000000000000000000000000000..98c09c2dff6e1ef605e25ed1d00afe94597abddc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat new file mode 100644 index 0000000000000000000000000000000000000000..959098d2a9cdd6140758843e059d4ca529b14279 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..49c0ec1d18d9f08111fe2d2a269ed407da71b158 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat new file mode 100644 index 0000000000000000000000000000000000000000..bb936b8789920ce18281fa754a5c048b31e59ba8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..cb3e9e4876249f42924a43232b74f05b91123815 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/invalid_pointer.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/invalid_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..d53893c6c734e6c7771e08042c16874623dc6f0e Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/invalid_pointer.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/null_pointer.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/null_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..8cee5ebecc3bef248ed37c438e0731160b31a310 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/null_pointer.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte_descr.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte_descr.sav new file mode 100644 index 0000000000000000000000000000000000000000..182e29bc57dc05154388553a71876820025bca8d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte_descr.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex32.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex32.sav new file mode 100644 index 0000000000000000000000000000000000000000..593e8c6208ab0bf3aa869de89e213b8aa9f8c071 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex32.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex64.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex64.sav new file mode 100644 index 0000000000000000000000000000000000000000..edb19d388afbaff44e5f0883978e6a74e9755613 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_complex64.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float32.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float32.sav new file mode 100644 index 0000000000000000000000000000000000000000..be9e3877ea845da76d9466c14d70c4cce882368c 
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float32.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float64.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float64.sav new file mode 100644 index 0000000000000000000000000000000000000000..9680b2878c6008a27c8fc9ae6966903ff936cc4a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_float64.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..d02b1756ac043a4ba6119acb28ef34c40359a4dd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int16.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int16.sav new file mode 100644 index 0000000000000000000000000000000000000000..603525694cc307d47412717c4c2f85ddc960897b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int16.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int32.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int32.sav new file mode 100644 index 0000000000000000000000000000000000000000..40210b889402c0f27562296ab39ce1a714f0d0ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int32.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int64.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int64.sav new file mode 100644 index 
0000000000000000000000000000000000000000..c91cd0a561e011a2f18c86119e45392fbc0be825 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_int64.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_string.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_string.sav new file mode 100644 index 0000000000000000000000000000000000000000..ee6e69fe8461edfa580f682761118c8afe2add3a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_string.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint16.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint16.sav new file mode 100644 index 0000000000000000000000000000000000000000..759c2e64fa034c6ddbdbe6181efae1e699a0c314 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint16.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint64.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint64.sav new file mode 100644 index 0000000000000000000000000000000000000000..fc9da5796eab6ce9fb59488b836ba2f567de7b25 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint64.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav new file mode 100644 index 0000000000000000000000000000000000000000..f1aa416f8e661893be282a490005536953d4b7af Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..6f01fbfd109e76c94b6e6e9bfd9eb388f39d99ee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..bac9b207488eb9712ec27fb3567155f0dd773f34 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_inherit.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_inherit.sav new file mode 100644 index 0000000000000000000000000000000000000000..8babd56306f09fa612f731ce593ae13c75f84f4c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_inherit.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav new file mode 100644 index 0000000000000000000000000000000000000000..a3c678162911426702a9a6e932761385a01f247e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..38b812261125e6aabef8618955b234f6c7b04955 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..db1c256c85a707f0a0d78c28241b78d1eddcab1e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers.sav new file mode 100644 index 0000000000000000000000000000000000000000..acbb058a307090f6c9e2d8402c7badf6bb48144c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..732dd2cbfa9c7fd029bb59b4cfcb630cc1077f54 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars.sav new file mode 100644 index 0000000000000000000000000000000000000000..69d7eaf4ecf8747c21d07e14edcf65b4e394974c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..2222391ae5b93ba34c1fdb982c02eb97d9658b58 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..a35f1acfb4cb93ecb637310bbfa7fc1a2151d483 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav new file mode 100644 index 0000000000000000000000000000000000000000..f8d332e94208f448b8377fed7a539181e13bb65c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccc9e1bd9beb3f893239629529f6c19decafd37c6fce0a976211cdc6c0310c9e +size 13 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..71934ac873c7e4278420597e763a596f4dfbbc72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84661c8714058ebb6f642068d142e2fb1759f24aea5dc29d4e5dcd4947ea7bc9 +size 90 diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav new file mode 100644 index 0000000000000000000000000000000000000000..4dc4ebed4e134fccfbf88c0ab93ac33705e93cb5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87c097b16e7f4a1291d7deedfddf935139600ea399fbbc0afc47151ae711f9a6 +size 74 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..3152c566f155a76b36c33f7de56b0942c26548ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a22315b1057dfaa181cff670b1f024800f416573635db1f8cf1086bef753d116 +size 120 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..99ec1413418a387563eba670c547de03b91ec783 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1ba272a5cefcd0fdb418c81b23f86c278a164e867d2ec5f2a12ff9c434218d7 +size 150 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav new file mode 100644 index 
0000000000000000000000000000000000000000..d6f534ab9619a6cb3a121e36e61d5f8c00ecea29 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d45ebb87cb6bdb1cf40b92b6d53f72f60b29706034aa748ecec976be302b00cb +size 116 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/various_compressed.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/various_compressed.sav new file mode 100644 index 0000000000000000000000000000000000000000..dcdb0b0d433939d6a240c86e5060214cd8875732 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/various_compressed.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/wavfile.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/wavfile.py new file mode 100644 index 0000000000000000000000000000000000000000..ad56a811789b1a8181f792814174faf61608ff5e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/wavfile.py @@ -0,0 +1,840 @@ +""" +Module to read / write wav files using NumPy arrays + +Functions +--------- +`read`: Return the sample rate (in samples/sec) and data from a WAV file. + +`write`: Write a NumPy array as a WAV file. + +""" +import io +import sys +import numpy +import struct +import warnings +from enum import IntEnum + + +__all__ = [ + 'WavFileWarning', + 'read', + 'write' +] + + +class WavFileWarning(UserWarning): + pass + + +class WAVE_FORMAT(IntEnum): + """ + WAVE form wFormatTag IDs + + Complete list is in mmreg.h in Windows 10 SDK. 
ALAC and OPUS are the + newest additions, in v10.0.14393 2016-07 + """ + UNKNOWN = 0x0000 + PCM = 0x0001 + ADPCM = 0x0002 + IEEE_FLOAT = 0x0003 + VSELP = 0x0004 + IBM_CVSD = 0x0005 + ALAW = 0x0006 + MULAW = 0x0007 + DTS = 0x0008 + DRM = 0x0009 + WMAVOICE9 = 0x000A + WMAVOICE10 = 0x000B + OKI_ADPCM = 0x0010 + DVI_ADPCM = 0x0011 + IMA_ADPCM = 0x0011 # Duplicate + MEDIASPACE_ADPCM = 0x0012 + SIERRA_ADPCM = 0x0013 + G723_ADPCM = 0x0014 + DIGISTD = 0x0015 + DIGIFIX = 0x0016 + DIALOGIC_OKI_ADPCM = 0x0017 + MEDIAVISION_ADPCM = 0x0018 + CU_CODEC = 0x0019 + HP_DYN_VOICE = 0x001A + YAMAHA_ADPCM = 0x0020 + SONARC = 0x0021 + DSPGROUP_TRUESPEECH = 0x0022 + ECHOSC1 = 0x0023 + AUDIOFILE_AF36 = 0x0024 + APTX = 0x0025 + AUDIOFILE_AF10 = 0x0026 + PROSODY_1612 = 0x0027 + LRC = 0x0028 + DOLBY_AC2 = 0x0030 + GSM610 = 0x0031 + MSNAUDIO = 0x0032 + ANTEX_ADPCME = 0x0033 + CONTROL_RES_VQLPC = 0x0034 + DIGIREAL = 0x0035 + DIGIADPCM = 0x0036 + CONTROL_RES_CR10 = 0x0037 + NMS_VBXADPCM = 0x0038 + CS_IMAADPCM = 0x0039 + ECHOSC3 = 0x003A + ROCKWELL_ADPCM = 0x003B + ROCKWELL_DIGITALK = 0x003C + XEBEC = 0x003D + G721_ADPCM = 0x0040 + G728_CELP = 0x0041 + MSG723 = 0x0042 + INTEL_G723_1 = 0x0043 + INTEL_G729 = 0x0044 + SHARP_G726 = 0x0045 + MPEG = 0x0050 + RT24 = 0x0052 + PAC = 0x0053 + MPEGLAYER3 = 0x0055 + LUCENT_G723 = 0x0059 + CIRRUS = 0x0060 + ESPCM = 0x0061 + VOXWARE = 0x0062 + CANOPUS_ATRAC = 0x0063 + G726_ADPCM = 0x0064 + G722_ADPCM = 0x0065 + DSAT = 0x0066 + DSAT_DISPLAY = 0x0067 + VOXWARE_BYTE_ALIGNED = 0x0069 + VOXWARE_AC8 = 0x0070 + VOXWARE_AC10 = 0x0071 + VOXWARE_AC16 = 0x0072 + VOXWARE_AC20 = 0x0073 + VOXWARE_RT24 = 0x0074 + VOXWARE_RT29 = 0x0075 + VOXWARE_RT29HW = 0x0076 + VOXWARE_VR12 = 0x0077 + VOXWARE_VR18 = 0x0078 + VOXWARE_TQ40 = 0x0079 + VOXWARE_SC3 = 0x007A + VOXWARE_SC3_1 = 0x007B + SOFTSOUND = 0x0080 + VOXWARE_TQ60 = 0x0081 + MSRT24 = 0x0082 + G729A = 0x0083 + MVI_MVI2 = 0x0084 + DF_G726 = 0x0085 + DF_GSM610 = 0x0086 + ISIAUDIO = 0x0088 + ONLIVE = 0x0089 + MULTITUDE_FT_SX20 = 
0x008A + INFOCOM_ITS_G721_ADPCM = 0x008B + CONVEDIA_G729 = 0x008C + CONGRUENCY = 0x008D + SBC24 = 0x0091 + DOLBY_AC3_SPDIF = 0x0092 + MEDIASONIC_G723 = 0x0093 + PROSODY_8KBPS = 0x0094 + ZYXEL_ADPCM = 0x0097 + PHILIPS_LPCBB = 0x0098 + PACKED = 0x0099 + MALDEN_PHONYTALK = 0x00A0 + RACAL_RECORDER_GSM = 0x00A1 + RACAL_RECORDER_G720_A = 0x00A2 + RACAL_RECORDER_G723_1 = 0x00A3 + RACAL_RECORDER_TETRA_ACELP = 0x00A4 + NEC_AAC = 0x00B0 + RAW_AAC1 = 0x00FF + RHETOREX_ADPCM = 0x0100 + IRAT = 0x0101 + VIVO_G723 = 0x0111 + VIVO_SIREN = 0x0112 + PHILIPS_CELP = 0x0120 + PHILIPS_GRUNDIG = 0x0121 + DIGITAL_G723 = 0x0123 + SANYO_LD_ADPCM = 0x0125 + SIPROLAB_ACEPLNET = 0x0130 + SIPROLAB_ACELP4800 = 0x0131 + SIPROLAB_ACELP8V3 = 0x0132 + SIPROLAB_G729 = 0x0133 + SIPROLAB_G729A = 0x0134 + SIPROLAB_KELVIN = 0x0135 + VOICEAGE_AMR = 0x0136 + G726ADPCM = 0x0140 + DICTAPHONE_CELP68 = 0x0141 + DICTAPHONE_CELP54 = 0x0142 + QUALCOMM_PUREVOICE = 0x0150 + QUALCOMM_HALFRATE = 0x0151 + TUBGSM = 0x0155 + MSAUDIO1 = 0x0160 + WMAUDIO2 = 0x0161 + WMAUDIO3 = 0x0162 + WMAUDIO_LOSSLESS = 0x0163 + WMASPDIF = 0x0164 + UNISYS_NAP_ADPCM = 0x0170 + UNISYS_NAP_ULAW = 0x0171 + UNISYS_NAP_ALAW = 0x0172 + UNISYS_NAP_16K = 0x0173 + SYCOM_ACM_SYC008 = 0x0174 + SYCOM_ACM_SYC701_G726L = 0x0175 + SYCOM_ACM_SYC701_CELP54 = 0x0176 + SYCOM_ACM_SYC701_CELP68 = 0x0177 + KNOWLEDGE_ADVENTURE_ADPCM = 0x0178 + FRAUNHOFER_IIS_MPEG2_AAC = 0x0180 + DTS_DS = 0x0190 + CREATIVE_ADPCM = 0x0200 + CREATIVE_FASTSPEECH8 = 0x0202 + CREATIVE_FASTSPEECH10 = 0x0203 + UHER_ADPCM = 0x0210 + ULEAD_DV_AUDIO = 0x0215 + ULEAD_DV_AUDIO_1 = 0x0216 + QUARTERDECK = 0x0220 + ILINK_VC = 0x0230 + RAW_SPORT = 0x0240 + ESST_AC3 = 0x0241 + GENERIC_PASSTHRU = 0x0249 + IPI_HSX = 0x0250 + IPI_RPELP = 0x0251 + CS2 = 0x0260 + SONY_SCX = 0x0270 + SONY_SCY = 0x0271 + SONY_ATRAC3 = 0x0272 + SONY_SPC = 0x0273 + TELUM_AUDIO = 0x0280 + TELUM_IA_AUDIO = 0x0281 + NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285 + FM_TOWNS_SND = 0x0300 + MICRONAS = 0x0350 + MICRONAS_CELP833 = 0x0351 + 
BTV_DIGITAL = 0x0400 + INTEL_MUSIC_CODER = 0x0401 + INDEO_AUDIO = 0x0402 + QDESIGN_MUSIC = 0x0450 + ON2_VP7_AUDIO = 0x0500 + ON2_VP6_AUDIO = 0x0501 + VME_VMPCM = 0x0680 + TPC = 0x0681 + LIGHTWAVE_LOSSLESS = 0x08AE + OLIGSM = 0x1000 + OLIADPCM = 0x1001 + OLICELP = 0x1002 + OLISBC = 0x1003 + OLIOPR = 0x1004 + LH_CODEC = 0x1100 + LH_CODEC_CELP = 0x1101 + LH_CODEC_SBC8 = 0x1102 + LH_CODEC_SBC12 = 0x1103 + LH_CODEC_SBC16 = 0x1104 + NORRIS = 0x1400 + ISIAUDIO_2 = 0x1401 + SOUNDSPACE_MUSICOMPRESS = 0x1500 + MPEG_ADTS_AAC = 0x1600 + MPEG_RAW_AAC = 0x1601 + MPEG_LOAS = 0x1602 + NOKIA_MPEG_ADTS_AAC = 0x1608 + NOKIA_MPEG_RAW_AAC = 0x1609 + VODAFONE_MPEG_ADTS_AAC = 0x160A + VODAFONE_MPEG_RAW_AAC = 0x160B + MPEG_HEAAC = 0x1610 + VOXWARE_RT24_SPEECH = 0x181C + SONICFOUNDRY_LOSSLESS = 0x1971 + INNINGS_TELECOM_ADPCM = 0x1979 + LUCENT_SX8300P = 0x1C07 + LUCENT_SX5363S = 0x1C0C + CUSEEME = 0x1F03 + NTCSOFT_ALF2CM_ACM = 0x1FC4 + DVM = 0x2000 + DTS2 = 0x2001 + MAKEAVIS = 0x3313 + DIVIO_MPEG4_AAC = 0x4143 + NOKIA_ADAPTIVE_MULTIRATE = 0x4201 + DIVIO_G726 = 0x4243 + LEAD_SPEECH = 0x434C + LEAD_VORBIS = 0x564C + WAVPACK_AUDIO = 0x5756 + OGG_VORBIS_MODE_1 = 0x674F + OGG_VORBIS_MODE_2 = 0x6750 + OGG_VORBIS_MODE_3 = 0x6751 + OGG_VORBIS_MODE_1_PLUS = 0x676F + OGG_VORBIS_MODE_2_PLUS = 0x6770 + OGG_VORBIS_MODE_3_PLUS = 0x6771 + ALAC = 0x6C61 + _3COM_NBX = 0x7000 # Can't have leading digit + OPUS = 0x704F + FAAD_AAC = 0x706D + AMR_NB = 0x7361 + AMR_WB = 0x7362 + AMR_WP = 0x7363 + GSM_AMR_CBR = 0x7A21 + GSM_AMR_VBR_SID = 0x7A22 + COMVERSE_INFOSYS_G723_1 = 0xA100 + COMVERSE_INFOSYS_AVQSBC = 0xA101 + COMVERSE_INFOSYS_SBC = 0xA102 + SYMBOL_G729_A = 0xA103 + VOICEAGE_AMR_WB = 0xA104 + INGENIENT_G726 = 0xA105 + MPEG4_AAC = 0xA106 + ENCORE_G726 = 0xA107 + ZOLL_ASAO = 0xA108 + SPEEX_VOICE = 0xA109 + VIANIX_MASC = 0xA10A + WM9_SPECTRUM_ANALYZER = 0xA10B + WMF_SPECTRUM_ANAYZER = 0xA10C + GSM_610 = 0xA10D + GSM_620 = 0xA10E + GSM_660 = 0xA10F + GSM_690 = 0xA110 + GSM_ADAPTIVE_MULTIRATE_WB = 0xA111 + 
POLYCOM_G722 = 0xA112 + POLYCOM_G728 = 0xA113 + POLYCOM_G729_A = 0xA114 + POLYCOM_SIREN = 0xA115 + GLOBAL_IP_ILBC = 0xA116 + RADIOTIME_TIME_SHIFT_RADIO = 0xA117 + NICE_ACA = 0xA118 + NICE_ADPCM = 0xA119 + VOCORD_G721 = 0xA11A + VOCORD_G726 = 0xA11B + VOCORD_G722_1 = 0xA11C + VOCORD_G728 = 0xA11D + VOCORD_G729 = 0xA11E + VOCORD_G729_A = 0xA11F + VOCORD_G723_1 = 0xA120 + VOCORD_LBC = 0xA121 + NICE_G728 = 0xA122 + FRACE_TELECOM_G729 = 0xA123 + CODIAN = 0xA124 + FLAC = 0xF1AC + EXTENSIBLE = 0xFFFE + DEVELOPMENT = 0xFFFF + + +KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT} + + +def _raise_bad_format(format_tag): + try: + format_name = WAVE_FORMAT(format_tag).name + except ValueError: + format_name = f'{format_tag:#06x}' + raise ValueError(f"Unknown wave file format: {format_name}. Supported " + "formats: " + + ', '.join(x.name for x in KNOWN_WAVE_FORMATS)) + + +def _read_fmt_chunk(fid, is_big_endian): + """ + Returns + ------- + size : int + size of format subchunk in bytes (minus 8 for "fmt " and itself) + format_tag : int + PCM, float, or compressed format + channels : int + number of channels + fs : int + sampling frequency in samples per second + bytes_per_second : int + overall byte rate for the file + block_align : int + bytes per sample, including all channels + bit_depth : int + bits per sample + + Notes + ----- + Assumes file pointer is immediately after the 'fmt ' id + """ + if is_big_endian: + fmt = '>' + else: + fmt = '<' + + size = struct.unpack(fmt+'I', fid.read(4))[0] + + if size < 16: + raise ValueError("Binary structure of wave file is not compliant") + + res = struct.unpack(fmt+'HHIIHH', fid.read(16)) + bytes_read = 16 + + format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res + + if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2): + ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0] + bytes_read += 2 + if ext_chunk_size >= 22: + extensible_chunk_data = fid.read(22) + bytes_read += 22 + raw_guid = 
extensible_chunk_data[2+4:2+4+16] + # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361) + # MS GUID byte order: first three groups are native byte order, + # rest is Big Endian + if is_big_endian: + tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71' + else: + tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71' + if raw_guid.endswith(tail): + format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0] + else: + raise ValueError("Binary structure of wave file is not compliant") + + if format_tag not in KNOWN_WAVE_FORMATS: + _raise_bad_format(format_tag) + + # move file pointer to next chunk + if size > bytes_read: + fid.read(size - bytes_read) + + # fmt should always be 16, 18 or 40, but handle it just in case + _handle_pad_byte(fid, size) + + if format_tag == WAVE_FORMAT.PCM: + if bytes_per_second != fs * block_align: + raise ValueError("WAV header is invalid: nAvgBytesPerSec must" + " equal product of nSamplesPerSec and" + " nBlockAlign, but file has nSamplesPerSec =" + f" {fs}, nBlockAlign = {block_align}, and" + f" nAvgBytesPerSec = {bytes_per_second}") + + return (size, format_tag, channels, fs, bytes_per_second, block_align, + bit_depth) + + +def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, + block_align, mmap=False): + """ + Notes + ----- + Assumes file pointer is immediately after the 'data' id + + It's possible to not use all available bits in a container, or to store + samples in a container bigger than necessary, so bytes_per_sample uses + the actual reported container size (nBlockAlign / nChannels). 
Real-world + examples: + + Adobe Audition's "24-bit packed int (type 1, 20-bit)" + + nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20 + + http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav + is: + + nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12 + + http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf + gives an example of: + + nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20 + """ + if is_big_endian: + fmt = '>' + else: + fmt = '<' + + # Size of the data subchunk in bytes + size = struct.unpack(fmt+'I', fid.read(4))[0] + + # Number of bytes per sample (sample container size) + bytes_per_sample = block_align // channels + n_samples = size // bytes_per_sample + + if format_tag == WAVE_FORMAT.PCM: + if 1 <= bit_depth <= 8: + dtype = 'u1' # WAV of 8-bit integer or less are unsigned + elif bytes_per_sample in {3, 5, 6, 7}: + # No compatible dtype. Load as raw bytes for reshaping later. + dtype = 'V1' + elif bit_depth <= 64: + # Remaining bit depths can map directly to signed numpy dtypes + dtype = f'{fmt}i{bytes_per_sample}' + else: + raise ValueError("Unsupported bit depth: the WAV file " + f"has {bit_depth}-bit integer data.") + elif format_tag == WAVE_FORMAT.IEEE_FLOAT: + if bit_depth in {32, 64}: + dtype = f'{fmt}f{bytes_per_sample}' + else: + raise ValueError("Unsupported bit depth: the WAV file " + f"has {bit_depth}-bit floating-point data.") + else: + _raise_bad_format(format_tag) + + start = fid.tell() + if not mmap: + try: + count = size if dtype == 'V1' else n_samples + data = numpy.fromfile(fid, dtype=dtype, count=count) + except io.UnsupportedOperation: # not a C-like file + fid.seek(start, 0) # just in case it seeked, though it shouldn't + data = numpy.frombuffer(fid.read(size), dtype=dtype) + + if dtype == 'V1': + # Rearrange raw bytes into smallest compatible numpy dtype + dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8' + a = numpy.zeros((len(data) // bytes_per_sample, 
numpy.dtype(dt).itemsize), + dtype='V1') + if is_big_endian: + a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample)) + else: + a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample)) + data = a.view(dt).reshape(a.shape[:-1]) + else: + if bytes_per_sample in {1, 2, 4, 8}: + start = fid.tell() + data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start, + shape=(n_samples,)) + fid.seek(start + size) + else: + raise ValueError("mmap=True not compatible with " + f"{bytes_per_sample}-byte container size.") + + _handle_pad_byte(fid, size) + + if channels > 1: + data = data.reshape(-1, channels) + return data + + +def _skip_unknown_chunk(fid, is_big_endian): + if is_big_endian: + fmt = '>I' + else: + fmt = '>> from os.path import dirname, join as pjoin + >>> from scipy.io import wavfile + >>> import scipy.io + + Get the filename for an example .wav file from the tests/data directory. + + >>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data') + >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav') + + Load the .wav file contents. + + >>> samplerate, data = wavfile.read(wav_fname) + >>> print(f"number of channels = {data.shape[1]}") + number of channels = 2 + >>> length = data.shape[0] / samplerate + >>> print(f"length = {length}s") + length = 0.01s + + Plot the waveform. 
+ + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> time = np.linspace(0., length, data.shape[0]) + >>> plt.plot(time, data[:, 0], label="Left channel") + >>> plt.plot(time, data[:, 1], label="Right channel") + >>> plt.legend() + >>> plt.xlabel("Time [s]") + >>> plt.ylabel("Amplitude") + >>> plt.show() + + """ + if hasattr(filename, 'read'): + fid = filename + mmap = False + else: + fid = open(filename, 'rb') + + try: + file_size, is_big_endian = _read_riff_chunk(fid) + fmt_chunk_received = False + data_chunk_received = False + while fid.tell() < file_size: + # read the next chunk + chunk_id = fid.read(4) + + if not chunk_id: + if data_chunk_received: + # End of file but data successfully read + warnings.warn( + f"Reached EOF prematurely; finished at {fid.tell():d} bytes, " + f"expected {file_size:d} bytes from header.", + WavFileWarning, stacklevel=2) + break + else: + raise ValueError("Unexpected end of file.") + elif len(chunk_id) < 4: + msg = f"Incomplete chunk ID: {repr(chunk_id)}" + # If we have the data, ignore the broken chunk + if fmt_chunk_received and data_chunk_received: + warnings.warn(msg + ", ignoring it.", WavFileWarning, + stacklevel=2) + else: + raise ValueError(msg) + + if chunk_id == b'fmt ': + fmt_chunk_received = True + fmt_chunk = _read_fmt_chunk(fid, is_big_endian) + format_tag, channels, fs = fmt_chunk[1:4] + bit_depth = fmt_chunk[6] + block_align = fmt_chunk[5] + elif chunk_id == b'fact': + _skip_unknown_chunk(fid, is_big_endian) + elif chunk_id == b'data': + data_chunk_received = True + if not fmt_chunk_received: + raise ValueError("No fmt chunk before data") + data = _read_data_chunk(fid, format_tag, channels, bit_depth, + is_big_endian, block_align, mmap) + elif chunk_id == b'LIST': + # Someday this could be handled properly but for now skip it + _skip_unknown_chunk(fid, is_big_endian) + elif chunk_id in {b'JUNK', b'Fake'}: + # Skip alignment chunks without warning + _skip_unknown_chunk(fid, is_big_endian) + else: + 
warnings.warn("Chunk (non-data) not understood, skipping it.", + WavFileWarning, stacklevel=2) + _skip_unknown_chunk(fid, is_big_endian) + finally: + if not hasattr(filename, 'read'): + fid.close() + else: + fid.seek(0) + + return fs, data + + +def write(filename, rate, data): + """ + Write a NumPy array as a WAV file. + + Parameters + ---------- + filename : string or open file handle + Output wav file. + rate : int + The sample rate (in samples/sec). + data : ndarray + A 1-D or 2-D NumPy array of either integer or float data-type. + + Notes + ----- + * Writes a simple uncompressed WAV file. + * To write multiple-channels, use a 2-D array of shape + (Nsamples, Nchannels). + * The bits-per-sample and PCM/float will be determined by the data-type. + + Common data types: [1]_ + + ===================== =========== =========== ============= + WAV format Min Max NumPy dtype + ===================== =========== =========== ============= + 32-bit floating-point -1.0 +1.0 float32 + 32-bit PCM -2147483648 +2147483647 int32 + 16-bit PCM -32768 +32767 int16 + 8-bit PCM 0 255 uint8 + ===================== =========== =========== ============= + + Note that 8-bit PCM is unsigned. + + References + ---------- + .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming + Interface and Data Specifications 1.0", section "Data Format of the + Samples", August 1991 + http://www.tactilemedia.com/info/MCI_Control_Info.html + + Examples + -------- + Create a 100Hz sine wave, sampled at 44100Hz. + Write to 16-bit PCM, Mono. + + >>> from scipy.io.wavfile import write + >>> import numpy as np + >>> samplerate = 44100; fs = 100 + >>> t = np.linspace(0., 1., samplerate) + >>> amplitude = np.iinfo(np.int16).max + >>> data = amplitude * np.sin(2. 
* np.pi * fs * t) + >>> write("example.wav", samplerate, data.astype(np.int16)) + + """ + if hasattr(filename, 'write'): + fid = filename + else: + fid = open(filename, 'wb') + + fs = rate + + try: + dkind = data.dtype.kind + allowed_dtypes = ['float32', 'float64', + 'uint8', 'int16', 'int32', 'int64'] + if data.dtype.name not in allowed_dtypes: + raise ValueError("Unsupported data type '%s'" % data.dtype) + + header_data = b'' + + header_data += b'RIFF' + header_data += b'\x00\x00\x00\x00' + header_data += b'WAVE' + + # fmt chunk + header_data += b'fmt ' + if dkind == 'f': + format_tag = WAVE_FORMAT.IEEE_FLOAT + else: + format_tag = WAVE_FORMAT.PCM + if data.ndim == 1: + channels = 1 + else: + channels = data.shape[1] + bit_depth = data.dtype.itemsize * 8 + bytes_per_second = fs*(bit_depth // 8)*channels + block_align = channels * (bit_depth // 8) + + fmt_chunk_data = struct.pack(' 0xFFFFFFFF: + raise ValueError("Data exceeds wave file size limit") + + fid.write(header_data) + + # data chunk + fid.write(b'data') + fid.write(struct.pack('' or (data.dtype.byteorder == '=' and + sys.byteorder == 'big'): + data = data.byteswap() + _array_tofile(fid, data) + + # Determine file size and place it in correct + # position at start of the file. + size = fid.tell() + fid.seek(4) + fid.write(struct.pack('