applied-ai-018 committed on
Commit
21ca2e7
·
verified ·
1 Parent(s): d31891b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_double_ref.npz +3 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_longdouble_ref.npz +3 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz +3 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test.npz +3 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/io/__init__.py +116 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/io/_fortran.py +354 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/io/_idl.py +918 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/io/_mmio.py +961 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/io/_netcdf.py +1095 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/io/harwell_boeing.py +21 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/io/idl.py +20 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py +20 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py +28 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/streams.py +18 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/io/netcdf.py +25 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_idl.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_paths.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav +0 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav +0 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav +0 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav +0 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav +0 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat +0 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat +0 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat +0 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat +0 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat +0 -0
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_double_ref.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a60c649415b645223924d8342ccc5c097801c86901287a369e53fc9259f5ec4e
3
+ size 162120
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_longdouble_ref.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a406cbd4dad04d0c59dd38f54416fb49424c82229c1a074b6a44ec0cde2000e3
3
+ size 296072
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:276a9141318e6fc36e4ab6ff54a61b64054ef8849b660f17359e5f541b43c526
3
+ size 95144
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36de804a22d8fdea054590ce49ddf3c859838b7d89193c56b3bcb660cbf43797
3
+ size 11968
env-llmeval/lib/python3.10/site-packages/scipy/io/__init__.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ==================================
3
+ Input and output (:mod:`scipy.io`)
4
+ ==================================
5
+
6
+ .. currentmodule:: scipy.io
7
+
8
+ SciPy has many modules, classes, and functions available to read data
9
+ from and write data to a variety of file formats.
10
+
11
+ .. seealso:: `NumPy IO routines <https://www.numpy.org/devdocs/reference/routines.io.html>`__
12
+
13
+ MATLAB® files
14
+ =============
15
+
16
+ .. autosummary::
17
+ :toctree: generated/
18
+
19
+ loadmat - Read a MATLAB style mat file (version 4 through 7.1)
20
+ savemat - Write a MATLAB style mat file (version 4 through 7.1)
21
+ whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
22
+
23
+ For low-level MATLAB reading and writing utilities, see `scipy.io.matlab`.
24
+
25
+ IDL® files
26
+ ==========
27
+
28
+ .. autosummary::
29
+ :toctree: generated/
30
+
31
+ readsav - Read an IDL 'save' file
32
+
33
+ Matrix Market files
34
+ ===================
35
+
36
+ .. autosummary::
37
+ :toctree: generated/
38
+
39
+ mminfo - Query matrix info from Matrix Market formatted file
40
+ mmread - Read matrix from Matrix Market formatted file
41
+ mmwrite - Write matrix to Matrix Market formatted file
42
+
43
+ Unformatted Fortran files
44
+ ===============================
45
+
46
+ .. autosummary::
47
+ :toctree: generated/
48
+
49
+ FortranFile - A file object for unformatted sequential Fortran files
50
+ FortranEOFError - Exception indicating the end of a well-formed file
51
+ FortranFormattingError - Exception indicating an inappropriate end
52
+
53
+ Netcdf
54
+ ======
55
+
56
+ .. autosummary::
57
+ :toctree: generated/
58
+
59
+ netcdf_file - A file object for NetCDF data
60
+ netcdf_variable - A data object for the netcdf module
61
+
62
+ Harwell-Boeing files
63
+ ====================
64
+
65
+ .. autosummary::
66
+ :toctree: generated/
67
+
68
+ hb_read -- read H-B file
69
+ hb_write -- write H-B file
70
+
71
+ Wav sound files (:mod:`scipy.io.wavfile`)
72
+ =========================================
73
+
74
+ .. module:: scipy.io.wavfile
75
+
76
+ .. autosummary::
77
+ :toctree: generated/
78
+
79
+ read
80
+ write
81
+ WavFileWarning
82
+
83
+ Arff files (:mod:`scipy.io.arff`)
84
+ =================================
85
+
86
+ .. module:: scipy.io.arff
87
+
88
+ .. autosummary::
89
+ :toctree: generated/
90
+
91
+ loadarff
92
+ MetaData
93
+ ArffError
94
+ ParseArffError
95
+ """
96
+ # matfile read and write
97
+ from .matlab import loadmat, savemat, whosmat
98
+
99
+ # netCDF file support
100
+ from ._netcdf import netcdf_file, netcdf_variable
101
+
102
+ # Fortran file support
103
+ from ._fortran import FortranFile, FortranEOFError, FortranFormattingError
104
+
105
+ from ._fast_matrix_market import mminfo, mmread, mmwrite
106
+ from ._idl import readsav
107
+ from ._harwell_boeing import hb_read, hb_write
108
+
109
+ # Deprecated namespaces, to be removed in v2.0.0
110
+ from . import arff, harwell_boeing, idl, mmio, netcdf, wavfile
111
+
112
+ __all__ = [s for s in dir() if not s.startswith('_')]
113
+
114
+ from scipy._lib._testutils import PytestTester
115
+ test = PytestTester(__name__)
116
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/io/_fortran.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module to read / write Fortran unformatted sequential files.
3
+
4
+ This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz.
5
+
6
+ """
7
+ import warnings
8
+ import numpy as np
9
+
10
+ __all__ = ['FortranFile', 'FortranEOFError', 'FortranFormattingError']
11
+
12
+
13
class FortranEOFError(TypeError, OSError):
    """Signal that the file ended cleanly, between records.

    Inherits from TypeError for backward compatibility: older versions of
    this reader raised TypeError at end of file, and that was the only way
    callers could detect it, so existing ``except TypeError:`` handlers
    must keep working.
    """
22
+
23
+
24
class FortranFormattingError(TypeError, OSError):
    """Signal that the file ended in the middle of a record.

    Inherits from TypeError for backward compatibility with callers that
    catch TypeError (the historical behavior).
    """
31
+
32
+
33
class FortranFile:
    """
    A file object for unformatted sequential files from Fortran code.

    Parameters
    ----------
    filename : file or str
        Open file object or filename.
    mode : {'r', 'w'}, optional
        Read-write mode, default is 'r'.
    header_dtype : dtype, optional
        Data type of the header. Size and endianness must match the input/output file.

    Notes
    -----
    These files are broken up into records of unspecified types. The size of
    each record is given at the start (although the size of this header is not
    standard) and the data is written onto disk without any formatting. Fortran
    compilers supporting the BACKSPACE statement will write a second copy of
    the size to facilitate backwards seeking.

    This class only supports files written with both sizes for the record.
    It also does not support the subrecords used in Intel and gfortran compilers
    for records which are greater than 2GB with a 4-byte header.

    An example of an unformatted sequential file in Fortran would be written as::

        OPEN(1, FILE=myfilename, FORM='unformatted')

        WRITE(1) myvariable

    Since this is a non-standard file format, whose contents depend on the
    compiler and the endianness of the machine, caution is advised. Files from
    gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work.

    Consider using Fortran direct-access files or files from the newer Stream
    I/O, which can be easily read by `numpy.fromfile`.

    Examples
    --------
    To create an unformatted sequential Fortran file:

    >>> from scipy.io import FortranFile
    >>> import numpy as np
    >>> f = FortranFile('test.unf', 'w')
    >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32))
    >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T)
    >>> f.close()

    To read this file:

    >>> f = FortranFile('test.unf', 'r')
    >>> print(f.read_ints(np.int32))
    [1 2 3 4 5]
    >>> print(f.read_reals(float).reshape((5,4), order="F"))
    [[0.         0.05263158 0.10526316 0.15789474]
     [0.21052632 0.26315789 0.31578947 0.36842105]
     [0.42105263 0.47368421 0.52631579 0.57894737]
     [0.63157895 0.68421053 0.73684211 0.78947368]
     [0.84210526 0.89473684 0.94736842 1.        ]]
    >>> f.close()

    Or, in Fortran::

        integer :: a(5), i
        double precision :: b(5,4)
        open(1, file='test.unf', form='unformatted')
        read(1) a
        read(1) b
        close(1)
        write(*,*) a
        do i = 1, 5
            write(*,*) b(i,:)
        end do

    """
    def __init__(self, filename, mode='r', header_dtype=np.uint32):
        if header_dtype is None:
            raise ValueError('Must specify dtype')

        header_dtype = np.dtype(header_dtype)
        if header_dtype.kind != 'u':
            # Record sizes are byte counts; a signed header dtype is
            # almost certainly a mistake, but allow it with a warning.
            warnings.warn("Given a dtype which is not unsigned.", stacklevel=2)

        if mode not in 'rw' or len(mode) != 1:
            raise ValueError('mode must be either r or w')

        if hasattr(filename, 'seek'):
            # Already an open file-like object; use it as-is.
            self._fp = filename
        else:
            self._fp = open(filename, '%sb' % mode)

        self._header_dtype = header_dtype

    def _read_size(self, eof_ok=False):
        """Read one record-size marker; raise on unexpected end of file.

        With ``eof_ok=True`` a clean EOF exactly on a record boundary raises
        FortranEOFError (normal end of file); a partial marker always raises
        FortranFormattingError.
        """
        n = self._header_dtype.itemsize
        b = self._fp.read(n)
        if (not b) and eof_ok:
            raise FortranEOFError("End of file occurred at end of record")
        elif len(b) < n:
            raise FortranFormattingError(
                "End of file in the middle of the record size")
        return int(np.frombuffer(b, dtype=self._header_dtype, count=1)[0])

    def write_record(self, *items):
        """
        Write a record (including sizes) to the file.

        Parameters
        ----------
        *items : array_like
            The data arrays to write.

        Notes
        -----
        Writes data items to a file::

            write_record(a.T, b.T, c.T, ...)

            write(1) a, b, c, ...

        Note that data in multidimensional arrays is written in
        row-major order --- to make them read correctly by Fortran
        programs, you need to transpose the arrays yourself when
        writing them.

        """
        items = tuple(np.asarray(item) for item in items)
        total_size = sum(item.nbytes for item in items)

        nb = np.array([total_size], dtype=self._header_dtype)

        # Record layout: leading size marker, payload(s), trailing size marker.
        nb.tofile(self._fp)
        for item in items:
            item.tofile(self._fp)
        nb.tofile(self._fp)

    def read_record(self, *dtypes, **kwargs):
        """
        Reads a record of a given type from the file.

        Parameters
        ----------
        *dtypes : dtypes, optional
            Data type(s) specifying the size and endianness of the data.

        Returns
        -------
        data : ndarray
            A 1-D array object.

        Raises
        ------
        FortranEOFError
            To signal that no further records are available
        FortranFormattingError
            To signal that the end of the file was encountered
            part-way through a record

        Notes
        -----
        If the record contains a multidimensional array, you can specify
        the size in the dtype. For example::

            INTEGER var(5,4)

        can be read with::

            read_record('(4,5)i4').T

        Note that this function does **not** assume the file data is in Fortran
        column major order, so you need to (i) swap the order of dimensions
        when reading and (ii) transpose the resulting array.

        Alternatively, you can read the data as a 1-D array and handle the
        ordering yourself. For example::

            read_record('i4').reshape(5, 4, order='F')

        For records that contain several variables or mixed types (as opposed
        to single scalar or array types), give them as separate arguments::

            double precision :: a
            integer :: b
            write(1) a, b

            record = f.read_record('<f8', '<i4')
            a = record[0]  # first number
            b = record[1]  # second number

        and if any of the variables are arrays, the shape can be specified as
        the third item in the relevant dtype::

            double precision :: a
            integer :: b(3,4)
            write(1) a, b

            record = f.read_record('<f8', np.dtype(('<i4', (4, 3))))
            a = record[0]
            b = record[1].T

        NumPy also supports a short syntax for this kind of type::

            record = f.read_record('<f8', '(4,3)<i4')

        See Also
        --------
        read_reals
        read_ints

        """
        dtype = kwargs.pop('dtype', None)
        if kwargs:
            raise ValueError(f"Unknown keyword arguments {tuple(kwargs.keys())}")

        if dtype is not None:
            dtypes = dtypes + (dtype,)
        elif not dtypes:
            raise ValueError('Must specify at least one dtype')

        first_size = self._read_size(eof_ok=True)

        dtypes = tuple(np.dtype(dtype) for dtype in dtypes)
        block_size = sum(dtype.itemsize for dtype in dtypes)

        num_blocks, remainder = divmod(first_size, block_size)
        if remainder != 0:
            raise ValueError(f'Size obtained ({first_size}) is not a multiple of the '
                             f'dtypes given ({block_size}).')

        if len(dtypes) != 1 and first_size != block_size:
            # Fortran does not write mixed type array items in interleaved order,
            # and it's not possible to guess the sizes of the arrays that were written.
            # The user must specify the exact sizes of each of the arrays.
            raise ValueError(f'Size obtained ({first_size}) does not match with the '
                             f'expected size ({block_size}) of multi-item record')

        data = []
        for dtype in dtypes:
            r = np.fromfile(self._fp, dtype=dtype, count=num_blocks)
            if len(r) != num_blocks:
                raise FortranFormattingError(
                    "End of file in the middle of a record")
            if dtype.shape != ():
                # Squeeze outmost block dimension for array items
                if num_blocks == 1:
                    assert r.shape == (1,) + dtype.shape
                    r = r[0]

            data.append(r)

        second_size = self._read_size()
        if first_size != second_size:
            raise ValueError('Sizes do not agree in the header and footer for '
                             'this record - check header dtype')

        # Unpack result
        if len(dtypes) == 1:
            return data[0]
        else:
            return tuple(data)

    def read_ints(self, dtype='i4'):
        """
        Reads a record of a given type from the file, defaulting to an integer
        type (``INTEGER*4`` in Fortran).

        Parameters
        ----------
        dtype : dtype, optional
            Data type specifying the size and endianness of the data.

        Returns
        -------
        data : ndarray
            A 1-D array object.

        See Also
        --------
        read_reals
        read_record

        """
        return self.read_record(dtype)

    def read_reals(self, dtype='f8'):
        """
        Reads a record of a given type from the file, defaulting to a floating
        point number (``real*8`` in Fortran).

        Parameters
        ----------
        dtype : dtype, optional
            Data type specifying the size and endianness of the data.

        Returns
        -------
        data : ndarray
            A 1-D array object.

        See Also
        --------
        read_ints
        read_record

        """
        return self.read_record(dtype)

    def close(self):
        """
        Closes the file. It is unsupported to call any other methods off this
        object after closing it. Note that this class supports the 'with'
        statement in modern versions of Python, to call this automatically

        """
        self._fp.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.close()
env-llmeval/lib/python3.10/site-packages/scipy/io/_idl.py ADDED
@@ -0,0 +1,918 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # IDLSave - a python module to read IDL 'save' files
2
+ # Copyright (c) 2010 Thomas P. Robitaille
3
+
4
+ # Many thanks to Craig Markwardt for publishing the Unofficial Format
5
+ # Specification for IDL .sav files, without which this Python module would not
6
+ # exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).
7
+
8
+ # This code was developed by with permission from ITT Visual Information
9
+ # Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,
10
+ # Inc. for their Interactive Data Language software.
11
+
12
+ # Permission is hereby granted, free of charge, to any person obtaining a
13
+ # copy of this software and associated documentation files (the "Software"),
14
+ # to deal in the Software without restriction, including without limitation
15
+ # the rights to use, copy, modify, merge, publish, distribute, sublicense,
16
+ # and/or sell copies of the Software, and to permit persons to whom the
17
+ # Software is furnished to do so, subject to the following conditions:
18
+
19
+ # The above copyright notice and this permission notice shall be included in
20
+ # all copies or substantial portions of the Software.
21
+
22
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28
+ # DEALINGS IN THE SOFTWARE.
29
+
30
+ __all__ = ['readsav']
31
+
32
+ import struct
33
+ import numpy as np
34
+ import tempfile
35
+ import zlib
36
+ import warnings
37
+
38
+ # Define the different data types that can be found in an IDL save file
39
+ DTYPE_DICT = {1: '>u1',
40
+ 2: '>i2',
41
+ 3: '>i4',
42
+ 4: '>f4',
43
+ 5: '>f8',
44
+ 6: '>c8',
45
+ 7: '|O',
46
+ 8: '|O',
47
+ 9: '>c16',
48
+ 10: '|O',
49
+ 11: '|O',
50
+ 12: '>u2',
51
+ 13: '>u4',
52
+ 14: '>i8',
53
+ 15: '>u8'}
54
+
55
+ # Define the different record types that can be found in an IDL save file
56
+ RECTYPE_DICT = {0: "START_MARKER",
57
+ 1: "COMMON_VARIABLE",
58
+ 2: "VARIABLE",
59
+ 3: "SYSTEM_VARIABLE",
60
+ 6: "END_MARKER",
61
+ 10: "TIMESTAMP",
62
+ 12: "COMPILED",
63
+ 13: "IDENTIFICATION",
64
+ 14: "VERSION",
65
+ 15: "HEAP_HEADER",
66
+ 16: "HEAP_DATA",
67
+ 17: "PROMOTE64",
68
+ 19: "NOTICE",
69
+ 20: "DESCRIPTION"}
70
+
71
+ # Define a dictionary to contain structure definitions
72
+ STRUCT_DICT = {}
73
+
74
+
75
def _align_32(f):
    '''Advance the stream position to the next 32-bit (4-byte) boundary.'''
    remainder = f.tell() % 4
    if remainder:
        # Seek forward, relative to the current position.
        f.seek(4 - remainder, 1)
82
+
83
+
84
def _skip_bytes(f, n):
    '''Consume and discard the next `n` bytes of the stream.'''
    f.read(n)
88
+
89
+
90
def _read_bytes(f, n):
    '''Return the next `n` bytes of the stream.'''
    return f.read(n)
93
+
94
+
95
def _read_byte(f):
    '''Read one unsigned byte (stored in a 4-byte slot; trailing 3 bytes are padding).'''
    raw = f.read(4)
    (value,) = struct.unpack('>B', raw[:1])
    return np.uint8(value)
98
+
99
+
100
def _read_long(f):
    '''Read a big-endian signed 32-bit integer (an IDL "long").'''
    (value,) = struct.unpack('>l', f.read(4))
    return np.int32(value)
103
+
104
+
105
def _read_int16(f):
    '''Read a signed 16-bit integer (stored in the low half of a 4-byte slot).'''
    raw = f.read(4)
    (value,) = struct.unpack('>h', raw[2:4])
    return np.int16(value)
108
+
109
+
110
def _read_int32(f):
    '''Read a big-endian signed 32-bit integer.'''
    (value,) = struct.unpack('>i', f.read(4))
    return np.int32(value)
113
+
114
+
115
def _read_int64(f):
    '''Read a big-endian signed 64-bit integer.'''
    (value,) = struct.unpack('>q', f.read(8))
    return np.int64(value)
118
+
119
+
120
def _read_uint16(f):
    '''Read an unsigned 16-bit integer (stored in the low half of a 4-byte slot).'''
    raw = f.read(4)
    (value,) = struct.unpack('>H', raw[2:4])
    return np.uint16(value)
123
+
124
+
125
def _read_uint32(f):
    '''Read a big-endian unsigned 32-bit integer.'''
    (value,) = struct.unpack('>I', f.read(4))
    return np.uint32(value)
128
+
129
+
130
def _read_uint64(f):
    '''Read a big-endian unsigned 64-bit integer.'''
    (value,) = struct.unpack('>Q', f.read(8))
    return np.uint64(value)
133
+
134
+
135
def _read_float32(f):
    '''Read a big-endian 32-bit IEEE float.'''
    (value,) = struct.unpack('>f', f.read(4))
    return np.float32(value)
138
+
139
+
140
def _read_float64(f):
    '''Read a big-endian 64-bit IEEE float.'''
    (value,) = struct.unpack('>d', f.read(8))
    return np.float64(value)
143
+
144
+
145
class Pointer:
    '''Lightweight wrapper recording the heap index of an IDL pointer.'''

    def __init__(self, index):
        # Heap index this pointer refers to within the save file.
        self.index = index
151
+
152
+
153
class ObjectPointer(Pointer):
    '''Pointer subclass marking IDL object references (same payload as Pointer).'''
    pass
156
+
157
+
158
def _read_string(f):
    '''Read a length-prefixed latin-1 string, then realign to 32 bits.'''
    length = _read_long(f)
    if length <= 0:
        return ''
    chars = _read_bytes(f, length).decode('latin1')
    _align_32(f)
    return chars
167
+
168
+
169
def _read_string_data(f):
    '''Read a data string as bytes (the length is stored twice in the file).'''
    length = _read_long(f)
    if length <= 0:
        # Empty strings are represented by a zero length and no payload.
        return ''
    length = _read_long(f)
    string_data = _read_bytes(f, length)
    _align_32(f)
    return string_data
179
+
180
+
181
def _read_data(f, dtype):
    '''Read one scalar value of IDL type code `dtype` from `f`.

    Fixed-width integer and float codes are dispatched through a table;
    the remaining codes need special handling (validation, two reads for
    complex values, or wrapping in Pointer objects).
    '''
    simple_readers = {
        2: _read_int16,
        3: _read_int32,
        4: _read_float32,
        5: _read_float64,
        12: _read_uint16,
        13: _read_uint32,
        14: _read_int64,
        15: _read_uint64,
    }

    if dtype == 1:
        # Byte values are preceded by an int32 count that must be 1.
        if _read_int32(f) != 1:
            raise Exception("Error occurred while reading byte variable")
        return _read_byte(f)
    if dtype == 6:
        # Single-precision complex: real part then imaginary part.
        real = _read_float32(f)
        imag = _read_float32(f)
        return np.complex64(real + imag * 1j)
    if dtype == 7:
        return _read_string_data(f)
    if dtype == 8:
        # Structures are handled by _read_structure, never here.
        raise Exception("Should not be here - please report this")
    if dtype == 9:
        # Double-precision complex: real part then imaginary part.
        real = _read_float64(f)
        imag = _read_float64(f)
        return np.complex128(real + imag * 1j)
    if dtype == 10:
        return Pointer(_read_int32(f))
    if dtype == 11:
        return ObjectPointer(_read_int32(f))

    reader = simple_readers.get(dtype)
    if reader is None:
        raise Exception("Unknown IDL type: %i - please report this" % dtype)
    return reader(f)
221
+
222
+
223
def _read_structure(f, array_desc, struct_desc):
    '''
    Read a structure, with the array and structure descriptors given as
    `array_desc` and `struct_desc` respectively.
    '''
    nrows = array_desc['nelements']
    tags = struct_desc['tagtable']

    # Build the record dtype: each tag becomes a field keyed by both its
    # lowercase and original name (a recarray "title" pair).
    rec_dtype = []
    for tag in tags:
        key = (tag['name'].lower(), tag['name'])
        if tag['structure'] or tag['array']:
            # Nested structures and arrays are stored as Python objects.
            rec_dtype.append((key, np.object_))
        elif tag['typecode'] in DTYPE_DICT:
            rec_dtype.append((key, DTYPE_DICT[tag['typecode']]))
        else:
            raise Exception("Variable type %i not implemented" %
                            tag['typecode'])

    structure = np.rec.recarray((nrows, ), dtype=rec_dtype)

    # Values are stored row by row, tag by tag.
    for row in range(nrows):
        for tag in tags:
            name = tag['name']
            if tag['structure']:
                structure[name][row] = _read_structure(
                    f,
                    struct_desc['arrtable'][name],
                    struct_desc['structtable'][name])
            elif tag['array']:
                structure[name][row] = _read_array(
                    f, tag['typecode'], struct_desc['arrtable'][name])
            else:
                structure[name][row] = _read_data(f, tag['typecode'])

    # Reshape structure if needed (dims are stored in IDL's column-major
    # order, so reverse them for NumPy).
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        dims.reverse()
        structure = structure.reshape(dims)

    return structure
266
+
267
+
268
def _read_array(f, typecode, array_desc):
    '''
    Read an array of type `typecode`, with the array descriptor given as
    `array_desc`.
    '''
    if typecode in (1, 3, 4, 5, 6, 9, 13, 14, 15):
        if typecode == 1:
            nbytes = _read_int32(f)
            if nbytes != array_desc['nbytes']:
                warnings.warn("Not able to verify number of bytes from header",
                              stacklevel=3)
        # Densely packed types: decode the raw bytes in one shot.
        array = np.frombuffer(f.read(array_desc['nbytes']),
                              dtype=DTYPE_DICT[typecode])
    elif typecode in (2, 12):
        # 2-byte values occupy 4-byte slots in the file, so read twice the
        # payload and keep every second (the meaningful) element.
        array = np.frombuffer(f.read(array_desc['nbytes'] * 2),
                              dtype=DTYPE_DICT[typecode])[1::2]
    else:
        # Variable-width or wrapped types (strings, pointers): read
        # element by element into an object array.
        elements = [_read_data(f, typecode)
                    for _ in range(array_desc['nelements'])]
        array = np.array(elements, dtype=np.object_)

    # Reshape array if needed (dims are reversed: IDL is column-major).
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        dims.reverse()
        array = array.reshape(dims)

    # Go to next 32-bit alignment position.
    _align_32(f)

    return array
314
+
315
+
316
def _read_record(f):
    '''
    Read one complete record from the stream `f` and return it as a dict.

    Every record starts with a RECTYPE long, the 64-bit offset of the next
    record (two uint32 halves), and 4 unused bytes.  The body is then
    parsed according to the record type; finally the stream is positioned
    at the start of the next record.
    '''

    record = {'rectype': _read_long(f)}

    # Offset of the next record, stored as low/high uint32 pair.
    nextrec = _read_uint32(f)
    nextrec += _read_uint32(f).astype(np.int64) * 2**32

    _skip_bytes(f, 4)

    if record['rectype'] not in RECTYPE_DICT:
        raise Exception("Unknown RECTYPE: %i" % record['rectype'])

    # Translate the numeric code into its symbolic name.
    record['rectype'] = RECTYPE_DICT[record['rectype']]

    if record['rectype'] in ["VARIABLE", "HEAP_DATA"]:

        if record['rectype'] == "VARIABLE":
            record['varname'] = _read_string(f)
        else:
            record['heap_index'] = _read_long(f)
            _skip_bytes(f, 4)

        rectypedesc = _read_typedesc(f)

        if rectypedesc['typecode'] == 0:

            # A zero typecode is only valid for an otherwise empty record.
            if nextrec == f.tell():
                record['data'] = None  # Indicates NULL value
            else:
                raise ValueError("Unexpected type code: 0")

        else:

            varstart = _read_long(f)
            if varstart != 7:
                raise Exception("VARSTART is not 7")

            # Dispatch on the variable kind described by the type descriptor.
            if rectypedesc['structure']:
                record['data'] = _read_structure(f, rectypedesc['array_desc'],
                                                 rectypedesc['struct_desc'])
            elif rectypedesc['array']:
                record['data'] = _read_array(f, rectypedesc['typecode'],
                                             rectypedesc['array_desc'])
            else:
                dtype = rectypedesc['typecode']
                record['data'] = _read_data(f, dtype)

    elif record['rectype'] == "TIMESTAMP":

        _skip_bytes(f, 4*256)
        record['date'] = _read_string(f)
        record['user'] = _read_string(f)
        record['host'] = _read_string(f)

    elif record['rectype'] == "VERSION":

        record['format'] = _read_long(f)
        record['arch'] = _read_string(f)
        record['os'] = _read_string(f)
        record['release'] = _read_string(f)

    # NOTE: "IDENTIFICATON" (missing second 'I') is the spelling used by
    # RECTYPE_DICT elsewhere in this module — do not "fix" it here.
    elif record['rectype'] == "IDENTIFICATON":

        record['author'] = _read_string(f)
        record['title'] = _read_string(f)
        record['idcode'] = _read_string(f)

    elif record['rectype'] == "NOTICE":

        record['notice'] = _read_string(f)

    elif record['rectype'] == "DESCRIPTION":

        record['description'] = _read_string_data(f)

    elif record['rectype'] == "HEAP_HEADER":

        record['nvalues'] = _read_long(f)
        record['indices'] = [_read_long(f) for _ in range(record['nvalues'])]

    elif record['rectype'] == "COMMONBLOCK":

        record['nvars'] = _read_long(f)
        record['name'] = _read_string(f)
        record['varnames'] = [_read_string(f) for _ in range(record['nvars'])]

    elif record['rectype'] == "END_MARKER":

        record['end'] = True

    elif record['rectype'] == "UNKNOWN":

        warnings.warn("Skipping UNKNOWN record", stacklevel=3)

    elif record['rectype'] == "SYSTEM_VARIABLE":

        warnings.warn("Skipping SYSTEM_VARIABLE record", stacklevel=3)

    else:

        raise Exception(f"record['rectype']={record['rectype']} not implemented")

    # Always jump to the recorded start of the next record, regardless of
    # how much of the body was consumed.
    f.seek(nextrec)

    return record
422
+
423
+
424
def _read_typedesc(f):
    '''
    Read a type descriptor: the TYPECODE/VARFLAGS pair, followed by array
    and/or structure descriptors when the corresponding flag bits are set.
    '''

    typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)}

    # Bit 1 of VARFLAGS marks a system variable, which is not supported.
    if typedesc['varflags'] & 2 == 2:
        raise Exception("System variables not implemented")

    # Bit 2 -> array variable, bit 5 -> structure variable.
    typedesc['array'] = typedesc['varflags'] & 4 == 4
    typedesc['structure'] = typedesc['varflags'] & 32 == 32

    # A structure is followed by an array descriptor and then a structure
    # descriptor; a plain array only by an array descriptor.
    if typedesc['structure']:
        typedesc['array_desc'] = _read_arraydesc(f)
        typedesc['struct_desc'] = _read_structdesc(f)
    elif typedesc['array']:
        typedesc['array_desc'] = _read_arraydesc(f)

    return typedesc
442
+
443
+
444
def _read_arraydesc(f):
    '''
    Read an array descriptor.

    The leading ARRSTART long selects the layout: 8 for the standard
    32-bit descriptor, 18 for the (experimental) 64-bit variant.
    '''

    arraydesc = {'arrstart': _read_long(f)}

    if arraydesc['arrstart'] == 8:
        # Standard 32-bit array descriptor.

        _skip_bytes(f, 4)

        arraydesc['nbytes'] = _read_long(f)
        arraydesc['nelements'] = _read_long(f)
        arraydesc['ndims'] = _read_long(f)

        _skip_bytes(f, 8)

        arraydesc['nmax'] = _read_long(f)

        # NMAX dimension slots follow; only the first NDIMS are meaningful.
        arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]

    elif arraydesc['arrstart'] == 18:
        # 64-bit array descriptor; sizes are uint64.

        warnings.warn("Using experimental 64-bit array read", stacklevel=3)

        _skip_bytes(f, 8)

        arraydesc['nbytes'] = _read_uint64(f)
        arraydesc['nelements'] = _read_uint64(f)
        arraydesc['ndims'] = _read_long(f)

        _skip_bytes(f, 8)

        arraydesc['nmax'] = 8

        # Each dimension is stored as a pair of longs whose first half is
        # expected to be zero (high word of a 64-bit size).
        arraydesc['dims'] = []
        for d in range(arraydesc['nmax']):
            v = _read_long(f)
            if v != 0:
                raise Exception("Expected a zero in ARRAY_DESC")
            arraydesc['dims'].append(_read_long(f))

    else:

        raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart'])

    return arraydesc
489
+
490
+
491
def _read_structdesc(f):
    '''
    Read a structure descriptor.

    A full definition (PREDEF bit clear) is parsed and cached in the
    module-level STRUCT_DICT under its name; a predefined reference
    (PREDEF bit set) is resolved from that cache instead.
    '''

    structdesc = {}

    structstart = _read_long(f)
    if structstart != 9:
        raise Exception("STRUCTSTART should be 9")

    structdesc['name'] = _read_string(f)
    predef = _read_long(f)
    structdesc['ntags'] = _read_long(f)
    structdesc['nbytes'] = _read_long(f)

    # Flag bits of PREDEF: 1 -> already defined, 2 -> inherits,
    # 4 -> is a superclass.
    structdesc['predef'] = predef & 1
    structdesc['inherits'] = predef & 2
    structdesc['is_super'] = predef & 4

    if not structdesc['predef']:

        # Tag descriptors come first, then all tag names.
        structdesc['tagtable'] = [_read_tagdesc(f)
                                  for _ in range(structdesc['ntags'])]

        for tag in structdesc['tagtable']:
            tag['name'] = _read_string(f)

        # Array descriptors for array-valued tags, in tag order.
        structdesc['arrtable'] = {tag['name']: _read_arraydesc(f)
                                  for tag in structdesc['tagtable']
                                  if tag['array']}

        # Nested structure descriptors for structure-valued tags.
        structdesc['structtable'] = {tag['name']: _read_structdesc(f)
                                     for tag in structdesc['tagtable']
                                     if tag['structure']}

        if structdesc['inherits'] or structdesc['is_super']:
            structdesc['classname'] = _read_string(f)
            structdesc['nsupclasses'] = _read_long(f)
            structdesc['supclassnames'] = [
                _read_string(f) for _ in range(structdesc['nsupclasses'])]
            structdesc['supclasstable'] = [
                _read_structdesc(f) for _ in range(structdesc['nsupclasses'])]

        # Cache the definition so later PREDEF references can resolve it.
        STRUCT_DICT[structdesc['name']] = structdesc

    else:

        if structdesc['name'] not in STRUCT_DICT:
            raise Exception("PREDEF=1 but can't find definition")

        structdesc = STRUCT_DICT[structdesc['name']]

    return structdesc
543
+
544
+
545
def _read_tagdesc(f):
    '''
    Read a tag descriptor: byte offset, typecode, and kind flags for one
    field of a structure.
    '''

    tagdesc = {'offset': _read_long(f)}

    # An offset of -1 signals that the real offset follows as a uint64.
    if tagdesc['offset'] == -1:
        tagdesc['offset'] = _read_uint64(f)

    tagdesc['typecode'] = _read_long(f)
    tagflags = _read_long(f)

    # Bit 2 -> array tag, bit 5 -> structure tag; anything whose typecode
    # appears in DTYPE_DICT is treated as a scalar type.
    tagdesc['array'] = tagflags & 4 == 4
    tagdesc['structure'] = tagflags & 32 == 32
    tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT
    # Assume '10'x is scalar

    return tagdesc
562
+
563
+
564
def _replace_heap(variable, heap):
    '''
    Recursively resolve IDL heap Pointers inside `variable` using the
    `heap` index -> value mapping.

    Returns a ``(replaced, value)`` pair: `replaced` is True only when the
    top-level object itself was a Pointer and must be substituted by the
    caller; containers are fixed up in place and return False.
    '''

    if isinstance(variable, Pointer):

        # Follow chains of pointers until a non-pointer value is reached.
        while isinstance(variable, Pointer):

            if variable.index == 0:
                # A zero index is the IDL null pointer.
                variable = None
            else:
                if variable.index in heap:
                    variable = heap[variable.index]
                else:
                    warnings.warn("Variable referenced by pointer not found "
                                  "in heap: variable will be set to None",
                                  stacklevel=3)
                    variable = None

        # The dereferenced value may itself contain pointers.
        replace, new = _replace_heap(variable, heap)

        if replace:
            variable = new

        return True, variable

    elif isinstance(variable, np.rec.recarray):

        # Loop over records
        for ir, record in enumerate(variable):

            replace, new = _replace_heap(record, heap)

            if replace:
                variable[ir] = new

        return False, variable

    elif isinstance(variable, np.record):

        # Loop over values
        for iv, value in enumerate(variable):

            replace, new = _replace_heap(value, heap)

            if replace:
                variable[iv] = new

        return False, variable

    elif isinstance(variable, np.ndarray):

        # Loop over values if type is np.object_ (only object arrays can
        # hold Pointer instances).
        if variable.dtype.type is np.object_:

            for iv in range(variable.size):

                replace, new = _replace_heap(variable.item(iv), heap)

                if replace:
                    # Write through a flat view so multi-dim arrays work.
                    variable.reshape(-1)[iv] = new

        return False, variable

    else:

        # Scalars and anything else pass through unchanged.
        return False, variable
629
+
630
+
631
class AttrDict(dict):
    '''
    A case-insensitive dictionary with access via item, attribute, and call
    notations:

    >>> from scipy.io._idl import AttrDict
    >>> d = AttrDict()
    >>> d['Variable'] = 123
    >>> d['Variable']
    123
    >>> d.Variable
    123
    >>> d.variable
    123
    >>> d('VARIABLE')
    123
    >>> d['missing']
    Traceback (most recent call last):
    ...
    KeyError: 'missing'
    >>> d.missing
    Traceback (most recent call last):
    ...
    AttributeError: 'AttrDict' object has no attribute 'missing'
    '''

    def __init__(self, init=None):
        # Use None as the default instead of a shared mutable `{}`
        # (mutable-default-argument anti-pattern); behavior is unchanged.
        # NOTE: keys passed via `init` bypass __setitem__ and are stored
        # as-is, so case-insensitivity only holds for lowercase init keys.
        dict.__init__(self, {} if init is None else init)

    def __getitem__(self, name):
        # Keys are stored lowercased, so lookups are case-insensitive.
        return super().__getitem__(name.lower())

    def __setitem__(self, key, value):
        return super().__setitem__(key.lower(), value)

    def __getattr__(self, name):
        # Fall back to item lookup so `d.name` mirrors `d['name']`.
        try:
            return self.__getitem__(name)
        except KeyError:
            raise AttributeError(
                f"'{type(self)}' object has no attribute '{name}'") from None

    # Attribute assignment and call notation reuse the item protocol.
    __setattr__ = __setitem__
    __call__ = __getitem__
675
+
676
+
677
def readsav(file_name, idict=None, python_dict=False,
            uncompressed_file_name=None, verbose=False):
    """
    Read an IDL .sav file.

    Parameters
    ----------
    file_name : str
        Name of the IDL save file.
    idict : dict, optional
        Dictionary in which to insert .sav file variables.
    python_dict : bool, optional
        By default, the object return is not a Python dictionary, but a
        case-insensitive dictionary with item, attribute, and call access
        to variables. To get a standard Python dictionary, set this option
        to True.
    uncompressed_file_name : str, optional
        This option only has an effect for .sav files written with the
        /compress option. If a file name is specified, compressed .sav
        files are uncompressed to this file. Otherwise, readsav will use
        the `tempfile` module to determine a temporary filename
        automatically, and will remove the temporary file upon successfully
        reading it in.
    verbose : bool, optional
        Whether to print out information about the save file, including
        the records read, and available variables.

    Returns
    -------
    idl_dict : AttrDict or dict
        If `python_dict` is set to False (default), this function returns a
        case-insensitive dictionary with item, attribute, and call access
        to variables. If `python_dict` is set to True, this function
        returns a Python dictionary with all variable names in lowercase.
        If `idict` was specified, then variables are written to the
        dictionary specified, and the updated dictionary is returned.

    Examples
    --------
    >>> from os.path import dirname, join as pjoin
    >>> import scipy.io as sio
    >>> from scipy.io import readsav

    Get the filename for an example .sav file from the tests/data directory.

    >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
    >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')

    Load the .sav file contents.

    >>> sav_data = readsav(sav_fname)

    Get keys of the .sav file contents.

    >>> print(sav_data.keys())
    dict_keys(['array1d'])

    Access a content with a key.

    >>> print(sav_data['array1d'])
    [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0.]

    """

    # Initialize record and variable holders
    records = []
    # NOTE(review): an explicitly-passed *empty* idict falls through to the
    # AttrDict branch here (it is falsy); intentional-looking quirk.
    if python_dict or idict:
        variables = {}
    else:
        variables = AttrDict()

    # Open the IDL file
    f = open(file_name, 'rb')

    # Read the signature, which should be 'SR'
    signature = _read_bytes(f, 2)
    if signature != b'SR':
        raise Exception("Invalid SIGNATURE: %s" % signature)

    # Next, the record format, which is '\x00\x04' for normal .sav
    # files, and '\x00\x06' for compressed .sav files.
    recfmt = _read_bytes(f, 2)

    if recfmt == b'\x00\x04':
        pass

    elif recfmt == b'\x00\x06':

        # Compressed files are expanded record-by-record into a temporary
        # (or caller-specified) file that is then read like a normal one.
        if verbose:
            print("IDL Save file is compressed")

        if uncompressed_file_name:
            fout = open(uncompressed_file_name, 'w+b')
        else:
            fout = tempfile.NamedTemporaryFile(suffix='.sav')

        if verbose:
            print(" -> expanding to %s" % fout.name)

        # Write header
        fout.write(b'SR\x00\x04')

        # Cycle through records
        while True:

            # Read record type
            rectype = _read_long(f)
            fout.write(struct.pack('>l', int(rectype)))

            # Read position of next record and return as int
            # (stored as low/high uint32 pair).
            nextrec = _read_uint32(f)
            nextrec += _read_uint32(f).astype(np.int64) * 2**32

            # Read the unknown 4 bytes
            unknown = f.read(4)

            # Check if the end of the file has been reached
            if RECTYPE_DICT[rectype] == 'END_MARKER':
                modval = np.int64(2**32)
                fout.write(struct.pack('>I', int(nextrec) % modval))
                fout.write(
                    struct.pack('>I', int((nextrec - (nextrec % modval)) / modval))
                )
                fout.write(unknown)
                break

            # Find current position
            pos = f.tell()

            # Decompress record
            rec_string = zlib.decompress(f.read(nextrec-pos))

            # Find new position of next record
            # (12 = record-type long + two offset words written below).
            nextrec = fout.tell() + len(rec_string) + 12

            # Write out record
            fout.write(struct.pack('>I', int(nextrec % 2**32)))
            fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
            fout.write(unknown)
            fout.write(rec_string)

        # Close the original compressed file
        f.close()

        # Set f to be the decompressed file, and skip the first four bytes
        f = fout
        f.seek(4)

    else:
        raise Exception("Invalid RECFMT: %s" % recfmt)

    # Loop through records, and add them to the list
    while True:
        r = _read_record(f)
        records.append(r)
        if 'end' in r:
            if r['end']:
                break

    # Close the file
    f.close()

    # Find heap data variables
    heap = {}
    for r in records:
        if r['rectype'] == "HEAP_DATA":
            heap[r['heap_index']] = r['data']

    # Find all variables, resolving heap pointers as we go.
    for r in records:
        if r['rectype'] == "VARIABLE":
            replace, new = _replace_heap(r['data'], heap)
            if replace:
                r['data'] = new
            variables[r['varname'].lower()] = r['data']

    if verbose:

        # Print out timestamp info about the file
        for record in records:
            if record['rectype'] == "TIMESTAMP":
                print("-"*50)
                print("Date: %s" % record['date'])
                print("User: %s" % record['user'])
                print("Host: %s" % record['host'])
                break

        # Print out version info about the file
        for record in records:
            if record['rectype'] == "VERSION":
                print("-"*50)
                print("Format: %s" % record['format'])
                print("Architecture: %s" % record['arch'])
                print("Operating System: %s" % record['os'])
                print("IDL Version: %s" % record['release'])
                break

        # Print out identification info about the file
        # ("IDENTIFICATON" spelling matches RECTYPE_DICT).
        for record in records:
            if record['rectype'] == "IDENTIFICATON":
                print("-"*50)
                print("Author: %s" % record['author'])
                print("Title: %s" % record['title'])
                print("ID Code: %s" % record['idcode'])
                break

        # Print out descriptions saved with the file
        for record in records:
            if record['rectype'] == "DESCRIPTION":
                print("-"*50)
                print("Description: %s" % record['description'])
                break

        print("-"*50)
        print("Successfully read %i records of which:" %
              (len(records)))

        # Create convenience list of record types
        rectypes = [r['rectype'] for r in records]

        for rt in set(rectypes):
            if rt != 'END_MARKER':
                print(" - %i are of type %s" % (rectypes.count(rt), rt))
        print("-"*50)

        if 'VARIABLE' in rectypes:
            print("Available variables:")
            for var in variables:
                print(f" - {var} [{type(variables[var])}]")
            print("-"*50)

    # Merge into the caller's dict if one was provided (and non-empty; see
    # the note at the top of the function body).
    if idict:
        for var in variables:
            idict[var] = variables[var]
        return idict
    else:
        return variables
env-llmeval/lib/python3.10/site-packages/scipy/io/_mmio.py ADDED
@@ -0,0 +1,961 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Matrix Market I/O in Python.
3
+ See http://math.nist.gov/MatrixMarket/formats.html
4
+ for information about the Matrix Market format.
5
+ """
6
+ #
7
+ # Author: Pearu Peterson <[email protected]>
8
+ # Created: October, 2004
9
+ #
10
+ # References:
11
+ # http://math.nist.gov/MatrixMarket/
12
+ #
13
+ import os
14
+
15
+ import numpy as np
16
+ from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate,
17
+ ones, can_cast)
18
+
19
+ from scipy.sparse import coo_matrix, issparse
20
+
21
+ __all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile']
22
+
23
+
24
+ # -----------------------------------------------------------------------------
25
def asstr(s):
    """Coerce *s* to ``str``, decoding ``bytes`` input as Latin-1."""
    return s.decode('latin1') if isinstance(s, bytes) else str(s)
29
+
30
+
31
def mminfo(source):
    """
    Return size and storage parameters from Matrix Market file-like 'source'.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object

    Returns
    -------
    rows : int
        Number of matrix rows.
    cols : int
        Number of matrix columns.
    entries : int
        Number of non-zero entries of a sparse matrix
        or rows*cols for a dense matrix.
    format : str
        Either 'coordinate' or 'array'.
    field : str
        Either 'real', 'complex', 'pattern', or 'integer'.
    symmetry : str
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.

    Examples
    --------
    >>> from io import StringIO
    >>> from scipy.io import mminfo

    >>> text = '''%%MatrixMarket matrix coordinate real general
    ...  5 5 7
    ...  2 3 1.0
    ...  3 4 2.0
    ...  3 5 3.0
    ...  4 1 4.0
    ...  4 2 5.0
    ...  4 3 6.0
    ...  4 4 7.0
    ... '''


    ``mminfo(source)`` returns the number of rows, number of columns,
    format, field type and symmetry attribute of the source file.

    >>> mminfo(StringIO(text))
    (5, 5, 7, 'coordinate', 'real', 'general')
    """
    # Thin convenience wrapper: all header parsing lives in MMFile.info.
    return MMFile.info(source)
80
+
81
+ # -----------------------------------------------------------------------------
82
+
83
+
84
def mmread(source):
    """
    Reads the contents of a Matrix Market file-like 'source' into a matrix.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extensions .mtx, .mtz.gz)
        or open file-like object.

    Returns
    -------
    a : ndarray or coo_matrix
        Dense or sparse matrix depending on the matrix format in the
        Matrix Market file.

    Examples
    --------
    >>> from io import StringIO
    >>> from scipy.io import mmread

    >>> text = '''%%MatrixMarket matrix coordinate real general
    ...  5 5 7
    ...  2 3 1.0
    ...  3 4 2.0
    ...  3 5 3.0
    ...  4 1 4.0
    ...  4 2 5.0
    ...  4 3 6.0
    ...  4 4 7.0
    ... '''

    ``mmread(source)`` returns the data as sparse matrix in COO format.

    >>> m = mmread(StringIO(text))
    >>> m
    <5x5 sparse matrix of type '<class 'numpy.float64'>'
    with 7 stored elements in COOrdinate format>
    >>> m.A
    array([[0., 0., 0., 0., 0.],
           [0., 0., 1., 0., 0.],
           [0., 0., 0., 2., 3.],
           [4., 5., 6., 7., 0.],
           [0., 0., 0., 0., 0.]])
    """
    # Thin convenience wrapper: parsing is implemented by MMFile.read.
    return MMFile().read(source)
130
+
131
+ # -----------------------------------------------------------------------------
132
+
133
+
134
def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
    r"""
    Writes the sparse or dense array `a` to Matrix Market file-like `target`.

    Parameters
    ----------
    target : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.
    a : array like
        Sparse or dense 2-D array.
    comment : str, optional
        Comments to be prepended to the Matrix Market file.
    field : None or str, optional
        Either 'real', 'complex', 'pattern', or 'integer'.
    precision : None or int, optional
        Number of digits to display for real or complex values.
    symmetry : None or str, optional
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
        If symmetry is None the symmetry type of 'a' is determined by its
        values.

    Returns
    -------
    None

    Examples
    --------
    >>> from io import BytesIO
    >>> import numpy as np
    >>> from scipy.sparse import coo_matrix
    >>> from scipy.io import mmwrite

    Write a small NumPy array to a matrix market file.  The file will be
    written in the ``'array'`` format.

    >>> a = np.array([[1.0, 0, 0, 0], [0, 2.5, 0, 6.25]])
    >>> target = BytesIO()
    >>> mmwrite(target, a)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix array real general
    %
    2 4
    1.0000000000000000e+00
    0.0000000000000000e+00
    0.0000000000000000e+00
    2.5000000000000000e+00
    0.0000000000000000e+00
    0.0000000000000000e+00
    0.0000000000000000e+00
    6.2500000000000000e+00

    Add a comment to the output file, and set the precision to 3.

    >>> target = BytesIO()
    >>> mmwrite(target, a, comment='\n Some test data.\n', precision=3)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix array real general
    %
    % Some test data.
    %
    2 4
    1.000e+00
    0.000e+00
    0.000e+00
    2.500e+00
    0.000e+00
    0.000e+00
    0.000e+00
    6.250e+00

    Convert to a sparse matrix before calling ``mmwrite``.  This will
    result in the output format being ``'coordinate'`` rather than
    ``'array'``.

    >>> target = BytesIO()
    >>> mmwrite(target, coo_matrix(a), precision=3)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix coordinate real general
    %
    2 4 3
    1 1 1.00e+00
    2 2 2.50e+00
    2 4 6.25e+00

    Write a complex Hermitian array to a matrix market file.  Note that
    only six values are actually written to the file; the other values
    are implied by the symmetry.

    >>> z = np.array([[3, 1+2j, 4-3j], [1-2j, 1, -5j], [4+3j, 5j, 2.5]])
    >>> z
    array([[ 3. +0.j,  1. +2.j,  4. -3.j],
           [ 1. -2.j,  1. +0.j, -0. -5.j],
           [ 4. +3.j,  0. +5.j,  2.5+0.j]])

    >>> target = BytesIO()
    >>> mmwrite(target, z, precision=2)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix array complex hermitian
    %
    3 3
    3.00e+00 0.00e+00
    1.00e+00 -2.00e+00
    4.00e+00 3.00e+00
    1.00e+00 0.00e+00
    0.00e+00 5.00e+00
    2.50e+00 0.00e+00

    """
    # Thin convenience wrapper: all formatting lives in MMFile.write.
    MMFile().write(target, a, comment, field, precision, symmetry)
243
+
244
+
245
+ ###############################################################################
246
+ class MMFile:
247
+ __slots__ = ('_rows',
248
+ '_cols',
249
+ '_entries',
250
+ '_format',
251
+ '_field',
252
+ '_symmetry')
253
+
254
+ @property
255
+ def rows(self):
256
+ return self._rows
257
+
258
+ @property
259
+ def cols(self):
260
+ return self._cols
261
+
262
+ @property
263
+ def entries(self):
264
+ return self._entries
265
+
266
+ @property
267
+ def format(self):
268
+ return self._format
269
+
270
+ @property
271
+ def field(self):
272
+ return self._field
273
+
274
+ @property
275
+ def symmetry(self):
276
+ return self._symmetry
277
+
278
+ @property
279
+ def has_symmetry(self):
280
+ return self._symmetry in (self.SYMMETRY_SYMMETRIC,
281
+ self.SYMMETRY_SKEW_SYMMETRIC,
282
+ self.SYMMETRY_HERMITIAN)
283
+
284
+ # format values
285
+ FORMAT_COORDINATE = 'coordinate'
286
+ FORMAT_ARRAY = 'array'
287
+ FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
288
+
289
+ @classmethod
290
+ def _validate_format(self, format):
291
+ if format not in self.FORMAT_VALUES:
292
+ msg = f'unknown format type {format}, must be one of {self.FORMAT_VALUES}'
293
+ raise ValueError(msg)
294
+
295
+ # field values
296
+ FIELD_INTEGER = 'integer'
297
+ FIELD_UNSIGNED = 'unsigned-integer'
298
+ FIELD_REAL = 'real'
299
+ FIELD_COMPLEX = 'complex'
300
+ FIELD_PATTERN = 'pattern'
301
+ FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX,
302
+ FIELD_PATTERN)
303
+
304
+ @classmethod
305
+ def _validate_field(self, field):
306
+ if field not in self.FIELD_VALUES:
307
+ msg = f'unknown field type {field}, must be one of {self.FIELD_VALUES}'
308
+ raise ValueError(msg)
309
+
310
+ # symmetry values
311
+ SYMMETRY_GENERAL = 'general'
312
+ SYMMETRY_SYMMETRIC = 'symmetric'
313
+ SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
314
+ SYMMETRY_HERMITIAN = 'hermitian'
315
+ SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
316
+ SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
317
+
318
+ @classmethod
319
+ def _validate_symmetry(self, symmetry):
320
+ if symmetry not in self.SYMMETRY_VALUES:
321
+ raise ValueError(f'unknown symmetry type {symmetry}, '
322
+ f'must be one of {self.SYMMETRY_VALUES}')
323
+
324
+ DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp',
325
+ FIELD_UNSIGNED: 'uint64',
326
+ FIELD_REAL: 'd',
327
+ FIELD_COMPLEX: 'D',
328
+ FIELD_PATTERN: 'd'}
329
+
330
+ # -------------------------------------------------------------------------
331
+ @staticmethod
332
+ def reader():
333
+ pass
334
+
335
+ # -------------------------------------------------------------------------
336
+ @staticmethod
337
+ def writer():
338
+ pass
339
+
340
+ # -------------------------------------------------------------------------
341
+ @classmethod
342
+ def info(self, source):
343
+ """
344
+ Return size, storage parameters from Matrix Market file-like 'source'.
345
+
346
+ Parameters
347
+ ----------
348
+ source : str or file-like
349
+ Matrix Market filename (extension .mtx) or open file-like object
350
+
351
+ Returns
352
+ -------
353
+ rows : int
354
+ Number of matrix rows.
355
+ cols : int
356
+ Number of matrix columns.
357
+ entries : int
358
+ Number of non-zero entries of a sparse matrix
359
+ or rows*cols for a dense matrix.
360
+ format : str
361
+ Either 'coordinate' or 'array'.
362
+ field : str
363
+ Either 'real', 'complex', 'pattern', or 'integer'.
364
+ symmetry : str
365
+ Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
366
+ """
367
+
368
+ stream, close_it = self._open(source)
369
+
370
+ try:
371
+
372
+ # read and validate header line
373
+ line = stream.readline()
374
+ mmid, matrix, format, field, symmetry = \
375
+ (asstr(part.strip()) for part in line.split())
376
+ if not mmid.startswith('%%MatrixMarket'):
377
+ raise ValueError('source is not in Matrix Market format')
378
+ if not matrix.lower() == 'matrix':
379
+ raise ValueError("Problem reading file header: " + line)
380
+
381
+ # http://math.nist.gov/MatrixMarket/formats.html
382
+ if format.lower() == 'array':
383
+ format = self.FORMAT_ARRAY
384
+ elif format.lower() == 'coordinate':
385
+ format = self.FORMAT_COORDINATE
386
+
387
+ # skip comments
388
+ # line.startswith('%')
389
+ while line:
390
+ if line.lstrip() and line.lstrip()[0] in ['%', 37]:
391
+ line = stream.readline()
392
+ else:
393
+ break
394
+
395
+ # skip empty lines
396
+ while not line.strip():
397
+ line = stream.readline()
398
+
399
+ split_line = line.split()
400
+ if format == self.FORMAT_ARRAY:
401
+ if not len(split_line) == 2:
402
+ raise ValueError("Header line not of length 2: " +
403
+ line.decode('ascii'))
404
+ rows, cols = map(int, split_line)
405
+ entries = rows * cols
406
+ else:
407
+ if not len(split_line) == 3:
408
+ raise ValueError("Header line not of length 3: " +
409
+ line.decode('ascii'))
410
+ rows, cols, entries = map(int, split_line)
411
+
412
+ return (rows, cols, entries, format, field.lower(),
413
+ symmetry.lower())
414
+
415
+ finally:
416
+ if close_it:
417
+ stream.close()
418
+
419
+ # -------------------------------------------------------------------------
420
+ @staticmethod
421
+ def _open(filespec, mode='rb'):
422
+ """ Return an open file stream for reading based on source.
423
+
424
+ If source is a file name, open it (after trying to find it with mtx and
425
+ gzipped mtx extensions). Otherwise, just return source.
426
+
427
+ Parameters
428
+ ----------
429
+ filespec : str or file-like
430
+ String giving file name or file-like object
431
+ mode : str, optional
432
+ Mode with which to open file, if `filespec` is a file name.
433
+
434
+ Returns
435
+ -------
436
+ fobj : file-like
437
+ Open file-like object.
438
+ close_it : bool
439
+ True if the calling function should close this file when done,
440
+ false otherwise.
441
+ """
442
+ # If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class
443
+ # implementing a '__fspath__' method), try to convert it to str. If this
444
+ # fails by throwing a 'TypeError', assume it's an open file handle and
445
+ # return it as-is.
446
+ try:
447
+ filespec = os.fspath(filespec)
448
+ except TypeError:
449
+ return filespec, False
450
+
451
+ # 'filespec' is definitely a str now
452
+
453
+ # open for reading
454
+ if mode[0] == 'r':
455
+
456
+ # determine filename plus extension
457
+ if not os.path.isfile(filespec):
458
+ if os.path.isfile(filespec+'.mtx'):
459
+ filespec = filespec + '.mtx'
460
+ elif os.path.isfile(filespec+'.mtx.gz'):
461
+ filespec = filespec + '.mtx.gz'
462
+ elif os.path.isfile(filespec+'.mtx.bz2'):
463
+ filespec = filespec + '.mtx.bz2'
464
+ # open filename
465
+ if filespec.endswith('.gz'):
466
+ import gzip
467
+ stream = gzip.open(filespec, mode)
468
+ elif filespec.endswith('.bz2'):
469
+ import bz2
470
+ stream = bz2.BZ2File(filespec, 'rb')
471
+ else:
472
+ stream = open(filespec, mode)
473
+
474
+ # open for writing
475
+ else:
476
+ if filespec[-4:] != '.mtx':
477
+ filespec = filespec + '.mtx'
478
+ stream = open(filespec, mode)
479
+
480
+ return stream, True
481
+
482
+ # -------------------------------------------------------------------------
483
    @staticmethod
    def _get_symmetry(a):
        # Classify the symmetry of the 2-D array or sparse matrix `a` as one
        # of the Matrix Market symmetry kinds by examining every symmetric
        # pair of entries; the first kind that survives all pairs wins.
        m, n = a.shape
        if m != n:
            # Non-square matrices can only ever be 'general'.
            return MMFile.SYMMETRY_GENERAL
        issymm = True
        isskew = True
        # Hermitian is only meaningful for complex dtypes ('F'/'D').
        isherm = a.dtype.char in 'FD'

        # sparse input
        if issparse(a):
            # check if number of nonzero entries of lower and upper triangle
            # matrix are equal
            a = a.tocoo()
            (row, col) = a.nonzero()
            if (row < col).sum() != (row > col).sum():
                return MMFile.SYMMETRY_GENERAL

            # define iterator over symmetric pair entries
            a = a.todok()

            def symm_iterator():
                # Yields (a[i,j], a[j,i], is_diagonal) for the lower
                # triangle; entries above the diagonal are covered via the
                # a[j, i] lookup (missing entries read as zero in DOK).
                for ((i, j), aij) in a.items():
                    if i > j:
                        aji = a[j, i]
                        yield (aij, aji, False)
                    elif i == j:
                        yield (aij, aij, True)

        # non-sparse input
        else:
            # define iterator over symmetric pair entries
            def symm_iterator():
                for j in range(n):
                    for i in range(j, n):
                        aij, aji = a[i][j], a[j][i]
                        yield (aij, aji, i == j)

        # check for symmetry
        # yields aij, aji, is_diagonal
        for (aij, aji, is_diagonal) in symm_iterator():
            # A skew-symmetric matrix must have a zero diagonal; a nonzero
            # diagonal entry rules it out without affecting the other flags.
            if isskew and is_diagonal and aij != 0:
                isskew = False
            else:
                if issymm and aij != aji:
                    issymm = False
                with np.errstate(over="ignore"):
                    # This can give a warning for uint dtypes, so silence that
                    if isskew and aij != -aji:
                        isskew = False
                if isherm and aij != conj(aji):
                    isherm = False
            if not (issymm or isskew or isherm):
                # All candidates eliminated; no need to scan further.
                break

        # return symmetry value
        if issymm:
            return MMFile.SYMMETRY_SYMMETRIC
        if isskew:
            return MMFile.SYMMETRY_SKEW_SYMMETRIC
        if isherm:
            return MMFile.SYMMETRY_HERMITIAN
        return MMFile.SYMMETRY_GENERAL
546
+
547
+ # -------------------------------------------------------------------------
548
+ @staticmethod
549
+ def _field_template(field, precision):
550
+ return {MMFile.FIELD_REAL: '%%.%ie\n' % precision,
551
+ MMFile.FIELD_INTEGER: '%i\n',
552
+ MMFile.FIELD_UNSIGNED: '%u\n',
553
+ MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' %
554
+ (precision, precision)
555
+ }.get(field, None)
556
+
557
+ # -------------------------------------------------------------------------
558
    def __init__(self, **kwargs):
        # All state lives in the class's __slots__; delegate to the shared
        # initializer so read()/_parse_header() can reuse the same logic.
        self._init_attrs(**kwargs)
560
+
561
+ # -------------------------------------------------------------------------
562
+ def read(self, source):
563
+ """
564
+ Reads the contents of a Matrix Market file-like 'source' into a matrix.
565
+
566
+ Parameters
567
+ ----------
568
+ source : str or file-like
569
+ Matrix Market filename (extensions .mtx, .mtz.gz)
570
+ or open file object.
571
+
572
+ Returns
573
+ -------
574
+ a : ndarray or coo_matrix
575
+ Dense or sparse matrix depending on the matrix format in the
576
+ Matrix Market file.
577
+ """
578
+ stream, close_it = self._open(source)
579
+
580
+ try:
581
+ self._parse_header(stream)
582
+ return self._parse_body(stream)
583
+
584
+ finally:
585
+ if close_it:
586
+ stream.close()
587
+
588
+ # -------------------------------------------------------------------------
589
+ def write(self, target, a, comment='', field=None, precision=None,
590
+ symmetry=None):
591
+ """
592
+ Writes sparse or dense array `a` to Matrix Market file-like `target`.
593
+
594
+ Parameters
595
+ ----------
596
+ target : str or file-like
597
+ Matrix Market filename (extension .mtx) or open file-like object.
598
+ a : array like
599
+ Sparse or dense 2-D array.
600
+ comment : str, optional
601
+ Comments to be prepended to the Matrix Market file.
602
+ field : None or str, optional
603
+ Either 'real', 'complex', 'pattern', or 'integer'.
604
+ precision : None or int, optional
605
+ Number of digits to display for real or complex values.
606
+ symmetry : None or str, optional
607
+ Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
608
+ If symmetry is None the symmetry type of 'a' is determined by its
609
+ values.
610
+ """
611
+
612
+ stream, close_it = self._open(target, 'wb')
613
+
614
+ try:
615
+ self._write(stream, a, comment, field, precision, symmetry)
616
+
617
+ finally:
618
+ if close_it:
619
+ stream.close()
620
+ else:
621
+ stream.flush()
622
+
623
+ # -------------------------------------------------------------------------
624
+ def _init_attrs(self, **kwargs):
625
+ """
626
+ Initialize each attributes with the corresponding keyword arg value
627
+ or a default of None
628
+ """
629
+
630
+ attrs = self.__class__.__slots__
631
+ public_attrs = [attr[1:] for attr in attrs]
632
+ invalid_keys = set(kwargs.keys()) - set(public_attrs)
633
+
634
+ if invalid_keys:
635
+ raise ValueError('''found {} invalid keyword arguments, please only
636
+ use {}'''.format(tuple(invalid_keys),
637
+ public_attrs))
638
+
639
+ for attr in attrs:
640
+ setattr(self, attr, kwargs.get(attr[1:], None))
641
+
642
+ # -------------------------------------------------------------------------
643
+ def _parse_header(self, stream):
644
+ rows, cols, entries, format, field, symmetry = \
645
+ self.__class__.info(stream)
646
+ self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
647
+ field=field, symmetry=symmetry)
648
+
649
+ # -------------------------------------------------------------------------
650
    def _parse_body(self, stream):
        # Parse the data section of an already-header-parsed Matrix Market
        # stream. Returns a dense ndarray for 'array' format and a
        # coo_matrix for 'coordinate' format, expanding symmetric storage.
        rows, cols, entries, format, field, symm = (self.rows, self.cols,
                                                    self.entries, self.format,
                                                    self.field, self.symmetry)

        dtype = self.DTYPES_BY_FIELD.get(field, None)

        has_symmetry = self.has_symmetry
        is_integer = field == self.FIELD_INTEGER
        is_unsigned_integer = field == self.FIELD_UNSIGNED
        is_complex = field == self.FIELD_COMPLEX
        is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
        is_herm = symm == self.SYMMETRY_HERMITIAN
        is_pattern = field == self.FIELD_PATTERN

        if format == self.FORMAT_ARRAY:
            # Dense format: values are listed column-major, one per line.
            # (i, j) track the position of the next value; for symmetric
            # storage only the lower triangle is present in the file.
            a = zeros((rows, cols), dtype=dtype)
            line = 1
            i, j = 0, 0
            if is_skew:
                # Skew-symmetric files omit the (zero) diagonal entries.
                a[i, j] = 0
                if i < rows - 1:
                    i += 1
            while line:
                line = stream.readline()
                # line.startswith('%')
                # 37 is ord('%'), matching byte streams where line[0] is int.
                if not line or line[0] in ['%', 37] or not line.strip():
                    continue
                if is_integer:
                    aij = int(line)
                elif is_unsigned_integer:
                    aij = int(line)
                elif is_complex:
                    aij = complex(*map(float, line.split()))
                else:
                    aij = float(line)
                a[i, j] = aij
                if has_symmetry and i != j:
                    # Mirror the value into the upper triangle.
                    if is_skew:
                        a[j, i] = -aij
                    elif is_herm:
                        a[j, i] = conj(aij)
                    else:
                        a[j, i] = aij
                if i < rows-1:
                    i = i + 1
                else:
                    # Column exhausted: advance to the next column, starting
                    # at the diagonal for symmetric storage.
                    j = j + 1
                    if not has_symmetry:
                        i = 0
                    else:
                        i = j
                        if is_skew:
                            a[i, j] = 0
                            if i < rows-1:
                                i += 1

            # Sanity check: (i, j) must have advanced past the last stored
            # entry, otherwise the file ended early.
            if is_skew:
                if not (i in [0, j] and j == cols - 1):
                    raise ValueError("Parse error, did not read all lines.")
            else:
                if not (i in [0, j] and j == cols):
                    raise ValueError("Parse error, did not read all lines.")

        elif format == self.FORMAT_COORDINATE:
            # Read sparse COOrdinate format

            if entries == 0:
                # empty matrix
                return coo_matrix((rows, cols), dtype=dtype)

            I = zeros(entries, dtype='intc')
            J = zeros(entries, dtype='intc')
            if is_pattern:
                # Pattern files carry no values; store an implicit 1.
                V = ones(entries, dtype='int8')
            elif is_integer:
                V = zeros(entries, dtype='intp')
            elif is_unsigned_integer:
                V = zeros(entries, dtype='uint64')
            elif is_complex:
                V = zeros(entries, dtype='complex')
            else:
                V = zeros(entries, dtype='float')

            entry_number = 0
            for line in stream:
                # line.startswith('%')
                if not line or line[0] in ['%', 37] or not line.strip():
                    continue

                if entry_number+1 > entries:
                    raise ValueError("'entries' in header is smaller than "
                                     "number of entries")
                l = line.split()
                I[entry_number], J[entry_number] = map(int, l[:2])

                if not is_pattern:
                    if is_integer:
                        V[entry_number] = int(l[2])
                    elif is_unsigned_integer:
                        V[entry_number] = int(l[2])
                    elif is_complex:
                        V[entry_number] = complex(*map(float, l[2:]))
                    else:
                        V[entry_number] = float(l[2])
                entry_number += 1
            if entry_number < entries:
                raise ValueError("'entries' in header is larger than "
                                 "number of entries")

            I -= 1  # adjust indices (base 1 -> base 0)
            J -= 1

            if has_symmetry:
                # Expand symmetric storage: mirror every off-diagonal entry
                # across the diagonal (negated / conjugated as appropriate).
                mask = (I != J)  # off diagonal mask
                od_I = I[mask]
                od_J = J[mask]
                od_V = V[mask]

                I = concatenate((I, od_J))
                J = concatenate((J, od_I))

                if is_skew:
                    od_V *= -1
                elif is_herm:
                    od_V = od_V.conjugate()

                V = concatenate((V, od_V))

            a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
        else:
            raise NotImplementedError(format)

        return a
784
+
785
+ # ------------------------------------------------------------------------
786
+ def _write(self, stream, a, comment='', field=None, precision=None,
787
+ symmetry=None):
788
+ if isinstance(a, list) or isinstance(a, ndarray) or \
789
+ isinstance(a, tuple) or hasattr(a, '__array__'):
790
+ rep = self.FORMAT_ARRAY
791
+ a = asarray(a)
792
+ if len(a.shape) != 2:
793
+ raise ValueError('Expected 2 dimensional array')
794
+ rows, cols = a.shape
795
+
796
+ if field is not None:
797
+
798
+ if field == self.FIELD_INTEGER:
799
+ if not can_cast(a.dtype, 'intp'):
800
+ raise OverflowError("mmwrite does not support integer "
801
+ "dtypes larger than native 'intp'.")
802
+ a = a.astype('intp')
803
+ elif field == self.FIELD_REAL:
804
+ if a.dtype.char not in 'fd':
805
+ a = a.astype('d')
806
+ elif field == self.FIELD_COMPLEX:
807
+ if a.dtype.char not in 'FD':
808
+ a = a.astype('D')
809
+
810
+ else:
811
+ if not issparse(a):
812
+ raise ValueError('unknown matrix type: %s' % type(a))
813
+
814
+ rep = 'coordinate'
815
+ rows, cols = a.shape
816
+
817
+ typecode = a.dtype.char
818
+
819
+ if precision is None:
820
+ if typecode in 'fF':
821
+ precision = 8
822
+ else:
823
+ precision = 16
824
+ if field is None:
825
+ kind = a.dtype.kind
826
+ if kind == 'i':
827
+ if not can_cast(a.dtype, 'intp'):
828
+ raise OverflowError("mmwrite does not support integer "
829
+ "dtypes larger than native 'intp'.")
830
+ field = 'integer'
831
+ elif kind == 'f':
832
+ field = 'real'
833
+ elif kind == 'c':
834
+ field = 'complex'
835
+ elif kind == 'u':
836
+ field = 'unsigned-integer'
837
+ else:
838
+ raise TypeError('unexpected dtype kind ' + kind)
839
+
840
+ if symmetry is None:
841
+ symmetry = self._get_symmetry(a)
842
+
843
+ # validate rep, field, and symmetry
844
+ self.__class__._validate_format(rep)
845
+ self.__class__._validate_field(field)
846
+ self.__class__._validate_symmetry(symmetry)
847
+
848
+ # write initial header line
849
+ data = f'%%MatrixMarket matrix {rep} {field} {symmetry}\n'
850
+ stream.write(data.encode('latin1'))
851
+
852
+ # write comments
853
+ for line in comment.split('\n'):
854
+ data = '%%%s\n' % (line)
855
+ stream.write(data.encode('latin1'))
856
+
857
+ template = self._field_template(field, precision)
858
+ # write dense format
859
+ if rep == self.FORMAT_ARRAY:
860
+ # write shape spec
861
+ data = '%i %i\n' % (rows, cols)
862
+ stream.write(data.encode('latin1'))
863
+
864
+ if field in (self.FIELD_INTEGER, self.FIELD_REAL,
865
+ self.FIELD_UNSIGNED):
866
+ if symmetry == self.SYMMETRY_GENERAL:
867
+ for j in range(cols):
868
+ for i in range(rows):
869
+ data = template % a[i, j]
870
+ stream.write(data.encode('latin1'))
871
+
872
+ elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC:
873
+ for j in range(cols):
874
+ for i in range(j + 1, rows):
875
+ data = template % a[i, j]
876
+ stream.write(data.encode('latin1'))
877
+
878
+ else:
879
+ for j in range(cols):
880
+ for i in range(j, rows):
881
+ data = template % a[i, j]
882
+ stream.write(data.encode('latin1'))
883
+
884
+ elif field == self.FIELD_COMPLEX:
885
+
886
+ if symmetry == self.SYMMETRY_GENERAL:
887
+ for j in range(cols):
888
+ for i in range(rows):
889
+ aij = a[i, j]
890
+ data = template % (real(aij), imag(aij))
891
+ stream.write(data.encode('latin1'))
892
+ else:
893
+ for j in range(cols):
894
+ for i in range(j, rows):
895
+ aij = a[i, j]
896
+ data = template % (real(aij), imag(aij))
897
+ stream.write(data.encode('latin1'))
898
+
899
+ elif field == self.FIELD_PATTERN:
900
+ raise ValueError('pattern type inconsisted with dense format')
901
+
902
+ else:
903
+ raise TypeError('Unknown field type %s' % field)
904
+
905
+ # write sparse format
906
+ else:
907
+ coo = a.tocoo() # convert to COOrdinate format
908
+
909
+ # if symmetry format used, remove values above main diagonal
910
+ if symmetry != self.SYMMETRY_GENERAL:
911
+ lower_triangle_mask = coo.row >= coo.col
912
+ coo = coo_matrix((coo.data[lower_triangle_mask],
913
+ (coo.row[lower_triangle_mask],
914
+ coo.col[lower_triangle_mask])),
915
+ shape=coo.shape)
916
+
917
+ # write shape spec
918
+ data = '%i %i %i\n' % (rows, cols, coo.nnz)
919
+ stream.write(data.encode('latin1'))
920
+
921
+ template = self._field_template(field, precision-1)
922
+
923
+ if field == self.FIELD_PATTERN:
924
+ for r, c in zip(coo.row+1, coo.col+1):
925
+ data = "%i %i\n" % (r, c)
926
+ stream.write(data.encode('latin1'))
927
+ elif field in (self.FIELD_INTEGER, self.FIELD_REAL,
928
+ self.FIELD_UNSIGNED):
929
+ for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
930
+ data = ("%i %i " % (r, c)) + (template % d)
931
+ stream.write(data.encode('latin1'))
932
+ elif field == self.FIELD_COMPLEX:
933
+ for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
934
+ data = ("%i %i " % (r, c)) + (template % (d.real, d.imag))
935
+ stream.write(data.encode('latin1'))
936
+ else:
937
+ raise TypeError('Unknown field type %s' % field)
938
+
939
+
940
def _is_fromfile_compatible(stream):
    """
    Check whether `stream` is compatible with numpy.fromfile.

    Passing a gzipped or bzip2 file object to ``fromfile``/``fromstring``
    doesn't work with Python 3, so such streams are rejected.
    """
    incompatible_classes = []
    # Either module may be missing if Python was built without it, so the
    # checks are best-effort.
    try:
        import gzip
    except ImportError:
        pass
    else:
        incompatible_classes.append(gzip.GzipFile)
    try:
        import bz2
    except ImportError:
        pass
    else:
        incompatible_classes.append(bz2.BZ2File)

    return not isinstance(stream, tuple(incompatible_classes))
env-llmeval/lib/python3.10/site-packages/scipy/io/_netcdf.py ADDED
@@ -0,0 +1,1095 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ NetCDF reader/writer module.
3
+
4
+ This module is used to read and create NetCDF files. NetCDF files are
5
+ accessed through the `netcdf_file` object. Data written to and from NetCDF
6
+ files are contained in `netcdf_variable` objects. Attributes are given
7
+ as member variables of the `netcdf_file` and `netcdf_variable` objects.
8
+
9
+ This module implements the Scientific.IO.NetCDF API to read and create
10
+ NetCDF files. The same API is also used in the PyNIO and pynetcdf
11
+ modules, allowing these modules to be used interchangeably when working
12
+ with NetCDF files.
13
+
14
+ Only NetCDF3 is supported here; for NetCDF4 see
15
+ `netCDF4-python <http://unidata.github.io/netcdf4-python/>`__,
16
+ which has a similar API.
17
+
18
+ """
19
+
20
+ # TODO:
21
+ # * properly implement ``_FillValue``.
22
+ # * fix character variables.
23
+ # * implement PAGESIZE for Python 2.6?
24
+
25
+ # The Scientific.IO.NetCDF API allows attributes to be added directly to
26
+ # instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
27
+ # between user-set attributes and instance attributes, user-set attributes
28
+ # are automatically stored in the ``_attributes`` attribute by overloading
29
+ #``__setattr__``. This is the reason why the code sometimes uses
30
+ #``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
31
+ # otherwise the key would be inserted into userspace attributes.
32
+
33
+
34
+ __all__ = ['netcdf_file', 'netcdf_variable']
35
+
36
+
37
+ import warnings
38
+ import weakref
39
+ from operator import mul
40
+ from platform import python_implementation
41
+
42
+ import mmap as mm
43
+
44
+ import numpy as np
45
+ from numpy import frombuffer, dtype, empty, array, asarray
46
+ from numpy import little_endian as LITTLE_ENDIAN
47
+ from functools import reduce
48
+
49
+
50
# PyPy cannot reliably close mmapped buffers before GC runs, so mmap
# defaults to off there (see netcdf_file.__init__).
IS_PYPY = python_implementation() == 'PyPy'

# On-disk NetCDF-3 tag constants (big-endian byte strings).
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
# Default fill bytes written for unset data of each NetCDF type.
FILL_BYTE = b'\x81'
FILL_CHAR = b'\x00'
FILL_SHORT = b'\x80\x01'
FILL_INT = b'\x80\x00\x00\x01'
FILL_FLOAT = b'\x7C\xF0\x00\x00'
FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00'

# NetCDF type tag -> (numpy typecode, itemsize in bytes).
TYPEMAP = {NC_BYTE: ('b', 1),
           NC_CHAR: ('c', 1),
           NC_SHORT: ('h', 2),
           NC_INT: ('i', 4),
           NC_FLOAT: ('f', 4),
           NC_DOUBLE: ('d', 8)}

# NetCDF type tag -> default fill bytes.
FILLMAP = {NC_BYTE: FILL_BYTE,
           NC_CHAR: FILL_CHAR,
           NC_SHORT: FILL_SHORT,
           NC_INT: FILL_INT,
           NC_FLOAT: FILL_FLOAT,
           NC_DOUBLE: FILL_DOUBLE}

# (numpy typecode, itemsize) -> NetCDF type tag, used when writing.
REVERSE = {('b', 1): NC_BYTE,
           ('B', 1): NC_CHAR,
           ('c', 1): NC_CHAR,
           ('h', 2): NC_SHORT,
           ('i', 4): NC_INT,
           ('f', 4): NC_FLOAT,
           ('d', 8): NC_DOUBLE,

           # these come from asarray(1).dtype.char and asarray('foo').dtype.char,
           # used when getting the types from generic attributes.
           ('l', 4): NC_INT,
           ('S', 1): NC_CHAR}
96
+
97
+
98
+ class netcdf_file:
99
+ """
100
+ A file object for NetCDF data.
101
+
102
+ A `netcdf_file` object has two standard attributes: `dimensions` and
103
+ `variables`. The values of both are dictionaries, mapping dimension
104
+ names to their associated lengths and variable names to variables,
105
+ respectively. Application programs should never modify these
106
+ dictionaries.
107
+
108
+ All other attributes correspond to global attributes defined in the
109
+ NetCDF file. Global file attributes are created by assigning to an
110
+ attribute of the `netcdf_file` object.
111
+
112
+ Parameters
113
+ ----------
114
+ filename : string or file-like
115
+ string -> filename
116
+ mode : {'r', 'w', 'a'}, optional
117
+ read-write-append mode, default is 'r'
118
+ mmap : None or bool, optional
119
+ Whether to mmap `filename` when reading. Default is True
120
+ when `filename` is a file name, False when `filename` is a
121
+ file-like object. Note that when mmap is in use, data arrays
122
+ returned refer directly to the mmapped data on disk, and the
123
+ file cannot be closed as long as references to it exist.
124
+ version : {1, 2}, optional
125
+ version of netcdf to read / write, where 1 means *Classic
126
+ format* and 2 means *64-bit offset format*. Default is 1. See
127
+ `here <https://docs.unidata.ucar.edu/nug/current/netcdf_introduction.html#select_format>`__
128
+ for more info.
129
+ maskandscale : bool, optional
130
+ Whether to automatically scale and/or mask data based on attributes.
131
+ Default is False.
132
+
133
+ Notes
134
+ -----
135
+ The major advantage of this module over other modules is that it doesn't
136
+ require the code to be linked to the NetCDF libraries. This module is
137
+ derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
138
+
139
+ NetCDF files are a self-describing binary data format. The file contains
140
+ metadata that describes the dimensions and variables in the file. More
141
+ details about NetCDF files can be found `here
142
+ <https://www.unidata.ucar.edu/software/netcdf/guide_toc.html>`__. There
143
+ are three main sections to a NetCDF data structure:
144
+
145
+ 1. Dimensions
146
+ 2. Variables
147
+ 3. Attributes
148
+
149
+ The dimensions section records the name and length of each dimension used
150
+ by the variables. The variables would then indicate which dimensions it
151
+ uses and any attributes such as data units, along with containing the data
152
+ values for the variable. It is good practice to include a
153
+ variable that is the same name as a dimension to provide the values for
154
+ that axes. Lastly, the attributes section would contain additional
155
+ information such as the name of the file creator or the instrument used to
156
+ collect the data.
157
+
158
+ When writing data to a NetCDF file, there is often the need to indicate the
159
+ 'record dimension'. A record dimension is the unbounded dimension for a
160
+ variable. For example, a temperature variable may have dimensions of
161
+ latitude, longitude and time. If one wants to add more temperature data to
162
+ the NetCDF file as time progresses, then the temperature variable should
163
+ have the time dimension flagged as the record dimension.
164
+
165
+ In addition, the NetCDF file header contains the position of the data in
166
+ the file, so access can be done in an efficient manner without loading
167
+ unnecessary data into memory. It uses the ``mmap`` module to create
168
+ Numpy arrays mapped to the data on disk, for the same purpose.
169
+
170
+ Note that when `netcdf_file` is used to open a file with mmap=True
171
+ (default for read-only), arrays returned by it refer to data
172
+ directly on the disk. The file should not be closed, and cannot be cleanly
173
+ closed when asked, if such arrays are alive. You may want to copy data arrays
174
+ obtained from mmapped Netcdf file if they are to be processed after the file
175
+ is closed, see the example below.
176
+
177
+ Examples
178
+ --------
179
+ To create a NetCDF file:
180
+
181
+ >>> from scipy.io import netcdf_file
182
+ >>> import numpy as np
183
+ >>> f = netcdf_file('simple.nc', 'w')
184
+ >>> f.history = 'Created for a test'
185
+ >>> f.createDimension('time', 10)
186
+ >>> time = f.createVariable('time', 'i', ('time',))
187
+ >>> time[:] = np.arange(10)
188
+ >>> time.units = 'days since 2008-01-01'
189
+ >>> f.close()
190
+
191
+ Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
192
+ of the time variable allows for the data to be set in the object, rather
193
+ than letting ``arange(10)`` overwrite the ``time`` variable.
194
+
195
+ To read the NetCDF file we just created:
196
+
197
+ >>> from scipy.io import netcdf_file
198
+ >>> f = netcdf_file('simple.nc', 'r')
199
+ >>> print(f.history)
200
+ b'Created for a test'
201
+ >>> time = f.variables['time']
202
+ >>> print(time.units)
203
+ b'days since 2008-01-01'
204
+ >>> print(time.shape)
205
+ (10,)
206
+ >>> print(time[-1])
207
+ 9
208
+
209
+ NetCDF files, when opened read-only, return arrays that refer
210
+ directly to memory-mapped data on disk:
211
+
212
+ >>> data = time[:]
213
+
214
+ If the data is to be processed after the file is closed, it needs
215
+ to be copied to main memory:
216
+
217
+ >>> data = time[:].copy()
218
+ >>> del time
219
+ >>> f.close()
220
+ >>> data.mean()
221
+ 4.5
222
+
223
+ A NetCDF file can also be used as context manager:
224
+
225
+ >>> from scipy.io import netcdf_file
226
+ >>> with netcdf_file('simple.nc', 'r') as f:
227
+ ... print(f.history)
228
+ b'Created for a test'
229
+
230
+ """
231
    def __init__(self, filename, mode='r', mmap=None, version=1,
                 maskandscale=False):
        """Initialize netcdf_file from fileobj (str or file-like)."""
        if mode not in 'rwa':
            raise ValueError("Mode must be either 'r', 'w' or 'a'.")

        if hasattr(filename, 'seek'):  # file-like
            self.fp = filename
            self.filename = 'None'
            if mmap is None:
                # File objects cannot be mmapped by default.
                mmap = False
            elif mmap and not hasattr(filename, 'fileno'):
                raise ValueError('Cannot use file object for mmap')
        else:  # maybe it's a string
            self.filename = filename
            # Append mode maps to read-update so the header can be re-read.
            omode = 'r+' if mode == 'a' else mode
            self.fp = open(self.filename, '%sb' % omode)
            if mmap is None:
                # Mmapped files on PyPy cannot be usually closed
                # before the GC runs, so it's better to use mmap=False
                # as the default.
                mmap = (not IS_PYPY)

        if mode != 'r':
            # Cannot read write-only files
            mmap = False

        self.use_mmap = mmap
        self.mode = mode
        self.version_byte = version
        self.maskandscale = maskandscale

        self.dimensions = {}
        self.variables = {}

        # Internal bookkeeping: ordered dimension names, record count,
        # and record-slab size in bytes.
        self._dims = []
        self._recs = 0
        self._recsize = 0

        self._mm = None
        self._mm_buf = None
        if self.use_mmap:
            # Map the whole file read-only; variable data arrays will be
            # views into this buffer.
            self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
            self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)

        # Must exist before user attributes are set via __setattr__.
        self._attributes = {}

        if mode in 'ra':
            self._read()
280
+
281
+ def __setattr__(self, attr, value):
282
+ # Store user defined attributes in a separate dict,
283
+ # so we can save them to file later.
284
+ try:
285
+ self._attributes[attr] = value
286
+ except AttributeError:
287
+ pass
288
+ self.__dict__[attr] = value
289
+
290
    def close(self):
        """Closes the NetCDF file, flushing pending writes first.

        When the file was opened with mmap=True, the mmap can only be
        closed if no arrays still reference the mapped buffer; otherwise
        a RuntimeWarning is issued and the mmap is left open.
        """
        if hasattr(self, 'fp') and not self.fp.closed:
            try:
                self.flush()
            finally:
                self.variables = {}
                if self._mm_buf is not None:
                    # Drop our reference and use a weakref to learn whether
                    # any outside arrays still keep the buffer alive.
                    ref = weakref.ref(self._mm_buf)
                    self._mm_buf = None
                    if ref() is None:
                        # self._mm_buf is gc'd, and we can close the mmap
                        self._mm.close()
                    else:
                        # we cannot close self._mm, since self._mm_buf is
                        # alive and there may still be arrays referring to it
                        warnings.warn(
                            "Cannot close a netcdf_file opened with mmap=True, when "
                            "netcdf_variables or arrays referring to its data still "
                            "exist. All data arrays obtained from such files refer "
                            "directly to data on disk, and must be copied before the "
                            "file can be cleanly closed. "
                            "(See netcdf_file docstring for more information on mmap.)",
                            category=RuntimeWarning, stacklevel=2,
                        )
                self._mm = None
                self.fp.close()
    # Closing on garbage collection mirrors an explicit close().
    __del__ = close
318
+
319
    def __enter__(self):
        # Context-manager support: the file object itself is the resource.
        return self
321
+
322
    def __exit__(self, type, value, traceback):
        # Always close (and therefore flush) on exiting the ``with`` block.
        self.close()
324
+
325
+ def createDimension(self, name, length):
326
+ """
327
+ Adds a dimension to the Dimension section of the NetCDF data structure.
328
+
329
+ Note that this function merely adds a new dimension that the variables can
330
+ reference. The values for the dimension, if desired, should be added as
331
+ a variable using `createVariable`, referring to this dimension.
332
+
333
+ Parameters
334
+ ----------
335
+ name : str
336
+ Name of the dimension (Eg, 'lat' or 'time').
337
+ length : int
338
+ Length of the dimension.
339
+
340
+ See Also
341
+ --------
342
+ createVariable
343
+
344
+ """
345
+ if length is None and self._dims:
346
+ raise ValueError("Only first dimension may be unlimited!")
347
+
348
+ self.dimensions[name] = length
349
+ self._dims.append(name)
350
+
351
def createVariable(self, name, type, dimensions):
    """
    Create an empty variable on this `netcdf_file`, declaring its data
    type and the dimensions it spans.

    Parameters
    ----------
    name : str
        Name of the new variable.
    type : dtype or str
        Data type of the variable.
    dimensions : sequence of str
        Names of the dimensions used by the variable, in order.

    Returns
    -------
    variable : netcdf_variable
        The newly created ``netcdf_variable``; it is also registered in
        ``self.variables``.

    See Also
    --------
    createDimension

    Notes
    -----
    Every dimension named in `dimensions` must already exist (see
    `createDimension`) before the variable is created.

    """
    shape = tuple(self.dimensions[dim] for dim in dimensions)
    # NumPy cannot allocate a None-length axis: the record dimension
    # starts out with zero records.
    shape_ = tuple(n or 0 for n in shape)

    type = dtype(type)
    typecode, size = type.char, type.itemsize
    if (typecode, size) not in REVERSE:
        raise ValueError("NetCDF 3 does not support type %s" % type)

    # NetCDF 3 stores everything big-endian, so keep the in-memory
    # buffer big-endian as well.
    data = empty(shape_, dtype=type.newbyteorder("B"))
    var = netcdf_variable(data, typecode, size, shape, dimensions,
                          maskandscale=self.maskandscale)
    self.variables[name] = var
    return var
396
+
397
def flush(self):
    """
    Sync buffered contents to disk when the file is open for writing.

    See Also
    --------
    sync : Identical function

    """
    # ``self.mode in 'wa'`` is a substring test, true for 'w' and 'a'.
    # hasattr guards against partially constructed instances.
    if hasattr(self, 'mode') and self.mode in 'wa':
        self._write()
sync = flush
409
+
410
def _write(self):
    # Emit the magic bytes and version, then the four file sections in
    # their mandatory order.
    self.fp.seek(0)
    self.fp.write(b'CDF')
    self.fp.write(array(self.version_byte, '>b').tobytes())

    self._write_numrecs()
    self._write_dim_array()
    self._write_gatt_array()
    self._write_var_array()
420
+
421
def _write_numrecs(self):
    # The file-level record count is the longest record-variable length.
    longest = self._recs
    for var in self.variables.values():
        if var.isrec:
            longest = max(longest, len(var.data))
    if longest > self._recs:
        # __dict__ assignment sidesteps attribute interception.
        self.__dict__['_recs'] = longest
    self._pack_int(self._recs)
427
+
428
def _write_dim_array(self):
    # An empty dimension section is written as the ABSENT marker.
    if not self.dimensions:
        self.fp.write(ABSENT)
        return
    self.fp.write(NC_DIMENSION)
    self._pack_int(len(self.dimensions))
    for name in self._dims:
        self._pack_string(name)
        # The record dimension (None) is stored as length 0 on disk.
        self._pack_int(self.dimensions[name] or 0)
438
+
439
def _write_gatt_array(self):
    # Global attributes share the on-disk layout of variable attributes.
    self._write_att_array(self._attributes)
441
+
442
def _write_att_array(self, attributes):
    # An empty attribute section is written as the ABSENT marker.
    if not attributes:
        self.fp.write(ABSENT)
        return
    self.fp.write(NC_ATTRIBUTE)
    self._pack_int(len(attributes))
    for name, values in attributes.items():
        self._pack_string(name)
        self._write_att_values(values)
451
+
452
def _write_var_array(self):
    # An empty variable section is written as the ABSENT marker.
    if not self.variables:
        self.fp.write(ABSENT)
        return
    self.fp.write(NC_VARIABLE)
    self._pack_int(len(self.variables))

    def sortkey(n):
        # Record variables sort behind non-record ones (the (-1,) key,
        # combined with reverse=True, pushes them towards the end).
        v = self.variables[n]
        return (-1,) if v.isrec else v._shape
    variables = sorted(self.variables, key=sortkey, reverse=True)

    # Pass 1: metadata.  This also computes each record variable's
    # _vsize, which is needed for the total record size below.
    for name in variables:
        self._write_var_metadata(name)
    # The netCDF "record size" is the sum of the vsizes of all record
    # variables.
    self.__dict__['_recsize'] = sum(
        var._vsize for var in self.variables.values() if var.isrec)
    # Pass 2: the data payloads.
    for name in variables:
        self._write_var_data(name)
478
+
479
def _write_var_metadata(self, name):
    var = self.variables[name]

    self._pack_string(name)
    self._pack_int(len(var.dimensions))
    for dimname in var.dimensions:
        # Dimensions are referenced by their index in the dim section.
        self._pack_int(self._dims.index(dimname))

    self._write_att_array(var._attributes)

    nc_type = REVERSE[var.typecode(), var.itemsize()]
    self.fp.write(nc_type)

    if not var.isrec:
        vsize = var.data.size * var.data.itemsize
        vsize += -vsize % 4  # round up to a 4-byte boundary
    else:  # record variable: vsize is the byte count per record
        try:
            vsize = var.data[0].size * var.data.itemsize
        except IndexError:
            vsize = 0  # no records written yet
        # Records are only padded when several record variables are
        # interleaved.
        rec_vars = len([v for v in self.variables.values() if v.isrec])
        if rec_vars > 1:
            vsize += -vsize % 4
    self.variables[name].__dict__['_vsize'] = vsize
    self._pack_int(vsize)

    # Write a placeholder 'begin' offset; _write_var_data patches in
    # the real value once the data position is known.
    self.variables[name].__dict__['_begin'] = self.fp.tell()
    self._pack_begin(0)
511
+
512
def _write_var_data(self, name):
    var = self.variables[name]

    # Patch the real begin offset into the header slot reserved by
    # _write_var_metadata, then come back here.
    data_start = self.fp.tell()
    self.fp.seek(var._begin)
    self._pack_begin(data_start)
    self.fp.seek(data_start)

    if not var.isrec:
        self.fp.write(var.data.tobytes())
        count = var.data.size * var.data.itemsize
        self._write_var_padding(var, var._vsize - count)
        return

    # Record variable.  Grow it first if other record variables carry
    # more records than this one.
    if self._recs > len(var.data):
        shape = (self._recs,) + var.data.shape[1:]
        # In-place resize can fail for non-single-segment arrays.
        try:
            var.data.resize(shape)
        except ValueError:
            dtype = var.data.dtype
            var.__dict__['data'] = np.resize(var.data, shape).astype(dtype)

    pos0 = pos = self.fp.tell()
    for rec in var.data:
        # Scalars apparently cannot be converted to big endian: asking
        # a ``=i4`` scalar for '>i4' leaves the dtype as ``=i4``, so
        # little-endian 0-d records are byteswapped explicitly.
        if not rec.shape and (rec.dtype.byteorder == '<' or
                              (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            rec = rec.byteswap()
        self.fp.write(rec.tobytes())
        # Padding, then hop over the other variables' records.
        count = rec.size * rec.itemsize
        self._write_var_padding(var, var._vsize - count)
        pos += self._recsize
        self.fp.seek(pos)
    self.fp.seek(pos0 + var._vsize)
553
+
554
def _write_var_padding(self, var, size):
    # Fill the gap with whole copies of the variable's encoded
    # fill value.
    fill = var._get_encoded_fill_value()
    self.fp.write(fill * (size // len(fill)))
558
+
559
def _write_att_values(self, values):
    if hasattr(values, 'dtype'):
        nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
    else:
        types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)]

        # Indexing a bytes object yields ints on py3, so "string"
        # types are treated as scalars up front.
        if isinstance(values, (str, bytes)):
            sample = values
        else:
            try:
                sample = values[0]  # subscriptable?
            except TypeError:
                sample = values  # scalar

        for class_, nc_type in types:
            if isinstance(sample, class_):
                break

    typecode, size = TYPEMAP[nc_type]
    dtype_ = '>%s' % typecode
    # asarray() dies on bytes with '>c' in py3; use 'S' instead.
    if dtype_ == '>c':
        dtype_ = 'S'

    values = asarray(values, dtype=dtype_)

    self.fp.write(nc_type)

    # For byte strings the element count is the string length.
    nelems = values.itemsize if values.dtype.char == 'S' else values.size
    self._pack_int(nelems)

    # 0-d arrays resist dtype-level byteorder conversion; swap
    # little-endian scalars explicitly.
    if not values.shape and (values.dtype.byteorder == '<' or
                             (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
        values = values.byteswap()
    self.fp.write(values.tobytes())
    count = values.size * values.itemsize
    self.fp.write(b'\x00' * (-count % 4))  # pad to a 4-byte boundary
599
+
600
def _read(self):
    # Validate the magic bytes and record the on-disk format version.
    magic = self.fp.read(3)
    if magic != b'CDF':
        raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                        self.filename)
    self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]

    # Parse the four file sections in order.
    self._read_numrecs()
    self._read_dim_array()
    self._read_gatt_array()
    self._read_var_array()
613
+
614
def _read_numrecs(self):
    # Stored through __dict__ to sidestep attribute interception.
    self.__dict__['_recs'] = self._unpack_int()
616
+
617
def _read_dim_array(self):
    header = self.fp.read(4)
    if header not in (ZERO, NC_DIMENSION):
        raise ValueError("Unexpected header.")
    count = self._unpack_int()

    for _ in range(count):
        name = self._unpack_string().decode('latin1')
        # A stored length of 0 denotes the record dimension.
        length = self._unpack_int() or None
        self.dimensions[name] = length
        self._dims.append(name)  # preserve order
628
+
629
def _read_gatt_array(self):
    # Promote every global attribute to an attribute of the file object.
    for name, value in self._read_att_array().items():
        setattr(self, name, value)
632
+
633
def _read_att_array(self):
    header = self.fp.read(4)
    if header not in (ZERO, NC_ATTRIBUTE):
        raise ValueError("Unexpected header.")
    count = self._unpack_int()

    # Name and values must be read in this order: both advance fp.
    attributes = {}
    for _ in range(count):
        name = self._unpack_string().decode('latin1')
        attributes[name] = self._read_att_values()
    return attributes
644
+
645
def _read_var_array(self):
    header = self.fp.read(4)
    if header not in (ZERO, NC_VARIABLE):
        raise ValueError("Unexpected header.")

    begin = 0
    dtypes = {'names': [], 'formats': []}
    rec_vars = []
    count = self._unpack_int()
    for var in range(count):
        (name, dimensions, shape, attributes,
         typecode, size, dtype_, begin_, vsize) = self._read_var()
        # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
        # vsize is the product of the dimension lengths (omitting the
        # record dimension) and the per-value byte count, rounded up to
        # a multiple of 4.  For a record variable it is the space per
        # record.  The field is redundant (derivable from the header)
        # and is capped at 2**32 - 1 for variables larger than
        # 2**32 - 4 bytes, so it is only trusted where noted below.
        if shape and shape[0] is None:  # record variable
            rec_vars.append(name)
            # The netCDF "record size" is the sum of the vsizes of all
            # record variables.
            self.__dict__['_recsize'] += vsize
            if begin == 0:
                begin = begin_
            dtypes['names'].append(name)
            dtypes['formats'].append(str(shape[1:]) + dtype_)

            # Small element types are padded to 4 bytes per record;
            # model that padding as a hidden struct field.
            if typecode in 'bch':
                actual_size = reduce(mul, (1,) + shape[1:]) * size
                padding = -actual_size % 4
                if padding:
                    dtypes['names'].append('_padding_%d' % var)
                    dtypes['formats'].append('(%d,)>b' % padding)

            # Data is filled in after every record variable is known.
            data = None
        else:  # not a record variable
            # Compute the size directly rather than trusting vsize
            # (see the note above).
            a_size = reduce(mul, shape, 1) * size
            if self.use_mmap:
                data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
                data.shape = shape
            else:
                pos = self.fp.tell()
                self.fp.seek(begin_)
                data = frombuffer(self.fp.read(a_size),
                                  dtype=dtype_).copy()
                data.shape = shape
                self.fp.seek(pos)

        # Register the variable.
        self.variables[name] = netcdf_variable(
            data, typecode, size, shape, dimensions, attributes,
            maskandscale=self.maskandscale)

    if rec_vars:
        # With a single record variable there is no interleaving, so
        # drop the padding field.
        if len(rec_vars) == 1:
            dtypes['names'] = dtypes['names'][:1]
            dtypes['formats'] = dtypes['formats'][:1]

        # One structured array holds all the interleaved records.
        if self.use_mmap:
            buf = self._mm_buf[begin:begin+self._recs*self._recsize]
            rec_array = buf.view(dtype=dtypes)
            rec_array.shape = (self._recs,)
        else:
            pos = self.fp.tell()
            self.fp.seek(begin)
            rec_array = frombuffer(self.fp.read(self._recs*self._recsize),
                                   dtype=dtypes).copy()
            rec_array.shape = (self._recs,)
            self.fp.seek(pos)

        for var in rec_vars:
            self.variables[var].__dict__['data'] = rec_array[var]
731
+
732
def _read_var(self):
    name = self._unpack_string().decode('latin1')
    dimensions = []
    shape = []
    ndims = self._unpack_int()

    # Dimensions are stored as indices into the dim section.
    for _ in range(ndims):
        dimname = self._dims[self._unpack_int()]
        dimensions.append(dimname)
        shape.append(self.dimensions[dimname])
    dimensions = tuple(dimensions)
    shape = tuple(shape)

    attributes = self._read_att_array()
    nc_type = self.fp.read(4)
    vsize = self._unpack_int()
    # 'begin' is a 32-bit offset in version-1 files, 64-bit in
    # version-2 files.
    begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()

    typecode, size = TYPEMAP[nc_type]
    dtype_ = '>%s' % typecode

    return (name, dimensions, shape, attributes, typecode, size, dtype_,
            begin, vsize)
756
+
757
def _read_att_values(self):
    nc_type = self.fp.read(4)
    n = self._unpack_int()

    typecode, size = TYPEMAP[nc_type]

    count = n * size
    values = self.fp.read(int(count))
    self.fp.read(-count % 4)  # skip padding

    if typecode == 'c':
        # Character data: NUL-trimmed byte string.
        values = values.rstrip(b'\x00')
    else:
        values = frombuffer(values, dtype='>%s' % typecode).copy()
        if values.shape == (1,):
            values = values[0]  # unwrap single-element arrays
    return values
774
+
775
def _pack_begin(self, begin):
    # Offsets are 32-bit in version-1 files, 64-bit in version-2 files.
    if self.version_byte == 1:
        self._pack_int(begin)
    elif self.version_byte == 2:
        self._pack_int64(begin)
780
+
781
def _pack_int(self, value):
    # Big-endian 32-bit signed integer.
    self.fp.write(array(value, '>i').tobytes())
_pack_int32 = _pack_int
784
+
785
def _unpack_int(self):
    # Big-endian 32-bit signed integer, returned as a Python int.
    return int(frombuffer(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
788
+
789
def _pack_int64(self, value):
    # Big-endian 64-bit signed integer.
    self.fp.write(array(value, '>q').tobytes())
791
+
792
def _unpack_int64(self):
    """Read a big-endian 64-bit signed integer from the file.

    Returns a Python ``int`` (not a NumPy scalar), matching
    ``_unpack_int`` so downstream arithmetic and ``seek`` offsets
    behave consistently regardless of the file version.
    """
    return int(frombuffer(self.fp.read(8), '>q')[0])
794
+
795
def _pack_string(self, s):
    # Length-prefixed latin-1 bytes, NUL-padded to a 4-byte boundary.
    count = len(s)
    self._pack_int(count)
    self.fp.write(s.encode('latin1'))
    self.fp.write(b'\x00' * (-count % 4))
800
+
801
def _unpack_string(self):
    # Length-prefixed byte string; NUL padding is trimmed and skipped.
    count = self._unpack_int()
    data = self.fp.read(count).rstrip(b'\x00')
    self.fp.read(-count % 4)
    return data
806
+
807
+
808
class netcdf_variable:
    """
    A data object for netcdf files.

    Instances are created by `netcdf_file.createVariable` rather than
    directly.  They behave much like NumPy arrays whose storage lives in
    a file: read by indexing, write by assigning to an indexed subset;
    ``[:]`` accesses the whole array, and for scalars `getValue` /
    `assignValue` are available.  The read-only `shape` property mirrors
    the array shape, and `dimensions` holds the tuple of dimension names.

    All other attributes correspond to variable attributes defined in
    the NetCDF file; assigning an attribute on the object creates a
    variable attribute.

    Parameters
    ----------
    data : array_like
        The array holding the variable's values (typically created
        empty, with the proper shape).
    typecode : dtype character code
        Desired data-type for the data array.
    size : int
        Desired element size for the data array.
    shape : sequence of ints
        Shape of the array; must match the variable's dimension lengths.
    dimensions : sequence of strings
        Names of the dimensions used by the variable, in the same order
        as the lengths in `shape`.
    attributes : dict, optional
        Attribute values (any type) keyed by string names; these become
        attributes of the object.
    maskandscale : bool, optional
        Whether to automatically scale and/or mask data based on
        attributes.  Default is False.


    Attributes
    ----------
    dimensions : list of str
        List of names of dimensions used by the variable object.
    isrec, shape
        Properties

    See also
    --------
    isrec, shape

    """
    def __init__(self, data, typecode, size, shape, dimensions,
                 attributes=None,
                 maskandscale=False):
        # NOTE: assignment order matters.  __setattr__ mirrors values
        # into _attributes once that dict exists, so the internal
        # fields must all be set before _attributes is created.
        self.data = data
        self._typecode = typecode
        self._size = size
        self._shape = shape
        self.dimensions = dimensions
        self.maskandscale = maskandscale

        self._attributes = attributes or {}
        for k, v in self._attributes.items():
            self.__dict__[k] = v

    def __setattr__(self, attr, value):
        # User-defined attributes are kept in a separate dict so they
        # can be written back to the file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            # _attributes does not exist yet (during __init__).
            pass
        self.__dict__[attr] = value

    @property
    def isrec(self):
        """Returns whether the variable has a record dimension or not.

        A record dimension is a dimension along which additional data
        could be easily appended in the netcdf data structure without
        much rewriting of the data file.  This attribute is a read-only
        property of the `netcdf_variable`.

        """
        return bool(self.data.shape) and not self._shape[0]

    @property
    def shape(self):
        """Returns the shape tuple of the data variable.

        This is a read-only attribute and can not be modified in the
        same manner of other numpy arrays.
        """
        return self.data.shape

    def getValue(self):
        """
        Retrieve a scalar value from a `netcdf_variable` of length one.

        Raises
        ------
        ValueError
            If the netcdf variable is an array of length greater than
            one, this exception will be raised.

        """
        return self.data.item()

    def assignValue(self, value):
        """
        Assign a scalar value to a `netcdf_variable` of length one.

        Parameters
        ----------
        value : scalar
            Scalar value (of compatible type) to assign to a length-one
            netcdf variable.  This value will be written to file.

        Raises
        ------
        ValueError
            If the input is not a scalar, or if the destination is not
            a length-one netcdf variable.

        """
        if not self.data.flags.writeable:
            # Work-around for a bug in NumPy.  Calling itemset() on a
            # read-only memory-mapped array causes a seg. fault.
            # See NumPy ticket #1622, and SciPy ticket #1202.
            # This check for `writeable` can be removed when the oldest
            # version of NumPy still supported by scipy contains the
            # fix for #1622.
            raise RuntimeError("variable is not writeable")

        self.data[:] = value

    def typecode(self):
        """
        Return the typecode of the variable.

        Returns
        -------
        typecode : char
            The character typecode of the variable (e.g., 'i' for int).

        """
        return self._typecode

    def itemsize(self):
        """
        Return the itemsize of the variable.

        Returns
        -------
        itemsize : int
            The element size of the variable (e.g., 8 for float64).

        """
        return self._size

    def __getitem__(self, index):
        if not self.maskandscale:
            return self.data[index]

        data = self.data[index].copy()
        data = self._apply_missing_value(data, self._get_missing_value())
        scale_factor = self._attributes.get('scale_factor')
        add_offset = self._attributes.get('add_offset')
        # Scaling promotes to float64 before applying either factor.
        if add_offset is not None or scale_factor is not None:
            data = data.astype(np.float64)
        if scale_factor is not None:
            data = data * scale_factor
        if add_offset is not None:
            data += add_offset

        return data

    def __setitem__(self, index, data):
        if self.maskandscale:
            missing_value = (
                self._get_missing_value() or
                getattr(data, 'fill_value', 999999))
            self._attributes.setdefault('missing_value', missing_value)
            self._attributes.setdefault('_FillValue', missing_value)
            # Invert the read-time transform before storing.
            data = ((data - self._attributes.get('add_offset', 0.0)) /
                    self._attributes.get('scale_factor', 1.0))
            data = np.ma.asarray(data).filled(missing_value)
            if self._typecode not in 'fd' and data.dtype.kind == 'f':
                data = np.round(data)

        # Record variables grow on demand when writing past the end.
        if self.isrec:
            rec_index = index[0] if isinstance(index, tuple) else index
            if isinstance(rec_index, slice):
                recs = (rec_index.start or 0) + len(data)
            else:
                recs = rec_index + 1
            if recs > len(self.data):
                shape = (recs,) + self._shape[1:]
                # In-place resize does not always work, since the
                # array might not be single-segment.
                try:
                    self.data.resize(shape)
                except ValueError:
                    dtype = self.data.dtype
                    self.__dict__['data'] = np.resize(self.data, shape).astype(dtype)
        self.data[index] = data

    def _default_encoded_fill_value(self):
        """
        The default encoded fill-value for this Variable's data type.
        """
        nc_type = REVERSE[self.typecode(), self.itemsize()]
        return FILLMAP[nc_type]

    def _get_encoded_fill_value(self):
        """
        Returns the encoded fill value for this variable as bytes.

        This is taken from either the _FillValue attribute, or the
        default fill value for this variable's data type.
        """
        if '_FillValue' in self._attributes:
            fill_value = np.array(self._attributes['_FillValue'],
                                  dtype=self.data.dtype).tobytes()
            # A malformed _FillValue falls back to the type default.
            if len(fill_value) == self.itemsize():
                return fill_value
        return self._default_encoded_fill_value()

    def _get_missing_value(self):
        """
        Returns the value denoting "no data" for this variable.

        If this variable does not have a missing/fill value, returns
        None.

        If both _FillValue and missing_value are given, give precedence
        to _FillValue.  The netCDF standard gives special meaning to
        _FillValue; missing_value is just used for compatibility with
        old datasets.
        """
        if '_FillValue' in self._attributes:
            return self._attributes['_FillValue']
        if 'missing_value' in self._attributes:
            return self._attributes['missing_value']
        return None

    @staticmethod
    def _apply_missing_value(data, missing_value):
        """
        Applies the given missing value to the data array.

        Returns a numpy.ma array, with any value equal to missing_value
        masked out (unless missing_value is None, in which case the
        original array is returned).
        """
        if missing_value is None:
            return data

        try:
            missing_value_isnan = np.isnan(missing_value)
        except (TypeError, NotImplementedError):
            # some data types (e.g., characters) cannot be tested
            # for NaN
            missing_value_isnan = False

        # NaN sentinels cannot be matched with ``==``.
        if missing_value_isnan:
            mymask = np.isnan(data)
        else:
            mymask = (data == missing_value)

        return np.ma.masked_where(mymask, data)
1092
+
1093
+
1094
# Backwards-compatible aliases preserved from the original
# Scientific.IO.NetCDF API.
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
env-llmeval/lib/python3.10/site-packages/scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (63.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/harwell_boeing.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'MalformedHeader', 'hb_read', 'hb_write', 'HBInfo',
    'HBFile', 'HBMatrixType', 'FortranFormatParser', 'IntFormat',
    'ExpFormat', 'BadFortranFormat', 'hb'
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to the private implementation module,
    # emitting a deprecation warning along the way.
    return _sub_module_deprecation(sub_package="io", module="harwell_boeing",
                                   private_modules=["_harwell_boeing"],
                                   all=__all__, attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/idl.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'readsav', 'DTYPE_DICT', 'RECTYPE_DICT', 'STRUCT_DICT',
    'Pointer', 'ObjectPointer', 'AttrDict'
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to the private implementation module,
    # emitting a deprecation warning along the way.
    return _sub_module_deprecation(sub_package="io", module="idl",
                                   private_modules=["_idl"],
                                   all=__all__, attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/byteordercodes.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'aliases', 'native_code', 'swapped_code',
    'sys_is_le', 'to_numpy_code'
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to the private implementation module,
    # emitting a deprecation warning along the way.
    return _sub_module_deprecation(sub_package="io.matlab",
                                   module="byteordercodes",
                                   private_modules=["_byteordercodes"],
                                   all=__all__, attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/mio5_params.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.io.matlab` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque',
9
+ 'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template',
10
+ 'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template',
11
+ 'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8',
12
+ 'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8',
13
+ 'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS',
14
+ 'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS',
15
+ 'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS',
16
+ 'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS',
17
+ 'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS',
18
+ 'mxUINT64_CLASS', 'mxUINT8_CLASS', 'convert_dtypes'
19
+ ]
20
+
21
+ def __dir__():
22
+ return __all__
23
+
24
+
25
+ def __getattr__(name):
26
+ return _sub_module_deprecation(sub_package="io.matlab", module="mio5_params",
27
+ private_modules=["_mio5_params"], all=__all__,
28
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/matlab/streams.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.io.matlab` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'BLOCK_SIZE', 'GenericStream', 'ZlibInputStream', 'make_stream'
9
+ ]
10
+
11
+ def __dir__():
12
+ return __all__
13
+
14
+
15
+ def __getattr__(name):
16
+ return _sub_module_deprecation(sub_package="io.matlab", module="streams",
17
+ private_modules=["_streams"], all=__all__,
18
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/netcdf.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.io` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'netcdf_file', 'netcdf_variable',
9
+ 'array', 'LITTLE_ENDIAN', 'IS_PYPY', 'ABSENT', 'ZERO',
10
+ 'NC_BYTE', 'NC_CHAR', 'NC_SHORT', 'NC_INT', 'NC_FLOAT',
11
+ 'NC_DOUBLE', 'NC_DIMENSION', 'NC_VARIABLE', 'NC_ATTRIBUTE',
12
+ 'FILL_BYTE', 'FILL_CHAR', 'FILL_SHORT', 'FILL_INT', 'FILL_FLOAT',
13
+ 'FILL_DOUBLE', 'TYPEMAP', 'FILLMAP', 'REVERSE', 'NetCDFFile',
14
+ 'NetCDFVariable'
15
+ ]
16
+
17
+
18
+ def __dir__():
19
+ return __all__
20
+
21
+
22
+ def __getattr__(name):
23
+ return _sub_module_deprecation(sub_package="io", module="netcdf",
24
+ private_modules=["_netcdf"], all=__all__,
25
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc ADDED
Binary file (7.65 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_idl.cpython-310.pyc ADDED
Binary file (19.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc ADDED
Binary file (28.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_paths.cpython-310.pyc ADDED
Binary file (3.79 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc ADDED
Binary file (9.45 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/Transparent Busy.ani ADDED
Binary file (4.36 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_1d.sav ADDED
Binary file (2.63 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_3d.sav ADDED
Binary file (13.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_5d.sav ADDED
Binary file (7.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_7d.sav ADDED
Binary file (3.29 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_8d.sav ADDED
Binary file (13.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav ADDED
Binary file (2.69 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav ADDED
Binary file (3.26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav ADDED
Binary file (13.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav ADDED
Binary file (6.68 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav ADDED
Binary file (7.96 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav ADDED
Binary file (19.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav ADDED
Binary file (13.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_2.nc ADDED
Binary file (272 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_3_maskedvals.nc ADDED
Binary file (1.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat ADDED
Binary file (451 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-mixed.dat ADDED
Binary file (40 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat ADDED
Binary file (888 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat ADDED
Binary file (26.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat ADDED
Binary file (16 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat ADDED
Binary file (48 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat ADDED
Binary file (128 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat ADDED
Binary file (448 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat ADDED
Binary file (13.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat ADDED
Binary file (12 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat ADDED
Binary file (28 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat ADDED
Binary file (36 Bytes). View file