applied-ai-018 committed on
Commit
5e03ef2
·
verified ·
1 Parent(s): 5ea1893

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/scipy/datasets/__init__.py +90 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/datasets/_download_all.py +57 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/datasets/_fetchers.py +220 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/datasets/_registry.py +26 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/datasets/_utils.py +81 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py +123 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/__init__.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_fortran.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_idl.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_mmio.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_netcdf.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/harwell_boeing.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/idl.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/netcdf.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/wavfile.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__pycache__/__init__.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__pycache__/_fortran_format_parser.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__pycache__/hb.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__init__.py +28 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__pycache__/__init__.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__pycache__/_arffread.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__pycache__/arffread.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/_arffread.py +907 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/arffread.py +28 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/__init__.py +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/__pycache__/test_arffread.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/iris.arff +225 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/missing.arff +8 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff +11 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal.arff +13 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal_spaces.arff +13 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test1.arff +10 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test11.arff +11 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff +15 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test3.arff +6 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test4.arff +11 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test5.arff +26 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test6.arff +12 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test7.arff +15 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test8.arff +12 -0
env-llmeval/lib/python3.10/site-packages/scipy/datasets/__init__.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ================================
3
+ Datasets (:mod:`scipy.datasets`)
4
+ ================================
5
+
6
+ .. currentmodule:: scipy.datasets
7
+
8
+ Dataset Methods
9
+ ===============
10
+
11
+ .. autosummary::
12
+ :toctree: generated/
13
+
14
+ ascent
15
+ face
16
+ electrocardiogram
17
+
18
+ Utility Methods
19
+ ===============
20
+
21
+ .. autosummary::
22
+ :toctree: generated/
23
+
24
+ download_all -- Download all the dataset files to specified path.
25
+ clear_cache -- Clear cached dataset directory.
26
+
27
+
28
+ Usage of Datasets
29
+ =================
30
+
31
+ SciPy dataset methods can be simply called as follows: ``'<dataset-name>()'``
32
+ This downloads the dataset files over the network once, and saves the cache,
33
+ before returning a `numpy.ndarray` object representing the dataset.
34
+
35
+ Note that the return data structure and data type might be different for
36
+ different dataset methods. For a more detailed example on usage, please look
37
+ into the particular dataset method documentation above.
38
+
39
+
40
+ How dataset retrieval and storage works
41
+ =======================================
42
+
43
+ SciPy dataset files are stored within individual github repositories under the
44
+ SciPy GitHub organization, following a naming convention as
45
+ ``'dataset-<name>'``, for example `scipy.datasets.face` files live at
46
+ https://github.com/scipy/dataset-face. The `scipy.datasets` submodule utilizes
47
+ and depends on `Pooch <https://www.fatiando.org/pooch/latest/>`_, a Python
48
+ package built to simplify fetching data files. Pooch uses these repos to
49
+ retrieve the respective dataset files when calling the dataset function.
50
+
51
+ A registry of all the datasets, essentially a mapping of filenames with their
52
+ SHA256 hash and repo urls are maintained, which Pooch uses to handle and verify
53
+ the downloads on function call. After downloading the dataset once, the files
54
+ are saved in the system cache directory under ``'scipy-data'``.
55
+
56
+ Dataset cache locations may vary on different platforms.
57
+
58
+ For macOS::
59
+
60
+ '~/Library/Caches/scipy-data'
61
+
62
+ For Linux and other Unix-like platforms::
63
+
64
+ '~/.cache/scipy-data' # or the value of the XDG_CACHE_HOME env var, if defined
65
+
66
+ For Windows::
67
+
68
+ 'C:\\Users\\<user>\\AppData\\Local\\<AppAuthor>\\scipy-data\\Cache'
69
+
70
+
71
+ In environments with constrained network connectivity for various security
72
+ reasons or on systems without continuous internet connections, one may manually
73
+ load the cache of the datasets by placing the contents of the dataset repo in
74
+ the above mentioned cache directory to avoid fetching dataset errors without
75
+ the internet connectivity.
76
+
77
+ """
78
+
79
+
80
+ from ._fetchers import face, ascent, electrocardiogram
81
+ from ._download_all import download_all
82
+ from ._utils import clear_cache
83
+
84
+ __all__ = ['ascent', 'electrocardiogram', 'face',
85
+ 'download_all', 'clear_cache']
86
+
87
+
88
+ from scipy._lib._testutils import PytestTester
89
+ test = PytestTester(__name__)
90
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.98 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc ADDED
Binary file (6.31 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc ADDED
Binary file (771 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/datasets/_download_all.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Platform independent script to download all the
3
+ `scipy.datasets` module data files.
4
+ This doesn't require a full scipy build.
5
+
6
+ Run: python _download_all.py <download_dir>
7
+ """
8
+
9
+ import argparse
10
+ try:
11
+ import pooch
12
+ except ImportError:
13
+ pooch = None
14
+
15
+
16
+ if __package__ is None or __package__ == '':
17
+ # Running as python script, use absolute import
18
+ import _registry # type: ignore
19
+ else:
20
+ # Running as python module, use relative import
21
+ from . import _registry
22
+
23
+
24
def download_all(path=None):
    """
    Download every data file registered for the `scipy.datasets` module.

    Parameters
    ----------
    path : str, optional
        Directory path to download all the dataset files.
        If None, default to the system cache_dir detected by pooch.

    Raises
    ------
    ImportError
        If the optional dependency ``pooch`` is not installed.
    """
    if pooch is None:
        raise ImportError("Missing optional dependency 'pooch' required "
                          "for scipy.datasets module. Please use pip or "
                          "conda to install 'pooch'.")
    if path is None:
        # Fall back to pooch's per-platform cache location.
        path = pooch.os_cache('scipy-data')
    # The registry maps filename -> SHA256; registry_urls maps
    # filename -> download URL. pooch verifies each download against
    # its hash.
    for fname, known_hash in _registry.registry.items():
        pooch.retrieve(url=_registry.registry_urls[fname],
                       known_hash=known_hash,
                       fname=fname, path=path)
45
+
46
+
47
def main():
    """Command-line entry point: download all SciPy dataset files.

    Accepts one optional positional argument, the destination directory.
    """
    parser = argparse.ArgumentParser(description='Download SciPy data files.')
    # Use None as the default rather than calling pooch.os_cache() here:
    # the old eager default raised AttributeError at argument-parsing time
    # when pooch was not installed, masking the intended ImportError that
    # download_all() raises with installation instructions.
    parser.add_argument("path", nargs='?', type=str,
                        default=None,
                        help="Directory path to download all the data files. "
                             "Defaults to the system cache directory "
                             "detected by pooch.")
    args = parser.parse_args()
    download_all(args.path)


if __name__ == "__main__":
    main()
env-llmeval/lib/python3.10/site-packages/scipy/datasets/_fetchers.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy import array, frombuffer, load
2
+ from ._registry import registry, registry_urls
3
+
4
+ try:
5
+ import pooch
6
+ except ImportError:
7
+ pooch = None
8
+ data_fetcher = None
9
+ else:
10
+ data_fetcher = pooch.create(
11
+ # Use the default cache folder for the operating system
12
+ # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to
13
+ # select an appropriate directory for the cache on each platform.
14
+ path=pooch.os_cache("scipy-data"),
15
+
16
+ # The remote data is on Github
17
+ # base_url is a required param, even though we override this
18
+ # using individual urls in the registry.
19
+ base_url="https://github.com/scipy/",
20
+ registry=registry,
21
+ urls=registry_urls
22
+ )
23
+
24
+
25
def fetch_data(dataset_name, data_fetcher=data_fetcher):
    """Return the local path of ``dataset_name``, downloading it if needed.

    Raises ImportError when pooch (and hence ``data_fetcher``) is missing.
    """
    if data_fetcher is not None:
        # "fetch" downloads on first use and afterwards returns the
        # full path of the locally cached data file.
        return data_fetcher.fetch(dataset_name)
    raise ImportError("Missing optional dependency 'pooch' required "
                      "for scipy.datasets module. Please use pip or "
                      "conda to install 'pooch'.")
32
+
33
+
34
def ascent():
    """
    Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy
    use in demos.

    The image is derived from accent-to-the-top.jpg at
    http://www.public-domain-image.com/people-public-domain-images-pictures/

    Parameters
    ----------
    None

    Returns
    -------
    ascent : ndarray
        convenient image to use for testing and demonstration

    Examples
    --------
    >>> import scipy.datasets
    >>> ascent = scipy.datasets.ascent()
    >>> ascent.shape
    (512, 512)
    >>> ascent.max()
    255

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(ascent)
    >>> plt.show()

    """
    import pickle

    # Pooch downloads the file on the first call and serves the locally
    # cached (hash-verified) copy on every later call.
    fname = fetch_data("ascent.dat")
    with open(fname, 'rb') as f:
        # NOTE(review): unpickling data fetched from the scipy-hosted
        # repo; integrity is checked by pooch against the registry hash.
        raw = pickle.load(f)
    return array(raw)
76
+
77
+
78
def electrocardiogram():
    """
    Load an electrocardiogram as an example for a 1-D signal.

    The returned signal is a 5 minute long electrocardiogram (ECG), a medical
    recording of the heart's electrical activity, sampled at 360 Hz.

    Returns
    -------
    ecg : ndarray
        The electrocardiogram in millivolt (mV) sampled at 360 Hz.

    Notes
    -----
    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
    PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
    heartbeats as well as pathological changes.

    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208

    .. versionadded:: 1.1.0

    References
    ----------
    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
           (PMID: 11446209); :doi:`10.13026/C2F305`
    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
           PhysioToolkit, and PhysioNet: Components of a New Research Resource
           for Complex Physiologic Signals. Circulation 101(23):e215-e220;
           :doi:`10.1161/01.CIR.101.23.e215`

    Examples
    --------
    >>> from scipy.datasets import electrocardiogram
    >>> ecg = electrocardiogram()
    >>> ecg
    array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
    >>> ecg.shape, ecg.mean(), ecg.std()
    ((108000,), -0.16510875, 0.5992473991177294)

    As stated the signal features several areas with a different morphology.
    E.g., the first few seconds show the electrical activity of a heart in
    normal sinus rhythm as seen below.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fs = 360
    >>> time = np.arange(ecg.size) / fs
    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(9, 10.2)
    >>> plt.ylim(-1, 1.5)
    >>> plt.show()

    After second 16, however, the first premature ventricular contractions,
    also called extrasystoles, appear. These have a different morphology
    compared to typical heartbeats. The difference can easily be observed
    in the following plot.

    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(46.5, 50)
    >>> plt.ylim(-2, 1.5)
    >>> plt.show()

    At several points large artifacts disturb the recording, e.g.:

    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(207, 215)
    >>> plt.ylim(-2, 3.5)
    >>> plt.show()

    Finally, examining the power spectrum reveals that most of the biosignal is
    made up of lower frequencies. At 60 Hz the noise induced by the mains
    electricity can be clearly observed.

    >>> from scipy.signal import welch
    >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
    >>> plt.semilogy(f, Pxx)
    >>> plt.xlabel("Frequency in Hz")
    >>> plt.ylabel("Power spectrum of the ECG in mV**2")
    >>> plt.xlim(f[[0, -1]])
    >>> plt.show()
    """
    fname = fetch_data("ecg.dat")
    # numpy.load returns an archive object that supports the context
    # manager protocol; pull out the raw ADC samples stored under "ecg".
    with load(fname) as file:
        raw = file["ecg"].astype(int)  # np.uint16 -> int
    # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
    return (raw - 1024) / 200.0
175
+
176
+
177
def face(gray=False):
    """
    Get a 1024 x 768, color image of a raccoon face.

    raccoon-procyon-lotor.jpg at http://www.public-domain-image.com

    Parameters
    ----------
    gray : bool, optional
        If True return 8-bit grey-scale image, otherwise return a color image

    Returns
    -------
    face : ndarray
        image of a raccoon face

    Examples
    --------
    >>> import scipy.datasets
    >>> face = scipy.datasets.face()
    >>> face.shape
    (768, 1024, 3)
    >>> face.max()
    255
    >>> face.dtype
    dtype('uint8')

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(face)
    >>> plt.show()

    """
    import bz2

    fname = fetch_data("face.dat")
    with open(fname, 'rb') as f:
        compressed = f.read()
    # The cached file is bz2-compressed raw RGB bytes.
    img = frombuffer(bz2.decompress(compressed), dtype='uint8')
    img.shape = (768, 1024, 3)
    if gray is True:
        # Weighted channel sum converts RGB to an 8-bit grayscale image.
        img = (0.21 * img[:, :, 0] + 0.71 * img[:, :, 1] +
               0.07 * img[:, :, 2]).astype('uint8')
    return img
env-llmeval/lib/python3.10/site-packages/scipy/datasets/_registry.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
##########################################################################
# This file serves as the dataset registry for SciPy Datasets SubModule.
##########################################################################


# Maps each dataset filename to its SHA256 checksum; pooch verifies every
# download against these values.
# To generate the SHA256 hash, use the command
# openssl sha256 <filename>
registry = {
    "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2",
    "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf",
    "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886"
}

# Maps each dataset filename to the URL it is fetched from (one GitHub
# repo per dataset under the scipy organization).
registry_urls = {
    "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat",
    "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat",
    "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat"
}

# dataset method mapping with their associated filenames
# <method_name> : ["filename1", "filename2", ...]
# Used by clear_cache() to know which files belong to which dataset method.
method_files_map = {
    "ascent": ["ascent.dat"],
    "electrocardiogram": ["ecg.dat"],
    "face": ["face.dat"]
}
env-llmeval/lib/python3.10/site-packages/scipy/datasets/_utils.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ from ._registry import method_files_map
4
+
5
+ try:
6
+ import platformdirs
7
+ except ImportError:
8
+ platformdirs = None # type: ignore[assignment]
9
+
10
+
11
def _clear_cache(datasets, cache_dir=None, method_map=None):
    """Remove cached dataset files.

    Parameters
    ----------
    datasets : callable or list/tuple of callable or None
        Dataset method(s) whose files should be removed. ``None`` wipes
        the whole cache directory.
    cache_dir : str, optional
        Cache directory; defaults to the platform cache for "scipy-data".
    method_map : dict, optional
        Mapping of method name -> list of filenames; defaults to the
        SciPy datasets registry mapping.
    """
    if method_map is None:
        # Default to the SciPy Datasets method -> files mapping.
        method_map = method_files_map
    if cache_dir is None:
        if platformdirs is None:
            # platformdirs ships as a pooch dependency.
            raise ImportError("Missing optional dependency 'pooch' required "
                              "for scipy.datasets module. Please use pip or "
                              "conda to install 'pooch'.")
        cache_dir = platformdirs.user_cache_dir("scipy-data")

    if not os.path.exists(cache_dir):
        print(f"Cache Directory {cache_dir} doesn't exist. Nothing to clear.")
        return

    if datasets is None:
        # No specific dataset requested: drop the entire cache directory.
        print(f"Cleaning the cache directory {cache_dir}!")
        shutil.rmtree(cache_dir)
        return

    # Normalize a single callable to a one-element list.
    if not isinstance(datasets, (list, tuple)):
        datasets = [datasets, ]
    for dataset in datasets:
        assert callable(dataset)
        dataset_name = dataset.__name__  # Name of the dataset method
        if dataset_name not in method_map:
            raise ValueError(f"Dataset method {dataset_name} doesn't "
                             "exist. Please check if the passed dataset "
                             "is a subset of the following dataset "
                             f"methods: {list(method_map.keys())}")

        for file in method_map[dataset_name]:
            data_filepath = os.path.join(cache_dir, file)
            if os.path.exists(data_filepath):
                print("Cleaning the file "
                      f"{os.path.split(data_filepath)[1]} "
                      f"for dataset {dataset_name}")
                os.remove(data_filepath)
            else:
                print(f"Path {data_filepath} doesn't exist. "
                      "Nothing to clear.")
56
+
57
+
58
def clear_cache(datasets=None):
    """
    Cleans the scipy datasets cache directory.

    If a scipy.datasets method or a list/tuple of the same is
    provided, then clear_cache removes all the data files
    associated to the passed dataset method callable(s).

    By default, it removes all the cached data files.

    Parameters
    ----------
    datasets : callable or list/tuple of callable or None

    Examples
    --------
    >>> from scipy import datasets
    >>> ascent_array = datasets.ascent()
    >>> ascent_array.shape
    (512, 512)
    >>> datasets.clear_cache([datasets.ascent])
    Cleaning the file ascent.dat for dataset ascent
    """
    # Path resolution, validation and deletion all live in the
    # private helper; this public wrapper only fixes the defaults.
    _clear_cache(datasets)
env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc ADDED
Binary file (3.82 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy.datasets._registry import registry
2
+ from scipy.datasets._fetchers import data_fetcher
3
+ from scipy.datasets._utils import _clear_cache
4
+ from scipy.datasets import ascent, face, electrocardiogram, download_all
5
+ from numpy.testing import assert_equal, assert_almost_equal
6
+ import os
7
+ import pytest
8
+
9
+ try:
10
+ import pooch
11
+ except ImportError:
12
+ raise ImportError("Missing optional dependency 'pooch' required "
13
+ "for scipy.datasets module. Please use pip or "
14
+ "conda to install 'pooch'.")
15
+
16
+
17
+ data_dir = data_fetcher.path # type: ignore
18
+
19
+
20
def _has_hash(path, expected_hash):
    """Return True iff *path* exists and matches *expected_hash*."""
    # Short-circuit keeps pooch.file_hash from being called on a
    # missing file.
    return os.path.exists(path) and pooch.file_hash(path) == expected_hash
25
+
26
+
27
class TestDatasets:
    """End-to-end checks of the dataset fetchers against the registry."""

    @pytest.fixture(scope='module', autouse=True)
    def test_download_all(self):
        # This fixture requires INTERNET CONNECTION

        # test_setup phase: prime the cache once for the whole module.
        download_all()

        yield

    def _check_cached_hash(self, fname):
        # The cached file must match its registry SHA256 checksum.
        assert _has_hash(os.path.join(data_dir, fname), registry[fname])

    def test_existence_all(self):
        # Every registered file should now be present in the cache dir.
        assert len(os.listdir(data_dir)) >= len(registry)

    def test_ascent(self):
        assert_equal(ascent().shape, (512, 512))
        self._check_cached_hash("ascent.dat")

    def test_face(self):
        assert_equal(face().shape, (768, 1024, 3))
        self._check_cached_hash("face.dat")

    def test_electrocardiogram(self):
        # Test shape, dtype and stats of signal
        ecg = electrocardiogram()
        assert_equal(ecg.dtype, float)
        assert_equal(ecg.shape, (108000,))
        assert_almost_equal(ecg.mean(), -0.16510875)
        assert_almost_equal(ecg.std(), 0.5992473991177294)
        self._check_cached_hash("ecg.dat")
66
+
67
+
68
def test_clear_cache(tmp_path):
    # `tmp_path` is a pytest fixture; it handles cleanup automatically.
    dummy_basepath = tmp_path / "dummy_cache_dir"
    dummy_basepath.mkdir()

    # Create four dummy dataset files, one per dummy dataset method.
    dummy_method_map = {f"data{i}": [f"data{i}.dat"] for i in range(4)}
    for i in range(4):
        (dummy_basepath / f"data{i}.dat").write_text("")

    # Clearing a single dataset method (data0) removes only its file;
    # this also exercises passing a bare callable instead of a list.
    def data0():
        pass
    _clear_cache(datasets=data0, cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath / "data0.dat")

    # Clearing several dataset methods at once (data1 and data2).
    def data1():
        pass

    def data2():
        pass
    _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath / "data1.dat")
    assert not os.path.exists(dummy_basepath / "data2.dat")

    # A method (data4) may own several files ("data4_0.dat" and
    # "data4_1.dat"); clearing the method removes all of them.
    def data4():
        pass
    for fname in ("data4_0.dat", "data4_1.dat"):
        (dummy_basepath / fname).write_text("")
    dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
    _clear_cache(datasets=[data4], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath / "data4_0.dat")
    assert not os.path.exists(dummy_basepath / "data4_1.dat")

    # An unknown dataset method must raise ValueError since it is
    # absent from dummy_method_map.
    def data5():
        pass
    with pytest.raises(ValueError):
        _clear_cache(datasets=[data5], cache_dir=dummy_basepath,
                     method_map=dummy_method_map)

    # datasets=None wipes the whole cache directory.
    _clear_cache(datasets=None, cache_dir=dummy_basepath)
    assert not os.path.exists(dummy_basepath)
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.09 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_fortran.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_idl.cpython-310.pyc ADDED
Binary file (19.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_mmio.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/_netcdf.cpython-310.pyc ADDED
Binary file (30.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/harwell_boeing.cpython-310.pyc ADDED
Binary file (736 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/idl.cpython-310.pyc ADDED
Binary file (654 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/netcdf.cpython-310.pyc ADDED
Binary file (878 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/__pycache__/wavfile.cpython-310.pyc ADDED
Binary file (21.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (650 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__pycache__/_fortran_format_parser.cpython-310.pyc ADDED
Binary file (8.91 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/_harwell_boeing/__pycache__/hb.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module to read ARFF files
3
+ =========================
4
+ ARFF is the standard data format for WEKA.
5
+ It is a text file format which support numerical, string and data values.
6
+ The format can also represent missing data and sparse data.
7
+
8
+ Notes
9
+ -----
10
+ The ARFF support in ``scipy.io`` provides file reading functionality only.
11
+ For more extensive ARFF functionality, see `liac-arff
12
+ <https://github.com/renatopp/liac-arff>`_.
13
+
14
+ See the `WEKA website <http://weka.wikispaces.com/ARFF>`_
15
+ for more details about the ARFF format and available datasets.
16
+
17
+ """
18
+ from ._arffread import *
19
+ from . import _arffread
20
+
21
+ # Deprecated namespaces, to be removed in v2.0.0
22
+ from .import arffread
23
+
24
+ __all__ = _arffread.__all__ + ['arffread']
25
+
26
+ from scipy._lib._testutils import PytestTester
27
+ test = PytestTester(__name__)
28
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (937 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__pycache__/_arffread.cpython-310.pyc ADDED
Binary file (22.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/__pycache__/arffread.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/_arffread.py ADDED
@@ -0,0 +1,907 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Last Change: Mon Aug 20 08:00 PM 2007 J
2
+ import re
3
+ import datetime
4
+
5
+ import numpy as np
6
+
7
+ import csv
8
+ import ctypes
9
+
10
+ """A module to read arff files."""
11
+
12
+ __all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
13
+
14
+ # An Arff file is basically two parts:
15
+ # - header
16
+ # - data
17
+ #
18
+ # A header has each of its components starting by @META where META is one of
19
+ # the keyword (attribute of relation, for now).
20
+
21
+ # TODO:
22
+ # - both integer and reals are treated as numeric -> the integer info
23
+ # is lost!
24
+ # - Replace ValueError by ParseError or something
25
+
26
+ # We know can handle the following:
27
+ # - numeric and nominal attributes
28
+ # - missing values for numeric attributes
29
+
30
# Any header/meta line starts with '@' (possibly after whitespace).
r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^\s*@\S*')
# Case-insensitive '@data', '@relation <name>' and '@attribute <rest>'.
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')

# Nominal definition: the possible values framed between braces.
r_nominal = re.compile(r'{(.+)}')
# Date declaration: optional quotes around a SimpleDateFormat pattern.
r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")

# To get attributes name enclosed with ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
48
+
49
+ # ------------------------
50
+ # Module defined exception
51
+ # ------------------------
52
+
53
+
54
class ArffError(OSError):
    """Base error for problems reading or parsing an ARFF file."""
    pass
56
+
57
+
58
class ParseArffError(ArffError):
    """Error raised when ARFF content cannot be parsed."""
    pass
60
+
61
+
62
+ # ----------
63
+ # Attributes
64
+ # ----------
65
class Attribute:
    """Abstract base class for ARFF attribute types.

    Subclasses override ``parse_attribute`` (recognize their type in an
    ``@attribute`` declaration) and ``parse_data`` (convert one raw field).
    """

    # Human-readable type name; overridden by each subclass.
    type_name = None

    def __init__(self, name):
        self.name = name
        # Range of admissible values (e.g. nominal choices); None if open.
        self.range = None
        # NumPy dtype used for this column when building the record array.
        self.dtype = np.object_

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.
        """
        return None

    def parse_data(self, data_str):
        """
        Parse a value of this type.
        """
        return None

    def __str__(self):
        """
        Return a '<name>,<type_name>' summary of this attribute.
        """
        return self.name + ',' + self.type_name
93
+
94
+
95
class NominalAttribute(Attribute):
    """ARFF nominal attribute: a value drawn from a fixed set of strings."""

    type_name = 'nominal'

    def __init__(self, name, values):
        super().__init__(name)
        self.values = values
        self.range = values
        # Bytes dtype wide enough for the longest admissible value.
        self.dtype = (np.bytes_, max(len(v) for v in values))

    @staticmethod
    def _get_nom_val(atrv):
        """Given a string containing a nominal type, returns a tuple of the
        possible values.

        A nominal type is defined as something framed between braces ({}).

        Parameters
        ----------
        atrv : str
            Nominal type definition

        Returns
        -------
        poss_vals : tuple
            possible values

        Examples
        --------
        >>> from scipy.io.arff._arffread import NominalAttribute
        >>> NominalAttribute._get_nom_val("{floup, bouga, fl, ratata}")
        ('floup', 'bouga', 'fl', 'ratata')
        """
        match = r_nominal.match(atrv)
        if not match:
            raise ValueError("This does not look like a nominal string")
        parsed, _ = split_data_line(match.group(1))
        return tuple(parsed)

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For nominal attributes, the attribute string would be like '{<attr_1>,
        <attr2>, <attr_3>}'.
        """
        if attr_string[0] != '{':
            return None
        return cls(name, cls._get_nom_val(attr_string))

    def parse_data(self, data_str):
        """
        Return *data_str* unchanged when it is an admissible value ('?'
        marks a missing value); raise ValueError otherwise.
        """
        if data_str in self.values or data_str == '?':
            return data_str
        raise ValueError(f"{str(data_str)} value not in {str(self.values)}")

    def __str__(self):
        return self.name + ",{" + ",".join(self.values) + "}"
168
+
169
+
170
class NumericAttribute(Attribute):
    """ARFF numeric attribute ('numeric', 'int' or 'real').

    Values are parsed to float; '?' marks a missing value (NaN).
    """

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'numeric'
        self.dtype = np.float64

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For numeric attributes, the attribute string would be like
        'numeric' or 'int' or 'real'.
        """
        declaration = attr_string.lower().strip()
        if declaration.startswith(('numeric', 'int', 'real')):
            return cls(name)
        return None

    def parse_data(self, data_str):
        """
        Parse a value of this type.

        Parameters
        ----------
        data_str : str
           string to convert

        Returns
        -------
        f : float
           where float can be nan

        Examples
        --------
        >>> from scipy.io.arff._arffread import NumericAttribute
        >>> atr = NumericAttribute('atr')
        >>> atr.parse_data('1')
        1.0
        >>> atr.parse_data('1\\n')
        1.0
        >>> atr.parse_data('?\\n')
        nan
        """
        return np.nan if '?' in data_str else float(data_str)

    def _basic_stats(self, data):
        # n/(n-1) correction factor applied to the population std.
        factor = data.size * 1. / (data.size - 1)
        return (np.nanmin(data), np.nanmax(data),
                np.mean(data), np.std(data) * factor)
230
+
231
+
232
class StringAttribute(Attribute):
    """ARFF string attribute (recognized here; loadarff rejects it later)."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'string'

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For string attributes, the attribute string would be like
        'string'.
        """
        declaration = attr_string.lower().strip()
        return cls(name) if declaration.startswith('string') else None
254
+
255
+
256
class DateAttribute(Attribute):
    """ARFF date attribute.

    The header declares dates with a Java ``SimpleDateFormat`` pattern,
    e.g. ``@attribute ts date "yyyy-MM-dd HH:mm:ss"``.  The pattern is
    translated to a strptime format and values parse to numpy.datetime64.
    """

    def __init__(self, name, date_format, datetime_unit):
        super().__init__(name)
        # strptime-style format used by parse_data.
        self.date_format = date_format
        # Finest datetime64 unit in the format: 'Y','M','D','h','m' or 's'.
        self.datetime_unit = datetime_unit
        self.type_name = 'date'
        self.range = date_format
        self.dtype = np.datetime64(0, self.datetime_unit)

    @staticmethod
    def _get_date_format(atrv):
        """Translate a Java SimpleDateFormat pattern to a strptime format.

        Returns
        -------
        pattern : str
            Equivalent strptime pattern.
        datetime_unit : str
            Finest numpy datetime64 unit appearing in the pattern.

        Raises
        ------
        ValueError
            If *atrv* is not a date declaration, uses a time zone
            (unsupported), or contains no recognized date/time token.
        """
        m = r_date.match(atrv)
        if m:
            pattern = m.group(1).strip()
            # convert time pattern from Java's SimpleDateFormat to C's format
            datetime_unit = None
            if "yyyy" in pattern:
                pattern = pattern.replace("yyyy", "%Y")
                datetime_unit = "Y"
            elif "yy" in pattern:
                # BUG FIX: this branch used to read ``elif "yy":`` — a
                # non-empty string is always truthy, so patterns without any
                # year token were silently accepted with unit 'Y' instead of
                # reaching the "Invalid or unsupported" error below.
                pattern = pattern.replace("yy", "%y")
                datetime_unit = "Y"
            if "MM" in pattern:
                pattern = pattern.replace("MM", "%m")
                datetime_unit = "M"
            if "dd" in pattern:
                pattern = pattern.replace("dd", "%d")
                datetime_unit = "D"
            if "HH" in pattern:
                pattern = pattern.replace("HH", "%H")
                datetime_unit = "h"
            if "mm" in pattern:
                pattern = pattern.replace("mm", "%M")
                datetime_unit = "m"
            if "ss" in pattern:
                pattern = pattern.replace("ss", "%S")
                datetime_unit = "s"
            if "z" in pattern or "Z" in pattern:
                raise ValueError("Date type attributes with time zone not "
                                 "supported, yet")

            if datetime_unit is None:
                raise ValueError("Invalid or unsupported date format")

            return pattern, datetime_unit
        else:
            raise ValueError("Invalid or no date format")

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For date attributes, the attribute string would be like
        'date <format>'.
        """
        attr_string_lower = attr_string.lower().strip()

        if attr_string_lower[:len('date')] == 'date':
            # Parse the original (case-preserving) string for the format.
            date_format, datetime_unit = cls._get_date_format(attr_string)
            return cls(name, date_format, datetime_unit)
        else:
            return None

    def parse_data(self, data_str):
        """
        Parse one raw field into a numpy.datetime64 ('?' becomes NaT).
        """
        date_str = data_str.strip().strip("'").strip('"')
        if date_str == '?':
            return np.datetime64('NaT', self.datetime_unit)
        else:
            dt = datetime.datetime.strptime(date_str, self.date_format)
            return np.datetime64(dt).astype(
                "datetime64[%s]" % self.datetime_unit)

    def __str__(self):
        return super().__str__() + ',' + self.date_format
337
+
338
+
339
class RelationalAttribute(Attribute):
    """ARFF relational attribute: a nested table of sub-attributes packed
    into a single field."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'relational'
        self.dtype = np.object_
        # Nested Attribute objects, appended by read_relational_attribute.
        self.attributes = []
        # csv dialect sniffed on the first nested row, then reused.
        self.dialect = None

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For relational attributes, the attribute string would be like
        'relational'.
        """

        attr_string_lower = attr_string.lower().strip()

        if attr_string_lower[:len('relational')] == 'relational':
            return cls(name)
        else:
            return None

    def parse_data(self, data_str):
        # Copy-pasted
        elems = list(range(len(self.attributes)))

        # The whole nested table arrives as one escaped field; undo the
        # escaping so embedded '\n' separators become real newlines.
        escaped_string = data_str.encode().decode("unicode-escape")

        row_tuples = []

        for raw in escaped_string.split("\n"):
            row, self.dialect = split_data_line(raw, self.dialect)

            row_tuples.append(tuple(
                [self.attributes[i].parse_data(row[i]) for i in elems]))

        return np.array(row_tuples,
                        [(a.name, a.dtype) for a in self.attributes])

    def __str__(self):
        # One line per nested attribute, indented under the parent summary.
        return (super().__str__() + '\n\t' +
                '\n\t'.join(str(a) for a in self.attributes))
385
+
386
+
387
+ # -----------------
388
+ # Various utilities
389
+ # -----------------
390
def to_attribute(name, attr_string):
    """Instantiate the first Attribute subclass recognizing *attr_string*.

    Raises ParseArffError when no known attribute type matches.
    """
    candidates = (NominalAttribute, NumericAttribute, DateAttribute,
                  StringAttribute, RelationalAttribute)

    parsed = next((attr for attr in
                   (klass.parse_attribute(name, attr_string)
                    for klass in candidates)
                   if attr is not None), None)
    if parsed is None:
        raise ParseArffError("unknown attribute %s" % attr_string)
    return parsed
400
+
401
+
402
def csv_sniffer_has_bug_last_field():
    """
    Checks if the bug https://bugs.python.org/issue30157 is unpatched.
    """
    # Computed lazily once and memoized on the function object.
    cached = getattr(csv_sniffer_has_bug_last_field, "has_bug", None)
    if cached is not None:
        return cached

    # The sniffer is buggy iff it fails to detect ' as the quote char here.
    sniffed = csv.Sniffer().sniff("3, 'a'")
    result = sniffed.quotechar != "'"
    csv_sniffer_has_bug_last_field.has_bug = result
    return result
416
+
417
+
418
def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters):
    """
    Workaround for the bug https://bugs.python.org/issue30157 if is unpatched.

    Mutates *dialect* in place (quotechar, delimiter, doublequote,
    skipinitialspace) when the sniffed line matched the buggy pattern.
    """
    if csv_sniffer_has_bug_last_field():
        # Reuses code from the csv module
        # The pattern the stock Sniffer mishandles: a quoted *last* field.
        right_regex = r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'  # noqa: E501

        # Try the Sniffer's candidate quoting patterns in its original order.
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?", # noqa: E501
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # .*?", # noqa: E501
                      right_regex, # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) # noqa: E501
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(sniff_line)
            if matches:
                break

        # If it does not match the expression that was bugged,
        # then this bug does not apply
        if restr != right_regex:
            return

        groupindex = regexp.groupindex

        # There is only one end of the string
        assert len(matches) == 1
        m = matches[0]

        # findall returns plain tuples; recover named groups by index.
        n = groupindex['quote'] - 1
        quote = m[n]

        n = groupindex['delim'] - 1
        delim = m[n]

        n = groupindex['space'] - 1
        space = bool(m[n])

        # Detect doubled quote characters used as escaping inside fields.
        dq_regexp = re.compile(
            rf"(({re.escape(delim)})|^)\W*{quote}[^{re.escape(delim)}\n]*{quote}[^{re.escape(delim)}\n]*{quote}\W*(({re.escape(delim)})|$)", re.MULTILINE  # noqa: E501
        )

        doublequote = bool(dq_regexp.search(sniff_line))

        dialect.quotechar = quote
        if delim in delimiters:
            dialect.delimiter = delim
        dialect.doublequote = doublequote
        dialect.skipinitialspace = space
466
+
467
+
468
def split_data_line(line, dialect=None):
    """Split one raw data line into its fields.

    Parameters
    ----------
    line : str
        Raw line from the file (may end with a newline).
    dialect : csv.Dialect, optional
        Dialect sniffed from a previous line; sniffed here when None.

    Returns
    -------
    row : list of str
        The parsed fields (empty for a blank line).
    dialect : csv.Dialect or None
        The dialect used, so callers can reuse it for subsequent lines.
    """
    delimiters = ",\t"

    # This can not be done in a per reader basis, and relational fields
    # can be HUGE
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))

    # Remove the line end if any.  (Bug fix: the previous ``line[-1]``
    # check raised IndexError on an empty string.)
    if line.endswith('\n'):
        line = line[:-1]

    # Remove potential trailing whitespace
    line = line.strip()

    # Nothing left to parse on a blank/whitespace-only line; previously
    # this crashed inside the sniffer / reader.
    if not line:
        return [], dialect

    sniff_line = line

    # Add a delimiter if none is present, so that the csv.Sniffer
    # does not complain for a single-field CSV.
    if not any(d in line for d in delimiters):
        sniff_line += ","

    if dialect is None:
        dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters)
        workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line,
                                              dialect=dialect,
                                              delimiters=delimiters)

    row = next(csv.reader([line], dialect))

    return row, dialect
498
+
499
+
500
+ # --------------
501
+ # Parsing header
502
+ # --------------
503
def tokenize_attribute(iterable, attribute):
    """Parse a raw string in header (e.g., starts by @attribute).

    Given a raw string attribute, try to get the name and type of the
    attribute. Constraints:

    * The first line must start with @attribute (case insensitive, and
      space like characters before @attribute are allowed)
    * Works also if the attribute is spread on multilines.
    * Works if empty lines or comments are in between

    Parameters
    ----------
    attribute : str
       the attribute string.

    Returns
    -------
    name : str
       name of the attribute
    value : str
       value of the attribute
    next : str
       next line to be parsed

    Examples
    --------
    If attribute is a string defined in python as r"floupi real", will
    return floupi as name, and real as value.

    >>> from scipy.io.arff._arffread import tokenize_attribute
    >>> iterable = iter([0] * 10) # dummy iterator
    >>> tokenize_attribute(iterable, r"@attribute floupi real")
    ('floupi', 'real', 0)

    If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
    and real as value.

    >>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real   ")
    ('floupi 2', 'real', 0)

    """
    sattr = attribute.strip()
    mattr = r_attribute.match(sattr)
    if mattr:
        # atrv is everything after @attribute
        atrv = mattr.group(1)
        if r_comattrval.match(atrv):
            # Quoted attribute name (may contain spaces).
            name, type = tokenize_single_comma(atrv)
            next_item = next(iterable)
        elif r_wcomattrval.match(atrv):
            # Plain (unquoted) attribute name.
            name, type = tokenize_single_wcomma(atrv)
            next_item = next(iterable)
        else:
            # Not sure we should support this, as it does not seem supported by
            # weka.
            raise ValueError("multi line not supported yet")
    else:
        raise ValueError("First line unparsable: %s" % sattr)

    attribute = to_attribute(name, type)

    if type.lower() == 'relational':
        # Relational attributes nest further @attribute lines until '@end'.
        next_item = read_relational_attribute(iterable, attribute, next_item)
        # raise ValueError("relational attributes not supported yet")

    return attribute, next_item
570
+
571
+
572
def tokenize_single_comma(val):
    """Split a quoted-name attribute declaration into (name, type)."""
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    match = r_comattrval.match(val)
    if not match:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        attr_name = match.group(1).strip()
        attr_type = match.group(2).strip()
    except IndexError as e:
        raise ValueError("Error while tokenizing attribute") from e
    return attr_name, attr_type
585
+
586
+
587
def tokenize_single_wcomma(val):
    """Split an unquoted-name attribute declaration into (name, type)."""
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    match = r_wcomattrval.match(val)
    if not match:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        attr_name = match.group(1).strip()
        attr_type = match.group(2).strip()
    except IndexError as e:
        raise ValueError("Error while tokenizing attribute") from e
    return attr_name, attr_type
600
+
601
+
602
def read_relational_attribute(ofile, relational_attribute, i):
    """Read the nested attributes of a relational attribute"""

    # Matches the '@end <name>' line that closes the relational block.
    # NOTE(review): the attribute name is interpolated into the regex
    # unescaped — names containing regex metacharacters would misbehave;
    # confirm whether such names can occur.
    r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' +
                                  relational_attribute.name + r'\s*$')

    while not r_end_relational.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                # Parse the nested @attribute and advance past it.
                attr, i = tokenize_attribute(ofile, i)
                relational_attribute.attributes.append(attr)
            else:
                raise ValueError("Error parsing line %s" % i)
        else:
            # Skip comments/blank lines inside the relational block.
            i = next(ofile)

    # Consume the '@end' line and return the next line to process.
    i = next(ofile)
    return i
622
+
623
+
624
def read_header(ofile):
    """Read the header of the iterable ofile.

    Returns ``(relation, attributes)`` where *relation* is the @relation
    name (or None if absent) and *attributes* is the ordered list of parsed
    Attribute objects.  Leaves *ofile* positioned just past the @data line.
    """
    i = next(ofile)

    # Pass first comments
    while r_comment.match(i):
        i = next(ofile)

    # Header is everything up to DATA attribute ?
    relation = None
    attributes = []
    while not r_datameta.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                # tokenize_attribute also consumes the following line(s).
                attr, i = tokenize_attribute(ofile, i)
                attributes.append(attr)
            else:
                isrel = r_relation.match(i)
                if isrel:
                    relation = isrel.group(1)
                else:
                    raise ValueError("Error parsing line %s" % i)
                i = next(ofile)
        else:
            # Non-header lines (comments, blanks) are skipped.
            i = next(ofile)

    return relation, attributes
653
+
654
+
655
class MetaData:
    """Small container to keep useful information on a ARFF dataset.

    Knows about attributes names and types.

    Examples
    --------
    ::

        data, meta = loadarff('iris.arff')
        # This will print the attributes names of the iris.arff dataset
        for i in meta:
            print(i)
        # This works too
        meta.names()
        # Getting attribute type
        types = meta.types()

    Methods
    -------
    names
    types

    Notes
    -----
    Also maintains the list of attributes in order, i.e., doing for i in
    meta, where meta is an instance of MetaData, will return the
    different attribute names in the order they were defined.
    """

    def __init__(self, rel, attr):
        # Relation (dataset) name and an insertion-ordered mapping of
        # attribute name -> Attribute object.
        self.name = rel
        self._attributes = {attribute.name: attribute for attribute in attr}

    def __repr__(self):
        parts = ["Dataset: %s\n" % self.name]
        for attr_name, attribute in self._attributes.items():
            line = f"\t{attr_name}'s type is {attribute.type_name}"
            if attribute.range:
                line += ", range is %s" % str(attribute.range)
            parts.append(line + '\n')
        return "".join(parts)

    def __iter__(self):
        # Iterating a MetaData yields attribute names in definition order.
        return iter(self._attributes)

    def __getitem__(self, key):
        attribute = self._attributes[key]
        return (attribute.type_name, attribute.range)

    def names(self):
        """Return the list of attribute names.

        Returns
        -------
        attrnames : list of str
            The attribute names.
        """
        return list(self._attributes)

    def types(self):
        """Return the list of attribute types.

        Returns
        -------
        attr_types : list of str
            The attribute types.
        """
        return [attribute.type_name
                for attribute in self._attributes.values()]
727
+
728
+
729
def loadarff(f):
    """
    Read an arff file.

    The data is returned as a record array, which can be accessed much like
    a dictionary of NumPy arrays. For example, if one of the attributes is
    called 'pressure', then its first 10 data points can be accessed from the
    ``data`` record array like so: ``data['pressure'][0:10]``


    Parameters
    ----------
    f : file-like or str
       File-like object to read from, or filename to open.

    Returns
    -------
    data : record array
       The data of the arff file, accessible by attribute names.
    meta : `MetaData`
       Contains information about the arff file such as name and
       type of attributes, the relation (name of the dataset), etc.

    Raises
    ------
    ParseArffError
        This is raised if the given file is not ARFF-formatted.
    NotImplementedError
        The ARFF file has an attribute which is not supported yet.

    Notes
    -----

    This function should be able to read most arff files. Not
    implemented functionality include:

    * date type attributes
    * string type attributes

    It can read files with numeric and nominal attributes. It cannot read
    files with sparse data ({} in the file). However, this function can
    read files with missing data (? in the file), representing the data
    points as NaNs.

    Examples
    --------
    >>> from scipy.io import arff
    >>> from io import StringIO
    >>> content = \"\"\"
    ... @relation foo
    ... @attribute width numeric
    ... @attribute height numeric
    ... @attribute color {red,green,blue,yellow,black}
    ... @data
    ... 5.0,3.25,blue
    ... 4.5,3.75,green
    ... 3.0,4.00,red
    ... \"\"\"
    >>> f = StringIO(content)
    >>> data, meta = arff.loadarff(f)
    >>> data
    array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')],
          dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')])
    >>> meta
    Dataset: foo
    \twidth's type is numeric
    \theight's type is numeric
    \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black')

    """
    if hasattr(f, 'read'):
        # Already an open file-like object: use it and leave it open.
        ofile = f
    else:
        ofile = open(f)
    try:
        return _loadarff(ofile)
    finally:
        if ofile is not f:  # only close what we opened
            ofile.close()
808
+
809
+
810
def _loadarff(ofile):
    """Parse an already-open ARFF file object; returns (data, meta)."""
    # Parse the header file
    try:
        rel, attr = read_header(ofile)
    except ValueError as e:
        msg = "Error while parsing header, error was: " + str(e)
        raise ParseArffError(msg) from e

    # Check whether we have a string attribute (not supported yet)
    hasstr = False
    for a in attr:
        if isinstance(a, StringAttribute):
            hasstr = True

    meta = MetaData(rel, attr)

    # XXX The following code is not great
    # Build the type descriptor descr and the list of converters to convert
    # each attribute to the suitable type (which should match the one in
    # descr).

    # This can be used once we want to support integer as integer values and
    # not as numeric anymore (using masked arrays ?).

    if hasstr:
        # How to support string efficiently ? Ideally, we should know the max
        # size of the string before allocating the numpy array.
        raise NotImplementedError("String attributes not supported yet, sorry")

    ni = len(attr)

    def generator(row_iter, delim=','):
        # Yield one parsed record tuple per data line.
        # TODO: this is where we are spending time (~80%). I think things
        # could be made more efficiently:
        #   - We could for example "compile" the function, because some values
        #   do not change here.
        #   - The function to convert a line to dtyped values could also be
        #   generated on the fly from a string and be executed instead of
        #   looping.
        #   - The regex are overkill: for comments, checking that a line starts
        #   by % should be enough and faster, and for empty lines, same thing
        #   --> this does not seem to change anything.

        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))

        # Dialect sniffed on the first data line and reused afterwards.
        dialect = None
        for raw in row_iter:
            # We do not abstract skipping comments and empty lines for
            # performance reasons.
            if r_comment.match(raw) or r_empty.match(raw):
                continue

            row, dialect = split_data_line(raw, dialect)

            yield tuple([attr[i].parse_data(row[i]) for i in elems])

    a = list(generator(ofile))
    # No error should happen here: it is a bug otherwise
    data = np.array(a, [(a.name, a.dtype) for a in attr])
    return data, meta
873
+
874
+
875
+ # ----
876
+ # Misc
877
+ # ----
878
def basic_stats(data):
    """Return ``(nanmin, nanmax, mean, std * n/(n-1))`` of *data*.

    NOTE(review): the last value scales the population std by n/(n-1)
    rather than sqrt(n/(n-1)); kept as-is for output compatibility.
    """
    correction = data.size * 1. / (data.size - 1)
    mn = np.nanmin(data)
    mx = np.nanmax(data)
    return mn, mx, np.mean(data), np.std(data) * correction
881
+
882
+
883
def print_attribute(name, tp, data):
    """Print a one-line summary of attribute *name*.

    Numeric attributes get min/max/mean/std statistics; any other type is
    printed via its str() form.  Locals are renamed so the builtins
    ``type``, ``min`` and ``max`` are no longer shadowed; output is
    unchanged.
    """
    type_name = tp.type_name
    if type_name == 'numeric' or type_name == 'real' or type_name == 'integer':
        lo, hi, mean, std = basic_stats(data)
        print(f"{name},{type_name},{lo:f},{hi:f},{mean:f},{std:f}")
    else:
        print(str(tp))
890
+
891
+
892
def test_weka(filename):
    """Load *filename* and print a per-attribute summary (debug helper).

    NOTE(review): ``meta[i]`` returns a ``(type_name, range)`` tuple while
    ``print_attribute`` reads ``tp.type_name`` — this helper looks broken
    for numeric attributes; verify before relying on it.
    """
    data, meta = loadarff(filename)
    print(len(data.dtype))
    print(data.size)
    for i in meta:
        print_attribute(i, meta[i], data[i])


# make sure nose does not find this as a test
test_weka.__test__ = False
902
+
903
+
904
if __name__ == '__main__':
    # CLI debug entry point: summarize the ARFF file given as first argument.
    import sys
    filename = sys.argv[1]
    test_weka(filename)
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/arffread.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.io.arff` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'MetaData', 'loadarff', 'ArffError', 'ParseArffError',
9
+ 'r_meta', 'r_comment', 'r_empty', 'r_headerline',
10
+ 'r_datameta', 'r_relation', 'r_attribute', 'r_nominal',
11
+ 'r_date', 'r_comattrval', 'r_wcomattrval', 'Attribute',
12
+ 'NominalAttribute', 'NumericAttribute', 'StringAttribute',
13
+ 'DateAttribute', 'RelationalAttribute', 'to_attribute',
14
+ 'csv_sniffer_has_bug_last_field', 'workaround_csv_sniffer_bug_last_field',
15
+ 'split_data_line', 'tokenize_attribute', 'tokenize_single_comma',
16
+ 'tokenize_single_wcomma', 'read_relational_attribute', 'read_header',
17
+ 'basic_stats', 'print_attribute', 'test_weka'
18
+ ]
19
+
20
+
21
def __dir__():
    # Advertise only the deprecated public names of this shim module.
    return __all__
23
+
24
+
25
def __getattr__(name):
    # Delegate attribute access to the deprecation helper, which emits a
    # DeprecationWarning and forwards to scipy.io.arff._arffread.
    return _sub_module_deprecation(sub_package="io.arff", module="arffread",
                                   private_modules=["_arffread"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/__pycache__/test_arffread.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/iris.arff ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ % 1. Title: Iris Plants Database
2
+ %
3
+ % 2. Sources:
4
+ % (a) Creator: R.A. Fisher
5
+ % (b) Donor: Michael Marshall (MARSHALL%[email protected])
6
+ % (c) Date: July, 1988
7
+ %
8
+ % 3. Past Usage:
9
+ % - Publications: too many to mention!!! Here are a few.
10
+ % 1. Fisher,R.A. "The use of multiple measurements in taxonomic problems"
11
+ % Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions
12
+ % to Mathematical Statistics" (John Wiley, NY, 1950).
13
+ % 2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis.
14
+ % (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218.
15
+ % 3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System
16
+ % Structure and Classification Rule for Recognition in Partially Exposed
17
+ % Environments". IEEE Transactions on Pattern Analysis and Machine
18
+ % Intelligence, Vol. PAMI-2, No. 1, 67-71.
19
+ % -- Results:
20
+ % -- very low misclassification rates (0% for the setosa class)
21
+ % 4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE
22
+ % Transactions on Information Theory, May 1972, 431-433.
23
+ % -- Results:
24
+ % -- very low misclassification rates again
25
+ % 5. See also: 1988 MLC Proceedings, 54-64. Cheeseman et al's AUTOCLASS II
26
+ % conceptual clustering system finds 3 classes in the data.
27
+ %
28
+ % 4. Relevant Information:
29
+ % --- This is perhaps the best known database to be found in the pattern
30
+ % recognition literature. Fisher's paper is a classic in the field
31
+ % and is referenced frequently to this day. (See Duda & Hart, for
32
+ % example.) The data set contains 3 classes of 50 instances each,
33
+ % where each class refers to a type of iris plant. One class is
34
+ % linearly separable from the other 2; the latter are NOT linearly
35
+ % separable from each other.
36
+ % --- Predicted attribute: class of iris plant.
37
+ % --- This is an exceedingly simple domain.
38
+ %
39
+ % 5. Number of Instances: 150 (50 in each of three classes)
40
+ %
41
+ % 6. Number of Attributes: 4 numeric, predictive attributes and the class
42
+ %
43
+ % 7. Attribute Information:
44
+ % 1. sepal length in cm
45
+ % 2. sepal width in cm
46
+ % 3. petal length in cm
47
+ % 4. petal width in cm
48
+ % 5. class:
49
+ % -- Iris Setosa
50
+ % -- Iris Versicolour
51
+ % -- Iris Virginica
52
+ %
53
+ % 8. Missing Attribute Values: None
54
+ %
55
+ % Summary Statistics:
56
+ % Min Max Mean SD Class Correlation
57
+ % sepal length: 4.3 7.9 5.84 0.83 0.7826
58
+ % sepal width: 2.0 4.4 3.05 0.43 -0.4194
59
+ % petal length: 1.0 6.9 3.76 1.76 0.9490 (high!)
60
+ % petal width: 0.1 2.5 1.20 0.76 0.9565 (high!)
61
+ %
62
+ % 9. Class Distribution: 33.3% for each of 3 classes.
63
+
64
+ @RELATION iris
65
+
66
+ @ATTRIBUTE sepallength REAL
67
+ @ATTRIBUTE sepalwidth REAL
68
+ @ATTRIBUTE petallength REAL
69
+ @ATTRIBUTE petalwidth REAL
70
+ @ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica}
71
+
72
+ @DATA
73
+ 5.1,3.5,1.4,0.2,Iris-setosa
74
+ 4.9,3.0,1.4,0.2,Iris-setosa
75
+ 4.7,3.2,1.3,0.2,Iris-setosa
76
+ 4.6,3.1,1.5,0.2,Iris-setosa
77
+ 5.0,3.6,1.4,0.2,Iris-setosa
78
+ 5.4,3.9,1.7,0.4,Iris-setosa
79
+ 4.6,3.4,1.4,0.3,Iris-setosa
80
+ 5.0,3.4,1.5,0.2,Iris-setosa
81
+ 4.4,2.9,1.4,0.2,Iris-setosa
82
+ 4.9,3.1,1.5,0.1,Iris-setosa
83
+ 5.4,3.7,1.5,0.2,Iris-setosa
84
+ 4.8,3.4,1.6,0.2,Iris-setosa
85
+ 4.8,3.0,1.4,0.1,Iris-setosa
86
+ 4.3,3.0,1.1,0.1,Iris-setosa
87
+ 5.8,4.0,1.2,0.2,Iris-setosa
88
+ 5.7,4.4,1.5,0.4,Iris-setosa
89
+ 5.4,3.9,1.3,0.4,Iris-setosa
90
+ 5.1,3.5,1.4,0.3,Iris-setosa
91
+ 5.7,3.8,1.7,0.3,Iris-setosa
92
+ 5.1,3.8,1.5,0.3,Iris-setosa
93
+ 5.4,3.4,1.7,0.2,Iris-setosa
94
+ 5.1,3.7,1.5,0.4,Iris-setosa
95
+ 4.6,3.6,1.0,0.2,Iris-setosa
96
+ 5.1,3.3,1.7,0.5,Iris-setosa
97
+ 4.8,3.4,1.9,0.2,Iris-setosa
98
+ 5.0,3.0,1.6,0.2,Iris-setosa
99
+ 5.0,3.4,1.6,0.4,Iris-setosa
100
+ 5.2,3.5,1.5,0.2,Iris-setosa
101
+ 5.2,3.4,1.4,0.2,Iris-setosa
102
+ 4.7,3.2,1.6,0.2,Iris-setosa
103
+ 4.8,3.1,1.6,0.2,Iris-setosa
104
+ 5.4,3.4,1.5,0.4,Iris-setosa
105
+ 5.2,4.1,1.5,0.1,Iris-setosa
106
+ 5.5,4.2,1.4,0.2,Iris-setosa
107
+ 4.9,3.1,1.5,0.1,Iris-setosa
108
+ 5.0,3.2,1.2,0.2,Iris-setosa
109
+ 5.5,3.5,1.3,0.2,Iris-setosa
110
+ 4.9,3.1,1.5,0.1,Iris-setosa
111
+ 4.4,3.0,1.3,0.2,Iris-setosa
112
+ 5.1,3.4,1.5,0.2,Iris-setosa
113
+ 5.0,3.5,1.3,0.3,Iris-setosa
114
+ 4.5,2.3,1.3,0.3,Iris-setosa
115
+ 4.4,3.2,1.3,0.2,Iris-setosa
116
+ 5.0,3.5,1.6,0.6,Iris-setosa
117
+ 5.1,3.8,1.9,0.4,Iris-setosa
118
+ 4.8,3.0,1.4,0.3,Iris-setosa
119
+ 5.1,3.8,1.6,0.2,Iris-setosa
120
+ 4.6,3.2,1.4,0.2,Iris-setosa
121
+ 5.3,3.7,1.5,0.2,Iris-setosa
122
+ 5.0,3.3,1.4,0.2,Iris-setosa
123
+ 7.0,3.2,4.7,1.4,Iris-versicolor
124
+ 6.4,3.2,4.5,1.5,Iris-versicolor
125
+ 6.9,3.1,4.9,1.5,Iris-versicolor
126
+ 5.5,2.3,4.0,1.3,Iris-versicolor
127
+ 6.5,2.8,4.6,1.5,Iris-versicolor
128
+ 5.7,2.8,4.5,1.3,Iris-versicolor
129
+ 6.3,3.3,4.7,1.6,Iris-versicolor
130
+ 4.9,2.4,3.3,1.0,Iris-versicolor
131
+ 6.6,2.9,4.6,1.3,Iris-versicolor
132
+ 5.2,2.7,3.9,1.4,Iris-versicolor
133
+ 5.0,2.0,3.5,1.0,Iris-versicolor
134
+ 5.9,3.0,4.2,1.5,Iris-versicolor
135
+ 6.0,2.2,4.0,1.0,Iris-versicolor
136
+ 6.1,2.9,4.7,1.4,Iris-versicolor
137
+ 5.6,2.9,3.6,1.3,Iris-versicolor
138
+ 6.7,3.1,4.4,1.4,Iris-versicolor
139
+ 5.6,3.0,4.5,1.5,Iris-versicolor
140
+ 5.8,2.7,4.1,1.0,Iris-versicolor
141
+ 6.2,2.2,4.5,1.5,Iris-versicolor
142
+ 5.6,2.5,3.9,1.1,Iris-versicolor
143
+ 5.9,3.2,4.8,1.8,Iris-versicolor
144
+ 6.1,2.8,4.0,1.3,Iris-versicolor
145
+ 6.3,2.5,4.9,1.5,Iris-versicolor
146
+ 6.1,2.8,4.7,1.2,Iris-versicolor
147
+ 6.4,2.9,4.3,1.3,Iris-versicolor
148
+ 6.6,3.0,4.4,1.4,Iris-versicolor
149
+ 6.8,2.8,4.8,1.4,Iris-versicolor
150
+ 6.7,3.0,5.0,1.7,Iris-versicolor
151
+ 6.0,2.9,4.5,1.5,Iris-versicolor
152
+ 5.7,2.6,3.5,1.0,Iris-versicolor
153
+ 5.5,2.4,3.8,1.1,Iris-versicolor
154
+ 5.5,2.4,3.7,1.0,Iris-versicolor
155
+ 5.8,2.7,3.9,1.2,Iris-versicolor
156
+ 6.0,2.7,5.1,1.6,Iris-versicolor
157
+ 5.4,3.0,4.5,1.5,Iris-versicolor
158
+ 6.0,3.4,4.5,1.6,Iris-versicolor
159
+ 6.7,3.1,4.7,1.5,Iris-versicolor
160
+ 6.3,2.3,4.4,1.3,Iris-versicolor
161
+ 5.6,3.0,4.1,1.3,Iris-versicolor
162
+ 5.5,2.5,4.0,1.3,Iris-versicolor
163
+ 5.5,2.6,4.4,1.2,Iris-versicolor
164
+ 6.1,3.0,4.6,1.4,Iris-versicolor
165
+ 5.8,2.6,4.0,1.2,Iris-versicolor
166
+ 5.0,2.3,3.3,1.0,Iris-versicolor
167
+ 5.6,2.7,4.2,1.3,Iris-versicolor
168
+ 5.7,3.0,4.2,1.2,Iris-versicolor
169
+ 5.7,2.9,4.2,1.3,Iris-versicolor
170
+ 6.2,2.9,4.3,1.3,Iris-versicolor
171
+ 5.1,2.5,3.0,1.1,Iris-versicolor
172
+ 5.7,2.8,4.1,1.3,Iris-versicolor
173
+ 6.3,3.3,6.0,2.5,Iris-virginica
174
+ 5.8,2.7,5.1,1.9,Iris-virginica
175
+ 7.1,3.0,5.9,2.1,Iris-virginica
176
+ 6.3,2.9,5.6,1.8,Iris-virginica
177
+ 6.5,3.0,5.8,2.2,Iris-virginica
178
+ 7.6,3.0,6.6,2.1,Iris-virginica
179
+ 4.9,2.5,4.5,1.7,Iris-virginica
180
+ 7.3,2.9,6.3,1.8,Iris-virginica
181
+ 6.7,2.5,5.8,1.8,Iris-virginica
182
+ 7.2,3.6,6.1,2.5,Iris-virginica
183
+ 6.5,3.2,5.1,2.0,Iris-virginica
184
+ 6.4,2.7,5.3,1.9,Iris-virginica
185
+ 6.8,3.0,5.5,2.1,Iris-virginica
186
+ 5.7,2.5,5.0,2.0,Iris-virginica
187
+ 5.8,2.8,5.1,2.4,Iris-virginica
188
+ 6.4,3.2,5.3,2.3,Iris-virginica
189
+ 6.5,3.0,5.5,1.8,Iris-virginica
190
+ 7.7,3.8,6.7,2.2,Iris-virginica
191
+ 7.7,2.6,6.9,2.3,Iris-virginica
192
+ 6.0,2.2,5.0,1.5,Iris-virginica
193
+ 6.9,3.2,5.7,2.3,Iris-virginica
194
+ 5.6,2.8,4.9,2.0,Iris-virginica
195
+ 7.7,2.8,6.7,2.0,Iris-virginica
196
+ 6.3,2.7,4.9,1.8,Iris-virginica
197
+ 6.7,3.3,5.7,2.1,Iris-virginica
198
+ 7.2,3.2,6.0,1.8,Iris-virginica
199
+ 6.2,2.8,4.8,1.8,Iris-virginica
200
+ 6.1,3.0,4.9,1.8,Iris-virginica
201
+ 6.4,2.8,5.6,2.1,Iris-virginica
202
+ 7.2,3.0,5.8,1.6,Iris-virginica
203
+ 7.4,2.8,6.1,1.9,Iris-virginica
204
+ 7.9,3.8,6.4,2.0,Iris-virginica
205
+ 6.4,2.8,5.6,2.2,Iris-virginica
206
+ 6.3,2.8,5.1,1.5,Iris-virginica
207
+ 6.1,2.6,5.6,1.4,Iris-virginica
208
+ 7.7,3.0,6.1,2.3,Iris-virginica
209
+ 6.3,3.4,5.6,2.4,Iris-virginica
210
+ 6.4,3.1,5.5,1.8,Iris-virginica
211
+ 6.0,3.0,4.8,1.8,Iris-virginica
212
+ 6.9,3.1,5.4,2.1,Iris-virginica
213
+ 6.7,3.1,5.6,2.4,Iris-virginica
214
+ 6.9,3.1,5.1,2.3,Iris-virginica
215
+ 5.8,2.7,5.1,1.9,Iris-virginica
216
+ 6.8,3.2,5.9,2.3,Iris-virginica
217
+ 6.7,3.3,5.7,2.5,Iris-virginica
218
+ 6.7,3.0,5.2,2.3,Iris-virginica
219
+ 6.3,2.5,5.0,1.9,Iris-virginica
220
+ 6.5,3.0,5.2,2.0,Iris-virginica
221
+ 6.2,3.4,5.4,2.3,Iris-virginica
222
+ 5.9,3.0,5.1,1.8,Iris-virginica
223
+ %
224
+ %
225
+ %
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/missing.arff ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ % This arff file contains some missing data
2
+ @relation missing
3
+ @attribute yop real
4
+ @attribute yap real
5
+ @data
6
+ 1,5
7
+ 2,4
8
+ ?,?
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION iris
2
+
3
+ @ATTRIBUTE sepallength REAL
4
+ @ATTRIBUTE sepalwidth REAL
5
+ @ATTRIBUTE petallength REAL
6
+ @ATTRIBUTE petalwidth REAL
7
+ @ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica}
8
+
9
+ @DATA
10
+
11
+ % This file has no data
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal.arff ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ % Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes
2
+ % Spaces between elements are stripped by the parser
3
+
4
+ @relation SOME_DATA
5
+ @attribute age numeric
6
+ @attribute smoker {'yes', 'no'}
7
+ @data
8
+ 18, 'no'
9
+ 24, 'yes'
10
+ 44, 'no'
11
+ 56, 'no'
12
+ 89,'yes'
13
+ 11, 'no'
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/quoted_nominal_spaces.arff ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ % Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes
2
+ % Spaces inside quotes are NOT stripped by the parser
3
+
4
+ @relation SOME_DATA
5
+ @attribute age numeric
6
+ @attribute smoker {' yes', 'no '}
7
+ @data
8
+ 18,'no '
9
+ 24,' yes'
10
+ 44,'no '
11
+ 56,'no '
12
+ 89,' yes'
13
+ 11,'no '
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test1.arff ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test1
2
+
3
+ @ATTRIBUTE attr0 REAL
4
+ @ATTRIBUTE attr1 REAL
5
+ @ATTRIBUTE attr2 REAL
6
+ @ATTRIBUTE attr3 REAL
7
+ @ATTRIBUTE class {class0, class1, class2, class3}
8
+
9
+ @DATA
10
+ 0.1, 0.2, 0.3, 0.4,class1
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test11.arff ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test11
2
+
3
+ @ATTRIBUTE attr0 REAL
4
+ @ATTRIBUTE attr1 REAL
5
+ @ATTRIBUTE attr2 REAL
6
+ @ATTRIBUTE attr3 REAL
7
+ @ATTRIBUTE class { class0, class1, class2, class3 }
8
+ @DATA
9
+ 0.1, 0.2, 0.3, 0.4,class1
10
+ -0.1, -0.2, -0.3, -0.4,class2
11
+ 1, 2, 3, 4,class3
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test2
2
+
3
+ @ATTRIBUTE attr0 REAL
4
+ @ATTRIBUTE attr1 real
5
+ @ATTRIBUTE attr2 integer
6
+ @ATTRIBUTE attr3 Integer
7
+ @ATTRIBUTE attr4 Numeric
8
+ @ATTRIBUTE attr5 numeric
9
+ @ATTRIBUTE attr6 string
10
+ @ATTRIBUTE attr7 STRING
11
+ @ATTRIBUTE attr8 {bla}
12
+ @ATTRIBUTE attr9 {bla, bla}
13
+
14
+ @DATA
15
+ 0.1, 0.2, 0.3, 0.4,class1
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test3.arff ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ @RELATION test3
2
+
3
+ @ATTRIBUTE attr0 crap
4
+
5
+ @DATA
6
+ 0.1, 0.2, 0.3, 0.4,class1
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test4.arff ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test5
2
+
3
+ @ATTRIBUTE attr0 REAL
4
+ @ATTRIBUTE attr1 REAL
5
+ @ATTRIBUTE attr2 REAL
6
+ @ATTRIBUTE attr3 REAL
7
+ @ATTRIBUTE class {class0, class1, class2, class3}
8
+ @DATA
9
+ 0.1, 0.2, 0.3, 0.4,class1
10
+ -0.1, -0.2, -0.3, -0.4,class2
11
+ 1, 2, 3, 4,class3
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test5.arff ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test4
2
+
3
+ @ATTRIBUTE attr0 REAL
4
+ @ATTRIBUTE attr1 REAL
5
+ @ATTRIBUTE attr2 REAL
6
+ @ATTRIBUTE attr3 REAL
7
+ @ATTRIBUTE class {class0, class1, class2, class3}
8
+
9
+ @DATA
10
+
11
+ % lsdflkjhaksjdhf
12
+
13
+ % lsdflkjhaksjdhf
14
+
15
+ 0.1, 0.2, 0.3, 0.4,class1
16
+ % laksjdhf
17
+
18
+ % lsdflkjhaksjdhf
19
+ -0.1, -0.2, -0.3, -0.4,class2
20
+
21
+ % lsdflkjhaksjdhf
22
+ % lsdflkjhaksjdhf
23
+
24
+ % lsdflkjhaksjdhf
25
+
26
+ 1, 2, 3, 4,class3
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test6.arff ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test6
2
+
3
+ @ATTRIBUTE attr0 REAL
4
+ @ATTRIBUTE attr1 REAL
5
+ @ATTRIBUTE attr2 REAL
6
+ @ATTRIBUTE attr3 REAL
7
+ @ATTRIBUTE class {C}
8
+
9
+ @DATA
10
+ 0.1, 0.2, 0.3, 0.4,C
11
+ -0.1, -0.2, -0.3, -0.4,C
12
+ 1, 2, 3, 4,C
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test7.arff ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test7
2
+
3
+ @ATTRIBUTE attr_year DATE yyyy
4
+ @ATTRIBUTE attr_month DATE yyyy-MM
5
+ @ATTRIBUTE attr_date DATE yyyy-MM-dd
6
+ @ATTRIBUTE attr_datetime_local DATE "yyyy-MM-dd HH:mm"
7
+ @ATTRIBUTE attr_datetime_missing DATE "yyyy-MM-dd HH:mm"
8
+
9
+ @DATA
10
+ 1999,1999-01,1999-01-31,"1999-01-31 00:01",?
11
+ 2004,2004-12,2004-12-01,"2004-12-01 23:59","2004-12-01 23:59"
12
+ 1817,1817-04,1817-04-28,"1817-04-28 13:00",?
13
+ 2100,2100-09,2100-09-10,"2100-09-10 12:00",?
14
+ 2013,2013-11,2013-11-30,"2013-11-30 04:55","2013-11-30 04:55"
15
+ 1631,1631-10,1631-10-15,"1631-10-15 20:04","1631-10-15 20:04"
env-llmeval/lib/python3.10/site-packages/scipy/io/arff/tests/data/test8.arff ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test8
2
+
3
+ @ATTRIBUTE attr_datetime_utc DATE "yyyy-MM-dd HH:mm Z"
4
+ @ATTRIBUTE attr_datetime_full DATE "yy-MM-dd HH:mm:ss z"
5
+
6
+ @DATA
7
+ "1999-01-31 00:01 UTC","99-01-31 00:01:08 +0430"
8
+ "2004-12-01 23:59 UTC","04-12-01 23:59:59 -0800"
9
+ "1817-04-28 13:00 UTC","17-04-28 13:00:33 +1000"
10
+ "2100-09-10 12:00 UTC","21-09-10 12:00:21 -0300"
11
+ "2013-11-30 04:55 UTC","13-11-30 04:55:48 -1100"
12
+ "1631-10-15 20:04 UTC","31-10-15 20:04:10 +0000"