applied-ai-018 committed
Commit 7df0933 · verified · 1 parent: 6230e9d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg.pt +3 -0
  4. venv/lib/python3.10/site-packages/sklearn/__check_build/__init__.py +47 -0
  5. venv/lib/python3.10/site-packages/sklearn/__check_build/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so +0 -0
  7. venv/lib/python3.10/site-packages/sklearn/externals/__init__.py +5 -0
  8. venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/sklearn/externals/_arff.py +1107 -0
  10. venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py +0 -0
  11. venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py +90 -0
  15. venv/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py +535 -0
  16. venv/lib/python3.10/site-packages/sklearn/externals/conftest.py +6 -0
  17. venv/lib/python3.10/site-packages/sklearn/neighbors/__init__.py +42 -0
  18. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so +0 -0
  30. venv/lib/python3.10/site-packages/sklearn/neighbors/_kde.py +365 -0
  31. venv/lib/python3.10/site-packages/sklearn/neighbors/_lof.py +516 -0
  32. venv/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py +261 -0
  33. venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd +10 -0
  34. venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd +92 -0
  35. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py +0 -0
  36. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py +200 -0
  49. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py +101 -0
  50. venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py +100 -0
ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66b98c071654fe4be7652e90d5a47ba2e1d74f367a2dda69a2c23b018c2f07fe
+ size 9372
ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc2953903facfdb139199004fc6c3711f8fba6bc6cf251d016623fb4cabdbb08
+ size 9293
ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb2d62594d78b56c8c7deef4a0fb4709e206d5ee0c30690fd1972a44939cbc8d
+ size 16778396
venv/lib/python3.10/site-packages/sklearn/__check_build/__init__.py ADDED
@@ -0,0 +1,47 @@
+ """ Module to give helpful messages to the user that did not
+ compile scikit-learn properly.
+ """
+ import os
+
+ INPLACE_MSG = """
+ It appears that you are importing a local scikit-learn source tree. For
+ this, you need to have an inplace install. Maybe you are in the source
+ directory and you need to try from another location."""
+
+ STANDARD_MSG = """
+ If you have used an installer, please check that it is suited for your
+ Python version, your operating system and your platform."""
+
+
+ def raise_build_error(e):
+     # Raise a comprehensible error and list the contents of the
+     # directory to help debugging on the mailing list.
+     local_dir = os.path.split(__file__)[0]
+     msg = STANDARD_MSG
+     if local_dir == "sklearn/__check_build":
+         # Picking up the local install: this will work only if the
+         # install is an 'inplace build'
+         msg = INPLACE_MSG
+     dir_content = list()
+     for i, filename in enumerate(os.listdir(local_dir)):
+         if (i + 1) % 3:
+             dir_content.append(filename.ljust(26))
+         else:
+             dir_content.append(filename + "\n")
+     raise ImportError("""%s
+ ___________________________________________________________________________
+ Contents of %s:
+ %s
+ ___________________________________________________________________________
+ It seems that scikit-learn has not been built correctly.
+
+ If you have installed scikit-learn from source, please do not forget
+ to build the package before using it: run `python setup.py install` or
+ `make` in the source directory.
+ %s""" % (e, local_dir, "".join(dir_content).strip(), msg))
+
+
+ try:
+     from ._check_build import check_build  # noqa
+ except ImportError as e:
+     raise_build_error(e)
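
The file above is scikit-learn's build sanity check: importing the package first tries to import a tiny compiled extension, and only if that import fails does it raise a verbose, human-readable error. A minimal sketch of the same defensive-import pattern for a hypothetical package (`mypkg` and `_native` are illustrative names, not scikit-learn API):

    # mypkg/__init__.py -- fail fast, and helpfully, if the build is missing
    try:
        from ._native import check_build  # extension produced by the build step
    except ImportError as e:
        # The .so/.pyd is absent: the package was never compiled in place.
        raise ImportError(
            "mypkg has not been built correctly; rebuild the extension "
            "modules before importing it (original error: %s)" % e
        )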
venv/lib/python3.10/site-packages/sklearn/__check_build/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.67 kB).
 
venv/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (51.3 kB).
 
venv/lib/python3.10/site-packages/sklearn/externals/__init__.py ADDED
@@ -0,0 +1,5 @@
+
+ """
+ External, bundled dependencies.
+
+ """
venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (231 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/externals/_arff.py ADDED
@@ -0,0 +1,1107 @@
+ # =============================================================================
+ # Federal University of Rio Grande do Sul (UFRGS)
+ # Connectionist Artificial Intelligence Laboratory (LIAC)
+ # Renato de Pontes Pereira - [email protected]
+ # =============================================================================
+ # Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in
+ # all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+ # =============================================================================
+
+ '''
+ The liac-arff module implements functions to read and write ARFF files in
+ Python. It was created in the Connectionist Artificial Intelligence Laboratory
+ (LIAC), which takes place at the Federal University of Rio Grande do Sul
+ (UFRGS), in Brazil.
+
+ ARFF (Attribute-Relation File Format) is a file format specially created to
+ describe datasets that are commonly used for machine learning experiments and
+ software. This file format was created to be used in Weka, the most
+ representative software for automated machine learning experiments.
+
+ An ARFF file can be divided into two sections: header and data. The Header
+ describes the metadata of the dataset, including a general description of the
+ dataset, its name and its attributes. The source below is an example of a
+ header section in a XOR dataset::
+
+     %
+     % XOR Dataset
+     %
+     % Created by Renato Pereira
+     %            [email protected]
+     %            http://inf.ufrgs.br/~rppereira
+     %
+     %
+     @RELATION XOR
+
+     @ATTRIBUTE input1 REAL
+     @ATTRIBUTE input2 REAL
+     @ATTRIBUTE y REAL
+
+ The Data section of an ARFF file describes the observations of the dataset, in
+ the case of the XOR dataset::
+
+     @DATA
+     0.0,0.0,0.0
+     0.0,1.0,1.0
+     1.0,0.0,1.0
+     1.0,1.0,0.0
+     %
+     %
+     %
+
+ Notice that several lines start with an ``%`` symbol, denoting a comment;
+ thus, lines with ``%`` at the beginning will be ignored, except by the
+ description part at the beginning of the file. The declarations ``@RELATION``,
+ ``@ATTRIBUTE``, and ``@DATA`` are all case insensitive and obligatory.
+
+ For more information and details about the ARFF file description, consult
+ http://www.cs.waikato.ac.nz/~ml/weka/arff.html
+
+
+ ARFF Files in Python
+ ~~~~~~~~~~~~~~~~~~~~
+
+ This module uses built-in Python objects to represent a deserialized ARFF
+ file. A dictionary is used as the container of the data and metadata of ARFF,
+ and has the following keys:
+
+ - **description**: (OPTIONAL) a string with the description of the dataset.
+ - **relation**: (OBLIGATORY) a string with the name of the dataset.
+ - **attributes**: (OBLIGATORY) a list of attributes with the following
+   template::
+
+     (attribute_name, attribute_type)
+
+   the attribute_name is a string, and attribute_type must be a string
+   or a list of strings.
+ - **data**: (OBLIGATORY) a list of data instances. Each data instance must be
+   a list with values, depending on the attributes.
+
+ The above keys must follow the case described, i.e., the keys are
+ case sensitive. The attribute type ``attribute_type`` must be one of these
+ strings (they are not case sensitive): ``NUMERIC``, ``INTEGER``, ``REAL`` or
+ ``STRING``. For nominal attributes, the ``attribute_type`` must be a list of
+ strings.
+
+ In this format, the XOR dataset presented above can be represented as a python
+ object as::
+
+     xor_dataset = {
+         'description': 'XOR Dataset',
+         'relation': 'XOR',
+         'attributes': [
+             ('input1', 'REAL'),
+             ('input2', 'REAL'),
+             ('y', 'REAL'),
+         ],
+         'data': [
+             [0.0, 0.0, 0.0],
+             [0.0, 1.0, 1.0],
+             [1.0, 0.0, 1.0],
+             [1.0, 1.0, 0.0]
+         ]
+     }
+
+
+ Features
+ ~~~~~~~~
+
+ This module provides several features, including:
+
+ - Read and write ARFF files using python built-in structures, such as
+   dictionaries and lists;
+ - Supports `scipy.sparse.coo
+   <http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html#scipy.sparse.coo_matrix>`_
+   and lists of dictionaries as used by SVMLight;
+ - Supports the following attribute types: NUMERIC, REAL, INTEGER, STRING, and
+   NOMINAL;
+ - Has an interface similar to other built-in modules such as ``json``, or
+   ``zipfile``;
+ - Supports reading and writing the descriptions of files;
+ - Supports missing values and names with spaces;
+ - Supports unicode values and names;
+ - Fully compatible with Python 2.7+, Python 3.5+, pypy and pypy3;
+ - Under `MIT License <http://opensource.org/licenses/MIT>`_
+ '''
+ __author__ = 'Renato de Pontes Pereira, Matthias Feurer, Joel Nothman'
+ __author_email__ = ('[email protected], '
+                     '[email protected], '
+                     '[email protected]')
+ __version__ = '2.4.0'
+
+ import re
+ import csv
+ from typing import TYPE_CHECKING
+ from typing import Optional, List, Dict, Any, Iterator, Union, Tuple
+
+ # CONSTANTS ===================================================================
+ _SIMPLE_TYPES = ['NUMERIC', 'REAL', 'INTEGER', 'STRING']
+
+ _TK_DESCRIPTION = '%'
+ _TK_COMMENT = '%'
+ _TK_RELATION = '@RELATION'
+ _TK_ATTRIBUTE = '@ATTRIBUTE'
+ _TK_DATA = '@DATA'
+
+ _RE_RELATION = re.compile(r'^([^\{\}%,\s]*|\".*\"|\'.*\')$', re.UNICODE)
+ _RE_ATTRIBUTE = re.compile(r'^(\".*\"|\'.*\'|[^\{\}%,\s]*)\s+(.+)$', re.UNICODE)
+ _RE_QUOTE_CHARS = re.compile(r'["\'\\\s%,\000-\031]', re.UNICODE)
+ _RE_ESCAPE_CHARS = re.compile(r'(?=["\'\\%])|[\n\r\t\000-\031]')
+ _RE_SPARSE_LINE = re.compile(r'^\s*\{.*\}\s*$', re.UNICODE)
+ _RE_NONTRIVIAL_DATA = re.compile('["\'{}\\s]', re.UNICODE)
+
+ ArffDenseDataType = Iterator[List]
+ ArffSparseDataType = Tuple[List, ...]
+
+
+ if TYPE_CHECKING:
+     # typing_extensions is available when mypy is installed
+     from typing_extensions import TypedDict
+
+     class ArffContainerType(TypedDict):
+         description: str
+         relation: str
+         attributes: List
+         data: Union[ArffDenseDataType, ArffSparseDataType]
+
+ else:
+     ArffContainerType = Dict[str, Any]
+
+
+ def _build_re_values():
+     quoted_re = r'''
+                   "      # open quote followed by zero or more of:
+                   (?:
+                       (?<!\\)    # no additional backslash
+                       (?:\\\\)*  # maybe escaped backslashes
+                       \\"        # escaped quote
+                   |
+                       \\[^"]     # escaping a non-quote
+                   |
+                       [^"\\]     # non-quote char
+                   )*
+                   "      # close quote
+                   '''
+     # a value is surrounded by " or by ' or contains no quotables
+     value_re = r'''(?:
+         %s|          # a value may be surrounded by "
+         %s|          # or by '
+         [^,\s"'{}]+  # or may contain no characters requiring quoting
+         )''' % (quoted_re,
+                 quoted_re.replace('"', "'"))
+
+     # This captures (value, error) groups. Because empty values are allowed,
+     # we cannot just look for empty values to handle syntax errors.
+     # We presume the line has had ',' prepended...
+     dense = re.compile(r'''(?x)
+         ,                     # may follow ','
+         \s*
+         ((?=,)|$|{value_re})  # empty or value
+         |
+         (\S.*)                # error
+         '''.format(value_re=value_re))
+
+     # This captures (key, value) groups and will have an empty key/value
+     # in case of syntax errors.
+     # It does not ensure that the line starts with '{' or ends with '}'.
+     sparse = re.compile(r'''(?x)
+         (?:^\s*\{|,)       # may follow ',', or '{' at line start
+         \s*
+         (\d+)              # attribute key
+         \s+
+         (%(value_re)s)     # value
+         |
+         (?!}\s*$)          # not an error if it's }$
+         (?!^\s*{\s*}\s*$)  # not an error if it's ^{}$
+         \S.*               # error
+         ''' % {'value_re': value_re})
+     return dense, sparse
+
+
+ _RE_DENSE_VALUES, _RE_SPARSE_KEY_VALUES = _build_re_values()
+
+
+ _ESCAPE_SUB_MAP = {
+     '\\\\': '\\',
+     '\\"': '"',
+     "\\'": "'",
+     '\\t': '\t',
+     '\\n': '\n',
+     '\\r': '\r',
+     '\\b': '\b',
+     '\\f': '\f',
+     '\\%': '%',
+ }
+ _UNESCAPE_SUB_MAP = {chr(i): '\\%03o' % i for i in range(32)}
+ _UNESCAPE_SUB_MAP.update({v: k for k, v in _ESCAPE_SUB_MAP.items()})
+ _UNESCAPE_SUB_MAP[''] = '\\'
+ _ESCAPE_SUB_MAP.update({'\\%d' % i: chr(i) for i in range(10)})
+
+
+ def _escape_sub_callback(match):
+     s = match.group()
+     if len(s) == 2:
+         try:
+             return _ESCAPE_SUB_MAP[s]
+         except KeyError:
+             raise ValueError('Unsupported escape sequence: %s' % s)
+     if s[1] == 'u':
+         return chr(int(s[2:], 16))
+     else:
+         return chr(int(s[1:], 8))
+
+
+ def _unquote(v):
+     if v[:1] in ('"', "'"):
+         return re.sub(r'\\([0-9]{1,3}|u[0-9a-f]{4}|.)', _escape_sub_callback,
+                       v[1:-1])
+     elif v in ('?', ''):
+         return None
+     else:
+         return v
+
+
+ def _parse_values(s):
+     '''(INTERNAL) Split a line into a list of values'''
+     if not _RE_NONTRIVIAL_DATA.search(s):
+         # Fast path for trivial cases (unfortunately we have to handle missing
+         # values because of the empty string case :(.)
+         return [None if s in ('?', '') else s
+                 for s in next(csv.reader([s]))]
+
+     # _RE_DENSE_VALUES tokenizes despite quoting, whitespace, etc.
+     values, errors = zip(*_RE_DENSE_VALUES.findall(',' + s))
+     if not any(errors):
+         return [_unquote(v) for v in values]
+     if _RE_SPARSE_LINE.match(s):
+         try:
+             return {int(k): _unquote(v)
+                     for k, v in _RE_SPARSE_KEY_VALUES.findall(s)}
+         except ValueError:
+             # an ARFF syntax error in sparse data
+             for match in _RE_SPARSE_KEY_VALUES.finditer(s):
+                 if not match.group(1):
+                     raise BadLayout('Error parsing %r' % match.group())
+             raise BadLayout('Unknown parsing error')
+     else:
+         # an ARFF syntax error
+         for match in _RE_DENSE_VALUES.finditer(s):
+             if match.group(2):
+                 raise BadLayout('Error parsing %r' % match.group())
+         raise BadLayout('Unknown parsing error')
+
+
+ DENSE = 0      # Constant value representing a dense matrix
+ COO = 1        # Constant value representing a sparse matrix in coordinate format
+ LOD = 2        # Constant value representing a sparse matrix in list of
+                # dictionaries format
+ DENSE_GEN = 3  # Generator of dictionaries
+ LOD_GEN = 4    # Generator of dictionaries
+ _SUPPORTED_DATA_STRUCTURES = [DENSE, COO, LOD, DENSE_GEN, LOD_GEN]
+
+
+ # EXCEPTIONS ==================================================================
+ class ArffException(Exception):
+     message: Optional[str] = None
+
+     def __init__(self):
+         self.line = -1
+
+     def __str__(self):
+         return self.message % self.line
+
+
+ class BadRelationFormat(ArffException):
+     '''Error raised when the relation declaration is in an invalid format.'''
+     message = 'Bad @RELATION format, at line %d.'
+
+
+ class BadAttributeFormat(ArffException):
+     '''Error raised when some attribute declaration is in an invalid format.'''
+     message = 'Bad @ATTRIBUTE format, at line %d.'
+
+
+ class BadDataFormat(ArffException):
+     '''Error raised when some data instance is in an invalid format.'''
+     def __init__(self, value):
+         super().__init__()
+         self.message = (
+             'Bad @DATA instance format in line %d: ' +
+             ('%s' % value)
+         )
+
+
+ class BadAttributeType(ArffException):
+     '''Error raised when some invalid type is provided into the attribute
+     declaration.'''
+     message = 'Bad @ATTRIBUTE type, at line %d.'
+
+
+ class BadAttributeName(ArffException):
+     '''Error raised when an attribute name is provided twice in the attribute
+     declaration.'''
+
+     def __init__(self, value, value2):
+         super().__init__()
+         self.message = (
+             ('Bad @ATTRIBUTE name %s at line' % value) +
+             ' %d, this name is already in use in line' +
+             (' %d.' % value2)
+         )
+
+
+ class BadNominalValue(ArffException):
+     '''Error raised when a value is used in some data instance but is not
+     declared in its respective attribute declaration.'''
+
+     def __init__(self, value):
+         super().__init__()
+         self.message = (
+             ('Data value %s not found in nominal declaration, ' % value)
+             + 'at line %d.'
+         )
+
+
+ class BadNominalFormatting(ArffException):
+     '''Error raised when a nominal value with space is not properly quoted.'''
+     def __init__(self, value):
+         super().__init__()
+         self.message = (
+             ('Nominal data value "%s" not properly quoted in line ' % value) +
+             '%d.'
+         )
+
+
+ class BadNumericalValue(ArffException):
+     '''Error raised when an invalid numerical value is used in some data
+     instance.'''
+     message = 'Invalid numerical value, at line %d.'
+
+
+ class BadStringValue(ArffException):
+     '''Error raised when a string contains space but is not quoted.'''
+     message = 'Invalid string value at line %d.'
+
+
+ class BadLayout(ArffException):
+     '''Error raised when the layout of the ARFF file has something wrong.'''
+     message = 'Invalid layout of the ARFF file, at line %d.'
+
+     def __init__(self, msg=''):
+         super().__init__()
+         if msg:
+             self.message = BadLayout.message + ' ' + msg.replace('%', '%%')
+
+
+ class BadObject(ArffException):
+     '''Error raised when the object representing the ARFF file has something
+     wrong.'''
+     def __init__(self, msg='Invalid object.'):
+         self.msg = msg
+
+     def __str__(self):
+         return '%s' % self.msg
+
+ # =============================================================================
+
+
+ # INTERNAL ====================================================================
+ def _unescape_sub_callback(match):
+     return _UNESCAPE_SUB_MAP[match.group()]
+
+
+ def encode_string(s):
+     if _RE_QUOTE_CHARS.search(s):
+         return "'%s'" % _RE_ESCAPE_CHARS.sub(_unescape_sub_callback, s)
+     return s
+
+
+ class EncodedNominalConversor:
+     def __init__(self, values):
+         self.values = {v: i for i, v in enumerate(values)}
+         self.values[0] = 0
+
+     def __call__(self, value):
+         try:
+             return self.values[value]
+         except KeyError:
+             raise BadNominalValue(value)
+
+
+ class NominalConversor:
+     def __init__(self, values):
+         self.values = set(values)
+         self.zero_value = values[0]
+
+     def __call__(self, value):
+         if value not in self.values:
+             if value == 0:
+                 # Sparse decode
+                 # See issue #52: nominals should take their first value when
+                 # unspecified in a sparse matrix. Naturally, this is consistent
+                 # with EncodedNominalConversor.
+                 return self.zero_value
+             raise BadNominalValue(value)
+         return str(value)
+
+
+ class DenseGeneratorData:
+     '''Internal helper class to allow for different matrix types without
+     making the code a huge collection of if statements.'''
+
+     def decode_rows(self, stream, conversors):
+         for row in stream:
+             values = _parse_values(row)
+
+             if isinstance(values, dict):
+                 if values and max(values) >= len(conversors):
+                     raise BadDataFormat(row)
+                 # XXX: int 0 is used for implicit values, not '0'
+                 values = [values[i] if i in values else 0 for i in
+                           range(len(conversors))]
+             else:
+                 if len(values) != len(conversors):
+                     raise BadDataFormat(row)
+
+             yield self._decode_values(values, conversors)
+
+     @staticmethod
+     def _decode_values(values, conversors):
+         try:
+             values = [None if value is None else conversor(value)
+                       for conversor, value
+                       in zip(conversors, values)]
+         except ValueError as exc:
+             if 'float: ' in str(exc):
+                 raise BadNumericalValue()
+         return values
+
+     def encode_data(self, data, attributes):
+         '''(INTERNAL) Encodes a line of data.
+
+         Data instances follow the csv format, i.e., attribute values are
+         delimited by commas, after being converted from csv.
+
+         :param data: a list of values.
+         :param attributes: a list of attributes. Used to check if data is valid.
+         :return: a string with the encoded data line.
+         '''
+         current_row = 0
+
+         for inst in data:
+             if len(inst) != len(attributes):
+                 raise BadObject(
+                     'Instance %d has %d attributes, expected %d' %
+                     (current_row, len(inst), len(attributes))
+                 )
+
+             new_data = []
+             for value in inst:
+                 if value is None or value == '' or value != value:
+                     s = '?'
+                 else:
+                     s = encode_string(str(value))
+                 new_data.append(s)
+
+             current_row += 1
+             yield ','.join(new_data)
+
+
+ class _DataListMixin:
+     """Mixin to return a list from decode_rows instead of a generator"""
+     def decode_rows(self, stream, conversors):
+         return list(super().decode_rows(stream, conversors))
+
+
+ class Data(_DataListMixin, DenseGeneratorData):
+     pass
+
+
+ class COOData:
+     def decode_rows(self, stream, conversors):
+         data, rows, cols = [], [], []
+         for i, row in enumerate(stream):
+             values = _parse_values(row)
+             if not isinstance(values, dict):
+                 raise BadLayout()
+             if not values:
+                 continue
+             row_cols, values = zip(*sorted(values.items()))
+             try:
+                 values = [value if value is None else conversors[key](value)
+                           for key, value in zip(row_cols, values)]
+             except ValueError as exc:
+                 if 'float: ' in str(exc):
+                     raise BadNumericalValue()
+                 raise
+             except IndexError:
+                 # conversor out of range
+                 raise BadDataFormat(row)
+
+             data.extend(values)
+             rows.extend([i] * len(values))
+             cols.extend(row_cols)
+
+         return data, rows, cols
+
+     def encode_data(self, data, attributes):
+         num_attributes = len(attributes)
+         new_data = []
+         current_row = 0
+
+         row = data.row
+         col = data.col
+         data = data.data
+
+         # Check if the rows are sorted
+         if not all(row[i] <= row[i + 1] for i in range(len(row) - 1)):
+             raise ValueError("liac-arff can only output COO matrices with "
+                              "sorted rows.")
+
+         for v, col, row in zip(data, col, row):
+             if row > current_row:
+                 # Add empty rows if necessary
+                 while current_row < row:
+                     yield " ".join(["{", ','.join(new_data), "}"])
+                     new_data = []
+                     current_row += 1
+
+             if col >= num_attributes:
+                 raise BadObject(
+                     'Instance %d has at least %d attributes, expected %d' %
+                     (current_row, col + 1, num_attributes)
+                 )
+
+             if v is None or v == '' or v != v:
+                 s = '?'
+             else:
+                 s = encode_string(str(v))
+             new_data.append("%d %s" % (col, s))
+
+         yield " ".join(["{", ','.join(new_data), "}"])
+
+
+ class LODGeneratorData:
+     def decode_rows(self, stream, conversors):
+         for row in stream:
+             values = _parse_values(row)
+
+             if not isinstance(values, dict):
+                 raise BadLayout()
+             try:
+                 yield {key: None if value is None else conversors[key](value)
+                        for key, value in values.items()}
+             except ValueError as exc:
+                 if 'float: ' in str(exc):
+                     raise BadNumericalValue()
+                 raise
+             except IndexError:
+                 # conversor out of range
+                 raise BadDataFormat(row)
+
+     def encode_data(self, data, attributes):
+         current_row = 0
+
+         num_attributes = len(attributes)
+         for row in data:
+             new_data = []
+
+             if len(row) > 0 and max(row) >= num_attributes:
+                 raise BadObject(
+                     'Instance %d has %d attributes, expected %d' %
+                     (current_row, max(row) + 1, num_attributes)
+                 )
+
+             for col in sorted(row):
+                 v = row[col]
+                 if v is None or v == '' or v != v:
+                     s = '?'
+                 else:
+                     s = encode_string(str(v))
+                 new_data.append("%d %s" % (col, s))
+
+             current_row += 1
+             yield " ".join(["{", ','.join(new_data), "}"])
+
+
+ class LODData(_DataListMixin, LODGeneratorData):
+     pass
+
+
+ def _get_data_object_for_decoding(matrix_type):
+     if matrix_type == DENSE:
+         return Data()
+     elif matrix_type == COO:
+         return COOData()
+     elif matrix_type == LOD:
+         return LODData()
+     elif matrix_type == DENSE_GEN:
+         return DenseGeneratorData()
+     elif matrix_type == LOD_GEN:
+         return LODGeneratorData()
+     else:
+         raise ValueError("Matrix type %s not supported." % str(matrix_type))
+
+
+ def _get_data_object_for_encoding(matrix):
+     # Probably a scipy.sparse
+     if hasattr(matrix, 'format'):
+         if matrix.format == 'coo':
+             return COOData()
+         else:
+             raise ValueError('Cannot guess matrix format!')
+     elif isinstance(matrix[0], dict):
+         return LODData()
+     else:
+         return Data()
+
+ # =============================================================================
+
+
+ # ADVANCED INTERFACE ==========================================================
+ class ArffDecoder:
+     '''An ARFF decoder.'''
+
+     def __init__(self):
+         '''Constructor.'''
+         self._conversors = []
+         self._current_line = 0
+
+     def _decode_comment(self, s):
+         '''(INTERNAL) Decodes a comment line.
+
+         Comments are single line strings starting, obligatorily, with the ``%``
+         character, and can have any symbol, including whitespaces or special
+         characters.
+
+         This method must receive a normalized string, i.e., a string without
+         padding, including the "\r\n" characters.
+
+         :param s: a normalized string.
+         :return: a string with the decoded comment.
+         '''
+         res = re.sub(r'^\%( )?', '', s)
+         return res
+
+     def _decode_relation(self, s):
+         '''(INTERNAL) Decodes a relation line.
+
+         The relation declaration is a line with the format ``@RELATION
+         <relation-name>``, where ``relation-name`` is a string. The string must
+         start with an alphabetic character and must be quoted if the name
+         includes spaces, otherwise this method will raise a `BadRelationFormat`
+         exception.
+
+         This method must receive a normalized string, i.e., a string without
+         padding, including the "\r\n" characters.
+
+         :param s: a normalized string.
+         :return: a string with the decoded relation name.
+         '''
+         _, v = s.split(' ', 1)
+         v = v.strip()
+
+         if not _RE_RELATION.match(v):
+             raise BadRelationFormat()
+
+         res = str(v.strip('"\''))
+         return res
+
+     def _decode_attribute(self, s):
+         '''(INTERNAL) Decodes an attribute line.
+
+         The attribute is the most complex declaration in an arff file. All
+         attributes must follow the template::
+
+              @attribute <attribute-name> <datatype>
+
+         where ``attribute-name`` is a string, quoted if the name contains any
+         whitespace, and ``datatype`` can be:
+
+         - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
+         - Strings as ``STRING``.
+         - Dates (NOT IMPLEMENTED).
+         - Nominal attributes with format:
+
+             {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
+
+         The nominal names follow the rules for the attribute names, i.e., they
+         must be quoted if the name contains whitespaces.
+
+         This method must receive a normalized string, i.e., a string without
+         padding, including the "\r\n" characters.
+
+         :param s: a normalized string.
+         :return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
+         '''
+         _, v = s.split(' ', 1)
+         v = v.strip()
+
+         # Verify the general structure of declaration
+         m = _RE_ATTRIBUTE.match(v)
+         if not m:
+             raise BadAttributeFormat()
+
+         # Extracts the raw name and type
+         name, type_ = m.groups()
+
+         # Extracts the final name
+         name = str(name.strip('"\''))
+
+         # Extracts the final type
+         if type_[:1] == "{" and type_[-1:] == "}":
+             try:
+                 type_ = _parse_values(type_.strip('{} '))
+             except Exception:
+                 raise BadAttributeType()
+             if isinstance(type_, dict):
+                 raise BadAttributeType()
+
+         else:
+             # If not nominal, verify the type name
+             type_ = str(type_).upper()
+             if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
+                 raise BadAttributeType()
+
+         return (name, type_)
+
+     def _decode(self, s, encode_nominal=False, matrix_type=DENSE):
+         '''Do the inverse job of ``encode``.'''
+
+         # Make sure this method is idempotent
+         self._current_line = 0
+
+         # If string, convert to a list of lines
+         if isinstance(s, str):
+             s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')
+
+         # Create the return object
+         obj: ArffContainerType = {
+             'description': '',
+             'relation': '',
+             'attributes': [],
+             'data': []
+         }
+         attribute_names = {}
+
+         # Create the data helper object
+         data = _get_data_object_for_decoding(matrix_type)
+
+         # Read all lines
+         STATE = _TK_DESCRIPTION
+         s = iter(s)
+         for row in s:
+             self._current_line += 1
+             # Ignore empty lines
+             row = row.strip(' \r\n')
+             if not row: continue
+
+             u_row = row.upper()
+
+             # DESCRIPTION -----------------------------------------------------
+             if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
+                 obj['description'] += self._decode_comment(row) + '\n'
+             # -----------------------------------------------------------------
+
+             # RELATION --------------------------------------------------------
+             elif u_row.startswith(_TK_RELATION):
+                 if STATE != _TK_DESCRIPTION:
+                     raise BadLayout()
+
+                 STATE = _TK_RELATION
+                 obj['relation'] = self._decode_relation(row)
+             # -----------------------------------------------------------------
+
+             # ATTRIBUTE -------------------------------------------------------
+             elif u_row.startswith(_TK_ATTRIBUTE):
+                 if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE:
+                     raise BadLayout()
+
+                 STATE = _TK_ATTRIBUTE
+
+                 attr = self._decode_attribute(row)
+                 if attr[0] in attribute_names:
+                     raise BadAttributeName(attr[0], attribute_names[attr[0]])
+                 else:
+                     attribute_names[attr[0]] = self._current_line
+                 obj['attributes'].append(attr)
+
+                 if isinstance(attr[1], (list, tuple)):
+                     if encode_nominal:
+                         conversor = EncodedNominalConversor(attr[1])
+                     else:
+                         conversor = NominalConversor(attr[1])
+                 else:
+                     CONVERSOR_MAP = {'STRING': str,
+                                      'INTEGER': lambda x: int(float(x)),
+                                      'NUMERIC': float,
+                                      'REAL': float}
+                     conversor = CONVERSOR_MAP[attr[1]]
+
+                 self._conversors.append(conversor)
+             # -----------------------------------------------------------------
+
+             # DATA ------------------------------------------------------------
+             elif u_row.startswith(_TK_DATA):
+                 if STATE != _TK_ATTRIBUTE:
+                     raise BadLayout()
+
+                 break
+             # -----------------------------------------------------------------
+
+             # COMMENT ---------------------------------------------------------
+             elif u_row.startswith(_TK_COMMENT):
+                 pass
+             # -----------------------------------------------------------------
+             else:
+                 # Never found @DATA
+                 raise BadLayout()
+
+         def stream():
+             for row in s:
+                 self._current_line += 1
+                 row = row.strip()
+                 # Ignore empty lines and comment lines.
+                 if row and not row.startswith(_TK_COMMENT):
+                     yield row
+
+         # Alter the data object
+         obj['data'] = data.decode_rows(stream(), self._conversors)
+         if obj['description'].endswith('\n'):
+             obj['description'] = obj['description'][:-1]
+
+         return obj
+
+     def decode(self, s, encode_nominal=False, return_type=DENSE):
+         '''Returns the Python representation of a given ARFF file.
+
+         When a file object is passed as an argument, this method reads lines
+         iteratively, avoiding loading unnecessary information into memory.
+
+         :param s: a string or file object with the ARFF file.
+         :param encode_nominal: boolean, if True perform a label encoding
+             while reading the .arff file.
+         :param return_type: determines the data structure used to store the
+             dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
+             `arff.DENSE_GEN` or `arff.LOD_GEN`.
+             Consult the sections on `working with sparse data`_ and `loading
+             progressively`_.
+         '''
+         try:
+             return self._decode(s, encode_nominal=encode_nominal,
+                                 matrix_type=return_type)
+         except ArffException as e:
+             e.line = self._current_line
+             raise e
+
+
+ class ArffEncoder:
+     '''An ARFF encoder.'''
+
+     def _encode_comment(self, s=''):
+         '''(INTERNAL) Encodes a comment line.
+
+         Comments are single line strings starting, obligatorily, with the ``%``
+         character, and can have any symbol, including whitespaces or special
+         characters.
+
+         If ``s`` is None, this method will simply return an empty comment.
+
+         :param s: (OPTIONAL) string.
+         :return: a string with the encoded comment line.
+         '''
+         if s:
+             return '%s %s' % (_TK_COMMENT, s)
+         else:
+             return '%s' % _TK_COMMENT
+
+     def _encode_relation(self, name):
+         '''(INTERNAL) Encodes a relation line.
+
+         The relation declaration is a line with the format ``@RELATION
+         <relation-name>``, where ``relation-name`` is a string.
+
+         :param name: a string.
+         :return: a string with the encoded relation declaration.
+         '''
+         for char in ' %{},':
+             if char in name:
+                 name = '"%s"' % name
+                 break
+
+         return '%s %s' % (_TK_RELATION, name)
+
+     def _encode_attribute(self, name, type_):
+         '''(INTERNAL) Encodes an attribute line.
+
+         The attribute follows the template::
+
+              @attribute <attribute-name> <datatype>
+
+         where ``attribute-name`` is a string, and ``datatype`` can be:
+
+         - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
+         - Strings as ``STRING``.
+         - Dates (NOT IMPLEMENTED).
+         - Nominal attributes with format:
+
+             {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
+
+         This method must receive the name of the attribute and its type; if
+         the attribute type is nominal, ``type`` must be a list of values.
+
+         :param name: a string.
+         :param type_: a string or a list of string.
+         :return: a string with the encoded attribute declaration.
+         '''
+         for char in ' %{},':
+             if char in name:
+                 name = '"%s"' % name
+                 break
+
+         if isinstance(type_, (tuple, list)):
+             type_tmp = ['%s' % encode_string(type_k) for type_k in type_]
+             type_ = '{%s}' % (', '.join(type_tmp))
+
+         return '%s %s %s' % (_TK_ATTRIBUTE, name, type_)
+
+     def encode(self, obj):
+         '''Encodes a given object to an ARFF file.
+
+         :param obj: the object containing the ARFF information.
+         :return: the ARFF file as a string.
+         '''
+         data = [row for row in self.iter_encode(obj)]
+
+         return '\n'.join(data)
+
+     def iter_encode(self, obj):
+         '''The iterative version of `arff.ArffEncoder.encode`.
+
+         This encodes iteratively a given object and returns, one-by-one, the
+         lines of the ARFF file.
+
+         :param obj: the object containing the ARFF information.
+         :return: (yields) the ARFF file as strings.
+         '''
+         # DESCRIPTION
+         if obj.get('description', None):
+             for row in obj['description'].split('\n'):
+                 yield self._encode_comment(row)
+
+         # RELATION
+         if not obj.get('relation'):
+             raise BadObject('Relation name not found or with invalid value.')
+
+         yield self._encode_relation(obj['relation'])
+         yield ''
+
+         # ATTRIBUTES
+         if not obj.get('attributes'):
+             raise BadObject('Attributes not found.')
+
+         attribute_names = set()
+         for attr in obj['attributes']:
+             # Verify for bad object format
+             if not isinstance(attr, (tuple, list)) or \
+                     len(attr) != 2 or \
+                     not isinstance(attr[0], str):
+                 raise BadObject('Invalid attribute declaration "%s"' % str(attr))
+
+             if isinstance(attr[1], str):
+                 # Verify for invalid types
+                 if attr[1] not in _SIMPLE_TYPES:
+                     raise BadObject('Invalid attribute type "%s"' % str(attr))
+
+             # Verify for bad object format
+             elif not isinstance(attr[1], (tuple, list)):
+                 raise BadObject('Invalid attribute type "%s"' % str(attr))
+
+             # Verify attribute name is not used twice
+             if attr[0] in attribute_names:
+                 raise BadObject('Trying to use attribute name "%s" for the '
+                                 'second time.' % str(attr[0]))
+             else:
+                 attribute_names.add(attr[0])
+
+             yield self._encode_attribute(attr[0], attr[1])
+         yield ''
+         attributes = obj['attributes']
+
+         # DATA
+         yield _TK_DATA
+         if 'data' in obj:
+             data = _get_data_object_for_encoding(obj.get('data'))
+             yield from data.encode_data(obj.get('data'), attributes)
+
+         yield ''
+
+ # =============================================================================
+
+
+ # BASIC INTERFACE =============================================================
+ def load(fp, encode_nominal=False, return_type=DENSE):
+     '''Load a file-like object containing the ARFF document and convert it into
+     a Python object.
+
+     :param fp: a file-like object.
+     :param encode_nominal: boolean, if True perform a label encoding
+         while reading the .arff file.
+     :param return_type: determines the data structure used to store the
+         dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
+         `arff.DENSE_GEN` or `arff.LOD_GEN`.
+         Consult the sections on `working with sparse data`_ and `loading
+         progressively`_.
+     :return: a dictionary.
+     '''
+     decoder = ArffDecoder()
+     return decoder.decode(fp, encode_nominal=encode_nominal,
+                           return_type=return_type)
+
+
+ def loads(s, encode_nominal=False, return_type=DENSE):
+     '''Convert a string instance containing the ARFF document into a Python
+     object.
+
+     :param s: a string object.
+     :param encode_nominal: boolean, if True perform a label encoding
+         while reading the .arff file.
+     :param return_type: determines the data structure used to store the
+         dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
+         `arff.DENSE_GEN` or `arff.LOD_GEN`.
+         Consult the sections on `working with sparse data`_ and `loading
+         progressively`_.
+     :return: a dictionary.
+     '''
+     decoder = ArffDecoder()
+     return decoder.decode(s, encode_nominal=encode_nominal,
+                           return_type=return_type)
+
+
+ def dump(obj, fp):
+     '''Serialize an object representing the ARFF document to a given file-like
+     object.
+
+     :param obj: a dictionary.
+     :param fp: a file-like object.
+     '''
+     encoder = ArffEncoder()
+     generator = encoder.iter_encode(obj)
+
+     last_row = next(generator)
+     for row in generator:
+         fp.write(last_row + '\n')
+         last_row = row
+     fp.write(last_row)
+
+     return fp
+
+
+ def dumps(obj):
+     '''Serialize an object representing the ARFF document, returning a string.
+
+     :param obj: a dictionary.
+     :return: a string with the ARFF document.
+     '''
+     encoder = ArffEncoder()
+     return encoder.encode(obj)
+ # =============================================================================
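
As a usage sketch (not part of the diff): the module's basic interface mirrors ``json``, so the XOR dataset from the docstring above round-trips through ``dumps``/``loads``. The import path assumes the vendored copy is used directly:

    from sklearn.externals import _arff as arff

    xor_dataset = {
        'description': 'XOR Dataset',
        'relation': 'XOR',
        'attributes': [('input1', 'REAL'), ('input2', 'REAL'), ('y', 'REAL')],
        'data': [[0.0, 0.0, 0.0], [0.0, 1.0, 1.0],
                 [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]],
    }

    document = arff.dumps(xor_dataset)   # serialize to an ARFF string
    restored = arff.loads(document)      # parse it back into a dictionary
    assert restored['relation'] == 'XOR'
    assert restored['data'][1] == [0.0, 1.0, 1.0]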
venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc ADDED
Binary file (3.07 kB).
 
venv/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc ADDED
Binary file (13.2 kB).
 
venv/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py ADDED
@@ -0,0 +1,90 @@
+ """Vendored from
+ https://github.com/pypa/packaging/blob/main/packaging/_structures.py
+ """
+ # Copyright (c) Donald Stufft and individual contributors.
+ # All rights reserved.
+
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted provided that the following conditions are met:
+
+ # 1. Redistributions of source code must retain the above copyright notice,
+ #    this list of conditions and the following disclaimer.
+
+ # 2. Redistributions in binary form must reproduce the above copyright
+ #    notice, this list of conditions and the following disclaimer in the
+ #    documentation and/or other materials provided with the distribution.
+
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ class InfinityType:
+     def __repr__(self) -> str:
+         return "Infinity"
+
+     def __hash__(self) -> int:
+         return hash(repr(self))
+
+     def __lt__(self, other: object) -> bool:
+         return False
+
+     def __le__(self, other: object) -> bool:
+         return False
+
+     def __eq__(self, other: object) -> bool:
+         return isinstance(other, self.__class__)
+
+     def __ne__(self, other: object) -> bool:
+         return not isinstance(other, self.__class__)
+
+     def __gt__(self, other: object) -> bool:
+         return True
+
+     def __ge__(self, other: object) -> bool:
+         return True
+
+     def __neg__(self: object) -> "NegativeInfinityType":
+         return NegativeInfinity
+
+
+ Infinity = InfinityType()
+
+
+ class NegativeInfinityType:
+     def __repr__(self) -> str:
+         return "-Infinity"
+
+     def __hash__(self) -> int:
+         return hash(repr(self))
+
+     def __lt__(self, other: object) -> bool:
+         return True
+
+     def __le__(self, other: object) -> bool:
+         return True
+
+     def __eq__(self, other: object) -> bool:
+         return isinstance(other, self.__class__)
+
+     def __ne__(self, other: object) -> bool:
+         return not isinstance(other, self.__class__)
+
+     def __gt__(self, other: object) -> bool:
+         return False
+
+     def __ge__(self, other: object) -> bool:
+         return False
+
+     def __neg__(self: object) -> InfinityType:
+         return Infinity
+
+
+ NegativeInfinity = NegativeInfinityType()
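
A quick behavioral sketch (illustrative only): the two sentinels compare as extremes against any object, which is what makes them useful as padding values inside version comparison keys:

    from sklearn.externals._packaging._structures import Infinity, NegativeInfinity

    assert Infinity > (1, 2, 3)           # __gt__ always returns True
    assert not (Infinity < "anything")    # __lt__ always returns False
    assert NegativeInfinity < 0           # the mirror image for -Infinity
    assert -Infinity is NegativeInfinity  # __neg__ maps one singleton to the other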
venv/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py ADDED
@@ -0,0 +1,535 @@
+ """Vendored from
+ https://github.com/pypa/packaging/blob/main/packaging/version.py
+ """
+ # Copyright (c) Donald Stufft and individual contributors.
+ # All rights reserved.
+
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted provided that the following conditions are met:
+
+ # 1. Redistributions of source code must retain the above copyright notice,
+ #    this list of conditions and the following disclaimer.
+
+ # 2. Redistributions in binary form must reproduce the above copyright
+ #    notice, this list of conditions and the following disclaimer in the
+ #    documentation and/or other materials provided with the distribution.
+
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ import collections
+ import itertools
+ import re
+ import warnings
+ from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+ from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+ __all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+ InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+ PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+ SubLocalType = Union[InfiniteTypes, int, str]
+ LocalType = Union[
+     NegativeInfinityType,
+     Tuple[
+         Union[
+             SubLocalType,
+             Tuple[SubLocalType, str],
+             Tuple[NegativeInfinityType, SubLocalType],
+         ],
+         ...,
+     ],
+ ]
+ CmpKey = Tuple[
+     int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+ ]
+ LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+ VersionComparisonMethod = Callable[
+     [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+ ]
+
+ _Version = collections.namedtuple(
+     "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+ )
+
+
+ def parse(version: str) -> Union["LegacyVersion", "Version"]:
+     """Parse the given version from a string to an appropriate class.
+
+     Parameters
+     ----------
+     version : str
+         Version in a string format, eg. "0.9.1" or "1.2.dev0".
+
+     Returns
+     -------
+     version : :class:`Version` object or a :class:`LegacyVersion` object
+         Returned class depends on the given version: whether it is a valid
+         PEP 440 version or a legacy version.
+     """
+     try:
+         return Version(version)
+     except InvalidVersion:
+         return LegacyVersion(version)
+
+
+ class InvalidVersion(ValueError):
+     """
+     An invalid version was found, users should refer to PEP 440.
+     """
+
+
+ class _BaseVersion:
+     _key: Union[CmpKey, LegacyCmpKey]
+
+     def __hash__(self) -> int:
+         return hash(self._key)
+
+     # Please keep the duplicated `isinstance` check
+     # in the six comparisons hereunder
+     # unless you find a way to avoid adding overhead function calls.
+     def __lt__(self, other: "_BaseVersion") -> bool:
+         if not isinstance(other, _BaseVersion):
+             return NotImplemented
+
+         return self._key < other._key
+
+     def __le__(self, other: "_BaseVersion") -> bool:
+         if not isinstance(other, _BaseVersion):
+             return NotImplemented
+
+         return self._key <= other._key
+
+     def __eq__(self, other: object) -> bool:
+         if not isinstance(other, _BaseVersion):
+             return NotImplemented
+
+         return self._key == other._key
+
+     def __ge__(self, other: "_BaseVersion") -> bool:
+         if not isinstance(other, _BaseVersion):
+             return NotImplemented
+
+         return self._key >= other._key
+
+     def __gt__(self, other: "_BaseVersion") -> bool:
+         if not isinstance(other, _BaseVersion):
+             return NotImplemented
+
+         return self._key > other._key
+
+     def __ne__(self, other: object) -> bool:
+         if not isinstance(other, _BaseVersion):
+             return NotImplemented
+
+         return self._key != other._key
+
+
+ class LegacyVersion(_BaseVersion):
+     def __init__(self, version: str) -> None:
+         self._version = str(version)
+         self._key = _legacy_cmpkey(self._version)
+
+         warnings.warn(
+             "Creating a LegacyVersion has been deprecated and will be "
+             "removed in the next major release",
+             DeprecationWarning,
+         )
+
+     def __str__(self) -> str:
+         return self._version
+
+     def __repr__(self) -> str:
+         return f"<LegacyVersion('{self}')>"
+
+     @property
+     def public(self) -> str:
+         return self._version
+
+     @property
+     def base_version(self) -> str:
+         return self._version
+
+     @property
+     def epoch(self) -> int:
+         return -1
+
+     @property
+     def release(self) -> None:
+         return None
+
+     @property
+     def pre(self) -> None:
+         return None
+
+     @property
+     def post(self) -> None:
+         return None
+
+     @property
+     def dev(self) -> None:
+         return None
+
+     @property
+     def local(self) -> None:
+         return None
+
+     @property
+     def is_prerelease(self) -> bool:
+         return False
+
+     @property
+     def is_postrelease(self) -> bool:
+         return False
+
+     @property
+     def is_devrelease(self) -> bool:
+         return False
+
+
+ _legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+ _legacy_version_replacement_map = {
+     "pre": "c",
+     "preview": "c",
+     "-": "final-",
+     "rc": "c",
+     "dev": "@",
+ }
+
+
+ def _parse_version_parts(s: str) -> Iterator[str]:
+     for part in _legacy_version_component_re.split(s):
+         part = _legacy_version_replacement_map.get(part, part)
+
+         if not part or part == ".":
+             continue
+
+         if part[:1] in "0123456789":
+             # pad for numeric comparison
+             yield part.zfill(8)
+         else:
+             yield "*" + part
+
+     # ensure that alpha/beta/candidate are before final
+     yield "*final"
+
+
+ def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+
+     # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+     # greater than or equal to 0. This will effectively put the LegacyVersion,
+     # which uses the defacto standard originally implemented by setuptools,
+     # as before all PEP 440 versions.
+     epoch = -1
+
+     # This scheme is taken from pkg_resources.parse_version of setuptools prior
+     # to its adoption of the packaging library.
+     parts: List[str] = []
+     for part in _parse_version_parts(version.lower()):
239
+ if part.startswith("*"):
240
+ # remove "-" before a prerelease tag
241
+ if part < "*final":
242
+ while parts and parts[-1] == "*final-":
243
+ parts.pop()
244
+
245
+ # remove trailing zeros from each series of numeric parts
246
+ while parts and parts[-1] == "00000000":
247
+ parts.pop()
248
+
249
+ parts.append(part)
250
+
251
+ return epoch, tuple(parts)
252
+
253
+
254
+ # Deliberately not anchored to the start and end of the string, to make it
255
+ # easier for 3rd party code to reuse
256
+ VERSION_PATTERN = r"""
257
+ v?
258
+ (?:
259
+ (?:(?P<epoch>[0-9]+)!)? # epoch
260
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
261
+ (?P<pre> # pre-release
262
+ [-_\.]?
263
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
264
+ [-_\.]?
265
+ (?P<pre_n>[0-9]+)?
266
+ )?
267
+ (?P<post> # post release
268
+ (?:-(?P<post_n1>[0-9]+))
269
+ |
270
+ (?:
271
+ [-_\.]?
272
+ (?P<post_l>post|rev|r)
273
+ [-_\.]?
274
+ (?P<post_n2>[0-9]+)?
275
+ )
276
+ )?
277
+ (?P<dev> # dev release
278
+ [-_\.]?
279
+ (?P<dev_l>dev)
280
+ [-_\.]?
281
+ (?P<dev_n>[0-9]+)?
282
+ )?
283
+ )
284
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
285
+ """
286
+
287
+
288
+ class Version(_BaseVersion):
289
+
290
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
291
+
292
+ def __init__(self, version: str) -> None:
293
+
294
+ # Validate the version and parse it into pieces
295
+ match = self._regex.search(version)
296
+ if not match:
297
+ raise InvalidVersion(f"Invalid version: '{version}'")
298
+
299
+ # Store the parsed out pieces of the version
300
+ self._version = _Version(
301
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
302
+ release=tuple(int(i) for i in match.group("release").split(".")),
303
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
304
+ post=_parse_letter_version(
305
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
306
+ ),
307
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
308
+ local=_parse_local_version(match.group("local")),
309
+ )
310
+
311
+ # Generate a key which will be used for sorting
312
+ self._key = _cmpkey(
313
+ self._version.epoch,
314
+ self._version.release,
315
+ self._version.pre,
316
+ self._version.post,
317
+ self._version.dev,
318
+ self._version.local,
319
+ )
320
+
321
+ def __repr__(self) -> str:
322
+ return f"<Version('{self}')>"
323
+
324
+ def __str__(self) -> str:
325
+ parts = []
326
+
327
+ # Epoch
328
+ if self.epoch != 0:
329
+ parts.append(f"{self.epoch}!")
330
+
331
+ # Release segment
332
+ parts.append(".".join(str(x) for x in self.release))
333
+
334
+ # Pre-release
335
+ if self.pre is not None:
336
+ parts.append("".join(str(x) for x in self.pre))
337
+
338
+ # Post-release
339
+ if self.post is not None:
340
+ parts.append(f".post{self.post}")
341
+
342
+ # Development release
343
+ if self.dev is not None:
344
+ parts.append(f".dev{self.dev}")
345
+
346
+ # Local version segment
347
+ if self.local is not None:
348
+ parts.append(f"+{self.local}")
349
+
350
+ return "".join(parts)
351
+
352
+ @property
353
+ def epoch(self) -> int:
354
+ _epoch: int = self._version.epoch
355
+ return _epoch
356
+
357
+ @property
358
+ def release(self) -> Tuple[int, ...]:
359
+ _release: Tuple[int, ...] = self._version.release
360
+ return _release
361
+
362
+ @property
363
+ def pre(self) -> Optional[Tuple[str, int]]:
364
+ _pre: Optional[Tuple[str, int]] = self._version.pre
365
+ return _pre
366
+
367
+ @property
368
+ def post(self) -> Optional[int]:
369
+ return self._version.post[1] if self._version.post else None
370
+
371
+ @property
372
+ def dev(self) -> Optional[int]:
373
+ return self._version.dev[1] if self._version.dev else None
374
+
375
+ @property
376
+ def local(self) -> Optional[str]:
377
+ if self._version.local:
378
+ return ".".join(str(x) for x in self._version.local)
379
+ else:
380
+ return None
381
+
382
+ @property
383
+ def public(self) -> str:
384
+ return str(self).split("+", 1)[0]
385
+
386
+ @property
387
+ def base_version(self) -> str:
388
+ parts = []
389
+
390
+ # Epoch
391
+ if self.epoch != 0:
392
+ parts.append(f"{self.epoch}!")
393
+
394
+ # Release segment
395
+ parts.append(".".join(str(x) for x in self.release))
396
+
397
+ return "".join(parts)
398
+
399
+ @property
400
+ def is_prerelease(self) -> bool:
401
+ return self.dev is not None or self.pre is not None
402
+
403
+ @property
404
+ def is_postrelease(self) -> bool:
405
+ return self.post is not None
406
+
407
+ @property
408
+ def is_devrelease(self) -> bool:
409
+ return self.dev is not None
410
+
411
+ @property
412
+ def major(self) -> int:
413
+ return self.release[0] if len(self.release) >= 1 else 0
414
+
415
+ @property
416
+ def minor(self) -> int:
417
+ return self.release[1] if len(self.release) >= 2 else 0
418
+
419
+ @property
420
+ def micro(self) -> int:
421
+ return self.release[2] if len(self.release) >= 3 else 0
422
+
423
+
424
+ def _parse_letter_version(
425
+ letter: str, number: Union[str, bytes, SupportsInt]
426
+ ) -> Optional[Tuple[str, int]]:
427
+
428
+ if letter:
429
+ # We consider there to be an implicit 0 in a pre-release if there is
430
+ # not a numeral associated with it.
431
+ if number is None:
432
+ number = 0
433
+
434
+ # We normalize any letters to their lower case form
435
+ letter = letter.lower()
436
+
437
+ # We consider some words to be alternate spellings of other words and
438
+ # in those cases we want to normalize the spellings to our preferred
439
+ # spelling.
440
+ if letter == "alpha":
441
+ letter = "a"
442
+ elif letter == "beta":
443
+ letter = "b"
444
+ elif letter in ["c", "pre", "preview"]:
445
+ letter = "rc"
446
+ elif letter in ["rev", "r"]:
447
+ letter = "post"
448
+
449
+ return letter, int(number)
450
+ if not letter and number:
451
+ # We assume if we are given a number, but we are not given a letter
452
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
453
+ letter = "post"
454
+
455
+ return letter, int(number)
456
+
457
+ return None
458
+
459
+
460
+ _local_version_separators = re.compile(r"[\._-]")
461
+
462
+
463
+ def _parse_local_version(local: str) -> Optional[LocalType]:
464
+ """
465
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
466
+ """
467
+ if local is not None:
468
+ return tuple(
469
+ part.lower() if not part.isdigit() else int(part)
470
+ for part in _local_version_separators.split(local)
471
+ )
472
+ return None
473
+
474
+
475
+ def _cmpkey(
476
+ epoch: int,
477
+ release: Tuple[int, ...],
478
+ pre: Optional[Tuple[str, int]],
479
+ post: Optional[Tuple[str, int]],
480
+ dev: Optional[Tuple[str, int]],
481
+ local: Optional[Tuple[SubLocalType]],
482
+ ) -> CmpKey:
483
+
484
+ # When we compare a release version, we want to compare it with all of the
485
+ # trailing zeros removed. So we'll use a reverse the list, drop all the now
486
+ # leading zeros until we come to something non zero, then take the rest
487
+ # re-reverse it back into the correct order and make it a tuple and use
488
+ # that for our sorting key.
489
+ _release = tuple(
490
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
491
+ )
492
+
493
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
494
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
495
+ # if there is not a pre or a post segment. If we have one of those then
496
+ # the normal sorting rules will handle this case correctly.
497
+ if pre is None and post is None and dev is not None:
498
+ _pre: PrePostDevType = NegativeInfinity
499
+ # Versions without a pre-release (except as noted above) should sort after
500
+ # those with one.
501
+ elif pre is None:
502
+ _pre = Infinity
503
+ else:
504
+ _pre = pre
505
+
506
+ # Versions without a post segment should sort before those with one.
507
+ if post is None:
508
+ _post: PrePostDevType = NegativeInfinity
509
+
510
+ else:
511
+ _post = post
512
+
513
+ # Versions without a development segment should sort after those with one.
514
+ if dev is None:
515
+ _dev: PrePostDevType = Infinity
516
+
517
+ else:
518
+ _dev = dev
519
+
520
+ if local is None:
521
+ # Versions without a local segment should sort before those with one.
522
+ _local: LocalType = NegativeInfinity
523
+ else:
524
+ # Versions with a local segment need that segment parsed to implement
525
+ # the sorting rules in PEP440.
526
+ # - Alpha numeric segments sort before numeric segments
527
+ # - Alpha numeric segments sort lexicographically
528
+ # - Numeric segments sort numerically
529
+ # - Shorter versions sort before longer versions when the prefixes
530
+ # match exactly
531
+ _local = tuple(
532
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
533
+ )
534
+
535
+ return epoch, _release, _pre, _post, _dev, _local
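
As a quick sanity check of the ordering `_cmpkey` implements above (an illustrative aside, not part of the committed diff): PEP 440 places dev releases before pre-releases, pre-releases before the final release, and post releases after it, while legacy versions (epoch -1) sort before everything. The import path below follows this venv's layout:

    from sklearn.externals._packaging.version import parse

    assert parse("1.0.dev0") < parse("1.0a1") < parse("1.0") < parse("1.0.post1")
    assert parse("1.0") < parse("1.0+local")   # local segments sort after the public version
    assert parse("not-pep440") < parse("0.1")  # LegacyVersion (epoch -1) sorts first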
venv/lib/python3.10/site-packages/sklearn/externals/conftest.py ADDED
@@ -0,0 +1,6 @@
+ # Do not collect any tests in externals. This is more robust than using
+ # --ignore because --ignore needs a path and it is not convenient to pass in
+ # the externals path (very long install-dependent path in site-packages) when
+ # using --pyargs
+ def pytest_ignore_collect(path, config):
+     return True
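
Returning `True` unconditionally prunes everything under this directory from pytest collection. A minimal sketch of a more selective variant (hypothetical, reusing the same legacy `path`/`config` hook signature shown above; `VENDORED` is an illustrative name):

    # Hypothetical: ignore only specific vendored subpackages.
    VENDORED = ("_packaging",)

    def pytest_ignore_collect(path, config):
        return any(name in str(path) for name in VENDORED)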
venv/lib/python3.10/site-packages/sklearn/neighbors/__init__.py ADDED
@@ -0,0 +1,42 @@
+ """
+ The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
+ algorithm.
+ """
+
+ from ._ball_tree import BallTree
+ from ._base import VALID_METRICS, VALID_METRICS_SPARSE, sort_graph_by_row_values
+ from ._classification import KNeighborsClassifier, RadiusNeighborsClassifier
+ from ._graph import (
+     KNeighborsTransformer,
+     RadiusNeighborsTransformer,
+     kneighbors_graph,
+     radius_neighbors_graph,
+ )
+ from ._kd_tree import KDTree
+ from ._kde import KernelDensity
+ from ._lof import LocalOutlierFactor
+ from ._nca import NeighborhoodComponentsAnalysis
+ from ._nearest_centroid import NearestCentroid
+ from ._regression import KNeighborsRegressor, RadiusNeighborsRegressor
+ from ._unsupervised import NearestNeighbors
+
+ __all__ = [
+     "BallTree",
+     "KDTree",
+     "KNeighborsClassifier",
+     "KNeighborsRegressor",
+     "KNeighborsTransformer",
+     "NearestCentroid",
+     "NearestNeighbors",
+     "RadiusNeighborsClassifier",
+     "RadiusNeighborsRegressor",
+     "RadiusNeighborsTransformer",
+     "kneighbors_graph",
+     "radius_neighbors_graph",
+     "KernelDensity",
+     "LocalOutlierFactor",
+     "NeighborhoodComponentsAnalysis",
+     "sort_graph_by_row_values",
+     "VALID_METRICS",
+     "VALID_METRICS_SPARSE",
+ ]
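
For orientation (an illustrative aside, not part of the committed diff), a minimal sketch of the public entry points re-exported above:

    import numpy as np
    from sklearn.neighbors import NearestNeighbors, kneighbors_graph

    X = np.array([[0.0], [1.0], [10.0]])
    nn = NearestNeighbors(n_neighbors=2).fit(X)
    dist, ind = nn.kneighbors(X)  # distances and indices of each point's 2 nearest
    graph = kneighbors_graph(X, n_neighbors=1, mode="connectivity")  # sparse kNN graph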
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_base.cpython-310.pyc ADDED
Binary file (34.7 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_classification.cpython-310.pyc ADDED
Binary file (24.3 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc ADDED
Binary file (22.2 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_kde.cpython-310.pyc ADDED
Binary file (10.7 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_lof.cpython-310.pyc ADDED
Binary file (18.5 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nca.cpython-310.pyc ADDED
Binary file (15.6 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc ADDED
Binary file (7.77 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_regression.cpython-310.pyc ADDED
Binary file (17.2 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc ADDED
Binary file (6.42 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (774 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (774 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/_kde.py ADDED
@@ -0,0 +1,365 @@
+ """
+ Kernel Density Estimation
+ -------------------------
+ """
+ # Author: Jake Vanderplas <[email protected]>
+ import itertools
+ from numbers import Integral, Real
+
+ import numpy as np
+ from scipy.special import gammainc
+
+ from ..base import BaseEstimator, _fit_context
+ from ..neighbors._base import VALID_METRICS
+ from ..utils import check_random_state
+ from ..utils._param_validation import Interval, StrOptions
+ from ..utils.extmath import row_norms
+ from ..utils.validation import _check_sample_weight, check_is_fitted
+ from ._ball_tree import BallTree
+ from ._kd_tree import KDTree
+
+ VALID_KERNELS = [
+     "gaussian",
+     "tophat",
+     "epanechnikov",
+     "exponential",
+     "linear",
+     "cosine",
+ ]
+
+ TREE_DICT = {"ball_tree": BallTree, "kd_tree": KDTree}
+
+
+ # TODO: implement a brute force version for testing purposes
+ # TODO: create a density estimation base class?
+ class KernelDensity(BaseEstimator):
+     """Kernel Density Estimation.
+
+     Read more in the :ref:`User Guide <kernel_density>`.
+
+     Parameters
+     ----------
+     bandwidth : float or {"scott", "silverman"}, default=1.0
+         The bandwidth of the kernel. If bandwidth is a float, it defines the
+         bandwidth of the kernel. If bandwidth is a string, one of the
+         estimation methods is used.
+
+     algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'
+         The tree algorithm to use.
+
+     kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
+              'cosine'}, default='gaussian'
+         The kernel to use.
+
+     metric : str, default='euclidean'
+         Metric to use for distance computation. See the
+         documentation of `scipy.spatial.distance
+         <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
+         the metrics listed in
+         :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
+         values.
+
+         Not all metrics are valid with all algorithms: refer to the
+         documentation of :class:`BallTree` and :class:`KDTree`. Note that the
+         normalization of the density output is correct only for the Euclidean
+         distance metric.
+
+     atol : float, default=0
+         The desired absolute tolerance of the result. A larger tolerance will
+         generally lead to faster execution.
+
+     rtol : float, default=0
+         The desired relative tolerance of the result. A larger tolerance will
+         generally lead to faster execution.
+
+     breadth_first : bool, default=True
+         If true (default), use a breadth-first approach to the problem.
+         Otherwise use a depth-first approach.
+
+     leaf_size : int, default=40
+         Specify the leaf size of the underlying tree. See :class:`BallTree`
+         or :class:`KDTree` for details.
+
+     metric_params : dict, default=None
+         Additional parameters to be passed to the tree for use with the
+         metric. For more information, see the documentation of
+         :class:`BallTree` or :class:`KDTree`.
+
+     Attributes
+     ----------
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+
+         .. versionadded:: 0.24
+
+     tree_ : ``BinaryTree`` instance
+         The tree algorithm for fast generalized N-point problems.
+
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+
+     bandwidth_ : float
+         Value of the bandwidth, given directly by the bandwidth parameter or
+         estimated using the 'scott' or 'silverman' method.
+
+         .. versionadded:: 1.0
+
+     See Also
+     --------
+     sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point
+         problems.
+     sklearn.neighbors.BallTree : Ball tree for fast generalized N-point
+         problems.
+
+     Examples
+     --------
+     Compute a gaussian kernel density estimate with a fixed bandwidth.
+
+     >>> from sklearn.neighbors import KernelDensity
+     >>> import numpy as np
+     >>> rng = np.random.RandomState(42)
+     >>> X = rng.random_sample((100, 3))
+     >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
+     >>> log_density = kde.score_samples(X[:3])
+     >>> log_density
+     array([-1.52955942, -1.51462041, -1.60244657])
+     """
+
+     _parameter_constraints: dict = {
+         "bandwidth": [
+             Interval(Real, 0, None, closed="neither"),
+             StrOptions({"scott", "silverman"}),
+         ],
+         "algorithm": [StrOptions(set(TREE_DICT.keys()) | {"auto"})],
+         "kernel": [StrOptions(set(VALID_KERNELS))],
+         "metric": [
+             StrOptions(
+                 set(itertools.chain(*[VALID_METRICS[alg] for alg in TREE_DICT.keys()]))
+             )
+         ],
+         "atol": [Interval(Real, 0, None, closed="left")],
+         "rtol": [Interval(Real, 0, None, closed="left")],
+         "breadth_first": ["boolean"],
+         "leaf_size": [Interval(Integral, 1, None, closed="left")],
+         "metric_params": [None, dict],
+     }
+
+     def __init__(
+         self,
+         *,
+         bandwidth=1.0,
+         algorithm="auto",
+         kernel="gaussian",
+         metric="euclidean",
+         atol=0,
+         rtol=0,
+         breadth_first=True,
+         leaf_size=40,
+         metric_params=None,
+     ):
+         self.algorithm = algorithm
+         self.bandwidth = bandwidth
+         self.kernel = kernel
+         self.metric = metric
+         self.atol = atol
+         self.rtol = rtol
+         self.breadth_first = breadth_first
+         self.leaf_size = leaf_size
+         self.metric_params = metric_params
+
+     def _choose_algorithm(self, algorithm, metric):
+         # given the algorithm string + metric string, choose the optimal
+         # algorithm to compute the result.
+         if algorithm == "auto":
+             # use KD Tree if possible
+             if metric in KDTree.valid_metrics:
+                 return "kd_tree"
+             elif metric in BallTree.valid_metrics:
+                 return "ball_tree"
+         else:  # kd_tree or ball_tree
+             if metric not in TREE_DICT[algorithm].valid_metrics:
+                 raise ValueError(
+                     "invalid metric for {0}: '{1}'".format(TREE_DICT[algorithm], metric)
+                 )
+             return algorithm
+
+     @_fit_context(
+         # KernelDensity.metric is not validated yet
+         prefer_skip_nested_validation=False
+     )
+     def fit(self, X, y=None, sample_weight=None):
+         """Fit the Kernel Density model on the data.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             List of n_features-dimensional data points. Each row
+             corresponds to a single data point.
+
+         y : None
+             Ignored. This parameter exists only for compatibility with
+             :class:`~sklearn.pipeline.Pipeline`.
+
+         sample_weight : array-like of shape (n_samples,), default=None
+             List of sample weights attached to the data X.
+
+             .. versionadded:: 0.20
+
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         algorithm = self._choose_algorithm(self.algorithm, self.metric)
+
+         if isinstance(self.bandwidth, str):
+             if self.bandwidth == "scott":
+                 self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))
+             elif self.bandwidth == "silverman":
+                 self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (
+                     -1 / (X.shape[1] + 4)
+                 )
+         else:
+             self.bandwidth_ = self.bandwidth
+
+         X = self._validate_data(X, order="C", dtype=np.float64)
+
+         if sample_weight is not None:
+             sample_weight = _check_sample_weight(
+                 sample_weight, X, dtype=np.float64, only_non_negative=True
+             )
+
+         kwargs = self.metric_params
+         if kwargs is None:
+             kwargs = {}
+         self.tree_ = TREE_DICT[algorithm](
+             X,
+             metric=self.metric,
+             leaf_size=self.leaf_size,
+             sample_weight=sample_weight,
+             **kwargs,
+         )
+         return self
+
+     def score_samples(self, X):
+         """Compute the log-likelihood of each sample under the model.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             An array of points to query. Last dimension should match dimension
+             of training data (n_features).
+
+         Returns
+         -------
+         density : ndarray of shape (n_samples,)
+             Log-likelihood of each sample in `X`. These are normalized to be
+             probability densities, so values will be low for high-dimensional
+             data.
+         """
+         check_is_fitted(self)
+         # The returned density is normalized to the number of points.
+         # For it to be a probability, we must scale it. For this reason
+         # we'll also scale atol.
+         X = self._validate_data(X, order="C", dtype=np.float64, reset=False)
+         if self.tree_.sample_weight is None:
+             N = self.tree_.data.shape[0]
+         else:
+             N = self.tree_.sum_weight
+         atol_N = self.atol * N
+         log_density = self.tree_.kernel_density(
+             X,
+             h=self.bandwidth_,
+             kernel=self.kernel,
+             atol=atol_N,
+             rtol=self.rtol,
+             breadth_first=self.breadth_first,
+             return_log=True,
+         )
+         log_density -= np.log(N)
+         return log_density
+
+     def score(self, X, y=None):
+         """Compute the total log-likelihood under the model.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             List of n_features-dimensional data points. Each row
+             corresponds to a single data point.
+
+         y : None
+             Ignored. This parameter exists only for compatibility with
+             :class:`~sklearn.pipeline.Pipeline`.
+
+         Returns
+         -------
+         logprob : float
+             Total log-likelihood of the data in X. This is normalized to be a
+             probability density, so the value will be low for high-dimensional
+             data.
+         """
+         return np.sum(self.score_samples(X))
+
+     def sample(self, n_samples=1, random_state=None):
+         """Generate random samples from the model.
+
+         Currently, this is implemented only for gaussian and tophat kernels.
+
+         Parameters
+         ----------
+         n_samples : int, default=1
+             Number of samples to generate.
+
+         random_state : int, RandomState instance or None, default=None
+             Determines random number generation used to generate
+             random samples. Pass an int for reproducible results
+             across multiple function calls.
+             See :term:`Glossary <random_state>`.
+
+         Returns
+         -------
+         X : array-like of shape (n_samples, n_features)
+             List of samples.
+         """
+         check_is_fitted(self)
+         # TODO: implement sampling for other valid kernel shapes
+         if self.kernel not in ["gaussian", "tophat"]:
+             raise NotImplementedError()
+
+         data = np.asarray(self.tree_.data)
+
+         rng = check_random_state(random_state)
+         u = rng.uniform(0, 1, size=n_samples)
+         if self.tree_.sample_weight is None:
+             i = (u * data.shape[0]).astype(np.int64)
+         else:
+             cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
+             sum_weight = cumsum_weight[-1]
+             i = np.searchsorted(cumsum_weight, u * sum_weight)
+         if self.kernel == "gaussian":
+             return np.atleast_2d(rng.normal(data[i], self.bandwidth_))
+
+         elif self.kernel == "tophat":
+             # we first draw points from a d-dimensional normal distribution,
+             # then use an incomplete gamma function to map them to a uniform
+             # d-dimensional tophat distribution.
+             dim = data.shape[1]
+             X = rng.normal(size=(n_samples, dim))
+             s_sq = row_norms(X, squared=True)
+             correction = (
+                 gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
+                 * self.bandwidth_
+                 / np.sqrt(s_sq)
+             )
+             return data[i] + X * correction[:, np.newaxis]
+
+     def _more_tags(self):
+         return {
+             "_xfail_checks": {
+                 "check_sample_weights_invariance": (
+                     "sample_weight must have positive values"
+                 ),
+             }
+         }
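
To make the data-driven bandwidth rules in `fit` concrete (an illustrative aside, not part of the committed diff), here are the 'scott' and 'silverman' formulas exactly as computed above, for illustrative sample counts:

    n, d = 100, 3                                    # n_samples, n_features
    scott = n ** (-1 / (d + 4))                      # approximately 0.518
    silverman = (n * (d + 2) / 4) ** (-1 / (d + 4))  # approximately 0.502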
venv/lib/python3.10/site-packages/sklearn/neighbors/_lof.py ADDED
@@ -0,0 +1,516 @@
+ # Authors: Nicolas Goix <[email protected]>
+ #          Alexandre Gramfort <[email protected]>
+ # License: BSD 3 clause
+
+ import warnings
+ from numbers import Real
+
+ import numpy as np
+
+ from ..base import OutlierMixin, _fit_context
+ from ..utils import check_array
+ from ..utils._param_validation import Interval, StrOptions
+ from ..utils.metaestimators import available_if
+ from ..utils.validation import check_is_fitted
+ from ._base import KNeighborsMixin, NeighborsBase
+
+ __all__ = ["LocalOutlierFactor"]
+
+
+ class LocalOutlierFactor(KNeighborsMixin, OutlierMixin, NeighborsBase):
+     """Unsupervised Outlier Detection using the Local Outlier Factor (LOF).
+
+     The anomaly score of each sample is called the Local Outlier Factor.
+     It measures the local deviation of the density of a given sample with respect
+     to its neighbors.
+     It is local in that the anomaly score depends on how isolated the object
+     is with respect to the surrounding neighborhood.
+     More precisely, locality is given by k-nearest neighbors, whose distance
+     is used to estimate the local density.
+     By comparing the local density of a sample to the local densities of its
+     neighbors, one can identify samples that have a substantially lower density
+     than their neighbors. These are considered outliers.
+
+     .. versionadded:: 0.19
+
+     Parameters
+     ----------
+     n_neighbors : int, default=20
+         Number of neighbors to use by default for :meth:`kneighbors` queries.
+         If n_neighbors is larger than the number of samples provided,
+         all samples will be used.
+
+     algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
+         Algorithm used to compute the nearest neighbors:
+
+         - 'ball_tree' will use :class:`BallTree`
+         - 'kd_tree' will use :class:`KDTree`
+         - 'brute' will use a brute-force search.
+         - 'auto' will attempt to decide the most appropriate algorithm
+           based on the values passed to :meth:`fit` method.
+
+         Note: fitting on sparse input will override the setting of
+         this parameter, using brute force.
+
+     leaf_size : int, default=30
+         Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
+         affect the speed of the construction and query, as well as the memory
+         required to store the tree. The optimal value depends on the
+         nature of the problem.
+
+     metric : str or callable, default='minkowski'
+         Metric to use for distance computation. Default is "minkowski", which
+         results in the standard Euclidean distance when p = 2. See the
+         documentation of `scipy.spatial.distance
+         <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
+         the metrics listed in
+         :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
+         values.
+
+         If metric is "precomputed", X is assumed to be a distance matrix and
+         must be square during fit. X may be a :term:`sparse graph`, in which
+         case only "nonzero" elements may be considered neighbors.
+
+         If metric is a callable function, it takes two arrays representing 1D
+         vectors as inputs and must return one value indicating the distance
+         between those vectors. This works for Scipy's metrics, but is less
+         efficient than passing the metric name as a string.
+
+     p : float, default=2
+         Parameter for the Minkowski metric from
+         :func:`sklearn.metrics.pairwise_distances`. When p = 1, this
+         is equivalent to using manhattan_distance (l1), and euclidean_distance
+         (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
+
+     metric_params : dict, default=None
+         Additional keyword arguments for the metric function.
+
+     contamination : 'auto' or float, default='auto'
+         The amount of contamination of the data set, i.e. the proportion
+         of outliers in the data set. When fitting this is used to define the
+         threshold on the scores of the samples.
+
+         - if 'auto', the threshold is determined as in the
+           original paper,
+         - if a float, the contamination should be in the range (0, 0.5].
+
+         .. versionchanged:: 0.22
+            The default value of ``contamination`` changed from 0.1
+            to ``'auto'``.
+
+     novelty : bool, default=False
+         By default, LocalOutlierFactor is only meant to be used for outlier
+         detection (novelty=False). Set novelty to True if you want to use
+         LocalOutlierFactor for novelty detection. In this case be aware that
+         you should only use predict, decision_function and score_samples
+         on new unseen data and not on the training set; and note that the
+         results obtained this way may differ from the standard LOF results.
+
+         .. versionadded:: 0.20
+
+     n_jobs : int, default=None
+         The number of parallel jobs to run for neighbors search.
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+
+     Attributes
+     ----------
+     negative_outlier_factor_ : ndarray of shape (n_samples,)
+         The opposite LOF of the training samples. The higher, the more normal.
+         Inliers tend to have a LOF score close to 1
+         (``negative_outlier_factor_`` close to -1), while outliers tend to have
+         a larger LOF score.
+
+         The local outlier factor (LOF) of a sample captures its
+         supposed 'degree of abnormality'.
+         It is the average of the ratio of the local reachability density of
+         a sample and those of its k-nearest neighbors.
+
+     n_neighbors_ : int
+         The actual number of neighbors used for :meth:`kneighbors` queries.
+
+     offset_ : float
+         Offset used to obtain binary labels from the raw scores.
+         Observations having a negative_outlier_factor smaller than `offset_`
+         are detected as abnormal.
+         The offset is set to -1.5 (inliers score around -1), except when a
+         contamination parameter different than "auto" is provided. In that
+         case, the offset is defined in such a way we obtain the expected
+         number of outliers in training.
+
+         .. versionadded:: 0.20
+
+     effective_metric_ : str
+         The effective metric used for the distance computation.
+
+     effective_metric_params_ : dict
+         The effective additional keyword arguments for the metric function.
+
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+
+         .. versionadded:: 0.24
+
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of features seen during :term:`fit`. Defined only when `X`
+         has feature names that are all strings.
+
+         .. versionadded:: 1.0
+
+     n_samples_fit_ : int
+         It is the number of samples in the fitted data.
+
+     See Also
+     --------
+     sklearn.svm.OneClassSVM : Unsupervised Outlier Detection using
+         Support Vector Machine.
+
+     References
+     ----------
+     .. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
+        LOF: identifying density-based local outliers. In ACM sigmod record.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> from sklearn.neighbors import LocalOutlierFactor
+     >>> X = [[-1.1], [0.2], [101.1], [0.3]]
+     >>> clf = LocalOutlierFactor(n_neighbors=2)
+     >>> clf.fit_predict(X)
+     array([ 1,  1, -1,  1])
+     >>> clf.negative_outlier_factor_
+     array([ -0.9821...,  -1.0370..., -73.3697...,  -0.9821...])
+     """
+
+     _parameter_constraints: dict = {
+         **NeighborsBase._parameter_constraints,
+         "contamination": [
+             StrOptions({"auto"}),
+             Interval(Real, 0, 0.5, closed="right"),
+         ],
+         "novelty": ["boolean"],
+     }
+     _parameter_constraints.pop("radius")
+
+     def __init__(
+         self,
+         n_neighbors=20,
+         *,
+         algorithm="auto",
+         leaf_size=30,
+         metric="minkowski",
+         p=2,
+         metric_params=None,
+         contamination="auto",
+         novelty=False,
+         n_jobs=None,
+     ):
+         super().__init__(
+             n_neighbors=n_neighbors,
+             algorithm=algorithm,
+             leaf_size=leaf_size,
+             metric=metric,
+             p=p,
+             metric_params=metric_params,
+             n_jobs=n_jobs,
+         )
+         self.contamination = contamination
+         self.novelty = novelty
+
+     def _check_novelty_fit_predict(self):
+         if self.novelty:
+             msg = (
+                 "fit_predict is not available when novelty=True. Use "
+                 "novelty=False if you want to predict on the training set."
+             )
+             raise AttributeError(msg)
+         return True
+
+     @available_if(_check_novelty_fit_predict)
+     def fit_predict(self, X, y=None):
+         """Fit the model to the training set X and return the labels.
+
+         **Not available for novelty detection (when novelty is set to True).**
+         Label is 1 for an inlier and -1 for an outlier according to the LOF
+         score and the contamination parameter.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
+             The query sample or samples to compute the Local Outlier Factor
+             w.r.t. the training samples.
+
+         y : Ignored
+             Not used, present for API consistency by convention.
+
+         Returns
+         -------
+         is_inlier : ndarray of shape (n_samples,)
+             Returns -1 for anomalies/outliers and 1 for inliers.
+         """
+
+         # As fit_predict would be different from fit.predict, fit_predict is
+         # only available for outlier detection (novelty=False)
+
+         return self.fit(X)._predict()
+
+     @_fit_context(
+         # LocalOutlierFactor.metric is not validated yet
+         prefer_skip_nested_validation=False
+     )
+     def fit(self, X, y=None):
+         """Fit the local outlier factor detector from the training dataset.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
+                 (n_samples, n_samples) if metric='precomputed'
+             Training data.
+
+         y : Ignored
+             Not used, present for API consistency by convention.
+
+         Returns
+         -------
+         self : LocalOutlierFactor
+             The fitted local outlier factor detector.
+         """
+         self._fit(X)
+
+         n_samples = self.n_samples_fit_
+         if self.n_neighbors > n_samples:
+             warnings.warn(
+                 "n_neighbors (%s) is greater than the "
+                 "total number of samples (%s). n_neighbors "
+                 "will be set to (n_samples - 1) for estimation."
+                 % (self.n_neighbors, n_samples)
+             )
+         self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
+
+         self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
+             n_neighbors=self.n_neighbors_
+         )
+
+         if self._fit_X.dtype == np.float32:
+             self._distances_fit_X_ = self._distances_fit_X_.astype(
+                 self._fit_X.dtype,
+                 copy=False,
+             )
+
+         self._lrd = self._local_reachability_density(
+             self._distances_fit_X_, _neighbors_indices_fit_X_
+         )
+
+         # Compute lof score over training samples to define offset_:
+         lrd_ratios_array = (
+             self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
+         )
+
+         self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
+
+         if self.contamination == "auto":
+             # inliers score around -1 (the higher, the less abnormal).
+             self.offset_ = -1.5
+         else:
+             self.offset_ = np.percentile(
+                 self.negative_outlier_factor_, 100.0 * self.contamination
+             )
+
+         return self
+
+     def _check_novelty_predict(self):
+         if not self.novelty:
+             msg = (
+                 "predict is not available when novelty=False, use "
+                 "fit_predict if you want to predict on training data. Use "
+                 "novelty=True if you want to use LOF for novelty detection "
+                 "and predict on new unseen data."
+             )
+             raise AttributeError(msg)
+         return True
+
+     @available_if(_check_novelty_predict)
+     def predict(self, X=None):
+         """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
+
+         **Only available for novelty detection (when novelty is set to True).**
+         This method makes it possible to generalize prediction to *new
+         observations* (not in the training set). Note that the result of
+         ``clf.fit(X)`` then ``clf.predict(X)`` with ``novelty=True`` may
+         differ from the result obtained by ``clf.fit_predict(X)`` with
+         ``novelty=False``.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+             The query sample or samples to compute the Local Outlier Factor
+             w.r.t. the training samples.
+
+         Returns
+         -------
+         is_inlier : ndarray of shape (n_samples,)
+             Returns -1 for anomalies/outliers and +1 for inliers.
+         """
+         return self._predict(X)
+
+     def _predict(self, X=None):
+         """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
+
+         If X is None, returns the same as fit_predict(X_train).
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
+             The query sample or samples to compute the Local Outlier Factor
+             w.r.t. the training samples. If None, makes prediction on the
+             training data without considering them as their own neighbors.
+
+         Returns
+         -------
+         is_inlier : ndarray of shape (n_samples,)
+             Returns -1 for anomalies/outliers and +1 for inliers.
+         """
+         check_is_fitted(self)
+
+         if X is not None:
+             X = check_array(X, accept_sparse="csr")
+             is_inlier = np.ones(X.shape[0], dtype=int)
+             is_inlier[self.decision_function(X) < 0] = -1
+         else:
+             is_inlier = np.ones(self.n_samples_fit_, dtype=int)
+             is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
+
+         return is_inlier
+
+     def _check_novelty_decision_function(self):
+         if not self.novelty:
+             msg = (
+                 "decision_function is not available when novelty=False. "
+                 "Use novelty=True if you want to use LOF for novelty "
+                 "detection and compute decision_function for new unseen "
+                 "data. Note that the opposite LOF of the training samples "
+                 "is always available by considering the "
+                 "negative_outlier_factor_ attribute."
+             )
+             raise AttributeError(msg)
+         return True
+
+     @available_if(_check_novelty_decision_function)
+     def decision_function(self, X):
+         """Shifted opposite of the Local Outlier Factor of X.
+
+         Bigger is better, i.e. large values correspond to inliers.
+
+         **Only available for novelty detection (when novelty is set to True).**
+         The shift offset allows a zero threshold for being an outlier.
+         The argument X is supposed to contain *new data*: if X contains a
+         point from training, it considers the latter in its own neighborhood.
+         Also, the samples in X are not considered in the neighborhood of any
+         point.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+             The query sample or samples to compute the Local Outlier Factor
+             w.r.t. the training samples.
+
+         Returns
+         -------
+         shifted_opposite_lof_scores : ndarray of shape (n_samples,)
+             The shifted opposite of the Local Outlier Factor of each input
+             samples. The lower, the more abnormal. Negative scores represent
+             outliers, positive scores represent inliers.
+         """
+         return self.score_samples(X) - self.offset_
+
+     def _check_novelty_score_samples(self):
+         if not self.novelty:
+             msg = (
+                 "score_samples is not available when novelty=False. The "
+                 "scores of the training samples are always available "
+                 "through the negative_outlier_factor_ attribute. Use "
+                 "novelty=True if you want to use LOF for novelty detection "
+                 "and compute score_samples for new unseen data."
+             )
+             raise AttributeError(msg)
+         return True
+
+     @available_if(_check_novelty_score_samples)
+     def score_samples(self, X):
+         """Opposite of the Local Outlier Factor of X.
+
+         It is the opposite as bigger is better, i.e. large values correspond
+         to inliers.
+
+         **Only available for novelty detection (when novelty is set to True).**
+         The argument X is supposed to contain *new data*: if X contains a
+         point from training, it considers the latter in its own neighborhood.
+         Also, the samples in X are not considered in the neighborhood of any
+         point. Because of this, the scores obtained via ``score_samples`` may
+         differ from the standard LOF scores.
+         The standard LOF scores for the training data are available via the
+         ``negative_outlier_factor_`` attribute.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+             The query sample or samples to compute the Local Outlier Factor
+             w.r.t. the training samples.
+
+         Returns
+         -------
+         opposite_lof_scores : ndarray of shape (n_samples,)
+             The opposite of the Local Outlier Factor of each input samples.
+             The lower, the more abnormal.
+         """
+         check_is_fitted(self)
+         X = check_array(X, accept_sparse="csr")
+
+         distances_X, neighbors_indices_X = self.kneighbors(
+             X, n_neighbors=self.n_neighbors_
+         )
+
+         if X.dtype == np.float32:
+             distances_X = distances_X.astype(X.dtype, copy=False)
+
+         X_lrd = self._local_reachability_density(
+             distances_X,
+             neighbors_indices_X,
+         )
+
+         lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
+
+         # as bigger is better:
+         return -np.mean(lrd_ratios_array, axis=1)
+
+     def _local_reachability_density(self, distances_X, neighbors_indices):
+         """The local reachability density (LRD)
+
+         The LRD of a sample is the inverse of the average reachability
+         distance of its k-nearest neighbors.
+
+         Parameters
+         ----------
+         distances_X : ndarray of shape (n_queries, self.n_neighbors)
+             Distances to the neighbors (in the training samples `self._fit_X`)
+             of each query point to compute the LRD.
+
+         neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
+             Neighbors indices (of each query point) among training samples
+             self._fit_X.
+
+         Returns
+         -------
+         local_reachability_density : ndarray of shape (n_queries,)
+             The local reachability density of each sample.
+         """
+         dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]
+         reach_dist_array = np.maximum(distances_X, dist_k)
+
+         # 1e-10 to avoid `nan` when the number of duplicates > n_neighbors_:
+         return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
+
+     def _more_tags(self):
+         return {
+             "preserves_dtype": [np.float64, np.float32],
+         }
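
To make the lrd/LOF arithmetic above concrete (an illustrative aside, not part of the committed diff), a minimal NumPy sketch mirroring `_local_reachability_density` and the ratio averaging in `fit`; the array values are illustrative:

    import numpy as np

    # Toy precomputed quantities for 3 samples with k=1 neighbor each.
    distances_X = np.array([[1.0], [1.0], [5.0]])       # distance to each sample's neighbor
    dist_k = np.array([[1.0], [1.0], [1.0]])            # each neighbor's own k-th distance
    reach = np.maximum(distances_X, dist_k)             # reachability distances
    lrd = 1.0 / (reach.mean(axis=1) + 1e-10)            # local reachability density
    neighbors = np.array([[1], [0], [0]])               # neighbor indices
    lof = (lrd[neighbors] / lrd[:, None]).mean(axis=1)  # [1, 1, 5]: ~1 for inliers, >1 for outliers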
venv/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Nearest Centroid Classification
3
+ """
4
+
5
+ # Author: Robert Layton <[email protected]>
6
+ # Olivier Grisel <[email protected]>
7
+ #
8
+ # License: BSD 3 clause
9
+
10
+ import warnings
11
+ from numbers import Real
12
+
13
+ import numpy as np
14
+ from scipy import sparse as sp
15
+
16
+ from sklearn.metrics.pairwise import _VALID_METRICS
17
+
18
+ from ..base import BaseEstimator, ClassifierMixin, _fit_context
19
+ from ..metrics.pairwise import pairwise_distances_argmin
20
+ from ..preprocessing import LabelEncoder
21
+ from ..utils._param_validation import Interval, StrOptions
22
+ from ..utils.multiclass import check_classification_targets
23
+ from ..utils.sparsefuncs import csc_median_axis_0
24
+ from ..utils.validation import check_is_fitted
25
+
26
+
27
+ class NearestCentroid(ClassifierMixin, BaseEstimator):
28
+ """Nearest centroid classifier.
29
+
30
+ Each class is represented by its centroid, with test samples classified to
31
+ the class with the nearest centroid.
32
+
33
+ Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
34
+
35
+ Parameters
36
+ ----------
37
+ metric : str or callable, default="euclidean"
38
+ Metric to use for distance computation. See the documentation of
39
+ `scipy.spatial.distance
40
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
41
+ the metrics listed in
42
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
43
+ values. Note that "wminkowski", "seuclidean" and "mahalanobis" are not
44
+ supported.
45
+
46
+ The centroids for the samples corresponding to each class is
47
+ the point from which the sum of the distances (according to the metric)
48
+ of all samples that belong to that particular class are minimized.
49
+ If the `"manhattan"` metric is provided, this centroid is the median
50
+ and for all other metrics, the centroid is now set to be the mean.
51
+
52
+ .. deprecated:: 1.3
53
+ Support for metrics other than `euclidean` and `manhattan` and for
54
+ callables was deprecated in version 1.3 and will be removed in
55
+ version 1.5.
56
+
57
+ .. versionchanged:: 0.19
58
+ `metric='precomputed'` was deprecated and now raises an error
59
+
60
+ shrink_threshold : float, default=None
61
+ Threshold for shrinking centroids to remove features.
62
+
63
+ Attributes
64
+ ----------
65
+ centroids_ : array-like of shape (n_classes, n_features)
66
+ Centroid of each class.
67
+
68
+ classes_ : array of shape (n_classes,)
69
+ The unique classes labels.
70
+
71
+ n_features_in_ : int
72
+ Number of features seen during :term:`fit`.
73
+
74
+ .. versionadded:: 0.24
75
+
76
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
77
+ Names of features seen during :term:`fit`. Defined only when `X`
78
+ has feature names that are all strings.
79
+
80
+ .. versionadded:: 1.0
81
+
82
+ See Also
83
+ --------
84
+ KNeighborsClassifier : Nearest neighbors classifier.
85
+
86
+ Notes
87
+ -----
88
+ When used for text classification with tf-idf vectors, this classifier is
89
+ also known as the Rocchio classifier.
90
+
91
+ References
92
+ ----------
93
+ Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
94
+ multiple cancer types by shrunken centroids of gene expression. Proceedings
95
+ of the National Academy of Sciences of the United States of America,
96
+ 99(10), 6567-6572. The National Academy of Sciences.
97
+
98
+ Examples
99
+ --------
100
+ >>> from sklearn.neighbors import NearestCentroid
101
+ >>> import numpy as np
102
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
103
+ >>> y = np.array([1, 1, 1, 2, 2, 2])
104
+ >>> clf = NearestCentroid()
105
+ >>> clf.fit(X, y)
106
+ NearestCentroid()
107
+ >>> print(clf.predict([[-0.8, -1]]))
108
+ [1]
109
+ """
110
+
111
+ _valid_metrics = set(_VALID_METRICS) - {"mahalanobis", "seuclidean", "wminkowski"}
112
+
113
+ _parameter_constraints: dict = {
114
+ "metric": [
115
+ StrOptions(
116
+ _valid_metrics, deprecated=_valid_metrics - {"manhattan", "euclidean"}
117
+ ),
118
+ callable,
119
+ ],
120
+ "shrink_threshold": [Interval(Real, 0, None, closed="neither"), None],
121
+ }
122
+
123
+ def __init__(self, metric="euclidean", *, shrink_threshold=None):
124
+ self.metric = metric
125
+ self.shrink_threshold = shrink_threshold
126
+
127
+ @_fit_context(prefer_skip_nested_validation=True)
128
+ def fit(self, X, y):
129
+ """
130
+ Fit the NearestCentroid model according to the given training data.
131
+
132
+ Parameters
133
+ ----------
134
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
135
+ Training vector, where `n_samples` is the number of samples and
136
+ `n_features` is the number of features.
137
+ Note that centroid shrinking cannot be used with sparse matrices.
138
+ y : array-like of shape (n_samples,)
139
+ Target values.
140
+
141
+ Returns
142
+ -------
143
+ self : object
144
+ Fitted estimator.
145
+ """
146
+ if isinstance(self.metric, str) and self.metric not in (
147
+ "manhattan",
148
+ "euclidean",
149
+ ):
150
+ warnings.warn(
151
+ (
152
+ "Support for distance metrics other than euclidean and "
153
+ "manhattan and for callables was deprecated in version "
154
+ "1.3 and will be removed in version 1.5."
155
+ ),
156
+ FutureWarning,
157
+ )
158
+
159
+ # If X is sparse and the metric is "manhattan", store it in a csc
160
+ # format is easier to calculate the median.
161
+ if self.metric == "manhattan":
162
+ X, y = self._validate_data(X, y, accept_sparse=["csc"])
163
+ else:
164
+ X, y = self._validate_data(X, y, accept_sparse=["csr", "csc"])
165
+ is_X_sparse = sp.issparse(X)
166
+ if is_X_sparse and self.shrink_threshold:
167
+ raise ValueError("threshold shrinking not supported for sparse input")
168
+ check_classification_targets(y)
169
+
170
+ n_samples, n_features = X.shape
171
+ le = LabelEncoder()
172
+ y_ind = le.fit_transform(y)
173
+ self.classes_ = classes = le.classes_
174
+ n_classes = classes.size
175
+ if n_classes < 2:
176
+ raise ValueError(
177
+ "The number of classes has to be greater than one; got %d class"
178
+ % (n_classes)
179
+ )
180
+
181
+ # Mask mapping each class to its members.
182
+ self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
183
+ # Number of clusters in each class.
184
+         nk = np.zeros(n_classes)
+
+         for cur_class in range(n_classes):
+             center_mask = y_ind == cur_class
+             nk[cur_class] = np.sum(center_mask)
+             if is_X_sparse:
+                 center_mask = np.where(center_mask)[0]
+
+             if self.metric == "manhattan":
+                 # NumPy does not calculate median of sparse matrices.
+                 if not is_X_sparse:
+                     self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
+                 else:
+                     self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
+             else:
+                 # TODO(1.5) remove warning when metric is only manhattan or euclidean
+                 if self.metric != "euclidean":
+                     warnings.warn(
+                         "Averaging for metrics other than "
+                         "euclidean and manhattan not supported. "
+                         "The average is set to be the mean."
+                     )
+                 self.centroids_[cur_class] = X[center_mask].mean(axis=0)
+
+         if self.shrink_threshold:
+             if np.all(np.ptp(X, axis=0) == 0):
+                 raise ValueError("All features have zero variance. Division by zero.")
+             dataset_centroid_ = np.mean(X, axis=0)
+
+             # m parameter for determining deviation
+             m = np.sqrt((1.0 / nk) - (1.0 / n_samples))
+             # Calculate deviation using the pooled within-class standard
+             # deviation of each feature.
+             variance = (X - self.centroids_[y_ind]) ** 2
+             variance = variance.sum(axis=0)
+             s = np.sqrt(variance / (n_samples - n_classes))
+             s += np.median(s)  # To deter outliers from affecting the results.
+             mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
+             ms = mm * s
+             deviation = (self.centroids_ - dataset_centroid_) / ms
+             # Soft thresholding: if the deviation crosses 0 during shrinking,
+             # it becomes zero.
+             signs = np.sign(deviation)
+             deviation = np.abs(deviation) - self.shrink_threshold
+             np.clip(deviation, 0, None, out=deviation)
+             deviation *= signs
+             # Now adjust the centroids using the deviation
+             msd = ms * deviation
+             self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
+         return self
+
+     # TODO(1.5) remove note about precomputed metric
+     def predict(self, X):
+         """Perform classification on an array of test vectors `X`.
+
+         The predicted class `C` for each sample in `X` is returned.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+             Test samples.
+
+         Returns
+         -------
+         C : ndarray of shape (n_samples,)
+             The predicted classes.
+
+         Notes
+         -----
+         If the metric constructor parameter is `"precomputed"`, `X` is assumed
+         to be the distance matrix between the data to be predicted and
+         `self.centroids_`.
+         """
+         check_is_fitted(self)
+
+         X = self._validate_data(X, accept_sparse="csr", reset=False)
+         return self.classes_[
+             pairwise_distances_argmin(X, self.centroids_, metric=self.metric)
+         ]
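For quick reference, a minimal usage sketch of the estimator above (the data mirrors the class docstring example; `shrink_threshold=0.2` is an arbitrary illustration value). With shrinkage, per-feature deviations of each class centroid from the dataset centroid are soft-thresholded, which can zero out uninformative features entirely:

    import numpy as np
    from sklearn.neighbors import NearestCentroid

    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])

    # Plain nearest-centroid classification.
    clf = NearestCentroid().fit(X, y)
    print(clf.predict([[-0.8, -1]]))  # [1]

    # With shrinkage: centroids are pulled toward the dataset centroid.
    clf_shrunk = NearestCentroid(shrink_threshold=0.2).fit(X, y)
    print(clf_shrunk.centroids_)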
venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd ADDED
@@ -0,0 +1,10 @@
+ from cython cimport floating
+ from ..utils._typedefs cimport float64_t, intp_t
+
+ cdef int partition_node_indices(
+         const floating *data,
+         intp_t *node_indices,
+         intp_t split_dim,
+         intp_t split_index,
+         intp_t n_features,
+         intp_t n_points) except -1
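The matching `.pyx` body is not part of this diff; conceptually, `partition_node_indices` rearranges a node's index array around a pivot position along a single dimension, without fully sorting, much like NumPy's `argpartition`. A rough Python sketch under that assumption (this stand-in is hypothetical, not the library's implementation):

    import numpy as np

    def partition_node_indices(data, node_indices, split_dim, split_index):
        # Reorder node_indices in place so that the first split_index entries
        # reference rows whose value along split_dim is <= the pivot value,
        # and the remaining entries reference rows with values >= it.
        values = data[node_indices, split_dim]
        order = np.argpartition(values, split_index)
        node_indices[:] = node_indices[order]

    rng = np.random.RandomState(0)
    data = rng.rand(10, 3)
    idx = np.arange(10)
    partition_node_indices(data, idx, split_dim=1, split_index=5)
    assert data[idx[:5], 1].max() <= data[idx[5:], 1].min()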
venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd ADDED
@@ -0,0 +1,92 @@
+ # Author: Thomas Moreau <[email protected]>
+ # Author: Olivier Grisel <[email protected]>
+
+ # See quad_tree.pyx for details.
+
+ cimport numpy as cnp
+ from ..utils._typedefs cimport float32_t, intp_t
+
+ # This is effectively an ifdef statement in Cython.
+ # It allows us to write printf debugging lines
+ # and remove them at compile time.
+ cdef enum:
+     DEBUGFLAG = 0
+
+ cdef float EPSILON = 1e-6
+
+ # XXX: Careful not to change the order of the arguments: it is important
+ # that is_leaf and max_width stay consecutive, as this avoids padding by
+ # the compiler and keeps the struct size coherent between the C and numpy
+ # data structures.
+ cdef struct Cell:
+     # Base storage structure for cells in a QuadTree object
+
+     # Tree structure
+     intp_t parent                # Parent cell of this cell
+     intp_t[8] children           # Array pointing to children of this cell
+
+     # Cell description
+     intp_t cell_id               # Id of the cell in the cells array in the Tree
+     intp_t point_index           # Index of the point at this cell (only defined
+                                  # in non-empty leaves)
+     bint is_leaf                 # Does this cell have children?
+     float32_t squared_max_width  # Squared value of the maximum width w
+     intp_t depth                 # Depth of the cell in the tree
+     intp_t cumulative_size       # Number of points included in the subtree with
+                                  # this cell as a root
+
+     # Internal constants
+     float32_t[3] center          # Store the center for quick split of cells
+     float32_t[3] barycenter      # Keep track of the center of mass of the cell
+
+     # Cell boundaries
+     float32_t[3] min_bounds      # Inferior boundaries of this cell (inclusive)
+     float32_t[3] max_bounds      # Superior boundaries of this cell (exclusive)
+
+
+ cdef class _QuadTree:
+     # The QuadTree object is a quad-tree structure, constructed by recursively
+     # inserting points into the tree and splitting cells in 4 so that each
+     # leaf cell contains at most one point.
+     # This structure also handles 3D data, inserted in trees with 8 children
+     # for each node.
+
+     # Parameters of the tree
+     cdef public int n_dimensions         # Number of dimensions in X
+     cdef public int verbose              # Verbosity of the output
+     cdef intp_t n_cells_per_cell         # Number of children per node (2 ** n_dimensions)
+
+     # Tree inner structure
+     cdef public intp_t max_depth         # Max depth of the tree
+     cdef public intp_t cell_count        # Counter for node IDs
+     cdef public intp_t capacity          # Capacity of tree, in terms of nodes
+     cdef public intp_t n_points          # Total number of points
+     cdef Cell* cells                     # Array of nodes
+
+     # Point insertion methods
+     cdef int insert_point(self, float32_t[3] point, intp_t point_index,
+                           intp_t cell_id=*) except -1 nogil
+     cdef intp_t _insert_point_in_new_child(self, float32_t[3] point, Cell* cell,
+                                            intp_t point_index, intp_t size=*
+                                            ) noexcept nogil
+     cdef intp_t _select_child(self, float32_t[3] point, Cell* cell) noexcept nogil
+     cdef bint _is_duplicate(self, float32_t[3] point1, float32_t[3] point2) noexcept nogil
+
+     # Create a summary of the tree compared to a query point
+     cdef long summarize(self, float32_t[3] point, float32_t* results,
+                         float squared_theta=*, intp_t cell_id=*, long idx=*
+                         ) noexcept nogil
+
+     # Internal cell initialization methods
+     cdef void _init_cell(self, Cell* cell, intp_t parent, intp_t depth) noexcept nogil
+     cdef void _init_root(self, float32_t[3] min_bounds, float32_t[3] max_bounds
+                          ) noexcept nogil
+
+     # Private methods
+     cdef int _check_point_in_cell(self, float32_t[3] point, Cell* cell
+                                   ) except -1 nogil
+
+     # Private array manipulation to manage the ``cells`` array
+     cdef int _resize(self, intp_t capacity) except -1 nogil
+     cdef int _resize_c(self, intp_t capacity=*) except -1 nogil
+     cdef int _get_cell(self, float32_t[3] point, intp_t cell_id=*) except -1 nogil
+     cdef Cell[:] _get_cell_ndarray(self)
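For intuition about `_select_child` and the `children` array above: one comparison per dimension maps a point to one of `2 ** n_dimensions` children of a cell. A rough Python sketch of this indexing (the exact bit ordering in the Cython implementation may differ; `select_child_index` is a hypothetical helper):

    def select_child_index(point, center, n_dimensions):
        # One bit per dimension: 0 if the coordinate lies below the cell
        # center, 1 otherwise. For n_dimensions == 3 this yields one of
        # the 8 slots in Cell.children.
        index = 0
        for d in range(n_dimensions):
            index = 2 * index + (point[d] >= center[d])
        return index

    print(select_child_index([0.2, 0.9], [0.5, 0.5], 2))  # 1 (low x, high y)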
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc ADDED
Binary file (6.2 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc ADDED
Binary file (2.95 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc ADDED
Binary file (3.42 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc ADDED
Binary file (7.34 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc ADDED
Binary file (9.1 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc ADDED
Binary file (15.3 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc ADDED
Binary file (4.71 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc ADDED
Binary file (55.6 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc ADDED
Binary file (5.25 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc ADDED
Binary file (8.05 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc ADDED
Binary file (3.22 kB)
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py ADDED
@@ -0,0 +1,200 @@
+ import itertools
+
+ import numpy as np
+ import pytest
+ from numpy.testing import assert_allclose, assert_array_almost_equal, assert_equal
+
+ from sklearn.neighbors._ball_tree import BallTree, BallTree32, BallTree64
+ from sklearn.utils import check_random_state
+ from sklearn.utils._testing import _convert_container
+ from sklearn.utils.validation import check_array
+
+ rng = np.random.RandomState(10)
+ V_mahalanobis = rng.rand(3, 3)
+ V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
+
+ DIMENSION = 3
+
+ METRICS = {
+     "euclidean": {},
+     "manhattan": {},
+     "minkowski": dict(p=3),
+     "chebyshev": {},
+ }
+
+ DISCRETE_METRICS = ["hamming", "canberra", "braycurtis"]
+
+ BOOLEAN_METRICS = [
+     "jaccard",
+     "dice",
+     "rogerstanimoto",
+     "russellrao",
+     "sokalmichener",
+     "sokalsneath",
+ ]
+
+ BALL_TREE_CLASSES = [
+     BallTree64,
+     BallTree32,
+ ]
+
+
+ def brute_force_neighbors(X, Y, k, metric, **kwargs):
+     from sklearn.metrics import DistanceMetric
+
+     X, Y = check_array(X), check_array(Y)
+     D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
+     ind = np.argsort(D, axis=1)[:, :k]
+     dist = D[np.arange(Y.shape[0])[:, None], ind]
+     return dist, ind
+
+
+ def test_BallTree_is_BallTree64_subclass():
+     assert issubclass(BallTree, BallTree64)
+
+
+ @pytest.mark.parametrize("metric", itertools.chain(BOOLEAN_METRICS, DISCRETE_METRICS))
+ @pytest.mark.parametrize("array_type", ["list", "array"])
+ @pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
+ def test_ball_tree_query_metrics(metric, array_type, BallTreeImplementation):
+     rng = check_random_state(0)
+     if metric in BOOLEAN_METRICS:
+         X = rng.random_sample((40, 10)).round(0)
+         Y = rng.random_sample((10, 10)).round(0)
+     elif metric in DISCRETE_METRICS:
+         X = (4 * rng.random_sample((40, 10))).round(0)
+         Y = (4 * rng.random_sample((10, 10))).round(0)
+     X = _convert_container(X, array_type)
+     Y = _convert_container(Y, array_type)
+
+     k = 5
+
+     bt = BallTreeImplementation(X, leaf_size=1, metric=metric)
+     dist1, ind1 = bt.query(Y, k)
+     dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
+     assert_array_almost_equal(dist1, dist2)
+
+
+ @pytest.mark.parametrize(
+     "BallTreeImplementation, decimal_tol", zip(BALL_TREE_CLASSES, [6, 5])
+ )
+ def test_query_haversine(BallTreeImplementation, decimal_tol):
+     rng = check_random_state(0)
+     X = 2 * np.pi * rng.random_sample((40, 2))
+     bt = BallTreeImplementation(X, leaf_size=1, metric="haversine")
+     dist1, ind1 = bt.query(X, k=5)
+     dist2, ind2 = brute_force_neighbors(X, X, k=5, metric="haversine")
+
+     assert_array_almost_equal(dist1, dist2, decimal=decimal_tol)
+     assert_array_almost_equal(ind1, ind2)
+
+
+ @pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
+ def test_array_object_type(BallTreeImplementation):
+     """Check that we do not accept object dtype array."""
+     X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
+     with pytest.raises(ValueError, match="setting an array element with a sequence"):
+         BallTreeImplementation(X)
+
+
+ @pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
+ def test_bad_pyfunc_metric(BallTreeImplementation):
+     def wrong_returned_value(x, y):
+         return "1"
+
+     def one_arg_func(x):
+         return 1.0  # pragma: no cover
+
+     X = np.ones((5, 2))
+     msg = "Custom distance function must accept two vectors and return a float."
+     with pytest.raises(TypeError, match=msg):
+         BallTreeImplementation(X, metric=wrong_returned_value)
+
+     msg = "takes 1 positional argument but 2 were given"
+     with pytest.raises(TypeError, match=msg):
+         BallTreeImplementation(X, metric=one_arg_func)
+
+
+ @pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS))
+ def test_ball_tree_numerical_consistency(global_random_seed, metric):
+     # Results on float64 and float32 versions of a dataset must be
+     # numerically close.
+     X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(
+         random_seed=global_random_seed, features=50
+     )
+
+     metric_params = METRICS.get(metric, {})
+     bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params)
+     bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params)
+
+     # Test consistency with respect to the `query` method
+     k = 5
+     dist_64, ind_64 = bt_64.query(Y_64, k=k)
+     dist_32, ind_32 = bt_32.query(Y_32, k=k)
+     assert_allclose(dist_64, dist_32, rtol=1e-5)
+     assert_equal(ind_64, ind_32)
+     assert dist_64.dtype == np.float64
+     assert dist_32.dtype == np.float32
+
+     # Test consistency with respect to the `query_radius` method
+     r = 2.38
+     ind_64 = bt_64.query_radius(Y_64, r=r)
+     ind_32 = bt_32.query_radius(Y_32, r=r)
+     for _ind64, _ind32 in zip(ind_64, ind_32):
+         assert_equal(_ind64, _ind32)
+
147
+ # with return distances being true
148
+ ind_64, dist_64 = bt_64.query_radius(Y_64, r=r, return_distance=True)
149
+ ind_32, dist_32 = bt_32.query_radius(Y_32, r=r, return_distance=True)
150
+ for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32):
151
+ assert_equal(_ind64, _ind32)
152
+ assert_allclose(_dist_64, _dist_32, rtol=1e-5)
153
+ assert _dist_64.dtype == np.float64
154
+ assert _dist_32.dtype == np.float32
155
+
156
+
157
+ @pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS))
158
+ def test_kernel_density_numerical_consistency(global_random_seed, metric):
159
+ # Test consistency with respect to the `kernel_density` method
160
+ X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
161
+
162
+ metric_params = METRICS.get(metric, {})
163
+ bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params)
164
+ bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params)
165
+
166
+ kernel = "gaussian"
167
+ h = 0.1
168
+ density64 = bt_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True)
169
+ density32 = bt_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True)
170
+ assert_allclose(density64, density32, rtol=1e-5)
171
+ assert density64.dtype == np.float64
172
+ assert density32.dtype == np.float32
173
+
174
+
175
+ def test_two_point_correlation_numerical_consistency(global_random_seed):
176
+ # Test consistency with respect to the `two_point_correlation` method
177
+ X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
178
+
179
+ bt_64 = BallTree64(X_64, leaf_size=10)
180
+ bt_32 = BallTree32(X_32, leaf_size=10)
181
+
182
+ r = np.linspace(0, 1, 10)
183
+
184
+ counts_64 = bt_64.two_point_correlation(Y_64, r=r, dualtree=True)
185
+ counts_32 = bt_32.two_point_correlation(Y_32, r=r, dualtree=True)
186
+ assert_allclose(counts_64, counts_32)
187
+
188
+
189
+ def get_dataset_for_binary_tree(random_seed, features=3):
190
+ rng = np.random.RandomState(random_seed)
191
+ _X = rng.rand(100, features)
192
+ _Y = rng.rand(5, features)
193
+
194
+ X_64 = _X.astype(dtype=np.float64, copy=False)
195
+ Y_64 = _Y.astype(dtype=np.float64, copy=False)
196
+
197
+ X_32 = _X.astype(dtype=np.float32, copy=False)
198
+ Y_32 = _Y.astype(dtype=np.float32, copy=False)
199
+
200
+ return X_64, X_32, Y_64, Y_32
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py ADDED
@@ -0,0 +1,101 @@
+ import numpy as np
+ import pytest
+
+ from sklearn.metrics import euclidean_distances
+ from sklearn.neighbors import KNeighborsTransformer, RadiusNeighborsTransformer
+ from sklearn.neighbors._base import _is_sorted_by_data
+ from sklearn.utils._testing import assert_array_equal
+
+
+ def test_transformer_result():
+     # Test the number of neighbors returned
+     n_neighbors = 5
+     n_samples_fit = 20
+     n_queries = 18
+     n_features = 10
+
+     rng = np.random.RandomState(42)
+     X = rng.randn(n_samples_fit, n_features)
+     X2 = rng.randn(n_queries, n_features)
+     radius = np.percentile(euclidean_distances(X), 10)
+
+     # with n_neighbors
+     for mode in ["distance", "connectivity"]:
+         add_one = mode == "distance"
+         nnt = KNeighborsTransformer(n_neighbors=n_neighbors, mode=mode)
+         Xt = nnt.fit_transform(X)
+         assert Xt.shape == (n_samples_fit, n_samples_fit)
+         assert Xt.data.shape == (n_samples_fit * (n_neighbors + add_one),)
+         assert Xt.format == "csr"
+         assert _is_sorted_by_data(Xt)
+
+         X2t = nnt.transform(X2)
+         assert X2t.shape == (n_queries, n_samples_fit)
+         assert X2t.data.shape == (n_queries * (n_neighbors + add_one),)
+         assert X2t.format == "csr"
+         assert _is_sorted_by_data(X2t)
+
+     # with radius
+     for mode in ["distance", "connectivity"]:
+         add_one = mode == "distance"
+         nnt = RadiusNeighborsTransformer(radius=radius, mode=mode)
+         Xt = nnt.fit_transform(X)
+         assert Xt.shape == (n_samples_fit, n_samples_fit)
+         assert not Xt.data.shape == (n_samples_fit * (n_neighbors + add_one),)
+         assert Xt.format == "csr"
+         assert _is_sorted_by_data(Xt)
+
+         X2t = nnt.transform(X2)
+         assert X2t.shape == (n_queries, n_samples_fit)
+         assert not X2t.data.shape == (n_queries * (n_neighbors + add_one),)
+         assert X2t.format == "csr"
+         assert _is_sorted_by_data(X2t)
+
+
+ def _has_explicit_diagonal(X):
+     """Return True if the diagonal is explicitly stored"""
+     X = X.tocoo()
+     explicit = X.row[X.row == X.col]
+     return len(explicit) == X.shape[0]
+
+
+ def test_explicit_diagonal():
+     # Test that the diagonal is explicitly stored in the sparse graph
+     n_neighbors = 5
+     n_samples_fit, n_samples_transform, n_features = 20, 18, 10
+     rng = np.random.RandomState(42)
+     X = rng.randn(n_samples_fit, n_features)
+     X2 = rng.randn(n_samples_transform, n_features)
+
+     nnt = KNeighborsTransformer(n_neighbors=n_neighbors)
+     Xt = nnt.fit_transform(X)
+     assert _has_explicit_diagonal(Xt)
+     assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0)
+
+     Xt = nnt.transform(X)
+     assert _has_explicit_diagonal(Xt)
+     assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0)
+
+     # Using transform on new data should not always have zero diagonal
+     X2t = nnt.transform(X2)
+     assert not _has_explicit_diagonal(X2t)
+
+
+ @pytest.mark.parametrize("Klass", [KNeighborsTransformer, RadiusNeighborsTransformer])
+ def test_graph_feature_names_out(Klass):
+     """Check `get_feature_names_out` for transformers defined in `_graph.py`."""
+
+     n_samples_fit = 20
+     n_features = 10
+     rng = np.random.RandomState(42)
+     X = rng.randn(n_samples_fit, n_features)
+
+     est = Klass().fit(X)
+     names_out = est.get_feature_names_out()
+
+     class_name_lower = Klass.__name__.lower()
+     expected_names_out = np.array(
+         [f"{class_name_lower}{i}" for i in range(est.n_samples_fit_)],
+         dtype=object,
+     )
+     assert_array_equal(names_out, expected_names_out)
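The `add_one` bookkeeping in `test_transformer_result` reflects that, in `"distance"` mode, each fitted sample is stored as its own neighbor at distance 0, so the fitted graph holds `n_neighbors + 1` explicit entries per row. A small illustration of that invariant (sizes chosen arbitrarily):

    import numpy as np
    from sklearn.neighbors import KNeighborsTransformer

    X = np.random.RandomState(0).randn(6, 2)
    graph = KNeighborsTransformer(n_neighbors=2, mode="distance").fit_transform(X)
    # 6 rows x (2 neighbors + the sample itself) = 18 stored entries,
    # with an explicit zero on the diagonal for each self-edge.
    print(graph.shape, graph.nnz)  # (6, 6) 18
    print(graph.diagonal())       # all zeros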
venv/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py ADDED
@@ -0,0 +1,100 @@
+ import numpy as np
+ import pytest
+ from numpy.testing import assert_allclose, assert_equal
+
+ from sklearn.neighbors._kd_tree import KDTree, KDTree32, KDTree64
+ from sklearn.neighbors.tests.test_ball_tree import get_dataset_for_binary_tree
+ from sklearn.utils.parallel import Parallel, delayed
+
+ DIMENSION = 3
+
+ METRICS = {"euclidean": {}, "manhattan": {}, "chebyshev": {}, "minkowski": dict(p=3)}
+
+ KD_TREE_CLASSES = [
+     KDTree64,
+     KDTree32,
+ ]
+
+
+ def test_KDTree_is_KDTree64_subclass():
+     assert issubclass(KDTree, KDTree64)
+
+
+ @pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES)
+ def test_array_object_type(BinarySearchTree):
+     """Check that we do not accept object dtype array."""
+     X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
+     with pytest.raises(ValueError, match="setting an array element with a sequence"):
+         BinarySearchTree(X)
+
+
+ @pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES)
+ def test_kdtree_picklable_with_joblib(BinarySearchTree):
+     """Make sure that KDTree queries work when joblib memmaps.
+
+     Non-regression test for #21685 and #21228."""
+     rng = np.random.RandomState(0)
+     X = rng.random_sample((10, 3))
+     tree = BinarySearchTree(X, leaf_size=2)
+
+     # Call Parallel with max_nbytes=1 to trigger readonly memory mapping that
+     # used to raise "ValueError: buffer source array is read-only" in a
+     # previous version of the Cython code.
+     Parallel(n_jobs=2, max_nbytes=1)(delayed(tree.query)(data) for data in 2 * [X])
+
+
+ @pytest.mark.parametrize("metric", METRICS)
+ def test_kd_tree_numerical_consistency(global_random_seed, metric):
+     # Results on float64 and float32 versions of a dataset must be
+     # numerically close.
+     X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(
+         random_seed=global_random_seed, features=50
+     )
+
+     metric_params = METRICS.get(metric, {})
+     kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
+     kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)
+
+     # Test consistency with respect to the `query` method
+     k = 4
+     dist_64, ind_64 = kd_64.query(Y_64, k=k)
+     dist_32, ind_32 = kd_32.query(Y_32, k=k)
+     assert_allclose(dist_64, dist_32, rtol=1e-5)
+     assert_equal(ind_64, ind_32)
+     assert dist_64.dtype == np.float64
+     assert dist_32.dtype == np.float32
+
+     # Test consistency with respect to the `query_radius` method
+     r = 2.38
+     ind_64 = kd_64.query_radius(Y_64, r=r)
+     ind_32 = kd_32.query_radius(Y_32, r=r)
+     for _ind64, _ind32 in zip(ind_64, ind_32):
+         assert_equal(_ind64, _ind32)
+
74
+ # Test consistency with respect to the `query_radius` method
75
+ # with return distances being true
76
+ ind_64, dist_64 = kd_64.query_radius(Y_64, r=r, return_distance=True)
77
+ ind_32, dist_32 = kd_32.query_radius(Y_32, r=r, return_distance=True)
78
+ for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32):
79
+ assert_equal(_ind64, _ind32)
80
+ assert_allclose(_dist_64, _dist_32, rtol=1e-5)
81
+ assert _dist_64.dtype == np.float64
82
+ assert _dist_32.dtype == np.float32
83
+
84
+
85
+ @pytest.mark.parametrize("metric", METRICS)
86
+ def test_kernel_density_numerical_consistency(global_random_seed, metric):
87
+ # Test consistency with respect to the `kernel_density` method
88
+ X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
89
+
90
+ metric_params = METRICS.get(metric, {})
91
+ kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
92
+ kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)
93
+
94
+ kernel = "gaussian"
95
+ h = 0.1
96
+ density64 = kd_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True)
97
+ density32 = kd_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True)
98
+ assert_allclose(density64, density32, rtol=1e-5)
99
+ assert density64.dtype == np.float64
100
+ assert density32.dtype == np.float32