applied-ai-018 committed on
Commit 6d870c6 · verified · 1 Parent(s): 4fe907d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg_sq.pt +3 -0
  4. venv/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py +19 -0
  5. venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py +452 -0
  12. venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py +197 -0
  13. venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  14. venv/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py +325 -0
  15. venv/lib/python3.10/site-packages/sklearn/feature_extraction/image.py +671 -0
  16. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py +0 -0
  17. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py +262 -0
  23. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py +160 -0
  24. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py +356 -0
  25. venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py +1655 -0
  26. venv/lib/python3.10/site-packages/sklearn/feature_extraction/text.py +2166 -0
  27. venv/lib/python3.10/site-packages/sklearn/svm/__init__.py +25 -0
  28. venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/__init__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_base.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_bounds.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_classes.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/svm/_base.py +1249 -0
  33. venv/lib/python3.10/site-packages/sklearn/svm/_bounds.py +94 -0
  34. venv/lib/python3.10/site-packages/sklearn/svm/_classes.py +1832 -0
  35. venv/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so +0 -0
  37. venv/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so +0 -0
  38. venv/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so +0 -0
  39. venv/lib/python3.10/site-packages/sklearn/svm/tests/__init__.py +0 -0
  40. venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_svm.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py +142 -0
  45. venv/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py +493 -0
  46. venv/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py +1434 -0
  47. venv/lib/python3.10/site-packages/sklearn/utils/__init__.py +1299 -0
  48. venv/lib/python3.10/site-packages/sklearn/utils/_arpack.py +30 -0
  49. venv/lib/python3.10/site-packages/sklearn/utils/_array_api.py +575 -0
  50. venv/lib/python3.10/site-packages/sklearn/utils/_available_if.py +93 -0
ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc6549c0a88fbfc97075ef2d2cbbe6845eb2da840f7a8893bf3cc6564668b787
+ size 33555627
ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f130d87201997e48c47b0ea865c94a40798b6dbcbe1d99e771a2f139b2c2b6f1
+ size 33555533
ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76f467b45af601530430872d2c6314dff0b05a9f74496ad4d67a7030eb74f05c
+ size 16778411
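The three ckpts/ entries above are Git LFS pointer files: only the oid and size live in the repository, and the real tensors have to be fetched through git-lfs. A minimal sketch of inspecting one shard after `git lfs pull` has replaced the pointer with the actual payload — it is not part of the commit, it assumes PyTorch is installed, and the type of object stored in the .pt file is an assumption:

    # Hypothetical inspection script; the .pt payload is assumed to be a tensor
    # or a small state dict written by the training run.
    import torch

    path = "ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt"
    obj = torch.load(path, map_location="cpu")
    print(type(obj))
    if hasattr(obj, "shape"):
        print(obj.shape, obj.dtype)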
venv/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py ADDED
@@ -0,0 +1,19 @@
+ """
+ The :mod:`sklearn.feature_extraction` module deals with feature extraction
+ from raw data. It currently includes methods to extract features from text and
+ images.
+ """
+
+ from . import text
+ from ._dict_vectorizer import DictVectorizer
+ from ._hash import FeatureHasher
+ from .image import grid_to_graph, img_to_graph
+
+ __all__ = [
+     "DictVectorizer",
+     "image",
+     "img_to_graph",
+     "grid_to_graph",
+     "text",
+     "FeatureHasher",
+ ]
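For orientation, the names re-exported by this __init__.py are used as in the following minimal sketch (not part of the diff; it assumes the scikit-learn copy vendored in this venv is importable):

    from sklearn.feature_extraction import DictVectorizer, FeatureHasher

    v = DictVectorizer(sparse=False)
    X = v.fit_transform([{"f": "ham"}, {"f": "spam"}])
    print(v.get_feature_names_out())  # ['f=ham' 'f=spam']

    h = FeatureHasher(n_features=8, input_type="string")
    print(h.transform([["ham", "spam"]]).shape)  # (1, 8)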
venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (618 Bytes).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc ADDED
Binary file (13.5 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc ADDED
Binary file (7.93 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc ADDED
Binary file (2.47 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc ADDED
Binary file (19.8 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc ADDED
Binary file (67.1 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py ADDED
@@ -0,0 +1,452 @@
1
+ # Authors: Lars Buitinck
2
+ # Dan Blanchard <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ from array import array
6
+ from collections.abc import Iterable, Mapping
7
+ from numbers import Number
8
+ from operator import itemgetter
9
+
10
+ import numpy as np
11
+ import scipy.sparse as sp
12
+
13
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
14
+ from ..utils import check_array
15
+ from ..utils.validation import check_is_fitted
16
+
17
+
18
+ class DictVectorizer(TransformerMixin, BaseEstimator):
19
+ """Transforms lists of feature-value mappings to vectors.
20
+
21
+ This transformer turns lists of mappings (dict-like objects) of feature
22
+ names to feature values into Numpy arrays or scipy.sparse matrices for use
23
+ with scikit-learn estimators.
24
+
25
+ When feature values are strings, this transformer will do a binary one-hot
26
+ (aka one-of-K) coding: one boolean-valued feature is constructed for each
27
+ of the possible string values that the feature can take on. For instance,
28
+ a feature "f" that can take on the values "ham" and "spam" will become two
29
+ features in the output, one signifying "f=ham", the other "f=spam".
30
+
31
+ If a feature value is a sequence or set of strings, this transformer
32
+ will iterate over the values and will count the occurrences of each string
33
+ value.
34
+
35
+ However, note that this transformer will only do a binary one-hot encoding
36
+ when feature values are of type string. If categorical features are
37
+ represented as numeric values such as int or iterables of strings, the
38
+ DictVectorizer can be followed by
39
+ :class:`~sklearn.preprocessing.OneHotEncoder` to complete
40
+ binary one-hot encoding.
41
+
42
+ Features that do not occur in a sample (mapping) will have a zero value
43
+ in the resulting array/matrix.
44
+
45
+ For an efficiency comparison of the different feature extractors, see
46
+ :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
47
+
48
+ Read more in the :ref:`User Guide <dict_feature_extraction>`.
49
+
50
+ Parameters
51
+ ----------
52
+ dtype : dtype, default=np.float64
53
+ The type of feature values. Passed to Numpy array/scipy.sparse matrix
54
+ constructors as the dtype argument.
55
+ separator : str, default="="
56
+ Separator string used when constructing new features for one-hot
57
+ coding.
58
+ sparse : bool, default=True
59
+ Whether transform should produce scipy.sparse matrices.
60
+ sort : bool, default=True
61
+ Whether ``feature_names_`` and ``vocabulary_`` should be
62
+ sorted when fitting.
63
+
64
+ Attributes
65
+ ----------
66
+ vocabulary_ : dict
67
+ A dictionary mapping feature names to feature indices.
68
+
69
+ feature_names_ : list
70
+ A list of length n_features containing the feature names (e.g., "f=ham"
71
+ and "f=spam").
72
+
73
+ See Also
74
+ --------
75
+ FeatureHasher : Performs vectorization using only a hash function.
76
+ sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical
77
+ features encoded as columns of arbitrary data types.
78
+
79
+ Examples
80
+ --------
81
+ >>> from sklearn.feature_extraction import DictVectorizer
82
+ >>> v = DictVectorizer(sparse=False)
83
+ >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
84
+ >>> X = v.fit_transform(D)
85
+ >>> X
86
+ array([[2., 0., 1.],
87
+ [0., 1., 3.]])
88
+ >>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0},
89
+ ... {'baz': 1.0, 'foo': 3.0}]
90
+ True
91
+ >>> v.transform({'foo': 4, 'unseen_feature': 3})
92
+ array([[0., 0., 4.]])
93
+ """
94
+
95
+ _parameter_constraints: dict = {
96
+ "dtype": "no_validation", # validation delegated to numpy,
97
+ "separator": [str],
98
+ "sparse": ["boolean"],
99
+ "sort": ["boolean"],
100
+ }
101
+
102
+ def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True):
103
+ self.dtype = dtype
104
+ self.separator = separator
105
+ self.sparse = sparse
106
+ self.sort = sort
107
+
108
+ def _add_iterable_element(
109
+ self,
110
+ f,
111
+ v,
112
+ feature_names,
113
+ vocab,
114
+ *,
115
+ fitting=True,
116
+ transforming=False,
117
+ indices=None,
118
+ values=None,
119
+ ):
120
+ """Add feature names for iterable of strings"""
121
+ for vv in v:
122
+ if isinstance(vv, str):
123
+ feature_name = "%s%s%s" % (f, self.separator, vv)
124
+ vv = 1
125
+ else:
126
+ raise TypeError(
127
+ f"Unsupported type {type(vv)} in iterable "
128
+ "value. Only iterables of string are "
129
+ "supported."
130
+ )
131
+ if fitting and feature_name not in vocab:
132
+ vocab[feature_name] = len(feature_names)
133
+ feature_names.append(feature_name)
134
+
135
+ if transforming and feature_name in vocab:
136
+ indices.append(vocab[feature_name])
137
+ values.append(self.dtype(vv))
138
+
139
+ @_fit_context(prefer_skip_nested_validation=True)
140
+ def fit(self, X, y=None):
141
+ """Learn a list of feature name -> indices mappings.
142
+
143
+ Parameters
144
+ ----------
145
+ X : Mapping or iterable over Mappings
146
+ Dict(s) or Mapping(s) from feature names (arbitrary Python
147
+ objects) to feature values (strings or convertible to dtype).
148
+
149
+ .. versionchanged:: 0.24
150
+ Accepts multiple string values for one categorical feature.
151
+
152
+ y : (ignored)
153
+ Ignored parameter.
154
+
155
+ Returns
156
+ -------
157
+ self : object
158
+ DictVectorizer class instance.
159
+ """
160
+ feature_names = []
161
+ vocab = {}
162
+
163
+ for x in X:
164
+ for f, v in x.items():
165
+ if isinstance(v, str):
166
+ feature_name = "%s%s%s" % (f, self.separator, v)
167
+ elif isinstance(v, Number) or (v is None):
168
+ feature_name = f
169
+ elif isinstance(v, Mapping):
170
+ raise TypeError(
171
+ f"Unsupported value type {type(v)} "
172
+ f"for {f}: {v}.\n"
173
+ "Mapping objects are not supported."
174
+ )
175
+ elif isinstance(v, Iterable):
176
+ feature_name = None
177
+ self._add_iterable_element(f, v, feature_names, vocab)
178
+
179
+ if feature_name is not None:
180
+ if feature_name not in vocab:
181
+ vocab[feature_name] = len(feature_names)
182
+ feature_names.append(feature_name)
183
+
184
+ if self.sort:
185
+ feature_names.sort()
186
+ vocab = {f: i for i, f in enumerate(feature_names)}
187
+
188
+ self.feature_names_ = feature_names
189
+ self.vocabulary_ = vocab
190
+
191
+ return self
192
+
193
+ def _transform(self, X, fitting):
194
+ # Sanity check: Python's array has no way of explicitly requesting the
195
+ # signed 32-bit integers that scipy.sparse needs, so we use the next
196
+ # best thing: typecode "i" (int). However, if that gives larger or
197
+ # smaller integers than 32-bit ones, np.frombuffer screws up.
198
+ assert array("i").itemsize == 4, (
199
+ "sizeof(int) != 4 on your platform; please report this at"
200
+ " https://github.com/scikit-learn/scikit-learn/issues and"
201
+ " include the output from platform.platform() in your bug report"
202
+ )
203
+
204
+ dtype = self.dtype
205
+ if fitting:
206
+ feature_names = []
207
+ vocab = {}
208
+ else:
209
+ feature_names = self.feature_names_
210
+ vocab = self.vocabulary_
211
+
212
+ transforming = True
213
+
214
+ # Process everything as sparse regardless of setting
215
+ X = [X] if isinstance(X, Mapping) else X
216
+
217
+ indices = array("i")
218
+ indptr = [0]
219
+ # XXX we could change values to an array.array as well, but it
220
+ # would require (heuristic) conversion of dtype to typecode...
221
+ values = []
222
+
223
+ # collect all the possible feature names and build sparse matrix at
224
+ # same time
225
+ for x in X:
226
+ for f, v in x.items():
227
+ if isinstance(v, str):
228
+ feature_name = "%s%s%s" % (f, self.separator, v)
229
+ v = 1
230
+ elif isinstance(v, Number) or (v is None):
231
+ feature_name = f
232
+ elif not isinstance(v, Mapping) and isinstance(v, Iterable):
233
+ feature_name = None
234
+ self._add_iterable_element(
235
+ f,
236
+ v,
237
+ feature_names,
238
+ vocab,
239
+ fitting=fitting,
240
+ transforming=transforming,
241
+ indices=indices,
242
+ values=values,
243
+ )
244
+ else:
245
+ raise TypeError(
246
+ f"Unsupported value Type {type(v)} "
247
+ f"for {f}: {v}.\n"
248
+ f"{type(v)} objects are not supported."
249
+ )
250
+
251
+ if feature_name is not None:
252
+ if fitting and feature_name not in vocab:
253
+ vocab[feature_name] = len(feature_names)
254
+ feature_names.append(feature_name)
255
+
256
+ if feature_name in vocab:
257
+ indices.append(vocab[feature_name])
258
+ values.append(self.dtype(v))
259
+
260
+ indptr.append(len(indices))
261
+
262
+ if len(indptr) == 1:
263
+ raise ValueError("Sample sequence X is empty.")
264
+
265
+ indices = np.frombuffer(indices, dtype=np.intc)
266
+ shape = (len(indptr) - 1, len(vocab))
267
+
268
+ result_matrix = sp.csr_matrix(
269
+ (values, indices, indptr), shape=shape, dtype=dtype
270
+ )
271
+
272
+ # Sort everything if asked
273
+ if fitting and self.sort:
274
+ feature_names.sort()
275
+ map_index = np.empty(len(feature_names), dtype=np.int32)
276
+ for new_val, f in enumerate(feature_names):
277
+ map_index[new_val] = vocab[f]
278
+ vocab[f] = new_val
279
+ result_matrix = result_matrix[:, map_index]
280
+
281
+ if self.sparse:
282
+ result_matrix.sort_indices()
283
+ else:
284
+ result_matrix = result_matrix.toarray()
285
+
286
+ if fitting:
287
+ self.feature_names_ = feature_names
288
+ self.vocabulary_ = vocab
289
+
290
+ return result_matrix
291
+
292
+ @_fit_context(prefer_skip_nested_validation=True)
293
+ def fit_transform(self, X, y=None):
294
+ """Learn a list of feature name -> indices mappings and transform X.
295
+
296
+ Like fit(X) followed by transform(X), but does not require
297
+ materializing X in memory.
298
+
299
+ Parameters
300
+ ----------
301
+ X : Mapping or iterable over Mappings
302
+ Dict(s) or Mapping(s) from feature names (arbitrary Python
303
+ objects) to feature values (strings or convertible to dtype).
304
+
305
+ .. versionchanged:: 0.24
306
+ Accepts multiple string values for one categorical feature.
307
+
308
+ y : (ignored)
309
+ Ignored parameter.
310
+
311
+ Returns
312
+ -------
313
+ Xa : {array, sparse matrix}
314
+ Feature vectors; always 2-d.
315
+ """
316
+ return self._transform(X, fitting=True)
317
+
318
+ def inverse_transform(self, X, dict_type=dict):
319
+ """Transform array or sparse matrix X back to feature mappings.
320
+
321
+ X must have been produced by this DictVectorizer's transform or
322
+ fit_transform method; it may only have passed through transformers
323
+ that preserve the number of features and their order.
324
+
325
+ In the case of one-hot/one-of-K coding, the constructed feature
326
+ names and values are returned rather than the original ones.
327
+
328
+ Parameters
329
+ ----------
330
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
331
+ Sample matrix.
332
+ dict_type : type, default=dict
333
+ Constructor for feature mappings. Must conform to the
334
+ collections.Mapping API.
335
+
336
+ Returns
337
+ -------
338
+ D : list of dict_type objects of shape (n_samples,)
339
+ Feature mappings for the samples in X.
340
+ """
341
+ check_is_fitted(self, "feature_names_")
342
+
343
+ # COO matrix is not subscriptable
344
+ X = check_array(X, accept_sparse=["csr", "csc"])
345
+ n_samples = X.shape[0]
346
+
347
+ names = self.feature_names_
348
+ dicts = [dict_type() for _ in range(n_samples)]
349
+
350
+ if sp.issparse(X):
351
+ for i, j in zip(*X.nonzero()):
352
+ dicts[i][names[j]] = X[i, j]
353
+ else:
354
+ for i, d in enumerate(dicts):
355
+ for j, v in enumerate(X[i, :]):
356
+ if v != 0:
357
+ d[names[j]] = X[i, j]
358
+
359
+ return dicts
360
+
361
+ def transform(self, X):
362
+ """Transform feature->value dicts to array or sparse matrix.
363
+
364
+ Named features not encountered during fit or fit_transform will be
365
+ silently ignored.
366
+
367
+ Parameters
368
+ ----------
369
+ X : Mapping or iterable over Mappings of shape (n_samples,)
370
+ Dict(s) or Mapping(s) from feature names (arbitrary Python
371
+ objects) to feature values (strings or convertible to dtype).
372
+
373
+ Returns
374
+ -------
375
+ Xa : {array, sparse matrix}
376
+ Feature vectors; always 2-d.
377
+ """
378
+ check_is_fitted(self, ["feature_names_", "vocabulary_"])
379
+ return self._transform(X, fitting=False)
380
+
381
+ def get_feature_names_out(self, input_features=None):
382
+ """Get output feature names for transformation.
383
+
384
+ Parameters
385
+ ----------
386
+ input_features : array-like of str or None, default=None
387
+ Not used, present here for API consistency by convention.
388
+
389
+ Returns
390
+ -------
391
+ feature_names_out : ndarray of str objects
392
+ Transformed feature names.
393
+ """
394
+ check_is_fitted(self, "feature_names_")
395
+ if any(not isinstance(name, str) for name in self.feature_names_):
396
+ feature_names = [str(name) for name in self.feature_names_]
397
+ else:
398
+ feature_names = self.feature_names_
399
+ return np.asarray(feature_names, dtype=object)
400
+
401
+ def restrict(self, support, indices=False):
402
+ """Restrict the features to those in support using feature selection.
403
+
404
+ This function modifies the estimator in-place.
405
+
406
+ Parameters
407
+ ----------
408
+ support : array-like
409
+ Boolean mask or list of indices (as returned by the get_support
410
+ member of feature selectors).
411
+ indices : bool, default=False
412
+ Whether support is a list of indices.
413
+
414
+ Returns
415
+ -------
416
+ self : object
417
+ DictVectorizer class instance.
418
+
419
+ Examples
420
+ --------
421
+ >>> from sklearn.feature_extraction import DictVectorizer
422
+ >>> from sklearn.feature_selection import SelectKBest, chi2
423
+ >>> v = DictVectorizer()
424
+ >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
425
+ >>> X = v.fit_transform(D)
426
+ >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
427
+ >>> v.get_feature_names_out()
428
+ array(['bar', 'baz', 'foo'], ...)
429
+ >>> v.restrict(support.get_support())
430
+ DictVectorizer()
431
+ >>> v.get_feature_names_out()
432
+ array(['bar', 'foo'], ...)
433
+ """
434
+ check_is_fitted(self, "feature_names_")
435
+
436
+ if not indices:
437
+ support = np.where(support)[0]
438
+
439
+ names = self.feature_names_
440
+ new_vocab = {}
441
+ for i in support:
442
+ new_vocab[names[i]] = len(new_vocab)
443
+
444
+ self.vocabulary_ = new_vocab
445
+ self.feature_names_ = [
446
+ f for f, i in sorted(new_vocab.items(), key=itemgetter(1))
447
+ ]
448
+
449
+ return self
450
+
451
+ def _more_tags(self):
452
+ return {"X_types": ["dict"]}
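For readers skimming the diff, the one-hot behaviour described in the DictVectorizer docstring above looks like this in practice (a minimal sketch, not part of the commit; it assumes the vendored scikit-learn is importable):

    from sklearn.feature_extraction import DictVectorizer

    v = DictVectorizer(sparse=False)
    # String values are one-hot encoded ("f=ham", "f=spam"); numeric values pass through.
    X = v.fit_transform([{"f": "ham", "n": 2}, {"f": "spam", "n": 5}])
    print(v.get_feature_names_out())  # ['f=ham' 'f=spam' 'n']
    print(X)                          # [[1. 0. 2.]
                                      #  [0. 1. 5.]]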
venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py ADDED
@@ -0,0 +1,197 @@
1
+ # Author: Lars Buitinck
2
+ # License: BSD 3 clause
3
+
4
+ from itertools import chain
5
+ from numbers import Integral
6
+
7
+ import numpy as np
8
+ import scipy.sparse as sp
9
+
10
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
11
+ from ..utils._param_validation import Interval, StrOptions
12
+ from ._hashing_fast import transform as _hashing_transform
13
+
14
+
15
+ def _iteritems(d):
16
+ """Like d.iteritems, but accepts any collections.Mapping."""
17
+ return d.iteritems() if hasattr(d, "iteritems") else d.items()
18
+
19
+
20
+ class FeatureHasher(TransformerMixin, BaseEstimator):
21
+ """Implements feature hashing, aka the hashing trick.
22
+
23
+ This class turns sequences of symbolic feature names (strings) into
24
+ scipy.sparse matrices, using a hash function to compute the matrix column
25
+ corresponding to a name. The hash function employed is the signed 32-bit
26
+ version of Murmurhash3.
27
+
28
+ Feature names of type byte string are used as-is. Unicode strings are
29
+ converted to UTF-8 first, but no Unicode normalization is done.
30
+ Feature values must be (finite) numbers.
31
+
32
+ This class is a low-memory alternative to DictVectorizer and
33
+ CountVectorizer, intended for large-scale (online) learning and situations
34
+ where memory is tight, e.g. when running prediction code on embedded
35
+ devices.
36
+
37
+ For an efficiency comparison of the different feature extractors, see
38
+ :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
39
+
40
+ Read more in the :ref:`User Guide <feature_hashing>`.
41
+
42
+ .. versionadded:: 0.13
43
+
44
+ Parameters
45
+ ----------
46
+ n_features : int, default=2**20
47
+ The number of features (columns) in the output matrices. Small numbers
48
+ of features are likely to cause hash collisions, but large numbers
49
+ will cause larger coefficient dimensions in linear learners.
50
+ input_type : str, default='dict'
51
+ Choose a string from {'dict', 'pair', 'string'}.
52
+ Either "dict" (the default) to accept dictionaries over
53
+ (feature_name, value); "pair" to accept pairs of (feature_name, value);
54
+ or "string" to accept single strings.
55
+ feature_name should be a string, while value should be a number.
56
+ In the case of "string", a value of 1 is implied.
57
+ The feature_name is hashed to find the appropriate column for the
58
+ feature. The value's sign might be flipped in the output (but see
59
+ non_negative, below).
60
+ dtype : numpy dtype, default=np.float64
61
+ The type of feature values. Passed to scipy.sparse matrix constructors
62
+ as the dtype argument. Do not set this to bool, np.boolean or any
63
+ unsigned integer type.
64
+ alternate_sign : bool, default=True
65
+ When True, an alternating sign is added to the features as to
66
+ approximately conserve the inner product in the hashed space even for
67
+ small n_features. This approach is similar to sparse random projection.
68
+
69
+ .. versionchanged:: 0.19
70
+ ``alternate_sign`` replaces the now deprecated ``non_negative``
71
+ parameter.
72
+
73
+ See Also
74
+ --------
75
+ DictVectorizer : Vectorizes string-valued features using a hash table.
76
+ sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features.
77
+
78
+ Notes
79
+ -----
80
+ This estimator is :term:`stateless` and does not need to be fitted.
81
+ However, we recommend to call :meth:`fit_transform` instead of
82
+ :meth:`transform`, as parameter validation is only performed in
83
+ :meth:`fit`.
84
+
85
+ Examples
86
+ --------
87
+ >>> from sklearn.feature_extraction import FeatureHasher
88
+ >>> h = FeatureHasher(n_features=10)
89
+ >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
90
+ >>> f = h.transform(D)
91
+ >>> f.toarray()
92
+ array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
93
+ [ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
94
+
95
+ With `input_type="string"`, the input must be an iterable over iterables of
96
+ strings:
97
+
98
+ >>> h = FeatureHasher(n_features=8, input_type="string")
99
+ >>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]]
100
+ >>> f = h.transform(raw_X)
101
+ >>> f.toarray()
102
+ array([[ 0., 0., 0., -1., 0., -1., 0., 1.],
103
+ [ 0., 0., 0., -1., 0., -1., 0., 0.],
104
+ [ 0., -1., 0., 0., 0., 0., 0., 1.]])
105
+ """
106
+
107
+ _parameter_constraints: dict = {
108
+ "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")],
109
+ "input_type": [StrOptions({"dict", "pair", "string"})],
110
+ "dtype": "no_validation", # delegate to numpy
111
+ "alternate_sign": ["boolean"],
112
+ }
113
+
114
+ def __init__(
115
+ self,
116
+ n_features=(2**20),
117
+ *,
118
+ input_type="dict",
119
+ dtype=np.float64,
120
+ alternate_sign=True,
121
+ ):
122
+ self.dtype = dtype
123
+ self.input_type = input_type
124
+ self.n_features = n_features
125
+ self.alternate_sign = alternate_sign
126
+
127
+ @_fit_context(prefer_skip_nested_validation=True)
128
+ def fit(self, X=None, y=None):
129
+ """Only validates estimator's parameters.
130
+
131
+ This method allows to: (i) validate the estimator's parameters and
132
+ (ii) be consistent with the scikit-learn transformer API.
133
+
134
+ Parameters
135
+ ----------
136
+ X : Ignored
137
+ Not used, present here for API consistency by convention.
138
+
139
+ y : Ignored
140
+ Not used, present here for API consistency by convention.
141
+
142
+ Returns
143
+ -------
144
+ self : object
145
+ FeatureHasher class instance.
146
+ """
147
+ return self
148
+
149
+ def transform(self, raw_X):
150
+ """Transform a sequence of instances to a scipy.sparse matrix.
151
+
152
+ Parameters
153
+ ----------
154
+ raw_X : iterable over iterable over raw features, length = n_samples
155
+ Samples. Each sample must be iterable an (e.g., a list or tuple)
156
+ containing/generating feature names (and optionally values, see
157
+ the input_type constructor argument) which will be hashed.
158
+ raw_X need not support the len function, so it can be the result
159
+ of a generator; n_samples is determined on the fly.
160
+
161
+ Returns
162
+ -------
163
+ X : sparse matrix of shape (n_samples, n_features)
164
+ Feature matrix, for use with estimators or further transformers.
165
+ """
166
+ raw_X = iter(raw_X)
167
+ if self.input_type == "dict":
168
+ raw_X = (_iteritems(d) for d in raw_X)
169
+ elif self.input_type == "string":
170
+ first_raw_X = next(raw_X)
171
+ if isinstance(first_raw_X, str):
172
+ raise ValueError(
173
+ "Samples can not be a single string. The input must be an iterable"
174
+ " over iterables of strings."
175
+ )
176
+ raw_X_ = chain([first_raw_X], raw_X)
177
+ raw_X = (((f, 1) for f in x) for x in raw_X_)
178
+
179
+ indices, indptr, values = _hashing_transform(
180
+ raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
181
+ )
182
+ n_samples = indptr.shape[0] - 1
183
+
184
+ if n_samples == 0:
185
+ raise ValueError("Cannot vectorize empty sequence.")
186
+
187
+ X = sp.csr_matrix(
188
+ (values, indices, indptr),
189
+ dtype=self.dtype,
190
+ shape=(n_samples, self.n_features),
191
+ )
192
+ X.sum_duplicates() # also sorts the indices
193
+
194
+ return X
195
+
196
+ def _more_tags(self):
197
+ return {"X_types": [self.input_type]}
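The FeatureHasher docstring above notes that a small n_features causes hash collisions and that alternate_sign compensates for them on average; a minimal sketch that makes both effects visible (not part of the commit; it assumes the vendored scikit-learn is importable, and no specific output values are implied):

    from sklearn.feature_extraction import FeatureHasher

    tokens = [["dog", "cat", "dog", "bird"]]

    # With only 4 buckets, distinct tokens frequently collide; without sign
    # alternation the colliding counts simply add up.
    h_plain = FeatureHasher(n_features=4, input_type="string", alternate_sign=False)
    print(h_plain.transform(tokens).toarray())

    # With alternate_sign=True (the default), colliding counts may partially cancel.
    h_signed = FeatureHasher(n_features=4, input_type="string", alternate_sign=True)
    print(h_signed.transform(tokens).toarray())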
venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (110 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py ADDED
@@ -0,0 +1,325 @@
1
+ # This list of English stop words is taken from the "Glasgow Information
2
+ # Retrieval Group". The original list can be found at
3
+ # http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
4
+ ENGLISH_STOP_WORDS = frozenset(
5
+ [
6
+ "a",
7
+ "about",
8
+ "above",
9
+ "across",
10
+ "after",
11
+ "afterwards",
12
+ "again",
13
+ "against",
14
+ "all",
15
+ "almost",
16
+ "alone",
17
+ "along",
18
+ "already",
19
+ "also",
20
+ "although",
21
+ "always",
22
+ "am",
23
+ "among",
24
+ "amongst",
25
+ "amoungst",
26
+ "amount",
27
+ "an",
28
+ "and",
29
+ "another",
30
+ "any",
31
+ "anyhow",
32
+ "anyone",
33
+ "anything",
34
+ "anyway",
35
+ "anywhere",
36
+ "are",
37
+ "around",
38
+ "as",
39
+ "at",
40
+ "back",
41
+ "be",
42
+ "became",
43
+ "because",
44
+ "become",
45
+ "becomes",
46
+ "becoming",
47
+ "been",
48
+ "before",
49
+ "beforehand",
50
+ "behind",
51
+ "being",
52
+ "below",
53
+ "beside",
54
+ "besides",
55
+ "between",
56
+ "beyond",
57
+ "bill",
58
+ "both",
59
+ "bottom",
60
+ "but",
61
+ "by",
62
+ "call",
63
+ "can",
64
+ "cannot",
65
+ "cant",
66
+ "co",
67
+ "con",
68
+ "could",
69
+ "couldnt",
70
+ "cry",
71
+ "de",
72
+ "describe",
73
+ "detail",
74
+ "do",
75
+ "done",
76
+ "down",
77
+ "due",
78
+ "during",
79
+ "each",
80
+ "eg",
81
+ "eight",
82
+ "either",
83
+ "eleven",
84
+ "else",
85
+ "elsewhere",
86
+ "empty",
87
+ "enough",
88
+ "etc",
89
+ "even",
90
+ "ever",
91
+ "every",
92
+ "everyone",
93
+ "everything",
94
+ "everywhere",
95
+ "except",
96
+ "few",
97
+ "fifteen",
98
+ "fifty",
99
+ "fill",
100
+ "find",
101
+ "fire",
102
+ "first",
103
+ "five",
104
+ "for",
105
+ "former",
106
+ "formerly",
107
+ "forty",
108
+ "found",
109
+ "four",
110
+ "from",
111
+ "front",
112
+ "full",
113
+ "further",
114
+ "get",
115
+ "give",
116
+ "go",
117
+ "had",
118
+ "has",
119
+ "hasnt",
120
+ "have",
121
+ "he",
122
+ "hence",
123
+ "her",
124
+ "here",
125
+ "hereafter",
126
+ "hereby",
127
+ "herein",
128
+ "hereupon",
129
+ "hers",
130
+ "herself",
131
+ "him",
132
+ "himself",
133
+ "his",
134
+ "how",
135
+ "however",
136
+ "hundred",
137
+ "i",
138
+ "ie",
139
+ "if",
140
+ "in",
141
+ "inc",
142
+ "indeed",
143
+ "interest",
144
+ "into",
145
+ "is",
146
+ "it",
147
+ "its",
148
+ "itself",
149
+ "keep",
150
+ "last",
151
+ "latter",
152
+ "latterly",
153
+ "least",
154
+ "less",
155
+ "ltd",
156
+ "made",
157
+ "many",
158
+ "may",
159
+ "me",
160
+ "meanwhile",
161
+ "might",
162
+ "mill",
163
+ "mine",
164
+ "more",
165
+ "moreover",
166
+ "most",
167
+ "mostly",
168
+ "move",
169
+ "much",
170
+ "must",
171
+ "my",
172
+ "myself",
173
+ "name",
174
+ "namely",
175
+ "neither",
176
+ "never",
177
+ "nevertheless",
178
+ "next",
179
+ "nine",
180
+ "no",
181
+ "nobody",
182
+ "none",
183
+ "noone",
184
+ "nor",
185
+ "not",
186
+ "nothing",
187
+ "now",
188
+ "nowhere",
189
+ "of",
190
+ "off",
191
+ "often",
192
+ "on",
193
+ "once",
194
+ "one",
195
+ "only",
196
+ "onto",
197
+ "or",
198
+ "other",
199
+ "others",
200
+ "otherwise",
201
+ "our",
202
+ "ours",
203
+ "ourselves",
204
+ "out",
205
+ "over",
206
+ "own",
207
+ "part",
208
+ "per",
209
+ "perhaps",
210
+ "please",
211
+ "put",
212
+ "rather",
213
+ "re",
214
+ "same",
215
+ "see",
216
+ "seem",
217
+ "seemed",
218
+ "seeming",
219
+ "seems",
220
+ "serious",
221
+ "several",
222
+ "she",
223
+ "should",
224
+ "show",
225
+ "side",
226
+ "since",
227
+ "sincere",
228
+ "six",
229
+ "sixty",
230
+ "so",
231
+ "some",
232
+ "somehow",
233
+ "someone",
234
+ "something",
235
+ "sometime",
236
+ "sometimes",
237
+ "somewhere",
238
+ "still",
239
+ "such",
240
+ "system",
241
+ "take",
242
+ "ten",
243
+ "than",
244
+ "that",
245
+ "the",
246
+ "their",
247
+ "them",
248
+ "themselves",
249
+ "then",
250
+ "thence",
251
+ "there",
252
+ "thereafter",
253
+ "thereby",
254
+ "therefore",
255
+ "therein",
256
+ "thereupon",
257
+ "these",
258
+ "they",
259
+ "thick",
260
+ "thin",
261
+ "third",
262
+ "this",
263
+ "those",
264
+ "though",
265
+ "three",
266
+ "through",
267
+ "throughout",
268
+ "thru",
269
+ "thus",
270
+ "to",
271
+ "together",
272
+ "too",
273
+ "top",
274
+ "toward",
275
+ "towards",
276
+ "twelve",
277
+ "twenty",
278
+ "two",
279
+ "un",
280
+ "under",
281
+ "until",
282
+ "up",
283
+ "upon",
284
+ "us",
285
+ "very",
286
+ "via",
287
+ "was",
288
+ "we",
289
+ "well",
290
+ "were",
291
+ "what",
292
+ "whatever",
293
+ "when",
294
+ "whence",
295
+ "whenever",
296
+ "where",
297
+ "whereafter",
298
+ "whereas",
299
+ "whereby",
300
+ "wherein",
301
+ "whereupon",
302
+ "wherever",
303
+ "whether",
304
+ "which",
305
+ "while",
306
+ "whither",
307
+ "who",
308
+ "whoever",
309
+ "whole",
310
+ "whom",
311
+ "whose",
312
+ "why",
313
+ "will",
314
+ "with",
315
+ "within",
316
+ "without",
317
+ "would",
318
+ "yet",
319
+ "you",
320
+ "your",
321
+ "yours",
322
+ "yourself",
323
+ "yourselves",
324
+ ]
325
+ )
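This frozenset is what the text vectorizers in this package use when stop_words="english" is requested (the public import path goes through sklearn.feature_extraction.text). A minimal membership check, not part of the commit and assuming the vendored scikit-learn is importable:

    from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS

    print(len(ENGLISH_STOP_WORDS))           # 318 words in the list added here
    print("the" in ENGLISH_STOP_WORDS)       # True
    print("learning" in ENGLISH_STOP_WORDS)  # False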
venv/lib/python3.10/site-packages/sklearn/feature_extraction/image.py ADDED
@@ -0,0 +1,671 @@
1
+ """
2
+ The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
3
+ extract features from images.
4
+ """
5
+
6
+ # Authors: Emmanuelle Gouillart <[email protected]>
7
+ # Gael Varoquaux <[email protected]>
8
+ # Olivier Grisel
9
+ # Vlad Niculae
10
+ # License: BSD 3 clause
11
+
12
+ from itertools import product
13
+ from numbers import Integral, Number, Real
14
+
15
+ import numpy as np
16
+ from numpy.lib.stride_tricks import as_strided
17
+ from scipy import sparse
18
+
19
+ from ..base import BaseEstimator, TransformerMixin, _fit_context
20
+ from ..utils import check_array, check_random_state
21
+ from ..utils._param_validation import Hidden, Interval, RealNotInt, validate_params
22
+
23
+ __all__ = [
24
+ "PatchExtractor",
25
+ "extract_patches_2d",
26
+ "grid_to_graph",
27
+ "img_to_graph",
28
+ "reconstruct_from_patches_2d",
29
+ ]
30
+
31
+ ###############################################################################
32
+ # From an image to a graph
33
+
34
+
35
+ def _make_edges_3d(n_x, n_y, n_z=1):
36
+ """Returns a list of edges for a 3D image.
37
+
38
+ Parameters
39
+ ----------
40
+ n_x : int
41
+ The size of the grid in the x direction.
42
+ n_y : int
43
+ The size of the grid in the y direction.
44
+ n_z : integer, default=1
45
+ The size of the grid in the z direction, defaults to 1
46
+ """
47
+ vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
48
+ edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))
49
+ edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
50
+ edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
51
+ edges = np.hstack((edges_deep, edges_right, edges_down))
52
+ return edges
53
+
54
+
55
+ def _compute_gradient_3d(edges, img):
56
+ _, n_y, n_z = img.shape
57
+ gradient = np.abs(
58
+ img[
59
+ edges[0] // (n_y * n_z),
60
+ (edges[0] % (n_y * n_z)) // n_z,
61
+ (edges[0] % (n_y * n_z)) % n_z,
62
+ ]
63
+ - img[
64
+ edges[1] // (n_y * n_z),
65
+ (edges[1] % (n_y * n_z)) // n_z,
66
+ (edges[1] % (n_y * n_z)) % n_z,
67
+ ]
68
+ )
69
+ return gradient
70
+
71
+
72
+ # XXX: Why mask the image after computing the weights?
73
+
74
+
75
+ def _mask_edges_weights(mask, edges, weights=None):
76
+ """Apply a mask to edges (weighted or not)"""
77
+ inds = np.arange(mask.size)
78
+ inds = inds[mask.ravel()]
79
+ ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds))
80
+ edges = edges[:, ind_mask]
81
+ if weights is not None:
82
+ weights = weights[ind_mask]
83
+ if len(edges.ravel()):
84
+ maxval = edges.max()
85
+ else:
86
+ maxval = 0
87
+ order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))
88
+ edges = order[edges]
89
+ if weights is None:
90
+ return edges
91
+ else:
92
+ return edges, weights
93
+
94
+
95
+ def _to_graph(
96
+ n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None
97
+ ):
98
+ """Auxiliary function for img_to_graph and grid_to_graph"""
99
+ edges = _make_edges_3d(n_x, n_y, n_z)
100
+
101
+ if dtype is None: # To not overwrite input dtype
102
+ if img is None:
103
+ dtype = int
104
+ else:
105
+ dtype = img.dtype
106
+
107
+ if img is not None:
108
+ img = np.atleast_3d(img)
109
+ weights = _compute_gradient_3d(edges, img)
110
+ if mask is not None:
111
+ edges, weights = _mask_edges_weights(mask, edges, weights)
112
+ diag = img.squeeze()[mask]
113
+ else:
114
+ diag = img.ravel()
115
+ n_voxels = diag.size
116
+ else:
117
+ if mask is not None:
118
+ mask = mask.astype(dtype=bool, copy=False)
119
+ edges = _mask_edges_weights(mask, edges)
120
+ n_voxels = np.sum(mask)
121
+ else:
122
+ n_voxels = n_x * n_y * n_z
123
+ weights = np.ones(edges.shape[1], dtype=dtype)
124
+ diag = np.ones(n_voxels, dtype=dtype)
125
+
126
+ diag_idx = np.arange(n_voxels)
127
+ i_idx = np.hstack((edges[0], edges[1]))
128
+ j_idx = np.hstack((edges[1], edges[0]))
129
+ graph = sparse.coo_matrix(
130
+ (
131
+ np.hstack((weights, weights, diag)),
132
+ (np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))),
133
+ ),
134
+ (n_voxels, n_voxels),
135
+ dtype=dtype,
136
+ )
137
+ if return_as is np.ndarray:
138
+ return graph.toarray()
139
+ return return_as(graph)
140
+
141
+
142
+ @validate_params(
143
+ {
144
+ "img": ["array-like"],
145
+ "mask": [None, np.ndarray],
146
+ "return_as": [type],
147
+ "dtype": "no_validation", # validation delegated to numpy
148
+ },
149
+ prefer_skip_nested_validation=True,
150
+ )
151
+ def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
152
+ """Graph of the pixel-to-pixel gradient connections.
153
+
154
+ Edges are weighted with the gradient values.
155
+
156
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
157
+
158
+ Parameters
159
+ ----------
160
+ img : array-like of shape (height, width) or (height, width, channel)
161
+ 2D or 3D image.
162
+ mask : ndarray of shape (height, width) or \
163
+ (height, width, channel), dtype=bool, default=None
164
+ An optional mask of the image, to consider only part of the
165
+ pixels.
166
+ return_as : np.ndarray or a sparse matrix class, \
167
+ default=sparse.coo_matrix
168
+ The class to use to build the returned adjacency matrix.
169
+ dtype : dtype, default=None
170
+ The data of the returned sparse matrix. By default it is the
171
+ dtype of img.
172
+
173
+ Returns
174
+ -------
175
+ graph : ndarray or a sparse matrix class
176
+ The computed adjacency matrix.
177
+
178
+ Notes
179
+ -----
180
+ For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
181
+ handled by returning a dense np.matrix instance. Going forward, np.ndarray
182
+ returns an np.ndarray, as expected.
183
+
184
+ For compatibility, user code relying on this method should wrap its
185
+ calls in ``np.asarray`` to avoid type issues.
186
+ """
187
+ img = np.atleast_3d(img)
188
+ n_x, n_y, n_z = img.shape
189
+ return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
190
+
191
+
192
+ @validate_params(
193
+ {
194
+ "n_x": [Interval(Integral, left=1, right=None, closed="left")],
195
+ "n_y": [Interval(Integral, left=1, right=None, closed="left")],
196
+ "n_z": [Interval(Integral, left=1, right=None, closed="left")],
197
+ "mask": [None, np.ndarray],
198
+ "return_as": [type],
199
+ "dtype": "no_validation", # validation delegated to numpy
200
+ },
201
+ prefer_skip_nested_validation=True,
202
+ )
203
+ def grid_to_graph(
204
+ n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int
205
+ ):
206
+ """Graph of the pixel-to-pixel connections.
207
+
208
+ Edges exist if 2 voxels are connected.
209
+
210
+ Parameters
211
+ ----------
212
+ n_x : int
213
+ Dimension in x axis.
214
+ n_y : int
215
+ Dimension in y axis.
216
+ n_z : int, default=1
217
+ Dimension in z axis.
218
+ mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
219
+ An optional mask of the image, to consider only part of the
220
+ pixels.
221
+ return_as : np.ndarray or a sparse matrix class, \
222
+ default=sparse.coo_matrix
223
+ The class to use to build the returned adjacency matrix.
224
+ dtype : dtype, default=int
225
+ The data of the returned sparse matrix. By default it is int.
226
+
227
+ Returns
228
+ -------
229
+ graph : np.ndarray or a sparse matrix class
230
+ The computed adjacency matrix.
231
+
232
+ Notes
233
+ -----
234
+ For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
235
+ handled by returning a dense np.matrix instance. Going forward, np.ndarray
236
+ returns an np.ndarray, as expected.
237
+
238
+ For compatibility, user code relying on this method should wrap its
239
+ calls in ``np.asarray`` to avoid type issues.
240
+
241
+ Examples
242
+ --------
243
+ >>> import numpy as np
244
+ >>> from sklearn.feature_extraction.image import grid_to_graph
245
+ >>> shape_img = (4, 4, 1)
246
+ >>> mask = np.zeros(shape=shape_img, dtype=bool)
247
+ >>> mask[[1, 2], [1, 2], :] = True
248
+ >>> graph = grid_to_graph(*shape_img, mask=mask)
249
+ >>> print(graph)
250
+ (0, 0) 1
251
+ (1, 1) 1
252
+ """
253
+ return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
254
+
255
+
256
+ ###############################################################################
257
+ # From an image to a set of small image patches
258
+
259
+
260
+ def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
261
+ """Compute the number of patches that will be extracted in an image.
262
+
263
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
264
+
265
+ Parameters
266
+ ----------
267
+ i_h : int
268
+ The image height
269
+ i_w : int
270
+ The image with
271
+ p_h : int
272
+ The height of a patch
273
+ p_w : int
274
+ The width of a patch
275
+ max_patches : int or float, default=None
276
+ The maximum number of patches to extract. If `max_patches` is a float
277
+ between 0 and 1, it is taken to be a proportion of the total number
278
+ of patches. If `max_patches` is None, all possible patches are extracted.
279
+ """
280
+ n_h = i_h - p_h + 1
281
+ n_w = i_w - p_w + 1
282
+ all_patches = n_h * n_w
283
+
284
+ if max_patches:
285
+ if isinstance(max_patches, (Integral)) and max_patches < all_patches:
286
+ return max_patches
287
+ elif isinstance(max_patches, (Integral)) and max_patches >= all_patches:
288
+ return all_patches
289
+ elif isinstance(max_patches, (Real)) and 0 < max_patches < 1:
290
+ return int(max_patches * all_patches)
291
+ else:
292
+ raise ValueError("Invalid value for max_patches: %r" % max_patches)
293
+ else:
294
+ return all_patches
295
+
296
+
297
+ def _extract_patches(arr, patch_shape=8, extraction_step=1):
298
+ """Extracts patches of any n-dimensional array in place using strides.
299
+
300
+ Given an n-dimensional array it will return a 2n-dimensional array with
301
+ the first n dimensions indexing patch position and the last n indexing
302
+ the patch content. This operation is immediate (O(1)). A reshape
303
+ performed on the first n dimensions will cause numpy to copy data, leading
304
+ to a list of extracted patches.
305
+
306
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
307
+
308
+ Parameters
309
+ ----------
310
+ arr : ndarray
311
+ n-dimensional array of which patches are to be extracted
312
+
313
+ patch_shape : int or tuple of length arr.ndim.default=8
314
+ Indicates the shape of the patches to be extracted. If an
315
+ integer is given, the shape will be a hypercube of
316
+ sidelength given by its value.
317
+
318
+ extraction_step : int or tuple of length arr.ndim, default=1
319
+ Indicates step size at which extraction shall be performed.
320
+ If integer is given, then the step is uniform in all dimensions.
321
+
322
+
323
+ Returns
324
+ -------
325
+ patches : strided ndarray
326
+ 2n-dimensional array indexing patches on first n dimensions and
327
+ containing patches on the last n dimensions. These dimensions
328
+ are fake, but this way no data is copied. A simple reshape invokes
329
+ a copying operation to obtain a list of patches:
330
+ result.reshape([-1] + list(patch_shape))
331
+ """
332
+
333
+ arr_ndim = arr.ndim
334
+
335
+ if isinstance(patch_shape, Number):
336
+ patch_shape = tuple([patch_shape] * arr_ndim)
337
+ if isinstance(extraction_step, Number):
338
+ extraction_step = tuple([extraction_step] * arr_ndim)
339
+
340
+ patch_strides = arr.strides
341
+
342
+ slices = tuple(slice(None, None, st) for st in extraction_step)
343
+ indexing_strides = arr[slices].strides
344
+
345
+ patch_indices_shape = (
346
+ (np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
347
+ ) + 1
348
+
349
+ shape = tuple(list(patch_indices_shape) + list(patch_shape))
350
+ strides = tuple(list(indexing_strides) + list(patch_strides))
351
+
352
+ patches = as_strided(arr, shape=shape, strides=strides)
353
+ return patches
354
+
355
+
356
+ @validate_params(
357
+ {
358
+ "image": [np.ndarray],
359
+ "patch_size": [tuple, list],
360
+ "max_patches": [
361
+ Interval(RealNotInt, 0, 1, closed="neither"),
362
+ Interval(Integral, 1, None, closed="left"),
363
+ None,
364
+ ],
365
+ "random_state": ["random_state"],
366
+ },
367
+ prefer_skip_nested_validation=True,
368
+ )
369
+ def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
370
+ """Reshape a 2D image into a collection of patches.
371
+
372
+ The resulting patches are allocated in a dedicated array.
373
+
374
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
375
+
376
+ Parameters
377
+ ----------
378
+ image : ndarray of shape (image_height, image_width) or \
379
+ (image_height, image_width, n_channels)
380
+ The original image data. For color images, the last dimension specifies
381
+ the channel: a RGB image would have `n_channels=3`.
382
+
383
+ patch_size : tuple of int (patch_height, patch_width)
384
+ The dimensions of one patch.
385
+
386
+ max_patches : int or float, default=None
387
+ The maximum number of patches to extract. If `max_patches` is a float
388
+ between 0 and 1, it is taken to be a proportion of the total number
389
+ of patches. If `max_patches` is None it corresponds to the total number
390
+ of patches that can be extracted.
391
+
392
+ random_state : int, RandomState instance, default=None
393
+ Determines the random number generator used for random sampling when
394
+ `max_patches` is not None. Use an int to make the randomness
395
+ deterministic.
396
+ See :term:`Glossary <random_state>`.
397
+
398
+ Returns
399
+ -------
400
+ patches : array of shape (n_patches, patch_height, patch_width) or \
401
+ (n_patches, patch_height, patch_width, n_channels)
402
+ The collection of patches extracted from the image, where `n_patches`
403
+ is either `max_patches` or the total number of patches that can be
404
+ extracted.
405
+
406
+ Examples
407
+ --------
408
+ >>> from sklearn.datasets import load_sample_image
409
+ >>> from sklearn.feature_extraction import image
410
+ >>> # Use the array data from the first image in this dataset:
411
+ >>> one_image = load_sample_image("china.jpg")
412
+ >>> print('Image shape: {}'.format(one_image.shape))
413
+ Image shape: (427, 640, 3)
414
+ >>> patches = image.extract_patches_2d(one_image, (2, 2))
415
+ >>> print('Patches shape: {}'.format(patches.shape))
416
+ Patches shape: (272214, 2, 2, 3)
417
+ >>> # Here are just two of these patches:
418
+ >>> print(patches[1])
419
+ [[[174 201 231]
420
+ [174 201 231]]
421
+ [[173 200 230]
422
+ [173 200 230]]]
423
+ >>> print(patches[800])
424
+ [[[187 214 243]
425
+ [188 215 244]]
426
+ [[187 214 243]
427
+ [188 215 244]]]
428
+ """
429
+ i_h, i_w = image.shape[:2]
430
+ p_h, p_w = patch_size
431
+
432
+ if p_h > i_h:
433
+ raise ValueError(
434
+ "Height of the patch should be less than the height of the image."
435
+ )
436
+
437
+ if p_w > i_w:
438
+ raise ValueError(
439
+ "Width of the patch should be less than the width of the image."
440
+ )
441
+
442
+ image = check_array(image, allow_nd=True)
443
+ image = image.reshape((i_h, i_w, -1))
444
+ n_colors = image.shape[-1]
445
+
446
+ extracted_patches = _extract_patches(
447
+ image, patch_shape=(p_h, p_w, n_colors), extraction_step=1
448
+ )
449
+
450
+ n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
451
+ if max_patches:
452
+ rng = check_random_state(random_state)
453
+ i_s = rng.randint(i_h - p_h + 1, size=n_patches)
454
+ j_s = rng.randint(i_w - p_w + 1, size=n_patches)
455
+ patches = extracted_patches[i_s, j_s, 0]
456
+ else:
457
+ patches = extracted_patches
458
+
459
+ patches = patches.reshape(-1, p_h, p_w, n_colors)
460
+ # remove the color dimension if useless
461
+ if patches.shape[-1] == 1:
462
+ return patches.reshape((n_patches, p_h, p_w))
463
+ else:
464
+ return patches
465
+
466
+
467
+ @validate_params(
468
+ {"patches": [np.ndarray], "image_size": [tuple, Hidden(list)]},
469
+ prefer_skip_nested_validation=True,
470
+ )
471
+ def reconstruct_from_patches_2d(patches, image_size):
472
+ """Reconstruct the image from all of its patches.
473
+
474
+ Patches are assumed to overlap and the image is constructed by filling in
475
+ the patches from left to right, top to bottom, averaging the overlapping
476
+ regions.
477
+
478
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
479
+
480
+ Parameters
481
+ ----------
482
+ patches : ndarray of shape (n_patches, patch_height, patch_width) or \
483
+ (n_patches, patch_height, patch_width, n_channels)
484
+ The complete set of patches. If the patches contain colour information,
485
+ channels are indexed along the last dimension: RGB patches would
486
+ have `n_channels=3`.
487
+
488
+ image_size : tuple of int (image_height, image_width) or \
489
+ (image_height, image_width, n_channels)
490
+ The size of the image that will be reconstructed.
491
+
492
+ Returns
493
+ -------
494
+ image : ndarray of shape image_size
495
+ The reconstructed image.
496
+ """
497
+ i_h, i_w = image_size[:2]
498
+ p_h, p_w = patches.shape[1:3]
499
+ img = np.zeros(image_size)
500
+ # compute the dimensions of the patches array
501
+ n_h = i_h - p_h + 1
502
+ n_w = i_w - p_w + 1
503
+ for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
504
+ img[i : i + p_h, j : j + p_w] += p
505
+
506
+ for i in range(i_h):
507
+ for j in range(i_w):
508
+ # divide by the amount of overlap
509
+ # XXX: is this the most efficient way? memory-wise yes, cpu wise?
510
+ img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j))
511
+ return img
512
+
513
+
514
+ class PatchExtractor(TransformerMixin, BaseEstimator):
515
+ """Extracts patches from a collection of images.
516
+
517
+ Read more in the :ref:`User Guide <image_feature_extraction>`.
518
+
519
+ .. versionadded:: 0.9
520
+
521
+ Parameters
522
+ ----------
523
+ patch_size : tuple of int (patch_height, patch_width), default=None
524
+ The dimensions of one patch. If set to None, the patch size will be
525
+ automatically set to `(img_height // 10, img_width // 10)`, where
526
+ `img_height` and `img_width` are the dimensions of the input images.
527
+
528
+ max_patches : int or float, default=None
529
+ The maximum number of patches per image to extract. If `max_patches` is
530
+ a float in (0, 1), it is taken to mean a proportion of the total number
531
+ of patches. If set to None, extract all possible patches.
532
+
533
+ random_state : int, RandomState instance, default=None
534
+ Determines the random number generator used for random sampling when
535
+ `max_patches is not None`. Use an int to make the randomness
536
+ deterministic.
537
+ See :term:`Glossary <random_state>`.
538
+
539
+ See Also
540
+ --------
541
+ reconstruct_from_patches_2d : Reconstruct image from all of its patches.
542
+
543
+ Notes
544
+ -----
545
+ This estimator is stateless and does not need to be fitted. However, we
546
+ recommend to call :meth:`fit_transform` instead of :meth:`transform`, as
547
+ parameter validation is only performed in :meth:`fit`.
548
+
549
+ Examples
550
+ --------
551
+ >>> from sklearn.datasets import load_sample_images
552
+ >>> from sklearn.feature_extraction import image
553
+ >>> # Use the array data from the second image in this dataset:
554
+ >>> X = load_sample_images().images[1]
555
+ >>> X = X[None, ...]
556
+ >>> print(f"Image shape: {X.shape}")
557
+ Image shape: (1, 427, 640, 3)
558
+ >>> pe = image.PatchExtractor(patch_size=(10, 10))
559
+ >>> pe_trans = pe.transform(X)
560
+ >>> print(f"Patches shape: {pe_trans.shape}")
561
+ Patches shape: (263758, 10, 10, 3)
562
+ >>> X_reconstructed = image.reconstruct_from_patches_2d(pe_trans, X.shape[1:])
563
+ >>> print(f"Reconstructed shape: {X_reconstructed.shape}")
564
+ Reconstructed shape: (427, 640, 3)
565
+ """
566
+
567
+ _parameter_constraints: dict = {
568
+ "patch_size": [tuple, None],
569
+ "max_patches": [
570
+ None,
571
+ Interval(RealNotInt, 0, 1, closed="neither"),
572
+ Interval(Integral, 1, None, closed="left"),
573
+ ],
574
+ "random_state": ["random_state"],
575
+ }
576
+
577
+ def __init__(self, *, patch_size=None, max_patches=None, random_state=None):
578
+ self.patch_size = patch_size
579
+ self.max_patches = max_patches
580
+ self.random_state = random_state
581
+
582
+ @_fit_context(prefer_skip_nested_validation=True)
583
+ def fit(self, X, y=None):
584
+ """Only validate the parameters of the estimator.
585
+
586
+ This method allows one to: (i) validate the parameters of the estimator and
587
+ (ii) be consistent with the scikit-learn transformer API.
588
+
589
+ Parameters
590
+ ----------
591
+ X : ndarray of shape (n_samples, image_height, image_width) or \
592
+ (n_samples, image_height, image_width, n_channels)
593
+ Array of images from which to extract patches. For color images,
594
+ the last dimension specifies the channel: an RGB image would have
595
+ `n_channels=3`.
596
+
597
+ y : Ignored
598
+ Not used, present for API consistency by convention.
599
+
600
+ Returns
601
+ -------
602
+ self : object
603
+ Returns the instance itself.
604
+ """
605
+ return self
606
+
607
+ def transform(self, X):
608
+ """Transform the image samples in `X` into a matrix of patch data.
609
+
610
+ Parameters
611
+ ----------
612
+ X : ndarray of shape (n_samples, image_height, image_width) or \
613
+ (n_samples, image_height, image_width, n_channels)
614
+ Array of images from which to extract patches. For color images,
615
+ the last dimension specifies the channel: an RGB image would have
616
+ `n_channels=3`.
617
+
618
+ Returns
619
+ -------
620
+ patches : array of shape (n_patches, patch_height, patch_width) or \
621
+ (n_patches, patch_height, patch_width, n_channels)
622
+ The collection of patches extracted from the images, where
623
+ `n_patches` is either `n_samples * max_patches` or the total
624
+ number of patches that can be extracted.
625
+ """
626
+ X = self._validate_data(
627
+ X=X,
628
+ ensure_2d=False,
629
+ allow_nd=True,
630
+ ensure_min_samples=1,
631
+ ensure_min_features=1,
632
+ reset=False,
633
+ )
634
+ random_state = check_random_state(self.random_state)
635
+ n_imgs, img_height, img_width = X.shape[:3]
636
+ if self.patch_size is None:
637
+ patch_size = img_height // 10, img_width // 10
638
+ else:
639
+ if len(self.patch_size) != 2:
640
+ raise ValueError(
641
+ "patch_size must be a tuple of two integers. Got"
642
+ f" {self.patch_size} instead."
643
+ )
644
+ patch_size = self.patch_size
645
+
646
+ n_imgs, img_height, img_width = X.shape[:3]
647
+ X = np.reshape(X, (n_imgs, img_height, img_width, -1))
648
+ n_channels = X.shape[-1]
649
+
650
+ # compute the dimensions of the patches array
651
+ patch_height, patch_width = patch_size
652
+ n_patches = _compute_n_patches(
653
+ img_height, img_width, patch_height, patch_width, self.max_patches
654
+ )
655
+ patches_shape = (n_imgs * n_patches,) + patch_size
656
+ if n_channels > 1:
657
+ patches_shape += (n_channels,)
658
+
659
+ # extract the patches
660
+ patches = np.empty(patches_shape)
661
+ for ii, image in enumerate(X):
662
+ patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d(
663
+ image,
664
+ patch_size,
665
+ max_patches=self.max_patches,
666
+ random_state=random_state,
667
+ )
668
+ return patches
669
+
670
+ def _more_tags(self):
671
+ return {"X_types": ["3darray"], "stateless": True}
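# Illustrative sketch (not part of the upstream diff): PatchExtractor with a
# float max_patches, which (per the docstring above) is interpreted per image
# as a proportion of all possible patches: (12 - 4 + 1) ** 2 = 81 patches per
# image, so 0.5 keeps 40 per image.
import numpy as np
from sklearn.feature_extraction.image import PatchExtractor

images = np.random.RandomState(0).rand(2, 12, 12)   # two grayscale images
pe = PatchExtractor(patch_size=(4, 4), max_patches=0.5, random_state=0)
patches = pe.fit_transform(images)
print(patches.shape)                                 # (80, 4, 4)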
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc ADDED
Binary file (7.9 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc ADDED
Binary file (5.91 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc ADDED
Binary file (10.7 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc ADDED
Binary file (38.1 kB).
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py ADDED
@@ -0,0 +1,262 @@
1
+ # Authors: Lars Buitinck
2
+ # Dan Blanchard <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ from random import Random
6
+
7
+ import numpy as np
8
+ import pytest
9
+ import scipy.sparse as sp
10
+ from numpy.testing import assert_allclose, assert_array_equal
11
+
12
+ from sklearn.exceptions import NotFittedError
13
+ from sklearn.feature_extraction import DictVectorizer
14
+ from sklearn.feature_selection import SelectKBest, chi2
15
+
16
+
17
+ @pytest.mark.parametrize("sparse", (True, False))
18
+ @pytest.mark.parametrize("dtype", (int, np.float32, np.int16))
19
+ @pytest.mark.parametrize("sort", (True, False))
20
+ @pytest.mark.parametrize("iterable", (True, False))
21
+ def test_dictvectorizer(sparse, dtype, sort, iterable):
22
+ D = [{"foo": 1, "bar": 3}, {"bar": 4, "baz": 2}, {"bar": 1, "quux": 1, "quuux": 2}]
23
+
24
+ v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
25
+ X = v.fit_transform(iter(D) if iterable else D)
26
+
27
+ assert sp.issparse(X) == sparse
28
+ assert X.shape == (3, 5)
29
+ assert X.sum() == 14
30
+ assert v.inverse_transform(X) == D
31
+
32
+ if sparse:
33
+ # CSR matrices can't be compared for equality
34
+ assert_array_equal(
35
+ X.toarray(), v.transform(iter(D) if iterable else D).toarray()
36
+ )
37
+ else:
38
+ assert_array_equal(X, v.transform(iter(D) if iterable else D))
39
+
40
+ if sort:
41
+ assert v.feature_names_ == sorted(v.feature_names_)
42
+
43
+
44
+ def test_feature_selection():
45
+ # make two feature dicts with two useful features and a bunch of useless
46
+ # ones, in terms of chi2
47
+ d1 = dict([("useless%d" % i, 10) for i in range(20)], useful1=1, useful2=20)
48
+ d2 = dict([("useless%d" % i, 10) for i in range(20)], useful1=20, useful2=1)
49
+
50
+ for indices in (True, False):
51
+ v = DictVectorizer().fit([d1, d2])
52
+ X = v.transform([d1, d2])
53
+ sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
54
+
55
+ v.restrict(sel.get_support(indices=indices), indices=indices)
56
+ assert_array_equal(v.get_feature_names_out(), ["useful1", "useful2"])
57
+
58
+
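# Illustrative sketch (not part of the upstream diff): restrict() as exercised
# by test_feature_selection above, but with a hand-written boolean support
# mask instead of SelectKBest; only the selected columns survive in the
# fitted vocabulary.
from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False).fit([{"a": 1, "b": 2, "c": 3}])
v.restrict([True, False, True])                 # keep "a" and "c"
print(v.get_feature_names_out())                # ['a' 'c']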
59
+ def test_one_of_k():
60
+ D_in = [
61
+ {"version": "1", "ham": 2},
62
+ {"version": "2", "spam": 0.3},
63
+ {"version=3": True, "spam": -1},
64
+ ]
65
+ v = DictVectorizer()
66
+ X = v.fit_transform(D_in)
67
+ assert X.shape == (3, 5)
68
+
69
+ D_out = v.inverse_transform(X)
70
+ assert D_out[0] == {"version=1": 1, "ham": 2}
71
+
72
+ names = v.get_feature_names_out()
73
+ assert "version=2" in names
74
+ assert "version" not in names
75
+
76
+
77
+ def test_iterable_value():
78
+ D_names = ["ham", "spam", "version=1", "version=2", "version=3"]
79
+ X_expected = [
80
+ [2.0, 0.0, 2.0, 1.0, 0.0],
81
+ [0.0, 0.3, 0.0, 1.0, 0.0],
82
+ [0.0, -1.0, 0.0, 0.0, 1.0],
83
+ ]
84
+ D_in = [
85
+ {"version": ["1", "2", "1"], "ham": 2},
86
+ {"version": "2", "spam": 0.3},
87
+ {"version=3": True, "spam": -1},
88
+ ]
89
+ v = DictVectorizer()
90
+ X = v.fit_transform(D_in)
91
+ X = X.toarray()
92
+ assert_array_equal(X, X_expected)
93
+
94
+ D_out = v.inverse_transform(X)
95
+ assert D_out[0] == {"version=1": 2, "version=2": 1, "ham": 2}
96
+
97
+ names = v.get_feature_names_out()
98
+
99
+ assert_array_equal(names, D_names)
100
+
101
+
102
+ def test_iterable_not_string_error():
103
+ error_value = (
104
+ "Unsupported type <class 'int'> in iterable value. "
105
+ "Only iterables of string are supported."
106
+ )
107
+ D2 = [{"foo": "1", "bar": "2"}, {"foo": "3", "baz": "1"}, {"foo": [1, "three"]}]
108
+ v = DictVectorizer(sparse=False)
109
+ with pytest.raises(TypeError) as error:
110
+ v.fit(D2)
111
+ assert str(error.value) == error_value
112
+
113
+
114
+ def test_mapping_error():
115
+ error_value = (
116
+ "Unsupported value type <class 'dict'> "
117
+ "for foo: {'one': 1, 'three': 3}.\n"
118
+ "Mapping objects are not supported."
119
+ )
120
+ D2 = [
121
+ {"foo": "1", "bar": "2"},
122
+ {"foo": "3", "baz": "1"},
123
+ {"foo": {"one": 1, "three": 3}},
124
+ ]
125
+ v = DictVectorizer(sparse=False)
126
+ with pytest.raises(TypeError) as error:
127
+ v.fit(D2)
128
+ assert str(error.value) == error_value
129
+
130
+
131
+ def test_unseen_or_no_features():
132
+ D = [{"camelot": 0, "spamalot": 1}]
133
+ for sparse in [True, False]:
134
+ v = DictVectorizer(sparse=sparse).fit(D)
135
+
136
+ X = v.transform({"push the pram a lot": 2})
137
+ if sparse:
138
+ X = X.toarray()
139
+ assert_array_equal(X, np.zeros((1, 2)))
140
+
141
+ X = v.transform({})
142
+ if sparse:
143
+ X = X.toarray()
144
+ assert_array_equal(X, np.zeros((1, 2)))
145
+
146
+ with pytest.raises(ValueError, match="empty"):
147
+ v.transform([])
148
+
149
+
150
+ def test_deterministic_vocabulary(global_random_seed):
151
+ # Generate equal dictionaries with different memory layouts
152
+ items = [("%03d" % i, i) for i in range(1000)]
153
+ rng = Random(global_random_seed)
154
+ d_sorted = dict(items)
155
+ rng.shuffle(items)
156
+ d_shuffled = dict(items)
157
+
158
+ # check that the memory layout does not impact the resulting vocabulary
159
+ v_1 = DictVectorizer().fit([d_sorted])
160
+ v_2 = DictVectorizer().fit([d_shuffled])
161
+
162
+ assert v_1.vocabulary_ == v_2.vocabulary_
163
+
164
+
165
+ def test_n_features_in():
166
+ # For vectorizers, n_features_in_ does not make sense and does not exist.
167
+ dv = DictVectorizer()
168
+ assert not hasattr(dv, "n_features_in_")
169
+ d = [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]
170
+ dv.fit(d)
171
+ assert not hasattr(dv, "n_features_in_")
172
+
173
+
174
+ def test_dictvectorizer_dense_sparse_equivalence():
175
+ """Check the equivalence between sparse and dense DictVectorizer.
176
+ Non-regression test for:
177
+ https://github.com/scikit-learn/scikit-learn/issues/19978
178
+ """
179
+ movie_entry_fit = [
180
+ {"category": ["thriller", "drama"], "year": 2003},
181
+ {"category": ["animation", "family"], "year": 2011},
182
+ {"year": 1974},
183
+ ]
184
+ movie_entry_transform = [{"category": ["thriller"], "unseen_feature": "3"}]
185
+ dense_vectorizer = DictVectorizer(sparse=False)
186
+ sparse_vectorizer = DictVectorizer(sparse=True)
187
+
188
+ dense_vector_fit = dense_vectorizer.fit_transform(movie_entry_fit)
189
+ sparse_vector_fit = sparse_vectorizer.fit_transform(movie_entry_fit)
190
+
191
+ assert not sp.issparse(dense_vector_fit)
192
+ assert sp.issparse(sparse_vector_fit)
193
+
194
+ assert_allclose(dense_vector_fit, sparse_vector_fit.toarray())
195
+
196
+ dense_vector_transform = dense_vectorizer.transform(movie_entry_transform)
197
+ sparse_vector_transform = sparse_vectorizer.transform(movie_entry_transform)
198
+
199
+ assert not sp.issparse(dense_vector_transform)
200
+ assert sp.issparse(sparse_vector_transform)
201
+
202
+ assert_allclose(dense_vector_transform, sparse_vector_transform.toarray())
203
+
204
+ dense_inverse_transform = dense_vectorizer.inverse_transform(dense_vector_transform)
205
+ sparse_inverse_transform = sparse_vectorizer.inverse_transform(
206
+ sparse_vector_transform
207
+ )
208
+
209
+ expected_inverse = [{"category=thriller": 1.0}]
210
+ assert dense_inverse_transform == expected_inverse
211
+ assert sparse_inverse_transform == expected_inverse
212
+
213
+
214
+ def test_dict_vectorizer_unsupported_value_type():
215
+ """Check that we raise an error when the value associated to a feature
216
+ is not supported.
217
+
218
+ Non-regression test for:
219
+ https://github.com/scikit-learn/scikit-learn/issues/19489
220
+ """
221
+
222
+ class A:
223
+ pass
224
+
225
+ vectorizer = DictVectorizer(sparse=True)
226
+ X = [{"foo": A()}]
227
+ err_msg = "Unsupported value Type"
228
+ with pytest.raises(TypeError, match=err_msg):
229
+ vectorizer.fit_transform(X)
230
+
231
+
232
+ def test_dict_vectorizer_get_feature_names_out():
233
+ """Check that integer feature names are converted to strings in
234
+ feature_names_out."""
235
+
236
+ X = [{1: 2, 3: 4}, {2: 4}]
237
+ dv = DictVectorizer(sparse=False).fit(X)
238
+
239
+ feature_names = dv.get_feature_names_out()
240
+ assert isinstance(feature_names, np.ndarray)
241
+ assert feature_names.dtype == object
242
+ assert_array_equal(feature_names, ["1", "2", "3"])
243
+
244
+
245
+ @pytest.mark.parametrize(
246
+ "method, input",
247
+ [
248
+ ("transform", [{1: 2, 3: 4}, {2: 4}]),
249
+ ("inverse_transform", [{1: 2, 3: 4}, {2: 4}]),
250
+ ("restrict", [True, False, True]),
251
+ ],
252
+ )
253
+ def test_dict_vectorizer_not_fitted_error(method, input):
254
+ """Check that an unfitted DictVectorizer instance raises NotFittedError.
255
+
256
+ This should be part of the common tests, but those currently assume estimators accept
257
+ text input.
258
+ """
259
+ dv = DictVectorizer(sparse=False)
260
+
261
+ with pytest.raises(NotFittedError):
262
+ getattr(dv, method)(input)
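# Illustrative sketch (not part of the upstream diff): the one-of-K behaviour
# exercised by test_one_of_k above in isolation. Numeric values are kept
# as-is, while string values are expanded into "feature=value" columns.
from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False)
X = v.fit_transform([{"version": "1", "ham": 2}, {"version": "2", "spam": 0.3}])
print(v.get_feature_names_out())   # ['ham' 'spam' 'version=1' 'version=2']
print(X)                           # [[2.  0.  1.  0. ] [0.  0.3 0.  1. ]]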
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py ADDED
@@ -0,0 +1,160 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_array_equal
4
+
5
+ from sklearn.feature_extraction import FeatureHasher
6
+ from sklearn.feature_extraction._hashing_fast import transform as _hashing_transform
7
+
8
+
9
+ def test_feature_hasher_dicts():
10
+ feature_hasher = FeatureHasher(n_features=16)
11
+ assert "dict" == feature_hasher.input_type
12
+
13
+ raw_X = [{"foo": "bar", "dada": 42, "tzara": 37}, {"foo": "baz", "gaga": "string1"}]
14
+ X1 = FeatureHasher(n_features=16).transform(raw_X)
15
+ gen = (iter(d.items()) for d in raw_X)
16
+ X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
17
+ assert_array_equal(X1.toarray(), X2.toarray())
18
+
19
+
20
+ def test_feature_hasher_strings():
21
+ # mix byte and Unicode strings; note that "foo" is a duplicate in row 0
22
+ raw_X = [
23
+ ["foo", "bar", "baz", "foo".encode("ascii")],
24
+ ["bar".encode("ascii"), "baz", "quux"],
25
+ ]
26
+
27
+ for lg_n_features in (7, 9, 11, 16, 22):
28
+ n_features = 2**lg_n_features
29
+
30
+ it = (x for x in raw_X) # iterable
31
+
32
+ feature_hasher = FeatureHasher(
33
+ n_features=n_features, input_type="string", alternate_sign=False
34
+ )
35
+ X = feature_hasher.transform(it)
36
+
37
+ assert X.shape[0] == len(raw_X)
38
+ assert X.shape[1] == n_features
39
+
40
+ assert X[0].sum() == 4
41
+ assert X[1].sum() == 3
42
+
43
+ assert X.nnz == 6
44
+
45
+
46
+ @pytest.mark.parametrize(
47
+ "raw_X",
48
+ [
49
+ ["my_string", "another_string"],
50
+ (x for x in ["my_string", "another_string"]),
51
+ ],
52
+ ids=["list", "generator"],
53
+ )
54
+ def test_feature_hasher_single_string(raw_X):
55
+ """FeatureHasher raises error when a sample is a single string.
56
+
57
+ Non-regression test for gh-13199.
58
+ """
59
+ msg = "Samples can not be a single string"
60
+
61
+ feature_hasher = FeatureHasher(n_features=10, input_type="string")
62
+ with pytest.raises(ValueError, match=msg):
63
+ feature_hasher.transform(raw_X)
64
+
65
+
66
+ def test_hashing_transform_seed():
67
+ # check the influence of the seed when computing the hashes
68
+ raw_X = [
69
+ ["foo", "bar", "baz", "foo".encode("ascii")],
70
+ ["bar".encode("ascii"), "baz", "quux"],
71
+ ]
72
+
73
+ raw_X_ = (((f, 1) for f in x) for x in raw_X)
74
+ indices, indptr, _ = _hashing_transform(raw_X_, 2**7, str, False)
75
+
76
+ raw_X_ = (((f, 1) for f in x) for x in raw_X)
77
+ indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=0)
78
+ assert_array_equal(indices, indices_0)
79
+ assert_array_equal(indptr, indptr_0)
80
+
81
+ raw_X_ = (((f, 1) for f in x) for x in raw_X)
82
+ indices_1, _, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=1)
83
+ with pytest.raises(AssertionError):
84
+ assert_array_equal(indices, indices_1)
85
+
86
+
87
+ def test_feature_hasher_pairs():
88
+ raw_X = (
89
+ iter(d.items())
90
+ for d in [{"foo": 1, "bar": 2}, {"baz": 3, "quux": 4, "foo": -1}]
91
+ )
92
+ feature_hasher = FeatureHasher(n_features=16, input_type="pair")
93
+ x1, x2 = feature_hasher.transform(raw_X).toarray()
94
+ x1_nz = sorted(np.abs(x1[x1 != 0]))
95
+ x2_nz = sorted(np.abs(x2[x2 != 0]))
96
+ assert [1, 2] == x1_nz
97
+ assert [1, 3, 4] == x2_nz
98
+
99
+
100
+ def test_feature_hasher_pairs_with_string_values():
101
+ raw_X = (
102
+ iter(d.items())
103
+ for d in [{"foo": 1, "bar": "a"}, {"baz": "abc", "quux": 4, "foo": -1}]
104
+ )
105
+ feature_hasher = FeatureHasher(n_features=16, input_type="pair")
106
+ x1, x2 = feature_hasher.transform(raw_X).toarray()
107
+ x1_nz = sorted(np.abs(x1[x1 != 0]))
108
+ x2_nz = sorted(np.abs(x2[x2 != 0]))
109
+ assert [1, 1] == x1_nz
110
+ assert [1, 1, 4] == x2_nz
111
+
112
+ raw_X = (iter(d.items()) for d in [{"bax": "abc"}, {"bax": "abc"}])
113
+ x1, x2 = feature_hasher.transform(raw_X).toarray()
114
+ x1_nz = np.abs(x1[x1 != 0])
115
+ x2_nz = np.abs(x2[x2 != 0])
116
+ assert [1] == x1_nz
117
+ assert [1] == x2_nz
118
+ assert_array_equal(x1, x2)
119
+
120
+
121
+ def test_hash_empty_input():
122
+ n_features = 16
123
+ raw_X = [[], (), iter(range(0))]
124
+
125
+ feature_hasher = FeatureHasher(n_features=n_features, input_type="string")
126
+ X = feature_hasher.transform(raw_X)
127
+
128
+ assert_array_equal(X.toarray(), np.zeros((len(raw_X), n_features)))
129
+
130
+
131
+ def test_hasher_zeros():
132
+ # Assert that no zeros are materialized in the output.
133
+ X = FeatureHasher().transform([{"foo": 0}])
134
+ assert X.data.shape == (0,)
135
+
136
+
137
+ def test_hasher_alternate_sign():
138
+ X = [list("Thequickbrownfoxjumped")]
139
+
140
+ Xt = FeatureHasher(alternate_sign=True, input_type="string").fit_transform(X)
141
+ assert Xt.data.min() < 0 and Xt.data.max() > 0
142
+
143
+ Xt = FeatureHasher(alternate_sign=False, input_type="string").fit_transform(X)
144
+ assert Xt.data.min() > 0
145
+
146
+
147
+ def test_hash_collisions():
148
+ X = [list("Thequickbrownfoxjumped")]
149
+
150
+ Xt = FeatureHasher(
151
+ alternate_sign=True, n_features=1, input_type="string"
152
+ ).fit_transform(X)
153
+ # check that some of the hashed tokens are added
154
+ # with an opposite sign and cancel out
155
+ assert abs(Xt.data[0]) < len(X[0])
156
+
157
+ Xt = FeatureHasher(
158
+ alternate_sign=False, n_features=1, input_type="string"
159
+ ).fit_transform(X)
160
+ assert Xt.data[0] == len(X[0])
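# Illustrative sketch (not part of the upstream diff): the hashing trick in
# one picture. Features are mapped to a fixed number of columns without
# building a vocabulary, so unseen feature names at transform time are handled
# for free (at the cost of possible collisions, as the tests above show).
from sklearn.feature_extraction import FeatureHasher

hasher = FeatureHasher(n_features=16)           # input_type="dict" by default
X = hasher.transform([{"foo": 1, "bar": 2}, {"baz": 3}])
print(X.shape)                                   # (2, 16), scipy sparse matrix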
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py ADDED
@@ -0,0 +1,356 @@
1
+ # Authors: Emmanuelle Gouillart <[email protected]>
2
+ # Gael Varoquaux <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import numpy as np
6
+ import pytest
7
+ from scipy import ndimage
8
+ from scipy.sparse.csgraph import connected_components
9
+
10
+ from sklearn.feature_extraction.image import (
11
+ PatchExtractor,
12
+ _extract_patches,
13
+ extract_patches_2d,
14
+ grid_to_graph,
15
+ img_to_graph,
16
+ reconstruct_from_patches_2d,
17
+ )
18
+
19
+
20
+ def test_img_to_graph():
21
+ x, y = np.mgrid[:4, :4] - 10
22
+ grad_x = img_to_graph(x)
23
+ grad_y = img_to_graph(y)
24
+ assert grad_x.nnz == grad_y.nnz
25
+ # Negative elements are the diagonal: the elements of the original
26
+ # image. Positive elements are the values of the gradient, they
27
+ # should all be equal on grad_x and grad_y
28
+ np.testing.assert_array_equal(
29
+ grad_x.data[grad_x.data > 0], grad_y.data[grad_y.data > 0]
30
+ )
31
+
32
+
33
+ def test_img_to_graph_sparse():
34
+ # Check that the edges are in the right position
35
+ # when using a sparse image with a singleton component
36
+ mask = np.zeros((2, 3), dtype=bool)
37
+ mask[0, 0] = 1
38
+ mask[:, 2] = 1
39
+ x = np.zeros((2, 3))
40
+ x[0, 0] = 1
41
+ x[0, 2] = -1
42
+ x[1, 2] = -2
43
+ grad_x = img_to_graph(x, mask=mask).todense()
44
+ desired = np.array([[1, 0, 0], [0, -1, 1], [0, 1, -2]])
45
+ np.testing.assert_array_equal(grad_x, desired)
46
+
47
+
48
+ def test_grid_to_graph():
49
+ # Checking that the function works with graphs containing no edges
50
+ size = 2
51
+ roi_size = 1
52
+ # Generating two convex parts with one vertex
53
+ # Thus, edges will be empty in _to_graph
54
+ mask = np.zeros((size, size), dtype=bool)
55
+ mask[0:roi_size, 0:roi_size] = True
56
+ mask[-roi_size:, -roi_size:] = True
57
+ mask = mask.reshape(size**2)
58
+ A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
59
+ assert connected_components(A)[0] == 2
60
+
61
+ # check ordering
62
+ mask = np.zeros((2, 3), dtype=bool)
63
+ mask[0, 0] = 1
64
+ mask[:, 2] = 1
65
+ graph = grid_to_graph(2, 3, 1, mask=mask.ravel()).todense()
66
+ desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
67
+ np.testing.assert_array_equal(graph, desired)
68
+
69
+ # Checking that the function works whatever the type of mask is
70
+ mask = np.ones((size, size), dtype=np.int16)
71
+ A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
72
+ assert connected_components(A)[0] == 1
73
+
74
+ # Checking dtype of the graph
75
+ mask = np.ones((size, size))
76
+ A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
77
+ assert A.dtype == bool
78
+ A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
79
+ assert A.dtype == int
80
+ A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)
81
+ assert A.dtype == np.float64
82
+
83
+
84
+ def test_connect_regions(raccoon_face_fxt):
85
+ face = raccoon_face_fxt
86
+ # subsample by 4 to reduce run time
87
+ face = face[::4, ::4]
88
+ for thr in (50, 150):
89
+ mask = face > thr
90
+ graph = img_to_graph(face, mask=mask)
91
+ assert ndimage.label(mask)[1] == connected_components(graph)[0]
92
+
93
+
94
+ def test_connect_regions_with_grid(raccoon_face_fxt):
95
+ face = raccoon_face_fxt
96
+
97
+ # subsample by 4 to reduce run time
98
+ face = face[::4, ::4]
99
+
100
+ mask = face > 50
101
+ graph = grid_to_graph(*face.shape, mask=mask)
102
+ assert ndimage.label(mask)[1] == connected_components(graph)[0]
103
+
104
+ mask = face > 150
105
+ graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
106
+ assert ndimage.label(mask)[1] == connected_components(graph)[0]
107
+
108
+
109
+ @pytest.fixture
110
+ def downsampled_face(raccoon_face_fxt):
111
+ face = raccoon_face_fxt
112
+ face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
113
+ face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
114
+ face = face.astype(np.float32)
115
+ face /= 16.0
116
+ return face
117
+
118
+
119
+ @pytest.fixture
120
+ def orange_face(downsampled_face):
121
+ face = downsampled_face
122
+ face_color = np.zeros(face.shape + (3,))
123
+ face_color[:, :, 0] = 256 - face
124
+ face_color[:, :, 1] = 256 - face / 2
125
+ face_color[:, :, 2] = 256 - face / 4
126
+ return face_color
127
+
128
+
129
+ def _make_images(face):
130
+ # make a collection of faces
131
+ images = np.zeros((3,) + face.shape)
132
+ images[0] = face
133
+ images[1] = face + 1
134
+ images[2] = face + 2
135
+ return images
136
+
137
+
138
+ @pytest.fixture
139
+ def downsampled_face_collection(downsampled_face):
140
+ return _make_images(downsampled_face)
141
+
142
+
143
+ def test_extract_patches_all(downsampled_face):
144
+ face = downsampled_face
145
+ i_h, i_w = face.shape
146
+ p_h, p_w = 16, 16
147
+ expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
148
+ patches = extract_patches_2d(face, (p_h, p_w))
149
+ assert patches.shape == (expected_n_patches, p_h, p_w)
150
+
151
+
152
+ def test_extract_patches_all_color(orange_face):
153
+ face = orange_face
154
+ i_h, i_w = face.shape[:2]
155
+ p_h, p_w = 16, 16
156
+ expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
157
+ patches = extract_patches_2d(face, (p_h, p_w))
158
+ assert patches.shape == (expected_n_patches, p_h, p_w, 3)
159
+
160
+
161
+ def test_extract_patches_all_rect(downsampled_face):
162
+ face = downsampled_face
163
+ face = face[:, 32:97]
164
+ i_h, i_w = face.shape
165
+ p_h, p_w = 16, 12
166
+ expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
167
+
168
+ patches = extract_patches_2d(face, (p_h, p_w))
169
+ assert patches.shape == (expected_n_patches, p_h, p_w)
170
+
171
+
172
+ def test_extract_patches_max_patches(downsampled_face):
173
+ face = downsampled_face
174
+ i_h, i_w = face.shape
175
+ p_h, p_w = 16, 16
176
+
177
+ patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
178
+ assert patches.shape == (100, p_h, p_w)
179
+
180
+ expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
181
+ patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
182
+ assert patches.shape == (expected_n_patches, p_h, p_w)
183
+
184
+ with pytest.raises(ValueError):
185
+ extract_patches_2d(face, (p_h, p_w), max_patches=2.0)
186
+ with pytest.raises(ValueError):
187
+ extract_patches_2d(face, (p_h, p_w), max_patches=-1.0)
188
+
189
+
190
+ def test_extract_patch_same_size_image(downsampled_face):
191
+ face = downsampled_face
192
+ # Request patches of the same size as image
193
+ # Should return just the single patch a.k.a. the image
194
+ patches = extract_patches_2d(face, face.shape, max_patches=2)
195
+ assert patches.shape[0] == 1
196
+
197
+
198
+ def test_extract_patches_less_than_max_patches(downsampled_face):
199
+ face = downsampled_face
200
+ i_h, i_w = face.shape
201
+ p_h, p_w = 3 * i_h // 4, 3 * i_w // 4
202
+ # this is 3185
203
+ expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
204
+
205
+ patches = extract_patches_2d(face, (p_h, p_w), max_patches=4000)
206
+ assert patches.shape == (expected_n_patches, p_h, p_w)
207
+
208
+
209
+ def test_reconstruct_patches_perfect(downsampled_face):
210
+ face = downsampled_face
211
+ p_h, p_w = 16, 16
212
+
213
+ patches = extract_patches_2d(face, (p_h, p_w))
214
+ face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
215
+ np.testing.assert_array_almost_equal(face, face_reconstructed)
216
+
217
+
218
+ def test_reconstruct_patches_perfect_color(orange_face):
219
+ face = orange_face
220
+ p_h, p_w = 16, 16
221
+
222
+ patches = extract_patches_2d(face, (p_h, p_w))
223
+ face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
224
+ np.testing.assert_array_almost_equal(face, face_reconstructed)
225
+
226
+
227
+ def test_patch_extractor_fit(downsampled_face_collection):
228
+ faces = downsampled_face_collection
229
+ extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
230
+ assert extr == extr.fit(faces)
231
+
232
+
233
+ def test_patch_extractor_max_patches(downsampled_face_collection):
234
+ faces = downsampled_face_collection
235
+ i_h, i_w = faces.shape[1:3]
236
+ p_h, p_w = 8, 8
237
+
238
+ max_patches = 100
239
+ expected_n_patches = len(faces) * max_patches
240
+ extr = PatchExtractor(
241
+ patch_size=(p_h, p_w), max_patches=max_patches, random_state=0
242
+ )
243
+ patches = extr.transform(faces)
244
+ assert patches.shape == (expected_n_patches, p_h, p_w)
245
+
246
+ max_patches = 0.5
247
+ expected_n_patches = len(faces) * int(
248
+ (i_h - p_h + 1) * (i_w - p_w + 1) * max_patches
249
+ )
250
+ extr = PatchExtractor(
251
+ patch_size=(p_h, p_w), max_patches=max_patches, random_state=0
252
+ )
253
+ patches = extr.transform(faces)
254
+ assert patches.shape == (expected_n_patches, p_h, p_w)
255
+
256
+
257
+ def test_patch_extractor_max_patches_default(downsampled_face_collection):
258
+ faces = downsampled_face_collection
259
+ extr = PatchExtractor(max_patches=100, random_state=0)
260
+ patches = extr.transform(faces)
261
+ assert patches.shape == (len(faces) * 100, 19, 25)
262
+
263
+
264
+ def test_patch_extractor_all_patches(downsampled_face_collection):
265
+ faces = downsampled_face_collection
266
+ i_h, i_w = faces.shape[1:3]
267
+ p_h, p_w = 8, 8
268
+ expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
269
+ extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
270
+ patches = extr.transform(faces)
271
+ assert patches.shape == (expected_n_patches, p_h, p_w)
272
+
273
+
274
+ def test_patch_extractor_color(orange_face):
275
+ faces = _make_images(orange_face)
276
+ i_h, i_w = faces.shape[1:3]
277
+ p_h, p_w = 8, 8
278
+ expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
279
+ extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
280
+ patches = extr.transform(faces)
281
+ assert patches.shape == (expected_n_patches, p_h, p_w, 3)
282
+
283
+
284
+ def test_extract_patches_strided():
285
+ image_shapes_1D = [(10,), (10,), (11,), (10,)]
286
+ patch_sizes_1D = [(1,), (2,), (3,), (8,)]
287
+ patch_steps_1D = [(1,), (1,), (4,), (2,)]
288
+
289
+ expected_views_1D = [(10,), (9,), (3,), (2,)]
290
+ last_patch_1D = [(10,), (8,), (8,), (2,)]
291
+
292
+ image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
293
+ patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
294
+ patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
295
+
296
+ expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
297
+ last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
298
+
299
+ image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
300
+ patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
301
+ patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
302
+
303
+ expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
304
+ last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
305
+
306
+ image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
307
+ patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
308
+ patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
309
+ expected_views = expected_views_1D + expected_views_2D + expected_views_3D
310
+ last_patches = last_patch_1D + last_patch_2D + last_patch_3D
311
+
312
+ for image_shape, patch_size, patch_step, expected_view, last_patch in zip(
313
+ image_shapes, patch_sizes, patch_steps, expected_views, last_patches
314
+ ):
315
+ image = np.arange(np.prod(image_shape)).reshape(image_shape)
316
+ patches = _extract_patches(
317
+ image, patch_shape=patch_size, extraction_step=patch_step
318
+ )
319
+
320
+ ndim = len(image_shape)
321
+
322
+ assert patches.shape[:ndim] == expected_view
323
+ last_patch_slices = tuple(
324
+ slice(i, i + j, None) for i, j in zip(last_patch, patch_size)
325
+ )
326
+ assert (
327
+ patches[(-1, None, None) * ndim] == image[last_patch_slices].squeeze()
328
+ ).all()
329
+
330
+
331
+ def test_extract_patches_square(downsampled_face):
332
+ # test same patch size for all dimensions
333
+ face = downsampled_face
334
+ i_h, i_w = face.shape
335
+ p = 8
336
+ expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
337
+ patches = _extract_patches(face, patch_shape=p)
338
+ assert patches.shape == (expected_n_patches[0], expected_n_patches[1], p, p)
339
+
340
+
341
+ def test_width_patch():
342
+ # width and height of the patch should be less than the image
343
+ x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
344
+ with pytest.raises(ValueError):
345
+ extract_patches_2d(x, (4, 1))
346
+ with pytest.raises(ValueError):
347
+ extract_patches_2d(x, (1, 4))
348
+
349
+
350
+ def test_patch_extractor_wrong_input(orange_face):
351
+ """Check that an informative error is raised if the patch_size is not valid."""
352
+ faces = _make_images(orange_face)
353
+ err_msg = "patch_size must be a tuple of two integers"
354
+ extractor = PatchExtractor(patch_size=(8, 8, 8))
355
+ with pytest.raises(ValueError, match=err_msg):
356
+ extractor.transform(faces)
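# Illustrative sketch (not part of the upstream diff): mirrors
# test_grid_to_graph above. Two isolated masked pixels on a 2x2 grid yield a
# pixel-connectivity graph with two connected components.
import numpy as np
from scipy.sparse.csgraph import connected_components
from sklearn.feature_extraction.image import grid_to_graph

mask = np.zeros((2, 2), dtype=bool)
mask[0, 0] = True
mask[1, 1] = True
A = grid_to_graph(n_x=2, n_y=2, mask=mask.ravel(), return_as=np.ndarray)
print(connected_components(A)[0])               # 2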
venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py ADDED
@@ -0,0 +1,1655 @@
1
+ import pickle
2
+ import re
3
+ import warnings
4
+ from collections import defaultdict
5
+ from collections.abc import Mapping
6
+ from functools import partial
7
+ from io import StringIO
8
+ from itertools import product
9
+
10
+ import numpy as np
11
+ import pytest
12
+ from numpy.testing import assert_array_almost_equal, assert_array_equal
13
+ from scipy import sparse
14
+
15
+ from sklearn.base import clone
16
+ from sklearn.feature_extraction.text import (
17
+ ENGLISH_STOP_WORDS,
18
+ CountVectorizer,
19
+ HashingVectorizer,
20
+ TfidfTransformer,
21
+ TfidfVectorizer,
22
+ strip_accents_ascii,
23
+ strip_accents_unicode,
24
+ strip_tags,
25
+ )
26
+ from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
27
+ from sklearn.pipeline import Pipeline
28
+ from sklearn.svm import LinearSVC
29
+ from sklearn.utils import _IS_WASM, IS_PYPY
30
+ from sklearn.utils._testing import (
31
+ assert_allclose_dense_sparse,
32
+ assert_almost_equal,
33
+ fails_if_pypy,
34
+ skip_if_32bit,
35
+ )
36
+ from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
37
+
38
+ JUNK_FOOD_DOCS = (
39
+ "the pizza pizza beer copyright",
40
+ "the pizza burger beer copyright",
41
+ "the the pizza beer beer copyright",
42
+ "the burger beer beer copyright",
43
+ "the coke burger coke copyright",
44
+ "the coke burger burger",
45
+ )
46
+
47
+ NOTJUNK_FOOD_DOCS = (
48
+ "the salad celeri copyright",
49
+ "the salad salad sparkling water copyright",
50
+ "the the celeri celeri copyright",
51
+ "the tomato tomato salad water",
52
+ "the tomato salad water copyright",
53
+ )
54
+
55
+ ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
56
+
57
+
58
+ def uppercase(s):
59
+ return strip_accents_unicode(s).upper()
60
+
61
+
62
+ def strip_eacute(s):
63
+ return s.replace("é", "e")
64
+
65
+
66
+ def split_tokenize(s):
67
+ return s.split()
68
+
69
+
70
+ def lazy_analyze(s):
71
+ return ["the_ultimate_feature"]
72
+
73
+
74
+ def test_strip_accents():
75
+ # check some classical latin accentuated symbols
76
+ a = "àáâãäåçèéêë"
77
+ expected = "aaaaaaceeee"
78
+ assert strip_accents_unicode(a) == expected
79
+
80
+ a = "ìíîïñòóôõöùúûüý"
81
+ expected = "iiiinooooouuuuy"
82
+ assert strip_accents_unicode(a) == expected
83
+
84
+ # check some arabic
85
+ a = "\u0625" # alef with a hamza below: إ
86
+ expected = "\u0627" # simple alef: ا
87
+ assert strip_accents_unicode(a) == expected
88
+
89
+ # mix letters accentuated and not
90
+ a = "this is à test"
91
+ expected = "this is a test"
92
+ assert strip_accents_unicode(a) == expected
93
+
94
+ # strings that are already decomposed
95
+ a = "o\u0308" # o with diaeresis
96
+ expected = "o"
97
+ assert strip_accents_unicode(a) == expected
98
+
99
+ # combining marks by themselves
100
+ a = "\u0300\u0301\u0302\u0303"
101
+ expected = ""
102
+ assert strip_accents_unicode(a) == expected
103
+
104
+ # Multiple combining marks on one character
105
+ a = "o\u0308\u0304"
106
+ expected = "o"
107
+ assert strip_accents_unicode(a) == expected
108
+
109
+
110
+ def test_to_ascii():
111
+ # check some classical latin accentuated symbols
112
+ a = "àáâãäåçèéêë"
113
+ expected = "aaaaaaceeee"
114
+ assert strip_accents_ascii(a) == expected
115
+
116
+ a = "ìíîïñòóôõöùúûüý"
117
+ expected = "iiiinooooouuuuy"
118
+ assert strip_accents_ascii(a) == expected
119
+
120
+ # check some arabic
121
+ a = "\u0625" # alef with a hamza below
122
+ expected = "" # alef has no direct ascii match
123
+ assert strip_accents_ascii(a) == expected
124
+
125
+ # mix letters accentuated and not
126
+ a = "this is à test"
127
+ expected = "this is a test"
128
+ assert strip_accents_ascii(a) == expected
129
+
130
+
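# Illustrative sketch (not part of the upstream diff): the two helpers tested
# above differ on characters without an ASCII counterpart. The unicode variant
# only removes combining marks, while the ascii variant also drops anything
# that cannot be encoded as ASCII.
from sklearn.feature_extraction.text import (
    strip_accents_ascii,
    strip_accents_unicode,
)

print(strip_accents_unicode("\u0625"))   # plain alef "\u0627" is kept
print(strip_accents_ascii("\u0625"))     # "" -- no ASCII equivalent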
131
+ @pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer))
132
+ def test_word_analyzer_unigrams(Vectorizer):
133
+ wa = Vectorizer(strip_accents="ascii").build_analyzer()
134
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
135
+ expected = [
136
+ "ai",
137
+ "mange",
138
+ "du",
139
+ "kangourou",
140
+ "ce",
141
+ "midi",
142
+ "etait",
143
+ "pas",
144
+ "tres",
145
+ "bon",
146
+ ]
147
+ assert wa(text) == expected
148
+
149
+ text = "This is a test, really.\n\n I met Harry yesterday."
150
+ expected = ["this", "is", "test", "really", "met", "harry", "yesterday"]
151
+ assert wa(text) == expected
152
+
153
+ wa = Vectorizer(input="file").build_analyzer()
154
+ text = StringIO("This is a test with a file-like object!")
155
+ expected = ["this", "is", "test", "with", "file", "like", "object"]
156
+ assert wa(text) == expected
157
+
158
+ # with custom preprocessor
159
+ wa = Vectorizer(preprocessor=uppercase).build_analyzer()
160
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
161
+ expected = [
162
+ "AI",
163
+ "MANGE",
164
+ "DU",
165
+ "KANGOUROU",
166
+ "CE",
167
+ "MIDI",
168
+ "ETAIT",
169
+ "PAS",
170
+ "TRES",
171
+ "BON",
172
+ ]
173
+ assert wa(text) == expected
174
+
175
+ # with custom tokenizer
176
+ wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer()
177
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
178
+ expected = [
179
+ "j'ai",
180
+ "mange",
181
+ "du",
182
+ "kangourou",
183
+ "ce",
184
+ "midi,",
185
+ "c'etait",
186
+ "pas",
187
+ "tres",
188
+ "bon.",
189
+ ]
190
+ assert wa(text) == expected
191
+
192
+
193
+ def test_word_analyzer_unigrams_and_bigrams():
194
+ wa = CountVectorizer(
195
+ analyzer="word", strip_accents="unicode", ngram_range=(1, 2)
196
+ ).build_analyzer()
197
+
198
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
199
+ expected = [
200
+ "ai",
201
+ "mange",
202
+ "du",
203
+ "kangourou",
204
+ "ce",
205
+ "midi",
206
+ "etait",
207
+ "pas",
208
+ "tres",
209
+ "bon",
210
+ "ai mange",
211
+ "mange du",
212
+ "du kangourou",
213
+ "kangourou ce",
214
+ "ce midi",
215
+ "midi etait",
216
+ "etait pas",
217
+ "pas tres",
218
+ "tres bon",
219
+ ]
220
+ assert wa(text) == expected
221
+
222
+
223
+ def test_unicode_decode_error():
224
+ # decode_error defaults to "strict", so this should fail
225
+ # First, encode (as bytes) a unicode string.
226
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
227
+ text_bytes = text.encode("utf-8")
228
+
229
+ # Then let the Analyzer try to decode it as ascii. It should fail,
230
+ # because we have given it an incorrect encoding.
231
+ wa = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer()
232
+ with pytest.raises(UnicodeDecodeError):
233
+ wa(text_bytes)
234
+
235
+ ca = CountVectorizer(
236
+ analyzer="char", ngram_range=(3, 6), encoding="ascii"
237
+ ).build_analyzer()
238
+ with pytest.raises(UnicodeDecodeError):
239
+ ca(text_bytes)
240
+
241
+
242
+ def test_char_ngram_analyzer():
243
+ cnga = CountVectorizer(
244
+ analyzer="char", strip_accents="unicode", ngram_range=(3, 6)
245
+ ).build_analyzer()
246
+
247
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon"
248
+ expected = ["j'a", "'ai", "ai ", "i m", " ma"]
249
+ assert cnga(text)[:5] == expected
250
+ expected = ["s tres", " tres ", "tres b", "res bo", "es bon"]
251
+ assert cnga(text)[-5:] == expected
252
+
253
+ text = "This \n\tis a test, really.\n\n I met Harry yesterday"
254
+ expected = ["thi", "his", "is ", "s i", " is"]
255
+ assert cnga(text)[:5] == expected
256
+
257
+ expected = [" yeste", "yester", "esterd", "sterda", "terday"]
258
+ assert cnga(text)[-5:] == expected
259
+
260
+ cnga = CountVectorizer(
261
+ input="file", analyzer="char", ngram_range=(3, 6)
262
+ ).build_analyzer()
263
+ text = StringIO("This is a test with a file-like object!")
264
+ expected = ["thi", "his", "is ", "s i", " is"]
265
+ assert cnga(text)[:5] == expected
266
+
267
+
268
+ def test_char_wb_ngram_analyzer():
269
+ cnga = CountVectorizer(
270
+ analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6)
271
+ ).build_analyzer()
272
+
273
+ text = "This \n\tis a test, really.\n\n I met Harry yesterday"
274
+ expected = [" th", "thi", "his", "is ", " thi"]
275
+ assert cnga(text)[:5] == expected
276
+
277
+ expected = ["yester", "esterd", "sterda", "terday", "erday "]
278
+ assert cnga(text)[-5:] == expected
279
+
280
+ cnga = CountVectorizer(
281
+ input="file", analyzer="char_wb", ngram_range=(3, 6)
282
+ ).build_analyzer()
283
+ text = StringIO("A test with a file-like object!")
284
+ expected = [" a ", " te", "tes", "est", "st ", " tes"]
285
+ assert cnga(text)[:6] == expected
286
+
287
+
288
+ def test_word_ngram_analyzer():
289
+ cnga = CountVectorizer(
290
+ analyzer="word", strip_accents="unicode", ngram_range=(3, 6)
291
+ ).build_analyzer()
292
+
293
+ text = "This \n\tis a test, really.\n\n I met Harry yesterday"
294
+ expected = ["this is test", "is test really", "test really met"]
295
+ assert cnga(text)[:3] == expected
296
+
297
+ expected = [
298
+ "test really met harry yesterday",
299
+ "this is test really met harry",
300
+ "is test really met harry yesterday",
301
+ ]
302
+ assert cnga(text)[-3:] == expected
303
+
304
+ cnga_file = CountVectorizer(
305
+ input="file", analyzer="word", ngram_range=(3, 6)
306
+ ).build_analyzer()
307
+ file = StringIO(text)
308
+ assert cnga_file(file) == cnga(text)
309
+
310
+
311
+ def test_countvectorizer_custom_vocabulary():
312
+ vocab = {"pizza": 0, "beer": 1}
313
+ terms = set(vocab.keys())
314
+
315
+ # Try a few of the supported types.
316
+ for typ in [dict, list, iter, partial(defaultdict, int)]:
317
+ v = typ(vocab)
318
+ vect = CountVectorizer(vocabulary=v)
319
+ vect.fit(JUNK_FOOD_DOCS)
320
+ if isinstance(v, Mapping):
321
+ assert vect.vocabulary_ == vocab
322
+ else:
323
+ assert set(vect.vocabulary_) == terms
324
+ X = vect.transform(JUNK_FOOD_DOCS)
325
+ assert X.shape[1] == len(terms)
326
+ v = typ(vocab)
327
+ vect = CountVectorizer(vocabulary=v)
328
+ inv = vect.inverse_transform(X)
329
+ assert len(inv) == X.shape[0]
330
+
331
+
332
+ def test_countvectorizer_custom_vocabulary_pipeline():
333
+ what_we_like = ["pizza", "beer"]
334
+ pipe = Pipeline(
335
+ [
336
+ ("count", CountVectorizer(vocabulary=what_we_like)),
337
+ ("tfidf", TfidfTransformer()),
338
+ ]
339
+ )
340
+ X = pipe.fit_transform(ALL_FOOD_DOCS)
341
+ assert set(pipe.named_steps["count"].vocabulary_) == set(what_we_like)
342
+ assert X.shape[1] == len(what_we_like)
343
+
344
+
345
+ def test_countvectorizer_custom_vocabulary_repeated_indices():
346
+ vocab = {"pizza": 0, "beer": 0}
347
+ msg = "Vocabulary contains repeated indices"
348
+ with pytest.raises(ValueError, match=msg):
349
+ vect = CountVectorizer(vocabulary=vocab)
350
+ vect.fit(["pasta_siziliana"])
351
+
352
+
353
+ def test_countvectorizer_custom_vocabulary_gap_index():
354
+ vocab = {"pizza": 1, "beer": 2}
355
+ with pytest.raises(ValueError, match="doesn't contain index"):
356
+ vect = CountVectorizer(vocabulary=vocab)
357
+ vect.fit(["pasta_verdura"])
358
+
359
+
360
+ def test_countvectorizer_stop_words():
361
+ cv = CountVectorizer()
362
+ cv.set_params(stop_words="english")
363
+ assert cv.get_stop_words() == ENGLISH_STOP_WORDS
364
+ cv.set_params(stop_words="_bad_str_stop_")
365
+ with pytest.raises(ValueError):
366
+ cv.get_stop_words()
367
+ cv.set_params(stop_words="_bad_unicode_stop_")
368
+ with pytest.raises(ValueError):
369
+ cv.get_stop_words()
370
+ stoplist = ["some", "other", "words"]
371
+ cv.set_params(stop_words=stoplist)
372
+ assert cv.get_stop_words() == set(stoplist)
373
+
374
+
375
+ def test_countvectorizer_empty_vocabulary():
376
+ with pytest.raises(ValueError, match="empty vocabulary"):
377
+ vect = CountVectorizer(vocabulary=[])
378
+ vect.fit(["foo"])
379
+
380
+ with pytest.raises(ValueError, match="empty vocabulary"):
381
+ v = CountVectorizer(max_df=1.0, stop_words="english")
382
+ # fit on stopwords only
383
+ v.fit(["to be or not to be", "and me too", "and so do you"])
384
+
385
+
386
+ def test_fit_countvectorizer_twice():
387
+ cv = CountVectorizer()
388
+ X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
389
+ X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
390
+ assert X1.shape[1] != X2.shape[1]
391
+
392
+
393
+ def test_countvectorizer_custom_token_pattern():
394
+ """Check `get_feature_names_out()` when a custom token pattern is passed.
395
+ Non-regression test for:
396
+ https://github.com/scikit-learn/scikit-learn/issues/12971
397
+ """
398
+ corpus = [
399
+ "This is the 1st document in my corpus.",
400
+ "This document is the 2nd sample.",
401
+ "And this is the 3rd one.",
402
+ "Is this the 4th document?",
403
+ ]
404
+ token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
405
+ vectorizer = CountVectorizer(token_pattern=token_pattern)
406
+ vectorizer.fit_transform(corpus)
407
+ expected = ["document", "one", "sample"]
408
+ feature_names_out = vectorizer.get_feature_names_out()
409
+ assert_array_equal(feature_names_out, expected)
410
+
411
+
412
+ def test_countvectorizer_custom_token_pattern_with_several_group():
413
+ """Check that we raise an error if the token pattern captures several groups.
414
+ Non-regression test for:
415
+ https://github.com/scikit-learn/scikit-learn/issues/12971
416
+ """
417
+ corpus = [
418
+ "This is the 1st document in my corpus.",
419
+ "This document is the 2nd sample.",
420
+ "And this is the 3rd one.",
421
+ "Is this the 4th document?",
422
+ ]
423
+
424
+ token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
425
+ err_msg = "More than 1 capturing group in token pattern"
426
+ vectorizer = CountVectorizer(token_pattern=token_pattern)
427
+ with pytest.raises(ValueError, match=err_msg):
428
+ vectorizer.fit(corpus)
429
+
430
+
431
+ def test_countvectorizer_uppercase_in_vocab():
432
+ # Check that the check for uppercase in the provided vocabulary is only done at fit
433
+ # time and not at transform time (#21251)
434
+ vocabulary = ["Sample", "Upper", "Case", "Vocabulary"]
435
+ message = (
436
+ "Upper case characters found in"
437
+ " vocabulary while 'lowercase'"
438
+ " is True. These entries will not"
439
+ " be matched with any documents"
440
+ )
441
+
442
+ vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary)
443
+
444
+ with pytest.warns(UserWarning, match=message):
445
+ vectorizer.fit(vocabulary)
446
+
447
+ with warnings.catch_warnings():
448
+ warnings.simplefilter("error", UserWarning)
449
+ vectorizer.transform(vocabulary)
450
+
451
+
452
+ def test_tf_transformer_feature_names_out():
453
+ """Check get_feature_names_out for TfidfTransformer"""
454
+ X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
455
+ tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X)
456
+
457
+ feature_names_in = ["a", "c", "b"]
458
+ feature_names_out = tr.get_feature_names_out(feature_names_in)
459
+ assert_array_equal(feature_names_in, feature_names_out)
460
+
461
+
462
+ def test_tf_idf_smoothing():
463
+ X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
464
+ tr = TfidfTransformer(smooth_idf=True, norm="l2")
465
+ tfidf = tr.fit_transform(X).toarray()
466
+ assert (tfidf >= 0).all()
467
+
468
+ # check normalization
469
+ assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])
470
+
471
+ # this is robust to features with only zeros
472
+ X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
473
+ tr = TfidfTransformer(smooth_idf=True, norm="l2")
474
+ tfidf = tr.fit_transform(X).toarray()
475
+ assert (tfidf >= 0).all()
476
+
477
+
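# Illustrative sketch (not part of the upstream diff): the smoothed idf used
# above follows the documented formula idf(t) = ln((1 + n) / (1 + df(t))) + 1,
# which never divides by zero, even for terms that appear in no document.
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

X = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]])
tr = TfidfTransformer(smooth_idf=True, norm=None).fit(X)
df = np.count_nonzero(X, axis=0)                # document frequencies: 3, 2, 1
np.testing.assert_allclose(tr.idf_, np.log((1 + 3) / (1 + df)) + 1)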
478
+ @pytest.mark.xfail(
479
+ _IS_WASM,
480
+ reason=(
481
+ "no floating point exceptions, see"
482
+ " https://github.com/numpy/numpy/pull/21895#issuecomment-1311525881"
483
+ ),
484
+ )
485
+ def test_tfidf_no_smoothing():
486
+ X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
487
+ tr = TfidfTransformer(smooth_idf=False, norm="l2")
488
+ tfidf = tr.fit_transform(X).toarray()
489
+ assert (tfidf >= 0).all()
490
+
491
+ # check normalization
492
+ assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])
493
+
494
+ # the lack of smoothing make IDF fragile in the presence of feature with
495
+ # only zeros
496
+ X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
497
+ tr = TfidfTransformer(smooth_idf=False, norm="l2")
498
+
499
+ in_warning_message = "divide by zero"
500
+ with pytest.warns(RuntimeWarning, match=in_warning_message):
501
+ tr.fit_transform(X).toarray()
502
+
503
+
504
+ def test_sublinear_tf():
505
+ X = [[1], [2], [3]]
506
+ tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
507
+ tfidf = tr.fit_transform(X).toarray()
508
+ assert tfidf[0] == 1
509
+ assert tfidf[1] > tfidf[0]
510
+ assert tfidf[2] > tfidf[1]
511
+ assert tfidf[1] < 2
512
+ assert tfidf[2] < 3
513
+
514
+
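# Illustrative sketch (not part of the upstream diff): what test_sublinear_tf
# above checks indirectly. With sublinear_tf=True, use_idf=False and
# norm=None, TfidfTransformer maps each raw count tf to 1 + log(tf).
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
values = tr.fit_transform([[1], [2], [3]]).toarray().ravel()
np.testing.assert_allclose(values, 1 + np.log([1, 2, 3]))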
515
+ def test_vectorizer():
516
+ # raw documents as an iterator
517
+ train_data = iter(ALL_FOOD_DOCS[:-1])
518
+ test_data = [ALL_FOOD_DOCS[-1]]
519
+ n_train = len(ALL_FOOD_DOCS) - 1
520
+
521
+ # test without vocabulary
522
+ v1 = CountVectorizer(max_df=0.5)
523
+ counts_train = v1.fit_transform(train_data)
524
+ if hasattr(counts_train, "tocsr"):
525
+ counts_train = counts_train.tocsr()
526
+ assert counts_train[0, v1.vocabulary_["pizza"]] == 2
527
+
528
+ # build a vectorizer v2 with the same vocabulary as the one fitted by v1
529
+ v2 = CountVectorizer(vocabulary=v1.vocabulary_)
530
+
531
+ # check that the two vectorizers give the same output on the test sample
532
+ for v in (v1, v2):
533
+ counts_test = v.transform(test_data)
534
+ if hasattr(counts_test, "tocsr"):
535
+ counts_test = counts_test.tocsr()
536
+
537
+ vocabulary = v.vocabulary_
538
+ assert counts_test[0, vocabulary["salad"]] == 1
539
+ assert counts_test[0, vocabulary["tomato"]] == 1
540
+ assert counts_test[0, vocabulary["water"]] == 1
541
+
542
+ # stop word from the fixed list
543
+ assert "the" not in vocabulary
544
+
545
+ # stop word found automatically by the vectorizer DF thresholding
546
+ # words that are highly frequent across the complete corpus are likely
547
+ # to be uninformative (either real stop words or extraction
548
+ # artifacts)
549
+ assert "copyright" not in vocabulary
550
+
551
+ # not present in the sample
552
+ assert counts_test[0, vocabulary["coke"]] == 0
553
+ assert counts_test[0, vocabulary["burger"]] == 0
554
+ assert counts_test[0, vocabulary["beer"]] == 0
555
+ assert counts_test[0, vocabulary["pizza"]] == 0
556
+
557
+ # test tf-idf
558
+ t1 = TfidfTransformer(norm="l1")
559
+ tfidf = t1.fit(counts_train).transform(counts_train).toarray()
560
+ assert len(t1.idf_) == len(v1.vocabulary_)
561
+ assert tfidf.shape == (n_train, len(v1.vocabulary_))
562
+
563
+ # test tf-idf with new data
564
+ tfidf_test = t1.transform(counts_test).toarray()
565
+ assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_))
566
+
567
+ # test tf alone
568
+ t2 = TfidfTransformer(norm="l1", use_idf=False)
569
+ tf = t2.fit(counts_train).transform(counts_train).toarray()
570
+ assert not hasattr(t2, "idf_")
571
+
572
+ # test idf transform with unlearned idf vector
573
+ t3 = TfidfTransformer(use_idf=True)
574
+ with pytest.raises(ValueError):
575
+ t3.transform(counts_train)
576
+
577
+ # L1-normalized term frequencies sum to one
578
+ assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
579
+
580
+ # test the direct tfidf vectorizer
581
+ # (equivalent to term count vectorizer + tfidf transformer)
582
+ train_data = iter(ALL_FOOD_DOCS[:-1])
583
+ tv = TfidfVectorizer(norm="l1")
584
+
585
+ tv.max_df = v1.max_df
586
+ tfidf2 = tv.fit_transform(train_data).toarray()
587
+ assert not tv.fixed_vocabulary_
588
+ assert_array_almost_equal(tfidf, tfidf2)
589
+
590
+ # test the direct tfidf vectorizer with new data
591
+ tfidf_test2 = tv.transform(test_data).toarray()
592
+ assert_array_almost_equal(tfidf_test, tfidf_test2)
593
+
594
+ # test transform on unfitted vectorizer with empty vocabulary
595
+ v3 = CountVectorizer(vocabulary=None)
596
+ with pytest.raises(ValueError):
597
+ v3.transform(train_data)
598
+
599
+ # check the ascii accent-stripping preprocessor
600
+ v3.set_params(strip_accents="ascii", lowercase=False)
601
+ processor = v3.build_preprocessor()
602
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
603
+ expected = strip_accents_ascii(text)
604
+ result = processor(text)
605
+ assert expected == result
606
+
607
+ # error on bad strip_accents param
608
+ v3.set_params(strip_accents="_gabbledegook_", preprocessor=None)
609
+ with pytest.raises(ValueError):
610
+ v3.build_preprocessor()
611
+
612
+ # error with bad analyzer type
613
+ v3.set_params = "_invalid_analyzer_type_"
614
+ with pytest.raises(ValueError):
615
+ v3.build_analyzer()
616
+
617
+
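+ # Editor's note: an illustrative sketch (not upstream code) of the equivalence
+ # exercised above: TfidfVectorizer gives the same result as a CountVectorizer
+ # followed by a TfidfTransformer with matching parameters.
+ def _sketch_tfidf_vectorizer_equivalence():
+     import numpy as np
+     from sklearn.feature_extraction.text import (
+         CountVectorizer,
+         TfidfTransformer,
+         TfidfVectorizer,
+     )
+
+     docs = ["pizza pizza beer", "salad tomato water"]
+     counts = CountVectorizer().fit_transform(docs)
+     two_step = TfidfTransformer(norm="l1").fit_transform(counts).toarray()
+     one_step = TfidfVectorizer(norm="l1").fit_transform(docs).toarray()
+     assert np.allclose(two_step, one_step)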
618
+ def test_tfidf_vectorizer_setters():
619
+ norm, use_idf, smooth_idf, sublinear_tf = "l2", False, False, False
620
+ tv = TfidfVectorizer(
621
+ norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf
622
+ )
623
+ tv.fit(JUNK_FOOD_DOCS)
624
+ assert tv._tfidf.norm == norm
625
+ assert tv._tfidf.use_idf == use_idf
626
+ assert tv._tfidf.smooth_idf == smooth_idf
627
+ assert tv._tfidf.sublinear_tf == sublinear_tf
628
+
629
+ # assigning value to `TfidfTransformer` should not have any effect until
630
+ # fitting
631
+ tv.norm = "l1"
632
+ tv.use_idf = True
633
+ tv.smooth_idf = True
634
+ tv.sublinear_tf = True
635
+ assert tv._tfidf.norm == norm
636
+ assert tv._tfidf.use_idf == use_idf
637
+ assert tv._tfidf.smooth_idf == smooth_idf
638
+ assert tv._tfidf.sublinear_tf == sublinear_tf
639
+
640
+ tv.fit(JUNK_FOOD_DOCS)
641
+ assert tv._tfidf.norm == tv.norm
642
+ assert tv._tfidf.use_idf == tv.use_idf
643
+ assert tv._tfidf.smooth_idf == tv.smooth_idf
644
+ assert tv._tfidf.sublinear_tf == tv.sublinear_tf
645
+
646
+
647
+ @fails_if_pypy
648
+ def test_hashing_vectorizer():
649
+ v = HashingVectorizer()
650
+ X = v.transform(ALL_FOOD_DOCS)
651
+ token_nnz = X.nnz
652
+ assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
653
+ assert X.dtype == v.dtype
654
+
655
+ # By default the hashed values receive a random sign and l2 normalization
656
+ # makes the feature values bounded
657
+ assert np.min(X.data) > -1
658
+ assert np.min(X.data) < 0
659
+ assert np.max(X.data) > 0
660
+ assert np.max(X.data) < 1
661
+
662
+ # Check that the rows are normalized
663
+ for i in range(X.shape[0]):
664
+ assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
665
+
666
+ # Check vectorization with some non-default parameters
667
+ v = HashingVectorizer(ngram_range=(1, 2), norm="l1")
668
+ X = v.transform(ALL_FOOD_DOCS)
669
+ assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
670
+ assert X.dtype == v.dtype
671
+
672
+ # ngrams generate more non zeros
673
+ ngrams_nnz = X.nnz
674
+ assert ngrams_nnz > token_nnz
675
+ assert ngrams_nnz < 2 * token_nnz
676
+
677
+ # makes the feature values bounded
678
+ assert np.min(X.data) > -1
679
+ assert np.max(X.data) < 1
680
+
681
+ # Check that the rows are normalized
682
+ for i in range(X.shape[0]):
683
+ assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
684
+
685
+
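+ # Editor's note: a minimal sketch (not part of the upstream test suite) of the
+ # stateless nature of HashingVectorizer: transform works without fitting and,
+ # with the default l2 norm, every non-empty row lies on the unit sphere.
+ def _sketch_hashing_vectorizer_stateless():
+     import numpy as np
+     from sklearn.feature_extraction.text import HashingVectorizer
+
+     docs = ["the pizza burger beer", "the salad celeri water"]
+     X = HashingVectorizer(n_features=2**8).transform(docs)
+     for row in X:
+         assert np.isclose(np.linalg.norm(row.data), 1.0)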
686
+ def test_feature_names():
687
+ cv = CountVectorizer(max_df=0.5)
688
+
689
+ # test for Value error on unfitted/empty vocabulary
690
+ with pytest.raises(ValueError):
691
+ cv.get_feature_names_out()
692
+ assert not cv.fixed_vocabulary_
693
+
694
+ # test for vocabulary learned from data
695
+ X = cv.fit_transform(ALL_FOOD_DOCS)
696
+ n_samples, n_features = X.shape
697
+ assert len(cv.vocabulary_) == n_features
698
+
699
+ feature_names = cv.get_feature_names_out()
700
+ assert isinstance(feature_names, np.ndarray)
701
+ assert feature_names.dtype == object
702
+
703
+ assert len(feature_names) == n_features
704
+ assert_array_equal(
705
+ [
706
+ "beer",
707
+ "burger",
708
+ "celeri",
709
+ "coke",
710
+ "pizza",
711
+ "salad",
712
+ "sparkling",
713
+ "tomato",
714
+ "water",
715
+ ],
716
+ feature_names,
717
+ )
718
+
719
+ for idx, name in enumerate(feature_names):
720
+ assert idx == cv.vocabulary_.get(name)
721
+
722
+ # test for custom vocabulary
723
+ vocab = [
724
+ "beer",
725
+ "burger",
726
+ "celeri",
727
+ "coke",
728
+ "pizza",
729
+ "salad",
730
+ "sparkling",
731
+ "tomato",
732
+ "water",
733
+ ]
734
+
735
+ cv = CountVectorizer(vocabulary=vocab)
736
+ feature_names = cv.get_feature_names_out()
737
+ assert_array_equal(
738
+ [
739
+ "beer",
740
+ "burger",
741
+ "celeri",
742
+ "coke",
743
+ "pizza",
744
+ "salad",
745
+ "sparkling",
746
+ "tomato",
747
+ "water",
748
+ ],
749
+ feature_names,
750
+ )
751
+ assert cv.fixed_vocabulary_
752
+
753
+ for idx, name in enumerate(feature_names):
754
+ assert idx == cv.vocabulary_.get(name)
755
+
756
+
757
+ @pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
758
+ def test_vectorizer_max_features(Vectorizer):
759
+ expected_vocabulary = {"burger", "beer", "salad", "pizza"}
760
+ expected_stop_words = {
761
+ "celeri",
762
+ "tomato",
763
+ "copyright",
764
+ "coke",
765
+ "sparkling",
766
+ "water",
767
+ "the",
768
+ }
769
+
770
+ # test bounded number of extracted features
771
+ vectorizer = Vectorizer(max_df=0.6, max_features=4)
772
+ vectorizer.fit(ALL_FOOD_DOCS)
773
+ assert set(vectorizer.vocabulary_) == expected_vocabulary
774
+ assert vectorizer.stop_words_ == expected_stop_words
775
+
776
+
777
+ def test_count_vectorizer_max_features():
778
+ # Regression test: max_features didn't work correctly in 0.14.
779
+
780
+ cv_1 = CountVectorizer(max_features=1)
781
+ cv_3 = CountVectorizer(max_features=3)
782
+ cv_None = CountVectorizer(max_features=None)
783
+
784
+ counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
785
+ counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
786
+ counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
787
+
788
+ features_1 = cv_1.get_feature_names_out()
789
+ features_3 = cv_3.get_feature_names_out()
790
+ features_None = cv_None.get_feature_names_out()
791
+
792
+ # The most common feature is "the", with frequency 7.
793
+ assert 7 == counts_1.max()
794
+ assert 7 == counts_3.max()
795
+ assert 7 == counts_None.max()
796
+
797
+ # The most common feature should be the same
798
+ assert "the" == features_1[np.argmax(counts_1)]
799
+ assert "the" == features_3[np.argmax(counts_3)]
800
+ assert "the" == features_None[np.argmax(counts_None)]
801
+
802
+
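+ # Editor's note: a hedged sketch (not upstream code) of the max_features
+ # behaviour checked above: only the terms with the highest corpus-wide counts
+ # are kept in the vocabulary; the rest are recorded in stop_words_.
+ def _sketch_max_features_keeps_most_frequent():
+     from sklearn.feature_extraction.text import CountVectorizer
+
+     docs = ["apple apple banana", "apple banana cherry"]
+     cv = CountVectorizer(max_features=2).fit(docs)
+     assert set(cv.get_feature_names_out()) == {"apple", "banana"}
+     assert "cherry" in cv.stop_words_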
803
+ def test_vectorizer_max_df():
804
+ test_data = ["abc", "dea", "eat"]
805
+ vect = CountVectorizer(analyzer="char", max_df=1.0)
806
+ vect.fit(test_data)
807
+ assert "a" in vect.vocabulary_.keys()
808
+ assert len(vect.vocabulary_.keys()) == 6
809
+ assert len(vect.stop_words_) == 0
810
+
811
+ vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
812
+ vect.fit(test_data)
813
+ assert "a" not in vect.vocabulary_.keys() # {ae} ignored
814
+ assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain
815
+ assert "a" in vect.stop_words_
816
+ assert len(vect.stop_words_) == 2
817
+
818
+ vect.max_df = 1
819
+ vect.fit(test_data)
820
+ assert "a" not in vect.vocabulary_.keys() # {ae} ignored
821
+ assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain
822
+ assert "a" in vect.stop_words_
823
+ assert len(vect.stop_words_) == 2
824
+
825
+
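+ # Editor's note: an illustrative sketch (not upstream code): with three
+ # documents, max_df=0.5 translates to a maximum document count of 1.5, so any
+ # term appearing in two or more documents is treated as a corpus-specific
+ # stop word.
+ def _sketch_max_df_proportion():
+     from sklearn.feature_extraction.text import CountVectorizer
+
+     docs = ["abc", "dea", "eat"]
+     cv = CountVectorizer(analyzer="char", max_df=0.5).fit(docs)
+     assert "a" not in cv.vocabulary_  # "a" occurs in all three documents
+     assert "a" in cv.stop_words_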
826
+ def test_vectorizer_min_df():
827
+ test_data = ["abc", "dea", "eat"]
828
+ vect = CountVectorizer(analyzer="char", min_df=1)
829
+ vect.fit(test_data)
830
+ assert "a" in vect.vocabulary_.keys()
831
+ assert len(vect.vocabulary_.keys()) == 6
832
+ assert len(vect.stop_words_) == 0
833
+
834
+ vect.min_df = 2
835
+ vect.fit(test_data)
836
+ assert "c" not in vect.vocabulary_.keys() # {bcdt} ignored
837
+ assert len(vect.vocabulary_.keys()) == 2 # {ae} remain
838
+ assert "c" in vect.stop_words_
839
+ assert len(vect.stop_words_) == 4
840
+
841
+ vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
842
+ vect.fit(test_data)
843
+ assert "c" not in vect.vocabulary_.keys() # {bcdet} ignored
844
+ assert len(vect.vocabulary_.keys()) == 1 # {a} remains
845
+ assert "c" in vect.stop_words_
846
+ assert len(vect.stop_words_) == 5
847
+
848
+
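+ # Editor's note: the mirror-image sketch for min_df (not upstream code): terms
+ # occurring in fewer documents than the threshold are dropped from the
+ # vocabulary and recorded in stop_words_.
+ def _sketch_min_df_absolute():
+     from sklearn.feature_extraction.text import CountVectorizer
+
+     docs = ["abc", "dea", "eat"]
+     cv = CountVectorizer(analyzer="char", min_df=2).fit(docs)
+     assert set(cv.vocabulary_) == {"a", "e"}
+     assert "c" in cv.stop_words_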
849
+ def test_count_binary_occurrences():
850
+ # by default multiple occurrences are counted as longs
851
+ test_data = ["aaabc", "abbde"]
852
+ vect = CountVectorizer(analyzer="char", max_df=1.0)
853
+ X = vect.fit_transform(test_data).toarray()
854
+ assert_array_equal(["a", "b", "c", "d", "e"], vect.get_feature_names_out())
855
+ assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X)
856
+
857
+ # using boolean features, we can fetch the binary occurrence info
858
+ # instead.
859
+ vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True)
860
+ X = vect.fit_transform(test_data).toarray()
861
+ assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X)
862
+
863
+ # check the ability to change the dtype
864
+ vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32)
865
+ X_sparse = vect.fit_transform(test_data)
866
+ assert X_sparse.dtype == np.float32
867
+
868
+
869
+ @fails_if_pypy
870
+ def test_hashed_binary_occurrences():
871
+ # by default multiple occurrences are counted as longs
872
+ test_data = ["aaabc", "abbde"]
873
+ vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None)
874
+ X = vect.transform(test_data)
875
+ assert np.max(X[0:1].data) == 3
876
+ assert np.max(X[1:2].data) == 2
877
+ assert X.dtype == np.float64
878
+
879
+ # using boolean features, we can fetch the binary occurrence info
880
+ # instead.
881
+ vect = HashingVectorizer(
882
+ analyzer="char", alternate_sign=False, binary=True, norm=None
883
+ )
884
+ X = vect.transform(test_data)
885
+ assert np.max(X.data) == 1
886
+ assert X.dtype == np.float64
887
+
888
+ # check the ability to change the dtype
889
+ vect = HashingVectorizer(
890
+ analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64
891
+ )
892
+ X = vect.transform(test_data)
893
+ assert X.dtype == np.float64
894
+
895
+
896
+ @pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
897
+ def test_vectorizer_inverse_transform(Vectorizer):
898
+ # raw documents
899
+ data = ALL_FOOD_DOCS
900
+ vectorizer = Vectorizer()
901
+ transformed_data = vectorizer.fit_transform(data)
902
+ inversed_data = vectorizer.inverse_transform(transformed_data)
903
+ assert isinstance(inversed_data, list)
904
+
905
+ analyze = vectorizer.build_analyzer()
906
+ for doc, inversed_terms in zip(data, inversed_data):
907
+ terms = np.sort(np.unique(analyze(doc)))
908
+ inversed_terms = np.sort(np.unique(inversed_terms))
909
+ assert_array_equal(terms, inversed_terms)
910
+
911
+ assert sparse.issparse(transformed_data)
912
+ assert transformed_data.format == "csr"
913
+
914
+ # Test that inverse_transform also works with dense numpy arrays and
915
+ # other scipy sparse formats
916
+ transformed_data2 = transformed_data.toarray()
917
+ inversed_data2 = vectorizer.inverse_transform(transformed_data2)
918
+ for terms, terms2 in zip(inversed_data, inversed_data2):
919
+ assert_array_equal(np.sort(terms), np.sort(terms2))
920
+
921
+ # Check that inverse_transform also works on non CSR sparse data:
922
+ transformed_data3 = transformed_data.tocsc()
923
+ inversed_data3 = vectorizer.inverse_transform(transformed_data3)
924
+ for terms, terms3 in zip(inversed_data, inversed_data3):
925
+ assert_array_equal(np.sort(terms), np.sort(terms3))
926
+
927
+
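+ # Editor's note: a minimal usage sketch of inverse_transform (not part of the
+ # upstream test suite): each row of the document-term matrix is mapped back to
+ # the array of terms it contains.
+ def _sketch_inverse_transform_roundtrip():
+     from sklearn.feature_extraction.text import CountVectorizer
+
+     docs = ["pizza beer", "salad water"]
+     cv = CountVectorizer()
+     X = cv.fit_transform(docs)
+     terms_per_doc = cv.inverse_transform(X)
+     assert sorted(terms_per_doc[0]) == ["beer", "pizza"]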
928
+ def test_count_vectorizer_pipeline_grid_selection():
929
+ # raw documents
930
+ data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
931
+
932
+ # label junk food as -1, the others as +1
933
+ target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
934
+
935
+ # split the dataset for model development and final evaluation
936
+ train_data, test_data, target_train, target_test = train_test_split(
937
+ data, target, test_size=0.2, random_state=0
938
+ )
939
+
940
+ pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC(dual="auto"))])
941
+
942
+ parameters = {
943
+ "vect__ngram_range": [(1, 1), (1, 2)],
944
+ "svc__loss": ("hinge", "squared_hinge"),
945
+ }
946
+
947
+ # find the best parameters for both the feature extraction and the
948
+ # classifier
949
+ grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3)
950
+
951
+ # Check that the best model found by grid search is 100% correct on the
952
+ # held out evaluation set.
953
+ pred = grid_search.fit(train_data, target_train).predict(test_data)
954
+ assert_array_equal(pred, target_test)
955
+
956
+ # on this toy dataset all candidate models converge to 100% accuracy, and
957
+ # the estimator selected by the grid search uses the unigram
958
+ # representation
959
+ assert grid_search.best_score_ == 1.0
960
+ best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
961
+ assert best_vectorizer.ngram_range == (1, 1)
962
+
963
+
964
+ def test_vectorizer_pipeline_grid_selection():
965
+ # raw documents
966
+ data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
967
+
968
+ # label junk food as -1, the others as +1
969
+ target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
970
+
971
+ # split the dataset for model development and final evaluation
972
+ train_data, test_data, target_train, target_test = train_test_split(
973
+ data, target, test_size=0.1, random_state=0
974
+ )
975
+
976
+ pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))])
977
+
978
+ parameters = {
979
+ "vect__ngram_range": [(1, 1), (1, 2)],
980
+ "vect__norm": ("l1", "l2"),
981
+ "svc__loss": ("hinge", "squared_hinge"),
982
+ }
983
+
984
+ # find the best parameters for both the feature extraction and the
985
+ # classifier
986
+ grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
987
+
988
+ # Check that the best model found by grid search is 100% correct on the
989
+ # held out evaluation set.
990
+ pred = grid_search.fit(train_data, target_train).predict(test_data)
991
+ assert_array_equal(pred, target_test)
992
+
993
+ # on this toy dataset the estimator selected by the grid search reaches
994
+ # 100% accuracy and its vectorizer uses the unigram representation with
995
+ # l2 normalization
996
+ assert grid_search.best_score_ == 1.0
997
+ best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
998
+ assert best_vectorizer.ngram_range == (1, 1)
999
+ assert best_vectorizer.norm == "l2"
1000
+ assert not best_vectorizer.fixed_vocabulary_
1001
+
1002
+
1003
+ def test_vectorizer_pipeline_cross_validation():
1004
+ # raw documents
1005
+ data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
1006
+
1007
+ # label junk food as -1, the others as +1
1008
+ target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
1009
+
1010
+ pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))])
1011
+
1012
+ cv_scores = cross_val_score(pipeline, data, target, cv=3)
1013
+ assert_array_equal(cv_scores, [1.0, 1.0, 1.0])
1014
+
1015
+
1016
+ @fails_if_pypy
1017
+ def test_vectorizer_unicode():
1018
+ # tests that the count vectorizer works with cyrillic.
1019
+ document = (
1020
+ "Машинное обучение — обширный подраздел искусственного "
1021
+ "интеллекта, изучающий методы построения алгоритмов, "
1022
+ "способных обучаться."
1023
+ )
1024
+
1025
+ vect = CountVectorizer()
1026
+ X_counted = vect.fit_transform([document])
1027
+ assert X_counted.shape == (1, 12)
1028
+
1029
+ vect = HashingVectorizer(norm=None, alternate_sign=False)
1030
+ X_hashed = vect.transform([document])
1031
+ assert X_hashed.shape == (1, 2**20)
1032
+
1033
+ # No collisions on such a small dataset
1034
+ assert X_counted.nnz == X_hashed.nnz
1035
+
1036
+ # When norm is None and alternate_sign is False, the tokens are counted
1037
+ # exactly, up to hash collisions
1038
+ assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
1039
+
1040
+
1041
+ def test_tfidf_vectorizer_with_fixed_vocabulary():
1042
+ # non regression smoke test for inheritance issues
1043
+ vocabulary = ["pizza", "celeri"]
1044
+ vect = TfidfVectorizer(vocabulary=vocabulary)
1045
+ X_1 = vect.fit_transform(ALL_FOOD_DOCS)
1046
+ X_2 = vect.transform(ALL_FOOD_DOCS)
1047
+ assert_array_almost_equal(X_1.toarray(), X_2.toarray())
1048
+ assert vect.fixed_vocabulary_
1049
+
1050
+
1051
+ def test_pickling_vectorizer():
1052
+ instances = [
1053
+ HashingVectorizer(),
1054
+ HashingVectorizer(norm="l1"),
1055
+ HashingVectorizer(binary=True),
1056
+ HashingVectorizer(ngram_range=(1, 2)),
1057
+ CountVectorizer(),
1058
+ CountVectorizer(preprocessor=strip_tags),
1059
+ CountVectorizer(analyzer=lazy_analyze),
1060
+ CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
1061
+ CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
1062
+ TfidfVectorizer(),
1063
+ TfidfVectorizer(analyzer=lazy_analyze),
1064
+ TfidfVectorizer().fit(JUNK_FOOD_DOCS),
1065
+ ]
1066
+
1067
+ for orig in instances:
1068
+ s = pickle.dumps(orig)
1069
+ copy = pickle.loads(s)
1070
+ assert type(copy) == orig.__class__
1071
+ assert copy.get_params() == orig.get_params()
1072
+ if IS_PYPY and isinstance(orig, HashingVectorizer):
1073
+ continue
1074
+ else:
1075
+ assert_allclose_dense_sparse(
1076
+ copy.fit_transform(JUNK_FOOD_DOCS),
1077
+ orig.fit_transform(JUNK_FOOD_DOCS),
1078
+ )
1079
+
1080
+
1081
+ @pytest.mark.parametrize(
1082
+ "factory",
1083
+ [
1084
+ CountVectorizer.build_analyzer,
1085
+ CountVectorizer.build_preprocessor,
1086
+ CountVectorizer.build_tokenizer,
1087
+ ],
1088
+ )
1089
+ def test_pickling_built_processors(factory):
1090
+ """Tokenizers cannot be pickled
1091
+ https://github.com/scikit-learn/scikit-learn/issues/12833
1092
+ """
1093
+ vec = CountVectorizer()
1094
+ function = factory(vec)
1095
+ text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
1096
+ roundtripped_function = pickle.loads(pickle.dumps(function))
1097
+ expected = function(text)
1098
+ result = roundtripped_function(text)
1099
+ assert result == expected
1100
+
1101
+
1102
+ def test_countvectorizer_vocab_sets_when_pickling():
1103
+ # ensure that vocabulary of type set is coerced to a list to
1104
+ # preserve iteration ordering after deserialization
1105
+ rng = np.random.RandomState(0)
1106
+ vocab_words = np.array(
1107
+ [
1108
+ "beer",
1109
+ "burger",
1110
+ "celeri",
1111
+ "coke",
1112
+ "pizza",
1113
+ "salad",
1114
+ "sparkling",
1115
+ "tomato",
1116
+ "water",
1117
+ ]
1118
+ )
1119
+ for x in range(0, 100):
1120
+ vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
1121
+ cv = CountVectorizer(vocabulary=vocab_set)
1122
+ unpickled_cv = pickle.loads(pickle.dumps(cv))
1123
+ cv.fit(ALL_FOOD_DOCS)
1124
+ unpickled_cv.fit(ALL_FOOD_DOCS)
1125
+ assert_array_equal(
1126
+ cv.get_feature_names_out(), unpickled_cv.get_feature_names_out()
1127
+ )
1128
+
1129
+
1130
+ def test_countvectorizer_vocab_dicts_when_pickling():
1131
+ rng = np.random.RandomState(0)
1132
+ vocab_words = np.array(
1133
+ [
1134
+ "beer",
1135
+ "burger",
1136
+ "celeri",
1137
+ "coke",
1138
+ "pizza",
1139
+ "salad",
1140
+ "sparkling",
1141
+ "tomato",
1142
+ "water",
1143
+ ]
1144
+ )
1145
+ for x in range(0, 100):
1146
+ vocab_dict = dict()
1147
+ words = rng.choice(vocab_words, size=5, replace=False)
1148
+ for y in range(0, 5):
1149
+ vocab_dict[words[y]] = y
1150
+ cv = CountVectorizer(vocabulary=vocab_dict)
1151
+ unpickled_cv = pickle.loads(pickle.dumps(cv))
1152
+ cv.fit(ALL_FOOD_DOCS)
1153
+ unpickled_cv.fit(ALL_FOOD_DOCS)
1154
+ assert_array_equal(
1155
+ cv.get_feature_names_out(), unpickled_cv.get_feature_names_out()
1156
+ )
1157
+
1158
+
1159
+ def test_stop_words_removal():
1160
+ # Ensure that deleting the stop_words_ attribute doesn't affect transform
1161
+
1162
+ fitted_vectorizers = (
1163
+ TfidfVectorizer().fit(JUNK_FOOD_DOCS),
1164
+ CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
1165
+ CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
1166
+ )
1167
+
1168
+ for vect in fitted_vectorizers:
1169
+ vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
1170
+
1171
+ vect.stop_words_ = None
1172
+ stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
1173
+
1174
+ delattr(vect, "stop_words_")
1175
+ stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
1176
+
1177
+ assert_array_equal(stop_None_transform, vect_transform)
1178
+ assert_array_equal(stop_del_transform, vect_transform)
1179
+
1180
+
1181
+ def test_pickling_transformer():
1182
+ X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
1183
+ orig = TfidfTransformer().fit(X)
1184
+ s = pickle.dumps(orig)
1185
+ copy = pickle.loads(s)
1186
+ assert type(copy) == orig.__class__
1187
+ assert_array_equal(copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray())
1188
+
1189
+
1190
+ def test_transformer_idf_setter():
1191
+ X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
1192
+ orig = TfidfTransformer().fit(X)
1193
+ copy = TfidfTransformer()
1194
+ copy.idf_ = orig.idf_
1195
+ assert_array_equal(copy.transform(X).toarray(), orig.transform(X).toarray())
1196
+
1197
+
1198
+ def test_tfidf_vectorizer_setter():
1199
+ orig = TfidfVectorizer(use_idf=True)
1200
+ orig.fit(JUNK_FOOD_DOCS)
1201
+ copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True)
1202
+ copy.idf_ = orig.idf_
1203
+ assert_array_equal(
1204
+ copy.transform(JUNK_FOOD_DOCS).toarray(),
1205
+ orig.transform(JUNK_FOOD_DOCS).toarray(),
1206
+ )
1207
+ # `idf_` cannot be set with `use_idf=False`
1208
+ copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=False)
1209
+ err_msg = "`idf_` cannot be set when `user_idf=False`."
1210
+ with pytest.raises(ValueError, match=err_msg):
1211
+ copy.idf_ = orig.idf_
1212
+
1213
+
1214
+ def test_tfidfvectorizer_invalid_idf_attr():
1215
+ vect = TfidfVectorizer(use_idf=True)
1216
+ vect.fit(JUNK_FOOD_DOCS)
1217
+ copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True)
1218
+ expected_idf_len = len(vect.idf_)
1219
+ invalid_idf = [1.0] * (expected_idf_len + 1)
1220
+ with pytest.raises(ValueError):
1221
+ setattr(copy, "idf_", invalid_idf)
1222
+
1223
+
1224
+ def test_non_unique_vocab():
1225
+ vocab = ["a", "b", "c", "a", "a"]
1226
+ vect = CountVectorizer(vocabulary=vocab)
1227
+ with pytest.raises(ValueError):
1228
+ vect.fit([])
1229
+
1230
+
1231
+ @fails_if_pypy
1232
+ def test_hashingvectorizer_nan_in_docs():
1233
+ # np.nan can appear when using pandas to load text fields from a csv file
1234
+ # with missing values.
1235
+ message = "np.nan is an invalid document, expected byte or unicode string."
1236
+ exception = ValueError
1237
+
1238
+ def func():
1239
+ hv = HashingVectorizer()
1240
+ hv.fit_transform(["hello world", np.nan, "hello hello"])
1241
+
1242
+ with pytest.raises(exception, match=message):
1243
+ func()
1244
+
1245
+
1246
+ def test_tfidfvectorizer_binary():
1247
+ # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
1248
+ v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
1249
+ assert v.binary
1250
+
1251
+ X = v.fit_transform(["hello world", "hello hello"]).toarray()
1252
+ assert_array_equal(X.ravel(), [1, 1, 1, 0])
1253
+ X2 = v.transform(["hello world", "hello hello"]).toarray()
1254
+ assert_array_equal(X2.ravel(), [1, 1, 1, 0])
1255
+
1256
+
1257
+ def test_tfidfvectorizer_export_idf():
1258
+ vect = TfidfVectorizer(use_idf=True)
1259
+ vect.fit(JUNK_FOOD_DOCS)
1260
+ assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
1261
+
1262
+
1263
+ def test_vectorizer_vocab_clone():
1264
+ vect_vocab = TfidfVectorizer(vocabulary=["the"])
1265
+ vect_vocab_clone = clone(vect_vocab)
1266
+ vect_vocab.fit(ALL_FOOD_DOCS)
1267
+ vect_vocab_clone.fit(ALL_FOOD_DOCS)
1268
+ assert vect_vocab_clone.vocabulary_ == vect_vocab.vocabulary_
1269
+
1270
+
1271
+ @pytest.mark.parametrize(
1272
+ "Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer)
1273
+ )
1274
+ def test_vectorizer_string_object_as_input(Vectorizer):
1275
+ message = "Iterable over raw text documents expected, string object received."
1276
+ vec = Vectorizer()
1277
+
1278
+ with pytest.raises(ValueError, match=message):
1279
+ vec.fit_transform("hello world!")
1280
+
1281
+ with pytest.raises(ValueError, match=message):
1282
+ vec.fit("hello world!")
1283
+ vec.fit(["some text", "some other text"])
1284
+
1285
+ with pytest.raises(ValueError, match=message):
1286
+ vec.transform("hello world!")
1287
+
1288
+
1289
+ @pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
1290
+ def test_tfidf_transformer_type(X_dtype):
1291
+ X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42)
1292
+ X_trans = TfidfTransformer().fit_transform(X)
1293
+ assert X_trans.dtype == X.dtype
1294
+
1295
+
1296
+ @pytest.mark.parametrize(
1297
+ "csc_container, csr_container", product(CSC_CONTAINERS, CSR_CONTAINERS)
1298
+ )
1299
+ def test_tfidf_transformer_sparse(csc_container, csr_container):
1300
+ X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)
1301
+ X_csc = csc_container(X)
1302
+ X_csr = csr_container(X)
1303
+
1304
+ X_trans_csc = TfidfTransformer().fit_transform(X_csc)
1305
+ X_trans_csr = TfidfTransformer().fit_transform(X_csr)
1306
+ assert_allclose_dense_sparse(X_trans_csc, X_trans_csr)
1307
+ assert X_trans_csc.format == X_trans_csr.format
1308
+
1309
+
1310
+ @pytest.mark.parametrize(
1311
+ "vectorizer_dtype, output_dtype, warning_expected",
1312
+ [
1313
+ (np.int32, np.float64, True),
1314
+ (np.int64, np.float64, True),
1315
+ (np.float32, np.float32, False),
1316
+ (np.float64, np.float64, False),
1317
+ ],
1318
+ )
1319
+ def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype, warning_expected):
1320
+ X = np.array(["numpy", "scipy", "sklearn"])
1321
+ vectorizer = TfidfVectorizer(dtype=vectorizer_dtype)
1322
+
1323
+ warning_msg_match = "'dtype' should be used."
1324
+ if warning_expected:
1325
+ with pytest.warns(UserWarning, match=warning_msg_match):
1326
+ X_idf = vectorizer.fit_transform(X)
1327
+ else:
1328
+ with warnings.catch_warnings():
1329
+ warnings.simplefilter("error", UserWarning)
1330
+ X_idf = vectorizer.fit_transform(X)
1331
+ assert X_idf.dtype == output_dtype
1332
+
1333
+
1334
+ @pytest.mark.parametrize(
1335
+ "vec",
1336
+ [
1337
+ HashingVectorizer(ngram_range=(2, 1)),
1338
+ CountVectorizer(ngram_range=(2, 1)),
1339
+ TfidfVectorizer(ngram_range=(2, 1)),
1340
+ ],
1341
+ )
1342
+ def test_vectorizers_invalid_ngram_range(vec):
1343
+ # vectorizers can be constructed with an invalid ngram range;
1344
+ # check that an informative error is raised when fitting or transforming
1345
+ invalid_range = vec.ngram_range
1346
+ message = re.escape(
1347
+ f"Invalid value for ngram_range={invalid_range} "
1348
+ "lower boundary larger than the upper boundary."
1349
+ )
1350
+ if isinstance(vec, HashingVectorizer) and IS_PYPY:
1351
+ pytest.xfail(reason="HashingVectorizer is not supported on PyPy")
1352
+
1353
+ with pytest.raises(ValueError, match=message):
1354
+ vec.fit(["good news everyone"])
1355
+
1356
+ with pytest.raises(ValueError, match=message):
1357
+ vec.fit_transform(["good news everyone"])
1358
+
1359
+ if isinstance(vec, HashingVectorizer):
1360
+ with pytest.raises(ValueError, match=message):
1361
+ vec.transform(["good news everyone"])
1362
+
1363
+
1364
+ def _check_stop_words_consistency(estimator):
1365
+ stop_words = estimator.get_stop_words()
1366
+ tokenize = estimator.build_tokenizer()
1367
+ preprocess = estimator.build_preprocessor()
1368
+ return estimator._check_stop_words_consistency(stop_words, preprocess, tokenize)
1369
+
1370
+
1371
+ @fails_if_pypy
1372
+ def test_vectorizer_stop_words_inconsistent():
1373
+ lstr = r"\['and', 'll', 've'\]"
1374
+ message = (
1375
+ "Your stop_words may be inconsistent with your "
1376
+ "preprocessing. Tokenizing the stop words generated "
1377
+ "tokens %s not in stop_words." % lstr
1378
+ )
1379
+ for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
1380
+ vec.set_params(stop_words=["you've", "you", "you'll", "AND"])
1381
+ with pytest.warns(UserWarning, match=message):
1382
+ vec.fit_transform(["hello world"])
1383
+ # reset stop word validation
1384
+ del vec._stop_words_id
1385
+ assert _check_stop_words_consistency(vec) is False
1386
+
1387
+ # Only one warning per stop list
1388
+ with warnings.catch_warnings():
1389
+ warnings.simplefilter("error", UserWarning)
1390
+ vec.fit_transform(["hello world"])
1391
+ assert _check_stop_words_consistency(vec) is None
1392
+
1393
+ # Test caching of inconsistency assessment
1394
+ vec.set_params(stop_words=["you've", "you", "you'll", "blah", "AND"])
1395
+ with pytest.warns(UserWarning, match=message):
1396
+ vec.fit_transform(["hello world"])
1397
+
1398
+
1399
+ @skip_if_32bit
1400
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
1401
+ def test_countvectorizer_sort_features_64bit_sparse_indices(csr_container):
1402
+ """
1403
+ Check that CountVectorizer._sort_features preserves the dtype of its sparse
1404
+ feature matrix.
1405
+
1406
+ This test is skipped on 32bit platforms, see:
1407
+ https://github.com/scikit-learn/scikit-learn/pull/11295
1408
+ for more details.
1409
+ """
1410
+
1411
+ X = csr_container((5, 5), dtype=np.int64)
1412
+
1413
+ # force indices and indptr to int64.
1414
+ INDICES_DTYPE = np.int64
1415
+ X.indices = X.indices.astype(INDICES_DTYPE)
1416
+ X.indptr = X.indptr.astype(INDICES_DTYPE)
1417
+
1418
+ vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}
1419
+
1420
+ Xs = CountVectorizer()._sort_features(X, vocabulary)
1421
+
1422
+ assert INDICES_DTYPE == Xs.indices.dtype
1423
+
1424
+
1425
+ @fails_if_pypy
1426
+ @pytest.mark.parametrize(
1427
+ "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
1428
+ )
1429
+ def test_stop_word_validation_custom_preprocessor(Estimator):
1430
+ data = [{"text": "some text"}]
1431
+
1432
+ vec = Estimator()
1433
+ assert _check_stop_words_consistency(vec) is True
1434
+
1435
+ vec = Estimator(preprocessor=lambda x: x["text"], stop_words=["and"])
1436
+ assert _check_stop_words_consistency(vec) == "error"
1437
+ # checks are cached
1438
+ assert _check_stop_words_consistency(vec) is None
1439
+ vec.fit_transform(data)
1440
+
1441
+ class CustomEstimator(Estimator):
1442
+ def build_preprocessor(self):
1443
+ return lambda x: x["text"]
1444
+
1445
+ vec = CustomEstimator(stop_words=["and"])
1446
+ assert _check_stop_words_consistency(vec) == "error"
1447
+
1448
+ vec = Estimator(
1449
+ tokenizer=lambda doc: re.compile(r"\w{1,}").findall(doc), stop_words=["and"]
1450
+ )
1451
+ assert _check_stop_words_consistency(vec) is True
1452
+
1453
+
1454
+ @pytest.mark.parametrize(
1455
+ "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
1456
+ )
1457
+ @pytest.mark.parametrize(
1458
+ "input_type, err_type, err_msg",
1459
+ [
1460
+ ("filename", FileNotFoundError, ""),
1461
+ ("file", AttributeError, "'str' object has no attribute 'read'"),
1462
+ ],
1463
+ )
1464
+ def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg):
1465
+ if issubclass(Estimator, HashingVectorizer) and IS_PYPY:
1466
+ pytest.xfail("HashingVectorizer is not supported on PyPy")
1467
+ data = ["this is text, not file or filename"]
1468
+ with pytest.raises(err_type, match=err_msg):
1469
+ Estimator(analyzer=lambda x: x.split(), input=input_type).fit_transform(data)
1470
+
1471
+
1472
+ @pytest.mark.parametrize(
1473
+ "Estimator",
1474
+ [
1475
+ CountVectorizer,
1476
+ TfidfVectorizer,
1477
+ pytest.param(HashingVectorizer, marks=fails_if_pypy),
1478
+ ],
1479
+ )
1480
+ @pytest.mark.parametrize(
1481
+ "analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()]
1482
+ )
1483
+ @pytest.mark.parametrize("input_type", ["file", "filename"])
1484
+ def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type):
1485
+ data = ["this is text, not file or filename"]
1486
+ with pytest.raises((FileNotFoundError, AttributeError)):
1487
+ Estimator(analyzer=analyzer, input=input_type).fit_transform(data)
1488
+
1489
+
1490
+ @pytest.mark.parametrize(
1491
+ "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
1492
+ )
1493
+ def test_callable_analyzer_reraise_error(tmpdir, Estimator):
1494
+ # check if a custom exception from the analyzer is shown to the user
1495
+ def analyzer(doc):
1496
+ raise Exception("testing")
1497
+
1498
+ if issubclass(Estimator, HashingVectorizer) and IS_PYPY:
1499
+ pytest.xfail("HashingVectorizer is not supported on PyPy")
1500
+
1501
+ f = tmpdir.join("file.txt")
1502
+ f.write("sample content\n")
1503
+
1504
+ with pytest.raises(Exception, match="testing"):
1505
+ Estimator(analyzer=analyzer, input="file").fit_transform([f])
1506
+
1507
+
1508
+ @pytest.mark.parametrize(
1509
+ "Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer]
1510
+ )
1511
+ @pytest.mark.parametrize(
1512
+ (
1513
+ "stop_words, tokenizer, preprocessor, ngram_range, token_pattern,"
1514
+ "analyzer, unused_name, ovrd_name, ovrd_msg"
1515
+ ),
1516
+ [
1517
+ (
1518
+ ["you've", "you'll"],
1519
+ None,
1520
+ None,
1521
+ (1, 1),
1522
+ None,
1523
+ "char",
1524
+ "'stop_words'",
1525
+ "'analyzer'",
1526
+ "!= 'word'",
1527
+ ),
1528
+ (
1529
+ None,
1530
+ lambda s: s.split(),
1531
+ None,
1532
+ (1, 1),
1533
+ None,
1534
+ "char",
1535
+ "'tokenizer'",
1536
+ "'analyzer'",
1537
+ "!= 'word'",
1538
+ ),
1539
+ (
1540
+ None,
1541
+ lambda s: s.split(),
1542
+ None,
1543
+ (1, 1),
1544
+ r"\w+",
1545
+ "word",
1546
+ "'token_pattern'",
1547
+ "'tokenizer'",
1548
+ "is not None",
1549
+ ),
1550
+ (
1551
+ None,
1552
+ None,
1553
+ lambda s: s.upper(),
1554
+ (1, 1),
1555
+ r"\w+",
1556
+ lambda s: s.upper(),
1557
+ "'preprocessor'",
1558
+ "'analyzer'",
1559
+ "is callable",
1560
+ ),
1561
+ (
1562
+ None,
1563
+ None,
1564
+ None,
1565
+ (1, 2),
1566
+ None,
1567
+ lambda s: s.upper(),
1568
+ "'ngram_range'",
1569
+ "'analyzer'",
1570
+ "is callable",
1571
+ ),
1572
+ (
1573
+ None,
1574
+ None,
1575
+ None,
1576
+ (1, 1),
1577
+ r"\w+",
1578
+ "char",
1579
+ "'token_pattern'",
1580
+ "'analyzer'",
1581
+ "!= 'word'",
1582
+ ),
1583
+ ],
1584
+ )
1585
+ def test_unused_parameters_warn(
1586
+ Vectorizer,
1587
+ stop_words,
1588
+ tokenizer,
1589
+ preprocessor,
1590
+ ngram_range,
1591
+ token_pattern,
1592
+ analyzer,
1593
+ unused_name,
1594
+ ovrd_name,
1595
+ ovrd_msg,
1596
+ ):
1597
+ train_data = JUNK_FOOD_DOCS
1598
+ # set the parameters and check for the corresponding warning message
1599
+ vect = Vectorizer()
1600
+ vect.set_params(
1601
+ stop_words=stop_words,
1602
+ tokenizer=tokenizer,
1603
+ preprocessor=preprocessor,
1604
+ ngram_range=ngram_range,
1605
+ token_pattern=token_pattern,
1606
+ analyzer=analyzer,
1607
+ )
1608
+ msg = "The parameter %s will not be used since %s %s" % (
1609
+ unused_name,
1610
+ ovrd_name,
1611
+ ovrd_msg,
1612
+ )
1613
+ with pytest.warns(UserWarning, match=msg):
1614
+ vect.fit(train_data)
1615
+
1616
+
1617
+ @pytest.mark.parametrize(
1618
+ "Vectorizer, X",
1619
+ (
1620
+ (HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]),
1621
+ (CountVectorizer, JUNK_FOOD_DOCS),
1622
+ ),
1623
+ )
1624
+ def test_n_features_in(Vectorizer, X):
1625
+ # For vectorizers, n_features_in_ does not make sense
1626
+ vectorizer = Vectorizer()
1627
+ assert not hasattr(vectorizer, "n_features_in_")
1628
+ vectorizer.fit(X)
1629
+ assert not hasattr(vectorizer, "n_features_in_")
1630
+
1631
+
1632
+ def test_tie_breaking_sample_order_invariance():
1633
+ # Checks the sample order invariance when setting max_features
1634
+ # non-regression test for #17939
1635
+ vec = CountVectorizer(max_features=1)
1636
+ vocab1 = vec.fit(["hello", "world"]).vocabulary_
1637
+ vocab2 = vec.fit(["world", "hello"]).vocabulary_
1638
+ assert vocab1 == vocab2
1639
+
1640
+
1641
+ @fails_if_pypy
1642
+ def test_nonnegative_hashing_vectorizer_result_indices():
1643
+ # non-regression test for PR #19035
1644
+ hashing = HashingVectorizer(n_features=1000000, ngram_range=(2, 3))
1645
+ indices = hashing.transform(["22pcs efuture"]).indices
1646
+ assert indices[0] >= 0
1647
+
1648
+
1649
+ @pytest.mark.parametrize(
1650
+ "Estimator", [CountVectorizer, TfidfVectorizer, TfidfTransformer, HashingVectorizer]
1651
+ )
1652
+ def test_vectorizers_do_not_have_set_output(Estimator):
1653
+ """Check that vectorizers do not define set_output."""
1654
+ est = Estimator()
1655
+ assert not hasattr(est, "set_output")
venv/lib/python3.10/site-packages/sklearn/feature_extraction/text.py ADDED
@@ -0,0 +1,2166 @@
1
+ # Authors: Olivier Grisel <[email protected]>
2
+ # Mathieu Blondel <[email protected]>
3
+ # Lars Buitinck
4
+ # Robert Layton <[email protected]>
5
+ # Jochen Wersdörfer <[email protected]>
6
+ # Roman Sinayev <[email protected]>
7
+ #
8
+ # License: BSD 3 clause
9
+ """
10
+ The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
11
+ build feature vectors from text documents.
12
+ """
13
+
14
+ import array
15
+ import re
16
+ import unicodedata
17
+ import warnings
18
+ from collections import defaultdict
19
+ from collections.abc import Mapping
20
+ from functools import partial
21
+ from numbers import Integral
22
+ from operator import itemgetter
23
+
24
+ import numpy as np
25
+ import scipy.sparse as sp
26
+
27
+ from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context
28
+ from ..exceptions import NotFittedError
29
+ from ..preprocessing import normalize
30
+ from ..utils import _IS_32BIT
31
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
32
+ from ..utils.validation import FLOAT_DTYPES, check_array, check_is_fitted
33
+ from ._hash import FeatureHasher
34
+ from ._stop_words import ENGLISH_STOP_WORDS
35
+
36
+ __all__ = [
37
+ "HashingVectorizer",
38
+ "CountVectorizer",
39
+ "ENGLISH_STOP_WORDS",
40
+ "TfidfTransformer",
41
+ "TfidfVectorizer",
42
+ "strip_accents_ascii",
43
+ "strip_accents_unicode",
44
+ "strip_tags",
45
+ ]
46
+
47
+
48
+ def _preprocess(doc, accent_function=None, lower=False):
49
+ """Chain together an optional series of text preprocessing steps to
50
+ apply to a document.
51
+
52
+ Parameters
53
+ ----------
54
+ doc: str
55
+ The string to preprocess
56
+ accent_function: callable, default=None
57
+ Function for handling accented characters. Common strategies include
58
+ normalizing and removing.
59
+ lower: bool, default=False
60
+ Whether to use str.lower to lowercase all of the text
61
+
62
+ Returns
63
+ -------
64
+ doc: str
65
+ preprocessed string
66
+ """
67
+ if lower:
68
+ doc = doc.lower()
69
+ if accent_function is not None:
70
+ doc = accent_function(doc)
71
+ return doc
72
+
73
+
74
+ def _analyze(
75
+ doc,
76
+ analyzer=None,
77
+ tokenizer=None,
78
+ ngrams=None,
79
+ preprocessor=None,
80
+ decoder=None,
81
+ stop_words=None,
82
+ ):
83
+ """Chain together an optional series of text processing steps to go from
84
+ a single document to ngrams, with or without tokenizing or preprocessing.
85
+
86
+ If analyzer is used, only the decoder argument is used, as the analyzer is
87
+ intended to replace the preprocessor, tokenizer, and ngrams steps.
88
+
89
+ Parameters
90
+ ----------
91
+ analyzer: callable, default=None
92
+ tokenizer: callable, default=None
93
+ ngrams: callable, default=None
94
+ preprocessor: callable, default=None
95
+ decoder: callable, default=None
96
+ stop_words: list, default=None
97
+
98
+ Returns
99
+ -------
100
+ ngrams: list
101
+ A sequence of tokens, possibly with pairs, triples, etc.
102
+ """
103
+
104
+ if decoder is not None:
105
+ doc = decoder(doc)
106
+ if analyzer is not None:
107
+ doc = analyzer(doc)
108
+ else:
109
+ if preprocessor is not None:
110
+ doc = preprocessor(doc)
111
+ if tokenizer is not None:
112
+ doc = tokenizer(doc)
113
+ if ngrams is not None:
114
+ if stop_words is not None:
115
+ doc = ngrams(doc, stop_words)
116
+ else:
117
+ doc = ngrams(doc)
118
+ return doc
119
+
120
+
121
+ def strip_accents_unicode(s):
122
+ """Transform accentuated unicode symbols into their simple counterpart.
123
+
124
+ Warning: the python-level loop and join operations make this
125
+ implementation 20 times slower than the strip_accents_ascii basic
126
+ normalization.
127
+
128
+ Parameters
129
+ ----------
130
+ s : str
131
+ The string to strip.
132
+
133
+ Returns
134
+ -------
135
+ s : str
136
+ The stripped string.
137
+
138
+ See Also
139
+ --------
140
+ strip_accents_ascii : Remove accentuated char for any unicode symbol that
141
+ has a direct ASCII equivalent.
142
+ """
143
+ try:
144
+ # If `s` is ASCII-compatible, then it does not contain any accented
145
+ # characters and we can avoid an expensive list comprehension
146
+ s.encode("ASCII", errors="strict")
147
+ return s
148
+ except UnicodeEncodeError:
149
+ normalized = unicodedata.normalize("NFKD", s)
150
+ return "".join([c for c in normalized if not unicodedata.combining(c)])
151
+
152
+
153
+ def strip_accents_ascii(s):
154
+ """Transform accentuated unicode symbols into ascii or nothing.
155
+
156
+ Warning: this solution is only suited for languages that have a direct
157
+ transliteration to ASCII symbols.
158
+
159
+ Parameters
160
+ ----------
161
+ s : str
162
+ The string to strip.
163
+
164
+ Returns
165
+ -------
166
+ s : str
167
+ The stripped string.
168
+
169
+ See Also
170
+ --------
171
+ strip_accents_unicode : Remove accentuated char for any unicode symbol.
172
+ """
173
+ nkfd_form = unicodedata.normalize("NFKD", s)
174
+ return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
175
+
176
+
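+ # Editor's note: a hedged illustrative sketch (not part of the upstream
+ # module) contrasting the two accent strippers defined above.
+ def _accent_stripping_sketch():
+     # Latin accents are removed by both helpers.
+     assert strip_accents_unicode("café") == "cafe"
+     assert strip_accents_ascii("café") == "cafe"
+     # Characters with no ASCII counterpart are kept by the unicode variant
+     # but dropped entirely by the ASCII variant.
+     assert strip_accents_unicode("человек") == "человек"
+     assert strip_accents_ascii("человек") == ""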
177
+ def strip_tags(s):
178
+ """Basic regexp based HTML / XML tag stripper function.
179
+
180
+ For serious HTML/XML preprocessing you should rather use an external
181
+ library such as lxml or BeautifulSoup.
182
+
183
+ Parameters
184
+ ----------
185
+ s : str
186
+ The string to strip.
187
+
188
+ Returns
189
+ -------
190
+ s : str
191
+ The stripped string.
192
+ """
193
+ return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
194
+
195
+
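+ # Editor's note: a minimal sketch (not part of the upstream module) of the tag
+ # stripper above; each tag is replaced by a single space, so extra whitespace
+ # may remain around the surviving text.
+ def _strip_tags_sketch():
+     assert strip_tags("<p>some <b>bold</b> text</p>").split() == [
+         "some",
+         "bold",
+         "text",
+     ]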
196
+ def _check_stop_list(stop):
197
+ if stop == "english":
198
+ return ENGLISH_STOP_WORDS
199
+ elif isinstance(stop, str):
200
+ raise ValueError("not a built-in stop list: %s" % stop)
201
+ elif stop is None:
202
+ return None
203
+ else: # assume it's a collection
204
+ return frozenset(stop)
205
+
206
+
207
+ class _VectorizerMixin:
208
+ """Provides common code for text vectorizers (tokenization logic)."""
209
+
210
+ _white_spaces = re.compile(r"\s\s+")
211
+
212
+ def decode(self, doc):
213
+ """Decode the input into a string of unicode symbols.
214
+
215
+ The decoding strategy depends on the vectorizer parameters.
216
+
217
+ Parameters
218
+ ----------
219
+ doc : bytes or str
220
+ The string to decode.
221
+
222
+ Returns
223
+ -------
224
+ doc: str
225
+ A string of unicode symbols.
226
+ """
227
+ if self.input == "filename":
228
+ with open(doc, "rb") as fh:
229
+ doc = fh.read()
230
+
231
+ elif self.input == "file":
232
+ doc = doc.read()
233
+
234
+ if isinstance(doc, bytes):
235
+ doc = doc.decode(self.encoding, self.decode_error)
236
+
237
+ if doc is np.nan:
238
+ raise ValueError(
239
+ "np.nan is an invalid document, expected byte or unicode string."
240
+ )
241
+
242
+ return doc
243
+
244
+ def _word_ngrams(self, tokens, stop_words=None):
245
+ """Turn tokens into a sequence of n-grams after stop words filtering"""
246
+ # handle stop words
247
+ if stop_words is not None:
248
+ tokens = [w for w in tokens if w not in stop_words]
249
+
250
+ # handle token n-grams
251
+ min_n, max_n = self.ngram_range
252
+ if max_n != 1:
253
+ original_tokens = tokens
254
+ if min_n == 1:
255
+ # no need to do any slicing for unigrams
256
+ # just iterate through the original tokens
257
+ tokens = list(original_tokens)
258
+ min_n += 1
259
+ else:
260
+ tokens = []
261
+
262
+ n_original_tokens = len(original_tokens)
263
+
264
+ # bind method outside of loop to reduce overhead
265
+ tokens_append = tokens.append
266
+ space_join = " ".join
267
+
268
+ for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
269
+ for i in range(n_original_tokens - n + 1):
270
+ tokens_append(space_join(original_tokens[i : i + n]))
271
+
272
+ return tokens
273
+
274
+ def _char_ngrams(self, text_document):
275
+ """Tokenize text_document into a sequence of character n-grams"""
276
+ # normalize white spaces
277
+ text_document = self._white_spaces.sub(" ", text_document)
278
+
279
+ text_len = len(text_document)
280
+ min_n, max_n = self.ngram_range
281
+ if min_n == 1:
282
+ # no need to do any slicing for unigrams
283
+ # iterate through the string
284
+ ngrams = list(text_document)
285
+ min_n += 1
286
+ else:
287
+ ngrams = []
288
+
289
+ # bind method outside of loop to reduce overhead
290
+ ngrams_append = ngrams.append
291
+
292
+ for n in range(min_n, min(max_n + 1, text_len + 1)):
293
+ for i in range(text_len - n + 1):
294
+ ngrams_append(text_document[i : i + n])
295
+ return ngrams
296
+
297
+ def _char_wb_ngrams(self, text_document):
298
+ """Whitespace sensitive char-n-gram tokenization.
299
+
300
+ Tokenize text_document into a sequence of character n-grams
301
+ operating only inside word boundaries. n-grams at the edges
302
+ of words are padded with space."""
303
+ # normalize white spaces
304
+ text_document = self._white_spaces.sub(" ", text_document)
305
+
306
+ min_n, max_n = self.ngram_range
307
+ ngrams = []
308
+
309
+ # bind method outside of loop to reduce overhead
310
+ ngrams_append = ngrams.append
311
+
312
+ for w in text_document.split():
313
+ w = " " + w + " "
314
+ w_len = len(w)
315
+ for n in range(min_n, max_n + 1):
316
+ offset = 0
317
+ ngrams_append(w[offset : offset + n])
318
+ while offset + n < w_len:
319
+ offset += 1
320
+ ngrams_append(w[offset : offset + n])
321
+ if offset == 0: # count a short word (w_len < n) only once
322
+ break
323
+ return ngrams
324
+
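+ # Editor's illustration (comment only, not upstream code): with
+ # ngram_range=(3, 3) the word "jam" is first padded to " jam " and then
+ # sliced, yielding the character 3-grams " ja", "jam" and "am ".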
325
+ def build_preprocessor(self):
326
+ """Return a function to preprocess the text before tokenization.
327
+
328
+ Returns
329
+ -------
330
+ preprocessor: callable
331
+ A function to preprocess the text before tokenization.
332
+ """
333
+ if self.preprocessor is not None:
334
+ return self.preprocessor
335
+
336
+ # accent stripping
337
+ if not self.strip_accents:
338
+ strip_accents = None
339
+ elif callable(self.strip_accents):
340
+ strip_accents = self.strip_accents
341
+ elif self.strip_accents == "ascii":
342
+ strip_accents = strip_accents_ascii
343
+ elif self.strip_accents == "unicode":
344
+ strip_accents = strip_accents_unicode
345
+ else:
346
+ raise ValueError(
347
+ 'Invalid value for "strip_accents": %s' % self.strip_accents
348
+ )
349
+
350
+ return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
351
+
352
+ def build_tokenizer(self):
353
+ """Return a function that splits a string into a sequence of tokens.
354
+
355
+ Returns
356
+ -------
357
+ tokenizer: callable
358
+ A function to split a string into a sequence of tokens.
359
+ """
360
+ if self.tokenizer is not None:
361
+ return self.tokenizer
362
+ token_pattern = re.compile(self.token_pattern)
363
+
364
+ if token_pattern.groups > 1:
365
+ raise ValueError(
366
+ "More than 1 capturing group in token pattern. Only a single "
367
+ "group should be captured."
368
+ )
369
+
370
+ return token_pattern.findall
371
+
372
+ def get_stop_words(self):
373
+ """Build or fetch the effective stop words list.
374
+
375
+ Returns
376
+ -------
377
+ stop_words: list or None
378
+ A list of stop words.
379
+ """
380
+ return _check_stop_list(self.stop_words)
381
+
382
+ def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
383
+ """Check if stop words are consistent
384
+
385
+ Returns
386
+ -------
387
+ is_consistent : True if stop words are consistent with the preprocessor
388
+ and tokenizer, False if they are not, None if the check
389
+ was previously performed, "error" if it could not be
390
+ performed (e.g. because of the use of a custom
391
+ preprocessor / tokenizer)
392
+ """
393
+ if id(self.stop_words) == getattr(self, "_stop_words_id", None):
394
+ # Stop words were previously validated
395
+ return None
396
+
397
+ # NB: stop_words is validated, unlike self.stop_words
398
+ try:
399
+ inconsistent = set()
400
+ for w in stop_words or ():
401
+ tokens = list(tokenize(preprocess(w)))
402
+ for token in tokens:
403
+ if token not in stop_words:
404
+ inconsistent.add(token)
405
+ self._stop_words_id = id(self.stop_words)
406
+
407
+ if inconsistent:
408
+ warnings.warn(
409
+ "Your stop_words may be inconsistent with "
410
+ "your preprocessing. Tokenizing the stop "
411
+ "words generated tokens %r not in "
412
+ "stop_words."
413
+ % sorted(inconsistent)
414
+ )
415
+ return not inconsistent
416
+ except Exception:
417
+ # Failed to check stop words consistency (e.g. because a custom
418
+ # preprocessor or tokenizer was used)
419
+ self._stop_words_id = id(self.stop_words)
420
+ return "error"
421
+
422
+ def build_analyzer(self):
423
+ """Return a callable to process input data.
424
+
425
+ The callable handles preprocessing, tokenization, and n-grams generation.
426
+
427
+ Returns
428
+ -------
429
+ analyzer: callable
430
+ A function to handle preprocessing, tokenization
431
+ and n-grams generation.
432
+ """
433
+
434
+ if callable(self.analyzer):
435
+ return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
436
+
437
+ preprocess = self.build_preprocessor()
438
+
439
+ if self.analyzer == "char":
440
+ return partial(
441
+ _analyze,
442
+ ngrams=self._char_ngrams,
443
+ preprocessor=preprocess,
444
+ decoder=self.decode,
445
+ )
446
+
447
+ elif self.analyzer == "char_wb":
448
+ return partial(
449
+ _analyze,
450
+ ngrams=self._char_wb_ngrams,
451
+ preprocessor=preprocess,
452
+ decoder=self.decode,
453
+ )
454
+
455
+ elif self.analyzer == "word":
456
+ stop_words = self.get_stop_words()
457
+ tokenize = self.build_tokenizer()
458
+ self._check_stop_words_consistency(stop_words, preprocess, tokenize)
459
+ return partial(
460
+ _analyze,
461
+ ngrams=self._word_ngrams,
462
+ tokenizer=tokenize,
463
+ preprocessor=preprocess,
464
+ decoder=self.decode,
465
+ stop_words=stop_words,
466
+ )
467
+
468
+ else:
469
+ raise ValueError(
470
+ "%s is not a valid tokenization scheme/analyzer" % self.analyzer
471
+ )
472
+
473
+ def _validate_vocabulary(self):
474
+ vocabulary = self.vocabulary
475
+ if vocabulary is not None:
476
+ if isinstance(vocabulary, set):
477
+ vocabulary = sorted(vocabulary)
478
+ if not isinstance(vocabulary, Mapping):
479
+ vocab = {}
480
+ for i, t in enumerate(vocabulary):
481
+ if vocab.setdefault(t, i) != i:
482
+ msg = "Duplicate term in vocabulary: %r" % t
483
+ raise ValueError(msg)
484
+ vocabulary = vocab
485
+ else:
486
+ indices = set(vocabulary.values())
487
+ if len(indices) != len(vocabulary):
488
+ raise ValueError("Vocabulary contains repeated indices.")
489
+ for i in range(len(vocabulary)):
490
+ if i not in indices:
491
+ msg = "Vocabulary of size %d doesn't contain index %d." % (
492
+ len(vocabulary),
493
+ i,
494
+ )
495
+ raise ValueError(msg)
496
+ if not vocabulary:
497
+ raise ValueError("empty vocabulary passed to fit")
498
+ self.fixed_vocabulary_ = True
499
+ self.vocabulary_ = dict(vocabulary)
500
+ else:
501
+ self.fixed_vocabulary_ = False
502
+
503
+ def _check_vocabulary(self):
504
+ """Check if vocabulary is empty or missing (not fitted)"""
505
+ if not hasattr(self, "vocabulary_"):
506
+ self._validate_vocabulary()
507
+ if not self.fixed_vocabulary_:
508
+ raise NotFittedError("Vocabulary not fitted or provided")
509
+
510
+ if len(self.vocabulary_) == 0:
511
+ raise ValueError("Vocabulary is empty")
512
+
513
+ def _validate_ngram_range(self):
514
+ """Check validity of ngram_range parameter"""
515
+ min_n, max_m = self.ngram_range
516
+ if min_n > max_m:
517
+ raise ValueError(
518
+ "Invalid value for ngram_range=%s "
519
+ "lower boundary larger than the upper boundary."
520
+ % str(self.ngram_range)
521
+ )
522
+
523
+ def _warn_for_unused_params(self):
524
+ if self.tokenizer is not None and self.token_pattern is not None:
525
+ warnings.warn(
526
+ "The parameter 'token_pattern' will not be used"
527
+ " since 'tokenizer' is not None'"
528
+ )
529
+
530
+ if self.preprocessor is not None and callable(self.analyzer):
531
+ warnings.warn(
532
+ "The parameter 'preprocessor' will not be used"
533
+ " since 'analyzer' is callable'"
534
+ )
535
+
536
+ if (
537
+ self.ngram_range != (1, 1)
538
+ and self.ngram_range is not None
539
+ and callable(self.analyzer)
540
+ ):
541
+ warnings.warn(
542
+ "The parameter 'ngram_range' will not be used"
543
+ " since 'analyzer' is callable'"
544
+ )
545
+ if self.analyzer != "word" or callable(self.analyzer):
546
+ if self.stop_words is not None:
547
+ warnings.warn(
548
+ "The parameter 'stop_words' will not be used"
549
+ " since 'analyzer' != 'word'"
550
+ )
551
+ if (
552
+ self.token_pattern is not None
553
+ and self.token_pattern != r"(?u)\b\w\w+\b"
554
+ ):
555
+ warnings.warn(
556
+ "The parameter 'token_pattern' will not be used"
557
+ " since 'analyzer' != 'word'"
558
+ )
559
+ if self.tokenizer is not None:
560
+ warnings.warn(
561
+ "The parameter 'tokenizer' will not be used"
562
+ " since 'analyzer' != 'word'"
563
+ )
564
+
565
+
566
+ class HashingVectorizer(
567
+ TransformerMixin, _VectorizerMixin, BaseEstimator, auto_wrap_output_keys=None
568
+ ):
569
+ r"""Convert a collection of text documents to a matrix of token occurrences.
570
+
571
+ It turns a collection of text documents into a scipy.sparse matrix holding
572
+ token occurrence counts (or binary occurrence information), possibly
573
+ normalized as token frequencies if norm='l1' or projected on the euclidean
574
+ unit sphere if norm='l2'.
575
+
576
+ This text vectorizer implementation uses the hashing trick to find the
577
+ token string name to feature integer index mapping.
578
+
579
+ This strategy has several advantages:
580
+
581
+ - it has very low memory usage and scales to large datasets, as there is no need to
582
+ store a vocabulary dictionary in memory.
583
+
584
+ - it is fast to pickle and un-pickle as it holds no state besides the
585
+ constructor parameters.
586
+
587
+ - it can be used in a streaming (partial fit) or parallel pipeline as there
588
+ is no state computed during fit.
589
+
590
+ There are also a few cons (vs using a CountVectorizer with an
591
+ in-memory vocabulary):
592
+
593
+ - there is no way to compute the inverse transform (from feature indices to
594
+ string feature names) which can be a problem when trying to introspect
595
+ which features are most important to a model.
596
+
597
+ - there can be collisions: distinct tokens can be mapped to the same
598
+ feature index. However in practice this is rarely an issue if n_features
599
+ is large enough (e.g. 2 ** 18 for text classification problems).
600
+
601
+ - no IDF weighting as this would render the transformer stateful.
602
+
603
+ The hash function employed is the signed 32-bit version of Murmurhash3.
604
+
605
+ For an efficiency comparison of the different feature extractors, see
606
+ :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
607
+
608
+ Read more in the :ref:`User Guide <text_feature_extraction>`.
609
+
610
+ Parameters
611
+ ----------
612
+ input : {'filename', 'file', 'content'}, default='content'
613
+ - If `'filename'`, the sequence passed as an argument to fit is
614
+ expected to be a list of filenames that need reading to fetch
615
+ the raw content to analyze.
616
+
617
+ - If `'file'`, the sequence items must have a 'read' method (file-like
618
+ object) that is called to fetch the bytes in memory.
619
+
620
+ - If `'content'`, the input is expected to be a sequence of items that
621
+ can be of type string or byte.
622
+
623
+ encoding : str, default='utf-8'
624
+ If bytes or files are given to analyze, this encoding is used to
625
+ decode.
626
+
627
+ decode_error : {'strict', 'ignore', 'replace'}, default='strict'
628
+ Instruction on what to do if a byte sequence is given to analyze that
629
+ contains characters not of the given `encoding`. By default, it is
630
+ 'strict', meaning that a UnicodeDecodeError will be raised. Other
631
+ values are 'ignore' and 'replace'.
632
+
633
+ strip_accents : {'ascii', 'unicode'} or callable, default=None
634
+ Remove accents and perform other character normalization
635
+ during the preprocessing step.
636
+ 'ascii' is a fast method that only works on characters that have
637
+ a direct ASCII mapping.
638
+ 'unicode' is a slightly slower method that works on any character.
639
+ None (default) means no character normalization is performed.
640
+
641
+ Both 'ascii' and 'unicode' use NFKD normalization from
642
+ :func:`unicodedata.normalize`.
643
+
644
+ lowercase : bool, default=True
645
+ Convert all characters to lowercase before tokenizing.
646
+
647
+ preprocessor : callable, default=None
648
+ Override the preprocessing (string transformation) stage while
649
+ preserving the tokenizing and n-grams generation steps.
650
+ Only applies if ``analyzer`` is not callable.
651
+
652
+ tokenizer : callable, default=None
653
+ Override the string tokenization step while preserving the
654
+ preprocessing and n-grams generation steps.
655
+ Only applies if ``analyzer == 'word'``.
656
+
657
+ stop_words : {'english'}, list, default=None
658
+ If 'english', a built-in stop word list for English is used.
659
+ There are several known issues with 'english' and you should
660
+ consider an alternative (see :ref:`stop_words`).
661
+
662
+ If a list, that list is assumed to contain stop words, all of which
663
+ will be removed from the resulting tokens.
664
+ Only applies if ``analyzer == 'word'``.
665
+
666
+ token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b"
667
+ Regular expression denoting what constitutes a "token", only used
668
+ if ``analyzer == 'word'``. The default regexp selects tokens of 2
669
+ or more alphanumeric characters (punctuation is completely ignored
670
+ and always treated as a token separator).
671
+
672
+ If there is a capturing group in token_pattern then the
673
+ captured group content, not the entire match, becomes the token.
674
+ At most one capturing group is permitted.
675
+
676
+ ngram_range : tuple (min_n, max_n), default=(1, 1)
677
+ The lower and upper boundary of the range of n-values for different
678
+ n-grams to be extracted. All values of n such that min_n <= n <= max_n
679
+ will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
680
+ unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
681
+ only bigrams.
682
+ Only applies if ``analyzer`` is not callable.
683
+
684
+ analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
685
+ Whether the feature should be made of word or character n-grams.
686
+ Option 'char_wb' creates character n-grams only from text inside
687
+ word boundaries; n-grams at the edges of words are padded with space.
688
+
689
+ If a callable is passed it is used to extract the sequence of features
690
+ out of the raw, unprocessed input.
691
+
692
+ .. versionchanged:: 0.21
693
+ Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
694
+ is first read from the file and then passed to the given callable
695
+ analyzer.
696
+
697
+ n_features : int, default=(2 ** 20)
698
+ The number of features (columns) in the output matrices. Small numbers
699
+ of features are likely to cause hash collisions, but large numbers
700
+ will cause larger coefficient dimensions in linear learners.
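+ With the default value of ``2 ** 20`` the output matrices have
+ 1,048,576 columns.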
701
+
702
+ binary : bool, default=False
703
+ If True, all non zero counts are set to 1. This is useful for discrete
704
+ probabilistic models that model binary events rather than integer
705
+ counts.
706
+
707
+ norm : {'l1', 'l2'}, default='l2'
708
+ Norm used to normalize term vectors. None for no normalization.
709
+
710
+ alternate_sign : bool, default=True
711
+ When True, an alternating sign is added to the features so as to
712
+ approximately conserve the inner product in the hashed space even for
713
+ small n_features. This approach is similar to sparse random projection.
714
+
715
+ .. versionadded:: 0.19
716
+
717
+ dtype : type, default=np.float64
718
+ Type of the matrix returned by fit_transform() or transform().
719
+
720
+ See Also
721
+ --------
722
+ CountVectorizer : Convert a collection of text documents to a matrix of
723
+ token counts.
724
+ TfidfVectorizer : Convert a collection of raw documents to a matrix of
725
+ TF-IDF features.
726
+
727
+ Notes
728
+ -----
729
+ This estimator is :term:`stateless` and does not need to be fitted.
730
+ However, we recommend calling :meth:`fit_transform` instead of
731
+ :meth:`transform`, as parameter validation is only performed in
732
+ :meth:`fit`.
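+
+ Being stateless, :meth:`transform` can for instance be called directly on
+ new batches of documents without any prior call to :meth:`fit` (a minimal
+ sketch):
+
+ >>> from sklearn.feature_extraction.text import HashingVectorizer
+ >>> hv = HashingVectorizer(n_features=2**8)
+ >>> hv.transform(['first batch', 'second batch']).shape
+ (2, 256)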
733
+
734
+ Examples
735
+ --------
736
+ >>> from sklearn.feature_extraction.text import HashingVectorizer
737
+ >>> corpus = [
738
+ ... 'This is the first document.',
739
+ ... 'This document is the second document.',
740
+ ... 'And this is the third one.',
741
+ ... 'Is this the first document?',
742
+ ... ]
743
+ >>> vectorizer = HashingVectorizer(n_features=2**4)
744
+ >>> X = vectorizer.fit_transform(corpus)
745
+ >>> print(X.shape)
746
+ (4, 16)
747
+ """
748
+
749
+ _parameter_constraints: dict = {
750
+ "input": [StrOptions({"filename", "file", "content"})],
751
+ "encoding": [str],
752
+ "decode_error": [StrOptions({"strict", "ignore", "replace"})],
753
+ "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable],
754
+ "lowercase": ["boolean"],
755
+ "preprocessor": [callable, None],
756
+ "tokenizer": [callable, None],
757
+ "stop_words": [StrOptions({"english"}), list, None],
758
+ "token_pattern": [str, None],
759
+ "ngram_range": [tuple],
760
+ "analyzer": [StrOptions({"word", "char", "char_wb"}), callable],
761
+ "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="left")],
762
+ "binary": ["boolean"],
763
+ "norm": [StrOptions({"l1", "l2"}), None],
764
+ "alternate_sign": ["boolean"],
765
+ "dtype": "no_validation", # delegate to numpy
766
+ }
767
+
768
+ def __init__(
769
+ self,
770
+ *,
771
+ input="content",
772
+ encoding="utf-8",
773
+ decode_error="strict",
774
+ strip_accents=None,
775
+ lowercase=True,
776
+ preprocessor=None,
777
+ tokenizer=None,
778
+ stop_words=None,
779
+ token_pattern=r"(?u)\b\w\w+\b",
780
+ ngram_range=(1, 1),
781
+ analyzer="word",
782
+ n_features=(2**20),
783
+ binary=False,
784
+ norm="l2",
785
+ alternate_sign=True,
786
+ dtype=np.float64,
787
+ ):
788
+ self.input = input
789
+ self.encoding = encoding
790
+ self.decode_error = decode_error
791
+ self.strip_accents = strip_accents
792
+ self.preprocessor = preprocessor
793
+ self.tokenizer = tokenizer
794
+ self.analyzer = analyzer
795
+ self.lowercase = lowercase
796
+ self.token_pattern = token_pattern
797
+ self.stop_words = stop_words
798
+ self.n_features = n_features
799
+ self.ngram_range = ngram_range
800
+ self.binary = binary
801
+ self.norm = norm
802
+ self.alternate_sign = alternate_sign
803
+ self.dtype = dtype
804
+
805
+ @_fit_context(prefer_skip_nested_validation=True)
806
+ def partial_fit(self, X, y=None):
807
+ """Only validates estimator's parameters.
808
+
809
+ This method allows us to: (i) validate the estimator's parameters and
810
+ (ii) be consistent with the scikit-learn transformer API.
811
+
812
+ Parameters
813
+ ----------
814
+ X : ndarray of shape [n_samples, n_features]
815
+ Training data.
816
+
817
+ y : Ignored
818
+ Not used, present for API consistency by convention.
819
+
820
+ Returns
821
+ -------
822
+ self : object
823
+ HashingVectorizer instance.
824
+ """
825
+ return self
826
+
827
+ @_fit_context(prefer_skip_nested_validation=True)
828
+ def fit(self, X, y=None):
829
+ """Only validates estimator's parameters.
830
+
831
+ This method allows us to: (i) validate the estimator's parameters and
832
+ (ii) be consistent with the scikit-learn transformer API.
833
+
834
+ Parameters
835
+ ----------
836
+ X : ndarray of shape [n_samples, n_features]
837
+ Training data.
838
+
839
+ y : Ignored
840
+ Not used, present for API consistency by convention.
841
+
842
+ Returns
843
+ -------
844
+ self : object
845
+ HashingVectorizer instance.
846
+ """
847
+ # triggers a parameter validation
848
+ if isinstance(X, str):
849
+ raise ValueError(
850
+ "Iterable over raw text documents expected, string object received."
851
+ )
852
+
853
+ self._warn_for_unused_params()
854
+ self._validate_ngram_range()
855
+
856
+ self._get_hasher().fit(X, y=y)
857
+ return self
858
+
859
+ def transform(self, X):
860
+ """Transform a sequence of documents to a document-term matrix.
861
+
862
+ Parameters
863
+ ----------
864
+ X : iterable over raw text documents, length = n_samples
865
+ Samples. Each sample must be a text document (either bytes or
866
+ unicode strings, file name or file object depending on the
867
+ constructor argument) which will be tokenized and hashed.
868
+
869
+ Returns
870
+ -------
871
+ X : sparse matrix of shape (n_samples, n_features)
872
+ Document-term matrix.
873
+ """
874
+ if isinstance(X, str):
875
+ raise ValueError(
876
+ "Iterable over raw text documents expected, string object received."
877
+ )
878
+
879
+ self._validate_ngram_range()
880
+
881
+ analyzer = self.build_analyzer()
882
+ X = self._get_hasher().transform(analyzer(doc) for doc in X)
883
+ if self.binary:
884
+ X.data.fill(1)
885
+ if self.norm is not None:
886
+ X = normalize(X, norm=self.norm, copy=False)
887
+ return X
888
+
889
+ def fit_transform(self, X, y=None):
890
+ """Transform a sequence of documents to a document-term matrix.
891
+
892
+ Parameters
893
+ ----------
894
+ X : iterable over raw text documents, length = n_samples
895
+ Samples. Each sample must be a text document (either bytes or
896
+ unicode strings, file name or file object depending on the
897
+ constructor argument) which will be tokenized and hashed.
898
+ y : any
899
+ Ignored. This parameter exists only for compatibility with
900
+ sklearn.pipeline.Pipeline.
901
+
902
+ Returns
903
+ -------
904
+ X : sparse matrix of shape (n_samples, n_features)
905
+ Document-term matrix.
906
+ """
907
+ return self.fit(X, y).transform(X)
908
+
909
+ def _get_hasher(self):
910
+ return FeatureHasher(
911
+ n_features=self.n_features,
912
+ input_type="string",
913
+ dtype=self.dtype,
914
+ alternate_sign=self.alternate_sign,
915
+ )
916
+
917
+ def _more_tags(self):
918
+ return {"X_types": ["string"]}
919
+
920
+
921
+ def _document_frequency(X):
922
+ """Count the number of non-zero values for each feature in sparse X."""
923
+ if sp.issparse(X) and X.format == "csr":
924
+ return np.bincount(X.indices, minlength=X.shape[1])
925
+ else:
926
+ return np.diff(X.indptr)
927
+
928
+
929
+ class CountVectorizer(_VectorizerMixin, BaseEstimator):
930
+ r"""Convert a collection of text documents to a matrix of token counts.
931
+
932
+ This implementation produces a sparse representation of the counts using
933
+ scipy.sparse.csr_matrix.
934
+
935
+ If you do not provide an a-priori dictionary and you do not use an analyzer
936
+ that does some kind of feature selection then the number of features will
937
+ be equal to the vocabulary size found by analyzing the data.
938
+
939
+ For an efficiency comparison of the different feature extractors, see
940
+ :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
941
+
942
+ Read more in the :ref:`User Guide <text_feature_extraction>`.
943
+
944
+ Parameters
945
+ ----------
946
+ input : {'filename', 'file', 'content'}, default='content'
947
+ - If `'filename'`, the sequence passed as an argument to fit is
948
+ expected to be a list of filenames that need reading to fetch
949
+ the raw content to analyze.
950
+
951
+ - If `'file'`, the sequence items must have a 'read' method (file-like
952
+ object) that is called to fetch the bytes in memory.
953
+
954
+ - If `'content'`, the input is expected to be a sequence of items that
955
+ can be of type string or byte.
956
+
957
+ encoding : str, default='utf-8'
958
+ If bytes or files are given to analyze, this encoding is used to
959
+ decode.
960
+
961
+ decode_error : {'strict', 'ignore', 'replace'}, default='strict'
962
+ Instruction on what to do if a byte sequence is given to analyze that
963
+ contains characters not of the given `encoding`. By default, it is
964
+ 'strict', meaning that a UnicodeDecodeError will be raised. Other
965
+ values are 'ignore' and 'replace'.
966
+
967
+ strip_accents : {'ascii', 'unicode'} or callable, default=None
968
+ Remove accents and perform other character normalization
969
+ during the preprocessing step.
970
+ 'ascii' is a fast method that only works on characters that have
971
+ a direct ASCII mapping.
972
+ 'unicode' is a slightly slower method that works on any characters.
973
+ None (default) means no character normalization is performed.
974
+
975
+ Both 'ascii' and 'unicode' use NFKD normalization from
976
+ :func:`unicodedata.normalize`.
977
+
978
+ lowercase : bool, default=True
979
+ Convert all characters to lowercase before tokenizing.
980
+
981
+ preprocessor : callable, default=None
982
+ Override the preprocessing (strip_accents and lowercase) stage while
983
+ preserving the tokenizing and n-grams generation steps.
984
+ Only applies if ``analyzer`` is not callable.
985
+
986
+ tokenizer : callable, default=None
987
+ Override the string tokenization step while preserving the
988
+ preprocessing and n-grams generation steps.
989
+ Only applies if ``analyzer == 'word'``.
990
+
991
+ stop_words : {'english'}, list, default=None
992
+ If 'english', a built-in stop word list for English is used.
993
+ There are several known issues with 'english' and you should
994
+ consider an alternative (see :ref:`stop_words`).
995
+
996
+ If a list, that list is assumed to contain stop words, all of which
997
+ will be removed from the resulting tokens.
998
+ Only applies if ``analyzer == 'word'``.
999
+
1000
+ If None, no stop words will be used. In this case, setting `max_df`
1001
+ to a higher value, such as in the range (0.7, 1.0), can automatically detect
1002
+ and filter stop words based on intra corpus document frequency of terms.
1003
+
1004
+ token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b"
1005
+ Regular expression denoting what constitutes a "token", only used
1006
+ if ``analyzer == 'word'``. The default regexp selects tokens of 2
1007
+ or more alphanumeric characters (punctuation is completely ignored
1008
+ and always treated as a token separator).
1009
+
1010
+ If there is a capturing group in token_pattern then the
1011
+ captured group content, not the entire match, becomes the token.
1012
+ At most one capturing group is permitted.
1013
+
1014
+ ngram_range : tuple (min_n, max_n), default=(1, 1)
1015
+ The lower and upper boundary of the range of n-values for different
1016
+ word n-grams or char n-grams to be extracted. All values of n such
1017
+ that min_n <= n <= max_n will be used. For example an
1018
+ ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
1019
+ unigrams and bigrams, and ``(2, 2)`` means only bigrams.
1020
+ Only applies if ``analyzer`` is not callable.
1021
+
1022
+ analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
1023
+ Whether the feature should be made of word n-gram or character
1024
+ n-grams.
1025
+ Option 'char_wb' creates character n-grams only from text inside
1026
+ word boundaries; n-grams at the edges of words are padded with space.
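+
+ For example, with ``analyzer='char_wb'`` and ``ngram_range=(3, 3)``,
+ the document ``"ab cd"`` produces the n-grams ``" ab"``, ``"ab "``,
+ ``" cd"`` and ``"cd "``.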
1027
+
1028
+ If a callable is passed it is used to extract the sequence of features
1029
+ out of the raw, unprocessed input.
1030
+
1031
+ .. versionchanged:: 0.21
1032
+
1033
+ Since v0.21, if ``input`` is ``filename`` or ``file``, the data is
1034
+ first read from the file and then passed to the given callable
1035
+ analyzer.
1036
+
1037
+ max_df : float in range [0.0, 1.0] or int, default=1.0
1038
+ When building the vocabulary ignore terms that have a document
1039
+ frequency strictly higher than the given threshold (corpus-specific
1040
+ stop words).
1041
+ If float, the parameter represents a proportion of documents; if integer,
1042
+ absolute counts.
1043
+ This parameter is ignored if vocabulary is not None.
1044
+
1045
+ min_df : float in range [0.0, 1.0] or int, default=1
1046
+ When building the vocabulary ignore terms that have a document
1047
+ frequency strictly lower than the given threshold. This value is also
1048
+ called cut-off in the literature.
1049
+ If float, the parameter represents a proportion of documents; if integer,
1050
+ absolute counts.
1051
+ This parameter is ignored if vocabulary is not None.
1052
+
1053
+ max_features : int, default=None
1054
+ If not None, build a vocabulary that only consider the top
1055
+ `max_features` ordered by term frequency across the corpus.
1056
+ Otherwise, all features are used.
1057
+
1058
+ This parameter is ignored if vocabulary is not None.
1059
+
1060
+ vocabulary : Mapping or iterable, default=None
1061
+ Either a Mapping (e.g., a dict) where keys are terms and values are
1062
+ indices in the feature matrix, or an iterable over terms. If not
1063
+ given, a vocabulary is determined from the input documents. Indices
1064
+ in the mapping should not be repeated and should not have any gap
1065
+ between 0 and the largest index.
1066
+
1067
+ binary : bool, default=False
1068
+ If True, all non zero counts are set to 1. This is useful for discrete
1069
+ probabilistic models that model binary events rather than integer
1070
+ counts.
1071
+
1072
+ dtype : dtype, default=np.int64
1073
+ Type of the matrix returned by fit_transform() or transform().
1074
+
1075
+ Attributes
1076
+ ----------
1077
+ vocabulary_ : dict
1078
+ A mapping of terms to feature indices.
1079
+
1080
+ fixed_vocabulary_ : bool
1081
+ True if a fixed vocabulary of term to indices mapping
1082
+ is provided by the user.
1083
+
1084
+ stop_words_ : set
1085
+ Terms that were ignored because they either:
1086
+
1087
+ - occurred in too many documents (`max_df`)
1088
+ - occurred in too few documents (`min_df`)
1089
+ - were cut off by feature selection (`max_features`).
1090
+
1091
+ This is only available if no vocabulary was given.
1092
+
1093
+ See Also
1094
+ --------
1095
+ HashingVectorizer : Convert a collection of text documents to a
1096
+ matrix of token occurrences.
1097
+
1098
+ TfidfVectorizer : Convert a collection of raw documents to a matrix
1099
+ of TF-IDF features.
1100
+
1101
+ Notes
1102
+ -----
1103
+ The ``stop_words_`` attribute can get large and increase the model size
1104
+ when pickling. This attribute is provided only for introspection and can
1105
+ be safely removed using delattr or set to None before pickling.
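+ For example, calling ``delattr(vectorizer, 'stop_words_')`` on a fitted
+ vectorizer (or setting the attribute to ``None``) drops it before
+ pickling without affecting ``transform``.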
1106
+
1107
+ Examples
1108
+ --------
1109
+ >>> from sklearn.feature_extraction.text import CountVectorizer
1110
+ >>> corpus = [
1111
+ ... 'This is the first document.',
1112
+ ... 'This document is the second document.',
1113
+ ... 'And this is the third one.',
1114
+ ... 'Is this the first document?',
1115
+ ... ]
1116
+ >>> vectorizer = CountVectorizer()
1117
+ >>> X = vectorizer.fit_transform(corpus)
1118
+ >>> vectorizer.get_feature_names_out()
1119
+ array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
1120
+ 'this'], ...)
1121
+ >>> print(X.toarray())
1122
+ [[0 1 1 1 0 0 1 0 1]
1123
+ [0 2 0 1 0 1 1 0 1]
1124
+ [1 0 0 1 1 0 1 1 1]
1125
+ [0 1 1 1 0 0 1 0 1]]
1126
+ >>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2))
1127
+ >>> X2 = vectorizer2.fit_transform(corpus)
1128
+ >>> vectorizer2.get_feature_names_out()
1129
+ array(['and this', 'document is', 'first document', 'is the', 'is this',
1130
+ 'second document', 'the first', 'the second', 'the third', 'third one',
1131
+ 'this document', 'this is', 'this the'], ...)
1132
+ >>> print(X2.toarray())
1133
+ [[0 0 1 1 0 0 1 0 0 0 0 1 0]
1134
+ [0 1 0 1 0 1 0 1 0 0 1 0 0]
1135
+ [1 0 0 1 0 0 0 0 1 1 0 1 0]
1136
+ [0 0 1 0 1 0 1 0 0 0 0 0 1]]
1137
+ """
1138
+
1139
+ _parameter_constraints: dict = {
1140
+ "input": [StrOptions({"filename", "file", "content"})],
1141
+ "encoding": [str],
1142
+ "decode_error": [StrOptions({"strict", "ignore", "replace"})],
1143
+ "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable],
1144
+ "lowercase": ["boolean"],
1145
+ "preprocessor": [callable, None],
1146
+ "tokenizer": [callable, None],
1147
+ "stop_words": [StrOptions({"english"}), list, None],
1148
+ "token_pattern": [str, None],
1149
+ "ngram_range": [tuple],
1150
+ "analyzer": [StrOptions({"word", "char", "char_wb"}), callable],
1151
+ "max_df": [
1152
+ Interval(RealNotInt, 0, 1, closed="both"),
1153
+ Interval(Integral, 1, None, closed="left"),
1154
+ ],
1155
+ "min_df": [
1156
+ Interval(RealNotInt, 0, 1, closed="both"),
1157
+ Interval(Integral, 1, None, closed="left"),
1158
+ ],
1159
+ "max_features": [Interval(Integral, 1, None, closed="left"), None],
1160
+ "vocabulary": [Mapping, HasMethods("__iter__"), None],
1161
+ "binary": ["boolean"],
1162
+ "dtype": "no_validation", # delegate to numpy
1163
+ }
1164
+
1165
+ def __init__(
1166
+ self,
1167
+ *,
1168
+ input="content",
1169
+ encoding="utf-8",
1170
+ decode_error="strict",
1171
+ strip_accents=None,
1172
+ lowercase=True,
1173
+ preprocessor=None,
1174
+ tokenizer=None,
1175
+ stop_words=None,
1176
+ token_pattern=r"(?u)\b\w\w+\b",
1177
+ ngram_range=(1, 1),
1178
+ analyzer="word",
1179
+ max_df=1.0,
1180
+ min_df=1,
1181
+ max_features=None,
1182
+ vocabulary=None,
1183
+ binary=False,
1184
+ dtype=np.int64,
1185
+ ):
1186
+ self.input = input
1187
+ self.encoding = encoding
1188
+ self.decode_error = decode_error
1189
+ self.strip_accents = strip_accents
1190
+ self.preprocessor = preprocessor
1191
+ self.tokenizer = tokenizer
1192
+ self.analyzer = analyzer
1193
+ self.lowercase = lowercase
1194
+ self.token_pattern = token_pattern
1195
+ self.stop_words = stop_words
1196
+ self.max_df = max_df
1197
+ self.min_df = min_df
1198
+ self.max_features = max_features
1199
+ self.ngram_range = ngram_range
1200
+ self.vocabulary = vocabulary
1201
+ self.binary = binary
1202
+ self.dtype = dtype
1203
+
1204
+ def _sort_features(self, X, vocabulary):
1205
+ """Sort features by name
1206
+
1207
+ Returns a reordered matrix and modifies the vocabulary in place
1208
+ """
1209
+ sorted_features = sorted(vocabulary.items())
1210
+ map_index = np.empty(len(sorted_features), dtype=X.indices.dtype)
1211
+ for new_val, (term, old_val) in enumerate(sorted_features):
1212
+ vocabulary[term] = new_val
1213
+ map_index[old_val] = new_val
1214
+
1215
+ X.indices = map_index.take(X.indices, mode="clip")
1216
+ return X
1217
+
1218
+ def _limit_features(self, X, vocabulary, high=None, low=None, limit=None):
1219
+ """Remove too rare or too common features.
1220
+
1221
+ Prune features that are non-zero in more documents than high or fewer
1222
+ documents than low, modifying the vocabulary, and restricting it to
1223
+ at most the limit most frequent.
1224
+
1225
+ This does not prune samples with zero features.
1226
+ """
1227
+ if high is None and low is None and limit is None:
1228
+ return X, set()
1229
+
1230
+ # Calculate a mask based on document frequencies
1231
+ dfs = _document_frequency(X)
1232
+ mask = np.ones(len(dfs), dtype=bool)
1233
+ if high is not None:
1234
+ mask &= dfs <= high
1235
+ if low is not None:
1236
+ mask &= dfs >= low
1237
+ if limit is not None and mask.sum() > limit:
1238
+ tfs = np.asarray(X.sum(axis=0)).ravel()
1239
+ mask_inds = (-tfs[mask]).argsort()[:limit]
1240
+ new_mask = np.zeros(len(dfs), dtype=bool)
1241
+ new_mask[np.where(mask)[0][mask_inds]] = True
1242
+ mask = new_mask
1243
+
1244
+ new_indices = np.cumsum(mask) - 1 # maps old indices to new
1245
+ removed_terms = set()
1246
+ for term, old_index in list(vocabulary.items()):
1247
+ if mask[old_index]:
1248
+ vocabulary[term] = new_indices[old_index]
1249
+ else:
1250
+ del vocabulary[term]
1251
+ removed_terms.add(term)
1252
+ kept_indices = np.where(mask)[0]
1253
+ if len(kept_indices) == 0:
1254
+ raise ValueError(
1255
+ "After pruning, no terms remain. Try a lower min_df or a higher max_df."
1256
+ )
1257
+ return X[:, kept_indices], removed_terms
1258
+
1259
+ def _count_vocab(self, raw_documents, fixed_vocab):
1260
+ """Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
1261
+ if fixed_vocab:
1262
+ vocabulary = self.vocabulary_
1263
+ else:
1264
+ # Add a new value when a new vocabulary item is seen
1265
+ vocabulary = defaultdict()
1266
+ vocabulary.default_factory = vocabulary.__len__
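+ # i.e. looking up an unseen term inserts it with index equal to the
+ # current vocabulary size, so terms receive consecutive indices
+ # 0, 1, 2, ... in order of first appearance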
1267
+
1268
+ analyze = self.build_analyzer()
1269
+ j_indices = []
1270
+ indptr = []
1271
+
1272
+ values = _make_int_array()
1273
+ indptr.append(0)
1274
+ for doc in raw_documents:
1275
+ feature_counter = {}
1276
+ for feature in analyze(doc):
1277
+ try:
1278
+ feature_idx = vocabulary[feature]
1279
+ if feature_idx not in feature_counter:
1280
+ feature_counter[feature_idx] = 1
1281
+ else:
1282
+ feature_counter[feature_idx] += 1
1283
+ except KeyError:
1284
+ # Ignore out-of-vocabulary items for fixed_vocab=True
1285
+ continue
1286
+
1287
+ j_indices.extend(feature_counter.keys())
1288
+ values.extend(feature_counter.values())
1289
+ indptr.append(len(j_indices))
1290
+
1291
+ if not fixed_vocab:
1292
+ # disable defaultdict behaviour
1293
+ vocabulary = dict(vocabulary)
1294
+ if not vocabulary:
1295
+ raise ValueError(
1296
+ "empty vocabulary; perhaps the documents only contain stop words"
1297
+ )
1298
+
1299
+ if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1
1300
+ if _IS_32BIT:
1301
+ raise ValueError(
1302
+ (
1303
+ "sparse CSR array has {} non-zero "
1304
+ "elements and requires 64 bit indexing, "
1305
+ "which is unsupported with 32 bit Python."
1306
+ ).format(indptr[-1])
1307
+ )
1308
+ indices_dtype = np.int64
1309
+
1310
+ else:
1311
+ indices_dtype = np.int32
1312
+ j_indices = np.asarray(j_indices, dtype=indices_dtype)
1313
+ indptr = np.asarray(indptr, dtype=indices_dtype)
1314
+ values = np.frombuffer(values, dtype=np.intc)
1315
+
1316
+ X = sp.csr_matrix(
1317
+ (values, j_indices, indptr),
1318
+ shape=(len(indptr) - 1, len(vocabulary)),
1319
+ dtype=self.dtype,
1320
+ )
1321
+ X.sort_indices()
1322
+ return vocabulary, X
1323
+
1324
+ def fit(self, raw_documents, y=None):
1325
+ """Learn a vocabulary dictionary of all tokens in the raw documents.
1326
+
1327
+ Parameters
1328
+ ----------
1329
+ raw_documents : iterable
1330
+ An iterable which generates either str, unicode or file objects.
1331
+
1332
+ y : None
1333
+ This parameter is ignored.
1334
+
1335
+ Returns
1336
+ -------
1337
+ self : object
1338
+ Fitted vectorizer.
1339
+ """
1340
+ self.fit_transform(raw_documents)
1341
+ return self
1342
+
1343
+ @_fit_context(prefer_skip_nested_validation=True)
1344
+ def fit_transform(self, raw_documents, y=None):
1345
+ """Learn the vocabulary dictionary and return document-term matrix.
1346
+
1347
+ This is equivalent to fit followed by transform, but more efficiently
1348
+ implemented.
1349
+
1350
+ Parameters
1351
+ ----------
1352
+ raw_documents : iterable
1353
+ An iterable which generates either str, unicode or file objects.
1354
+
1355
+ y : None
1356
+ This parameter is ignored.
1357
+
1358
+ Returns
1359
+ -------
1360
+ X : array of shape (n_samples, n_features)
1361
+ Document-term matrix.
1362
+ """
1363
+ # We intentionally don't call the transform method to make
1364
+ # fit_transform overridable without unwanted side effects in
1365
+ # TfidfVectorizer.
1366
+ if isinstance(raw_documents, str):
1367
+ raise ValueError(
1368
+ "Iterable over raw text documents expected, string object received."
1369
+ )
1370
+
1371
+ self._validate_ngram_range()
1372
+ self._warn_for_unused_params()
1373
+ self._validate_vocabulary()
1374
+ max_df = self.max_df
1375
+ min_df = self.min_df
1376
+ max_features = self.max_features
1377
+
1378
+ if self.fixed_vocabulary_ and self.lowercase:
1379
+ for term in self.vocabulary:
1380
+ if any(map(str.isupper, term)):
1381
+ warnings.warn(
1382
+ "Upper case characters found in"
1383
+ " vocabulary while 'lowercase'"
1384
+ " is True. These entries will not"
1385
+ " be matched with any documents"
1386
+ )
1387
+ break
1388
+
1389
+ vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_)
1390
+
1391
+ if self.binary:
1392
+ X.data.fill(1)
1393
+
1394
+ if not self.fixed_vocabulary_:
1395
+ n_doc = X.shape[0]
1396
+ max_doc_count = max_df if isinstance(max_df, Integral) else max_df * n_doc
1397
+ min_doc_count = min_df if isinstance(min_df, Integral) else min_df * n_doc
1398
+ if max_doc_count < min_doc_count:
1399
+ raise ValueError("max_df corresponds to < documents than min_df")
1400
+ if max_features is not None:
1401
+ X = self._sort_features(X, vocabulary)
1402
+ X, self.stop_words_ = self._limit_features(
1403
+ X, vocabulary, max_doc_count, min_doc_count, max_features
1404
+ )
1405
+ if max_features is None:
1406
+ X = self._sort_features(X, vocabulary)
1407
+ self.vocabulary_ = vocabulary
1408
+
1409
+ return X
1410
+
1411
+ def transform(self, raw_documents):
1412
+ """Transform documents to document-term matrix.
1413
+
1414
+ Extract token counts out of raw text documents using the vocabulary
1415
+ fitted with fit or the one provided to the constructor.
1416
+
1417
+ Parameters
1418
+ ----------
1419
+ raw_documents : iterable
1420
+ An iterable which generates either str, unicode or file objects.
1421
+
1422
+ Returns
1423
+ -------
1424
+ X : sparse matrix of shape (n_samples, n_features)
1425
+ Document-term matrix.
1426
+ """
1427
+ if isinstance(raw_documents, str):
1428
+ raise ValueError(
1429
+ "Iterable over raw text documents expected, string object received."
1430
+ )
1431
+ self._check_vocabulary()
1432
+
1433
+ # use the same matrix-building strategy as fit_transform
1434
+ _, X = self._count_vocab(raw_documents, fixed_vocab=True)
1435
+ if self.binary:
1436
+ X.data.fill(1)
1437
+ return X
1438
+
1439
+ def inverse_transform(self, X):
1440
+ """Return terms per document with nonzero entries in X.
1441
+
1442
+ Parameters
1443
+ ----------
1444
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1445
+ Document-term matrix.
1446
+
1447
+ Returns
1448
+ -------
1449
+ X_inv : list of arrays of shape (n_samples,)
1450
+ List of arrays of terms.
1451
+ """
1452
+ self._check_vocabulary()
1453
+ # We need CSR format for fast row manipulations.
1454
+ X = check_array(X, accept_sparse="csr")
1455
+ n_samples = X.shape[0]
1456
+
1457
+ terms = np.array(list(self.vocabulary_.keys()))
1458
+ indices = np.array(list(self.vocabulary_.values()))
1459
+ inverse_vocabulary = terms[np.argsort(indices)]
1460
+
1461
+ if sp.issparse(X):
1462
+ return [
1463
+ inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
1464
+ for i in range(n_samples)
1465
+ ]
1466
+ else:
1467
+ return [
1468
+ inverse_vocabulary[np.flatnonzero(X[i, :])].ravel()
1469
+ for i in range(n_samples)
1470
+ ]
1471
+
1472
+ def get_feature_names_out(self, input_features=None):
1473
+ """Get output feature names for transformation.
1474
+
1475
+ Parameters
1476
+ ----------
1477
+ input_features : array-like of str or None, default=None
1478
+ Not used, present here for API consistency by convention.
1479
+
1480
+ Returns
1481
+ -------
1482
+ feature_names_out : ndarray of str objects
1483
+ Transformed feature names.
1484
+ """
1485
+ self._check_vocabulary()
1486
+ return np.asarray(
1487
+ [t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))],
1488
+ dtype=object,
1489
+ )
1490
+
1491
+ def _more_tags(self):
1492
+ return {"X_types": ["string"]}
1493
+
1494
+
1495
+ def _make_int_array():
1496
+ """Construct an array.array of a type suitable for scipy.sparse indices."""
1497
+ return array.array(str("i"))
1498
+
1499
+
1500
+ class TfidfTransformer(
1501
+ OneToOneFeatureMixin, TransformerMixin, BaseEstimator, auto_wrap_output_keys=None
1502
+ ):
1503
+ """Transform a count matrix to a normalized tf or tf-idf representation.
1504
+
1505
+ Tf means term-frequency while tf-idf means term-frequency times inverse
1506
+ document-frequency. This is a common term weighting scheme in information
1507
+ retrieval, that has also found good use in document classification.
1508
+
1509
+ The goal of using tf-idf instead of the raw frequencies of occurrence of a
1510
+ token in a given document is to scale down the impact of tokens that occur
1511
+ very frequently in a given corpus and that are hence empirically less
1512
+ informative than features that occur in a small fraction of the training
1513
+ corpus.
1514
+
1515
+ The formula that is used to compute the tf-idf for a term t of a document d
1516
+ in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is
1517
+ computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where
1518
+ n is the total number of documents in the document set and df(t) is the
1519
+ document frequency of t; the document frequency is the number of documents
1520
+ in the document set that contain the term t. The effect of adding "1" to
1521
+ the idf in the equation above is that terms with zero idf, i.e., terms
1522
+ that occur in all documents in a training set, will not be entirely
1523
+ ignored.
1524
+ (Note that the idf formula above differs from the standard textbook
1525
+ notation that defines the idf as
1526
+ idf(t) = log [ n / (df(t) + 1) ]).
1527
+
1528
+ If ``smooth_idf=True`` (the default), the constant "1" is added to the
1529
+ numerator and denominator of the idf as if an extra document was seen
1530
+ containing every term in the collection exactly once, which prevents
1531
+ zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.
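+
+ For example, in a corpus of n = 4 documents, a term occurring in a single
+ document gets idf = log((1 + 4) / (1 + 1)) + 1, roughly 1.916, whereas a
+ term occurring in every document gets idf = log(5 / 5) + 1 = 1, matching
+ the ``idf_`` values in the Examples section below:
+
+ >>> import numpy as np
+ >>> round(float(np.log((1 + 4) / (1 + 1)) + 1), 8)
+ 1.91629073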
1532
+
1533
+ Furthermore, the formulas used to compute tf and idf depend
1534
+ on parameter settings that correspond to the SMART notation used in IR
1535
+ as follows:
1536
+
1537
+ Tf is "n" (natural) by default, "l" (logarithmic) when
1538
+ ``sublinear_tf=True``.
1539
+ Idf is "t" when use_idf is given, "n" (none) otherwise.
1540
+ Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
1541
+ when ``norm=None``.
1542
+
1543
+ Read more in the :ref:`User Guide <text_feature_extraction>`.
1544
+
1545
+ Parameters
1546
+ ----------
1547
+ norm : {'l1', 'l2'} or None, default='l2'
1548
+ Each output row will have unit norm, either:
1549
+
1550
+ - 'l2': Sum of squares of vector elements is 1. The cosine
1551
+ similarity between two vectors is their dot product when l2 norm has
1552
+ been applied.
1553
+ - 'l1': Sum of absolute values of vector elements is 1.
1554
+ See :func:`~sklearn.preprocessing.normalize`.
1555
+ - None: No normalization.
1556
+
1557
+ use_idf : bool, default=True
1558
+ Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
1559
+
1560
+ smooth_idf : bool, default=True
1561
+ Smooth idf weights by adding one to document frequencies, as if an
1562
+ extra document was seen containing every term in the collection
1563
+ exactly once. Prevents zero divisions.
1564
+
1565
+ sublinear_tf : bool, default=False
1566
+ Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
1567
+
1568
+ Attributes
1569
+ ----------
1570
+ idf_ : array of shape (n_features)
1571
+ The inverse document frequency (IDF) vector; only defined
1572
+ if ``use_idf`` is True.
1573
+
1574
+ .. versionadded:: 0.20
1575
+
1576
+ n_features_in_ : int
1577
+ Number of features seen during :term:`fit`.
1578
+
1579
+ .. versionadded:: 1.0
1580
+
1581
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1582
+ Names of features seen during :term:`fit`. Defined only when `X`
1583
+ has feature names that are all strings.
1584
+
1585
+ .. versionadded:: 1.0
1586
+
1587
+ See Also
1588
+ --------
1589
+ CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
1590
+
1591
+ TfidfVectorizer : Convert a collection of raw documents to a matrix of
1592
+ TF-IDF features.
1593
+
1594
+ HashingVectorizer : Convert a collection of text documents to a matrix
1595
+ of token occurrences.
1596
+
1597
+ References
1598
+ ----------
1599
+ .. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
1600
+ Information Retrieval. Addison Wesley, pp. 68-74.
1601
+
1602
+ .. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008).
1603
+ Introduction to Information Retrieval. Cambridge University
1604
+ Press, pp. 118-120.
1605
+
1606
+ Examples
1607
+ --------
1608
+ >>> from sklearn.feature_extraction.text import TfidfTransformer
1609
+ >>> from sklearn.feature_extraction.text import CountVectorizer
1610
+ >>> from sklearn.pipeline import Pipeline
1611
+ >>> corpus = ['this is the first document',
1612
+ ... 'this document is the second document',
1613
+ ... 'and this is the third one',
1614
+ ... 'is this the first document']
1615
+ >>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the',
1616
+ ... 'and', 'one']
1617
+ >>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)),
1618
+ ... ('tfid', TfidfTransformer())]).fit(corpus)
1619
+ >>> pipe['count'].transform(corpus).toarray()
1620
+ array([[1, 1, 1, 1, 0, 1, 0, 0],
1621
+ [1, 2, 0, 1, 1, 1, 0, 0],
1622
+ [1, 0, 0, 1, 0, 1, 1, 1],
1623
+ [1, 1, 1, 1, 0, 1, 0, 0]])
1624
+ >>> pipe['tfid'].idf_
1625
+ array([1. , 1.22314355, 1.51082562, 1. , 1.91629073,
1626
+ 1. , 1.91629073, 1.91629073])
1627
+ >>> pipe.transform(corpus).shape
1628
+ (4, 8)
1629
+ """
1630
+
1631
+ _parameter_constraints: dict = {
1632
+ "norm": [StrOptions({"l1", "l2"}), None],
1633
+ "use_idf": ["boolean"],
1634
+ "smooth_idf": ["boolean"],
1635
+ "sublinear_tf": ["boolean"],
1636
+ }
1637
+
1638
+ def __init__(self, *, norm="l2", use_idf=True, smooth_idf=True, sublinear_tf=False):
1639
+ self.norm = norm
1640
+ self.use_idf = use_idf
1641
+ self.smooth_idf = smooth_idf
1642
+ self.sublinear_tf = sublinear_tf
1643
+
1644
+ @_fit_context(prefer_skip_nested_validation=True)
1645
+ def fit(self, X, y=None):
1646
+ """Learn the idf vector (global term weights).
1647
+
1648
+ Parameters
1649
+ ----------
1650
+ X : sparse matrix of shape (n_samples, n_features)
1651
+ A matrix of term/token counts.
1652
+
1653
+ y : None
1654
+ This parameter is not needed to compute tf-idf.
1655
+
1656
+ Returns
1657
+ -------
1658
+ self : object
1659
+ Fitted transformer.
1660
+ """
1661
+ # large sparse data is not supported for 32bit platforms because
1662
+ # _document_frequency uses np.bincount which works on arrays of
1663
+ # dtype NPY_INTP which is int32 for 32bit platforms. See #20923
1664
+ X = self._validate_data(
1665
+ X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT
1666
+ )
1667
+ if not sp.issparse(X):
1668
+ X = sp.csr_matrix(X)
1669
+ dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64
1670
+
1671
+ if self.use_idf:
1672
+ n_samples, n_features = X.shape
1673
+ df = _document_frequency(X)
1674
+ df = df.astype(dtype, copy=False)
1675
+
1676
+ # perform idf smoothing if required
1677
+ df += int(self.smooth_idf)
1678
+ n_samples += int(self.smooth_idf)
1679
+
1680
+ # log+1 instead of log makes sure terms with zero idf don't get
1681
+ # suppressed entirely.
1682
+ idf = np.log(n_samples / df) + 1
1683
+ self._idf_diag = sp.diags(
1684
+ idf,
1685
+ offsets=0,
1686
+ shape=(n_features, n_features),
1687
+ format="csr",
1688
+ dtype=dtype,
1689
+ )
1690
+
1691
+ return self
1692
+
1693
+ def transform(self, X, copy=True):
1694
+ """Transform a count matrix to a tf or tf-idf representation.
1695
+
1696
+ Parameters
1697
+ ----------
1698
+ X : sparse matrix of (n_samples, n_features)
1699
+ A matrix of term/token counts.
1700
+
1701
+ copy : bool, default=True
1702
+ Whether to copy X and operate on the copy or perform in-place
1703
+ operations.
1704
+
1705
+ Returns
1706
+ -------
1707
+ vectors : sparse matrix of shape (n_samples, n_features)
1708
+ Tf-idf-weighted document-term matrix.
1709
+ """
1710
+ X = self._validate_data(
1711
+ X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy, reset=False
1712
+ )
1713
+ if not sp.issparse(X):
1714
+ X = sp.csr_matrix(X, dtype=np.float64)
1715
+
1716
+ if self.sublinear_tf:
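+ # replace each stored term frequency tf with 1 + log(tf), in place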
1717
+ np.log(X.data, X.data)
1718
+ X.data += 1
1719
+
1720
+ if self.use_idf:
1721
+ # idf_ being a property, the automatic attributes detection
1722
+ # does not work as usual and we need to specify the attribute
1723
+ # name:
1724
+ check_is_fitted(self, attributes=["idf_"], msg="idf vector is not fitted")
1725
+
1726
+ X = X @ self._idf_diag
1727
+
1728
+ if self.norm is not None:
1729
+ X = normalize(X, norm=self.norm, copy=False)
1730
+
1731
+ return X
1732
+
1733
+ @property
1734
+ def idf_(self):
1735
+ """Inverse document frequency vector, only defined if `use_idf=True`.
1736
+
1737
+ Returns
1738
+ -------
1739
+ ndarray of shape (n_features,)
1740
+ """
1741
+ # if _idf_diag is not set, this will raise an attribute error,
1742
+ # which means hasattr(self, "idf_") is False
1743
+ return np.ravel(self._idf_diag.sum(axis=0))
1744
+
1745
+ @idf_.setter
1746
+ def idf_(self, value):
1747
+ value = np.asarray(value, dtype=np.float64)
1748
+ n_features = value.shape[0]
1749
+ self._idf_diag = sp.spdiags(
1750
+ value, diags=0, m=n_features, n=n_features, format="csr"
1751
+ )
1752
+
1753
+ def _more_tags(self):
1754
+ return {"X_types": ["2darray", "sparse"]}
1755
+
1756
+
1757
+ class TfidfVectorizer(CountVectorizer):
1758
+ r"""Convert a collection of raw documents to a matrix of TF-IDF features.
1759
+
1760
+ Equivalent to :class:`CountVectorizer` followed by
1761
+ :class:`TfidfTransformer`.
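+
+ For instance (a minimal sketch), the following two computations produce
+ the same matrix:
+
+ >>> import numpy as np
+ >>> from sklearn.feature_extraction.text import (
+ ...     CountVectorizer, TfidfTransformer, TfidfVectorizer)
+ >>> docs = ['first document', 'second document']
+ >>> X1 = TfidfVectorizer().fit_transform(docs)
+ >>> X2 = TfidfTransformer().fit_transform(
+ ...     CountVectorizer().fit_transform(docs))
+ >>> bool(np.allclose(X1.toarray(), X2.toarray()))
+ True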
1762
+
1763
+ For an example of usage, see
1764
+ :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`.
1765
+
1766
+ For an efficiency comparison of the different feature extractors, see
1767
+ :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
1768
+
1769
+ Read more in the :ref:`User Guide <text_feature_extraction>`.
1770
+
1771
+ Parameters
1772
+ ----------
1773
+ input : {'filename', 'file', 'content'}, default='content'
1774
+ - If `'filename'`, the sequence passed as an argument to fit is
1775
+ expected to be a list of filenames that need reading to fetch
1776
+ the raw content to analyze.
1777
+
1778
+ - If `'file'`, the sequence items must have a 'read' method (file-like
1779
+ object) that is called to fetch the bytes in memory.
1780
+
1781
+ - If `'content'`, the input is expected to be a sequence of items that
1782
+ can be of type string or byte.
1783
+
1784
+ encoding : str, default='utf-8'
1785
+ If bytes or files are given to analyze, this encoding is used to
1786
+ decode.
1787
+
1788
+ decode_error : {'strict', 'ignore', 'replace'}, default='strict'
1789
+ Instruction on what to do if a byte sequence is given to analyze that
1790
+ contains characters not of the given `encoding`. By default, it is
1791
+ 'strict', meaning that a UnicodeDecodeError will be raised. Other
1792
+ values are 'ignore' and 'replace'.
1793
+
1794
+ strip_accents : {'ascii', 'unicode'} or callable, default=None
1795
+ Remove accents and perform other character normalization
1796
+ during the preprocessing step.
1797
+ 'ascii' is a fast method that only works on characters that have
1798
+ a direct ASCII mapping.
1799
+ 'unicode' is a slightly slower method that works on any characters.
1800
+ None (default) means no character normalization is performed.
1801
+
1802
+ Both 'ascii' and 'unicode' use NFKD normalization from
1803
+ :func:`unicodedata.normalize`.
1804
+
1805
+ lowercase : bool, default=True
1806
+ Convert all characters to lowercase before tokenizing.
1807
+
1808
+ preprocessor : callable, default=None
1809
+ Override the preprocessing (string transformation) stage while
1810
+ preserving the tokenizing and n-grams generation steps.
1811
+ Only applies if ``analyzer`` is not callable.
1812
+
1813
+ tokenizer : callable, default=None
1814
+ Override the string tokenization step while preserving the
1815
+ preprocessing and n-grams generation steps.
1816
+ Only applies if ``analyzer == 'word'``.
1817
+
1818
+ analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
1819
+ Whether the feature should be made of word or character n-grams.
1820
+ Option 'char_wb' creates character n-grams only from text inside
1821
+ word boundaries; n-grams at the edges of words are padded with space.
1822
+
1823
+ If a callable is passed it is used to extract the sequence of features
1824
+ out of the raw, unprocessed input.
1825
+
1826
+ .. versionchanged:: 0.21
1827
+ Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
1828
+ is first read from the file and then passed to the given callable
1829
+ analyzer.
1830
+
1831
+ stop_words : {'english'}, list, default=None
1832
+ If a string, it is passed to _check_stop_list and the appropriate stop
1833
+ list is returned. 'english' is currently the only supported string
1834
+ value.
1835
+ There are several known issues with 'english' and you should
1836
+ consider an alternative (see :ref:`stop_words`).
1837
+
1838
+ If a list, that list is assumed to contain stop words, all of which
1839
+ will be removed from the resulting tokens.
1840
+ Only applies if ``analyzer == 'word'``.
1841
+
1842
+ If None, no stop words will be used. In this case, setting `max_df`
1843
+ to a higher value, such as in the range (0.7, 1.0), can automatically detect
1844
+ and filter stop words based on intra corpus document frequency of terms.
1845
+
1846
+ token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
1847
+ Regular expression denoting what constitutes a "token", only used
1848
+ if ``analyzer == 'word'``. The default regexp selects tokens of 2
1849
+ or more alphanumeric characters (punctuation is completely ignored
1850
+ and always treated as a token separator).
1851
+
1852
+ If there is a capturing group in token_pattern then the
1853
+ captured group content, not the entire match, becomes the token.
1854
+ At most one capturing group is permitted.
1855
+
1856
+ ngram_range : tuple (min_n, max_n), default=(1, 1)
1857
+ The lower and upper boundary of the range of n-values for different
1858
+ n-grams to be extracted. All values of n such that min_n <= n <= max_n
1859
+ will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
1860
+ unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
1861
+ only bigrams.
1862
+ Only applies if ``analyzer`` is not callable.
1863
+
1864
+ max_df : float or int, default=1.0
1865
+ When building the vocabulary ignore terms that have a document
1866
+ frequency strictly higher than the given threshold (corpus-specific
1867
+ stop words).
1868
+ If float in range [0.0, 1.0], the parameter represents a proportion of
1869
+ documents; if integer, absolute counts.
1870
+ This parameter is ignored if vocabulary is not None.
1871
+
1872
+ min_df : float or int, default=1
1873
+ When building the vocabulary ignore terms that have a document
1874
+ frequency strictly lower than the given threshold. This value is also
1875
+ called cut-off in the literature.
1876
+ If float in range of [0.0, 1.0], the parameter represents a proportion
1877
+ of documents; if integer, absolute counts.
1878
+ This parameter is ignored if vocabulary is not None.
1879
+
1880
+ max_features : int, default=None
1881
+ If not None, build a vocabulary that only consider the top
1882
+ `max_features` ordered by term frequency across the corpus.
1883
+ Otherwise, all features are used.
1884
+
1885
+ This parameter is ignored if vocabulary is not None.
1886
+
1887
+ vocabulary : Mapping or iterable, default=None
1888
+ Either a Mapping (e.g., a dict) where keys are terms and values are
1889
+ indices in the feature matrix, or an iterable over terms. If not
1890
+ given, a vocabulary is determined from the input documents.
1891
+
1892
+ binary : bool, default=False
1893
+ If True, all non-zero term counts are set to 1. This does not mean
1894
+ outputs will have only 0/1 values, only that the tf term in tf-idf
1895
+ is binary. (Set `binary` to True, `use_idf` to False and
1896
+ `norm` to None to get 0/1 outputs).
1897
+
1898
+ dtype : dtype, default=float64
1899
+ Type of the matrix returned by fit_transform() or transform().
1900
+
1901
+ norm : {'l1', 'l2'} or None, default='l2'
1902
+ Each output row will have unit norm, either:
1903
+
1904
+ - 'l2': Sum of squares of vector elements is 1. The cosine
1905
+ similarity between two vectors is their dot product when l2 norm has
1906
+ been applied.
1907
+ - 'l1': Sum of absolute values of vector elements is 1.
1908
+ See :func:`~sklearn.preprocessing.normalize`.
1909
+ - None: No normalization.
1910
+
1911
+ use_idf : bool, default=True
1912
+ Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
1913
+
1914
+ smooth_idf : bool, default=True
1915
+ Smooth idf weights by adding one to document frequencies, as if an
1916
+ extra document was seen containing every term in the collection
1917
+ exactly once. Prevents zero divisions.
1918
+
1919
+ sublinear_tf : bool, default=False
1920
+ Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
1921
+
1922
+ Attributes
1923
+ ----------
1924
+ vocabulary_ : dict
1925
+ A mapping of terms to feature indices.
1926
+
1927
+ fixed_vocabulary_ : bool
1928
+ True if a fixed vocabulary of term to indices mapping
1929
+ is provided by the user.
1930
+
1931
+ idf_ : array of shape (n_features,)
1932
+ The inverse document frequency (IDF) vector; only defined
1933
+ if ``use_idf`` is True.
1934
+
1935
+ stop_words_ : set
1936
+ Terms that were ignored because they either:
1937
+
1938
+ - occurred in too many documents (`max_df`)
1939
+ - occurred in too few documents (`min_df`)
1940
+ - were cut off by feature selection (`max_features`).
1941
+
1942
+ This is only available if no vocabulary was given.
1943
+
1944
+ See Also
1945
+ --------
1946
+ CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
1947
+
1948
+ TfidfTransformer : Performs the TF-IDF transformation from a provided
1949
+ matrix of counts.
1950
+
1951
+ Notes
1952
+ -----
1953
+ The ``stop_words_`` attribute can get large and increase the model size
1954
+ when pickling. This attribute is provided only for introspection and can
1955
+ be safely removed using delattr or set to None before pickling.
1956
+
1957
+ Examples
1958
+ --------
1959
+ >>> from sklearn.feature_extraction.text import TfidfVectorizer
1960
+ >>> corpus = [
1961
+ ... 'This is the first document.',
1962
+ ... 'This document is the second document.',
1963
+ ... 'And this is the third one.',
1964
+ ... 'Is this the first document?',
1965
+ ... ]
1966
+ >>> vectorizer = TfidfVectorizer()
1967
+ >>> X = vectorizer.fit_transform(corpus)
1968
+ >>> vectorizer.get_feature_names_out()
1969
+ array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
1970
+ 'this'], ...)
1971
+ >>> print(X.shape)
1972
+ (4, 9)
1973
+ """
1974
+
1975
+ _parameter_constraints: dict = {**CountVectorizer._parameter_constraints}
1976
+ _parameter_constraints.update(
1977
+ {
1978
+ "norm": [StrOptions({"l1", "l2"}), None],
1979
+ "use_idf": ["boolean"],
1980
+ "smooth_idf": ["boolean"],
1981
+ "sublinear_tf": ["boolean"],
1982
+ }
1983
+ )
1984
+
1985
+ def __init__(
1986
+ self,
1987
+ *,
1988
+ input="content",
1989
+ encoding="utf-8",
1990
+ decode_error="strict",
1991
+ strip_accents=None,
1992
+ lowercase=True,
1993
+ preprocessor=None,
1994
+ tokenizer=None,
1995
+ analyzer="word",
1996
+ stop_words=None,
1997
+ token_pattern=r"(?u)\b\w\w+\b",
1998
+ ngram_range=(1, 1),
1999
+ max_df=1.0,
2000
+ min_df=1,
2001
+ max_features=None,
2002
+ vocabulary=None,
2003
+ binary=False,
2004
+ dtype=np.float64,
2005
+ norm="l2",
2006
+ use_idf=True,
2007
+ smooth_idf=True,
2008
+ sublinear_tf=False,
2009
+ ):
2010
+ super().__init__(
2011
+ input=input,
2012
+ encoding=encoding,
2013
+ decode_error=decode_error,
2014
+ strip_accents=strip_accents,
2015
+ lowercase=lowercase,
2016
+ preprocessor=preprocessor,
2017
+ tokenizer=tokenizer,
2018
+ analyzer=analyzer,
2019
+ stop_words=stop_words,
2020
+ token_pattern=token_pattern,
2021
+ ngram_range=ngram_range,
2022
+ max_df=max_df,
2023
+ min_df=min_df,
2024
+ max_features=max_features,
2025
+ vocabulary=vocabulary,
2026
+ binary=binary,
2027
+ dtype=dtype,
2028
+ )
2029
+ self.norm = norm
2030
+ self.use_idf = use_idf
2031
+ self.smooth_idf = smooth_idf
2032
+ self.sublinear_tf = sublinear_tf
2033
+
2034
+ # Broadcast the TF-IDF parameters to the underlying transformer instance
2035
+ # for easy grid search and repr
2036
+
2037
+ @property
2038
+ def idf_(self):
2039
+ """Inverse document frequency vector, only defined if `use_idf=True`.
2040
+
2041
+ Returns
2042
+ -------
2043
+ ndarray of shape (n_features,)
2044
+ """
2045
+ if not hasattr(self, "_tfidf"):
2046
+ raise NotFittedError(
2047
+ f"{self.__class__.__name__} is not fitted yet. Call 'fit' with "
2048
+ "appropriate arguments before using this attribute."
2049
+ )
2050
+ return self._tfidf.idf_
2051
+
2052
+ @idf_.setter
2053
+ def idf_(self, value):
2054
+ if not self.use_idf:
2055
+ raise ValueError("`idf_` cannot be set when `use_idf=False`.")
2056
+ if not hasattr(self, "_tfidf"):
2057
+ # We should support transferring `idf_` from another `TfidfTransformer`
2058
+ # and therefore, we need to create the transformer instance if it does not
2059
+ # exist yet.
2060
+ self._tfidf = TfidfTransformer(
2061
+ norm=self.norm,
2062
+ use_idf=self.use_idf,
2063
+ smooth_idf=self.smooth_idf,
2064
+ sublinear_tf=self.sublinear_tf,
2065
+ )
2066
+ self._validate_vocabulary()
2067
+ if hasattr(self, "vocabulary_"):
2068
+ if len(self.vocabulary_) != len(value):
2069
+ raise ValueError(
2070
+ "idf length = %d must be equal to vocabulary size = %d"
2071
+ % (len(value), len(self.vocabulary_))
2072
+ )
2073
+ self._tfidf.idf_ = value
2074
+
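The setter above exists so that an `idf_` vector learned elsewhere (for example by a fitted `TfidfTransformer` or another `TfidfVectorizer`) can be transferred into a vectorizer whose vocabulary is fixed. A minimal sketch of that transfer, assuming this vendored scikit-learn is importable; the toy corpus and variable names are illustrative only:

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["the cat sat", "the dog sat", "the cat ran"]

orig = TfidfVectorizer().fit(docs)

# Reuse the learned idf_ in a second vectorizer whose vocabulary is pinned
# to the one learned above; no call to fit() on `copy` is needed.
copy = TfidfVectorizer(vocabulary=orig.vocabulary_)
copy.idf_ = orig.idf_

np.testing.assert_allclose(
    copy.transform(docs).toarray(), orig.transform(docs).toarray()
)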
2075
+ def _check_params(self):
2076
+ if self.dtype not in FLOAT_DTYPES:
2077
+ warnings.warn(
2078
+ "Only {} 'dtype' should be used. {} 'dtype' will "
2079
+ "be converted to np.float64.".format(FLOAT_DTYPES, self.dtype),
2080
+ UserWarning,
2081
+ )
2082
+
2083
+ @_fit_context(prefer_skip_nested_validation=True)
2084
+ def fit(self, raw_documents, y=None):
2085
+ """Learn vocabulary and idf from training set.
2086
+
2087
+ Parameters
2088
+ ----------
2089
+ raw_documents : iterable
2090
+ An iterable which generates either str, unicode or file objects.
2091
+
2092
+ y : None
2093
+ This parameter is not needed to compute tfidf.
2094
+
2095
+ Returns
2096
+ -------
2097
+ self : object
2098
+ Fitted vectorizer.
2099
+ """
2100
+ self._check_params()
2101
+ self._warn_for_unused_params()
2102
+ self._tfidf = TfidfTransformer(
2103
+ norm=self.norm,
2104
+ use_idf=self.use_idf,
2105
+ smooth_idf=self.smooth_idf,
2106
+ sublinear_tf=self.sublinear_tf,
2107
+ )
2108
+ X = super().fit_transform(raw_documents)
2109
+ self._tfidf.fit(X)
2110
+ return self
2111
+
2112
+ def fit_transform(self, raw_documents, y=None):
2113
+ """Learn vocabulary and idf, return document-term matrix.
2114
+
2115
+ This is equivalent to fit followed by transform, but more efficiently
2116
+ implemented.
2117
+
2118
+ Parameters
2119
+ ----------
2120
+ raw_documents : iterable
2121
+ An iterable which generates either str, unicode or file objects.
2122
+
2123
+ y : None
2124
+ This parameter is ignored.
2125
+
2126
+ Returns
2127
+ -------
2128
+ X : sparse matrix of (n_samples, n_features)
2129
+ Tf-idf-weighted document-term matrix.
2130
+ """
2131
+ self._check_params()
2132
+ self._tfidf = TfidfTransformer(
2133
+ norm=self.norm,
2134
+ use_idf=self.use_idf,
2135
+ smooth_idf=self.smooth_idf,
2136
+ sublinear_tf=self.sublinear_tf,
2137
+ )
2138
+ X = super().fit_transform(raw_documents)
2139
+ self._tfidf.fit(X)
2140
+ # X is already a transformed view of raw_documents so
2141
+ # we set copy to False
2142
+ return self._tfidf.transform(X, copy=False)
2143
+
2144
+ def transform(self, raw_documents):
2145
+ """Transform documents to document-term matrix.
2146
+
2147
+ Uses the vocabulary and document frequencies (df) learned by fit (or
2148
+ fit_transform).
2149
+
2150
+ Parameters
2151
+ ----------
2152
+ raw_documents : iterable
2153
+ An iterable which generates either str, unicode or file objects.
2154
+
2155
+ Returns
2156
+ -------
2157
+ X : sparse matrix of (n_samples, n_features)
2158
+ Tf-idf-weighted document-term matrix.
2159
+ """
2160
+ check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted")
2161
+
2162
+ X = super().transform(raw_documents)
2163
+ return self._tfidf.transform(X, copy=False)
2164
+
2165
+ def _more_tags(self):
2166
+ return {"X_types": ["string"], "_skip_test": True}
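The Notes section of the docstring above recommends dropping `stop_words_` before pickling because it is kept only for introspection. A short sketch of that workflow, assuming this vendored scikit-learn is importable; the two-document corpus is illustrative:

import pickle
from sklearn.feature_extraction.text import TfidfVectorizer

docs = [
    "this is the first document",
    "this document is the second document",
]

# max_df prunes terms present in (almost) every document, which populates
# the stop_words_ attribute mentioned in the Notes above.
vectorizer = TfidfVectorizer(max_df=0.9).fit(docs)

vectorizer.stop_words_ = None  # or: delattr(vectorizer, "stop_words_")
restored = pickle.loads(pickle.dumps(vectorizer))
print(restored.transform(docs).shape)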
venv/lib/python3.10/site-packages/sklearn/svm/__init__.py ADDED
@@ -0,0 +1,25 @@
1
+ """
2
+ The :mod:`sklearn.svm` module includes Support Vector Machine algorithms.
3
+ """
4
+
5
+ # See http://scikit-learn.sourceforge.net/modules/svm.html for complete
6
+ # documentation.
7
+
8
+ # Author: Fabian Pedregosa <[email protected]> with help from
9
+ # the scikit-learn community. LibSVM and LibLinear are copyright
10
+ # of their respective owners.
11
+ # License: BSD 3 clause (C) INRIA 2010
12
+
13
+ from ._bounds import l1_min_c
14
+ from ._classes import SVC, SVR, LinearSVC, LinearSVR, NuSVC, NuSVR, OneClassSVM
15
+
16
+ __all__ = [
17
+ "LinearSVC",
18
+ "LinearSVR",
19
+ "NuSVC",
20
+ "NuSVR",
21
+ "OneClassSVM",
22
+ "SVC",
23
+ "SVR",
24
+ "l1_min_c",
25
+ ]
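`__all__` above is the public surface of `sklearn.svm`; the `_base`, `_bounds` and `_classes` modules added below are implementation details. A quick usage sketch with the public names only (the toy dataset is illustrative):

from sklearn.datasets import make_classification
from sklearn.svm import SVC, LinearSVC, l1_min_c  # all listed in __all__

X, y = make_classification(n_samples=50, n_features=5, random_state=0)
clf = SVC(kernel="rbf", C=1.0).fit(X, y)
print(clf.score(X, y))
print(l1_min_c(X, y))  # helper re-exported from sklearn.svm._bounds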
venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (516 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_base.cpython-310.pyc ADDED
Binary file (30.1 kB).
 
venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_bounds.cpython-310.pyc ADDED
Binary file (3.21 kB).
 
venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_classes.cpython-310.pyc ADDED
Binary file (61.8 kB).
 
venv/lib/python3.10/site-packages/sklearn/svm/_base.py ADDED
@@ -0,0 +1,1249 @@
1
+ import warnings
2
+ from abc import ABCMeta, abstractmethod
3
+ from numbers import Integral, Real
4
+
5
+ import numpy as np
6
+ import scipy.sparse as sp
7
+
8
+ from ..base import BaseEstimator, ClassifierMixin, _fit_context
9
+ from ..exceptions import ConvergenceWarning, NotFittedError
10
+ from ..preprocessing import LabelEncoder
11
+ from ..utils import check_array, check_random_state, column_or_1d, compute_class_weight
12
+ from ..utils._param_validation import Interval, StrOptions
13
+ from ..utils.extmath import safe_sparse_dot
14
+ from ..utils.metaestimators import available_if
15
+ from ..utils.multiclass import _ovr_decision_function, check_classification_targets
16
+ from ..utils.validation import (
17
+ _check_large_sparse,
18
+ _check_sample_weight,
19
+ _num_samples,
20
+ check_consistent_length,
21
+ check_is_fitted,
22
+ )
23
+ from . import _liblinear as liblinear # type: ignore
24
+
25
+ # mypy error: error: Module 'sklearn.svm' has no attribute '_libsvm'
26
+ # (and same for other imports)
27
+ from . import _libsvm as libsvm # type: ignore
28
+ from . import _libsvm_sparse as libsvm_sparse # type: ignore
29
+
30
+ LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"]
31
+
32
+
33
+ def _one_vs_one_coef(dual_coef, n_support, support_vectors):
34
+ """Generate primal coefficients from dual coefficients
35
+ for the one-vs-one multi class LibSVM in the case
36
+ of a linear kernel."""
37
+
38
+ # get 1vs1 weights for all n*(n-1) classifiers.
39
+ # this is somewhat messy.
40
+ # shape of dual_coef_ is nSV * (n_classes -1)
41
+ # see docs for details
42
+ n_class = dual_coef.shape[0] + 1
43
+
44
+ # XXX we could do preallocation of coef but
45
+ # would have to take care in the sparse case
46
+ coef = []
47
+ sv_locs = np.cumsum(np.hstack([[0], n_support]))
48
+ for class1 in range(n_class):
49
+ # SVs for class1:
50
+ sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :]
51
+ for class2 in range(class1 + 1, n_class):
52
+ # SVs for class1:
53
+ sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :]
54
+
55
+ # dual coef for class1 SVs:
56
+ alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]]
57
+ # dual coef for class2 SVs:
58
+ alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]]
59
+ # build weight for class1 vs class2
60
+
61
+ coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))
62
+ return coef
63
+
64
+
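`_one_vs_one_coef` rebuilds primal weights for each class pair as `alpha1 @ sv1 + alpha2 @ sv2`; in the binary case this collapses to a single product of the dual coefficients with the support vectors (see `_get_coef` further down). A hedged sketch checking that binary relationship through the public attributes of a fitted linear `SVC`; the synthetic data is illustrative:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=60, n_features=4, random_state=0)
clf = SVC(kernel="linear").fit(X, y)

# For two classes the primal weights are a linear combination of the
# support vectors weighted by the (signed) dual coefficients.
manual_coef = clf.dual_coef_ @ clf.support_vectors_
np.testing.assert_allclose(manual_coef, clf.coef_)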
65
+ class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):
66
+ """Base class for estimators that use libsvm as backing library.
67
+
68
+ This implements support vector machine classification and regression.
69
+
70
+ Parameter documentation is in the derived `SVC` class.
71
+ """
72
+
73
+ _parameter_constraints: dict = {
74
+ "kernel": [
75
+ StrOptions({"linear", "poly", "rbf", "sigmoid", "precomputed"}),
76
+ callable,
77
+ ],
78
+ "degree": [Interval(Integral, 0, None, closed="left")],
79
+ "gamma": [
80
+ StrOptions({"scale", "auto"}),
81
+ Interval(Real, 0.0, None, closed="left"),
82
+ ],
83
+ "coef0": [Interval(Real, None, None, closed="neither")],
84
+ "tol": [Interval(Real, 0.0, None, closed="neither")],
85
+ "C": [Interval(Real, 0.0, None, closed="neither")],
86
+ "nu": [Interval(Real, 0.0, 1.0, closed="right")],
87
+ "epsilon": [Interval(Real, 0.0, None, closed="left")],
88
+ "shrinking": ["boolean"],
89
+ "probability": ["boolean"],
90
+ "cache_size": [Interval(Real, 0, None, closed="neither")],
91
+ "class_weight": [StrOptions({"balanced"}), dict, None],
92
+ "verbose": ["verbose"],
93
+ "max_iter": [Interval(Integral, -1, None, closed="left")],
94
+ "random_state": ["random_state"],
95
+ }
96
+
97
+ # The order of these must match the integer values in LibSVM.
98
+ # XXX These are actually the same in the dense case. Need to factor
99
+ # this out.
100
+ _sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
101
+
102
+ @abstractmethod
103
+ def __init__(
104
+ self,
105
+ kernel,
106
+ degree,
107
+ gamma,
108
+ coef0,
109
+ tol,
110
+ C,
111
+ nu,
112
+ epsilon,
113
+ shrinking,
114
+ probability,
115
+ cache_size,
116
+ class_weight,
117
+ verbose,
118
+ max_iter,
119
+ random_state,
120
+ ):
121
+ if self._impl not in LIBSVM_IMPL:
122
+ raise ValueError(
123
+ "impl should be one of %s, %s was given" % (LIBSVM_IMPL, self._impl)
124
+ )
125
+
126
+ self.kernel = kernel
127
+ self.degree = degree
128
+ self.gamma = gamma
129
+ self.coef0 = coef0
130
+ self.tol = tol
131
+ self.C = C
132
+ self.nu = nu
133
+ self.epsilon = epsilon
134
+ self.shrinking = shrinking
135
+ self.probability = probability
136
+ self.cache_size = cache_size
137
+ self.class_weight = class_weight
138
+ self.verbose = verbose
139
+ self.max_iter = max_iter
140
+ self.random_state = random_state
141
+
142
+ def _more_tags(self):
143
+ # Used by cross_val_score.
144
+ return {"pairwise": self.kernel == "precomputed"}
145
+
146
+ @_fit_context(prefer_skip_nested_validation=True)
147
+ def fit(self, X, y, sample_weight=None):
148
+ """Fit the SVM model according to the given training data.
149
+
150
+ Parameters
151
+ ----------
152
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) \
153
+ or (n_samples, n_samples)
154
+ Training vectors, where `n_samples` is the number of samples
155
+ and `n_features` is the number of features.
156
+ For kernel="precomputed", the expected shape of X is
157
+ (n_samples, n_samples).
158
+
159
+ y : array-like of shape (n_samples,)
160
+ Target values (class labels in classification, real numbers in
161
+ regression).
162
+
163
+ sample_weight : array-like of shape (n_samples,), default=None
164
+ Per-sample weights. Rescale C per sample. Higher weights
165
+ force the classifier to put more emphasis on these points.
166
+
167
+ Returns
168
+ -------
169
+ self : object
170
+ Fitted estimator.
171
+
172
+ Notes
173
+ -----
174
+ If X and y are not C-ordered and contiguous arrays of np.float64 and
175
+ X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
176
+
177
+ If X is a dense array, then the other methods will not support sparse
178
+ matrices as input.
179
+ """
180
+ rnd = check_random_state(self.random_state)
181
+
182
+ sparse = sp.issparse(X)
183
+ if sparse and self.kernel == "precomputed":
184
+ raise TypeError("Sparse precomputed kernels are not supported.")
185
+ self._sparse = sparse and not callable(self.kernel)
186
+
187
+ if callable(self.kernel):
188
+ check_consistent_length(X, y)
189
+ else:
190
+ X, y = self._validate_data(
191
+ X,
192
+ y,
193
+ dtype=np.float64,
194
+ order="C",
195
+ accept_sparse="csr",
196
+ accept_large_sparse=False,
197
+ )
198
+
199
+ y = self._validate_targets(y)
200
+
201
+ sample_weight = np.asarray(
202
+ [] if sample_weight is None else sample_weight, dtype=np.float64
203
+ )
204
+ solver_type = LIBSVM_IMPL.index(self._impl)
205
+
206
+ # input validation
207
+ n_samples = _num_samples(X)
208
+ if solver_type != 2 and n_samples != y.shape[0]:
209
+ raise ValueError(
210
+ "X and y have incompatible shapes.\n"
211
+ + "X has %s samples, but y has %s." % (n_samples, y.shape[0])
212
+ )
213
+
214
+ if self.kernel == "precomputed" and n_samples != X.shape[1]:
215
+ raise ValueError(
216
+ "Precomputed matrix must be a square matrix."
217
+ " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
218
+ )
219
+
220
+ if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples:
221
+ raise ValueError(
222
+ "sample_weight and X have incompatible shapes: "
223
+ "%r vs %r\n"
224
+ "Note: Sparse matrices cannot be indexed w/"
225
+ "boolean masks (use `indices=True` in CV)."
226
+ % (sample_weight.shape, X.shape)
227
+ )
228
+
229
+ kernel = "precomputed" if callable(self.kernel) else self.kernel
230
+
231
+ if kernel == "precomputed":
232
+ # unused but needs to be a float for cython code that ignores
233
+ # it anyway
234
+ self._gamma = 0.0
235
+ elif isinstance(self.gamma, str):
236
+ if self.gamma == "scale":
237
+ # var = E[X^2] - E[X]^2 if sparse
238
+ X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
239
+ self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0
240
+ elif self.gamma == "auto":
241
+ self._gamma = 1.0 / X.shape[1]
242
+ elif isinstance(self.gamma, Real):
243
+ self._gamma = self.gamma
244
+
245
+ fit = self._sparse_fit if self._sparse else self._dense_fit
246
+ if self.verbose:
247
+ print("[LibSVM]", end="")
248
+
249
+ seed = rnd.randint(np.iinfo("i").max)
250
+ fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
251
+ # see comment on the other call to np.iinfo in this file
252
+
253
+ self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,)
254
+
255
+ # In binary case, we need to flip the sign of coef, intercept and
256
+ # decision function. Use self._intercept_ and self._dual_coef_
257
+ # internally.
258
+ self._intercept_ = self.intercept_.copy()
259
+ self._dual_coef_ = self.dual_coef_
260
+ if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
261
+ self.intercept_ *= -1
262
+ self.dual_coef_ = -self.dual_coef_
263
+
264
+ dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_
265
+ intercept_finiteness = np.isfinite(self._intercept_).all()
266
+ dual_coef_finiteness = np.isfinite(dual_coef).all()
267
+ if not (intercept_finiteness and dual_coef_finiteness):
268
+ raise ValueError(
269
+ "The dual coefficients or intercepts are not finite."
270
+ " The input data may contain large values and need to be"
271
+ " preprocessed."
272
+ )
273
+
274
+ # Since, in the case of SVC and NuSVC, the number of models optimized by
275
+ # libSVM could be greater than one (depending on the input), `n_iter_`
276
+ # stores an ndarray.
277
+ # For the other sub-classes (SVR, NuSVR, and OneClassSVM), the number of
278
+ # models optimized by libSVM is always one, so `n_iter_` stores an
279
+ # integer.
280
+ if self._impl in ["c_svc", "nu_svc"]:
281
+ self.n_iter_ = self._num_iter
282
+ else:
283
+ self.n_iter_ = self._num_iter.item()
284
+
285
+ return self
286
+
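In `fit` above, `gamma='scale'` is resolved to `1 / (n_features * X.var())`, with a sparse-aware variance for CSR input. A small sketch confirming the dense-case resolution; note that `_gamma` is a private attribute and is read here purely for illustration:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=40, n_features=6, random_state=0)
clf = SVC(kernel="rbf", gamma="scale").fit(X, y)

expected_gamma = 1.0 / (X.shape[1] * X.var())
np.testing.assert_allclose(clf._gamma, expected_gamma)  # private attribute, illustration only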
287
+ def _validate_targets(self, y):
288
+ """Validation of y and class_weight.
289
+
290
+ Default implementation for SVR and one-class; overridden in BaseSVC.
291
+ """
292
+ return column_or_1d(y, warn=True).astype(np.float64, copy=False)
293
+
294
+ def _warn_from_fit_status(self):
295
+ assert self.fit_status_ in (0, 1)
296
+ if self.fit_status_ == 1:
297
+ warnings.warn(
298
+ "Solver terminated early (max_iter=%i)."
299
+ " Consider pre-processing your data with"
300
+ " StandardScaler or MinMaxScaler."
301
+ % self.max_iter,
302
+ ConvergenceWarning,
303
+ )
304
+
305
+ def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
306
+ if callable(self.kernel):
307
+ # you must store a reference to X to compute the kernel in predict
308
+ # TODO: add keyword copy to copy on demand
309
+ self.__Xfit = X
310
+ X = self._compute_kernel(X)
311
+
312
+ if X.shape[0] != X.shape[1]:
313
+ raise ValueError("X.shape[0] should be equal to X.shape[1]")
314
+
315
+ libsvm.set_verbosity_wrap(self.verbose)
316
+
317
+ # we don't pass **self.get_params() to allow subclasses to
318
+ # add other parameters to __init__
319
+ (
320
+ self.support_,
321
+ self.support_vectors_,
322
+ self._n_support,
323
+ self.dual_coef_,
324
+ self.intercept_,
325
+ self._probA,
326
+ self._probB,
327
+ self.fit_status_,
328
+ self._num_iter,
329
+ ) = libsvm.fit(
330
+ X,
331
+ y,
332
+ svm_type=solver_type,
333
+ sample_weight=sample_weight,
334
+ class_weight=getattr(self, "class_weight_", np.empty(0)),
335
+ kernel=kernel,
336
+ C=self.C,
337
+ nu=self.nu,
338
+ probability=self.probability,
339
+ degree=self.degree,
340
+ shrinking=self.shrinking,
341
+ tol=self.tol,
342
+ cache_size=self.cache_size,
343
+ coef0=self.coef0,
344
+ gamma=self._gamma,
345
+ epsilon=self.epsilon,
346
+ max_iter=self.max_iter,
347
+ random_seed=random_seed,
348
+ )
349
+
350
+ self._warn_from_fit_status()
351
+
352
+ def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
353
+ X.data = np.asarray(X.data, dtype=np.float64, order="C")
354
+ X.sort_indices()
355
+
356
+ kernel_type = self._sparse_kernels.index(kernel)
357
+
358
+ libsvm_sparse.set_verbosity_wrap(self.verbose)
359
+
360
+ (
361
+ self.support_,
362
+ self.support_vectors_,
363
+ dual_coef_data,
364
+ self.intercept_,
365
+ self._n_support,
366
+ self._probA,
367
+ self._probB,
368
+ self.fit_status_,
369
+ self._num_iter,
370
+ ) = libsvm_sparse.libsvm_sparse_train(
371
+ X.shape[1],
372
+ X.data,
373
+ X.indices,
374
+ X.indptr,
375
+ y,
376
+ solver_type,
377
+ kernel_type,
378
+ self.degree,
379
+ self._gamma,
380
+ self.coef0,
381
+ self.tol,
382
+ self.C,
383
+ getattr(self, "class_weight_", np.empty(0)),
384
+ sample_weight,
385
+ self.nu,
386
+ self.cache_size,
387
+ self.epsilon,
388
+ int(self.shrinking),
389
+ int(self.probability),
390
+ self.max_iter,
391
+ random_seed,
392
+ )
393
+
394
+ self._warn_from_fit_status()
395
+
396
+ if hasattr(self, "classes_"):
397
+ n_class = len(self.classes_) - 1
398
+ else: # regression
399
+ n_class = 1
400
+ n_SV = self.support_vectors_.shape[0]
401
+
402
+ dual_coef_indices = np.tile(np.arange(n_SV), n_class)
403
+ if not n_SV:
404
+ self.dual_coef_ = sp.csr_matrix([])
405
+ else:
406
+ dual_coef_indptr = np.arange(
407
+ 0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class
408
+ )
409
+ self.dual_coef_ = sp.csr_matrix(
410
+ (dual_coef_data, dual_coef_indices, dual_coef_indptr), (n_class, n_SV)
411
+ )
412
+
413
+ def predict(self, X):
414
+ """Perform regression on samples in X.
415
+
416
+ For a one-class model, +1 (inlier) or -1 (outlier) is returned.
417
+
418
+ Parameters
419
+ ----------
420
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
421
+ For kernel="precomputed", the expected shape of X is
422
+ (n_samples_test, n_samples_train).
423
+
424
+ Returns
425
+ -------
426
+ y_pred : ndarray of shape (n_samples,)
427
+ The predicted values.
428
+ """
429
+ X = self._validate_for_predict(X)
430
+ predict = self._sparse_predict if self._sparse else self._dense_predict
431
+ return predict(X)
432
+
433
+ def _dense_predict(self, X):
434
+ X = self._compute_kernel(X)
435
+ if X.ndim == 1:
436
+ X = check_array(X, order="C", accept_large_sparse=False)
437
+
438
+ kernel = self.kernel
439
+ if callable(self.kernel):
440
+ kernel = "precomputed"
441
+ if X.shape[1] != self.shape_fit_[0]:
442
+ raise ValueError(
443
+ "X.shape[1] = %d should be equal to %d, "
444
+ "the number of samples at training time"
445
+ % (X.shape[1], self.shape_fit_[0])
446
+ )
447
+
448
+ svm_type = LIBSVM_IMPL.index(self._impl)
449
+
450
+ return libsvm.predict(
451
+ X,
452
+ self.support_,
453
+ self.support_vectors_,
454
+ self._n_support,
455
+ self._dual_coef_,
456
+ self._intercept_,
457
+ self._probA,
458
+ self._probB,
459
+ svm_type=svm_type,
460
+ kernel=kernel,
461
+ degree=self.degree,
462
+ coef0=self.coef0,
463
+ gamma=self._gamma,
464
+ cache_size=self.cache_size,
465
+ )
466
+
467
+ def _sparse_predict(self, X):
468
+ # Precondition: X is a csr_matrix of dtype np.float64.
469
+ kernel = self.kernel
470
+ if callable(kernel):
471
+ kernel = "precomputed"
472
+
473
+ kernel_type = self._sparse_kernels.index(kernel)
474
+
475
+ C = 0.0 # C is not useful here
476
+
477
+ return libsvm_sparse.libsvm_sparse_predict(
478
+ X.data,
479
+ X.indices,
480
+ X.indptr,
481
+ self.support_vectors_.data,
482
+ self.support_vectors_.indices,
483
+ self.support_vectors_.indptr,
484
+ self._dual_coef_.data,
485
+ self._intercept_,
486
+ LIBSVM_IMPL.index(self._impl),
487
+ kernel_type,
488
+ self.degree,
489
+ self._gamma,
490
+ self.coef0,
491
+ self.tol,
492
+ C,
493
+ getattr(self, "class_weight_", np.empty(0)),
494
+ self.nu,
495
+ self.epsilon,
496
+ self.shrinking,
497
+ self.probability,
498
+ self._n_support,
499
+ self._probA,
500
+ self._probB,
501
+ )
502
+
503
+ def _compute_kernel(self, X):
504
+ """Return the data transformed by a callable kernel"""
505
+ if callable(self.kernel):
506
+ # in the case of precomputed kernel given as a function, we
507
+ # have to compute explicitly the kernel matrix
508
+ kernel = self.kernel(X, self.__Xfit)
509
+ if sp.issparse(kernel):
510
+ kernel = kernel.toarray()
511
+ X = np.asarray(kernel, dtype=np.float64, order="C")
512
+ return X
513
+
514
+ def _decision_function(self, X):
515
+ """Evaluates the decision function for the samples in X.
516
+
517
+ Parameters
518
+ ----------
519
+ X : array-like of shape (n_samples, n_features)
520
+
521
+ Returns
522
+ -------
523
+ X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
524
+ Returns the decision function of the sample for each class
525
+ in the model.
526
+ """
527
+ # NOTE: _validate_for_predict contains check for is_fitted
528
+ # hence must be placed before any other attributes are used.
529
+ X = self._validate_for_predict(X)
530
+ X = self._compute_kernel(X)
531
+
532
+ if self._sparse:
533
+ dec_func = self._sparse_decision_function(X)
534
+ else:
535
+ dec_func = self._dense_decision_function(X)
536
+
537
+ # In binary case, we need to flip the sign of coef, intercept and
538
+ # decision function.
539
+ if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
540
+ return -dec_func.ravel()
541
+
542
+ return dec_func
543
+
544
+ def _dense_decision_function(self, X):
545
+ X = check_array(X, dtype=np.float64, order="C", accept_large_sparse=False)
546
+
547
+ kernel = self.kernel
548
+ if callable(kernel):
549
+ kernel = "precomputed"
550
+
551
+ return libsvm.decision_function(
552
+ X,
553
+ self.support_,
554
+ self.support_vectors_,
555
+ self._n_support,
556
+ self._dual_coef_,
557
+ self._intercept_,
558
+ self._probA,
559
+ self._probB,
560
+ svm_type=LIBSVM_IMPL.index(self._impl),
561
+ kernel=kernel,
562
+ degree=self.degree,
563
+ cache_size=self.cache_size,
564
+ coef0=self.coef0,
565
+ gamma=self._gamma,
566
+ )
567
+
568
+ def _sparse_decision_function(self, X):
569
+ X.data = np.asarray(X.data, dtype=np.float64, order="C")
570
+
571
+ kernel = self.kernel
572
+ if callable(kernel):
573
+ kernel = "precomputed"
574
+
575
+ kernel_type = self._sparse_kernels.index(kernel)
576
+
577
+ return libsvm_sparse.libsvm_sparse_decision_function(
578
+ X.data,
579
+ X.indices,
580
+ X.indptr,
581
+ self.support_vectors_.data,
582
+ self.support_vectors_.indices,
583
+ self.support_vectors_.indptr,
584
+ self._dual_coef_.data,
585
+ self._intercept_,
586
+ LIBSVM_IMPL.index(self._impl),
587
+ kernel_type,
588
+ self.degree,
589
+ self._gamma,
590
+ self.coef0,
591
+ self.tol,
592
+ self.C,
593
+ getattr(self, "class_weight_", np.empty(0)),
594
+ self.nu,
595
+ self.epsilon,
596
+ self.shrinking,
597
+ self.probability,
598
+ self._n_support,
599
+ self._probA,
600
+ self._probB,
601
+ )
602
+
603
+ def _validate_for_predict(self, X):
604
+ check_is_fitted(self)
605
+
606
+ if not callable(self.kernel):
607
+ X = self._validate_data(
608
+ X,
609
+ accept_sparse="csr",
610
+ dtype=np.float64,
611
+ order="C",
612
+ accept_large_sparse=False,
613
+ reset=False,
614
+ )
615
+
616
+ if self._sparse and not sp.issparse(X):
617
+ X = sp.csr_matrix(X)
618
+ if self._sparse:
619
+ X.sort_indices()
620
+
621
+ if sp.issparse(X) and not self._sparse and not callable(self.kernel):
622
+ raise ValueError(
623
+ "cannot use sparse input in %r trained on dense data"
624
+ % type(self).__name__
625
+ )
626
+
627
+ if self.kernel == "precomputed":
628
+ if X.shape[1] != self.shape_fit_[0]:
629
+ raise ValueError(
630
+ "X.shape[1] = %d should be equal to %d, "
631
+ "the number of samples at training time"
632
+ % (X.shape[1], self.shape_fit_[0])
633
+ )
634
+ # Fixes https://nvd.nist.gov/vuln/detail/CVE-2020-28975
635
+ # Check that _n_support is consistent with support_vectors
636
+ sv = self.support_vectors_
637
+ if not self._sparse and sv.size > 0 and self.n_support_.sum() != sv.shape[0]:
638
+ raise ValueError(
639
+ f"The internal representation of {self.__class__.__name__} was altered"
640
+ )
641
+ return X
642
+
643
+ @property
644
+ def coef_(self):
645
+ """Weights assigned to the features when `kernel="linear"`.
646
+
647
+ Returns
648
+ -------
649
+ ndarray of shape (n_features, n_classes)
650
+ """
651
+ if self.kernel != "linear":
652
+ raise AttributeError("coef_ is only available when using a linear kernel")
653
+
654
+ coef = self._get_coef()
655
+
656
+ # coef_ being a read-only property, it's better to mark the value as
657
+ # immutable to avoid hiding potential bugs for the unsuspecting user.
658
+ if sp.issparse(coef):
659
+ # sparse matrix do not have global flags
660
+ coef.data.flags.writeable = False
661
+ else:
662
+ # regular dense array
663
+ coef.flags.writeable = False
664
+ return coef
665
+
666
+ def _get_coef(self):
667
+ return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
668
+
669
+ @property
670
+ def n_support_(self):
671
+ """Number of support vectors for each class."""
672
+ try:
673
+ check_is_fitted(self)
674
+ except NotFittedError:
675
+ raise AttributeError
676
+
677
+ svm_type = LIBSVM_IMPL.index(self._impl)
678
+ if svm_type in (0, 1):
679
+ return self._n_support
680
+ else:
681
+ # SVR and OneClass
682
+ # _n_support has size 2, we make it size 1
683
+ return np.array([self._n_support[0]])
684
+
685
+
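Two details of `BaseLibSVM` worth seeing from user code: `coef_` is returned as a deliberately read-only array, and `n_support_` holds the per-class support-vector counts that `_validate_for_predict` cross-checks against `support_vectors_`. A hedged sketch on illustrative data:

from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=60, n_features=4, random_state=0)
clf = SVC(kernel="linear").fit(X, y)

print(clf.n_support_)  # one count per class
assert clf.n_support_.sum() == clf.support_vectors_.shape[0]

try:
    clf.coef_[0, 0] = 0.0  # the property marks the array non-writeable
except ValueError as exc:
    print("coef_ is read-only:", exc)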
686
+ class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta):
687
+ """ABC for LibSVM-based classifiers."""
688
+
689
+ _parameter_constraints: dict = {
690
+ **BaseLibSVM._parameter_constraints,
691
+ "decision_function_shape": [StrOptions({"ovr", "ovo"})],
692
+ "break_ties": ["boolean"],
693
+ }
694
+ for unused_param in ["epsilon", "nu"]:
695
+ _parameter_constraints.pop(unused_param)
696
+
697
+ @abstractmethod
698
+ def __init__(
699
+ self,
700
+ kernel,
701
+ degree,
702
+ gamma,
703
+ coef0,
704
+ tol,
705
+ C,
706
+ nu,
707
+ shrinking,
708
+ probability,
709
+ cache_size,
710
+ class_weight,
711
+ verbose,
712
+ max_iter,
713
+ decision_function_shape,
714
+ random_state,
715
+ break_ties,
716
+ ):
717
+ self.decision_function_shape = decision_function_shape
718
+ self.break_ties = break_ties
719
+ super().__init__(
720
+ kernel=kernel,
721
+ degree=degree,
722
+ gamma=gamma,
723
+ coef0=coef0,
724
+ tol=tol,
725
+ C=C,
726
+ nu=nu,
727
+ epsilon=0.0,
728
+ shrinking=shrinking,
729
+ probability=probability,
730
+ cache_size=cache_size,
731
+ class_weight=class_weight,
732
+ verbose=verbose,
733
+ max_iter=max_iter,
734
+ random_state=random_state,
735
+ )
736
+
737
+ def _validate_targets(self, y):
738
+ y_ = column_or_1d(y, warn=True)
739
+ check_classification_targets(y)
740
+ cls, y = np.unique(y_, return_inverse=True)
741
+ self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_)
742
+ if len(cls) < 2:
743
+ raise ValueError(
744
+ "The number of classes has to be greater than one; got %d class"
745
+ % len(cls)
746
+ )
747
+
748
+ self.classes_ = cls
749
+
750
+ return np.asarray(y, dtype=np.float64, order="C")
751
+
752
+ def decision_function(self, X):
753
+ """Evaluate the decision function for the samples in X.
754
+
755
+ Parameters
756
+ ----------
757
+ X : array-like of shape (n_samples, n_features)
758
+ The input samples.
759
+
760
+ Returns
761
+ -------
762
+ X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
763
+ Returns the decision function of the sample for each class
764
+ in the model.
765
+ If decision_function_shape='ovr', the shape is (n_samples,
766
+ n_classes).
767
+
768
+ Notes
769
+ -----
770
+ If decision_function_shape='ovo', the function values are proportional
771
+ to the distance of the samples X to the separating hyperplane. If the
772
+ exact distances are required, divide the function values by the norm of
773
+ the weight vector (``coef_``). See also `this question
774
+ <https://stats.stackexchange.com/questions/14876/
775
+ interpreting-distance-from-hyperplane-in-svm>`_ for further details.
776
+ If decision_function_shape='ovr', the decision function is a monotonic
777
+ transformation of ovo decision function.
778
+ """
779
+ dec = self._decision_function(X)
780
+ if self.decision_function_shape == "ovr" and len(self.classes_) > 2:
781
+ return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
782
+ return dec
783
+
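As the docstring above explains, `decision_function` returns one column per pair of classes for `'ovo'` and one per class for `'ovr'`. With four classes the two shapes differ, which the following sketch illustrates on synthetic data:

from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(
    n_samples=200, n_features=8, n_informative=6, n_classes=4, random_state=0
)

ovo = SVC(decision_function_shape="ovo").fit(X, y)
ovr = SVC(decision_function_shape="ovr").fit(X, y)

print(ovo.decision_function(X).shape)  # (200, 6): n_classes * (n_classes - 1) / 2
print(ovr.decision_function(X).shape)  # (200, 4): one column per class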
784
+ def predict(self, X):
785
+ """Perform classification on samples in X.
786
+
787
+ For a one-class model, +1 or -1 is returned.
788
+
789
+ Parameters
790
+ ----------
791
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
792
+ (n_samples_test, n_samples_train)
793
+ For kernel="precomputed", the expected shape of X is
794
+ (n_samples_test, n_samples_train).
795
+
796
+ Returns
797
+ -------
798
+ y_pred : ndarray of shape (n_samples,)
799
+ Class labels for samples in X.
800
+ """
801
+ check_is_fitted(self)
802
+ if self.break_ties and self.decision_function_shape == "ovo":
803
+ raise ValueError(
804
+ "break_ties must be False when decision_function_shape is 'ovo'"
805
+ )
806
+
807
+ if (
808
+ self.break_ties
809
+ and self.decision_function_shape == "ovr"
810
+ and len(self.classes_) > 2
811
+ ):
812
+ y = np.argmax(self.decision_function(X), axis=1)
813
+ else:
814
+ y = super().predict(X)
815
+ return self.classes_.take(np.asarray(y, dtype=np.intp))
816
+
817
+ # Hacky way of getting predict_proba to raise an AttributeError when
818
+ # probability=False using properties. Do not use this in new code; when
819
+ # probabilities are not available depending on a setting, introduce two
820
+ # estimators.
821
+ def _check_proba(self):
822
+ if not self.probability:
823
+ raise AttributeError(
824
+ "predict_proba is not available when probability=False"
825
+ )
826
+ if self._impl not in ("c_svc", "nu_svc"):
827
+ raise AttributeError("predict_proba only implemented for SVC and NuSVC")
828
+ return True
829
+
830
+ @available_if(_check_proba)
831
+ def predict_proba(self, X):
832
+ """Compute probabilities of possible outcomes for samples in X.
833
+
834
+ The model needs to have probability information computed at training
835
+ time: fit with attribute `probability` set to True.
836
+
837
+ Parameters
838
+ ----------
839
+ X : array-like of shape (n_samples, n_features)
840
+ For kernel="precomputed", the expected shape of X is
841
+ (n_samples_test, n_samples_train).
842
+
843
+ Returns
844
+ -------
845
+ T : ndarray of shape (n_samples, n_classes)
846
+ Returns the probability of the sample for each class in
847
+ the model. The columns correspond to the classes in sorted
848
+ order, as they appear in the attribute :term:`classes_`.
849
+
850
+ Notes
851
+ -----
852
+ The probability model is created using cross validation, so
853
+ the results can be slightly different than those obtained by
854
+ predict. Also, it will produce meaningless results on very small
855
+ datasets.
856
+ """
857
+ X = self._validate_for_predict(X)
858
+ if self.probA_.size == 0 or self.probB_.size == 0:
859
+ raise NotFittedError(
860
+ "predict_proba is not available when fitted with probability=False"
861
+ )
862
+ pred_proba = (
863
+ self._sparse_predict_proba if self._sparse else self._dense_predict_proba
864
+ )
865
+ return pred_proba(X)
866
+
867
+ @available_if(_check_proba)
868
+ def predict_log_proba(self, X):
869
+ """Compute log probabilities of possible outcomes for samples in X.
870
+
871
+ The model needs to have probability information computed at training
872
+ time: fit with attribute `probability` set to True.
873
+
874
+ Parameters
875
+ ----------
876
+ X : array-like of shape (n_samples, n_features) or \
877
+ (n_samples_test, n_samples_train)
878
+ For kernel="precomputed", the expected shape of X is
879
+ (n_samples_test, n_samples_train).
880
+
881
+ Returns
882
+ -------
883
+ T : ndarray of shape (n_samples, n_classes)
884
+ Returns the log-probabilities of the sample for each class in
885
+ the model. The columns correspond to the classes in sorted
886
+ order, as they appear in the attribute :term:`classes_`.
887
+
888
+ Notes
889
+ -----
890
+ The probability model is created using cross validation, so
891
+ the results can be slightly different than those obtained by
892
+ predict. Also, it will produce meaningless results on very small
893
+ datasets.
894
+ """
895
+ return np.log(self.predict_proba(X))
896
+
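`predict_proba` and `predict_log_proba` are only exposed when the estimator was built with `probability=True`, and because the Platt-scaling model is fitted by internal cross validation its argmax can occasionally disagree with `predict`, as the Notes warn. A brief sketch on illustrative data:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=100, n_features=5, random_state=0)

clf = SVC(probability=True, random_state=0).fit(X, y)
proba = clf.predict_proba(X)
print(proba.shape)                       # (100, 2), columns follow classes_
print(np.allclose(proba.sum(axis=1), 1))

plain = SVC().fit(X, y)                  # probability=False (default)
print(hasattr(plain, "predict_proba"))   # False: the attribute is hidden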
897
+ def _dense_predict_proba(self, X):
898
+ X = self._compute_kernel(X)
899
+
900
+ kernel = self.kernel
901
+ if callable(kernel):
902
+ kernel = "precomputed"
903
+
904
+ svm_type = LIBSVM_IMPL.index(self._impl)
905
+ pprob = libsvm.predict_proba(
906
+ X,
907
+ self.support_,
908
+ self.support_vectors_,
909
+ self._n_support,
910
+ self._dual_coef_,
911
+ self._intercept_,
912
+ self._probA,
913
+ self._probB,
914
+ svm_type=svm_type,
915
+ kernel=kernel,
916
+ degree=self.degree,
917
+ cache_size=self.cache_size,
918
+ coef0=self.coef0,
919
+ gamma=self._gamma,
920
+ )
921
+
922
+ return pprob
923
+
924
+ def _sparse_predict_proba(self, X):
925
+ X.data = np.asarray(X.data, dtype=np.float64, order="C")
926
+
927
+ kernel = self.kernel
928
+ if callable(kernel):
929
+ kernel = "precomputed"
930
+
931
+ kernel_type = self._sparse_kernels.index(kernel)
932
+
933
+ return libsvm_sparse.libsvm_sparse_predict_proba(
934
+ X.data,
935
+ X.indices,
936
+ X.indptr,
937
+ self.support_vectors_.data,
938
+ self.support_vectors_.indices,
939
+ self.support_vectors_.indptr,
940
+ self._dual_coef_.data,
941
+ self._intercept_,
942
+ LIBSVM_IMPL.index(self._impl),
943
+ kernel_type,
944
+ self.degree,
945
+ self._gamma,
946
+ self.coef0,
947
+ self.tol,
948
+ self.C,
949
+ getattr(self, "class_weight_", np.empty(0)),
950
+ self.nu,
951
+ self.epsilon,
952
+ self.shrinking,
953
+ self.probability,
954
+ self._n_support,
955
+ self._probA,
956
+ self._probB,
957
+ )
958
+
959
+ def _get_coef(self):
960
+ if self.dual_coef_.shape[0] == 1:
961
+ # binary classifier
962
+ coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
963
+ else:
964
+ # 1vs1 classifier
965
+ coef = _one_vs_one_coef(
966
+ self.dual_coef_, self._n_support, self.support_vectors_
967
+ )
968
+ if sp.issparse(coef[0]):
969
+ coef = sp.vstack(coef).tocsr()
970
+ else:
971
+ coef = np.vstack(coef)
972
+
973
+ return coef
974
+
975
+ @property
976
+ def probA_(self):
977
+ """Parameter learned in Platt scaling when `probability=True`.
978
+
979
+ Returns
980
+ -------
981
+ ndarray of shape (n_classes * (n_classes - 1) / 2)
982
+ """
983
+ return self._probA
984
+
985
+ @property
986
+ def probB_(self):
987
+ """Parameter learned in Platt scaling when `probability=True`.
988
+
989
+ Returns
990
+ -------
991
+ ndarray of shape (n_classes * (n_classes - 1) / 2)
992
+ """
993
+ return self._probB
994
+
995
+
996
+ def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
997
+ """Find the liblinear magic number for the solver.
998
+
999
+ This number depends on the values of the following attributes:
1000
+ - multi_class
1001
+ - penalty
1002
+ - loss
1003
+ - dual
1004
+
1005
+ The same number is also internally used by LibLinear to determine
1006
+ which solver to use.
1007
+ """
1008
+ # nested dicts containing level 1: available loss functions,
1009
+ # level2: available penalties for the given loss function,
1010
+ # level3: whether the dual solver is available for the specified
1011
+ # combination of loss function and penalty
1012
+ _solver_type_dict = {
1013
+ "logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}},
1014
+ "hinge": {"l2": {True: 3}},
1015
+ "squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}},
1016
+ "epsilon_insensitive": {"l2": {True: 13}},
1017
+ "squared_epsilon_insensitive": {"l2": {False: 11, True: 12}},
1018
+ "crammer_singer": 4,
1019
+ }
1020
+
1021
+ if multi_class == "crammer_singer":
1022
+ return _solver_type_dict[multi_class]
1023
+ elif multi_class != "ovr":
1024
+ raise ValueError(
1025
+ "`multi_class` must be one of `ovr`, `crammer_singer`, got %r" % multi_class
1026
+ )
1027
+
1028
+ _solver_pen = _solver_type_dict.get(loss, None)
1029
+ if _solver_pen is None:
1030
+ error_string = "loss='%s' is not supported" % loss
1031
+ else:
1032
+ _solver_dual = _solver_pen.get(penalty, None)
1033
+ if _solver_dual is None:
1034
+ error_string = (
1035
+ "The combination of penalty='%s' and loss='%s' is not supported"
1036
+ % (penalty, loss)
1037
+ )
1038
+ else:
1039
+ solver_num = _solver_dual.get(dual, None)
1040
+ if solver_num is None:
1041
+ error_string = (
1042
+ "The combination of penalty='%s' and "
1043
+ "loss='%s' is not supported when dual=%s" % (penalty, loss, dual)
1044
+ )
1045
+ else:
1046
+ return solver_num
1047
+ raise ValueError(
1048
+ "Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r"
1049
+ % (error_string, penalty, loss, dual)
1050
+ )
1051
+
1052
+
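`_get_liblinear_solver_type` resolves liblinear's internal solver id through the nested loss/penalty/dual dictionary above. The sketch below mirrors (rather than calls) that lookup for a subset of the table, just to make the encoding concrete:

# level 1: loss, level 2: penalty, level 3: dual flag -> liblinear solver id
SOLVERS = {
    "logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}},
    "squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}},
    "hinge": {"l2": {True: 3}},
}

def solver_id(loss, penalty, dual):
    try:
        return SOLVERS[loss][penalty][dual]
    except KeyError:
        raise ValueError(f"unsupported combination: {loss}/{penalty}/dual={dual}")

print(solver_id("squared_hinge", "l2", True))          # 1
print(solver_id("logistic_regression", "l1", False))   # 6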
1053
+ def _fit_liblinear(
1054
+ X,
1055
+ y,
1056
+ C,
1057
+ fit_intercept,
1058
+ intercept_scaling,
1059
+ class_weight,
1060
+ penalty,
1061
+ dual,
1062
+ verbose,
1063
+ max_iter,
1064
+ tol,
1065
+ random_state=None,
1066
+ multi_class="ovr",
1067
+ loss="logistic_regression",
1068
+ epsilon=0.1,
1069
+ sample_weight=None,
1070
+ ):
1071
+ """Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.
1072
+
1073
+ Preprocessing is done in this function before supplying it to liblinear.
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1078
+ Training vector, where `n_samples` is the number of samples and
1079
+ `n_features` is the number of features.
1080
+
1081
+ y : array-like of shape (n_samples,)
1082
+ Target vector relative to X
1083
+
1084
+ C : float
1085
+ Inverse of regularization strength. The lower the C, the higher
1086
+ the penalization.
1087
+
1088
+ fit_intercept : bool
1089
+ Whether or not to fit an intercept. If set to True, the feature vector
1090
+ is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
1091
+ 1 corresponds to the intercept. If set to False, no intercept will be
1092
+ used in calculations (i.e. data is expected to be already centered).
1093
+
1094
+ intercept_scaling : float
1095
+ Liblinear internally penalizes the intercept, treating it like any
1096
+ other term in the feature vector. To reduce the impact of the
1097
+ regularization on the intercept, the `intercept_scaling` parameter can
1098
+ be set to a value greater than 1; the higher the value of
1099
+ `intercept_scaling`, the lower the impact of regularization on it.
1100
+ Then, the weights become `[w_x_1, ..., w_x_n,
1101
+ w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
1102
+ the feature weights and the intercept weight is scaled by
1103
+ `intercept_scaling`. This scaling allows the intercept term to have a
1104
+ different regularization behavior compared to the other features.
1105
+
1106
+ class_weight : dict or 'balanced', default=None
1107
+ Weights associated with classes in the form ``{class_label: weight}``.
1108
+ If not given, all classes are supposed to have weight one. For
1109
+ multi-output problems, a list of dicts can be provided in the same
1110
+ order as the columns of y.
1111
+
1112
+ The "balanced" mode uses the values of y to automatically adjust
1113
+ weights inversely proportional to class frequencies in the input data
1114
+ as ``n_samples / (n_classes * np.bincount(y))``
1115
+
1116
+ penalty : {'l1', 'l2'}
1117
+ The norm of the penalty used in regularization.
1118
+
1119
+ dual : bool
1120
+ Dual or primal formulation.
1121
+
1122
+ verbose : int
1123
+ Set verbose to any positive number for verbosity.
1124
+
1125
+ max_iter : int
1126
+ Number of iterations.
1127
+
1128
+ tol : float
1129
+ Stopping condition.
1130
+
1131
+ random_state : int, RandomState instance or None, default=None
1132
+ Controls the pseudo random number generation for shuffling the data.
1133
+ Pass an int for reproducible output across multiple function calls.
1134
+ See :term:`Glossary <random_state>`.
1135
+
1136
+ multi_class : {'ovr', 'crammer_singer'}, default='ovr'
1137
+ `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
1138
+ optimizes a joint objective over all classes.
1139
+ While `crammer_singer` is interesting from a theoretical perspective
1140
+ as it is consistent, it is seldom used in practice and rarely leads to
1141
+ better accuracy and is more expensive to compute.
1142
+ If `crammer_singer` is chosen, the options loss, penalty and dual will
1143
+ be ignored.
1144
+
1145
+ loss : {'logistic_regression', 'hinge', 'squared_hinge', \
1146
+ 'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
1147
+ default='logistic_regression'
1148
+ The loss function used to fit the model.
1149
+
1150
+ epsilon : float, default=0.1
1151
+ Epsilon parameter in the epsilon-insensitive loss function. Note
1152
+ that the value of this parameter depends on the scale of the target
1153
+ variable y. If unsure, set epsilon=0.
1154
+
1155
+ sample_weight : array-like of shape (n_samples,), default=None
1156
+ Weights assigned to each sample.
1157
+
1158
+ Returns
1159
+ -------
1160
+ coef_ : ndarray of shape (n_classes, n_features)
1161
+ The coefficient vector obtained by minimizing the objective function.
1162
+
1163
+ intercept_ : float
1164
+ The intercept term added to the vector.
1165
+
1166
+ n_iter_ : array of int
1167
+ Number of iterations run for each class.
1168
+ """
1169
+ if loss not in ["epsilon_insensitive", "squared_epsilon_insensitive"]:
1170
+ enc = LabelEncoder()
1171
+ y_ind = enc.fit_transform(y)
1172
+ classes_ = enc.classes_
1173
+ if len(classes_) < 2:
1174
+ raise ValueError(
1175
+ "This solver needs samples of at least 2 classes"
1176
+ " in the data, but the data contains only one"
1177
+ " class: %r"
1178
+ % classes_[0]
1179
+ )
1180
+
1181
+ class_weight_ = compute_class_weight(class_weight, classes=classes_, y=y)
1182
+ else:
1183
+ class_weight_ = np.empty(0, dtype=np.float64)
1184
+ y_ind = y
1185
+ liblinear.set_verbosity_wrap(verbose)
1186
+ rnd = check_random_state(random_state)
1187
+ if verbose:
1188
+ print("[LibLinear]", end="")
1189
+
1190
+ # LinearSVC breaks when intercept_scaling is <= 0
1191
+ bias = -1.0
1192
+ if fit_intercept:
1193
+ if intercept_scaling <= 0:
1194
+ raise ValueError(
1195
+ "Intercept scaling is %r but needs to be greater "
1196
+ "than 0. To disable fitting an intercept,"
1197
+ " set fit_intercept=False." % intercept_scaling
1198
+ )
1199
+ else:
1200
+ bias = intercept_scaling
1201
+
1202
+ libsvm.set_verbosity_wrap(verbose)
1203
+ libsvm_sparse.set_verbosity_wrap(verbose)
1204
+ liblinear.set_verbosity_wrap(verbose)
1205
+
1206
+ # Liblinear doesn't support 64bit sparse matrix indices yet
1207
+ if sp.issparse(X):
1208
+ _check_large_sparse(X)
1209
+
1210
+ # LibLinear wants targets as doubles, even for classification
1211
+ y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
1212
+ y_ind = np.require(y_ind, requirements="W")
1213
+
1214
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
1215
+
1216
+ solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
1217
+ raw_coef_, n_iter_ = liblinear.train_wrap(
1218
+ X,
1219
+ y_ind,
1220
+ sp.issparse(X),
1221
+ solver_type,
1222
+ tol,
1223
+ bias,
1224
+ C,
1225
+ class_weight_,
1226
+ max_iter,
1227
+ rnd.randint(np.iinfo("i").max),
1228
+ epsilon,
1229
+ sample_weight,
1230
+ )
1231
+ # Regarding rnd.randint(..) in the above signature:
1232
+ # seed for srand in range [0..INT_MAX); due to limitations in Numpy
1233
+ # on 32-bit platforms, we can't get to the UINT_MAX limit that
1234
+ # srand supports
1235
+ n_iter_max = max(n_iter_)
1236
+ if n_iter_max >= max_iter:
1237
+ warnings.warn(
1238
+ "Liblinear failed to converge, increase the number of iterations.",
1239
+ ConvergenceWarning,
1240
+ )
1241
+
1242
+ if fit_intercept:
1243
+ coef_ = raw_coef_[:, :-1]
1244
+ intercept_ = intercept_scaling * raw_coef_[:, -1]
1245
+ else:
1246
+ coef_ = raw_coef_
1247
+ intercept_ = 0.0
1248
+
1249
+ return coef_, intercept_, n_iter_
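At the end of `_fit_liblinear` the bias column is split off: `coef_` keeps every column but the last, and `intercept_` is the last column scaled by `intercept_scaling`. User code never calls this helper directly; a hedged sketch of the public path through `LinearSVC`, passing `dual=False` since the toy data has more samples than features:

from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=500, n_features=20, random_state=0)

clf = LinearSVC(dual=False, C=1.0, max_iter=2000).fit(X, y)
print(clf.coef_.shape)       # (1, 20) for a binary problem
print(clf.intercept_.shape)  # (1,)
print(clf.n_iter_)           # iterations reported back by liblinear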
venv/lib/python3.10/site-packages/sklearn/svm/_bounds.py ADDED
@@ -0,0 +1,94 @@
1
+ """Determination of parameter bounds"""
2
+ # Author: Paolo Losi
3
+ # License: BSD 3 clause
4
+
5
+ from numbers import Real
6
+
7
+ import numpy as np
8
+
9
+ from ..preprocessing import LabelBinarizer
10
+ from ..utils._param_validation import Interval, StrOptions, validate_params
11
+ from ..utils.extmath import safe_sparse_dot
12
+ from ..utils.validation import check_array, check_consistent_length
13
+
14
+
15
+ @validate_params(
16
+ {
17
+ "X": ["array-like", "sparse matrix"],
18
+ "y": ["array-like"],
19
+ "loss": [StrOptions({"squared_hinge", "log"})],
20
+ "fit_intercept": ["boolean"],
21
+ "intercept_scaling": [Interval(Real, 0, None, closed="neither")],
22
+ },
23
+ prefer_skip_nested_validation=True,
24
+ )
25
+ def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
26
+ """Return the lowest bound for C.
27
+
28
+ The lower bound for C is computed such that for C in (l1_min_C, infinity)
29
+ the model is guaranteed not to be empty. This applies to l1 penalized
30
+ classifiers, such as LinearSVC with penalty='l1' and
31
+ linear_model.LogisticRegression with penalty='l1'.
32
+
33
+ This value is valid if class_weight parameter in fit() is not set.
34
+
35
+ Parameters
36
+ ----------
37
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
38
+ Training vector, where `n_samples` is the number of samples and
39
+ `n_features` is the number of features.
40
+
41
+ y : array-like of shape (n_samples,)
42
+ Target vector relative to X.
43
+
44
+ loss : {'squared_hinge', 'log'}, default='squared_hinge'
45
+ Specifies the loss function.
46
+ With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
47
+ With 'log' it is the loss of logistic regression models.
48
+
49
+ fit_intercept : bool, default=True
50
+ Specifies if the intercept should be fitted by the model.
51
+ It must match the fit() method parameter.
52
+
53
+ intercept_scaling : float, default=1.0
54
+ When fit_intercept is True, instance vector x becomes
55
+ [x, intercept_scaling],
56
+ i.e. a "synthetic" feature with constant value equals to
57
+ intercept_scaling is appended to the instance vector.
58
+ It must match the fit() method parameter.
59
+
60
+ Returns
61
+ -------
62
+ l1_min_c : float
63
+ Minimum value for C.
64
+
65
+ Examples
66
+ --------
67
+ >>> from sklearn.svm import l1_min_c
68
+ >>> from sklearn.datasets import make_classification
69
+ >>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
70
+ >>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
71
+ 0.0044
72
+ """
73
+
74
+ X = check_array(X, accept_sparse="csc")
75
+ check_consistent_length(X, y)
76
+
77
+ Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
78
+ # maximum absolute value over classes and features
79
+ den = np.max(np.abs(safe_sparse_dot(Y, X)))
80
+ if fit_intercept:
81
+ bias = np.full(
82
+ (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
83
+ )
84
+ den = max(den, abs(np.dot(Y, bias)).max())
85
+
86
+ if den == 0.0:
87
+ raise ValueError(
88
+ "Ill-posed l1_min_c calculation: l1 will always "
89
+ "select zero coefficients for this data"
90
+ )
91
+ if loss == "squared_hinge":
92
+ return 0.5 / den
93
+ else: # loss == 'log':
94
+ return 2.0 / den
venv/lib/python3.10/site-packages/sklearn/svm/_classes.py ADDED
@@ -0,0 +1,1832 @@
1
+ import warnings
2
+ from numbers import Integral, Real
3
+
4
+ import numpy as np
5
+
6
+ from ..base import BaseEstimator, OutlierMixin, RegressorMixin, _fit_context
7
+ from ..linear_model._base import LinearClassifierMixin, LinearModel, SparseCoefMixin
8
+ from ..utils._param_validation import Hidden, Interval, StrOptions
9
+ from ..utils.multiclass import check_classification_targets
10
+ from ..utils.validation import _num_samples
11
+ from ._base import BaseLibSVM, BaseSVC, _fit_liblinear, _get_liblinear_solver_type
12
+
13
+
14
+ def _validate_dual_parameter(dual, loss, penalty, multi_class, X):
15
+ """Helper function to assign the value of dual parameter."""
16
+ if dual == "auto":
17
+ if X.shape[0] < X.shape[1]:
18
+ try:
19
+ _get_liblinear_solver_type(multi_class, penalty, loss, True)
20
+ return True
21
+ except ValueError: # dual not supported for the combination
22
+ return False
23
+ else:
24
+ try:
25
+ _get_liblinear_solver_type(multi_class, penalty, loss, False)
26
+ return False
27
+ except ValueError: # primal not supported by the combination
28
+ return True
29
+ # TODO 1.5
30
+ elif dual == "warn":
31
+ warnings.warn(
32
+ (
33
+ "The default value of `dual` will change from `True` to `'auto'` in"
34
+ " 1.5. Set the value of `dual` explicitly to suppress the warning."
35
+ ),
36
+ FutureWarning,
37
+ )
38
+ return True
39
+ else:
40
+ return dual
41
+
42
+
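`_validate_dual_parameter` resolves `dual='auto'` by preferring the dual formulation when there are fewer samples than features and the requested loss/penalty combination supports it, falling back to the primal otherwise. The sketch below mirrors only the sample-versus-feature rule (the solver-support fallback is omitted):

def resolve_dual(n_samples, n_features, dual="auto"):
    # Simplified mirror of _validate_dual_parameter: prefer the dual
    # problem for wide data; explicit booleans are passed through as-is.
    if dual == "auto":
        return n_samples < n_features
    return dual

print(resolve_dual(20, 50))   # True  -> dual problem (wide data)
print(resolve_dual(200, 5))   # False -> primal problem (tall data)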
43
+ class LinearSVC(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
44
+ """Linear Support Vector Classification.
45
+
46
+ Similar to SVC with parameter kernel='linear', but implemented in terms of
47
+ liblinear rather than libsvm, so it has more flexibility in the choice of
48
+ penalties and loss functions and should scale better to large numbers of
49
+ samples.
50
+
51
+ The main differences between :class:`~sklearn.svm.LinearSVC` and
52
+ :class:`~sklearn.svm.SVC` lie in the loss function used by default, and in
53
+ the handling of intercept regularization between those two implementations.
54
+
55
+ This class supports both dense and sparse input and the multiclass support
56
+ is handled according to a one-vs-the-rest scheme.
57
+
58
+ Read more in the :ref:`User Guide <svm_classification>`.
59
+
60
+ Parameters
61
+ ----------
62
+ penalty : {'l1', 'l2'}, default='l2'
63
+ Specifies the norm used in the penalization. The 'l2'
64
+ penalty is the standard used in SVC. The 'l1' leads to ``coef_``
65
+ vectors that are sparse.
66
+
67
+ loss : {'hinge', 'squared_hinge'}, default='squared_hinge'
68
+ Specifies the loss function. 'hinge' is the standard SVM loss
69
+ (used e.g. by the SVC class) while 'squared_hinge' is the
70
+ square of the hinge loss. The combination of ``penalty='l1'``
71
+ and ``loss='hinge'`` is not supported.
72
+
73
+ dual : "auto" or bool, default=True
74
+ Select the algorithm to either solve the dual or primal
75
+ optimization problem. Prefer dual=False when n_samples > n_features.
76
+ `dual="auto"` will choose the value of the parameter automatically,
77
+ based on the values of `n_samples`, `n_features`, `loss`, `multi_class`
78
+ and `penalty`. If `n_samples` < `n_features` and optimizer supports
79
+ chosen `loss`, `multi_class` and `penalty`, then dual will be set to True,
80
+ otherwise it will be set to False.
81
+
82
+ .. versionchanged:: 1.3
83
+ The `"auto"` option is added in version 1.3 and will be the default
84
+ in version 1.5.
85
+
86
+ tol : float, default=1e-4
87
+ Tolerance for stopping criteria.
88
+
89
+ C : float, default=1.0
90
+ Regularization parameter. The strength of the regularization is
91
+ inversely proportional to C. Must be strictly positive.
92
+
93
+ multi_class : {'ovr', 'crammer_singer'}, default='ovr'
94
+ Determines the multi-class strategy if `y` contains more than
95
+ two classes.
96
+ ``"ovr"`` trains n_classes one-vs-rest classifiers, while
97
+ ``"crammer_singer"`` optimizes a joint objective over all classes.
98
+ While `crammer_singer` is interesting from a theoretical perspective
99
+ as it is consistent, it is seldom used in practice as it rarely leads
100
+ to better accuracy and is more expensive to compute.
101
+ If ``"crammer_singer"`` is chosen, the options loss, penalty and dual
102
+ will be ignored.
103
+
104
+ fit_intercept : bool, default=True
105
+ Whether or not to fit an intercept. If set to True, the feature vector
106
+ is extended to include an intercept term: `[x_1, ..., x_n, 1]`, where
107
+ 1 corresponds to the intercept. If set to False, no intercept will be
108
+ used in calculations (i.e. data is expected to be already centered).
109
+
110
+ intercept_scaling : float, default=1.0
111
+ When `fit_intercept` is True, the instance vector x becomes ``[x_1,
112
+ ..., x_n, intercept_scaling]``, i.e. a "synthetic" feature with a
113
+ constant value equal to `intercept_scaling` is appended to the instance
114
+ vector. The intercept becomes intercept_scaling * synthetic feature
115
+ weight. Note that liblinear internally penalizes the intercept,
116
+ treating it like any other term in the feature vector. To reduce the
117
+ impact of the regularization on the intercept, the `intercept_scaling`
118
+ parameter can be set to a value greater than 1; the higher the value of
119
+ `intercept_scaling`, the lower the impact of regularization on it.
120
+ Then, the weights become `[w_x_1, ..., w_x_n,
121
+ w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
122
+ the feature weights and the intercept weight is scaled by
123
+ `intercept_scaling`. This scaling allows the intercept term to have a
124
+ different regularization behavior compared to the other features.
125
+
126
+ class_weight : dict or 'balanced', default=None
127
+ Set the parameter C of class i to ``class_weight[i]*C`` for
128
+ SVC. If not given, all classes are supposed to have
129
+ weight one.
130
+ The "balanced" mode uses the values of y to automatically adjust
131
+ weights inversely proportional to class frequencies in the input data
132
+ as ``n_samples / (n_classes * np.bincount(y))``.
133
+
134
+ verbose : int, default=0
135
+ Enable verbose output. Note that this setting takes advantage of a
136
+ per-process runtime setting in liblinear that, if enabled, may not work
137
+ properly in a multithreaded context.
138
+
139
+ random_state : int, RandomState instance or None, default=None
140
+ Controls the pseudo random number generation for shuffling the data for
141
+ the dual coordinate descent (if ``dual=True``). When ``dual=False`` the
142
+ underlying implementation of :class:`LinearSVC` is not random and
143
+ ``random_state`` has no effect on the results.
144
+ Pass an int for reproducible output across multiple function calls.
145
+ See :term:`Glossary <random_state>`.
146
+
147
+ max_iter : int, default=1000
148
+ The maximum number of iterations to be run.
149
+
150
+ Attributes
151
+ ----------
152
+ coef_ : ndarray of shape (1, n_features) if n_classes == 2 \
153
+ else (n_classes, n_features)
154
+ Weights assigned to the features (coefficients in the primal
155
+ problem).
156
+
157
+ ``coef_`` is a readonly property derived from ``raw_coef_`` that
158
+ follows the internal memory layout of liblinear.
159
+
160
+ intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
161
+ Constants in decision function.
162
+
163
+ classes_ : ndarray of shape (n_classes,)
164
+ The unique classes labels.
165
+
166
+ n_features_in_ : int
167
+ Number of features seen during :term:`fit`.
168
+
169
+ .. versionadded:: 0.24
170
+
171
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
172
+ Names of features seen during :term:`fit`. Defined only when `X`
173
+ has feature names that are all strings.
174
+
175
+ .. versionadded:: 1.0
176
+
177
+ n_iter_ : int
178
+ Maximum number of iterations run across all classes.
179
+
180
+ See Also
181
+ --------
182
+ SVC : Implementation of Support Vector Machine classifier using libsvm:
183
+ the kernel can be non-linear but its SMO algorithm does not
184
+ scale to large numbers of samples as LinearSVC does.
185
+
186
+ Furthermore SVC multi-class mode is implemented using one
187
+ vs one scheme while LinearSVC uses one vs the rest. It is
188
+ possible to implement one vs the rest with SVC by using the
189
+ :class:`~sklearn.multiclass.OneVsRestClassifier` wrapper.
190
+
191
+ Finally SVC can fit dense data without memory copy if the input
192
+ is C-contiguous. Sparse data will still incur memory copy though.
193
+
194
+ sklearn.linear_model.SGDClassifier : SGDClassifier can optimize the same
195
+ cost function as LinearSVC
196
+ by adjusting the penalty and loss parameters. In addition it requires
197
+ less memory, allows incremental (online) learning, and implements
198
+ various loss functions and regularization regimes.
199
+
200
+ Notes
201
+ -----
202
+ The underlying C implementation uses a random number generator to
203
+ select features when fitting the model. It is thus not uncommon
204
+ to have slightly different results for the same input data. If
205
+ that happens, try with a smaller ``tol`` parameter.
206
+
207
+ The underlying implementation, liblinear, uses a sparse internal
208
+ representation for the data that will incur a memory copy.
209
+
210
+ Predict output may not match that of standalone liblinear in certain
211
+ cases. See :ref:`differences from liblinear <liblinear_differences>`
212
+ in the narrative documentation.
213
+
214
+ References
215
+ ----------
216
+ `LIBLINEAR: A Library for Large Linear Classification
217
+ <https://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
218
+
219
+ Examples
220
+ --------
221
+ >>> from sklearn.svm import LinearSVC
222
+ >>> from sklearn.pipeline import make_pipeline
223
+ >>> from sklearn.preprocessing import StandardScaler
224
+ >>> from sklearn.datasets import make_classification
225
+ >>> X, y = make_classification(n_features=4, random_state=0)
226
+ >>> clf = make_pipeline(StandardScaler(),
227
+ ... LinearSVC(dual="auto", random_state=0, tol=1e-5))
228
+ >>> clf.fit(X, y)
229
+ Pipeline(steps=[('standardscaler', StandardScaler()),
230
+ ('linearsvc', LinearSVC(dual='auto', random_state=0, tol=1e-05))])
231
+
232
+ >>> print(clf.named_steps['linearsvc'].coef_)
233
+ [[0.141... 0.526... 0.679... 0.493...]]
234
+
235
+ >>> print(clf.named_steps['linearsvc'].intercept_)
236
+ [0.1693...]
237
+ >>> print(clf.predict([[0, 0, 0, 0]]))
238
+ [1]
239
+ """
240
+
241
+ _parameter_constraints: dict = {
242
+ "penalty": [StrOptions({"l1", "l2"})],
243
+ "loss": [StrOptions({"hinge", "squared_hinge"})],
244
+ "dual": ["boolean", StrOptions({"auto"}), Hidden(StrOptions({"warn"}))],
245
+ "tol": [Interval(Real, 0.0, None, closed="neither")],
246
+ "C": [Interval(Real, 0.0, None, closed="neither")],
247
+ "multi_class": [StrOptions({"ovr", "crammer_singer"})],
248
+ "fit_intercept": ["boolean"],
249
+ "intercept_scaling": [Interval(Real, 0, None, closed="neither")],
250
+ "class_weight": [None, dict, StrOptions({"balanced"})],
251
+ "verbose": ["verbose"],
252
+ "random_state": ["random_state"],
253
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
254
+ }
255
+
256
+ def __init__(
257
+ self,
258
+ penalty="l2",
259
+ loss="squared_hinge",
260
+ *,
261
+ dual="warn",
262
+ tol=1e-4,
263
+ C=1.0,
264
+ multi_class="ovr",
265
+ fit_intercept=True,
266
+ intercept_scaling=1,
267
+ class_weight=None,
268
+ verbose=0,
269
+ random_state=None,
270
+ max_iter=1000,
271
+ ):
272
+ self.dual = dual
273
+ self.tol = tol
274
+ self.C = C
275
+ self.multi_class = multi_class
276
+ self.fit_intercept = fit_intercept
277
+ self.intercept_scaling = intercept_scaling
278
+ self.class_weight = class_weight
279
+ self.verbose = verbose
280
+ self.random_state = random_state
281
+ self.max_iter = max_iter
282
+ self.penalty = penalty
283
+ self.loss = loss
284
+
285
+ @_fit_context(prefer_skip_nested_validation=True)
286
+ def fit(self, X, y, sample_weight=None):
287
+ """Fit the model according to the given training data.
288
+
289
+ Parameters
290
+ ----------
291
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
292
+ Training vector, where `n_samples` is the number of samples and
293
+ `n_features` is the number of features.
294
+
295
+ y : array-like of shape (n_samples,)
296
+ Target vector relative to X.
297
+
298
+ sample_weight : array-like of shape (n_samples,), default=None
299
+ Array of weights that are assigned to individual
300
+ samples. If not provided,
301
+ then each sample is given unit weight.
302
+
303
+ .. versionadded:: 0.18
304
+
305
+ Returns
306
+ -------
307
+ self : object
308
+ An instance of the estimator.
309
+ """
310
+ X, y = self._validate_data(
311
+ X,
312
+ y,
313
+ accept_sparse="csr",
314
+ dtype=np.float64,
315
+ order="C",
316
+ accept_large_sparse=False,
317
+ )
318
+ check_classification_targets(y)
319
+ self.classes_ = np.unique(y)
320
+
321
+ _dual = _validate_dual_parameter(
322
+ self.dual, self.loss, self.penalty, self.multi_class, X
323
+ )
324
+
325
+ self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
326
+ X,
327
+ y,
328
+ self.C,
329
+ self.fit_intercept,
330
+ self.intercept_scaling,
331
+ self.class_weight,
332
+ self.penalty,
333
+ _dual,
334
+ self.verbose,
335
+ self.max_iter,
336
+ self.tol,
337
+ self.random_state,
338
+ self.multi_class,
339
+ self.loss,
340
+ sample_weight=sample_weight,
341
+ )
342
+ # Backward compatibility: _fit_liblinear is used both by LinearSVC/R
343
+ # and LogisticRegression but LogisticRegression sets a structured
344
+ # `n_iter_` attribute with information about the underlying OvR fits
345
+ # while LinearSVC/R only reports the maximum value.
346
+ self.n_iter_ = n_iter_.max().item()
347
+
348
+ if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
349
+ self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
350
+ if self.fit_intercept:
351
+ intercept = self.intercept_[1] - self.intercept_[0]
352
+ self.intercept_ = np.array([intercept])
353
+
354
+ return self
355
+
356
+ def _more_tags(self):
357
+ return {
358
+ "_xfail_checks": {
359
+ "check_sample_weights_invariance": (
360
+ "zero sample_weight is not equivalent to removing samples"
361
+ ),
362
+ }
363
+ }
364
+
365
+
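As the `coef_` attribute documentation above states, a binary fit yields a single weight row while a K-class one-vs-rest fit yields one row per class. A short sketch with synthetic data (scikit-learn assumed installed; dataset parameters are illustrative):

from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X2, y2 = make_classification(n_classes=2, n_features=6, random_state=0)
X3, y3 = make_classification(n_classes=3, n_informative=4, n_features=6, random_state=0)

print(LinearSVC(dual="auto").fit(X2, y2).coef_.shape)   # (1, 6)
print(LinearSVC(dual="auto").fit(X3, y3).coef_.shape)   # (3, 6): one-vs-rest, one row per class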
366
+ class LinearSVR(RegressorMixin, LinearModel):
367
+ """Linear Support Vector Regression.
368
+
369
+ Similar to SVR with parameter kernel='linear', but implemented in terms of
370
+ liblinear rather than libsvm, so it has more flexibility in the choice of
371
+ penalties and loss functions and should scale better to large numbers of
372
+ samples.
373
+
374
+ The main differences between :class:`~sklearn.svm.LinearSVR` and
375
+ :class:`~sklearn.svm.SVR` lie in the loss function used by default, and in
376
+ the handling of intercept regularization between those two implementations.
377
+
378
+ This class supports both dense and sparse input.
379
+
380
+ Read more in the :ref:`User Guide <svm_regression>`.
381
+
382
+ .. versionadded:: 0.16
383
+
384
+ Parameters
385
+ ----------
386
+ epsilon : float, default=0.0
387
+ Epsilon parameter in the epsilon-insensitive loss function. Note
388
+ that the value of this parameter depends on the scale of the target
389
+ variable y. If unsure, set ``epsilon=0``.
390
+
391
+ tol : float, default=1e-4
392
+ Tolerance for stopping criteria.
393
+
394
+ C : float, default=1.0
395
+ Regularization parameter. The strength of the regularization is
396
+ inversely proportional to C. Must be strictly positive.
397
+
398
+ loss : {'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
399
+ default='epsilon_insensitive'
400
+ Specifies the loss function. The epsilon-insensitive loss
401
+ (standard SVR) is the L1 loss, while the squared epsilon-insensitive
402
+ loss ('squared_epsilon_insensitive') is the L2 loss.
403
+
404
+ fit_intercept : bool, default=True
405
+ Whether or not to fit an intercept. If set to True, the feature vector
406
+ is extended to include an intercept term: `[x_1, ..., x_n, 1]`, where
407
+ 1 corresponds to the intercept. If set to False, no intercept will be
408
+ used in calculations (i.e. data is expected to be already centered).
409
+
410
+ intercept_scaling : float, default=1.0
411
+ When `fit_intercept` is True, the instance vector x becomes `[x_1, ...,
412
+ x_n, intercept_scaling]`, i.e. a "synthetic" feature with a constant
413
+ value equal to `intercept_scaling` is appended to the instance vector.
414
+ The intercept becomes intercept_scaling * synthetic feature weight.
415
+ Note that liblinear internally penalizes the intercept, treating it
416
+ like any other term in the feature vector. To reduce the impact of the
417
+ regularization on the intercept, the `intercept_scaling` parameter can
418
+ be set to a value greater than 1; the higher the value of
419
+ `intercept_scaling`, the lower the impact of regularization on it.
420
+ Then, the weights become `[w_x_1, ..., w_x_n,
421
+ w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
422
+ the feature weights and the intercept weight is scaled by
423
+ `intercept_scaling`. This scaling allows the intercept term to have a
424
+ different regularization behavior compared to the other features.
425
+
426
+ dual : "auto" or bool, default=True
427
+ Select the algorithm to either solve the dual or primal
428
+ optimization problem. Prefer dual=False when n_samples > n_features.
429
+ `dual="auto"` will choose the value of the parameter automatically,
430
+ based on the values of `n_samples`, `n_features` and `loss`. If
431
+ `n_samples` < `n_features` and optimizer supports chosen `loss`,
432
+ then dual will be set to True, otherwise it will be set to False.
433
+
434
+ .. versionchanged:: 1.3
435
+ The `"auto"` option is added in version 1.3 and will be the default
436
+ in version 1.5.
437
+
438
+ verbose : int, default=0
439
+ Enable verbose output. Note that this setting takes advantage of a
440
+ per-process runtime setting in liblinear that, if enabled, may not work
441
+ properly in a multithreaded context.
442
+
443
+ random_state : int, RandomState instance or None, default=None
444
+ Controls the pseudo random number generation for shuffling the data.
445
+ Pass an int for reproducible output across multiple function calls.
446
+ See :term:`Glossary <random_state>`.
447
+
448
+ max_iter : int, default=1000
449
+ The maximum number of iterations to be run.
450
+
451
+ Attributes
452
+ ----------
453
+ coef_ : ndarray of shape (n_features,)
455
+ Weights assigned to the features (coefficients in the primal
456
+ problem).
457
+
458
+ `coef_` is a readonly property derived from `raw_coef_` that
459
+ follows the internal memory layout of liblinear.
460
+
461
+ intercept_ : ndarray of shape (1,)
462
+ Constants in decision function.
463
+
464
+ n_features_in_ : int
465
+ Number of features seen during :term:`fit`.
466
+
467
+ .. versionadded:: 0.24
468
+
469
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
470
+ Names of features seen during :term:`fit`. Defined only when `X`
471
+ has feature names that are all strings.
472
+
473
+ .. versionadded:: 1.0
474
+
475
+ n_iter_ : int
476
+ Number of iterations run by the liblinear solver to fit the model.
477
+
478
+ See Also
479
+ --------
480
+ LinearSVC : Implementation of Support Vector Machine classifier using the
481
+ same library as this class (liblinear).
482
+
483
+ SVR : Implementation of Support Vector Machine regression using libsvm:
484
+ the kernel can be non-linear but its SMO algorithm does not scale to
485
+ large numbers of samples as :class:`~sklearn.svm.LinearSVR` does.
486
+
487
+ sklearn.linear_model.SGDRegressor : SGDRegressor can optimize the same cost
488
+ function as LinearSVR
489
+ by adjusting the penalty and loss parameters. In addition it requires
490
+ less memory, allows incremental (online) learning, and implements
491
+ various loss functions and regularization regimes.
492
+
493
+ Examples
494
+ --------
495
+ >>> from sklearn.svm import LinearSVR
496
+ >>> from sklearn.pipeline import make_pipeline
497
+ >>> from sklearn.preprocessing import StandardScaler
498
+ >>> from sklearn.datasets import make_regression
499
+ >>> X, y = make_regression(n_features=4, random_state=0)
500
+ >>> regr = make_pipeline(StandardScaler(),
501
+ ... LinearSVR(dual="auto", random_state=0, tol=1e-5))
502
+ >>> regr.fit(X, y)
503
+ Pipeline(steps=[('standardscaler', StandardScaler()),
504
+ ('linearsvr', LinearSVR(dual='auto', random_state=0, tol=1e-05))])
505
+
506
+ >>> print(regr.named_steps['linearsvr'].coef_)
507
+ [18.582... 27.023... 44.357... 64.522...]
508
+ >>> print(regr.named_steps['linearsvr'].intercept_)
509
+ [-4...]
510
+ >>> print(regr.predict([[0, 0, 0, 0]]))
511
+ [-2.384...]
512
+ """
513
+
514
+ _parameter_constraints: dict = {
515
+ "epsilon": [Real],
516
+ "tol": [Interval(Real, 0.0, None, closed="neither")],
517
+ "C": [Interval(Real, 0.0, None, closed="neither")],
518
+ "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})],
519
+ "fit_intercept": ["boolean"],
520
+ "intercept_scaling": [Interval(Real, 0, None, closed="neither")],
521
+ "dual": ["boolean", StrOptions({"auto"}), Hidden(StrOptions({"warn"}))],
522
+ "verbose": ["verbose"],
523
+ "random_state": ["random_state"],
524
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
525
+ }
526
+
527
+ def __init__(
528
+ self,
529
+ *,
530
+ epsilon=0.0,
531
+ tol=1e-4,
532
+ C=1.0,
533
+ loss="epsilon_insensitive",
534
+ fit_intercept=True,
535
+ intercept_scaling=1.0,
536
+ dual="warn",
537
+ verbose=0,
538
+ random_state=None,
539
+ max_iter=1000,
540
+ ):
541
+ self.tol = tol
542
+ self.C = C
543
+ self.epsilon = epsilon
544
+ self.fit_intercept = fit_intercept
545
+ self.intercept_scaling = intercept_scaling
546
+ self.verbose = verbose
547
+ self.random_state = random_state
548
+ self.max_iter = max_iter
549
+ self.dual = dual
550
+ self.loss = loss
551
+
552
+ @_fit_context(prefer_skip_nested_validation=True)
553
+ def fit(self, X, y, sample_weight=None):
554
+ """Fit the model according to the given training data.
555
+
556
+ Parameters
557
+ ----------
558
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
559
+ Training vector, where `n_samples` is the number of samples and
560
+ `n_features` is the number of features.
561
+
562
+ y : array-like of shape (n_samples,)
563
+ Target vector relative to X.
564
+
565
+ sample_weight : array-like of shape (n_samples,), default=None
566
+ Array of weights that are assigned to individual
567
+ samples. If not provided,
568
+ then each sample is given unit weight.
569
+
570
+ .. versionadded:: 0.18
571
+
572
+ Returns
573
+ -------
574
+ self : object
575
+ An instance of the estimator.
576
+ """
577
+ X, y = self._validate_data(
578
+ X,
579
+ y,
580
+ accept_sparse="csr",
581
+ dtype=np.float64,
582
+ order="C",
583
+ accept_large_sparse=False,
584
+ )
585
+ penalty = "l2" # SVR only accepts l2 penalty
586
+
587
+ _dual = _validate_dual_parameter(self.dual, self.loss, penalty, "ovr", X)
588
+
589
+ self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
590
+ X,
591
+ y,
592
+ self.C,
593
+ self.fit_intercept,
594
+ self.intercept_scaling,
595
+ None,
596
+ penalty,
597
+ _dual,
598
+ self.verbose,
599
+ self.max_iter,
600
+ self.tol,
601
+ self.random_state,
602
+ loss=self.loss,
603
+ epsilon=self.epsilon,
604
+ sample_weight=sample_weight,
605
+ )
606
+ self.coef_ = self.coef_.ravel()
607
+ # Backward compatibility: _fit_liblinear is used both by LinearSVC/R
608
+ # and LogisticRegression but LogisticRegression sets a structured
609
+ # `n_iter_` attribute with information about the underlying OvR fits
610
+ # while LinearSVC/R only reports the maximum value.
611
+ self.n_iter_ = n_iter_.max().item()
612
+
613
+ return self
614
+
615
+ def _more_tags(self):
616
+ return {
617
+ "_xfail_checks": {
618
+ "check_sample_weights_invariance": (
619
+ "zero sample_weight is not equivalent to removing samples"
620
+ ),
621
+ }
622
+ }
623
+
624
+
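A small sketch of the epsilon-insensitive tube described in the `epsilon` docs above (illustrative data; scikit-learn assumed installed): residuals smaller than `epsilon` carry no loss, so widening the tube changes the fitted coefficients.

import numpy as np
from sklearn.svm import LinearSVR

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.05 * rng.randn(200)

tight = LinearSVR(epsilon=0.0, dual="auto", max_iter=10_000).fit(X, y)
wide = LinearSVR(epsilon=0.5, dual="auto", max_iter=10_000).fit(X, y)
print(tight.coef_)   # close to the generating weights [1, -2, 0.5]
print(wide.coef_)    # typically shrunk, since most residuals already fit inside the tube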
625
+ class SVC(BaseSVC):
626
+ """C-Support Vector Classification.
627
+
628
+ The implementation is based on libsvm. The fit time scales at least
629
+ quadratically with the number of samples and may be impractical
630
+ beyond tens of thousands of samples. For large datasets
631
+ consider using :class:`~sklearn.svm.LinearSVC` or
632
+ :class:`~sklearn.linear_model.SGDClassifier` instead, possibly after a
633
+ :class:`~sklearn.kernel_approximation.Nystroem` transformer or
634
+ other :ref:`kernel_approximation`.
635
+
636
+ The multiclass support is handled according to a one-vs-one scheme.
637
+
638
+ For details on the precise mathematical formulation of the provided
639
+ kernel functions and how `gamma`, `coef0` and `degree` affect each
640
+ other, see the corresponding section in the narrative documentation:
641
+ :ref:`svm_kernels`.
642
+
643
+ To learn how to tune SVC's hyperparameters, see the following example:
644
+ :ref:`sphx_glr_auto_examples_model_selection_plot_nested_cross_validation_iris.py`
645
+
646
+ Read more in the :ref:`User Guide <svm_classification>`.
647
+
648
+ Parameters
649
+ ----------
650
+ C : float, default=1.0
651
+ Regularization parameter. The strength of the regularization is
652
+ inversely proportional to C. Must be strictly positive. The penalty
653
+ is a squared l2 penalty.
654
+
655
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
656
+ default='rbf'
657
+ Specifies the kernel type to be used in the algorithm. If
658
+ none is given, 'rbf' will be used. If a callable is given it is used to
659
+ pre-compute the kernel matrix from data matrices; that matrix should be
660
+ an array of shape ``(n_samples, n_samples)``. For an intuitive
661
+ visualization of different kernel types see
662
+ :ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`.
663
+
664
+ degree : int, default=3
665
+ Degree of the polynomial kernel function ('poly').
666
+ Must be non-negative. Ignored by all other kernels.
667
+
668
+ gamma : {'scale', 'auto'} or float, default='scale'
669
+ Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
670
+
671
+ - if ``gamma='scale'`` (default) is passed then it uses
672
+ 1 / (n_features * X.var()) as value of gamma,
673
+ - if 'auto', uses 1 / n_features
674
+ - if float, must be non-negative.
675
+
676
+ .. versionchanged:: 0.22
677
+ The default value of ``gamma`` changed from 'auto' to 'scale'.
678
+
679
+ coef0 : float, default=0.0
680
+ Independent term in kernel function.
681
+ It is only significant in 'poly' and 'sigmoid'.
682
+
683
+ shrinking : bool, default=True
684
+ Whether to use the shrinking heuristic.
685
+ See the :ref:`User Guide <shrinking_svm>`.
686
+
687
+ probability : bool, default=False
688
+ Whether to enable probability estimates. This must be enabled prior
689
+ to calling `fit`, will slow down that method as it internally uses
690
+ 5-fold cross-validation, and `predict_proba` may be inconsistent with
691
+ `predict`. Read more in the :ref:`User Guide <scores_probabilities>`.
692
+
693
+ tol : float, default=1e-3
694
+ Tolerance for stopping criterion.
695
+
696
+ cache_size : float, default=200
697
+ Specify the size of the kernel cache (in MB).
698
+
699
+ class_weight : dict or 'balanced', default=None
700
+ Set the parameter C of class i to class_weight[i]*C for
701
+ SVC. If not given, all classes are supposed to have
702
+ weight one.
703
+ The "balanced" mode uses the values of y to automatically adjust
704
+ weights inversely proportional to class frequencies in the input data
705
+ as ``n_samples / (n_classes * np.bincount(y))``.
706
+
707
+ verbose : bool, default=False
708
+ Enable verbose output. Note that this setting takes advantage of a
709
+ per-process runtime setting in libsvm that, if enabled, may not work
710
+ properly in a multithreaded context.
711
+
712
+ max_iter : int, default=-1
713
+ Hard limit on iterations within solver, or -1 for no limit.
714
+
715
+ decision_function_shape : {'ovo', 'ovr'}, default='ovr'
716
+ Whether to return a one-vs-rest ('ovr') decision function of shape
717
+ (n_samples, n_classes) as all other classifiers, or the original
718
+ one-vs-one ('ovo') decision function of libsvm which has shape
719
+ (n_samples, n_classes * (n_classes - 1) / 2). However, note that
720
+ internally, one-vs-one ('ovo') is always used as a multi-class strategy
721
+ to train models; an ovr matrix is only constructed from the ovo matrix.
722
+ The parameter is ignored for binary classification.
723
+
724
+ .. versionchanged:: 0.19
725
+ decision_function_shape is 'ovr' by default.
726
+
727
+ .. versionadded:: 0.17
728
+ *decision_function_shape='ovr'* is recommended.
729
+
730
+ .. versionchanged:: 0.17
731
+ Deprecated *decision_function_shape='ovo' and None*.
732
+
733
+ break_ties : bool, default=False
734
+ If true, ``decision_function_shape='ovr'``, and number of classes > 2,
735
+ :term:`predict` will break ties according to the confidence values of
736
+ :term:`decision_function`; otherwise the first class among the tied
737
+ classes is returned. Please note that breaking ties comes at a
738
+ relatively high computational cost compared to a simple predict.
739
+
740
+ .. versionadded:: 0.22
741
+
742
+ random_state : int, RandomState instance or None, default=None
743
+ Controls the pseudo random number generation for shuffling the data for
744
+ probability estimates. Ignored when `probability` is False.
745
+ Pass an int for reproducible output across multiple function calls.
746
+ See :term:`Glossary <random_state>`.
747
+
748
+ Attributes
749
+ ----------
750
+ class_weight_ : ndarray of shape (n_classes,)
751
+ Multipliers of parameter C for each class.
752
+ Computed based on the ``class_weight`` parameter.
753
+
754
+ classes_ : ndarray of shape (n_classes,)
755
+ The classes labels.
756
+
757
+ coef_ : ndarray of shape (n_classes * (n_classes - 1) / 2, n_features)
758
+ Weights assigned to the features (coefficients in the primal
759
+ problem). This is only available in the case of a linear kernel.
760
+
761
+ `coef_` is a readonly property derived from `dual_coef_` and
762
+ `support_vectors_`.
763
+
764
+ dual_coef_ : ndarray of shape (n_classes - 1, n_SV)
765
+ Dual coefficients of the support vector in the decision
766
+ function (see :ref:`sgd_mathematical_formulation`), multiplied by
767
+ their targets.
768
+ For multiclass, coefficient for all 1-vs-1 classifiers.
769
+ The layout of the coefficients in the multiclass case is somewhat
770
+ non-trivial. See the :ref:`multi-class section of the User Guide
771
+ <svm_multi_class>` for details.
772
+
773
+ fit_status_ : int
774
+ 0 if correctly fitted, 1 otherwise (will raise warning)
775
+
776
+ intercept_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
777
+ Constants in decision function.
778
+
779
+ n_features_in_ : int
780
+ Number of features seen during :term:`fit`.
781
+
782
+ .. versionadded:: 0.24
783
+
784
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
785
+ Names of features seen during :term:`fit`. Defined only when `X`
786
+ has feature names that are all strings.
787
+
788
+ .. versionadded:: 1.0
789
+
790
+ n_iter_ : ndarray of shape (n_classes * (n_classes - 1) // 2,)
791
+ Number of iterations run by the optimization routine to fit the model.
792
+ The shape of this attribute depends on the number of models optimized
793
+ which in turn depends on the number of classes.
794
+
795
+ .. versionadded:: 1.1
796
+
797
+ support_ : ndarray of shape (n_SV,)
798
+ Indices of support vectors.
799
+
800
+ support_vectors_ : ndarray of shape (n_SV, n_features)
801
+ Support vectors. An empty array if kernel is precomputed.
802
+
803
+ n_support_ : ndarray of shape (n_classes,), dtype=int32
804
+ Number of support vectors for each class.
805
+
806
+ probA_ : ndarray of shape (n_classes * (n_classes - 1) / 2)
807
+ probB_ : ndarray of shape (n_classes * (n_classes - 1) / 2)
808
+ If `probability=True`, it corresponds to the parameters learned in
809
+ Platt scaling to produce probability estimates from decision values.
810
+ If `probability=False`, it's an empty array. Platt scaling uses the
811
+ logistic function
812
+ ``1 / (1 + exp(decision_value * probA_ + probB_))``
813
+ where ``probA_`` and ``probB_`` are learned from the dataset [2]_. For
814
+ more information on the multiclass case and training procedure see
815
+ section 8 of [1]_.
816
+
817
+ shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
818
+ Array dimensions of training vector ``X``.
819
+
820
+ See Also
821
+ --------
822
+ SVR : Support Vector Machine for Regression implemented using libsvm.
823
+
824
+ LinearSVC : Scalable Linear Support Vector Machine for classification
825
+ implemented using liblinear. Check the See Also section of
826
+ LinearSVC for more comparison elements.
827
+
828
+ References
829
+ ----------
830
+ .. [1] `LIBSVM: A Library for Support Vector Machines
831
+ <http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf>`_
832
+
833
+ .. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector
834
+ Machines and Comparisons to Regularized Likelihood Methods"
835
+ <https://citeseerx.ist.psu.edu/doc_view/pid/42e5ed832d4310ce4378c44d05570439df28a393>`_
836
+
837
+ Examples
838
+ --------
839
+ >>> import numpy as np
840
+ >>> from sklearn.pipeline import make_pipeline
841
+ >>> from sklearn.preprocessing import StandardScaler
842
+ >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
843
+ >>> y = np.array([1, 1, 2, 2])
844
+ >>> from sklearn.svm import SVC
845
+ >>> clf = make_pipeline(StandardScaler(), SVC(gamma='auto'))
846
+ >>> clf.fit(X, y)
847
+ Pipeline(steps=[('standardscaler', StandardScaler()),
848
+ ('svc', SVC(gamma='auto'))])
849
+
850
+ >>> print(clf.predict([[-0.8, -1]]))
851
+ [1]
852
+ """
853
+
854
+ _impl = "c_svc"
855
+
856
+ def __init__(
857
+ self,
858
+ *,
859
+ C=1.0,
860
+ kernel="rbf",
861
+ degree=3,
862
+ gamma="scale",
863
+ coef0=0.0,
864
+ shrinking=True,
865
+ probability=False,
866
+ tol=1e-3,
867
+ cache_size=200,
868
+ class_weight=None,
869
+ verbose=False,
870
+ max_iter=-1,
871
+ decision_function_shape="ovr",
872
+ break_ties=False,
873
+ random_state=None,
874
+ ):
875
+ super().__init__(
876
+ kernel=kernel,
877
+ degree=degree,
878
+ gamma=gamma,
879
+ coef0=coef0,
880
+ tol=tol,
881
+ C=C,
882
+ nu=0.0,
883
+ shrinking=shrinking,
884
+ probability=probability,
885
+ cache_size=cache_size,
886
+ class_weight=class_weight,
887
+ verbose=verbose,
888
+ max_iter=max_iter,
889
+ decision_function_shape=decision_function_shape,
890
+ break_ties=break_ties,
891
+ random_state=random_state,
892
+ )
893
+
894
+ def _more_tags(self):
895
+ return {
896
+ "_xfail_checks": {
897
+ "check_sample_weights_invariance": (
898
+ "zero sample_weight is not equivalent to removing samples"
899
+ ),
900
+ }
901
+ }
902
+
903
+
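A quick sketch of the `decision_function_shape` behaviour documented above (synthetic data; scikit-learn assumed installed): with four classes, 'ovr' yields one column per class while 'ovo' yields one column per pair of classes.

from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_classes=4, n_informative=6, n_features=8, random_state=0)
ovr = SVC(decision_function_shape="ovr").fit(X, y)
ovo = SVC(decision_function_shape="ovo").fit(X, y)
print(ovr.decision_function(X[:2]).shape)   # (2, 4): one column per class
print(ovo.decision_function(X[:2]).shape)   # (2, 6): 4 * 3 / 2 pairwise classifiers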
904
+ class NuSVC(BaseSVC):
905
+ """Nu-Support Vector Classification.
906
+
907
+ Similar to SVC but uses a parameter to control the number of support
908
+ vectors.
909
+
910
+ The implementation is based on libsvm.
911
+
912
+ Read more in the :ref:`User Guide <svm_classification>`.
913
+
914
+ Parameters
915
+ ----------
916
+ nu : float, default=0.5
917
+ An upper bound on the fraction of margin errors (see :ref:`User Guide
918
+ <nu_svc>`) and a lower bound of the fraction of support vectors.
919
+ Should be in the interval (0, 1].
920
+
921
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
922
+ default='rbf'
923
+ Specifies the kernel type to be used in the algorithm.
924
+ If none is given, 'rbf' will be used. If a callable is given it is
925
+ used to precompute the kernel matrix. For an intuitive
926
+ visualization of different kernel types see
927
+ :ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`.
928
+
929
+ degree : int, default=3
930
+ Degree of the polynomial kernel function ('poly').
931
+ Must be non-negative. Ignored by all other kernels.
932
+
933
+ gamma : {'scale', 'auto'} or float, default='scale'
934
+ Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
935
+
936
+ - if ``gamma='scale'`` (default) is passed then it uses
937
+ 1 / (n_features * X.var()) as value of gamma,
938
+ - if 'auto', uses 1 / n_features
939
+ - if float, must be non-negative.
940
+
941
+ .. versionchanged:: 0.22
942
+ The default value of ``gamma`` changed from 'auto' to 'scale'.
943
+
944
+ coef0 : float, default=0.0
945
+ Independent term in kernel function.
946
+ It is only significant in 'poly' and 'sigmoid'.
947
+
948
+ shrinking : bool, default=True
949
+ Whether to use the shrinking heuristic.
950
+ See the :ref:`User Guide <shrinking_svm>`.
951
+
952
+ probability : bool, default=False
953
+ Whether to enable probability estimates. This must be enabled prior
954
+ to calling `fit`, will slow down that method as it internally uses
955
+ 5-fold cross-validation, and `predict_proba` may be inconsistent with
956
+ `predict`. Read more in the :ref:`User Guide <scores_probabilities>`.
957
+
958
+ tol : float, default=1e-3
959
+ Tolerance for stopping criterion.
960
+
961
+ cache_size : float, default=200
962
+ Specify the size of the kernel cache (in MB).
963
+
964
+ class_weight : {dict, 'balanced'}, default=None
965
+ Set the parameter C of class i to class_weight[i]*C for
966
+ SVC. If not given, all classes are supposed to have
967
+ weight one. The "balanced" mode uses the values of y to automatically
968
+ adjust weights inversely proportional to class frequencies as
969
+ ``n_samples / (n_classes * np.bincount(y))``.
970
+
971
+ verbose : bool, default=False
972
+ Enable verbose output. Note that this setting takes advantage of a
973
+ per-process runtime setting in libsvm that, if enabled, may not work
974
+ properly in a multithreaded context.
975
+
976
+ max_iter : int, default=-1
977
+ Hard limit on iterations within solver, or -1 for no limit.
978
+
979
+ decision_function_shape : {'ovo', 'ovr'}, default='ovr'
980
+ Whether to return a one-vs-rest ('ovr') decision function of shape
981
+ (n_samples, n_classes) as all other classifiers, or the original
982
+ one-vs-one ('ovo') decision function of libsvm which has shape
983
+ (n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one
984
+ ('ovo') is always used as multi-class strategy. The parameter is
985
+ ignored for binary classification.
986
+
987
+ .. versionchanged:: 0.19
988
+ decision_function_shape is 'ovr' by default.
989
+
990
+ .. versionadded:: 0.17
991
+ *decision_function_shape='ovr'* is recommended.
992
+
993
+ .. versionchanged:: 0.17
994
+ Deprecated *decision_function_shape='ovo' and None*.
995
+
996
+ break_ties : bool, default=False
997
+ If true, ``decision_function_shape='ovr'``, and number of classes > 2,
998
+ :term:`predict` will break ties according to the confidence values of
999
+ :term:`decision_function`; otherwise the first class among the tied
1000
+ classes is returned. Please note that breaking ties comes at a
1001
+ relatively high computational cost compared to a simple predict.
1002
+
1003
+ .. versionadded:: 0.22
1004
+
1005
+ random_state : int, RandomState instance or None, default=None
1006
+ Controls the pseudo random number generation for shuffling the data for
1007
+ probability estimates. Ignored when `probability` is False.
1008
+ Pass an int for reproducible output across multiple function calls.
1009
+ See :term:`Glossary <random_state>`.
1010
+
1011
+ Attributes
1012
+ ----------
1013
+ class_weight_ : ndarray of shape (n_classes,)
1014
+ Multipliers of parameter C of each class.
1015
+ Computed based on the ``class_weight`` parameter.
1016
+
1017
+ classes_ : ndarray of shape (n_classes,)
1018
+ The unique classes labels.
1019
+
1020
+ coef_ : ndarray of shape (n_classes * (n_classes - 1) / 2, n_features)
1021
+ Weights assigned to the features (coefficients in the primal
1022
+ problem). This is only available in the case of a linear kernel.
1023
+
1024
+ `coef_` is readonly property derived from `dual_coef_` and
1025
+ `support_vectors_`.
1026
+
1027
+ dual_coef_ : ndarray of shape (n_classes - 1, n_SV)
1028
+ Dual coefficients of the support vector in the decision
1029
+ function (see :ref:`sgd_mathematical_formulation`), multiplied by
1030
+ their targets.
1031
+ For multiclass, coefficient for all 1-vs-1 classifiers.
1032
+ The layout of the coefficients in the multiclass case is somewhat
1033
+ non-trivial. See the :ref:`multi-class section of the User Guide
1034
+ <svm_multi_class>` for details.
1035
+
1036
+ fit_status_ : int
1037
+ 0 if correctly fitted, 1 if the algorithm did not converge.
1038
+
1039
+ intercept_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
1040
+ Constants in decision function.
1041
+
1042
+ n_features_in_ : int
1043
+ Number of features seen during :term:`fit`.
1044
+
1045
+ .. versionadded:: 0.24
1046
+
1047
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1048
+ Names of features seen during :term:`fit`. Defined only when `X`
1049
+ has feature names that are all strings.
1050
+
1051
+ .. versionadded:: 1.0
1052
+
1053
+ n_iter_ : ndarray of shape (n_classes * (n_classes - 1) // 2,)
1054
+ Number of iterations run by the optimization routine to fit the model.
1055
+ The shape of this attribute depends on the number of models optimized
1056
+ which in turn depends on the number of classes.
1057
+
1058
+ .. versionadded:: 1.1
1059
+
1060
+ support_ : ndarray of shape (n_SV,)
1061
+ Indices of support vectors.
1062
+
1063
+ support_vectors_ : ndarray of shape (n_SV, n_features)
1064
+ Support vectors.
1065
+
1066
+ n_support_ : ndarray of shape (n_classes,), dtype=int32
1067
+ Number of support vectors for each class.
1068
+
1072
+ probA_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
1073
+
1074
+ probB_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
1075
+ If `probability=True`, it corresponds to the parameters learned in
1076
+ Platt scaling to produce probability estimates from decision values.
1077
+ If `probability=False`, it's an empty array. Platt scaling uses the
1078
+ logistic function
1079
+ ``1 / (1 + exp(decision_value * probA_ + probB_))``
1080
+ where ``probA_`` and ``probB_`` are learned from the dataset [2]_. For
1081
+ more information on the multiclass case and training procedure see
1082
+ section 8 of [1]_.
1083
+
1084
+ shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
1085
+ Array dimensions of training vector ``X``.
1086
+
1087
+ See Also
1088
+ --------
1089
+ SVC : Support Vector Machine for classification using libsvm.
1090
+
1091
+ LinearSVC : Scalable linear Support Vector Machine for classification using
1092
+ liblinear.
1093
+
1094
+ References
1095
+ ----------
1096
+ .. [1] `LIBSVM: A Library for Support Vector Machines
1097
+ <http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf>`_
1098
+
1099
+ .. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector
1100
+ Machines and Comparisons to Regularized Likelihood Methods"
1101
+ <https://citeseerx.ist.psu.edu/doc_view/pid/42e5ed832d4310ce4378c44d05570439df28a393>`_
1102
+
1103
+ Examples
1104
+ --------
1105
+ >>> import numpy as np
1106
+ >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
1107
+ >>> y = np.array([1, 1, 2, 2])
1108
+ >>> from sklearn.pipeline import make_pipeline
1109
+ >>> from sklearn.preprocessing import StandardScaler
1110
+ >>> from sklearn.svm import NuSVC
1111
+ >>> clf = make_pipeline(StandardScaler(), NuSVC())
1112
+ >>> clf.fit(X, y)
1113
+ Pipeline(steps=[('standardscaler', StandardScaler()), ('nusvc', NuSVC())])
1114
+ >>> print(clf.predict([[-0.8, -1]]))
1115
+ [1]
1116
+ """
1117
+
1118
+ _impl = "nu_svc"
1119
+
1120
+ _parameter_constraints: dict = {
1121
+ **BaseSVC._parameter_constraints,
1122
+ "nu": [Interval(Real, 0.0, 1.0, closed="right")],
1123
+ }
1124
+ _parameter_constraints.pop("C")
1125
+
1126
+ def __init__(
1127
+ self,
1128
+ *,
1129
+ nu=0.5,
1130
+ kernel="rbf",
1131
+ degree=3,
1132
+ gamma="scale",
1133
+ coef0=0.0,
1134
+ shrinking=True,
1135
+ probability=False,
1136
+ tol=1e-3,
1137
+ cache_size=200,
1138
+ class_weight=None,
1139
+ verbose=False,
1140
+ max_iter=-1,
1141
+ decision_function_shape="ovr",
1142
+ break_ties=False,
1143
+ random_state=None,
1144
+ ):
1145
+ super().__init__(
1146
+ kernel=kernel,
1147
+ degree=degree,
1148
+ gamma=gamma,
1149
+ coef0=coef0,
1150
+ tol=tol,
1151
+ C=0.0,
1152
+ nu=nu,
1153
+ shrinking=shrinking,
1154
+ probability=probability,
1155
+ cache_size=cache_size,
1156
+ class_weight=class_weight,
1157
+ verbose=verbose,
1158
+ max_iter=max_iter,
1159
+ decision_function_shape=decision_function_shape,
1160
+ break_ties=break_ties,
1161
+ random_state=random_state,
1162
+ )
1163
+
1164
+ def _more_tags(self):
1165
+ return {
1166
+ "_xfail_checks": {
1167
+ "check_methods_subset_invariance": (
1168
+ "fails for the decision_function method"
1169
+ ),
1170
+ "check_class_weight_classifiers": "class_weight is ignored.",
1171
+ "check_sample_weights_invariance": (
1172
+ "zero sample_weight is not equivalent to removing samples"
1173
+ ),
1174
+ "check_classifiers_one_label_sample_weights": (
1175
+ "specified nu is infeasible for the fit."
1176
+ ),
1177
+ }
1178
+ }
1179
+
1180
+
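The role of `nu` described above can be seen directly in the number of selected support vectors. A hedged sketch with synthetic, exactly balanced classes (scikit-learn assumed installed; `flip_y=0` keeps the nu constraint feasible for large values):

from sklearn.datasets import make_classification
from sklearn.svm import NuSVC

X, y = make_classification(n_samples=200, flip_y=0, random_state=0)
for nu in (0.1, 0.5, 0.9):
    clf = NuSVC(nu=nu).fit(X, y)
    print(nu, clf.n_support_.sum() / len(X))   # fraction of support vectors grows with nu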
1181
+ class SVR(RegressorMixin, BaseLibSVM):
1182
+ """Epsilon-Support Vector Regression.
1183
+
1184
+ The free parameters in the model are C and epsilon.
1185
+
1186
+ The implementation is based on libsvm. The fit time complexity
1187
+ is more than quadratic with the number of samples which makes it hard
1188
+ to scale to datasets with more than a couple of 10000 samples. For large
1189
+ datasets consider using :class:`~sklearn.svm.LinearSVR` or
1190
+ :class:`~sklearn.linear_model.SGDRegressor` instead, possibly after a
1191
+ :class:`~sklearn.kernel_approximation.Nystroem` transformer or
1192
+ other :ref:`kernel_approximation`.
1193
+
1194
+ Read more in the :ref:`User Guide <svm_regression>`.
1195
+
1196
+ Parameters
1197
+ ----------
1198
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
1199
+ default='rbf'
1200
+ Specifies the kernel type to be used in the algorithm.
1201
+ If none is given, 'rbf' will be used. If a callable is given it is
1202
+ used to precompute the kernel matrix.
1203
+
1204
+ degree : int, default=3
1205
+ Degree of the polynomial kernel function ('poly').
1206
+ Must be non-negative. Ignored by all other kernels.
1207
+
1208
+ gamma : {'scale', 'auto'} or float, default='scale'
1209
+ Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
1210
+
1211
+ - if ``gamma='scale'`` (default) is passed then it uses
1212
+ 1 / (n_features * X.var()) as value of gamma,
1213
+ - if 'auto', uses 1 / n_features
1214
+ - if float, must be non-negative.
1215
+
1216
+ .. versionchanged:: 0.22
1217
+ The default value of ``gamma`` changed from 'auto' to 'scale'.
1218
+
1219
+ coef0 : float, default=0.0
1220
+ Independent term in kernel function.
1221
+ It is only significant in 'poly' and 'sigmoid'.
1222
+
1223
+ tol : float, default=1e-3
1224
+ Tolerance for stopping criterion.
1225
+
1226
+ C : float, default=1.0
1227
+ Regularization parameter. The strength of the regularization is
1228
+ inversely proportional to C. Must be strictly positive.
1229
+ The penalty is a squared l2 penalty.
1230
+
1231
+ epsilon : float, default=0.1
1232
+ Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
1233
+ within which no penalty is associated in the training loss function
1234
+ with points predicted within a distance epsilon from the actual
1235
+ value. Must be non-negative.
1236
+
1237
+ shrinking : bool, default=True
1238
+ Whether to use the shrinking heuristic.
1239
+ See the :ref:`User Guide <shrinking_svm>`.
1240
+
1241
+ cache_size : float, default=200
1242
+ Specify the size of the kernel cache (in MB).
1243
+
1244
+ verbose : bool, default=False
1245
+ Enable verbose output. Note that this setting takes advantage of a
1246
+ per-process runtime setting in libsvm that, if enabled, may not work
1247
+ properly in a multithreaded context.
1248
+
1249
+ max_iter : int, default=-1
1250
+ Hard limit on iterations within solver, or -1 for no limit.
1251
+
1252
+ Attributes
1253
+ ----------
1254
+ coef_ : ndarray of shape (1, n_features)
1255
+ Weights assigned to the features (coefficients in the primal
1256
+ problem). This is only available in the case of a linear kernel.
1257
+
1258
+ `coef_` is readonly property derived from `dual_coef_` and
1259
+ `support_vectors_`.
1260
+
1261
+ dual_coef_ : ndarray of shape (1, n_SV)
1262
+ Coefficients of the support vector in the decision function.
1263
+
1264
+ fit_status_ : int
1265
+ 0 if correctly fitted, 1 otherwise (will raise warning)
1266
+
1267
+ intercept_ : ndarray of shape (1,)
1268
+ Constants in decision function.
1269
+
1270
+ n_features_in_ : int
1271
+ Number of features seen during :term:`fit`.
1272
+
1273
+ .. versionadded:: 0.24
1274
+
1275
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1276
+ Names of features seen during :term:`fit`. Defined only when `X`
1277
+ has feature names that are all strings.
1278
+
1279
+ .. versionadded:: 1.0
1280
+
1281
+ n_iter_ : int
1282
+ Number of iterations run by the optimization routine to fit the model.
1283
+
1284
+ .. versionadded:: 1.1
1285
+
1286
+ n_support_ : ndarray of shape (1,), dtype=int32
1287
+ Number of support vectors.
1288
+
1289
+ shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
1290
+ Array dimensions of training vector ``X``.
1291
+
1292
+ support_ : ndarray of shape (n_SV,)
1293
+ Indices of support vectors.
1294
+
1295
+ support_vectors_ : ndarray of shape (n_SV, n_features)
1296
+ Support vectors.
1297
+
1298
+ See Also
1299
+ --------
1300
+ NuSVR : Support Vector Machine for regression implemented using libsvm
1301
+ using a parameter to control the number of support vectors.
1302
+
1303
+ LinearSVR : Scalable Linear Support Vector Machine for regression
1304
+ implemented using liblinear.
1305
+
1306
+ References
1307
+ ----------
1308
+ .. [1] `LIBSVM: A Library for Support Vector Machines
1309
+ <http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf>`_
1310
+
1311
+ .. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector
1312
+ Machines and Comparisons to Regularized Likelihood Methods"
1313
+ <https://citeseerx.ist.psu.edu/doc_view/pid/42e5ed832d4310ce4378c44d05570439df28a393>`_
1314
+
1315
+ Examples
1316
+ --------
1317
+ >>> from sklearn.svm import SVR
1318
+ >>> from sklearn.pipeline import make_pipeline
1319
+ >>> from sklearn.preprocessing import StandardScaler
1320
+ >>> import numpy as np
1321
+ >>> n_samples, n_features = 10, 5
1322
+ >>> rng = np.random.RandomState(0)
1323
+ >>> y = rng.randn(n_samples)
1324
+ >>> X = rng.randn(n_samples, n_features)
1325
+ >>> regr = make_pipeline(StandardScaler(), SVR(C=1.0, epsilon=0.2))
1326
+ >>> regr.fit(X, y)
1327
+ Pipeline(steps=[('standardscaler', StandardScaler()),
1328
+ ('svr', SVR(epsilon=0.2))])
1329
+ """
1330
+
1331
+ _impl = "epsilon_svr"
1332
+
1333
+ _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints}
1334
+ for unused_param in ["class_weight", "nu", "probability", "random_state"]:
1335
+ _parameter_constraints.pop(unused_param)
1336
+
1337
+ def __init__(
1338
+ self,
1339
+ *,
1340
+ kernel="rbf",
1341
+ degree=3,
1342
+ gamma="scale",
1343
+ coef0=0.0,
1344
+ tol=1e-3,
1345
+ C=1.0,
1346
+ epsilon=0.1,
1347
+ shrinking=True,
1348
+ cache_size=200,
1349
+ verbose=False,
1350
+ max_iter=-1,
1351
+ ):
1352
+ super().__init__(
1353
+ kernel=kernel,
1354
+ degree=degree,
1355
+ gamma=gamma,
1356
+ coef0=coef0,
1357
+ tol=tol,
1358
+ C=C,
1359
+ nu=0.0,
1360
+ epsilon=epsilon,
1361
+ verbose=verbose,
1362
+ shrinking=shrinking,
1363
+ probability=False,
1364
+ cache_size=cache_size,
1365
+ class_weight=None,
1366
+ max_iter=max_iter,
1367
+ random_state=None,
1368
+ )
1369
+
1370
+ def _more_tags(self):
1371
+ return {
1372
+ "_xfail_checks": {
1373
+ "check_sample_weights_invariance": (
1374
+ "zero sample_weight is not equivalent to removing samples"
1375
+ ),
1376
+ }
1377
+ }
1378
+
1379
+
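As the `epsilon` description above suggests, points lying inside the tube do not become support vectors, so larger values of `epsilon` produce sparser SVR models. A small sketch (illustrative data; scikit-learn assumed installed):

import numpy as np
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = np.sort(rng.uniform(0, 5, size=(80, 1)), axis=0)
y = np.sin(X).ravel() + 0.1 * rng.randn(80)

for eps in (0.01, 0.1, 0.5):
    print(eps, SVR(epsilon=eps).fit(X, y).support_.size)   # fewer support vectors as eps grows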
1380
+ class NuSVR(RegressorMixin, BaseLibSVM):
1381
+ """Nu Support Vector Regression.
1382
+
1383
+ Similar to NuSVC, for regression, uses a parameter nu to control
1384
+ the number of support vectors. However, unlike NuSVC, where nu
1385
+ replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
1386
+
1387
+ The implementation is based on libsvm.
1388
+
1389
+ Read more in the :ref:`User Guide <svm_regression>`.
1390
+
1391
+ Parameters
1392
+ ----------
1393
+ nu : float, default=0.5
1394
+ An upper bound on the fraction of training errors and a lower bound of
1395
+ the fraction of support vectors. Should be in the interval (0, 1]. By
1396
+ default 0.5 will be taken.
1397
+
1398
+ C : float, default=1.0
1399
+ Penalty parameter C of the error term.
1400
+
1401
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
1402
+ default='rbf'
1403
+ Specifies the kernel type to be used in the algorithm.
1404
+ If none is given, 'rbf' will be used. If a callable is given it is
1405
+ used to precompute the kernel matrix.
1406
+
1407
+ degree : int, default=3
1408
+ Degree of the polynomial kernel function ('poly').
1409
+ Must be non-negative. Ignored by all other kernels.
1410
+
1411
+ gamma : {'scale', 'auto'} or float, default='scale'
1412
+ Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
1413
+
1414
+ - if ``gamma='scale'`` (default) is passed then it uses
1415
+ 1 / (n_features * X.var()) as value of gamma,
1416
+ - if 'auto', uses 1 / n_features
1417
+ - if float, must be non-negative.
1418
+
1419
+ .. versionchanged:: 0.22
1420
+ The default value of ``gamma`` changed from 'auto' to 'scale'.
1421
+
1422
+ coef0 : float, default=0.0
1423
+ Independent term in kernel function.
1424
+ It is only significant in 'poly' and 'sigmoid'.
1425
+
1426
+ shrinking : bool, default=True
1427
+ Whether to use the shrinking heuristic.
1428
+ See the :ref:`User Guide <shrinking_svm>`.
1429
+
1430
+ tol : float, default=1e-3
1431
+ Tolerance for stopping criterion.
1432
+
1433
+ cache_size : float, default=200
1434
+ Specify the size of the kernel cache (in MB).
1435
+
1436
+ verbose : bool, default=False
1437
+ Enable verbose output. Note that this setting takes advantage of a
1438
+ per-process runtime setting in libsvm that, if enabled, may not work
1439
+ properly in a multithreaded context.
1440
+
1441
+ max_iter : int, default=-1
1442
+ Hard limit on iterations within solver, or -1 for no limit.
1443
+
1444
+ Attributes
1445
+ ----------
1446
+ coef_ : ndarray of shape (1, n_features)
1447
+ Weights assigned to the features (coefficients in the primal
1448
+ problem). This is only available in the case of a linear kernel.
1449
+
1450
+ `coef_` is readonly property derived from `dual_coef_` and
1451
+ `support_vectors_`.
1452
+
1453
+ dual_coef_ : ndarray of shape (1, n_SV)
1454
+ Coefficients of the support vector in the decision function.
1455
+
1456
+ fit_status_ : int
1457
+ 0 if correctly fitted, 1 otherwise (will raise warning)
1458
+
1459
+ intercept_ : ndarray of shape (1,)
1460
+ Constants in decision function.
1461
+
1462
+ n_features_in_ : int
1463
+ Number of features seen during :term:`fit`.
1464
+
1465
+ .. versionadded:: 0.24
1466
+
1467
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1468
+ Names of features seen during :term:`fit`. Defined only when `X`
1469
+ has feature names that are all strings.
1470
+
1471
+ .. versionadded:: 1.0
1472
+
1473
+ n_iter_ : int
1474
+ Number of iterations run by the optimization routine to fit the model.
1475
+
1476
+ .. versionadded:: 1.1
1477
+
1478
+ n_support_ : ndarray of shape (1,), dtype=int32
1479
+ Number of support vectors.
1480
+
1481
+ shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
1482
+ Array dimensions of training vector ``X``.
1483
+
1484
+ support_ : ndarray of shape (n_SV,)
1485
+ Indices of support vectors.
1486
+
1487
+ support_vectors_ : ndarray of shape (n_SV, n_features)
1488
+ Support vectors.
1489
+
1490
+ See Also
1491
+ --------
1492
+ NuSVC : Support Vector Machine for classification implemented with libsvm
1493
+ with a parameter to control the number of support vectors.
1494
+
1495
+ SVR : Epsilon Support Vector Machine for regression implemented with
1496
+ libsvm.
1497
+
1498
+ References
1499
+ ----------
1500
+ .. [1] `LIBSVM: A Library for Support Vector Machines
1501
+ <http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf>`_
1502
+
1503
+ .. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector
1504
+ Machines and Comparisons to Regularized Likelihood Methods"
1505
+ <https://citeseerx.ist.psu.edu/doc_view/pid/42e5ed832d4310ce4378c44d05570439df28a393>`_
1506
+
1507
+ Examples
1508
+ --------
1509
+ >>> from sklearn.svm import NuSVR
1510
+ >>> from sklearn.pipeline import make_pipeline
1511
+ >>> from sklearn.preprocessing import StandardScaler
1512
+ >>> import numpy as np
1513
+ >>> n_samples, n_features = 10, 5
1514
+ >>> np.random.seed(0)
1515
+ >>> y = np.random.randn(n_samples)
1516
+ >>> X = np.random.randn(n_samples, n_features)
1517
+ >>> regr = make_pipeline(StandardScaler(), NuSVR(C=1.0, nu=0.1))
1518
+ >>> regr.fit(X, y)
1519
+ Pipeline(steps=[('standardscaler', StandardScaler()),
1520
+ ('nusvr', NuSVR(nu=0.1))])
1521
+ """
1522
+
1523
+ _impl = "nu_svr"
1524
+
1525
+ _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints}
1526
+ for unused_param in ["class_weight", "epsilon", "probability", "random_state"]:
1527
+ _parameter_constraints.pop(unused_param)
1528
+
1529
+ def __init__(
1530
+ self,
1531
+ *,
1532
+ nu=0.5,
1533
+ C=1.0,
1534
+ kernel="rbf",
1535
+ degree=3,
1536
+ gamma="scale",
1537
+ coef0=0.0,
1538
+ shrinking=True,
1539
+ tol=1e-3,
1540
+ cache_size=200,
1541
+ verbose=False,
1542
+ max_iter=-1,
1543
+ ):
1544
+ super().__init__(
1545
+ kernel=kernel,
1546
+ degree=degree,
1547
+ gamma=gamma,
1548
+ coef0=coef0,
1549
+ tol=tol,
1550
+ C=C,
1551
+ nu=nu,
1552
+ epsilon=0.0,
1553
+ shrinking=shrinking,
1554
+ probability=False,
1555
+ cache_size=cache_size,
1556
+ class_weight=None,
1557
+ verbose=verbose,
1558
+ max_iter=max_iter,
1559
+ random_state=None,
1560
+ )
1561
+
1562
+ def _more_tags(self):
1563
+ return {
1564
+ "_xfail_checks": {
1565
+ "check_sample_weights_invariance": (
1566
+ "zero sample_weight is not equivalent to removing samples"
1567
+ ),
1568
+ }
1569
+ }
1570
+
1571
+
1572
+ class OneClassSVM(OutlierMixin, BaseLibSVM):
1573
+ """Unsupervised Outlier Detection.
1574
+
1575
+ Estimate the support of a high-dimensional distribution.
1576
+
1577
+ The implementation is based on libsvm.
1578
+
1579
+ Read more in the :ref:`User Guide <outlier_detection>`.
1580
+
1581
+ Parameters
1582
+ ----------
1583
+ kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
1584
+ default='rbf'
1585
+ Specifies the kernel type to be used in the algorithm.
1586
+ If none is given, 'rbf' will be used. If a callable is given it is
1587
+ used to precompute the kernel matrix.
1588
+
1589
+ degree : int, default=3
1590
+ Degree of the polynomial kernel function ('poly').
1591
+ Must be non-negative. Ignored by all other kernels.
1592
+
1593
+ gamma : {'scale', 'auto'} or float, default='scale'
1594
+ Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
1595
+
1596
+ - if ``gamma='scale'`` (default) is passed then it uses
1597
+ 1 / (n_features * X.var()) as value of gamma,
1598
+ - if 'auto', uses 1 / n_features
1599
+ - if float, must be non-negative.
1600
+
1601
+ .. versionchanged:: 0.22
1602
+ The default value of ``gamma`` changed from 'auto' to 'scale'.
1603
+
1604
+ coef0 : float, default=0.0
1605
+ Independent term in kernel function.
1606
+ It is only significant in 'poly' and 'sigmoid'.
1607
+
1608
+ tol : float, default=1e-3
1609
+ Tolerance for stopping criterion.
1610
+
1611
+ nu : float, default=0.5
1612
+ An upper bound on the fraction of training
1613
+ errors and a lower bound of the fraction of support
1614
+ vectors. Should be in the interval (0, 1]. By default 0.5
1615
+ will be taken.
1616
+
1617
+ shrinking : bool, default=True
1618
+ Whether to use the shrinking heuristic.
1619
+ See the :ref:`User Guide <shrinking_svm>`.
1620
+
1621
+ cache_size : float, default=200
1622
+ Specify the size of the kernel cache (in MB).
1623
+
1624
+ verbose : bool, default=False
1625
+ Enable verbose output. Note that this setting takes advantage of a
1626
+ per-process runtime setting in libsvm that, if enabled, may not work
1627
+ properly in a multithreaded context.
1628
+
1629
+ max_iter : int, default=-1
1630
+ Hard limit on iterations within solver, or -1 for no limit.
1631
+
1632
+ Attributes
1633
+ ----------
1634
+ coef_ : ndarray of shape (1, n_features)
1635
+ Weights assigned to the features (coefficients in the primal
1636
+ problem). This is only available in the case of a linear kernel.
1637
+
1638
+ `coef_` is a readonly property derived from `dual_coef_` and
1639
+ `support_vectors_`.
1640
+
1641
+ dual_coef_ : ndarray of shape (1, n_SV)
1642
+ Coefficients of the support vectors in the decision function.
1643
+
1644
+ fit_status_ : int
1645
+ 0 if correctly fitted, 1 otherwise (will raise warning)
1646
+
1647
+ intercept_ : ndarray of shape (1,)
1648
+ Constant in the decision function.
1649
+
1650
+ n_features_in_ : int
1651
+ Number of features seen during :term:`fit`.
1652
+
1653
+ .. versionadded:: 0.24
1654
+
1655
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1656
+ Names of features seen during :term:`fit`. Defined only when `X`
1657
+ has feature names that are all strings.
1658
+
1659
+ .. versionadded:: 1.0
1660
+
1661
+ n_iter_ : int
1662
+ Number of iterations run by the optimization routine to fit the model.
1663
+
1664
+ .. versionadded:: 1.1
1665
+
1666
+ n_support_ : ndarray of shape (n_classes,), dtype=int32
1667
+ Number of support vectors for each class.
1668
+
1669
+ offset_ : float
1670
+ Offset used to define the decision function from the raw scores.
1671
+ We have the relation: decision_function = score_samples - `offset_`.
1672
+ The offset is the opposite of `intercept_` and is provided for
1673
+ consistency with other outlier detection algorithms.
1674
+
1675
+ .. versionadded:: 0.20
1676
+
1677
+ shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
1678
+ Array dimensions of training vector ``X``.
1679
+
1680
+ support_ : ndarray of shape (n_SV,)
1681
+ Indices of support vectors.
1682
+
1683
+ support_vectors_ : ndarray of shape (n_SV, n_features)
1684
+ Support vectors.
1685
+
1686
+ See Also
1687
+ --------
1688
+ sklearn.linear_model.SGDOneClassSVM : Solves linear One-Class SVM using
1689
+ Stochastic Gradient Descent.
1690
+ sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection using
1691
+ Local Outlier Factor (LOF).
1692
+ sklearn.ensemble.IsolationForest : Isolation Forest Algorithm.
1693
+
1694
+ Examples
1695
+ --------
1696
+ >>> from sklearn.svm import OneClassSVM
1697
+ >>> X = [[0], [0.44], [0.45], [0.46], [1]]
1698
+ >>> clf = OneClassSVM(gamma='auto').fit(X)
1699
+ >>> clf.predict(X)
1700
+ array([-1, 1, 1, 1, -1])
1701
+ >>> clf.score_samples(X)
1702
+ array([1.7798..., 2.0547..., 2.0556..., 2.0561..., 1.7332...])
1703
+ """
1704
+
1705
+ _impl = "one_class"
1706
+
1707
+ _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints}
1708
+ for unused_param in ["C", "class_weight", "epsilon", "probability", "random_state"]:
1709
+ _parameter_constraints.pop(unused_param)
1710
+
1711
+ def __init__(
1712
+ self,
1713
+ *,
1714
+ kernel="rbf",
1715
+ degree=3,
1716
+ gamma="scale",
1717
+ coef0=0.0,
1718
+ tol=1e-3,
1719
+ nu=0.5,
1720
+ shrinking=True,
1721
+ cache_size=200,
1722
+ verbose=False,
1723
+ max_iter=-1,
1724
+ ):
1725
+ super().__init__(
1726
+ kernel,
1727
+ degree,
1728
+ gamma,
1729
+ coef0,
1730
+ tol,
1731
+ 0.0,
1732
+ nu,
1733
+ 0.0,
1734
+ shrinking,
1735
+ False,
1736
+ cache_size,
1737
+ None,
1738
+ verbose,
1739
+ max_iter,
1740
+ random_state=None,
1741
+ )
1742
+
1743
+ def fit(self, X, y=None, sample_weight=None):
1744
+ """Detect the soft boundary of the set of samples X.
1745
+
1746
+ Parameters
1747
+ ----------
1748
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1749
+ Set of samples, where `n_samples` is the number of samples and
1750
+ `n_features` is the number of features.
1751
+
1752
+ y : Ignored
1753
+ Not used, present for API consistency by convention.
1754
+
1755
+ sample_weight : array-like of shape (n_samples,), default=None
1756
+ Per-sample weights. Rescale C per sample. Higher weights
1757
+ force the classifier to put more emphasis on these points.
1758
+
1759
+ Returns
1760
+ -------
1761
+ self : object
1762
+ Fitted estimator.
1763
+
1764
+ Notes
1765
+ -----
1766
+ If X is not a C-ordered contiguous array it is copied.
1767
+ """
1768
+ super().fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight)
1769
+ self.offset_ = -self._intercept_
1770
+ return self
1771
+
1772
+ def decision_function(self, X):
1773
+ """Signed distance to the separating hyperplane.
1774
+
1775
+ Signed distance is positive for an inlier and negative for an outlier.
1776
+
1777
+ Parameters
1778
+ ----------
1779
+ X : array-like of shape (n_samples, n_features)
1780
+ The data matrix.
1781
+
1782
+ Returns
1783
+ -------
1784
+ dec : ndarray of shape (n_samples,)
1785
+ Returns the decision function of the samples.
1786
+ """
1787
+ dec = self._decision_function(X).ravel()
1788
+ return dec
1789
+
1790
+ def score_samples(self, X):
1791
+ """Raw scoring function of the samples.
1792
+
1793
+ Parameters
1794
+ ----------
1795
+ X : array-like of shape (n_samples, n_features)
1796
+ The data matrix.
1797
+
1798
+ Returns
1799
+ -------
1800
+ score_samples : ndarray of shape (n_samples,)
1801
+ Returns the (unshifted) scoring function of the samples.
1802
+ """
1803
+ return self.decision_function(X) + self.offset_
1804
+
1805
+ def predict(self, X):
1806
+ """Perform classification on samples in X.
1807
+
1808
+ For a one-class model, +1 or -1 is returned.
1809
+
1810
+ Parameters
1811
+ ----------
1812
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
1813
+ (n_samples_test, n_samples_train)
1814
+ For kernel="precomputed", the expected shape of X is
1815
+ (n_samples_test, n_samples_train).
1816
+
1817
+ Returns
1818
+ -------
1819
+ y_pred : ndarray of shape (n_samples,)
1820
+ Class labels for samples in X.
1821
+ """
1822
+ y = super().predict(X)
1823
+ return np.asarray(y, dtype=np.intp)
1824
+
1825
+ def _more_tags(self):
1826
+ return {
1827
+ "_xfail_checks": {
1828
+ "check_sample_weights_invariance": (
1829
+ "zero sample_weight is not equivalent to removing samples"
1830
+ ),
1831
+ }
1832
+ }
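The `offset_` attribute documented above satisfies the stated identity decision_function = score_samples - offset_, and `score_samples` is implemented just above as the decision function plus `offset_`. A minimal illustrative sketch that checks this identity on the toy data from the docstring example:

    import numpy as np
    from sklearn.svm import OneClassSVM

    # Toy data reused from the OneClassSVM docstring example above.
    X = [[0], [0.44], [0.45], [0.46], [1]]
    clf = OneClassSVM(gamma="auto").fit(X)

    # decision_function = score_samples - offset_, as documented.
    assert np.allclose(clf.decision_function(X), clf.score_samples(X) - clf.offset_)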
venv/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (543 kB).
venv/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (969 kB).
venv/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (928 kB).
venv/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (68.2 kB).
venv/lib/python3.10/site-packages/sklearn/svm/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc ADDED
Binary file (5.02 kB).
venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc ADDED
Binary file (11.1 kB).
venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_svm.cpython-310.pyc ADDED
Binary file (34.9 kB).
venv/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py ADDED
@@ -0,0 +1,142 @@
1
+ import numpy as np
2
+ import pytest
3
+ from scipy import stats
4
+
5
+ from sklearn.linear_model import LogisticRegression
6
+ from sklearn.svm import LinearSVC
7
+ from sklearn.svm._bounds import l1_min_c
8
+ from sklearn.svm._newrand import bounded_rand_int_wrap, set_seed_wrap
9
+ from sklearn.utils.fixes import CSR_CONTAINERS
10
+
11
+ dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
12
+
13
+ Y1 = [0, 1, 1, 1]
14
+ Y2 = [2, 1, 0, 0]
15
+
16
+
17
+ @pytest.mark.parametrize("X_container", CSR_CONTAINERS + [np.array])
18
+ @pytest.mark.parametrize("loss", ["squared_hinge", "log"])
19
+ @pytest.mark.parametrize("Y_label", ["two-classes", "multi-class"])
20
+ @pytest.mark.parametrize("intercept_label", ["no-intercept", "fit-intercept"])
21
+ def test_l1_min_c(X_container, loss, Y_label, intercept_label):
22
+ Ys = {"two-classes": Y1, "multi-class": Y2}
23
+ intercepts = {
24
+ "no-intercept": {"fit_intercept": False},
25
+ "fit-intercept": {"fit_intercept": True, "intercept_scaling": 10},
26
+ }
27
+
28
+ X = X_container(dense_X)
29
+ Y = Ys[Y_label]
30
+ intercept_params = intercepts[intercept_label]
31
+ check_l1_min_c(X, Y, loss, **intercept_params)
32
+
33
+
34
+ def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=1.0):
35
+ min_c = l1_min_c(
36
+ X,
37
+ y,
38
+ loss=loss,
39
+ fit_intercept=fit_intercept,
40
+ intercept_scaling=intercept_scaling,
41
+ )
42
+
43
+ clf = {
44
+ "log": LogisticRegression(penalty="l1", solver="liblinear"),
45
+ "squared_hinge": LinearSVC(loss="squared_hinge", penalty="l1", dual=False),
46
+ }[loss]
47
+
48
+ clf.fit_intercept = fit_intercept
49
+ clf.intercept_scaling = intercept_scaling
50
+
51
+ clf.C = min_c
52
+ clf.fit(X, y)
53
+ assert (np.asarray(clf.coef_) == 0).all()
54
+ assert (np.asarray(clf.intercept_) == 0).all()
55
+
56
+ clf.C = min_c * 1.01
57
+ clf.fit(X, y)
58
+ assert (np.asarray(clf.coef_) != 0).any() or (np.asarray(clf.intercept_) != 0).any()
59
+
60
+
61
+ def test_ill_posed_min_c():
62
+ X = [[0, 0], [0, 0]]
63
+ y = [0, 1]
64
+ with pytest.raises(ValueError):
65
+ l1_min_c(X, y)
66
+
67
+
68
+ _MAX_UNSIGNED_INT = 4294967295
69
+
70
+
71
+ def test_newrand_default():
72
+ """Test that bounded_rand_int_wrap without seeding respects the range
73
+
74
+ Note this test should pass either if executed alone, or in conjunctions
75
+ with other tests that call set_seed explicit in any order: it checks
76
+ invariants on the RNG instead of specific values.
77
+ """
78
+ generated = [bounded_rand_int_wrap(100) for _ in range(10)]
79
+ assert all(0 <= x < 100 for x in generated)
80
+ assert not all(x == generated[0] for x in generated)
81
+
82
+
83
+ @pytest.mark.parametrize("seed, expected", [(0, 54), (_MAX_UNSIGNED_INT, 9)])
84
+ def test_newrand_set_seed(seed, expected):
85
+ """Test that `set_seed` produces deterministic results"""
86
+ set_seed_wrap(seed)
87
+ generated = bounded_rand_int_wrap(100)
88
+ assert generated == expected
89
+
90
+
91
+ @pytest.mark.parametrize("seed", [-1, _MAX_UNSIGNED_INT + 1])
92
+ def test_newrand_set_seed_overflow(seed):
93
+ """Test that `set_seed_wrap` is defined for unsigned 32bits ints"""
94
+ with pytest.raises(OverflowError):
95
+ set_seed_wrap(seed)
96
+
97
+
98
+ @pytest.mark.parametrize("range_, n_pts", [(_MAX_UNSIGNED_INT, 10000), (100, 25)])
99
+ def test_newrand_bounded_rand_int(range_, n_pts):
100
+ """Test that `bounded_rand_int` follows a uniform distribution"""
101
+ # XXX: this test is very seed sensitive: either it is wrong (too strict?)
102
+ # or the wrapped RNG is not uniform enough, at least on some platforms.
103
+ set_seed_wrap(42)
104
+ n_iter = 100
105
+ ks_pvals = []
106
+ uniform_dist = stats.uniform(loc=0, scale=range_)
107
+ # perform multiple samplings to make chance of outlier sampling negligible
108
+ for _ in range(n_iter):
109
+ # Deterministic random sampling
110
+ sample = [bounded_rand_int_wrap(range_) for _ in range(n_pts)]
111
+ res = stats.kstest(sample, uniform_dist.cdf)
112
+ ks_pvals.append(res.pvalue)
113
+ # Null hypothesis = samples come from an uniform distribution.
114
+ # Under the null hypothesis, p-values should be uniformly distributed
115
+ # and not concentrated on low values
116
+ # (this may seem counter-intuitive but is backed by multiple refs)
117
+ # So we can do two checks:
118
+
119
+ # (1) check uniformity of p-values
120
+ uniform_p_vals_dist = stats.uniform(loc=0, scale=1)
121
+ res_pvals = stats.kstest(ks_pvals, uniform_p_vals_dist.cdf)
122
+ assert res_pvals.pvalue > 0.05, (
123
+ "Null hypothesis rejected: generated random numbers are not uniform."
124
+ " Details: the (meta) p-value of the test of uniform distribution"
125
+ f" of p-values is {res_pvals.pvalue} which is not > 0.05"
126
+ )
127
+
128
+ # (2) (safety belt) check that 90% of p-values are above 0.05
129
+ min_10pct_pval = np.percentile(ks_pvals, q=10)
130
+ # lower 10th quantile pvalue <= 0.05 means that the test rejects the
131
+ # null hypothesis that the sample came from the uniform distribution
132
+ assert min_10pct_pval > 0.05, (
133
+ "Null hypothesis rejected: generated random numbers are not uniform. "
134
+ f"Details: lower 10th quantile p-value of {min_10pct_pval} not > 0.05."
135
+ )
136
+
137
+
138
+ @pytest.mark.parametrize("range_", [-1, _MAX_UNSIGNED_INT + 1])
139
+ def test_newrand_bounded_rand_int_limits(range_):
140
+ """Test that `bounded_rand_int_wrap` is defined for unsigned 32bits ints"""
141
+ with pytest.raises(OverflowError):
142
+ bounded_rand_int_wrap(range_)
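The `check_l1_min_c` helper above captures the contract of `l1_min_c`: at `C = l1_min_c(X, y)` the L1-penalized model is entirely null, while any slightly larger `C` produces a non-null model. A minimal standalone sketch of that usage, reusing the toy data from the test module (names here are illustrative):

    import numpy as np
    from sklearn.svm import LinearSVC
    from sklearn.svm._bounds import l1_min_c

    # Same toy data as dense_X / Y1 in the test module above.
    X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
    y = [0, 1, 1, 1]

    min_c = l1_min_c(X, y, loss="squared_hinge")
    clf = LinearSVC(loss="squared_hinge", penalty="l1", dual=False, C=min_c * 1.01).fit(X, y)
    # Just above the bound, at least one coefficient (or the intercept) is non-zero.
    assert (np.asarray(clf.coef_) != 0).any() or (np.asarray(clf.intercept_) != 0).any()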
venv/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py ADDED
@@ -0,0 +1,493 @@
1
+ import numpy as np
2
+ import pytest
3
+ from scipy import sparse
4
+
5
+ from sklearn import base, datasets, linear_model, svm
6
+ from sklearn.datasets import load_digits, make_blobs, make_classification
7
+ from sklearn.exceptions import ConvergenceWarning
8
+ from sklearn.svm.tests import test_svm
9
+ from sklearn.utils._testing import (
10
+ assert_allclose,
11
+ assert_array_almost_equal,
12
+ assert_array_equal,
13
+ ignore_warnings,
14
+ skip_if_32bit,
15
+ )
16
+ from sklearn.utils.extmath import safe_sparse_dot
17
+ from sklearn.utils.fixes import (
18
+ CSR_CONTAINERS,
19
+ DOK_CONTAINERS,
20
+ LIL_CONTAINERS,
21
+ )
22
+
23
+ # test sample 1
24
+ X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
25
+ Y = [1, 1, 1, 2, 2, 2]
26
+ T = np.array([[-1, -1], [2, 2], [3, 2]])
27
+ true_result = [1, 2, 2]
28
+
29
+ # test sample 2
30
+ X2 = np.array(
31
+ [
32
+ [0, 0, 0],
33
+ [1, 1, 1],
34
+ [2, 0, 0],
35
+ [0, 0, 2],
36
+ [3, 3, 3],
37
+ ]
38
+ )
39
+ Y2 = [1, 2, 2, 2, 3]
40
+ T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
41
+ true_result2 = [1, 2, 3]
42
+
43
+ iris = datasets.load_iris()
44
+ rng = np.random.RandomState(0)
45
+ perm = rng.permutation(iris.target.size)
46
+ iris.data = iris.data[perm]
47
+ iris.target = iris.target[perm]
48
+
49
+ X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
50
+
51
+
52
+ def check_svm_model_equal(dense_svm, X_train, y_train, X_test):
53
+ # Use the original svm model for dense fit and clone an exactly same
54
+ # svm model for sparse fit
55
+ sparse_svm = base.clone(dense_svm)
56
+
57
+ dense_svm.fit(X_train.toarray(), y_train)
58
+ if sparse.issparse(X_test):
59
+ X_test_dense = X_test.toarray()
60
+ else:
61
+ X_test_dense = X_test
62
+ sparse_svm.fit(X_train, y_train)
63
+ assert sparse.issparse(sparse_svm.support_vectors_)
64
+ assert sparse.issparse(sparse_svm.dual_coef_)
65
+ assert_allclose(dense_svm.support_vectors_, sparse_svm.support_vectors_.toarray())
66
+ assert_allclose(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
67
+ if dense_svm.kernel == "linear":
68
+ assert sparse.issparse(sparse_svm.coef_)
69
+ assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
70
+ assert_allclose(dense_svm.support_, sparse_svm.support_)
71
+ assert_allclose(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
72
+
73
+ assert_array_almost_equal(
74
+ dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test)
75
+ )
76
+ assert_array_almost_equal(
77
+ dense_svm.decision_function(X_test_dense),
78
+ sparse_svm.decision_function(X_test_dense),
79
+ )
80
+ if isinstance(dense_svm, svm.OneClassSVM):
81
+ msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
82
+ else:
83
+ assert_array_almost_equal(
84
+ dense_svm.predict_proba(X_test_dense),
85
+ sparse_svm.predict_proba(X_test),
86
+ decimal=4,
87
+ )
88
+ msg = "cannot use sparse input in 'SVC' trained on dense data"
89
+ if sparse.issparse(X_test):
90
+ with pytest.raises(ValueError, match=msg):
91
+ dense_svm.predict(X_test)
92
+
93
+
94
+ @skip_if_32bit
95
+ @pytest.mark.parametrize(
96
+ "X_train, y_train, X_test",
97
+ [
98
+ [X, Y, T],
99
+ [X2, Y2, T2],
100
+ [X_blobs[:80], y_blobs[:80], X_blobs[80:]],
101
+ [iris.data, iris.target, iris.data],
102
+ ],
103
+ )
104
+ @pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"])
105
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS)
106
+ def test_svc(X_train, y_train, X_test, kernel, sparse_container):
107
+ """Check that sparse SVC gives the same result as SVC."""
108
+ X_train = sparse_container(X_train)
109
+
110
+ clf = svm.SVC(
111
+ gamma=1,
112
+ kernel=kernel,
113
+ probability=True,
114
+ random_state=0,
115
+ decision_function_shape="ovo",
116
+ )
117
+ check_svm_model_equal(clf, X_train, y_train, X_test)
118
+
119
+
120
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
121
+ def test_unsorted_indices(csr_container):
122
+ # test that the result with sorted and unsorted indices in csr is the same
123
+ # we use a subset of digits as iris, blobs or make_classification didn't
124
+ # show the problem
125
+ X, y = load_digits(return_X_y=True)
126
+ X_test = csr_container(X[50:100])
127
+ X, y = X[:50], y[:50]
128
+
129
+ X_sparse = csr_container(X)
130
+ coef_dense = (
131
+ svm.SVC(kernel="linear", probability=True, random_state=0).fit(X, y).coef_
132
+ )
133
+ sparse_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit(
134
+ X_sparse, y
135
+ )
136
+ coef_sorted = sparse_svc.coef_
137
+ # make sure dense and sparse SVM give the same result
138
+ assert_allclose(coef_dense, coef_sorted.toarray())
139
+
140
+ # reverse each row's indices
141
+ def scramble_indices(X):
142
+ new_data = []
143
+ new_indices = []
144
+ for i in range(1, len(X.indptr)):
145
+ row_slice = slice(*X.indptr[i - 1 : i + 1])
146
+ new_data.extend(X.data[row_slice][::-1])
147
+ new_indices.extend(X.indices[row_slice][::-1])
148
+ return csr_container((new_data, new_indices, X.indptr), shape=X.shape)
149
+
150
+ X_sparse_unsorted = scramble_indices(X_sparse)
151
+ X_test_unsorted = scramble_indices(X_test)
152
+
153
+ assert not X_sparse_unsorted.has_sorted_indices
154
+ assert not X_test_unsorted.has_sorted_indices
155
+
156
+ unsorted_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit(
157
+ X_sparse_unsorted, y
158
+ )
159
+ coef_unsorted = unsorted_svc.coef_
160
+ # make sure unsorted indices give same result
161
+ assert_allclose(coef_unsorted.toarray(), coef_sorted.toarray())
162
+ assert_allclose(
163
+ sparse_svc.predict_proba(X_test_unsorted), sparse_svc.predict_proba(X_test)
164
+ )
165
+
166
+
167
+ @pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
168
+ def test_svc_with_custom_kernel(lil_container):
169
+ def kfunc(x, y):
170
+ return safe_sparse_dot(x, y.T)
171
+
172
+ X_sp = lil_container(X)
173
+ clf_lin = svm.SVC(kernel="linear").fit(X_sp, Y)
174
+ clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
175
+ assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
176
+
177
+
178
+ @skip_if_32bit
179
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
180
+ @pytest.mark.parametrize("kernel", ["linear", "poly", "rbf"])
181
+ def test_svc_iris(csr_container, kernel):
182
+ # Test the sparse SVC with the iris dataset
183
+ iris_data_sp = csr_container(iris.data)
184
+
185
+ sp_clf = svm.SVC(kernel=kernel).fit(iris_data_sp, iris.target)
186
+ clf = svm.SVC(kernel=kernel).fit(iris.data, iris.target)
187
+
188
+ assert_allclose(clf.support_vectors_, sp_clf.support_vectors_.toarray())
189
+ assert_allclose(clf.dual_coef_, sp_clf.dual_coef_.toarray())
190
+ assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp))
191
+ if kernel == "linear":
192
+ assert_allclose(clf.coef_, sp_clf.coef_.toarray())
193
+
194
+
195
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
196
+ def test_sparse_decision_function(csr_container):
197
+ # Test decision_function
198
+
199
+ # Sanity check, test that decision_function implemented in python
200
+ # returns the same as the one in libsvm
201
+
202
+ # multi class:
203
+ iris_data_sp = csr_container(iris.data)
204
+ svc = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo")
205
+ clf = svc.fit(iris_data_sp, iris.target)
206
+
207
+ dec = safe_sparse_dot(iris_data_sp, clf.coef_.T) + clf.intercept_
208
+
209
+ assert_allclose(dec, clf.decision_function(iris_data_sp))
210
+
211
+ # binary:
212
+ clf.fit(X, Y)
213
+ dec = np.dot(X, clf.coef_.T) + clf.intercept_
214
+ prediction = clf.predict(X)
215
+ assert_allclose(dec.ravel(), clf.decision_function(X))
216
+ assert_allclose(
217
+ prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()]
218
+ )
219
+ expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0])
220
+ assert_array_almost_equal(clf.decision_function(X), expected, decimal=2)
221
+
222
+
223
+ @pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
224
+ def test_error(lil_container):
225
+ # Test that it gives proper exception on deficient input
226
+ clf = svm.SVC()
227
+ X_sp = lil_container(X)
228
+
229
+ Y2 = Y[:-1] # wrong dimensions for labels
230
+ with pytest.raises(ValueError):
231
+ clf.fit(X_sp, Y2)
232
+
233
+ clf.fit(X_sp, Y)
234
+ assert_array_equal(clf.predict(T), true_result)
235
+
236
+
237
+ @pytest.mark.parametrize(
238
+ "lil_container, dok_container", zip(LIL_CONTAINERS, DOK_CONTAINERS)
239
+ )
240
+ def test_linearsvc(lil_container, dok_container):
241
+ # Similar to test_SVC
242
+ X_sp = lil_container(X)
243
+ X2_sp = dok_container(X2)
244
+
245
+ clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y)
246
+ sp_clf = svm.LinearSVC(dual="auto", random_state=0).fit(X_sp, Y)
247
+
248
+ assert sp_clf.fit_intercept
249
+
250
+ assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
251
+ assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
252
+
253
+ assert_allclose(clf.predict(X), sp_clf.predict(X_sp))
254
+
255
+ clf.fit(X2, Y2)
256
+ sp_clf.fit(X2_sp, Y2)
257
+
258
+ assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
259
+ assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
260
+
261
+
262
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
263
+ def test_linearsvc_iris(csr_container):
264
+ # Test the sparse LinearSVC with the iris dataset
265
+ iris_data_sp = csr_container(iris.data)
266
+
267
+ sp_clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris_data_sp, iris.target)
268
+ clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, iris.target)
269
+
270
+ assert clf.fit_intercept == sp_clf.fit_intercept
271
+
272
+ assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
273
+ assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
274
+ assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp))
275
+
276
+ # check decision_function
277
+ pred = np.argmax(sp_clf.decision_function(iris_data_sp), axis=1)
278
+ assert_allclose(pred, clf.predict(iris.data))
279
+
280
+ # sparsify the coefficients on both models and check that they still
281
+ # produce the same results
282
+ clf.sparsify()
283
+ assert_array_equal(pred, clf.predict(iris_data_sp))
284
+ sp_clf.sparsify()
285
+ assert_array_equal(pred, sp_clf.predict(iris_data_sp))
286
+
287
+
288
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
289
+ def test_weight(csr_container):
290
+ # Test class weights
291
+ X_, y_ = make_classification(
292
+ n_samples=200, n_features=100, weights=[0.833, 0.167], random_state=0
293
+ )
294
+
295
+ X_ = csr_container(X_)
296
+ for clf in (
297
+ linear_model.LogisticRegression(),
298
+ svm.LinearSVC(dual="auto", random_state=0),
299
+ svm.SVC(),
300
+ ):
301
+ clf.set_params(class_weight={0: 5})
302
+ clf.fit(X_[:180], y_[:180])
303
+ y_pred = clf.predict(X_[180:])
304
+ assert np.sum(y_pred == y_[180:]) >= 11
305
+
306
+
307
+ @pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
308
+ def test_sample_weights(lil_container):
309
+ # Test weights on individual samples
310
+ X_sp = lil_container(X)
311
+
312
+ clf = svm.SVC()
313
+ clf.fit(X_sp, Y)
314
+ assert_array_equal(clf.predict([X[2]]), [1.0])
315
+
316
+ sample_weight = [0.1] * 3 + [10] * 3
317
+ clf.fit(X_sp, Y, sample_weight=sample_weight)
318
+ assert_array_equal(clf.predict([X[2]]), [2.0])
319
+
320
+
321
+ def test_sparse_liblinear_intercept_handling():
322
+ # Test that sparse liblinear honours intercept_scaling param
323
+ test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
324
+
325
+
326
+ @pytest.mark.parametrize(
327
+ "X_train, y_train, X_test",
328
+ [
329
+ [X, None, T],
330
+ [X2, None, T2],
331
+ [X_blobs[:80], None, X_blobs[80:]],
332
+ [iris.data, None, iris.data],
333
+ ],
334
+ )
335
+ @pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"])
336
+ @pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS)
337
+ @skip_if_32bit
338
+ def test_sparse_oneclasssvm(X_train, y_train, X_test, kernel, sparse_container):
339
+ # Check that sparse OneClassSVM gives the same result as dense OneClassSVM
340
+ X_train = sparse_container(X_train)
341
+
342
+ clf = svm.OneClassSVM(gamma=1, kernel=kernel)
343
+ check_svm_model_equal(clf, X_train, y_train, X_test)
344
+
345
+
346
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
347
+ def test_sparse_realdata(csr_container):
348
+ # Test on a subset from the 20newsgroups dataset.
349
+ # This catches some bugs if input is not correctly converted into
350
+ # sparse format or weights are not correctly initialized.
351
+ data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
352
+
353
+ # SVC does not support large sparse, so we specify int32 indices
354
+ # In this case, `csr_matrix` automatically uses int32 regardless of the dtypes of
355
+ # `indices` and `indptr` but `csr_array` may or may not use the same dtype as
356
+ # `indices` and `indptr`, which would be int64 if not specified
357
+ indices = np.array([6, 5, 35, 31], dtype=np.int32)
358
+ indptr = np.array([0] * 8 + [1] * 32 + [2] * 38 + [4] * 3, dtype=np.int32)
359
+
360
+ X = csr_container((data, indices, indptr))
361
+ y = np.array(
362
+ [
363
+ 1.0,
364
+ 0.0,
365
+ 2.0,
366
+ 2.0,
367
+ 1.0,
368
+ 1.0,
369
+ 1.0,
370
+ 2.0,
371
+ 2.0,
372
+ 0.0,
373
+ 1.0,
374
+ 2.0,
375
+ 2.0,
376
+ 0.0,
377
+ 2.0,
378
+ 0.0,
379
+ 3.0,
380
+ 0.0,
381
+ 3.0,
382
+ 0.0,
383
+ 1.0,
384
+ 1.0,
385
+ 3.0,
386
+ 2.0,
387
+ 3.0,
388
+ 2.0,
389
+ 0.0,
390
+ 3.0,
391
+ 1.0,
392
+ 0.0,
393
+ 2.0,
394
+ 1.0,
395
+ 2.0,
396
+ 0.0,
397
+ 1.0,
398
+ 0.0,
399
+ 2.0,
400
+ 3.0,
401
+ 1.0,
402
+ 3.0,
403
+ 0.0,
404
+ 1.0,
405
+ 0.0,
406
+ 0.0,
407
+ 2.0,
408
+ 0.0,
409
+ 1.0,
410
+ 2.0,
411
+ 2.0,
412
+ 2.0,
413
+ 3.0,
414
+ 2.0,
415
+ 0.0,
416
+ 3.0,
417
+ 2.0,
418
+ 1.0,
419
+ 2.0,
420
+ 3.0,
421
+ 2.0,
422
+ 2.0,
423
+ 0.0,
424
+ 1.0,
425
+ 0.0,
426
+ 1.0,
427
+ 2.0,
428
+ 3.0,
429
+ 0.0,
430
+ 0.0,
431
+ 2.0,
432
+ 2.0,
433
+ 1.0,
434
+ 3.0,
435
+ 1.0,
436
+ 1.0,
437
+ 0.0,
438
+ 1.0,
439
+ 2.0,
440
+ 1.0,
441
+ 1.0,
442
+ 3.0,
443
+ ]
444
+ )
445
+
446
+ clf = svm.SVC(kernel="linear").fit(X.toarray(), y)
447
+ sp_clf = svm.SVC(kernel="linear").fit(X.tocoo(), y)
448
+
449
+ assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
450
+ assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
451
+
452
+
453
+ @pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
454
+ def test_sparse_svc_clone_with_callable_kernel(lil_container):
455
+ # Test that the "dense_fit" is called even though we use sparse input
456
+ # meaning that everything works fine.
457
+ a = svm.SVC(C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0)
458
+ b = base.clone(a)
459
+
460
+ X_sp = lil_container(X)
461
+ b.fit(X_sp, Y)
462
+ pred = b.predict(X_sp)
463
+ b.predict_proba(X_sp)
464
+
465
+ dense_svm = svm.SVC(
466
+ C=1, kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0
467
+ )
468
+ pred_dense = dense_svm.fit(X, Y).predict(X)
469
+ assert_array_equal(pred_dense, pred)
470
+ # b.decision_function(X_sp) # XXX : should be supported
471
+
472
+
473
+ @pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
474
+ def test_timeout(lil_container):
475
+ sp = svm.SVC(
476
+ C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0, max_iter=1
477
+ )
478
+ warning_msg = (
479
+ r"Solver terminated early \(max_iter=1\). Consider pre-processing "
480
+ r"your data with StandardScaler or MinMaxScaler."
481
+ )
482
+ with pytest.warns(ConvergenceWarning, match=warning_msg):
483
+ sp.fit(lil_container(X), Y)
484
+
485
+
486
+ def test_consistent_proba():
487
+ a = svm.SVC(probability=True, max_iter=1, random_state=0)
488
+ with ignore_warnings(category=ConvergenceWarning):
489
+ proba_1 = a.fit(X, Y).predict_proba(X)
490
+ a = svm.SVC(probability=True, max_iter=1, random_state=0)
491
+ with ignore_warnings(category=ConvergenceWarning):
492
+ proba_2 = a.fit(X, Y).predict_proba(X)
493
+ assert_allclose(proba_1, proba_2)
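The recurring pattern in these sparse tests is that fitting a libsvm-based estimator on a CSR matrix must give the same model as fitting it on the equivalent dense array. A minimal sketch of that equivalence check outside the test harness, using the toy sample defined at the top of the module:

    import numpy as np
    from scipy import sparse
    from sklearn.svm import SVC

    # Toy sample 1 from the top of the test module.
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    y = [1, 1, 1, 2, 2, 2]

    dense_clf = SVC(kernel="linear").fit(X, y)
    sparse_clf = SVC(kernel="linear").fit(sparse.csr_matrix(X), y)

    # Dense and sparse fits should agree on the dual coefficients and predictions.
    np.testing.assert_allclose(dense_clf.dual_coef_, sparse_clf.dual_coef_.toarray())
    np.testing.assert_array_equal(dense_clf.predict(X), sparse_clf.predict(sparse.csr_matrix(X)))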
venv/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py ADDED
@@ -0,0 +1,1434 @@
1
+ """
2
+ Testing for Support Vector Machine module (sklearn.svm)
3
+
4
+ TODO: remove hard coded numerical results when possible
5
+ """
6
+ import re
7
+
8
+ import numpy as np
9
+ import pytest
10
+ from numpy.testing import (
11
+ assert_allclose,
12
+ assert_almost_equal,
13
+ assert_array_almost_equal,
14
+ assert_array_equal,
15
+ )
16
+
17
+ from sklearn import base, datasets, linear_model, metrics, svm
18
+ from sklearn.datasets import make_blobs, make_classification
19
+ from sklearn.exceptions import (
20
+ ConvergenceWarning,
21
+ NotFittedError,
22
+ UndefinedMetricWarning,
23
+ )
24
+ from sklearn.metrics import f1_score
25
+ from sklearn.metrics.pairwise import rbf_kernel
26
+ from sklearn.model_selection import train_test_split
27
+ from sklearn.multiclass import OneVsRestClassifier
28
+
29
+ # mypy error: Module 'sklearn.svm' has no attribute '_libsvm'
30
+ from sklearn.svm import ( # type: ignore
31
+ SVR,
32
+ LinearSVC,
33
+ LinearSVR,
34
+ NuSVR,
35
+ OneClassSVM,
36
+ _libsvm,
37
+ )
38
+ from sklearn.svm._classes import _validate_dual_parameter
39
+ from sklearn.utils import check_random_state, shuffle
40
+ from sklearn.utils._testing import ignore_warnings
41
+ from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS
42
+ from sklearn.utils.validation import _num_samples
43
+
44
+ # toy sample
45
+ X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
46
+ Y = [1, 1, 1, 2, 2, 2]
47
+ T = [[-1, -1], [2, 2], [3, 2]]
48
+ true_result = [1, 2, 2]
49
+
50
+ # also load the iris dataset
51
+ iris = datasets.load_iris()
52
+ rng = check_random_state(42)
53
+ perm = rng.permutation(iris.target.size)
54
+ iris.data = iris.data[perm]
55
+ iris.target = iris.target[perm]
56
+
57
+
58
+ def test_libsvm_parameters():
59
+ # Test parameters on classes that make use of libsvm.
60
+ clf = svm.SVC(kernel="linear").fit(X, Y)
61
+ assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
62
+ assert_array_equal(clf.support_, [1, 3])
63
+ assert_array_equal(clf.support_vectors_, (X[1], X[3]))
64
+ assert_array_equal(clf.intercept_, [0.0])
65
+ assert_array_equal(clf.predict(X), Y)
66
+
67
+
68
+ def test_libsvm_iris():
69
+ # Check consistency on dataset iris.
70
+
71
+ # shuffle the dataset so that labels are not ordered
72
+ for k in ("linear", "rbf"):
73
+ clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
74
+ assert np.mean(clf.predict(iris.data) == iris.target) > 0.9
75
+ assert hasattr(clf, "coef_") == (k == "linear")
76
+
77
+ assert_array_equal(clf.classes_, np.sort(clf.classes_))
78
+
79
+ # check also the low-level API
80
+ # We unpack the values to create a dictionary with some of the return values
81
+ # from Libsvm's fit.
82
+ (
83
+ libsvm_support,
84
+ libsvm_support_vectors,
85
+ libsvm_n_class_SV,
86
+ libsvm_sv_coef,
87
+ libsvm_intercept,
88
+ libsvm_probA,
89
+ libsvm_probB,
90
+ # libsvm_fit_status and libsvm_n_iter won't be used below.
91
+ libsvm_fit_status,
92
+ libsvm_n_iter,
93
+ ) = _libsvm.fit(iris.data, iris.target.astype(np.float64))
94
+
95
+ model_params = {
96
+ "support": libsvm_support,
97
+ "SV": libsvm_support_vectors,
98
+ "nSV": libsvm_n_class_SV,
99
+ "sv_coef": libsvm_sv_coef,
100
+ "intercept": libsvm_intercept,
101
+ "probA": libsvm_probA,
102
+ "probB": libsvm_probB,
103
+ }
104
+ pred = _libsvm.predict(iris.data, **model_params)
105
+ assert np.mean(pred == iris.target) > 0.95
106
+
107
+ # We unpack the values to create a dictionary with some of the return values
108
+ # from Libsvm's fit.
109
+ (
110
+ libsvm_support,
111
+ libsvm_support_vectors,
112
+ libsvm_n_class_SV,
113
+ libsvm_sv_coef,
114
+ libsvm_intercept,
115
+ libsvm_probA,
116
+ libsvm_probB,
117
+ # libsvm_fit_status and libsvm_n_iter won't be used below.
118
+ libsvm_fit_status,
119
+ libsvm_n_iter,
120
+ ) = _libsvm.fit(iris.data, iris.target.astype(np.float64), kernel="linear")
121
+
122
+ model_params = {
123
+ "support": libsvm_support,
124
+ "SV": libsvm_support_vectors,
125
+ "nSV": libsvm_n_class_SV,
126
+ "sv_coef": libsvm_sv_coef,
127
+ "intercept": libsvm_intercept,
128
+ "probA": libsvm_probA,
129
+ "probB": libsvm_probB,
130
+ }
131
+ pred = _libsvm.predict(iris.data, **model_params, kernel="linear")
132
+ assert np.mean(pred == iris.target) > 0.95
133
+
134
+ pred = _libsvm.cross_validation(
135
+ iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
136
+ )
137
+ assert np.mean(pred == iris.target) > 0.95
138
+
139
+ # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
140
+ # we should get deterministic results (assuming that there is no other
141
+ # thread calling this wrapper calling `srand` concurrently).
142
+ pred2 = _libsvm.cross_validation(
143
+ iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
144
+ )
145
+ assert_array_equal(pred, pred2)
146
+
147
+
148
+ def test_precomputed():
149
+ # SVC with a precomputed kernel.
150
+ # We test it with a toy dataset and with iris.
151
+ clf = svm.SVC(kernel="precomputed")
152
+ # Gram matrix for train data (square matrix)
153
+ # (we use just a linear kernel)
154
+ K = np.dot(X, np.array(X).T)
155
+ clf.fit(K, Y)
156
+ # Gram matrix for test data (rectangular matrix)
157
+ KT = np.dot(T, np.array(X).T)
158
+ pred = clf.predict(KT)
159
+ with pytest.raises(ValueError):
160
+ clf.predict(KT.T)
161
+
162
+ assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
163
+ assert_array_equal(clf.support_, [1, 3])
164
+ assert_array_equal(clf.intercept_, [0])
165
+ assert_array_almost_equal(clf.support_, [1, 3])
166
+ assert_array_equal(pred, true_result)
167
+
168
+ # Gram matrix for test data but compute KT[i,j]
169
+ # for support vectors j only.
170
+ KT = np.zeros_like(KT)
171
+ for i in range(len(T)):
172
+ for j in clf.support_:
173
+ KT[i, j] = np.dot(T[i], X[j])
174
+
175
+ pred = clf.predict(KT)
176
+ assert_array_equal(pred, true_result)
177
+
178
+ # same as before, but using a callable function instead of the kernel
179
+ # matrix. kernel is just a linear kernel
180
+
181
+ def kfunc(x, y):
182
+ return np.dot(x, y.T)
183
+
184
+ clf = svm.SVC(kernel=kfunc)
185
+ clf.fit(np.array(X), Y)
186
+ pred = clf.predict(T)
187
+
188
+ assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
189
+ assert_array_equal(clf.intercept_, [0])
190
+ assert_array_almost_equal(clf.support_, [1, 3])
191
+ assert_array_equal(pred, true_result)
192
+
193
+ # test a precomputed kernel with the iris dataset
194
+ # and check parameters against a linear SVC
195
+ clf = svm.SVC(kernel="precomputed")
196
+ clf2 = svm.SVC(kernel="linear")
197
+ K = np.dot(iris.data, iris.data.T)
198
+ clf.fit(K, iris.target)
199
+ clf2.fit(iris.data, iris.target)
200
+ pred = clf.predict(K)
201
+ assert_array_almost_equal(clf.support_, clf2.support_)
202
+ assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
203
+ assert_array_almost_equal(clf.intercept_, clf2.intercept_)
204
+ assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
205
+
206
+ # Gram matrix for test data but compute KT[i,j]
207
+ # for support vectors j only.
208
+ K = np.zeros_like(K)
209
+ for i in range(len(iris.data)):
210
+ for j in clf.support_:
211
+ K[i, j] = np.dot(iris.data[i], iris.data[j])
212
+
213
+ pred = clf.predict(K)
214
+ assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
215
+
216
+ clf = svm.SVC(kernel=kfunc)
217
+ clf.fit(iris.data, iris.target)
218
+ assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
219
+
220
+
221
+ def test_svr():
222
+ # Test Support Vector Regression
223
+
224
+ diabetes = datasets.load_diabetes()
225
+ for clf in (
226
+ svm.NuSVR(kernel="linear", nu=0.4, C=1.0),
227
+ svm.NuSVR(kernel="linear", nu=0.4, C=10.0),
228
+ svm.SVR(kernel="linear", C=10.0),
229
+ svm.LinearSVR(dual="auto", C=10.0),
230
+ svm.LinearSVR(dual="auto", C=10.0),
231
+ ):
232
+ clf.fit(diabetes.data, diabetes.target)
233
+ assert clf.score(diabetes.data, diabetes.target) > 0.02
234
+
235
+ # non-regression test; previously, BaseLibSVM would check that
236
+ # len(np.unique(y)) < 2, which must only be done for SVC
237
+ svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
238
+ svm.LinearSVR(dual="auto").fit(diabetes.data, np.ones(len(diabetes.data)))
239
+
240
+
241
+ def test_linearsvr():
242
+ # check that SVR(kernel='linear') and LinearSVC() give
243
+ # comparable results
244
+ diabetes = datasets.load_diabetes()
245
+ lsvr = svm.LinearSVR(C=1e3, dual="auto").fit(diabetes.data, diabetes.target)
246
+ score1 = lsvr.score(diabetes.data, diabetes.target)
247
+
248
+ svr = svm.SVR(kernel="linear", C=1e3).fit(diabetes.data, diabetes.target)
249
+ score2 = svr.score(diabetes.data, diabetes.target)
250
+
251
+ assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001)
252
+ assert_almost_equal(score1, score2, 2)
253
+
254
+
255
+ def test_linearsvr_fit_sampleweight():
256
+ # check correct result when sample_weight is 1
257
+ # check that SVR(kernel='linear') and LinearSVC() give
258
+ # comparable results
259
+ diabetes = datasets.load_diabetes()
260
+ n_samples = len(diabetes.target)
261
+ unit_weight = np.ones(n_samples)
262
+ lsvr = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit(
263
+ diabetes.data, diabetes.target, sample_weight=unit_weight
264
+ )
265
+ score1 = lsvr.score(diabetes.data, diabetes.target)
266
+
267
+ lsvr_no_weight = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit(
268
+ diabetes.data, diabetes.target
269
+ )
270
+ score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
271
+
272
+ assert_allclose(
273
+ np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001
274
+ )
275
+ assert_almost_equal(score1, score2, 2)
276
+
277
+ # check that fit(X) = fit([X1, X2, X3], sample_weight = [n1, n2, n3]) where
278
+ # X = X1 repeated n1 times, X2 repeated n2 times and so forth
279
+ random_state = check_random_state(0)
280
+ random_weight = random_state.randint(0, 10, n_samples)
281
+ lsvr_unflat = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit(
282
+ diabetes.data, diabetes.target, sample_weight=random_weight
283
+ )
284
+ score3 = lsvr_unflat.score(
285
+ diabetes.data, diabetes.target, sample_weight=random_weight
286
+ )
287
+
288
+ X_flat = np.repeat(diabetes.data, random_weight, axis=0)
289
+ y_flat = np.repeat(diabetes.target, random_weight, axis=0)
290
+ lsvr_flat = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit(
291
+ X_flat, y_flat
292
+ )
293
+ score4 = lsvr_flat.score(X_flat, y_flat)
294
+
295
+ assert_almost_equal(score3, score4, 2)
296
+
297
+
298
+ def test_svr_errors():
299
+ X = [[0.0], [1.0]]
300
+ y = [0.0, 0.5]
301
+
302
+ # Bad kernel
303
+ clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
304
+ clf.fit(X, y)
305
+ with pytest.raises(ValueError):
306
+ clf.predict(X)
307
+
308
+
309
+ def test_oneclass():
310
+ # Test OneClassSVM
311
+ clf = svm.OneClassSVM()
312
+ clf.fit(X)
313
+ pred = clf.predict(T)
314
+
315
+ assert_array_equal(pred, [1, -1, -1])
316
+ assert pred.dtype == np.dtype("intp")
317
+ assert_array_almost_equal(clf.intercept_, [-1.218], decimal=3)
318
+ assert_array_almost_equal(clf.dual_coef_, [[0.750, 0.750, 0.750, 0.750]], decimal=3)
319
+ with pytest.raises(AttributeError):
320
+ (lambda: clf.coef_)()
321
+
322
+
323
+ def test_oneclass_decision_function():
324
+ # Test OneClassSVM decision function
325
+ clf = svm.OneClassSVM()
326
+ rnd = check_random_state(2)
327
+
328
+ # Generate train data
329
+ X = 0.3 * rnd.randn(100, 2)
330
+ X_train = np.r_[X + 2, X - 2]
331
+
332
+ # Generate some regular novel observations
333
+ X = 0.3 * rnd.randn(20, 2)
334
+ X_test = np.r_[X + 2, X - 2]
335
+ # Generate some abnormal novel observations
336
+ X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
337
+
338
+ # fit the model
339
+ clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
340
+ clf.fit(X_train)
341
+
342
+ # predict things
343
+ y_pred_test = clf.predict(X_test)
344
+ assert np.mean(y_pred_test == 1) > 0.9
345
+ y_pred_outliers = clf.predict(X_outliers)
346
+ assert np.mean(y_pred_outliers == -1) > 0.9
347
+ dec_func_test = clf.decision_function(X_test)
348
+ assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
349
+ dec_func_outliers = clf.decision_function(X_outliers)
350
+ assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
351
+
352
+
353
+ def test_oneclass_score_samples():
354
+ X_train = [[1, 1], [1, 2], [2, 1]]
355
+ clf = svm.OneClassSVM(gamma=1).fit(X_train)
356
+ assert_array_equal(
357
+ clf.score_samples([[2.0, 2.0]]),
358
+ clf.decision_function([[2.0, 2.0]]) + clf.offset_,
359
+ )
360
+
361
+
362
+ def test_tweak_params():
363
+ # Make sure some tweaking of parameters works.
364
+ # We change clf.dual_coef_ at run time and expect .predict() to change
365
+ # accordingly. Notice that this is not trivial since it involves a lot
366
+ # of C/Python copying in the libsvm bindings.
367
+ # The success of this test ensures that the mapping between libsvm and
368
+ # the python classifier is complete.
369
+ clf = svm.SVC(kernel="linear", C=1.0)
370
+ clf.fit(X, Y)
371
+ assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
372
+ assert_array_equal(clf.predict([[-0.1, -0.1]]), [1])
373
+ clf._dual_coef_ = np.array([[0.0, 1.0]])
374
+ assert_array_equal(clf.predict([[-0.1, -0.1]]), [2])
375
+
376
+
377
+ def test_probability():
378
+ # Predict probabilities using SVC
379
+ # This uses cross validation, so we use a slightly bigger testing set.
380
+
381
+ for clf in (
382
+ svm.SVC(probability=True, random_state=0, C=1.0),
383
+ svm.NuSVC(probability=True, random_state=0),
384
+ ):
385
+ clf.fit(iris.data, iris.target)
386
+
387
+ prob_predict = clf.predict_proba(iris.data)
388
+ assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
389
+ assert np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9
390
+
391
+ assert_almost_equal(
392
+ clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8
393
+ )
394
+
395
+
396
+ def test_decision_function():
397
+ # Test decision_function
398
+ # Sanity check, test that decision_function implemented in python
399
+ # returns the same as the one in libsvm
400
+ # multi class:
401
+ clf = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo").fit(
402
+ iris.data, iris.target
403
+ )
404
+
405
+ dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
406
+
407
+ assert_array_almost_equal(dec, clf.decision_function(iris.data))
408
+
409
+ # binary:
410
+ clf.fit(X, Y)
411
+ dec = np.dot(X, clf.coef_.T) + clf.intercept_
412
+ prediction = clf.predict(X)
413
+ assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
414
+ assert_array_almost_equal(
415
+ prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int)]
416
+ )
417
+ expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0])
418
+ assert_array_almost_equal(clf.decision_function(X), expected, 2)
419
+
420
+ # kernel binary:
421
+ clf = svm.SVC(kernel="rbf", gamma=1, decision_function_shape="ovo")
422
+ clf.fit(X, Y)
423
+
424
+ rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
425
+ dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
426
+ assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
427
+
428
+
429
+ @pytest.mark.parametrize("SVM", (svm.SVC, svm.NuSVC))
430
+ def test_decision_function_shape(SVM):
431
+ # check that decision_function_shape='ovr' or 'ovo' gives
432
+ # correct shape and is consistent with predict
433
+
434
+ clf = SVM(kernel="linear", decision_function_shape="ovr").fit(
435
+ iris.data, iris.target
436
+ )
437
+ dec = clf.decision_function(iris.data)
438
+ assert dec.shape == (len(iris.data), 3)
439
+ assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
440
+
441
+ # with five classes:
442
+ X, y = make_blobs(n_samples=80, centers=5, random_state=0)
443
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
444
+
445
+ clf = SVM(kernel="linear", decision_function_shape="ovr").fit(X_train, y_train)
446
+ dec = clf.decision_function(X_test)
447
+ assert dec.shape == (len(X_test), 5)
448
+ assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
449
+
450
+ # check the shape with decision_function_shape='ovo'
451
+ clf = SVM(kernel="linear", decision_function_shape="ovo").fit(X_train, y_train)
452
+ dec = clf.decision_function(X_train)
453
+ assert dec.shape == (len(X_train), 10)
454
+
455
+
456
+ def test_svr_predict():
457
+ # Test SVR's decision_function
458
+ # Sanity check, test that predict implemented in python
459
+ # returns the same as the one in libsvm
460
+
461
+ X = iris.data
462
+ y = iris.target
463
+
464
+ # linear kernel
465
+ reg = svm.SVR(kernel="linear", C=0.1).fit(X, y)
466
+
467
+ dec = np.dot(X, reg.coef_.T) + reg.intercept_
468
+ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
469
+
470
+ # rbf kernel
471
+ reg = svm.SVR(kernel="rbf", gamma=1).fit(X, y)
472
+
473
+ rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
474
+ dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
475
+ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
476
+
477
+
478
+ def test_weight():
479
+ # Test class weights
480
+ clf = svm.SVC(class_weight={1: 0.1})
481
+ # we give a small weights to class 1
482
+ clf.fit(X, Y)
483
+ # so all predicted values belong to class 2
484
+ assert_array_almost_equal(clf.predict(X), [2] * 6)
485
+
486
+ X_, y_ = make_classification(
487
+ n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2
488
+ )
489
+
490
+ for clf in (
491
+ linear_model.LogisticRegression(),
492
+ svm.LinearSVC(dual="auto", random_state=0),
493
+ svm.SVC(),
494
+ ):
495
+ clf.set_params(class_weight={0: 0.1, 1: 10})
496
+ clf.fit(X_[:100], y_[:100])
497
+ y_pred = clf.predict(X_[100:])
498
+ assert f1_score(y_[100:], y_pred) > 0.3
499
+
500
+
501
+ @pytest.mark.parametrize("estimator", [svm.SVC(C=1e-2), svm.NuSVC()])
502
+ def test_svm_classifier_sided_sample_weight(estimator):
503
+ # fit a linear SVM and check that giving more weight to opposed samples
504
+ # in the space will flip the decision toward these samples.
505
+ X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
506
+ estimator.set_params(kernel="linear")
507
+
508
+ # check that with unit weights, a sample is supposed to be predicted on
509
+ # the boundary
510
+ sample_weight = [1] * 6
511
+ estimator.fit(X, Y, sample_weight=sample_weight)
512
+ y_pred = estimator.decision_function([[-1.0, 1.0]])
513
+ assert y_pred == pytest.approx(0)
514
+
515
+ # give more weights to opposed samples
516
+ sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
517
+ estimator.fit(X, Y, sample_weight=sample_weight)
518
+ y_pred = estimator.decision_function([[-1.0, 1.0]])
519
+ assert y_pred < 0
520
+
521
+ sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
522
+ estimator.fit(X, Y, sample_weight=sample_weight)
523
+ y_pred = estimator.decision_function([[-1.0, 1.0]])
524
+ assert y_pred > 0
525
+
526
+
527
+ @pytest.mark.parametrize("estimator", [svm.SVR(C=1e-2), svm.NuSVR(C=1e-2)])
528
+ def test_svm_regressor_sided_sample_weight(estimator):
529
+ # similar test to test_svm_classifier_sided_sample_weight but for
530
+ # SVM regressors
531
+ X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
532
+ estimator.set_params(kernel="linear")
533
+
534
+ # check that with unit weights, a sample is supposed to be predicted on
535
+ # the boundary
536
+ sample_weight = [1] * 6
537
+ estimator.fit(X, Y, sample_weight=sample_weight)
538
+ y_pred = estimator.predict([[-1.0, 1.0]])
539
+ assert y_pred == pytest.approx(1.5)
540
+
541
+ # give more weights to opposed samples
542
+ sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
543
+ estimator.fit(X, Y, sample_weight=sample_weight)
544
+ y_pred = estimator.predict([[-1.0, 1.0]])
545
+ assert y_pred < 1.5
546
+
547
+ sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
548
+ estimator.fit(X, Y, sample_weight=sample_weight)
549
+ y_pred = estimator.predict([[-1.0, 1.0]])
550
+ assert y_pred > 1.5
551
+
552
+
553
+ def test_svm_equivalence_sample_weight_C():
554
+ # test that rescaling all samples is the same as changing C
555
+ clf = svm.SVC()
556
+ clf.fit(X, Y)
557
+ dual_coef_no_weight = clf.dual_coef_
558
+ clf.set_params(C=100)
559
+ clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
560
+ assert_allclose(dual_coef_no_weight, clf.dual_coef_)
561
+
562
+
563
+ @pytest.mark.parametrize(
564
+ "Estimator, err_msg",
565
+ [
566
+ (svm.SVC, "Invalid input - all samples have zero or negative weights."),
567
+ (svm.NuSVC, "(negative dimensions are not allowed|nu is infeasible)"),
568
+ (svm.SVR, "Invalid input - all samples have zero or negative weights."),
569
+ (svm.NuSVR, "Invalid input - all samples have zero or negative weights."),
570
+ (svm.OneClassSVM, "Invalid input - all samples have zero or negative weights."),
571
+ ],
572
+ ids=["SVC", "NuSVC", "SVR", "NuSVR", "OneClassSVM"],
573
+ )
574
+ @pytest.mark.parametrize(
575
+ "sample_weight",
576
+ [[0] * len(Y), [-0.3] * len(Y)],
577
+ ids=["weights-are-zero", "weights-are-negative"],
578
+ )
579
+ def test_negative_sample_weights_mask_all_samples(Estimator, err_msg, sample_weight):
580
+ est = Estimator(kernel="linear")
581
+ with pytest.raises(ValueError, match=err_msg):
582
+ est.fit(X, Y, sample_weight=sample_weight)
583
+
584
+
585
+ @pytest.mark.parametrize(
586
+ "Classifier, err_msg",
587
+ [
588
+ (
589
+ svm.SVC,
590
+ (
591
+ "Invalid input - all samples with positive weights belong to the same"
592
+ " class"
593
+ ),
594
+ ),
595
+ (svm.NuSVC, "specified nu is infeasible"),
596
+ ],
597
+ ids=["SVC", "NuSVC"],
598
+ )
599
+ @pytest.mark.parametrize(
600
+ "sample_weight",
601
+ [[0, -0.5, 0, 1, 1, 1], [1, 1, 1, 0, -0.1, -0.3]],
602
+ ids=["mask-label-1", "mask-label-2"],
603
+ )
604
+ def test_negative_weights_svc_leave_just_one_label(Classifier, err_msg, sample_weight):
605
+ clf = Classifier(kernel="linear")
606
+ with pytest.raises(ValueError, match=err_msg):
607
+ clf.fit(X, Y, sample_weight=sample_weight)
608
+
609
+
610
+ @pytest.mark.parametrize(
611
+ "Classifier, model",
612
+ [
613
+ (svm.SVC, {"when-left": [0.3998, 0.4], "when-right": [0.4, 0.3999]}),
614
+ (svm.NuSVC, {"when-left": [0.3333, 0.3333], "when-right": [0.3333, 0.3333]}),
615
+ ],
616
+ ids=["SVC", "NuSVC"],
617
+ )
618
+ @pytest.mark.parametrize(
619
+ "sample_weight, mask_side",
620
+ [([1, -0.5, 1, 1, 1, 1], "when-left"), ([1, 1, 1, 0, 1, 1], "when-right")],
621
+ ids=["partial-mask-label-1", "partial-mask-label-2"],
622
+ )
623
+ def test_negative_weights_svc_leave_two_labels(
624
+ Classifier, model, sample_weight, mask_side
625
+ ):
626
+ clf = Classifier(kernel="linear")
627
+ clf.fit(X, Y, sample_weight=sample_weight)
628
+ assert_allclose(clf.coef_, [model[mask_side]], rtol=1e-3)
629
+
630
+
631
+ @pytest.mark.parametrize(
632
+ "Estimator", [svm.SVC, svm.NuSVC, svm.NuSVR], ids=["SVC", "NuSVC", "NuSVR"]
633
+ )
634
+ @pytest.mark.parametrize(
635
+ "sample_weight",
636
+ [[1, -0.5, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1]],
637
+ ids=["partial-mask-label-1", "partial-mask-label-2"],
638
+ )
639
+ def test_negative_weight_equal_coeffs(Estimator, sample_weight):
640
+ # model generates equal coefficients
641
+ est = Estimator(kernel="linear")
642
+ est.fit(X, Y, sample_weight=sample_weight)
643
+ coef = np.abs(est.coef_).ravel()
644
+ assert coef[0] == pytest.approx(coef[1], rel=1e-3)
645
+
646
+
647
+ @ignore_warnings(category=UndefinedMetricWarning)
648
+ def test_auto_weight():
649
+ # Test class weights for imbalanced data
650
+ from sklearn.linear_model import LogisticRegression
651
+
652
+ # We take as dataset the two-dimensional projection of iris so
653
+ # that it is not separable and remove half of predictors from
654
+ # class 1.
655
+ # We add one to the targets as a non-regression test:
656
+ # class_weight="balanced"
657
+ # used to work only when the labels were in the range [0..K).
658
+ from sklearn.utils import compute_class_weight
659
+
660
+ X, y = iris.data[:, :2], iris.target + 1
661
+ unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
662
+
663
+ classes = np.unique(y[unbalanced])
664
+ class_weights = compute_class_weight("balanced", classes=classes, y=y[unbalanced])
665
+ assert np.argmax(class_weights) == 2
666
+
667
+ for clf in (
668
+ svm.SVC(kernel="linear"),
669
+ svm.LinearSVC(dual="auto", random_state=0),
670
+ LogisticRegression(),
671
+ ):
672
+ # check that score is better when class='balanced' is set.
673
+ y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
674
+ clf.set_params(class_weight="balanced")
675
+ y_pred_balanced = clf.fit(
676
+ X[unbalanced],
677
+ y[unbalanced],
678
+ ).predict(X)
679
+ assert metrics.f1_score(y, y_pred, average="macro") <= metrics.f1_score(
680
+ y, y_pred_balanced, average="macro"
681
+ )
682
+
683
+
684
+ @pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
685
+ def test_bad_input(lil_container):
686
+ # Test dimensions for labels
687
+ Y2 = Y[:-1] # wrong dimensions for labels
688
+ with pytest.raises(ValueError):
689
+ svm.SVC().fit(X, Y2)
690
+
691
+ # Test with arrays that are non-contiguous.
692
+ for clf in (svm.SVC(), svm.LinearSVC(dual="auto", random_state=0)):
693
+ Xf = np.asfortranarray(X)
694
+ assert not Xf.flags["C_CONTIGUOUS"]
695
+ yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
696
+ yf = yf[:, -1]
697
+ assert not yf.flags["F_CONTIGUOUS"]
698
+ assert not yf.flags["C_CONTIGUOUS"]
699
+ clf.fit(Xf, yf)
700
+ assert_array_equal(clf.predict(T), true_result)
701
+
702
+ # error for precomputed kernels
703
+ clf = svm.SVC(kernel="precomputed")
704
+ with pytest.raises(ValueError):
705
+ clf.fit(X, Y)
706
+
707
+ # predict with sparse input when trained with dense
708
+ clf = svm.SVC().fit(X, Y)
709
+ with pytest.raises(ValueError):
710
+ clf.predict(lil_container(X))
711
+
712
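+ # refit on a 6x6 matrix (X @ X.T); predicting on the 2-feature X afterwards
+ # should raise because of the feature-count mismatch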
+ Xt = np.array(X).T
713
+ clf.fit(np.dot(X, Xt), Y)
714
+ with pytest.raises(ValueError):
715
+ clf.predict(X)
716
+
717
+ clf = svm.SVC()
718
+ clf.fit(X, Y)
719
+ with pytest.raises(ValueError):
720
+ clf.predict(Xt)
721
+
722
+
723
+ def test_svc_nonfinite_params():
724
+ # Check SVC throws ValueError when dealing with non-finite parameter values
725
+ rng = np.random.RandomState(0)
726
+ n_samples = 10
727
+ fmax = np.finfo(np.float64).max
728
+ X = fmax * rng.uniform(size=(n_samples, 2))
729
+ y = rng.randint(0, 2, size=n_samples)
730
+
731
+ clf = svm.SVC()
732
+ msg = "The dual coefficients or intercepts are not finite"
733
+ with pytest.raises(ValueError, match=msg):
734
+ clf.fit(X, y)
735
+
736
+
737
+ def test_unicode_kernel():
738
+ # Test that a unicode kernel name does not cause a TypeError
739
+ clf = svm.SVC(kernel="linear", probability=True)
740
+ clf.fit(X, Y)
741
+ clf.predict_proba(T)
742
+ _libsvm.cross_validation(
743
+ iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
744
+ )
745
+
746
+
747
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
748
+ def test_sparse_precomputed(csr_container):
749
+ clf = svm.SVC(kernel="precomputed")
750
+ sparse_gram = csr_container([[1, 0], [0, 1]])
751
+ with pytest.raises(TypeError, match="Sparse precomputed"):
752
+ clf.fit(sparse_gram, [0, 1])
753
+
754
+
755
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
756
+ def test_sparse_fit_support_vectors_empty(csr_container):
757
+ # Regression test for #14893
758
+ X_train = csr_container([[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]])
759
+ y_train = np.array([0.04, 0.04, 0.10, 0.16])
760
+ model = svm.SVR(kernel="linear")
761
+ model.fit(X_train, y_train)
762
+ assert not model.support_vectors_.data.size
763
+ assert not model.dual_coef_.data.size
764
+
765
+
766
+ @pytest.mark.parametrize("loss", ["hinge", "squared_hinge"])
767
+ @pytest.mark.parametrize("penalty", ["l1", "l2"])
768
+ @pytest.mark.parametrize("dual", [True, False])
769
+ def test_linearsvc_parameters(loss, penalty, dual):
770
+ # Test possible parameter combinations in LinearSVC
771
+ # Generate list of possible parameter combinations
772
+ X, y = make_classification(n_samples=5, n_features=5, random_state=0)
773
+
774
+ clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual, random_state=0)
775
+ if (
776
+ (loss, penalty) == ("hinge", "l1")
777
+ or (loss, penalty, dual) == ("hinge", "l2", False)
778
+ or (penalty, dual) == ("l1", True)
779
+ ):
780
+ with pytest.raises(
781
+ ValueError,
782
+ match="Unsupported set of arguments.*penalty='%s.*loss='%s.*dual=%s"
783
+ % (penalty, loss, dual),
784
+ ):
785
+ clf.fit(X, y)
786
+ else:
787
+ clf.fit(X, y)
788
+
789
+
790
+ def test_linearsvc():
791
+ # Test basic routines using LinearSVC
792
+ clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y)
793
+
794
+ # by default should have intercept
795
+ assert clf.fit_intercept
796
+
797
+ assert_array_equal(clf.predict(T), true_result)
798
+ assert_array_almost_equal(clf.intercept_, [0], decimal=3)
799
+
800
+ # the same with l1 penalty
801
+ clf = svm.LinearSVC(
802
+ penalty="l1", loss="squared_hinge", dual=False, random_state=0
803
+ ).fit(X, Y)
804
+ assert_array_equal(clf.predict(T), true_result)
805
+
806
+ # l2 penalty with dual formulation
807
+ clf = svm.LinearSVC(penalty="l2", dual=True, random_state=0).fit(X, Y)
808
+ assert_array_equal(clf.predict(T), true_result)
809
+
810
+ # l2 penalty, l1 loss
811
+ clf = svm.LinearSVC(penalty="l2", loss="hinge", dual=True, random_state=0)
812
+ clf.fit(X, Y)
813
+ assert_array_equal(clf.predict(T), true_result)
814
+
815
+ # test also decision function
816
+ dec = clf.decision_function(T)
817
+ res = (dec > 0).astype(int) + 1
818
+ assert_array_equal(res, true_result)
819
+
820
+
821
+ def test_linearsvc_crammer_singer():
822
+ # Test LinearSVC with crammer_singer multi-class svm
823
+ ovr_clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, iris.target)
824
+ cs_clf = svm.LinearSVC(dual="auto", multi_class="crammer_singer", random_state=0)
825
+ cs_clf.fit(iris.data, iris.target)
826
+
827
+ # similar prediction for ovr and crammer-singer:
828
+ assert (ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > 0.9
829
+
830
+ # classifiers shouldn't be the same
831
+ assert (ovr_clf.coef_ != cs_clf.coef_).all()
832
+
833
+ # test decision function
834
+ assert_array_equal(
835
+ cs_clf.predict(iris.data),
836
+ np.argmax(cs_clf.decision_function(iris.data), axis=1),
837
+ )
838
+ dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
839
+ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
840
+
841
+
842
+ def test_linearsvc_fit_sampleweight():
843
+ # check correct result when sample_weight is 1
844
+ n_samples = len(X)
845
+ unit_weight = np.ones(n_samples)
846
+ clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y)
847
+ clf_unitweight = svm.LinearSVC(
848
+ dual="auto", random_state=0, tol=1e-12, max_iter=1000
849
+ ).fit(X, Y, sample_weight=unit_weight)
850
+
851
+ # check if same as sample_weight=None
852
+ assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
853
+ assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
854
+
855
+ # check that fit(X) = fit([X1, X2, X3], sample_weight=[n1, n2, n3]) where
856
+ # X = X1 repeated n1 times, X2 repeated n2 times and so forth
857
+
858
+ random_state = check_random_state(0)
859
+ random_weight = random_state.randint(0, 10, n_samples)
860
+ lsvc_unflat = svm.LinearSVC(
861
+ dual="auto", random_state=0, tol=1e-12, max_iter=1000
862
+ ).fit(X, Y, sample_weight=random_weight)
863
+
864
+ pred1 = lsvc_unflat.predict(T)
865
+
866
+ X_flat = np.repeat(X, random_weight, axis=0)
867
+ y_flat = np.repeat(Y, random_weight, axis=0)
868
+ lsvc_flat = svm.LinearSVC(
869
+ dual="auto", random_state=0, tol=1e-12, max_iter=1000
870
+ ).fit(X_flat, y_flat)
871
+ pred2 = lsvc_flat.predict(T)
872
+
873
+ assert_array_equal(pred1, pred2)
874
+ assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
875
+
876
+
877
+ def test_crammer_singer_binary():
878
+ # Test Crammer-Singer formulation in the binary case
879
+ X, y = make_classification(n_classes=2, random_state=0)
880
+
881
+ for fit_intercept in (True, False):
882
+ acc = (
883
+ svm.LinearSVC(
884
+ dual="auto",
885
+ fit_intercept=fit_intercept,
886
+ multi_class="crammer_singer",
887
+ random_state=0,
888
+ )
889
+ .fit(X, y)
890
+ .score(X, y)
891
+ )
892
+ assert acc > 0.9
893
+
894
+
895
+ def test_linearsvc_iris():
896
+ # Test that LinearSVC gives plausible predictions on the iris dataset
897
+ # Also, test symbolic class names (classes_).
898
+ target = iris.target_names[iris.target]
899
+ clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, target)
900
+ assert set(clf.classes_) == set(iris.target_names)
901
+ assert np.mean(clf.predict(iris.data) == target) > 0.8
902
+
903
+ dec = clf.decision_function(iris.data)
904
+ pred = iris.target_names[np.argmax(dec, 1)]
905
+ assert_array_equal(pred, clf.predict(iris.data))
906
+
907
+
908
+ def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
909
+ # Test that dense liblinear honours intercept_scaling param
910
+ X = [[2, 1], [3, 1], [1, 3], [2, 3]]
911
+ y = [0, 0, 1, 1]
912
+ clf = classifier(
913
+ fit_intercept=True,
914
+ penalty="l1",
915
+ loss="squared_hinge",
916
+ dual=False,
917
+ C=4,
918
+ tol=1e-7,
919
+ random_state=0,
920
+ )
921
+ assert clf.intercept_scaling == 1, clf.intercept_scaling
922
+ assert clf.fit_intercept
923
+
924
+ # when intercept_scaling is low the intercept value is highly "penalized"
925
+ # by regularization
926
+ clf.intercept_scaling = 1
927
+ clf.fit(X, y)
928
+ assert_almost_equal(clf.intercept_, 0, decimal=5)
929
+
930
+ # when intercept_scaling is sufficiently high, the intercept value
931
+ # is not affected by regularization
932
+ clf.intercept_scaling = 100
933
+ clf.fit(X, y)
934
+ intercept1 = clf.intercept_
935
+ assert intercept1 < -1
936
+
937
+ # when intercept_scaling is sufficiently high, the intercept value
938
+ # doesn't depend on intercept_scaling value
939
+ clf.intercept_scaling = 1000
940
+ clf.fit(X, y)
941
+ intercept2 = clf.intercept_
942
+ assert_array_almost_equal(intercept1, intercept2, decimal=2)
943
+
944
+
945
+ def test_liblinear_set_coef():
946
+ # multi-class case
947
+ clf = svm.LinearSVC(dual="auto").fit(iris.data, iris.target)
948
+ values = clf.decision_function(iris.data)
949
+ clf.coef_ = clf.coef_.copy()
950
+ clf.intercept_ = clf.intercept_.copy()
951
+ values2 = clf.decision_function(iris.data)
952
+ assert_array_almost_equal(values, values2)
953
+
954
+ # binary-class case
955
+ X = [[2, 1], [3, 1], [1, 3], [2, 3]]
956
+ y = [0, 0, 1, 1]
957
+
958
+ clf = svm.LinearSVC(dual="auto").fit(X, y)
959
+ values = clf.decision_function(X)
960
+ clf.coef_ = clf.coef_.copy()
961
+ clf.intercept_ = clf.intercept_.copy()
962
+ values2 = clf.decision_function(X)
963
+ assert_array_equal(values, values2)
964
+
965
+
966
+ def test_immutable_coef_property():
967
+ # Check that primal coef modifications are not silently ignored
968
+ svms = [
969
+ svm.SVC(kernel="linear").fit(iris.data, iris.target),
970
+ svm.NuSVC(kernel="linear").fit(iris.data, iris.target),
971
+ svm.SVR(kernel="linear").fit(iris.data, iris.target),
972
+ svm.NuSVR(kernel="linear").fit(iris.data, iris.target),
973
+ svm.OneClassSVM(kernel="linear").fit(iris.data),
974
+ ]
975
+ for clf in svms:
976
+ with pytest.raises(AttributeError):
977
+ clf.__setattr__("coef_", np.arange(3))
978
+ with pytest.raises((RuntimeError, ValueError)):
979
+ clf.coef_.__setitem__((0, 0), 0)
980
+
981
+
982
+ def test_linearsvc_verbose():
983
+ # stdout: redirect
984
+ import os
985
+
986
+ stdout = os.dup(1) # save original stdout
987
+ os.dup2(os.pipe()[1], 1) # replace it
988
+
989
+ # actual call
990
+ clf = svm.LinearSVC(dual="auto", verbose=1)
991
+ clf.fit(X, Y)
992
+
993
+ # stdout: restore
994
+ os.dup2(stdout, 1) # restore original stdout
995
+
996
+
997
+ def test_svc_clone_with_callable_kernel():
998
+ # create SVM with callable linear kernel, check that results are the same
999
+ # as with built-in linear kernel
1000
+ svm_callable = svm.SVC(
1001
+ kernel=lambda x, y: np.dot(x, y.T),
1002
+ probability=True,
1003
+ random_state=0,
1004
+ decision_function_shape="ovr",
1005
+ )
1006
+ # clone for checking clonability with lambda functions.
1007
+ svm_cloned = base.clone(svm_callable)
1008
+ svm_cloned.fit(iris.data, iris.target)
1009
+
1010
+ svm_builtin = svm.SVC(
1011
+ kernel="linear", probability=True, random_state=0, decision_function_shape="ovr"
1012
+ )
1013
+ svm_builtin.fit(iris.data, iris.target)
1014
+
1015
+ assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_)
1016
+ assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_)
1017
+ assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data))
1018
+
1019
+ assert_array_almost_equal(
1020
+ svm_cloned.predict_proba(iris.data),
1021
+ svm_builtin.predict_proba(iris.data),
1022
+ decimal=4,
1023
+ )
1024
+ assert_array_almost_equal(
1025
+ svm_cloned.decision_function(iris.data),
1026
+ svm_builtin.decision_function(iris.data),
1027
+ )
1028
+
1029
+
1030
+ def test_svc_bad_kernel():
1031
+ svc = svm.SVC(kernel=lambda x, y: x)
1032
+ with pytest.raises(ValueError):
1033
+ svc.fit(X, Y)
1034
+
1035
+
1036
+ def test_libsvm_convergence_warnings():
1037
+ a = svm.SVC(
1038
+ kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=2
1039
+ )
1040
+ warning_msg = (
1041
+ r"Solver terminated early \(max_iter=2\). Consider pre-processing "
1042
+ r"your data with StandardScaler or MinMaxScaler."
1043
+ )
1044
+ with pytest.warns(ConvergenceWarning, match=warning_msg):
1045
+ a.fit(np.array(X), Y)
1046
+ assert np.all(a.n_iter_ == 2)
1047
+
1048
+
1049
+ def test_unfitted():
1050
+ X = "foo!" # input validation not required when SVM not fitted
1051
+
1052
+ clf = svm.SVC()
1053
+ with pytest.raises(Exception, match=r".*\bSVC\b.*\bnot\b.*\bfitted\b"):
1054
+ clf.predict(X)
1055
+
1056
+ clf = svm.NuSVR()
1057
+ with pytest.raises(Exception, match=r".*\bNuSVR\b.*\bnot\b.*\bfitted\b"):
1058
+ clf.predict(X)
1059
+
1060
+
1061
+ # ignore convergence warnings from max_iter=1
1062
+ @ignore_warnings
1063
+ def test_consistent_proba():
1064
+ a = svm.SVC(probability=True, max_iter=1, random_state=0)
1065
+ proba_1 = a.fit(X, Y).predict_proba(X)
1066
+ a = svm.SVC(probability=True, max_iter=1, random_state=0)
1067
+ proba_2 = a.fit(X, Y).predict_proba(X)
1068
+ assert_array_almost_equal(proba_1, proba_2)
1069
+
1070
+
1071
+ def test_linear_svm_convergence_warnings():
1072
+ # Test that warnings are raised if model does not converge
1073
+
1074
+ lsvc = svm.LinearSVC(dual="auto", random_state=0, max_iter=2)
1075
+ warning_msg = "Liblinear failed to converge, increase the number of iterations."
1076
+ with pytest.warns(ConvergenceWarning, match=warning_msg):
1077
+ lsvc.fit(X, Y)
1078
+ # Check that we have an n_iter_ attribute with int type as opposed to a
1079
+ # numpy array or an np.int32 so as to match the docstring.
1080
+ assert isinstance(lsvc.n_iter_, int)
1081
+ assert lsvc.n_iter_ == 2
1082
+
1083
+ lsvr = svm.LinearSVR(dual="auto", random_state=0, max_iter=2)
1084
+ with pytest.warns(ConvergenceWarning, match=warning_msg):
1085
+ lsvr.fit(iris.data, iris.target)
1086
+ assert isinstance(lsvr.n_iter_, int)
1087
+ assert lsvr.n_iter_ == 2
1088
+
1089
+
1090
+ def test_svr_coef_sign():
1091
+ # Test that SVR(kernel="linear") has coef_ with the right sign.
1092
+ # Non-regression test for #2933.
1093
+ X = np.random.RandomState(21).randn(10, 3)
1094
+ y = np.random.RandomState(12).randn(10)
1095
+
1096
+ for svr in [
1097
+ svm.SVR(kernel="linear"),
1098
+ svm.NuSVR(kernel="linear"),
1099
+ svm.LinearSVR(dual="auto"),
1100
+ ]:
1101
+ svr.fit(X, y)
1102
+ assert_array_almost_equal(
1103
+ svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_
1104
+ )
1105
+
1106
+
1107
+ def test_lsvc_intercept_scaling_zero():
1108
+ # Test that intercept_scaling is ignored when fit_intercept is False
1109
+
1110
+ lsvc = svm.LinearSVC(dual="auto", fit_intercept=False)
1111
+ lsvc.fit(X, Y)
1112
+ assert lsvc.intercept_ == 0.0
1113
+
1114
+
1115
+ def test_hasattr_predict_proba():
1116
+ # Method must be (un)available before or after fit, switched by
1117
+ # `probability` param
1118
+
1119
+ G = svm.SVC(probability=True)
1120
+ assert hasattr(G, "predict_proba")
1121
+ G.fit(iris.data, iris.target)
1122
+ assert hasattr(G, "predict_proba")
1123
+
1124
+ G = svm.SVC(probability=False)
1125
+ assert not hasattr(G, "predict_proba")
1126
+ G.fit(iris.data, iris.target)
1127
+ assert not hasattr(G, "predict_proba")
1128
+
1129
+ # Switching to `probability=True` after fitting should make
1130
+ # predict_proba available, but calling it must not work:
1131
+ G.probability = True
1132
+ assert hasattr(G, "predict_proba")
1133
+ msg = "predict_proba is not available when fitted with probability=False"
1134
+
1135
+ with pytest.raises(NotFittedError, match=msg):
1136
+ G.predict_proba(iris.data)
1137
+
1138
+
1139
+ def test_decision_function_shape_two_class():
1140
+ for n_classes in [2, 3]:
1141
+ X, y = make_blobs(centers=n_classes, random_state=0)
1142
+ for estimator in [svm.SVC, svm.NuSVC]:
1143
+ clf = OneVsRestClassifier(estimator(decision_function_shape="ovr")).fit(
1144
+ X, y
1145
+ )
1146
+ assert len(clf.predict(X)) == len(y)
1147
+
1148
+
1149
+ def test_ovr_decision_function():
1150
+ # One point from each quadrant represents one class
1151
+ X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
1152
+ y_train = [0, 1, 2, 3]
1153
+
1154
+ # First point is closer to the decision boundaries than the second point
1155
+ base_points = np.array([[5, 5], [10, 10]])
1156
+
1157
+ # For all the quadrants (classes)
1158
+ X_test = np.vstack(
1159
+ (
1160
+ base_points * [1, 1], # Q1
1161
+ base_points * [-1, 1], # Q2
1162
+ base_points * [-1, -1], # Q3
1163
+ base_points * [1, -1], # Q4
1164
+ )
1165
+ )
1166
+
1167
+ y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
1168
+
1169
+ clf = svm.SVC(kernel="linear", decision_function_shape="ovr")
1170
+ clf.fit(X_train, y_train)
1171
+
1172
+ y_pred = clf.predict(X_test)
1173
+
1174
+ # Test if the prediction is the same as y
1175
+ assert_array_equal(y_pred, y_test)
1176
+
1177
+ deci_val = clf.decision_function(X_test)
1178
+
1179
+ # Assert that the predicted class has the maximum value
1180
+ assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
1181
+
1182
+ # Get decision value at test points for the predicted class
1183
+ pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
1184
+
1185
+ # Assert pred_class_deci_val > 0 here
1186
+ assert np.min(pred_class_deci_val) > 0.0
1187
+
1188
+ # Test if the first point has lower decision value on every quadrant
1189
+ # compared to the second point
1190
+ assert np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])
1191
+
1192
+
1193
+ @pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
1194
+ def test_svc_invalid_break_ties_param(SVCClass):
1195
+ X, y = make_blobs(random_state=42)
1196
+
1197
+ svm = SVCClass(
1198
+ kernel="linear", decision_function_shape="ovo", break_ties=True, random_state=42
1199
+ ).fit(X, y)
1200
+
1201
+ with pytest.raises(ValueError, match="break_ties must be False"):
1202
+ svm.predict(y)
1203
+
1204
+
1205
+ @pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
1206
+ def test_svc_ovr_tie_breaking(SVCClass):
1207
+ """Test if predict breaks ties in OVR mode.
1208
+ Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277
1209
+ """
1210
+ X, y = make_blobs(random_state=0, n_samples=20, n_features=2)
1211
+
1212
+ xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
1213
+ ys = np.linspace(X[:, 1].min(), X[:, 1].max(), 100)
1214
+ xx, yy = np.meshgrid(xs, ys)
1215
+
1216
+ common_params = dict(
1217
+ kernel="rbf", gamma=1e6, random_state=42, decision_function_shape="ovr"
1218
+ )
1219
+ svm = SVCClass(
1220
+ break_ties=False,
1221
+ **common_params,
1222
+ ).fit(X, y)
1223
+ pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
1224
+ dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
1225
+ assert not np.all(pred == np.argmax(dv, axis=1))
1226
+
1227
+ svm = SVCClass(
1228
+ break_ties=True,
1229
+ **common_params,
1230
+ ).fit(X, y)
1231
+ pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
1232
+ dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
1233
+ assert np.all(pred == np.argmax(dv, axis=1))
1234
+
1235
+
1236
+ def test_gamma_scale():
1237
+ X, y = [[0.0], [1.0]], [0, 1]
1238
+
1239
+ clf = svm.SVC()
1240
+ clf.fit(X, y)
1241
+ assert_almost_equal(clf._gamma, 4)
1242
+
1243
+
1244
+ @pytest.mark.parametrize(
1245
+ "SVM, params",
1246
+ [
1247
+ (LinearSVC, {"penalty": "l1", "loss": "squared_hinge", "dual": False}),
1248
+ (LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": True}),
1249
+ (LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": False}),
1250
+ (LinearSVC, {"penalty": "l2", "loss": "hinge", "dual": True}),
1251
+ (LinearSVR, {"loss": "epsilon_insensitive", "dual": True}),
1252
+ (LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": True}),
1253
+ (LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": True}),
1254
+ ],
1255
+ )
1256
+ def test_linearsvm_liblinear_sample_weight(SVM, params):
1257
+ X = np.array(
1258
+ [
1259
+ [1, 3],
1260
+ [1, 3],
1261
+ [1, 3],
1262
+ [1, 3],
1263
+ [2, 1],
1264
+ [2, 1],
1265
+ [2, 1],
1266
+ [2, 1],
1267
+ [3, 3],
1268
+ [3, 3],
1269
+ [3, 3],
1270
+ [3, 3],
1271
+ [4, 1],
1272
+ [4, 1],
1273
+ [4, 1],
1274
+ [4, 1],
1275
+ ],
1276
+ dtype=np.dtype("float"),
1277
+ )
1278
+ y = np.array(
1279
+ [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int")
1280
+ )
1281
+
1282
+ X2 = np.vstack([X, X])
1283
+ y2 = np.hstack([y, 3 - y])
1284
+ sample_weight = np.ones(shape=len(y) * 2)
1285
+ sample_weight[len(y) :] = 0
1286
+ X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0)
1287
+
1288
+ base_estimator = SVM(random_state=42)
1289
+ base_estimator.set_params(**params)
1290
+ base_estimator.set_params(tol=1e-12, max_iter=1000)
1291
+ est_no_weight = base.clone(base_estimator).fit(X, y)
1292
+ est_with_weight = base.clone(base_estimator).fit(
1293
+ X2, y2, sample_weight=sample_weight
1294
+ )
1295
+
1296
+ for method in ("predict", "decision_function"):
1297
+ if hasattr(base_estimator, method):
1298
+ X_est_no_weight = getattr(est_no_weight, method)(X)
1299
+ X_est_with_weight = getattr(est_with_weight, method)(X)
1300
+ assert_allclose(X_est_no_weight, X_est_with_weight)
1301
+
1302
+
1303
+ @pytest.mark.parametrize("Klass", (OneClassSVM, SVR, NuSVR))
1304
+ def test_n_support(Klass):
1305
+ # Make sure n_support_ is correct for OneClassSVM and SVR (it used to be
1306
+ # non-initialized).
1307
+ # This is a non-regression test for issue #14774.
1308
+ X = np.array([[0], [0.44], [0.45], [0.46], [1]])
1309
+ y = np.arange(X.shape[0])
1310
+ est = Klass()
1311
+ assert not hasattr(est, "n_support_")
1312
+ est.fit(X, y)
1313
+ assert est.n_support_[0] == est.support_vectors_.shape[0]
1314
+ assert est.n_support_.size == 1
1315
+
1316
+
1317
+ @pytest.mark.parametrize("Estimator", [svm.SVC, svm.SVR])
1318
+ def test_custom_kernel_not_array_input(Estimator):
1319
+ """Test using a custom kernel that is not fed with array-like for floats"""
1320
+ data = ["A A", "A", "B", "B B", "A B"]
1321
+ X = np.array([[2, 0], [1, 0], [0, 1], [0, 2], [1, 1]]) # count encoding
1322
+ y = np.array([1, 1, 2, 2, 1])
1323
+
1324
+ def string_kernel(X1, X2):
1325
+ assert isinstance(X1[0], str)
1326
+ n_samples1 = _num_samples(X1)
1327
+ n_samples2 = _num_samples(X2)
1328
+ K = np.zeros((n_samples1, n_samples2))
1329
+ for ii in range(n_samples1):
1330
+ for jj in range(ii, n_samples2):
1331
+ K[ii, jj] = X1[ii].count("A") * X2[jj].count("A")
1332
+ K[ii, jj] += X1[ii].count("B") * X2[jj].count("B")
1333
+ K[jj, ii] = K[ii, jj]
1334
+ return K
1335
+
1336
+ K = string_kernel(data, data)
1337
+ assert_array_equal(np.dot(X, X.T), K)
1338
+
1339
+ svc1 = Estimator(kernel=string_kernel).fit(data, y)
1340
+ svc2 = Estimator(kernel="linear").fit(X, y)
1341
+ svc3 = Estimator(kernel="precomputed").fit(K, y)
1342
+
1343
+ assert svc1.score(data, y) == svc3.score(K, y)
1344
+ assert svc1.score(data, y) == svc2.score(X, y)
1345
+ if hasattr(svc1, "decision_function"): # classifier
1346
+ assert_allclose(svc1.decision_function(data), svc2.decision_function(X))
1347
+ assert_allclose(svc1.decision_function(data), svc3.decision_function(K))
1348
+ assert_array_equal(svc1.predict(data), svc2.predict(X))
1349
+ assert_array_equal(svc1.predict(data), svc3.predict(K))
1350
+ else: # regressor
1351
+ assert_allclose(svc1.predict(data), svc2.predict(X))
1352
+ assert_allclose(svc1.predict(data), svc3.predict(K))
1353
+
1354
+
1355
+ def test_svc_raises_error_internal_representation():
1356
+ """Check that SVC raises error when internal representation is altered.
1357
+
1358
+ Non-regression test for #18891 and https://nvd.nist.gov/vuln/detail/CVE-2020-28975
1359
+ """
1360
+ clf = svm.SVC(kernel="linear").fit(X, Y)
1361
+ clf._n_support[0] = 1000000
1362
+
1363
+ msg = "The internal representation of SVC was altered"
1364
+ with pytest.raises(ValueError, match=msg):
1365
+ clf.predict(X)
1366
+
1367
+
1368
+ @pytest.mark.parametrize(
1369
+ "estimator, expected_n_iter_type",
1370
+ [
1371
+ (svm.SVC, np.ndarray),
1372
+ (svm.NuSVC, np.ndarray),
1373
+ (svm.SVR, int),
1374
+ (svm.NuSVR, int),
1375
+ (svm.OneClassSVM, int),
1376
+ ],
1377
+ )
1378
+ @pytest.mark.parametrize(
1379
+ "dataset",
1380
+ [
1381
+ make_classification(n_classes=2, n_informative=2, random_state=0),
1382
+ make_classification(n_classes=3, n_informative=3, random_state=0),
1383
+ make_classification(n_classes=4, n_informative=4, random_state=0),
1384
+ ],
1385
+ )
1386
+ def test_n_iter_libsvm(estimator, expected_n_iter_type, dataset):
1387
+ # Check that the type of n_iter_ is correct for the classes that inherit
1388
+ # from BaseSVC.
1389
+ # Note that for SVC, and NuSVC this is an ndarray; while for SVR, NuSVR, and
1390
+ # OneClassSVM, it is an int.
1391
+ # For SVC and NuSVC also check the shape of n_iter_.
1392
+ X, y = dataset
1393
+ n_iter = estimator(kernel="linear").fit(X, y).n_iter_
1394
+ assert type(n_iter) == expected_n_iter_type
1395
+ if estimator in [svm.SVC, svm.NuSVC]:
1396
+ n_classes = len(np.unique(y))
1397
+ assert n_iter.shape == (n_classes * (n_classes - 1) // 2,)
1398
+
1399
+
1400
+ # TODO(1.5): Remove
1401
+ @pytest.mark.parametrize("Estimator", [LinearSVR, LinearSVC])
1402
+ def test_dual_auto_deprecation_warning(Estimator):
1403
+ svm = Estimator()
1404
+ msg = (
1405
+ "The default value of `dual` will change from `True` to `'auto'` in"
1406
+ " 1.5. Set the value of `dual` explicitly to suppress the warning."
1407
+ )
1408
+ with pytest.warns(FutureWarning, match=re.escape(msg)):
1409
+ svm.fit(X, Y)
1410
+
1411
+
1412
+ @pytest.mark.parametrize("loss", ["squared_hinge", "squared_epsilon_insensitive"])
1413
+ def test_dual_auto(loss):
1414
+ # OvR, L2, N > M (6,2)
1415
+ dual = _validate_dual_parameter("auto", loss, "l2", "ovr", np.asarray(X))
1416
+ assert dual is False
1417
+ # OvR, L2, N < M (2,6)
1418
+ dual = _validate_dual_parameter("auto", loss, "l2", "ovr", np.asarray(X).T)
1419
+ assert dual is True
1420
+
1421
+
1422
+ def test_dual_auto_edge_cases():
1423
+ # Hinge, OvR, L2, N > M (6,2)
1424
+ dual = _validate_dual_parameter("auto", "hinge", "l2", "ovr", np.asarray(X))
1425
+ assert dual is True # only supports True
1426
+ dual = _validate_dual_parameter(
1427
+ "auto", "epsilon_insensitive", "l2", "ovr", np.asarray(X)
1428
+ )
1429
+ assert dual is True # only supports True
1430
+ # SqHinge, OvR, L1, N < M (2,6)
1431
+ dual = _validate_dual_parameter(
1432
+ "auto", "squared_hinge", "l1", "ovr", np.asarray(X).T
1433
+ )
1434
+ assert dual is False # only supports False
venv/lib/python3.10/site-packages/sklearn/utils/__init__.py ADDED
@@ -0,0 +1,1299 @@
1
+ """
2
+ The :mod:`sklearn.utils` module includes various utilities.
3
+ """
4
+
5
+ import math
6
+ import numbers
7
+ import platform
8
+ import struct
9
+ import timeit
10
+ import warnings
11
+ from collections.abc import Sequence
12
+ from contextlib import contextmanager, suppress
13
+ from itertools import compress, islice
14
+
15
+ import numpy as np
16
+ from scipy.sparse import issparse
17
+
18
+ from .. import get_config
19
+ from ..exceptions import DataConversionWarning
20
+ from . import _joblib, metadata_routing
21
+ from ._bunch import Bunch
22
+ from ._estimator_html_repr import estimator_html_repr
23
+ from ._param_validation import Integral, Interval, validate_params
24
+ from .class_weight import compute_class_weight, compute_sample_weight
25
+ from .deprecation import deprecated
26
+ from .discovery import all_estimators
27
+ from .fixes import parse_version, threadpool_info
28
+ from .murmurhash import murmurhash3_32
29
+ from .validation import (
30
+ _is_arraylike_not_scalar,
31
+ _is_pandas_df,
32
+ _is_polars_df,
33
+ _use_interchange_protocol,
34
+ as_float_array,
35
+ assert_all_finite,
36
+ check_array,
37
+ check_consistent_length,
38
+ check_random_state,
39
+ check_scalar,
40
+ check_symmetric,
41
+ check_X_y,
42
+ column_or_1d,
43
+ indexable,
44
+ )
45
+
46
+ # Do not deprecate parallel_backend and register_parallel_backend as they are
47
+ # needed to tune `scikit-learn` behavior and have a different effect if called
48
+ # from the vendored version or the site-package version. The others are
49
+ # utilities that are independent of scikit-learn so they are not part of
50
+ # scikit-learn public API.
51
+ parallel_backend = _joblib.parallel_backend
52
+ register_parallel_backend = _joblib.register_parallel_backend
53
+
54
+ __all__ = [
55
+ "murmurhash3_32",
56
+ "as_float_array",
57
+ "assert_all_finite",
58
+ "check_array",
59
+ "check_random_state",
60
+ "compute_class_weight",
61
+ "compute_sample_weight",
62
+ "column_or_1d",
63
+ "check_consistent_length",
64
+ "check_X_y",
65
+ "check_scalar",
66
+ "indexable",
67
+ "check_symmetric",
68
+ "indices_to_mask",
69
+ "deprecated",
70
+ "parallel_backend",
71
+ "register_parallel_backend",
72
+ "resample",
73
+ "shuffle",
74
+ "check_matplotlib_support",
75
+ "all_estimators",
76
+ "DataConversionWarning",
77
+ "estimator_html_repr",
78
+ "Bunch",
79
+ "metadata_routing",
80
+ ]
81
+
82
+ IS_PYPY = platform.python_implementation() == "PyPy"
83
+ _IS_32BIT = 8 * struct.calcsize("P") == 32
84
+ _IS_WASM = platform.machine() in ["wasm32", "wasm64"]
85
+
86
+
87
+ def _in_unstable_openblas_configuration():
88
+ """Return True if in an unstable configuration for OpenBLAS"""
89
+
90
+ # Import libraries which might load OpenBLAS.
91
+ import numpy # noqa
92
+ import scipy # noqa
93
+
94
+ modules_info = threadpool_info()
95
+
96
+ open_blas_used = any(info["internal_api"] == "openblas" for info in modules_info)
97
+ if not open_blas_used:
98
+ return False
99
+
100
+ # OpenBLAS 0.3.16 fixed instability for arm64, see:
101
+ # https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58 # noqa
102
+ openblas_arm64_stable_version = parse_version("0.3.16")
103
+ for info in modules_info:
104
+ if info["internal_api"] != "openblas":
105
+ continue
106
+ openblas_version = info.get("version")
107
+ openblas_architecture = info.get("architecture")
108
+ if openblas_version is None or openblas_architecture is None:
109
+ # Cannot be sure that OpenBLAS is good enough. Assume unstable:
110
+ return True
111
+ if (
112
+ openblas_architecture == "neoversen1"
113
+ and parse_version(openblas_version) < openblas_arm64_stable_version
114
+ ):
115
+ # See discussions in https://github.com/numpy/numpy/issues/19411
116
+ return True
117
+ return False
118
+
119
+
120
+ @validate_params(
121
+ {
122
+ "X": ["array-like", "sparse matrix"],
123
+ "mask": ["array-like"],
124
+ },
125
+ prefer_skip_nested_validation=True,
126
+ )
127
+ def safe_mask(X, mask):
128
+ """Return a mask which is safe to use on X.
129
+
130
+ Parameters
131
+ ----------
132
+ X : {array-like, sparse matrix}
133
+ Data on which to apply mask.
134
+
135
+ mask : array-like
136
+ Mask to be used on X.
137
+
138
+ Returns
139
+ -------
140
+ mask : ndarray
141
+ Array that is safe to use on X.
142
+
143
+ Examples
144
+ --------
145
+ >>> from sklearn.utils import safe_mask
146
+ >>> from scipy.sparse import csr_matrix
147
+ >>> data = csr_matrix([[1], [2], [3], [4], [5]])
148
+ >>> condition = [False, True, True, False, True]
149
+ >>> mask = safe_mask(data, condition)
150
+ >>> data[mask].toarray()
151
+ array([[2],
152
+ [3],
153
+ [5]])
154
+ """
155
+ mask = np.asarray(mask)
156
+ if np.issubdtype(mask.dtype, np.signedinteger):
157
+ return mask
158
+
159
+ if hasattr(X, "toarray"):
160
+ ind = np.arange(mask.shape[0])
161
+ mask = ind[mask]
162
+ return mask
163
+
164
+
165
+ def axis0_safe_slice(X, mask, len_mask):
166
+ """Return a mask which is safer to use on X than safe_mask.
167
+
168
+ This mask is safer than safe_mask since it returns an
169
+ empty array, when a sparse matrix is sliced with a boolean mask
170
+ with all False, instead of raising an unhelpful error in older
171
+ versions of SciPy.
172
+
173
+ See: https://github.com/scipy/scipy/issues/5361
174
+
175
+ Also note that we can avoid doing the dot product by checking if
176
+ the len_mask is not zero in _huber_loss_and_gradient but this
177
+ is not going to be the bottleneck, since the number of outliers
178
+ and non_outliers are typically non-zero and it makes the code
179
+ tougher to follow.
180
+
181
+ Parameters
182
+ ----------
183
+ X : {array-like, sparse matrix}
184
+ Data on which to apply mask.
185
+
186
+ mask : ndarray
187
+ Mask to be used on X.
188
+
189
+ len_mask : int
190
+ The length of the mask.
191
+
192
+ Returns
193
+ -------
194
+ mask : ndarray
195
+ Array that is safe to use on X.
196
+ """
197
+ if len_mask != 0:
198
+ return X[safe_mask(X, mask), :]
199
+ return np.zeros(shape=(0, X.shape[1]))
200
+
201
+
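+ # Informal sketch of the degenerate case handled above (illustrative names,
+ # where `X_sparse` stands for any 2-D sparse matrix): an all-False mask yields
+ # an empty (0, n_features) array instead of attempting the sparse slice.
+ #
+ #   mask = np.zeros(5, dtype=bool)
+ #   axis0_safe_slice(X_sparse, mask, int(mask.sum()))
+ #   # -> array of shape (0, X_sparse.shape[1])
+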
202
+ def _array_indexing(array, key, key_dtype, axis):
203
+ """Index an array or scipy.sparse consistently across NumPy version."""
204
+ if issparse(array) and key_dtype == "bool":
205
+ key = np.asarray(key)
206
+ if isinstance(key, tuple):
207
+ key = list(key)
208
+ return array[key, ...] if axis == 0 else array[:, key]
209
+
210
+
211
+ def _pandas_indexing(X, key, key_dtype, axis):
212
+ """Index a pandas dataframe or a series."""
213
+ if _is_arraylike_not_scalar(key):
214
+ key = np.asarray(key)
215
+
216
+ if key_dtype == "int" and not (isinstance(key, slice) or np.isscalar(key)):
217
+ # using take() instead of iloc[] ensures the return value is a "proper"
218
+ # copy that will not raise SettingWithCopyWarning
219
+ return X.take(key, axis=axis)
220
+ else:
221
+ # check whether we should index with loc or iloc
222
+ indexer = X.iloc if key_dtype == "int" else X.loc
223
+ return indexer[:, key] if axis else indexer[key]
224
+
225
+
226
+ def _list_indexing(X, key, key_dtype):
227
+ """Index a Python list."""
228
+ if np.isscalar(key) or isinstance(key, slice):
229
+ # key is a slice or a scalar
230
+ return X[key]
231
+ if key_dtype == "bool":
232
+ # key is a boolean array-like
233
+ return list(compress(X, key))
234
+ # key is an integer array-like
235
+ return [X[idx] for idx in key]
236
+
237
+
238
+ def _polars_indexing(X, key, key_dtype, axis):
239
+ """Indexing X with polars interchange protocol."""
240
+ # Polars behavior is more consistent with lists
241
+ if isinstance(key, np.ndarray):
242
+ key = key.tolist()
243
+
244
+ if axis == 1:
245
+ return X[:, key]
246
+ else:
247
+ return X[key]
248
+
249
+
250
+ def _determine_key_type(key, accept_slice=True):
251
+ """Determine the data type of key.
252
+
253
+ Parameters
254
+ ----------
255
+ key : scalar, slice or array-like
256
+ The key from which we want to infer the data type.
257
+
258
+ accept_slice : bool, default=True
259
+ Whether or not to raise an error if the key is a slice.
260
+
261
+ Returns
262
+ -------
263
+ dtype : {'int', 'str', 'bool', None}
264
+ Returns the data type of key.
265
+ """
266
+ err_msg = (
267
+ "No valid specification of the columns. Only a scalar, list or "
268
+ "slice of all integers or all strings, or boolean mask is "
269
+ "allowed"
270
+ )
271
+
272
+ dtype_to_str = {int: "int", str: "str", bool: "bool", np.bool_: "bool"}
273
+ array_dtype_to_str = {
274
+ "i": "int",
275
+ "u": "int",
276
+ "b": "bool",
277
+ "O": "str",
278
+ "U": "str",
279
+ "S": "str",
280
+ }
281
+
282
+ if key is None:
283
+ return None
284
+ if isinstance(key, tuple(dtype_to_str.keys())):
285
+ try:
286
+ return dtype_to_str[type(key)]
287
+ except KeyError:
288
+ raise ValueError(err_msg)
289
+ if isinstance(key, slice):
290
+ if not accept_slice:
291
+ raise TypeError(
292
+ "Only array-like or scalar are supported. A Python slice was given."
293
+ )
294
+ if key.start is None and key.stop is None:
295
+ return None
296
+ key_start_type = _determine_key_type(key.start)
297
+ key_stop_type = _determine_key_type(key.stop)
298
+ if key_start_type is not None and key_stop_type is not None:
299
+ if key_start_type != key_stop_type:
300
+ raise ValueError(err_msg)
301
+ if key_start_type is not None:
302
+ return key_start_type
303
+ return key_stop_type
304
+ if isinstance(key, (list, tuple)):
305
+ unique_key = set(key)
306
+ key_type = {_determine_key_type(elt) for elt in unique_key}
307
+ if not key_type:
308
+ return None
309
+ if len(key_type) != 1:
310
+ raise ValueError(err_msg)
311
+ return key_type.pop()
312
+ if hasattr(key, "dtype"):
313
+ try:
314
+ return array_dtype_to_str[key.dtype.kind]
315
+ except KeyError:
316
+ raise ValueError(err_msg)
317
+ raise ValueError(err_msg)
318
+
319
+
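+ # Informal sketch of what the helper above returns for typical keys (the
+ # values follow directly from the branches of `_determine_key_type`):
+ #   _determine_key_type(3)                 -> 'int'
+ #   _determine_key_type("col")             -> 'str'
+ #   _determine_key_type([True, False])     -> 'bool'
+ #   _determine_key_type(slice(0, 2))       -> 'int'
+ #   _determine_key_type(np.array([0, 1]))  -> 'int'
+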
320
+ def _safe_indexing(X, indices, *, axis=0):
321
+ """Return rows, items or columns of X using indices.
322
+
323
+ .. warning::
324
+
325
+ This utility is documented, but **private**. This means that
326
+ backward compatibility might be broken without any deprecation
327
+ cycle.
328
+
329
+ Parameters
330
+ ----------
331
+ X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
332
+ Data from which to sample rows, items or columns. `list` is only
333
+ supported when `axis=0`.
334
+ indices : bool, int, str, slice, array-like
335
+ - If `axis=0`, boolean and integer array-like, integer slice,
336
+ and scalar integer are supported.
337
+ - If `axis=1`:
338
+ - to select a single column, `indices` can be of `int` type for
339
+ all `X` types and `str` only for dataframe. The selected subset
340
+ will be 1D, unless `X` is a sparse matrix in which case it will
341
+ be 2D.
342
+ - to select multiple columns, `indices` can be one of the
343
+ following: `list`, `array`, `slice`. The type used in
344
+ these containers can be one of the following: `int`, `bool` and
345
+ `str`. However, `str` is only supported when `X` is a dataframe.
346
+ The selected subset will be 2D.
347
+ axis : int, default=0
348
+ The axis along which `X` will be subsampled. `axis=0` will select
349
+ rows while `axis=1` will select columns.
350
+
351
+ Returns
352
+ -------
353
+ subset
354
+ Subset of X on axis 0 or 1.
355
+
356
+ Notes
357
+ -----
358
+ CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
359
+ not supported.
360
+
361
+ Examples
362
+ --------
363
+ >>> import numpy as np
364
+ >>> from sklearn.utils import _safe_indexing
365
+ >>> data = np.array([[1, 2], [3, 4], [5, 6]])
366
+ >>> _safe_indexing(data, 0, axis=0) # select the first row
367
+ array([1, 2])
368
+ >>> _safe_indexing(data, 0, axis=1) # select the first column
369
+ array([1, 3, 5])
370
+ """
371
+ if indices is None:
372
+ return X
373
+
374
+ if axis not in (0, 1):
375
+ raise ValueError(
376
+ "'axis' should be either 0 (to index rows) or 1 (to index "
377
+ " column). Got {} instead.".format(axis)
378
+ )
379
+
380
+ indices_dtype = _determine_key_type(indices)
381
+
382
+ if axis == 0 and indices_dtype == "str":
383
+ raise ValueError("String indexing is not supported with 'axis=0'")
384
+
385
+ if axis == 1 and isinstance(X, list):
386
+ raise ValueError("axis=1 is not supported for lists")
387
+
388
+ if axis == 1 and hasattr(X, "ndim") and X.ndim != 2:
389
+ raise ValueError(
390
+ "'X' should be a 2D NumPy array, 2D sparse matrix or pandas "
391
+ "dataframe when indexing the columns (i.e. 'axis=1'). "
392
+ "Got {} instead with {} dimension(s).".format(type(X), X.ndim)
393
+ )
394
+
395
+ if (
396
+ axis == 1
397
+ and indices_dtype == "str"
398
+ and not (_is_pandas_df(X) or _use_interchange_protocol(X))
399
+ ):
400
+ raise ValueError(
401
+ "Specifying the columns using strings is only supported for dataframes."
402
+ )
403
+
404
+ if hasattr(X, "iloc"):
405
+ # TODO: we should probably use _is_pandas_df(X) instead but this would
406
+ # require updating some tests such as test_train_test_split_mock_pandas.
407
+ return _pandas_indexing(X, indices, indices_dtype, axis=axis)
408
+ elif _is_polars_df(X):
409
+ return _polars_indexing(X, indices, indices_dtype, axis=axis)
410
+ elif hasattr(X, "shape"):
411
+ return _array_indexing(X, indices, indices_dtype, axis=axis)
412
+ else:
413
+ return _list_indexing(X, indices, indices_dtype)
414
+
415
+
416
+ def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
417
+ """Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
418
+
419
+ Parameters
420
+ ----------
421
+ X : {ndarray, sparse-matrix, dataframe}
422
+ Array to be modified. It is expected to be 2-dimensional.
423
+
424
+ values : ndarray
425
+ The values to be assigned to `X`.
426
+
427
+ row_indexer : array-like, dtype={int, bool}, default=None
428
+ A 1-dimensional array to select the rows of interest. If `None`, all
429
+ rows are selected.
430
+
431
+ column_indexer : array-like, dtype={int, bool}, default=None
432
+ A 1-dimensional array to select the columns of interest. If `None`, all
433
+ columns are selected.
434
+ """
435
+ row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
436
+ column_indexer = (
437
+ slice(None, None, None) if column_indexer is None else column_indexer
438
+ )
439
+
440
+ if hasattr(X, "iloc"): # pandas dataframe
441
+ with warnings.catch_warnings():
442
+ # pandas >= 1.5 raises a warning when using iloc to set values in a column
443
+ # that does not have the same type as the column being set. It happens
444
+ # for instance when setting a categorical column with a string.
445
+ # In the future the behavior won't change and the warning should disappear.
446
+ # TODO(1.3): check if the warning is still raised or remove the filter.
447
+ warnings.simplefilter("ignore", FutureWarning)
448
+ X.iloc[row_indexer, column_indexer] = values
449
+ else: # numpy array or sparse matrix
450
+ X[row_indexer, column_indexer] = values
451
+
452
+
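+ # Minimal usage sketch for the helper above, assuming a plain NumPy array
+ # (illustrative values only):
+ #   X = np.zeros((3, 3))
+ #   _safe_assign(X, np.ones(3), column_indexer=1)
+ #   # X[:, 1] is now all ones; a None indexer means "select everything"
+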
453
+ def _get_column_indices_for_bool_or_int(key, n_columns):
454
+ # Convert key into list of positive integer indexes
455
+ try:
456
+ idx = _safe_indexing(np.arange(n_columns), key)
457
+ except IndexError as e:
458
+ raise ValueError(
459
+ f"all features must be in [0, {n_columns - 1}] or [-{n_columns}, 0]"
460
+ ) from e
461
+ return np.atleast_1d(idx).tolist()
462
+
463
+
464
+ def _get_column_indices(X, key):
465
+ """Get feature column indices for input data X and key.
466
+
467
+ For accepted values of `key`, see the docstring of
468
+ :func:`_safe_indexing`.
469
+ """
470
+ key_dtype = _determine_key_type(key)
471
+ if _use_interchange_protocol(X):
472
+ return _get_column_indices_interchange(X.__dataframe__(), key, key_dtype)
473
+
474
+ n_columns = X.shape[1]
475
+ if isinstance(key, (list, tuple)) and not key:
476
+ # we get an empty list
477
+ return []
478
+ elif key_dtype in ("bool", "int"):
479
+ return _get_column_indices_for_bool_or_int(key, n_columns)
480
+ else:
481
+ try:
482
+ all_columns = X.columns
483
+ except AttributeError:
484
+ raise ValueError(
485
+ "Specifying the columns using strings is only supported for dataframes."
486
+ )
487
+ if isinstance(key, str):
488
+ columns = [key]
489
+ elif isinstance(key, slice):
490
+ start, stop = key.start, key.stop
491
+ if start is not None:
492
+ start = all_columns.get_loc(start)
493
+ if stop is not None:
494
+ # pandas indexing with strings is endpoint included
495
+ stop = all_columns.get_loc(stop) + 1
496
+ else:
497
+ stop = n_columns + 1
498
+ return list(islice(range(n_columns), start, stop))
499
+ else:
500
+ columns = list(key)
501
+
502
+ try:
503
+ column_indices = []
504
+ for col in columns:
505
+ col_idx = all_columns.get_loc(col)
506
+ if not isinstance(col_idx, numbers.Integral):
507
+ raise ValueError(
508
+ f"Selected columns, {columns}, are not unique in dataframe"
509
+ )
510
+ column_indices.append(col_idx)
511
+
512
+ except KeyError as e:
513
+ raise ValueError("A given column is not a column of the dataframe") from e
514
+
515
+ return column_indices
516
+
517
+
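+ # Informal sketch, assuming a pandas DataFrame `df` with columns "a", "b", "c":
+ #   _get_column_indices(df, ["a", "c"])       -> [0, 2]
+ #   _get_column_indices(df, slice("a", "b"))  -> [0, 1]  (endpoint included)
+ #   _get_column_indices(df, [0, 2])           -> [0, 2]
+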
518
+ def _get_column_indices_interchange(X_interchange, key, key_dtype):
519
+ """Same as _get_column_indices but for X with __dataframe__ protocol."""
520
+
521
+ n_columns = X_interchange.num_columns()
522
+
523
+ if isinstance(key, (list, tuple)) and not key:
524
+ # we get an empty list
525
+ return []
526
+ elif key_dtype in ("bool", "int"):
527
+ return _get_column_indices_for_bool_or_int(key, n_columns)
528
+ else:
529
+ column_names = list(X_interchange.column_names())
530
+
531
+ if isinstance(key, slice):
532
+ if key.step not in [1, None]:
533
+ raise NotImplementedError("key.step must be 1 or None")
534
+ start, stop = key.start, key.stop
535
+ if start is not None:
536
+ start = column_names.index(start)
537
+
538
+ if stop is not None:
539
+ stop = column_names.index(stop) + 1
540
+ else:
541
+ stop = n_columns + 1
542
+ return list(islice(range(n_columns), start, stop))
543
+
544
+ selected_columns = [key] if np.isscalar(key) else key
545
+
546
+ try:
547
+ return [column_names.index(col) for col in selected_columns]
548
+ except ValueError as e:
549
+ raise ValueError("A given column is not a column of the dataframe") from e
550
+
551
+
552
+ @validate_params(
553
+ {
554
+ "replace": ["boolean"],
555
+ "n_samples": [Interval(numbers.Integral, 1, None, closed="left"), None],
556
+ "random_state": ["random_state"],
557
+ "stratify": ["array-like", None],
558
+ },
559
+ prefer_skip_nested_validation=True,
560
+ )
561
+ def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=None):
562
+ """Resample arrays or sparse matrices in a consistent way.
563
+
564
+ The default strategy implements one step of the bootstrapping
565
+ procedure.
566
+
567
+ Parameters
568
+ ----------
569
+ *arrays : sequence of array-like of shape (n_samples,) or \
570
+ (n_samples, n_outputs)
571
+ Indexable data-structures can be arrays, lists, dataframes or scipy
572
+ sparse matrices with consistent first dimension.
573
+
574
+ replace : bool, default=True
575
+ Implements resampling with replacement. If False, this will implement
576
+ (sliced) random permutations.
577
+
578
+ n_samples : int, default=None
579
+ Number of samples to generate. If left to None this is
580
+ automatically set to the first dimension of the arrays.
581
+ If replace is False it should not be larger than the length of
582
+ arrays.
583
+
584
+ random_state : int, RandomState instance or None, default=None
585
+ Determines random number generation for shuffling
586
+ the data.
587
+ Pass an int for reproducible results across multiple function calls.
588
+ See :term:`Glossary <random_state>`.
589
+
590
+ stratify : array-like of shape (n_samples,) or (n_samples, n_outputs), \
591
+ default=None
592
+ If not None, data is split in a stratified fashion, using this as
593
+ the class labels.
594
+
595
+ Returns
596
+ -------
597
+ resampled_arrays : sequence of array-like of shape (n_samples,) or \
598
+ (n_samples, n_outputs)
599
+ Sequence of resampled copies of the collections. The original arrays
600
+ are not impacted.
601
+
602
+ See Also
603
+ --------
604
+ shuffle : Shuffle arrays or sparse matrices in a consistent way.
605
+
606
+ Examples
607
+ --------
608
+ It is possible to mix sparse and dense arrays in the same run::
609
+
610
+ >>> import numpy as np
611
+ >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
612
+ >>> y = np.array([0, 1, 2])
613
+
614
+ >>> from scipy.sparse import coo_matrix
615
+ >>> X_sparse = coo_matrix(X)
616
+
617
+ >>> from sklearn.utils import resample
618
+ >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
619
+ >>> X
620
+ array([[1., 0.],
621
+ [2., 1.],
622
+ [1., 0.]])
623
+
624
+ >>> X_sparse
625
+ <3x2 sparse matrix of type '<... 'numpy.float64'>'
626
+ with 4 stored elements in Compressed Sparse Row format>
627
+
628
+ >>> X_sparse.toarray()
629
+ array([[1., 0.],
630
+ [2., 1.],
631
+ [1., 0.]])
632
+
633
+ >>> y
634
+ array([0, 1, 0])
635
+
636
+ >>> resample(y, n_samples=2, random_state=0)
637
+ array([0, 1])
638
+
639
+ Example using stratification::
640
+
641
+ >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]
642
+ >>> resample(y, n_samples=5, replace=False, stratify=y,
643
+ ... random_state=0)
644
+ [1, 1, 1, 0, 1]
645
+ """
646
+ max_n_samples = n_samples
647
+ random_state = check_random_state(random_state)
648
+
649
+ if len(arrays) == 0:
650
+ return None
651
+
652
+ first = arrays[0]
653
+ n_samples = first.shape[0] if hasattr(first, "shape") else len(first)
654
+
655
+ if max_n_samples is None:
656
+ max_n_samples = n_samples
657
+ elif (max_n_samples > n_samples) and (not replace):
658
+ raise ValueError(
659
+ "Cannot sample %d out of arrays with dim %d when replace is False"
660
+ % (max_n_samples, n_samples)
661
+ )
662
+
663
+ check_consistent_length(*arrays)
664
+
665
+ if stratify is None:
666
+ if replace:
667
+ indices = random_state.randint(0, n_samples, size=(max_n_samples,))
668
+ else:
669
+ indices = np.arange(n_samples)
670
+ random_state.shuffle(indices)
671
+ indices = indices[:max_n_samples]
672
+ else:
673
+ # Code adapted from StratifiedShuffleSplit()
674
+ y = check_array(stratify, ensure_2d=False, dtype=None)
675
+ if y.ndim == 2:
676
+ # for multi-label y, map each distinct row to a string repr
677
+ # using join because str(row) uses an ellipsis if len(row) > 1000
678
+ y = np.array([" ".join(row.astype("str")) for row in y])
679
+
680
+ classes, y_indices = np.unique(y, return_inverse=True)
681
+ n_classes = classes.shape[0]
682
+
683
+ class_counts = np.bincount(y_indices)
684
+
685
+ # Find the sorted list of instances for each class:
686
+ # (np.unique above performs a sort, so code is O(n logn) already)
687
+ class_indices = np.split(
688
+ np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
689
+ )
690
+
691
+ n_i = _approximate_mode(class_counts, max_n_samples, random_state)
692
+
693
+ indices = []
694
+
695
+ for i in range(n_classes):
696
+ indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace)
697
+ indices.extend(indices_i)
698
+
699
+ indices = random_state.permutation(indices)
700
+
701
+ # convert sparse matrices to CSR for row-based indexing
702
+ arrays = [a.tocsr() if issparse(a) else a for a in arrays]
703
+ resampled_arrays = [_safe_indexing(a, indices) for a in arrays]
704
+ if len(resampled_arrays) == 1:
705
+ # syntactic sugar for the unit argument case
706
+ return resampled_arrays[0]
707
+ else:
708
+ return resampled_arrays
709
+
710
+
711
+ def shuffle(*arrays, random_state=None, n_samples=None):
712
+ """Shuffle arrays or sparse matrices in a consistent way.
713
+
714
+ This is a convenience alias to ``resample(*arrays, replace=False)`` to do
715
+ random permutations of the collections.
716
+
717
+ Parameters
718
+ ----------
719
+ *arrays : sequence of indexable data-structures
720
+ Indexable data-structures can be arrays, lists, dataframes or scipy
721
+ sparse matrices with consistent first dimension.
722
+
723
+ random_state : int, RandomState instance or None, default=None
724
+ Determines random number generation for shuffling
725
+ the data.
726
+ Pass an int for reproducible results across multiple function calls.
727
+ See :term:`Glossary <random_state>`.
728
+
729
+ n_samples : int, default=None
730
+ Number of samples to generate. If left to None this is
731
+ automatically set to the first dimension of the arrays. It should
732
+ not be larger than the length of arrays.
733
+
734
+ Returns
735
+ -------
736
+ shuffled_arrays : sequence of indexable data-structures
737
+ Sequence of shuffled copies of the collections. The original arrays
738
+ are not impacted.
739
+
740
+ See Also
741
+ --------
742
+ resample : Resample arrays or sparse matrices in a consistent way.
743
+
744
+ Examples
745
+ --------
746
+ It is possible to mix sparse and dense arrays in the same run::
747
+
748
+ >>> import numpy as np
749
+ >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
750
+ >>> y = np.array([0, 1, 2])
751
+
752
+ >>> from scipy.sparse import coo_matrix
753
+ >>> X_sparse = coo_matrix(X)
754
+
755
+ >>> from sklearn.utils import shuffle
756
+ >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
757
+ >>> X
758
+ array([[0., 0.],
759
+ [2., 1.],
760
+ [1., 0.]])
761
+
762
+ >>> X_sparse
763
+ <3x2 sparse matrix of type '<... 'numpy.float64'>'
764
+ with 3 stored elements in Compressed Sparse Row format>
765
+
766
+ >>> X_sparse.toarray()
767
+ array([[0., 0.],
768
+ [2., 1.],
769
+ [1., 0.]])
770
+
771
+ >>> y
772
+ array([2, 1, 0])
773
+
774
+ >>> shuffle(y, n_samples=2, random_state=0)
775
+ array([0, 1])
776
+ """
777
+ return resample(
778
+ *arrays, replace=False, n_samples=n_samples, random_state=random_state
779
+ )
780
+
781
+
782
+ def safe_sqr(X, *, copy=True):
783
+ """Element wise squaring of array-likes and sparse matrices.
784
+
785
+ Parameters
786
+ ----------
787
+ X : {array-like, ndarray, sparse matrix}
788
+
789
+ copy : bool, default=True
790
+ Whether to create a copy of X and operate on it or to perform
791
+ inplace computation (default behaviour).
792
+
793
+ Returns
794
+ -------
795
+ X ** 2 : element wise square
796
+ Return the element-wise square of the input.
797
+
798
+ Examples
799
+ --------
800
+ >>> from sklearn.utils import safe_sqr
801
+ >>> safe_sqr([1, 2, 3])
802
+ array([1, 4, 9])
803
+ """
804
+ X = check_array(X, accept_sparse=["csr", "csc", "coo"], ensure_2d=False)
805
+ if issparse(X):
806
+ if copy:
807
+ X = X.copy()
808
+ X.data **= 2
809
+ else:
810
+ if copy:
811
+ X = X**2
812
+ else:
813
+ X **= 2
814
+ return X
815
+
816
+
817
+ def _chunk_generator(gen, chunksize):
818
+ """Chunk generator, ``gen`` into lists of length ``chunksize``. The last
819
+ chunk may have a length less than ``chunksize``."""
820
+ while True:
821
+ chunk = list(islice(gen, chunksize))
822
+ if chunk:
823
+ yield chunk
824
+ else:
825
+ return
826
+
827
+
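A minimal usage sketch (not part of the committed file), assuming the private `_chunk_generator` helper above is importable from `sklearn.utils`:

from sklearn.utils import _chunk_generator  # private helper defined above

gen = iter(range(7))
for chunk in _chunk_generator(gen, chunksize=3):
    print(chunk)
# [0, 1, 2]
# [3, 4, 5]
# [6]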
828
+ @validate_params(
829
+ {
830
+ "n": [Interval(numbers.Integral, 1, None, closed="left")],
831
+ "batch_size": [Interval(numbers.Integral, 1, None, closed="left")],
832
+ "min_batch_size": [Interval(numbers.Integral, 0, None, closed="left")],
833
+ },
834
+ prefer_skip_nested_validation=True,
835
+ )
836
+ def gen_batches(n, batch_size, *, min_batch_size=0):
837
+ """Generator to create slices containing `batch_size` elements from 0 to `n`.
838
+
839
+ The last slice may contain less than `batch_size` elements, when
840
+ `batch_size` does not divide `n`.
841
+
842
+ Parameters
843
+ ----------
844
+ n : int
845
+ Size of the sequence.
846
+ batch_size : int
847
+ Number of elements in each batch.
848
+ min_batch_size : int, default=0
849
+ Minimum number of elements in each batch.
850
+
851
+ Yields
852
+ ------
853
+ slice of `batch_size` elements
854
+
855
+ See Also
856
+ --------
857
+ gen_even_slices: Generator to create n_packs slices going up to n.
858
+
859
+ Examples
860
+ --------
861
+ >>> from sklearn.utils import gen_batches
862
+ >>> list(gen_batches(7, 3))
863
+ [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
864
+ >>> list(gen_batches(6, 3))
865
+ [slice(0, 3, None), slice(3, 6, None)]
866
+ >>> list(gen_batches(2, 3))
867
+ [slice(0, 2, None)]
868
+ >>> list(gen_batches(7, 3, min_batch_size=0))
869
+ [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
870
+ >>> list(gen_batches(7, 3, min_batch_size=2))
871
+ [slice(0, 3, None), slice(3, 7, None)]
872
+ """
873
+ start = 0
874
+ for _ in range(int(n // batch_size)):
875
+ end = start + batch_size
876
+ if end + min_batch_size > n:
877
+ continue
878
+ yield slice(start, end)
879
+ start = end
880
+ if start < n:
881
+ yield slice(start, n)
882
+
883
+
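The doctests above list the slices themselves; as a small sketch (not part of the committed file), the typical pattern is to iterate over those slices to process an array in mini-batches:

import numpy as np
from sklearn.utils import gen_batches

X = np.arange(10).reshape(10, 1)
for batch in gen_batches(X.shape[0], batch_size=4):
    print(X[batch].ravel())
# [0 1 2 3]
# [4 5 6 7]
# [8 9]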
884
+ @validate_params(
885
+ {
886
+ "n": [Interval(Integral, 1, None, closed="left")],
887
+ "n_packs": [Interval(Integral, 1, None, closed="left")],
888
+ "n_samples": [Interval(Integral, 1, None, closed="left"), None],
889
+ },
890
+ prefer_skip_nested_validation=True,
891
+ )
892
+ def gen_even_slices(n, n_packs, *, n_samples=None):
893
+ """Generator to create `n_packs` evenly spaced slices going up to `n`.
894
+
895
+ If `n_packs` does not divide `n`, except for the first `n % n_packs`
896
+ slices, remaining slices may contain fewer elements.
897
+
898
+ Parameters
899
+ ----------
900
+ n : int
901
+ Size of the sequence.
902
+ n_packs : int
903
+ Number of slices to generate.
904
+ n_samples : int, default=None
905
+ Number of samples. Pass `n_samples` when the slices are to be used for
906
+ sparse matrix indexing; slicing off-the-end raises an exception, while
907
+ it works for NumPy arrays.
908
+
909
+ Yields
910
+ ------
911
+ `slice` representing a set of indices from 0 to n.
912
+
913
+ See Also
914
+ --------
915
+ gen_batches: Generator to create slices containing batch_size elements
916
+ from 0 to n.
917
+
918
+ Examples
919
+ --------
920
+ >>> from sklearn.utils import gen_even_slices
921
+ >>> list(gen_even_slices(10, 1))
922
+ [slice(0, 10, None)]
923
+ >>> list(gen_even_slices(10, 10))
924
+ [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
925
+ >>> list(gen_even_slices(10, 5))
926
+ [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
927
+ >>> list(gen_even_slices(10, 3))
928
+ [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
929
+ """
930
+ start = 0
931
+ for pack_num in range(n_packs):
932
+ this_n = n // n_packs
933
+ if pack_num < n % n_packs:
934
+ this_n += 1
935
+ if this_n > 0:
936
+ end = start + this_n
937
+ if n_samples is not None:
938
+ end = min(n_samples, end)
939
+ yield slice(start, end, None)
940
+ start = end
941
+
942
+
943
+ def tosequence(x):
944
+ """Cast iterable x to a Sequence, avoiding a copy if possible.
945
+
946
+ Parameters
947
+ ----------
948
+ x : iterable
949
+ The iterable to be converted.
950
+
951
+ Returns
952
+ -------
953
+ x : Sequence
954
+ If `x` is a NumPy array, it is returned as an `ndarray`. If `x`
955
+ is a `Sequence`, `x` is returned as-is. If `x` is of any other
956
+ type, `x` is returned cast as a list.
957
+ """
958
+ if isinstance(x, np.ndarray):
959
+ return np.asarray(x)
960
+ elif isinstance(x, Sequence):
961
+ return x
962
+ else:
963
+ return list(x)
964
+
965
+
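A short sketch (not part of the committed file) exercising the three branches of `tosequence`:

import numpy as np
from sklearn.utils import tosequence

print(tosequence(i * i for i in range(3)))    # other iterables become a list: [0, 1, 4]
seq = [1, 2, 3]
print(tosequence(seq) is seq)                 # an existing Sequence is returned as-is: True
print(type(tosequence(np.array([1, 2, 3]))))  # an ndarray stays an ndarray: <class 'numpy.ndarray'>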
966
+ def _to_object_array(sequence):
967
+ """Convert sequence to a 1-D NumPy array of object dtype.
968
+
969
+ The numpy.array constructor has a similar use but its output
970
+ is ambiguous. It can be a 1-D NumPy array of object dtype if
971
+ the input is a ragged array, but if the input is a list of
972
+ equal-length arrays, then the output is a 2-D numpy.array.
973
+ _to_object_array resolves this ambiguity by guaranteeing that
974
+ the output is a 1-D NumPy array of objects for any input.
975
+
976
+ Parameters
977
+ ----------
978
+ sequence : array-like of shape (n_elements,)
979
+ The sequence to be converted.
980
+
981
+ Returns
982
+ -------
983
+ out : ndarray of shape (n_elements,), dtype=object
984
+ The converted sequence into a 1-D NumPy array of object dtype.
985
+
986
+ Examples
987
+ --------
988
+ >>> import numpy as np
989
+ >>> from sklearn.utils import _to_object_array
990
+ >>> _to_object_array([np.array([0]), np.array([1])])
991
+ array([array([0]), array([1])], dtype=object)
992
+ >>> _to_object_array([np.array([0]), np.array([1, 2])])
993
+ array([array([0]), array([1, 2])], dtype=object)
996
+ """
997
+ out = np.empty(len(sequence), dtype=object)
998
+ out[:] = sequence
999
+ return out
1000
+
1001
+
1002
+ def indices_to_mask(indices, mask_length):
1003
+ """Convert list of indices to boolean mask.
1004
+
1005
+ Parameters
1006
+ ----------
1007
+ indices : list-like
1008
+ List of integers treated as indices.
1009
+ mask_length : int
1010
+ Length of boolean mask to be generated.
1011
+ This parameter must be greater than max(indices).
1012
+
1013
+ Returns
1014
+ -------
1015
+ mask : 1d boolean nd-array
1016
+ Boolean array that is True where indices are present, else False.
1017
+
1018
+ Examples
1019
+ --------
1020
+ >>> from sklearn.utils import indices_to_mask
1021
+ >>> indices = [1, 2, 3, 4]
1022
+ >>> indices_to_mask(indices, 5)
1023
+ array([False, True, True, True, True])
1024
+ """
1025
+ if mask_length <= np.max(indices):
1026
+ raise ValueError("mask_length must be greater than max(indices)")
1027
+
1028
+ mask = np.zeros(mask_length, dtype=bool)
1029
+ mask[indices] = True
1030
+
1031
+ return mask
1032
+
1033
+
1034
+ def _message_with_time(source, message, time):
1035
+ """Create one line message for logging purposes.
1036
+
1037
+ Parameters
1038
+ ----------
1039
+ source : str
1040
+ String indicating the source or the reference of the message.
1041
+
1042
+ message : str
1043
+ Short message.
1044
+
1045
+ time : int
1046
+ Time in seconds.
1047
+ """
1048
+ start_message = "[%s] " % source
1049
+
1050
+ # adapted from joblib.logger.short_format_time without the Windows -.1s
1051
+ # adjustment
1052
+ if time > 60:
1053
+ time_str = "%4.1fmin" % (time / 60)
1054
+ else:
1055
+ time_str = " %5.1fs" % time
1056
+ end_message = " %s, total=%s" % (message, time_str)
1057
+ dots_len = 70 - len(start_message) - len(end_message)
1058
+ return "%s%s%s" % (start_message, dots_len * ".", end_message)
1059
+
1060
+
1061
+ @contextmanager
1062
+ def _print_elapsed_time(source, message=None):
1063
+ """Log elapsed time to stdout when the context is exited.
1064
+
1065
+ Parameters
1066
+ ----------
1067
+ source : str
1068
+ String indicating the source or the reference of the message.
1069
+
1070
+ message : str, default=None
1071
+ Short message. If None, nothing will be printed.
1072
+
1073
+ Returns
1074
+ -------
1075
+ context_manager
1076
+ Prints elapsed time upon exit if verbose.
1077
+ """
1078
+ if message is None:
1079
+ yield
1080
+ else:
1081
+ start = timeit.default_timer()
1082
+ yield
1083
+ print(_message_with_time(source, message, timeit.default_timer() - start))
1084
+
1085
+
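A minimal sketch (not part of the committed file) of how this context manager is used; the exact dot padding and timing in the printed line will vary:

import time
from sklearn.utils import _print_elapsed_time  # private helper defined above

with _print_elapsed_time("Pipeline", "fitting the transformer"):
    time.sleep(0.1)
# prints something like:
# [Pipeline] .................. fitting the transformer, total=   0.1s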
1086
+ def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
1087
+ """Calculate how many rows can be processed within `working_memory`.
1088
+
1089
+ Parameters
1090
+ ----------
1091
+ row_bytes : int
1092
+ The expected number of bytes of memory that will be consumed
1093
+ during the processing of each row.
1094
+ max_n_rows : int, default=None
1095
+ The maximum return value.
1096
+ working_memory : int or float, default=None
1097
+ The number of rows to fit inside this number of MiB will be
1098
+ returned. When None (default), the value of
1099
+ ``sklearn.get_config()['working_memory']`` is used.
1100
+
1101
+ Returns
1102
+ -------
1103
+ int
1104
+ The number of rows which can be processed within `working_memory`.
1105
+
1106
+ Warns
1107
+ -----
1108
+ Issues a UserWarning if `row_bytes` exceeds `working_memory` MiB.
1109
+ """
1110
+
1111
+ if working_memory is None:
1112
+ working_memory = get_config()["working_memory"]
1113
+
1114
+ chunk_n_rows = int(working_memory * (2**20) // row_bytes)
1115
+ if max_n_rows is not None:
1116
+ chunk_n_rows = min(chunk_n_rows, max_n_rows)
1117
+ if chunk_n_rows < 1:
1118
+ warnings.warn(
1119
+ "Could not adhere to working_memory config. "
1120
+ "Currently %.0fMiB, %.0fMiB required."
1121
+ % (working_memory, np.ceil(row_bytes * 2**-20))
1122
+ )
1123
+ chunk_n_rows = 1
1124
+ return chunk_n_rows
1125
+
1126
+
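A worked sketch (not part of the committed file) of the arithmetic above: a row of 100,000 float64 distances costs 800,000 bytes, so with 1024 MiB of working memory roughly 1342 rows fit per chunk.

from sklearn.utils import get_chunk_n_rows  # helper defined above

row_bytes = 100_000 * 8  # one float64 row of width 100,000
print(get_chunk_n_rows(row_bytes, working_memory=1024))  # 1024 * 2**20 // 800_000 == 1342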
1127
+ def _is_pandas_na(x):
1128
+ """Test if x is pandas.NA.
1129
+
1130
+ We intentionally do not use this function to return `True` for `pd.NA` in
1131
+ `is_scalar_nan`, because estimators that support `pd.NA` are the exception
1132
+ rather than the rule at the moment. When `pd.NA` is more universally
1133
+ supported, we may reconsider this decision.
1134
+
1135
+ Parameters
1136
+ ----------
1137
+ x : any type
1138
+
1139
+ Returns
1140
+ -------
1141
+ boolean
1142
+ """
1143
+ with suppress(ImportError):
1144
+ from pandas import NA
1145
+
1146
+ return x is NA
1147
+
1148
+ return False
1149
+
1150
+
1151
+ def is_scalar_nan(x):
1152
+ """Test if x is NaN.
1153
+
1154
+ This function is meant to overcome the issue that np.isnan does not allow
1155
+ non-numerical types as input, and that np.nan is not float('nan').
1156
+
1157
+ Parameters
1158
+ ----------
1159
+ x : any type
1160
+ Any scalar value.
1161
+
1162
+ Returns
1163
+ -------
1164
+ bool
1165
+ Returns true if x is NaN, and false otherwise.
1166
+
1167
+ Examples
1168
+ --------
1169
+ >>> import numpy as np
1170
+ >>> from sklearn.utils import is_scalar_nan
1171
+ >>> is_scalar_nan(np.nan)
1172
+ True
1173
+ >>> is_scalar_nan(float("nan"))
1174
+ True
1175
+ >>> is_scalar_nan(None)
1176
+ False
1177
+ >>> is_scalar_nan("")
1178
+ False
1179
+ >>> is_scalar_nan([np.nan])
1180
+ False
1181
+ """
1182
+ return (
1183
+ not isinstance(x, numbers.Integral)
1184
+ and isinstance(x, numbers.Real)
1185
+ and math.isnan(x)
1186
+ )
1187
+
1188
+
1189
+ def _approximate_mode(class_counts, n_draws, rng):
1190
+ """Computes approximate mode of multivariate hypergeometric.
1191
+
1192
+ This is an approximation to the mode of the multivariate
1193
+ hypergeometric given by class_counts and n_draws.
1194
+ It shouldn't be off by more than one.
1195
+
1196
+ It is the most likely outcome of drawing n_draws many
1197
+ samples from the population given by class_counts.
1198
+
1199
+ Parameters
1200
+ ----------
1201
+ class_counts : ndarray of int
1202
+ Population per class.
1203
+ n_draws : int
1204
+ Number of draws (samples to draw) from the overall population.
1205
+ rng : random state
1206
+ Used to break ties.
1207
+
1208
+ Returns
1209
+ -------
1210
+ sampled_classes : ndarray of int
1211
+ Number of samples drawn from each class.
1212
+ np.sum(sampled_classes) == n_draws
1213
+
1214
+ Examples
1215
+ --------
1216
+ >>> import numpy as np
1217
+ >>> from sklearn.utils import _approximate_mode
1218
+ >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
1219
+ array([2, 1])
1220
+ >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
1221
+ array([3, 1])
1222
+ >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
1223
+ ... n_draws=2, rng=0)
1224
+ array([0, 1, 1, 0])
1225
+ >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
1226
+ ... n_draws=2, rng=42)
1227
+ array([1, 1, 0, 0])
1228
+ """
1229
+ rng = check_random_state(rng)
1230
+ # this computes a bad approximation to the mode of the
1231
+ # multivariate hypergeometric given by class_counts and n_draws
1232
+ continuous = class_counts / class_counts.sum() * n_draws
1233
+ # floored means we don't overshoot n_samples, but probably undershoot
1234
+ floored = np.floor(continuous)
1235
+ # we add samples according to how much "left over" probability
1236
+ # they had, until we arrive at n_samples
1237
+ need_to_add = int(n_draws - floored.sum())
1238
+ if need_to_add > 0:
1239
+ remainder = continuous - floored
1240
+ values = np.sort(np.unique(remainder))[::-1]
1241
+ # add according to remainder, but break ties
1242
+ # randomly to avoid biases
1243
+ for value in values:
1244
+ (inds,) = np.where(remainder == value)
1245
+ # if we need_to_add less than what's in inds
1246
+ # we draw randomly from them.
1247
+ # if we need to add more, we add them all and
1248
+ # go to the next value
1249
+ add_now = min(len(inds), need_to_add)
1250
+ inds = rng.choice(inds, size=add_now, replace=False)
1251
+ floored[inds] += 1
1252
+ need_to_add -= add_now
1253
+ if need_to_add == 0:
1254
+ break
1255
+ return floored.astype(int)
1256
+
1257
+
1258
+ def check_matplotlib_support(caller_name):
1259
+ """Raise ImportError with detailed error message if mpl is not installed.
1260
+
1261
+ Plot utilities like any of the Display's plotting functions should lazily import
1262
+ matplotlib and call this helper before any computation.
1263
+
1264
+ Parameters
1265
+ ----------
1266
+ caller_name : str
1267
+ The name of the caller that requires matplotlib.
1268
+ """
1269
+ try:
1270
+ import matplotlib # noqa
1271
+ except ImportError as e:
1272
+ raise ImportError(
1273
+ "{} requires matplotlib. You can install matplotlib with "
1274
+ "`pip install matplotlib`".format(caller_name)
1275
+ ) from e
1276
+
1277
+
1278
+ def check_pandas_support(caller_name):
1279
+ """Raise ImportError with detailed error message if pandas is not installed.
1280
+
1281
+ Plot utilities like :func:`fetch_openml` should lazily import
1282
+ pandas and call this helper before any computation.
1283
+
1284
+ Parameters
1285
+ ----------
1286
+ caller_name : str
1287
+ The name of the caller that requires pandas.
1288
+
1289
+ Returns
1290
+ -------
1291
+ pandas
1292
+ The pandas package.
1293
+ """
1294
+ try:
1295
+ import pandas # noqa
1296
+
1297
+ return pandas
1298
+ except ImportError as e:
1299
+ raise ImportError("{} requires pandas.".format(caller_name)) from e
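A minimal sketch (not part of the committed file) of the intended call pattern for this helper: import pandas lazily and fail with a readable error when it is missing.

from sklearn.utils import check_pandas_support  # helper defined above

pd = check_pandas_support("fetch_openml")  # raises ImportError("fetch_openml requires pandas.") if absent
frame = pd.DataFrame({"a": [1, 2, 3]})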
venv/lib/python3.10/site-packages/sklearn/utils/_arpack.py ADDED
@@ -0,0 +1,30 @@
1
+ from .validation import check_random_state
2
+
3
+
4
+ def _init_arpack_v0(size, random_state):
5
+ """Initialize the starting vector for iteration in ARPACK functions.
6
+
7
+ Initialize an ndarray with values sampled from the uniform distribution on
8
+ [-1, 1]. This initialization model has been chosen to be consistent with
9
+ the ARPACK one, as another initialization can lead to convergence issues.
10
+
11
+ Parameters
12
+ ----------
13
+ size : int
14
+ The size of the eigenvalue vector to be initialized.
15
+
16
+ random_state : int, RandomState instance or None, default=None
17
+ The seed of the pseudo random number generator used to generate a
18
+ uniform distribution. If int, random_state is the seed used by the
19
+ random number generator; If RandomState instance, random_state is the
20
+ random number generator; If None, the random number generator is the
21
+ RandomState instance used by `np.random`.
22
+
23
+ Returns
24
+ -------
25
+ v0 : ndarray of shape (size,)
26
+ The initialized vector.
27
+ """
28
+ random_state = check_random_state(random_state)
29
+ v0 = random_state.uniform(-1, 1, size)
30
+ return v0
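A minimal sketch (not part of the committed file) of how this starting vector can seed SciPy's ARPACK solver for reproducible results:

import numpy as np
from scipy.sparse.linalg import eigsh
from sklearn.utils._arpack import _init_arpack_v0

A = np.diag(np.arange(1.0, 6.0))                      # symmetric 5x5 test matrix
v0 = _init_arpack_v0(size=A.shape[0], random_state=0)
vals, _ = eigsh(A, k=2, v0=v0)                        # deterministic given the fixed seed
print(vals)                                           # two largest eigenvalues: [4. 5.]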
venv/lib/python3.10/site-packages/sklearn/utils/_array_api.py ADDED
@@ -0,0 +1,575 @@
1
+ """Tools to support array_api."""
2
+ import itertools
3
+ import math
4
+ from functools import wraps
5
+
6
+ import numpy
7
+ import scipy.special as special
8
+
9
+ from .._config import get_config
10
+ from .fixes import parse_version
11
+
12
+
13
+ def yield_namespace_device_dtype_combinations():
14
+ """Yield supported namespace, device, dtype tuples for testing.
15
+
16
+ Use this to test that an estimator works with all combinations.
17
+
18
+ Returns
19
+ -------
20
+ array_namespace : str
21
+ The name of the Array API namespace.
22
+
23
+ device : str
24
+ The name of the device on which to allocate the arrays. Can be None to
25
+ indicate that the default value should be used.
26
+
27
+ dtype_name : str
28
+ The name of the data type to use for arrays. Can be None to indicate
29
+ that the default value should be used.
30
+ """
31
+ for array_namespace in [
32
+ # The following is used to test the array_api_compat wrapper when
33
+ # array_api_dispatch is enabled: in particular, the arrays used in the
34
+ # tests are regular numpy arrays without any "device" attribute.
35
+ "numpy",
36
+ # Stricter NumPy-based Array API implementation. The
37
+ # numpy.array_api.Array instances always a dummy "device" attribute.
38
+ "numpy.array_api",
39
+ "cupy",
40
+ "cupy.array_api",
41
+ "torch",
42
+ ]:
43
+ if array_namespace == "torch":
44
+ for device, dtype in itertools.product(
45
+ ("cpu", "cuda"), ("float64", "float32")
46
+ ):
47
+ yield array_namespace, device, dtype
48
+ yield array_namespace, "mps", "float32"
49
+ else:
50
+ yield array_namespace, None, None
51
+
52
+
53
+ def _check_array_api_dispatch(array_api_dispatch):
54
+ """Check that array_api_compat is installed and NumPy version is compatible.
55
+
56
+ array_api_compat follows NEP29, which has a higher minimum NumPy version than
57
+ scikit-learn.
58
+ """
59
+ if array_api_dispatch:
60
+ try:
61
+ import array_api_compat # noqa
62
+ except ImportError:
63
+ raise ImportError(
64
+ "array_api_compat is required to dispatch arrays using the API"
65
+ " specification"
66
+ )
67
+
68
+ numpy_version = parse_version(numpy.__version__)
69
+ min_numpy_version = "1.21"
70
+ if numpy_version < parse_version(min_numpy_version):
71
+ raise ImportError(
72
+ f"NumPy must be {min_numpy_version} or newer to dispatch array using"
73
+ " the API specification"
74
+ )
75
+
76
+
77
+ def device(x):
78
+ """Hardware device the array data resides on.
79
+
80
+ Parameters
81
+ ----------
82
+ x : array
83
+ Array instance from NumPy or an array API compatible library.
84
+
85
+ Returns
86
+ -------
87
+ out : device
88
+ `device` object (see the "Device Support" section of the array API spec).
89
+ """
90
+ if isinstance(x, (numpy.ndarray, numpy.generic)):
91
+ return "cpu"
92
+ return x.device
93
+
94
+
95
+ def size(x):
96
+ """Return the total number of elements of x.
97
+
98
+ Parameters
99
+ ----------
100
+ x : array
101
+ Array instance from NumPy or an array API compatible library.
102
+
103
+ Returns
104
+ -------
105
+ out : int
106
+ Total number of elements.
107
+ """
108
+ return math.prod(x.shape)
109
+
110
+
111
+ def _is_numpy_namespace(xp):
112
+ """Return True if xp is backed by NumPy."""
113
+ return xp.__name__ in {"numpy", "array_api_compat.numpy", "numpy.array_api"}
114
+
115
+
116
+ def _union1d(a, b, xp):
117
+ if _is_numpy_namespace(xp):
118
+ return xp.asarray(numpy.union1d(a, b))
119
+ assert a.ndim == b.ndim == 1
120
+ return xp.unique_values(xp.concat([xp.unique_values(a), xp.unique_values(b)]))
121
+
122
+
123
+ def isdtype(dtype, kind, *, xp):
124
+ """Returns a boolean indicating whether a provided dtype is of type "kind".
125
+
126
+ Included in the v2022.12 of the Array API spec.
127
+ https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
128
+ """
129
+ if isinstance(kind, tuple):
130
+ return any(_isdtype_single(dtype, k, xp=xp) for k in kind)
131
+ else:
132
+ return _isdtype_single(dtype, kind, xp=xp)
133
+
134
+
135
+ def _isdtype_single(dtype, kind, *, xp):
136
+ if isinstance(kind, str):
137
+ if kind == "bool":
138
+ return dtype == xp.bool
139
+ elif kind == "signed integer":
140
+ return dtype in {xp.int8, xp.int16, xp.int32, xp.int64}
141
+ elif kind == "unsigned integer":
142
+ return dtype in {xp.uint8, xp.uint16, xp.uint32, xp.uint64}
143
+ elif kind == "integral":
144
+ return any(
145
+ _isdtype_single(dtype, k, xp=xp)
146
+ for k in ("signed integer", "unsigned integer")
147
+ )
148
+ elif kind == "real floating":
149
+ return dtype in supported_float_dtypes(xp)
150
+ elif kind == "complex floating":
151
+ # Some name spaces do not have complex, such as cupy.array_api
152
+ # and numpy.array_api
153
+ complex_dtypes = set()
154
+ if hasattr(xp, "complex64"):
155
+ complex_dtypes.add(xp.complex64)
156
+ if hasattr(xp, "complex128"):
157
+ complex_dtypes.add(xp.complex128)
158
+ return dtype in complex_dtypes
159
+ elif kind == "numeric":
160
+ return any(
161
+ _isdtype_single(dtype, k, xp=xp)
162
+ for k in ("integral", "real floating", "complex floating")
163
+ )
164
+ else:
165
+ raise ValueError(f"Unrecognized data type kind: {kind!r}")
166
+ else:
167
+ return dtype == kind
168
+
169
+
170
+ def supported_float_dtypes(xp):
171
+ """Supported floating point types for the namespace
172
+
173
+ Note: float16 is not officially part of the Array API spec at the
174
+ time of writing but scikit-learn estimators and functions can choose
175
+ to accept it when xp.float16 is defined.
176
+
177
+ https://data-apis.org/array-api/latest/API_specification/data_types.html
178
+ """
179
+ if hasattr(xp, "float16"):
180
+ return (xp.float64, xp.float32, xp.float16)
181
+ else:
182
+ return (xp.float64, xp.float32)
183
+
184
+
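A small sketch (not part of the committed file) of how `isdtype` classifies plain NumPy dtypes, using the `_NumPyAPIWrapper` defined further down in this file as the namespace argument:

import numpy as np
from sklearn.utils._array_api import _NumPyAPIWrapper, isdtype

xp = _NumPyAPIWrapper()
print(isdtype(np.dtype("float32"), "real floating", xp=xp))     # True
print(isdtype(np.dtype("int64"), ("integral", "bool"), xp=xp))  # True (matches "integral")
print(isdtype(np.dtype("complex128"), "real floating", xp=xp))  # False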
185
+ class _ArrayAPIWrapper:
186
+ """sklearn specific Array API compatibility wrapper
187
+
188
+ This wrapper makes it possible for scikit-learn maintainers to
189
+ deal with discrepancies between different implementations of the
190
+ Python Array API standard and its evolution over time.
191
+
192
+ The Python Array API standard specification:
193
+ https://data-apis.org/array-api/latest/
194
+
195
+ Documentation of the NumPy implementation:
196
+ https://numpy.org/neps/nep-0047-array-api-standard.html
197
+ """
198
+
199
+ def __init__(self, array_namespace):
200
+ self._namespace = array_namespace
201
+
202
+ def __getattr__(self, name):
203
+ return getattr(self._namespace, name)
204
+
205
+ def __eq__(self, other):
206
+ return self._namespace == other._namespace
207
+
208
+ def isdtype(self, dtype, kind):
209
+ return isdtype(dtype, kind, xp=self._namespace)
210
+
211
+
212
+ def _check_device_cpu(device): # noqa
213
+ if device not in {"cpu", None}:
214
+ raise ValueError(f"Unsupported device for NumPy: {device!r}")
215
+
216
+
217
+ def _accept_device_cpu(func):
218
+ @wraps(func)
219
+ def wrapped_func(*args, **kwargs):
220
+ _check_device_cpu(kwargs.pop("device", None))
221
+ return func(*args, **kwargs)
222
+
223
+ return wrapped_func
224
+
225
+
226
+ class _NumPyAPIWrapper:
227
+ """Array API compat wrapper for any numpy version
228
+
229
+ NumPy < 1.22 does not expose the numpy.array_api namespace. This
230
+ wrapper makes it possible to write code that uses the standard
231
+ Array API while working with any version of NumPy supported by
232
+ scikit-learn.
233
+
234
+ See the `get_namespace()` public function for more details.
235
+ """
236
+
237
+ # Creation functions in spec:
238
+ # https://data-apis.org/array-api/latest/API_specification/creation_functions.html
239
+ _CREATION_FUNCS = {
240
+ "arange",
241
+ "empty",
242
+ "empty_like",
243
+ "eye",
244
+ "full",
245
+ "full_like",
246
+ "linspace",
247
+ "ones",
248
+ "ones_like",
249
+ "zeros",
250
+ "zeros_like",
251
+ }
252
+ # Data types in spec
253
+ # https://data-apis.org/array-api/latest/API_specification/data_types.html
254
+ _DTYPES = {
255
+ "int8",
256
+ "int16",
257
+ "int32",
258
+ "int64",
259
+ "uint8",
260
+ "uint16",
261
+ "uint32",
262
+ "uint64",
263
+ # XXX: float16 is not part of the Array API spec but exposed by
264
+ # some namespaces.
265
+ "float16",
266
+ "float32",
267
+ "float64",
268
+ "complex64",
269
+ "complex128",
270
+ }
271
+
272
+ def __getattr__(self, name):
273
+ attr = getattr(numpy, name)
274
+
275
+ # Support device kwargs and make sure they are on the CPU
276
+ if name in self._CREATION_FUNCS:
277
+ return _accept_device_cpu(attr)
278
+
279
+ # Convert to dtype objects
280
+ if name in self._DTYPES:
281
+ return numpy.dtype(attr)
282
+ return attr
283
+
284
+ @property
285
+ def bool(self):
286
+ return numpy.bool_
287
+
288
+ def astype(self, x, dtype, *, copy=True, casting="unsafe"):
289
+ # astype is not defined in the top level NumPy namespace
290
+ return x.astype(dtype, copy=copy, casting=casting)
291
+
292
+ def asarray(self, x, *, dtype=None, device=None, copy=None): # noqa
293
+ _check_device_cpu(device)
294
+ # Support copy in NumPy namespace
295
+ if copy is True:
296
+ return numpy.array(x, copy=True, dtype=dtype)
297
+ else:
298
+ return numpy.asarray(x, dtype=dtype)
299
+
300
+ def unique_inverse(self, x):
301
+ return numpy.unique(x, return_inverse=True)
302
+
303
+ def unique_counts(self, x):
304
+ return numpy.unique(x, return_counts=True)
305
+
306
+ def unique_values(self, x):
307
+ return numpy.unique(x)
308
+
309
+ def concat(self, arrays, *, axis=None):
310
+ return numpy.concatenate(arrays, axis=axis)
311
+
312
+ def reshape(self, x, shape, *, copy=None):
313
+ """Gives a new shape to an array without changing its data.
314
+
315
+ The Array API specification requires shape to be a tuple.
316
+ https://data-apis.org/array-api/latest/API_specification/generated/array_api.reshape.html
317
+ """
318
+ if not isinstance(shape, tuple):
319
+ raise TypeError(
320
+ f"shape must be a tuple, got {shape!r} of type {type(shape)}"
321
+ )
322
+
323
+ if copy is True:
324
+ x = x.copy()
325
+ return numpy.reshape(x, shape)
326
+
327
+ def isdtype(self, dtype, kind):
328
+ return isdtype(dtype, kind, xp=self)
329
+
330
+
331
+ _NUMPY_API_WRAPPER_INSTANCE = _NumPyAPIWrapper()
332
+
333
+
334
+ def get_namespace(*arrays):
335
+ """Get namespace of arrays.
336
+
337
+ Introspect `arrays` arguments and return their common Array API
338
+ compatible namespace object, if any. NumPy 1.22 and later can
339
+ construct such containers using the `numpy.array_api` namespace
340
+ for instance.
341
+
342
+ See: https://numpy.org/neps/nep-0047-array-api-standard.html
343
+
344
+ If `arrays` are regular numpy arrays, an instance of the
345
+ `_NumPyAPIWrapper` compatibility wrapper is returned instead.
346
+
347
+ Namespace support is not enabled by default. To enabled it
348
+ call:
349
+
350
+ sklearn.set_config(array_api_dispatch=True)
351
+
352
+ or:
353
+
354
+ with sklearn.config_context(array_api_dispatch=True):
355
+ # your code here
356
+
357
+ Otherwise an instance of the `_NumPyAPIWrapper`
358
+ compatibility wrapper is always returned irrespective of
359
+ the fact that arrays implement the `__array_namespace__`
360
+ protocol or not.
361
+
362
+ Parameters
363
+ ----------
364
+ *arrays : array objects
365
+ Array objects.
366
+
367
+ Returns
368
+ -------
369
+ namespace : module
370
+ Namespace shared by array objects. If any of the `arrays` are not arrays,
371
+ the namespace defaults to NumPy.
372
+
373
+ is_array_api_compliant : bool
374
+ True if the arrays are containers that implement the Array API spec.
375
+ Always False when array_api_dispatch=False.
376
+ """
377
+ array_api_dispatch = get_config()["array_api_dispatch"]
378
+ if not array_api_dispatch:
379
+ return _NUMPY_API_WRAPPER_INSTANCE, False
380
+
381
+ _check_array_api_dispatch(array_api_dispatch)
382
+
383
+ # array-api-compat is a required dependency of scikit-learn only when
384
+ # configuring `array_api_dispatch=True`. Its import should therefore be
385
+ # protected by _check_array_api_dispatch to display an informative error
386
+ # message in case it is missing.
387
+ import array_api_compat
388
+
389
+ namespace, is_array_api_compliant = array_api_compat.get_namespace(*arrays), True
390
+
391
+ # These namespaces need additional wrapping to smooth out small differences
392
+ # between implementations
393
+ if namespace.__name__ in {"numpy.array_api", "cupy.array_api"}:
394
+ namespace = _ArrayAPIWrapper(namespace)
395
+
396
+ return namespace, is_array_api_compliant
397
+
398
+
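A minimal sketch (not part of the committed file) of the dispatch switch described above; the second call assumes the optional array_api_compat package and a recent enough NumPy are installed:

import numpy as np
import sklearn
from sklearn.utils._array_api import get_namespace

X = np.asarray([[1.0, 2.0], [3.0, 4.0]])

xp, is_compliant = get_namespace(X)
print(is_compliant)  # False: dispatch is off, the _NumPyAPIWrapper is returned

with sklearn.config_context(array_api_dispatch=True):
    xp, is_compliant = get_namespace(X)  # dispatches through array_api_compat
    print(is_compliant)  # True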
399
+ def _expit(X):
400
+ xp, _ = get_namespace(X)
401
+ if _is_numpy_namespace(xp):
402
+ return xp.asarray(special.expit(numpy.asarray(X)))
403
+
404
+ return 1.0 / (1.0 + xp.exp(-X))
405
+
406
+
407
+ def _add_to_diagonal(array, value, xp):
408
+ # Workaround for the lack of support for xp.reshape(a, shape, copy=False) in
409
+ # numpy.array_api: https://github.com/numpy/numpy/issues/23410
410
+ value = xp.asarray(value, dtype=array.dtype)
411
+ if _is_numpy_namespace(xp):
412
+ array_np = numpy.asarray(array)
413
+ array_np.flat[:: array.shape[0] + 1] += value
414
+ return xp.asarray(array_np)
415
+ elif value.ndim == 1:
416
+ for i in range(array.shape[0]):
417
+ array[i, i] += value[i]
418
+ else:
419
+ # scalar value
420
+ for i in range(array.shape[0]):
421
+ array[i, i] += value
422
+
423
+
424
+ def _weighted_sum(sample_score, sample_weight, normalize=False, xp=None):
425
+ # XXX: this function accepts Array API input but returns a Python scalar
426
+ # float. The call to float() is convenient because it removes the need to
427
+ # move back results from device to host memory (e.g. calling `.cpu()` on a
428
+ # torch tensor). However, this might interact in unexpected ways (break?)
429
+ # with lazy Array API implementations. See:
430
+ # https://github.com/data-apis/array-api/issues/642
431
+ if xp is None:
432
+ xp, _ = get_namespace(sample_score)
433
+ if normalize and _is_numpy_namespace(xp):
434
+ sample_score_np = numpy.asarray(sample_score)
435
+ if sample_weight is not None:
436
+ sample_weight_np = numpy.asarray(sample_weight)
437
+ else:
438
+ sample_weight_np = None
439
+ return float(numpy.average(sample_score_np, weights=sample_weight_np))
440
+
441
+ if not xp.isdtype(sample_score.dtype, "real floating"):
442
+ # We move to cpu device ahead of time since certain devices may not support
443
+ # float64, but we want the same precision for all devices and namespaces.
444
+ sample_score = xp.astype(xp.asarray(sample_score, device="cpu"), xp.float64)
445
+
446
+ if sample_weight is not None:
447
+ sample_weight = xp.asarray(
448
+ sample_weight, dtype=sample_score.dtype, device=device(sample_score)
449
+ )
450
+ if not xp.isdtype(sample_weight.dtype, "real floating"):
451
+ sample_weight = xp.astype(sample_weight, xp.float64)
452
+
453
+ if normalize:
454
+ if sample_weight is not None:
455
+ scale = xp.sum(sample_weight)
456
+ else:
457
+ scale = sample_score.shape[0]
458
+ if scale != 0:
459
+ sample_score = sample_score / scale
460
+
461
+ if sample_weight is not None:
462
+ return float(sample_score @ sample_weight)
463
+ else:
464
+ return float(xp.sum(sample_score))
465
+
466
+
467
+ def _nanmin(X, axis=None):
468
+ # TODO: refactor once nan-aware reductions are standardized:
469
+ # https://github.com/data-apis/array-api/issues/621
470
+ xp, _ = get_namespace(X)
471
+ if _is_numpy_namespace(xp):
472
+ return xp.asarray(numpy.nanmin(X, axis=axis))
473
+
474
+ else:
475
+ mask = xp.isnan(X)
476
+ X = xp.min(xp.where(mask, xp.asarray(+xp.inf, device=device(X)), X), axis=axis)
477
+ # Replace Infs from all NaN slices with NaN again
478
+ mask = xp.all(mask, axis=axis)
479
+ if xp.any(mask):
480
+ X = xp.where(mask, xp.asarray(xp.nan), X)
481
+ return X
482
+
483
+
484
+ def _nanmax(X, axis=None):
485
+ # TODO: refactor once nan-aware reductions are standardized:
486
+ # https://github.com/data-apis/array-api/issues/621
487
+ xp, _ = get_namespace(X)
488
+ if _is_numpy_namespace(xp):
489
+ return xp.asarray(numpy.nanmax(X, axis=axis))
490
+
491
+ else:
492
+ mask = xp.isnan(X)
493
+ X = xp.max(xp.where(mask, xp.asarray(-xp.inf, device=device(X)), X), axis=axis)
494
+ # Replace Infs from all NaN slices with NaN again
495
+ mask = xp.all(mask, axis=axis)
496
+ if xp.any(mask):
497
+ X = xp.where(mask, xp.asarray(xp.nan), X)
498
+ return X
499
+
500
+
501
+ def _asarray_with_order(array, dtype=None, order=None, copy=None, *, xp=None):
502
+ """Helper to support the order kwarg only for NumPy-backed arrays
503
+
504
+ Memory layout parameter `order` is not exposed in the Array API standard,
505
+ however some input validation code in scikit-learn needs to work both
506
+ for classes and functions that will leverage Array API only operations
507
+ and for code that inherently relies on NumPy backed data containers with
508
+ specific memory layout constraints (e.g. our own Cython code). The
509
+ purpose of this helper is to make it possible to share code for data
510
+ container validation without memory copies for both downstream use cases:
511
+ the `order` parameter is only enforced if the input array implementation
512
+ is NumPy based, otherwise `order` is just silently ignored.
513
+ """
514
+ if xp is None:
515
+ xp, _ = get_namespace(array)
516
+ if _is_numpy_namespace(xp):
517
+ # Use NumPy API to support order
518
+ if copy is True:
519
+ array = numpy.array(array, order=order, dtype=dtype)
520
+ else:
521
+ array = numpy.asarray(array, order=order, dtype=dtype)
522
+
523
+ # At this point array is a NumPy ndarray. We convert it to an array
524
+ # container that is consistent with the input's namespace.
525
+ return xp.asarray(array)
526
+ else:
527
+ return xp.asarray(array, dtype=dtype, copy=copy)
528
+
529
+
530
+ def _convert_to_numpy(array, xp):
531
+ """Convert X into a NumPy ndarray on the CPU."""
532
+ xp_name = xp.__name__
533
+
534
+ if xp_name in {"array_api_compat.torch", "torch"}:
535
+ return array.cpu().numpy()
536
+ elif xp_name == "cupy.array_api":
537
+ return array._array.get()
538
+ elif xp_name in {"array_api_compat.cupy", "cupy"}: # pragma: nocover
539
+ return array.get()
540
+
541
+ return numpy.asarray(array)
542
+
543
+
544
+ def _estimator_with_converted_arrays(estimator, converter):
545
+ """Create new estimator which converting all attributes that are arrays.
546
+
547
+ The converter is called on all NumPy arrays and arrays that support the
548
+ `DLPack interface <https://dmlc.github.io/dlpack/latest/>`__.
549
+
550
+ Parameters
551
+ ----------
552
+ estimator : Estimator
553
+ Estimator to convert
554
+
555
+ converter : callable
556
+ Callable that takes an array attribute and returns the converted array.
557
+
558
+ Returns
559
+ -------
560
+ new_estimator : Estimator
561
+ Convert estimator
562
+ """
563
+ from sklearn.base import clone
564
+
565
+ new_estimator = clone(estimator)
566
+ for key, attribute in vars(estimator).items():
567
+ if hasattr(attribute, "__dlpack__") or isinstance(attribute, numpy.ndarray):
568
+ attribute = converter(attribute)
569
+ setattr(new_estimator, key, attribute)
570
+ return new_estimator
571
+
572
+
573
+ def _atol_for_type(dtype):
574
+ """Return the absolute tolerance for a given dtype."""
575
+ return numpy.finfo(dtype).eps * 100
venv/lib/python3.10/site-packages/sklearn/utils/_available_if.py ADDED
@@ -0,0 +1,93 @@
1
+ from functools import update_wrapper, wraps
2
+ from types import MethodType
3
+
4
+
5
+ class _AvailableIfDescriptor:
6
+ """Implements a conditional property using the descriptor protocol.
7
+
8
+ Using this class to create a decorator will raise an ``AttributeError``
9
+ if check(self) returns a falsey value. Note that if check raises an error
10
+ this will also result in hasattr returning false.
11
+
12
+ See https://docs.python.org/3/howto/descriptor.html for an explanation of
13
+ descriptors.
14
+ """
15
+
16
+ def __init__(self, fn, check, attribute_name):
17
+ self.fn = fn
18
+ self.check = check
19
+ self.attribute_name = attribute_name
20
+
21
+ # update the docstring of the descriptor
22
+ update_wrapper(self, fn)
23
+
24
+ def _check(self, obj, owner):
25
+ attr_err_msg = (
26
+ f"This {repr(owner.__name__)} has no attribute {repr(self.attribute_name)}"
27
+ )
28
+ try:
29
+ check_result = self.check(obj)
30
+ except Exception as e:
31
+ raise AttributeError(attr_err_msg) from e
32
+
33
+ if not check_result:
34
+ raise AttributeError(attr_err_msg)
35
+
36
+ def __get__(self, obj, owner=None):
37
+ if obj is not None:
38
+ # delegate only on instances, not the classes.
39
+ # this is to allow access to the docstrings.
40
+ self._check(obj, owner=owner)
41
+ out = MethodType(self.fn, obj)
42
+
43
+ else:
44
+ # This makes it possible to use the decorated method as an unbound method,
45
+ # for instance when monkeypatching.
46
+ @wraps(self.fn)
47
+ def out(*args, **kwargs):
48
+ self._check(args[0], owner=owner)
49
+ return self.fn(*args, **kwargs)
50
+
51
+ return out
52
+
53
+
54
+ def available_if(check):
55
+ """An attribute that is available only if check returns a truthy value.
56
+
57
+ Parameters
58
+ ----------
59
+ check : callable
60
+ When passed the object with the decorated method, this should return
61
+ a truthy value if the attribute is available, and either return False
62
+ or raise an AttributeError if not available.
63
+
64
+ Returns
65
+ -------
66
+ callable
67
+ Callable makes the decorated method available if `check` returns
68
+ a truthy value, otherwise the decorated method is unavailable.
69
+
70
+ Examples
71
+ --------
72
+ >>> from sklearn.utils.metaestimators import available_if
73
+ >>> class HelloIfEven:
74
+ ... def __init__(self, x):
75
+ ... self.x = x
76
+ ...
77
+ ... def _x_is_even(self):
78
+ ... return self.x % 2 == 0
79
+ ...
80
+ ... @available_if(_x_is_even)
81
+ ... def say_hello(self):
82
+ ... print("Hello")
83
+ ...
84
+ >>> obj = HelloIfEven(1)
85
+ >>> hasattr(obj, "say_hello")
86
+ False
87
+ >>> obj.x = 2
88
+ >>> hasattr(obj, "say_hello")
89
+ True
90
+ >>> obj.say_hello()
91
+ Hello
92
+ """
93
+ return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)