diff --git a/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..9df345657786dafaaa85725a2874c6353a2ea62b --- /dev/null +++ b/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc6549c0a88fbfc97075ef2d2cbbe6845eb2da840f7a8893bf3cc6564668b787 +size 33555627 diff --git a/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..10f039f4d4ede3c4edd5a7edc7d23f95e876ade6 --- /dev/null +++ b/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f130d87201997e48c47b0ea865c94a40798b6dbcbe1d99e771a2f139b2c2b6f1 +size 33555533 diff --git a/ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..51aee83da7aa87acf9c5664d656f98ec8df7795d --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76f467b45af601530430872d2c6314dff0b05a9f74496ad4d67a7030eb74f05c +size 16778411 diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f4db85303f4b6bc9983cda7bc31eee349c1f425a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py @@ -0,0 +1,19 @@ +""" +The :mod:`sklearn.feature_extraction` module deals with feature extraction +from raw data. It currently includes methods to extract features from text and +images. +""" + +from . 
import text +from ._dict_vectorizer import DictVectorizer +from ._hash import FeatureHasher +from .image import grid_to_graph, img_to_graph + +__all__ = [ + "DictVectorizer", + "image", + "img_to_graph", + "grid_to_graph", + "text", + "FeatureHasher", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aaebae4eeb89907c559115c1fbb037d0d118e039 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4e73423a40a1873a63838e4245f1f57efed7fb0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9285e8e9b50cd18f09c1f8e7cadbba9b6d95a402 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..909083290ca2322d59305b18a756b331179f6f13 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88a4092fe27eaac4473eddc95fdf0923f757eb83 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/image.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cdd15302f7f0a450917acaad658e237f5ace125 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py new file mode 100644 index 0000000000000000000000000000000000000000..9855684b550c40972cb242f48118f684e461f035 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py @@ -0,0 +1,452 @@ +# Authors: Lars Buitinck +# Dan Blanchard +# License: BSD 3 clause + +from array import array +from collections.abc import Iterable, Mapping +from numbers 
import Number +from operator import itemgetter + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array +from ..utils.validation import check_is_fitted + + +class DictVectorizer(TransformerMixin, BaseEstimator): + """Transforms lists of feature-value mappings to vectors. + + This transformer turns lists of mappings (dict-like objects) of feature + names to feature values into Numpy arrays or scipy.sparse matrices for use + with scikit-learn estimators. + + When feature values are strings, this transformer will do a binary one-hot + (aka one-of-K) coding: one boolean-valued feature is constructed for each + of the possible string values that the feature can take on. For instance, + a feature "f" that can take on the values "ham" and "spam" will become two + features in the output, one signifying "f=ham", the other "f=spam". + + If a feature value is a sequence or set of strings, this transformer + will iterate over the values and will count the occurrences of each string + value. + + However, note that this transformer will only do a binary one-hot encoding + when feature values are of type string. If categorical features are + represented as numeric values such as int or iterables of strings, the + DictVectorizer can be followed by + :class:`~sklearn.preprocessing.OneHotEncoder` to complete + binary one-hot encoding. + + Features that do not occur in a sample (mapping) will have a zero value + in the resulting array/matrix. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + dtype : dtype, default=np.float64 + The type of feature values. Passed to Numpy array/scipy.sparse matrix + constructors as the dtype argument. + separator : str, default="=" + Separator string used when constructing new features for one-hot + coding. + sparse : bool, default=True + Whether transform should produce scipy.sparse matrices. + sort : bool, default=True + Whether ``feature_names_`` and ``vocabulary_`` should be + sorted when fitting. + + Attributes + ---------- + vocabulary_ : dict + A dictionary mapping feature names to feature indices. + + feature_names_ : list + A list of length n_features containing the feature names (e.g., "f=ham" + and "f=spam"). + + See Also + -------- + FeatureHasher : Performs vectorization using only a hash function. + sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical + features encoded as columns of arbitrary data types. + + Examples + -------- + >>> from sklearn.feature_extraction import DictVectorizer + >>> v = DictVectorizer(sparse=False) + >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] + >>> X = v.fit_transform(D) + >>> X + array([[2., 0., 1.], + [0., 1., 3.]]) + >>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0}, + ... 
{'baz': 1.0, 'foo': 3.0}] + True + >>> v.transform({'foo': 4, 'unseen_feature': 3}) + array([[0., 0., 4.]]) + """ + + _parameter_constraints: dict = { + "dtype": "no_validation", # validation delegated to numpy, + "separator": [str], + "sparse": ["boolean"], + "sort": ["boolean"], + } + + def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True): + self.dtype = dtype + self.separator = separator + self.sparse = sparse + self.sort = sort + + def _add_iterable_element( + self, + f, + v, + feature_names, + vocab, + *, + fitting=True, + transforming=False, + indices=None, + values=None, + ): + """Add feature names for iterable of strings""" + for vv in v: + if isinstance(vv, str): + feature_name = "%s%s%s" % (f, self.separator, vv) + vv = 1 + else: + raise TypeError( + f"Unsupported type {type(vv)} in iterable " + "value. Only iterables of string are " + "supported." + ) + if fitting and feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if transforming and feature_name in vocab: + indices.append(vocab[feature_name]) + values.append(self.dtype(vv)) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn a list of feature name -> indices mappings. + + Parameters + ---------- + X : Mapping or iterable over Mappings + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + .. versionchanged:: 0.24 + Accepts multiple string values for one categorical feature. + + y : (ignored) + Ignored parameter. + + Returns + ------- + self : object + DictVectorizer class instance. + """ + feature_names = [] + vocab = {} + + for x in X: + for f, v in x.items(): + if isinstance(v, str): + feature_name = "%s%s%s" % (f, self.separator, v) + elif isinstance(v, Number) or (v is None): + feature_name = f + elif isinstance(v, Mapping): + raise TypeError( + f"Unsupported value type {type(v)} " + f"for {f}: {v}.\n" + "Mapping objects are not supported." + ) + elif isinstance(v, Iterable): + feature_name = None + self._add_iterable_element(f, v, feature_names, vocab) + + if feature_name is not None: + if feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if self.sort: + feature_names.sort() + vocab = {f: i for i, f in enumerate(feature_names)} + + self.feature_names_ = feature_names + self.vocabulary_ = vocab + + return self + + def _transform(self, X, fitting): + # Sanity check: Python's array has no way of explicitly requesting the + # signed 32-bit integers that scipy.sparse needs, so we use the next + # best thing: typecode "i" (int). However, if that gives larger or + # smaller integers than 32-bit ones, np.frombuffer screws up. + assert array("i").itemsize == 4, ( + "sizeof(int) != 4 on your platform; please report this at" + " https://github.com/scikit-learn/scikit-learn/issues and" + " include the output from platform.platform() in your bug report" + ) + + dtype = self.dtype + if fitting: + feature_names = [] + vocab = {} + else: + feature_names = self.feature_names_ + vocab = self.vocabulary_ + + transforming = True + + # Process everything as sparse regardless of setting + X = [X] if isinstance(X, Mapping) else X + + indices = array("i") + indptr = [0] + # XXX we could change values to an array.array as well, but it + # would require (heuristic) conversion of dtype to typecode... 
+ values = [] + + # collect all the possible feature names and build sparse matrix at + # same time + for x in X: + for f, v in x.items(): + if isinstance(v, str): + feature_name = "%s%s%s" % (f, self.separator, v) + v = 1 + elif isinstance(v, Number) or (v is None): + feature_name = f + elif not isinstance(v, Mapping) and isinstance(v, Iterable): + feature_name = None + self._add_iterable_element( + f, + v, + feature_names, + vocab, + fitting=fitting, + transforming=transforming, + indices=indices, + values=values, + ) + else: + raise TypeError( + f"Unsupported value Type {type(v)} " + f"for {f}: {v}.\n" + f"{type(v)} objects are not supported." + ) + + if feature_name is not None: + if fitting and feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if feature_name in vocab: + indices.append(vocab[feature_name]) + values.append(self.dtype(v)) + + indptr.append(len(indices)) + + if len(indptr) == 1: + raise ValueError("Sample sequence X is empty.") + + indices = np.frombuffer(indices, dtype=np.intc) + shape = (len(indptr) - 1, len(vocab)) + + result_matrix = sp.csr_matrix( + (values, indices, indptr), shape=shape, dtype=dtype + ) + + # Sort everything if asked + if fitting and self.sort: + feature_names.sort() + map_index = np.empty(len(feature_names), dtype=np.int32) + for new_val, f in enumerate(feature_names): + map_index[new_val] = vocab[f] + vocab[f] = new_val + result_matrix = result_matrix[:, map_index] + + if self.sparse: + result_matrix.sort_indices() + else: + result_matrix = result_matrix.toarray() + + if fitting: + self.feature_names_ = feature_names + self.vocabulary_ = vocab + + return result_matrix + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Learn a list of feature name -> indices mappings and transform X. + + Like fit(X) followed by transform(X), but does not require + materializing X in memory. + + Parameters + ---------- + X : Mapping or iterable over Mappings + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + .. versionchanged:: 0.24 + Accepts multiple string values for one categorical feature. + + y : (ignored) + Ignored parameter. + + Returns + ------- + Xa : {array, sparse matrix} + Feature vectors; always 2-d. + """ + return self._transform(X, fitting=True) + + def inverse_transform(self, X, dict_type=dict): + """Transform array or sparse matrix X back to feature mappings. + + X must have been produced by this DictVectorizer's transform or + fit_transform method; it may only have passed through transformers + that preserve the number of features and their order. + + In the case of one-hot/one-of-K coding, the constructed feature + names and values are returned rather than the original ones. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample matrix. + dict_type : type, default=dict + Constructor for feature mappings. Must conform to the + collections.Mapping API. + + Returns + ------- + D : list of dict_type objects of shape (n_samples,) + Feature mappings for the samples in X. 
+ """ + check_is_fitted(self, "feature_names_") + + # COO matrix is not subscriptable + X = check_array(X, accept_sparse=["csr", "csc"]) + n_samples = X.shape[0] + + names = self.feature_names_ + dicts = [dict_type() for _ in range(n_samples)] + + if sp.issparse(X): + for i, j in zip(*X.nonzero()): + dicts[i][names[j]] = X[i, j] + else: + for i, d in enumerate(dicts): + for j, v in enumerate(X[i, :]): + if v != 0: + d[names[j]] = X[i, j] + + return dicts + + def transform(self, X): + """Transform feature->value dicts to array or sparse matrix. + + Named features not encountered during fit or fit_transform will be + silently ignored. + + Parameters + ---------- + X : Mapping or iterable over Mappings of shape (n_samples,) + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + Returns + ------- + Xa : {array, sparse matrix} + Feature vectors; always 2-d. + """ + check_is_fitted(self, ["feature_names_", "vocabulary_"]) + return self._transform(X, fitting=False) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "feature_names_") + if any(not isinstance(name, str) for name in self.feature_names_): + feature_names = [str(name) for name in self.feature_names_] + else: + feature_names = self.feature_names_ + return np.asarray(feature_names, dtype=object) + + def restrict(self, support, indices=False): + """Restrict the features to those in support using feature selection. + + This function modifies the estimator in-place. + + Parameters + ---------- + support : array-like + Boolean mask or list of indices (as returned by the get_support + member of feature selectors). + indices : bool, default=False + Whether support is a list of indices. + + Returns + ------- + self : object + DictVectorizer class instance. + + Examples + -------- + >>> from sklearn.feature_extraction import DictVectorizer + >>> from sklearn.feature_selection import SelectKBest, chi2 + >>> v = DictVectorizer() + >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] + >>> X = v.fit_transform(D) + >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1]) + >>> v.get_feature_names_out() + array(['bar', 'baz', 'foo'], ...) + >>> v.restrict(support.get_support()) + DictVectorizer() + >>> v.get_feature_names_out() + array(['bar', 'foo'], ...) 
+ """ + check_is_fitted(self, "feature_names_") + + if not indices: + support = np.where(support)[0] + + names = self.feature_names_ + new_vocab = {} + for i in support: + new_vocab[names[i]] = len(new_vocab) + + self.vocabulary_ = new_vocab + self.feature_names_ = [ + f for f, i in sorted(new_vocab.items(), key=itemgetter(1)) + ] + + return self + + def _more_tags(self): + return {"X_types": ["dict"]} diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py new file mode 100644 index 0000000000000000000000000000000000000000..9874bc0a028352b4f9ca5fa517636ea8e0e6499e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py @@ -0,0 +1,197 @@ +# Author: Lars Buitinck +# License: BSD 3 clause + +from itertools import chain +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils._param_validation import Interval, StrOptions +from ._hashing_fast import transform as _hashing_transform + + +def _iteritems(d): + """Like d.iteritems, but accepts any collections.Mapping.""" + return d.iteritems() if hasattr(d, "iteritems") else d.items() + + +class FeatureHasher(TransformerMixin, BaseEstimator): + """Implements feature hashing, aka the hashing trick. + + This class turns sequences of symbolic feature names (strings) into + scipy.sparse matrices, using a hash function to compute the matrix column + corresponding to a name. The hash function employed is the signed 32-bit + version of Murmurhash3. + + Feature names of type byte string are used as-is. Unicode strings are + converted to UTF-8 first, but no Unicode normalization is done. + Feature values must be (finite) numbers. + + This class is a low-memory alternative to DictVectorizer and + CountVectorizer, intended for large-scale (online) learning and situations + where memory is tight, e.g. when running prediction code on embedded + devices. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_features : int, default=2**20 + The number of features (columns) in the output matrices. Small numbers + of features are likely to cause hash collisions, but large numbers + will cause larger coefficient dimensions in linear learners. + input_type : str, default='dict' + Choose a string from {'dict', 'pair', 'string'}. + Either "dict" (the default) to accept dictionaries over + (feature_name, value); "pair" to accept pairs of (feature_name, value); + or "string" to accept single strings. + feature_name should be a string, while value should be a number. + In the case of "string", a value of 1 is implied. + The feature_name is hashed to find the appropriate column for the + feature. The value's sign might be flipped in the output (but see + non_negative, below). + dtype : numpy dtype, default=np.float64 + The type of feature values. Passed to scipy.sparse matrix constructors + as the dtype argument. Do not set this to bool, np.boolean or any + unsigned integer type. + alternate_sign : bool, default=True + When True, an alternating sign is added to the features as to + approximately conserve the inner product in the hashed space even for + small n_features. This approach is similar to sparse random projection. + + .. 
versionchanged:: 0.19 + ``alternate_sign`` replaces the now deprecated ``non_negative`` + parameter. + + See Also + -------- + DictVectorizer : Vectorizes string-valued features using a hash table. + sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features. + + Notes + ----- + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.feature_extraction import FeatureHasher + >>> h = FeatureHasher(n_features=10) + >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}] + >>> f = h.transform(D) + >>> f.toarray() + array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.], + [ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]]) + + With `input_type="string"`, the input must be an iterable over iterables of + strings: + + >>> h = FeatureHasher(n_features=8, input_type="string") + >>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]] + >>> f = h.transform(raw_X) + >>> f.toarray() + array([[ 0., 0., 0., -1., 0., -1., 0., 1.], + [ 0., 0., 0., -1., 0., -1., 0., 0.], + [ 0., -1., 0., 0., 0., 0., 0., 1.]]) + """ + + _parameter_constraints: dict = { + "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")], + "input_type": [StrOptions({"dict", "pair", "string"})], + "dtype": "no_validation", # delegate to numpy + "alternate_sign": ["boolean"], + } + + def __init__( + self, + n_features=(2**20), + *, + input_type="dict", + dtype=np.float64, + alternate_sign=True, + ): + self.dtype = dtype + self.input_type = input_type + self.n_features = n_features + self.alternate_sign = alternate_sign + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X=None, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : Ignored + Not used, present here for API consistency by convention. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + FeatureHasher class instance. + """ + return self + + def transform(self, raw_X): + """Transform a sequence of instances to a scipy.sparse matrix. + + Parameters + ---------- + raw_X : iterable over iterable over raw features, length = n_samples + Samples. Each sample must be iterable an (e.g., a list or tuple) + containing/generating feature names (and optionally values, see + the input_type constructor argument) which will be hashed. + raw_X need not support the len function, so it can be the result + of a generator; n_samples is determined on the fly. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Feature matrix, for use with estimators or further transformers. + """ + raw_X = iter(raw_X) + if self.input_type == "dict": + raw_X = (_iteritems(d) for d in raw_X) + elif self.input_type == "string": + first_raw_X = next(raw_X) + if isinstance(first_raw_X, str): + raise ValueError( + "Samples can not be a single string. The input must be an iterable" + " over iterables of strings." 
+ ) + raw_X_ = chain([first_raw_X], raw_X) + raw_X = (((f, 1) for f in x) for x in raw_X_) + + indices, indptr, values = _hashing_transform( + raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0 + ) + n_samples = indptr.shape[0] - 1 + + if n_samples == 0: + raise ValueError("Cannot vectorize empty sequence.") + + X = sp.csr_matrix( + (values, indices, indptr), + dtype=self.dtype, + shape=(n_samples, self.n_features), + ) + X.sum_duplicates() # also sorts the indices + + return X + + def _more_tags(self): + return {"X_types": [self.input_type]} diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f628496339e5d710906eef7b8599438abc8215e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py new file mode 100644 index 0000000000000000000000000000000000000000..37ae02a0f36c54fb47b87d8368bc1a2507404cfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py @@ -0,0 +1,325 @@ +# This list of English stop words is taken from the "Glasgow Information +# Retrieval Group". The original list can be found at +# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words +ENGLISH_STOP_WORDS = frozenset( + [ + "a", + "about", + "above", + "across", + "after", + "afterwards", + "again", + "against", + "all", + "almost", + "alone", + "along", + "already", + "also", + "although", + "always", + "am", + "among", + "amongst", + "amoungst", + "amount", + "an", + "and", + "another", + "any", + "anyhow", + "anyone", + "anything", + "anyway", + "anywhere", + "are", + "around", + "as", + "at", + "back", + "be", + "became", + "because", + "become", + "becomes", + "becoming", + "been", + "before", + "beforehand", + "behind", + "being", + "below", + "beside", + "besides", + "between", + "beyond", + "bill", + "both", + "bottom", + "but", + "by", + "call", + "can", + "cannot", + "cant", + "co", + "con", + "could", + "couldnt", + "cry", + "de", + "describe", + "detail", + "do", + "done", + "down", + "due", + "during", + "each", + "eg", + "eight", + "either", + "eleven", + "else", + "elsewhere", + "empty", + "enough", + "etc", + "even", + "ever", + "every", + "everyone", + "everything", + "everywhere", + "except", + "few", + "fifteen", + "fifty", + "fill", + "find", + "fire", + "first", + "five", + "for", + "former", + "formerly", + "forty", + "found", + "four", + "from", + "front", + "full", + "further", + "get", + "give", + "go", + "had", + "has", + "hasnt", + "have", + "he", + "hence", + "her", + "here", + "hereafter", + "hereby", + "herein", + "hereupon", + "hers", + "herself", + "him", + "himself", + "his", + "how", + "however", + "hundred", + "i", + "ie", + "if", + "in", + "inc", + "indeed", + "interest", + "into", + "is", + "it", + "its", + "itself", + "keep", + "last", + "latter", + "latterly", + "least", + "less", + "ltd", + "made", + "many", + "may", + "me", + "meanwhile", + "might", + "mill", + "mine", + "more", + "moreover", + "most", + "mostly", + "move", + "much", + "must", + "my", + "myself", + "name", + "namely", + "neither", + "never", + "nevertheless", + 
"next", + "nine", + "no", + "nobody", + "none", + "noone", + "nor", + "not", + "nothing", + "now", + "nowhere", + "of", + "off", + "often", + "on", + "once", + "one", + "only", + "onto", + "or", + "other", + "others", + "otherwise", + "our", + "ours", + "ourselves", + "out", + "over", + "own", + "part", + "per", + "perhaps", + "please", + "put", + "rather", + "re", + "same", + "see", + "seem", + "seemed", + "seeming", + "seems", + "serious", + "several", + "she", + "should", + "show", + "side", + "since", + "sincere", + "six", + "sixty", + "so", + "some", + "somehow", + "someone", + "something", + "sometime", + "sometimes", + "somewhere", + "still", + "such", + "system", + "take", + "ten", + "than", + "that", + "the", + "their", + "them", + "themselves", + "then", + "thence", + "there", + "thereafter", + "thereby", + "therefore", + "therein", + "thereupon", + "these", + "they", + "thick", + "thin", + "third", + "this", + "those", + "though", + "three", + "through", + "throughout", + "thru", + "thus", + "to", + "together", + "too", + "top", + "toward", + "towards", + "twelve", + "twenty", + "two", + "un", + "under", + "until", + "up", + "upon", + "us", + "very", + "via", + "was", + "we", + "well", + "were", + "what", + "whatever", + "when", + "whence", + "whenever", + "where", + "whereafter", + "whereas", + "whereby", + "wherein", + "whereupon", + "wherever", + "whether", + "which", + "while", + "whither", + "who", + "whoever", + "whole", + "whom", + "whose", + "why", + "will", + "with", + "within", + "without", + "would", + "yet", + "you", + "your", + "yours", + "yourself", + "yourselves", + ] +) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/image.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/image.py new file mode 100644 index 0000000000000000000000000000000000000000..718f47e3e8a74693154655e727e3e250ce5bc008 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/image.py @@ -0,0 +1,671 @@ +""" +The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to +extract features from images. +""" + +# Authors: Emmanuelle Gouillart +# Gael Varoquaux +# Olivier Grisel +# Vlad Niculae +# License: BSD 3 clause + +from itertools import product +from numbers import Integral, Number, Real + +import numpy as np +from numpy.lib.stride_tricks import as_strided +from scipy import sparse + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array, check_random_state +from ..utils._param_validation import Hidden, Interval, RealNotInt, validate_params + +__all__ = [ + "PatchExtractor", + "extract_patches_2d", + "grid_to_graph", + "img_to_graph", + "reconstruct_from_patches_2d", +] + +############################################################################### +# From an image to a graph + + +def _make_edges_3d(n_x, n_y, n_z=1): + """Returns a list of edges for a 3D image. + + Parameters + ---------- + n_x : int + The size of the grid in the x direction. + n_y : int + The size of the grid in the y direction. 
+ n_z : integer, default=1 + The size of the grid in the z direction, defaults to 1 + """ + vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z)) + edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel())) + edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel())) + edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel())) + edges = np.hstack((edges_deep, edges_right, edges_down)) + return edges + + +def _compute_gradient_3d(edges, img): + _, n_y, n_z = img.shape + gradient = np.abs( + img[ + edges[0] // (n_y * n_z), + (edges[0] % (n_y * n_z)) // n_z, + (edges[0] % (n_y * n_z)) % n_z, + ] + - img[ + edges[1] // (n_y * n_z), + (edges[1] % (n_y * n_z)) // n_z, + (edges[1] % (n_y * n_z)) % n_z, + ] + ) + return gradient + + +# XXX: Why mask the image after computing the weights? + + +def _mask_edges_weights(mask, edges, weights=None): + """Apply a mask to edges (weighted or not)""" + inds = np.arange(mask.size) + inds = inds[mask.ravel()] + ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds)) + edges = edges[:, ind_mask] + if weights is not None: + weights = weights[ind_mask] + if len(edges.ravel()): + maxval = edges.max() + else: + maxval = 0 + order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1)) + edges = order[edges] + if weights is None: + return edges + else: + return edges, weights + + +def _to_graph( + n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None +): + """Auxiliary function for img_to_graph and grid_to_graph""" + edges = _make_edges_3d(n_x, n_y, n_z) + + if dtype is None: # To not overwrite input dtype + if img is None: + dtype = int + else: + dtype = img.dtype + + if img is not None: + img = np.atleast_3d(img) + weights = _compute_gradient_3d(edges, img) + if mask is not None: + edges, weights = _mask_edges_weights(mask, edges, weights) + diag = img.squeeze()[mask] + else: + diag = img.ravel() + n_voxels = diag.size + else: + if mask is not None: + mask = mask.astype(dtype=bool, copy=False) + edges = _mask_edges_weights(mask, edges) + n_voxels = np.sum(mask) + else: + n_voxels = n_x * n_y * n_z + weights = np.ones(edges.shape[1], dtype=dtype) + diag = np.ones(n_voxels, dtype=dtype) + + diag_idx = np.arange(n_voxels) + i_idx = np.hstack((edges[0], edges[1])) + j_idx = np.hstack((edges[1], edges[0])) + graph = sparse.coo_matrix( + ( + np.hstack((weights, weights, diag)), + (np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))), + ), + (n_voxels, n_voxels), + dtype=dtype, + ) + if return_as is np.ndarray: + return graph.toarray() + return return_as(graph) + + +@validate_params( + { + "img": ["array-like"], + "mask": [None, np.ndarray], + "return_as": [type], + "dtype": "no_validation", # validation delegated to numpy + }, + prefer_skip_nested_validation=True, +) +def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None): + """Graph of the pixel-to-pixel gradient connections. + + Edges are weighted with the gradient values. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + img : array-like of shape (height, width) or (height, width, channel) + 2D or 3D image. + mask : ndarray of shape (height, width) or \ + (height, width, channel), dtype=bool, default=None + An optional mask of the image, to consider only part of the + pixels. + return_as : np.ndarray or a sparse matrix class, \ + default=sparse.coo_matrix + The class to use to build the returned adjacency matrix. 
+ dtype : dtype, default=None + The data of the returned sparse matrix. By default it is the + dtype of img. + + Returns + ------- + graph : ndarray or a sparse matrix class + The computed adjacency matrix. + + Notes + ----- + For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was + handled by returning a dense np.matrix instance. Going forward, np.ndarray + returns an np.ndarray, as expected. + + For compatibility, user code relying on this method should wrap its + calls in ``np.asarray`` to avoid type issues. + """ + img = np.atleast_3d(img) + n_x, n_y, n_z = img.shape + return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype) + + +@validate_params( + { + "n_x": [Interval(Integral, left=1, right=None, closed="left")], + "n_y": [Interval(Integral, left=1, right=None, closed="left")], + "n_z": [Interval(Integral, left=1, right=None, closed="left")], + "mask": [None, np.ndarray], + "return_as": [type], + "dtype": "no_validation", # validation delegated to numpy + }, + prefer_skip_nested_validation=True, +) +def grid_to_graph( + n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int +): + """Graph of the pixel-to-pixel connections. + + Edges exist if 2 voxels are connected. + + Parameters + ---------- + n_x : int + Dimension in x axis. + n_y : int + Dimension in y axis. + n_z : int, default=1 + Dimension in z axis. + mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None + An optional mask of the image, to consider only part of the + pixels. + return_as : np.ndarray or a sparse matrix class, \ + default=sparse.coo_matrix + The class to use to build the returned adjacency matrix. + dtype : dtype, default=int + The data of the returned sparse matrix. By default it is int. + + Returns + ------- + graph : np.ndarray or a sparse matrix class + The computed adjacency matrix. + + Notes + ----- + For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was + handled by returning a dense np.matrix instance. Going forward, np.ndarray + returns an np.ndarray, as expected. + + For compatibility, user code relying on this method should wrap its + calls in ``np.asarray`` to avoid type issues. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.feature_extraction.image import grid_to_graph + >>> shape_img = (4, 4, 1) + >>> mask = np.zeros(shape=shape_img, dtype=bool) + >>> mask[[1, 2], [1, 2], :] = True + >>> graph = grid_to_graph(*shape_img, mask=mask) + >>> print(graph) + (0, 0) 1 + (1, 1) 1 + """ + return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype) + + +############################################################################### +# From an image to a set of small image patches + + +def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None): + """Compute the number of patches that will be extracted in an image. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + i_h : int + The image height + i_w : int + The image with + p_h : int + The height of a patch + p_w : int + The width of a patch + max_patches : int or float, default=None + The maximum number of patches to extract. If `max_patches` is a float + between 0 and 1, it is taken to be a proportion of the total number + of patches. If `max_patches` is None, all possible patches are extracted. 
+ """ + n_h = i_h - p_h + 1 + n_w = i_w - p_w + 1 + all_patches = n_h * n_w + + if max_patches: + if isinstance(max_patches, (Integral)) and max_patches < all_patches: + return max_patches + elif isinstance(max_patches, (Integral)) and max_patches >= all_patches: + return all_patches + elif isinstance(max_patches, (Real)) and 0 < max_patches < 1: + return int(max_patches * all_patches) + else: + raise ValueError("Invalid value for max_patches: %r" % max_patches) + else: + return all_patches + + +def _extract_patches(arr, patch_shape=8, extraction_step=1): + """Extracts patches of any n-dimensional array in place using strides. + + Given an n-dimensional array it will return a 2n-dimensional array with + the first n dimensions indexing patch position and the last n indexing + the patch content. This operation is immediate (O(1)). A reshape + performed on the first n dimensions will cause numpy to copy data, leading + to a list of extracted patches. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + arr : ndarray + n-dimensional array of which patches are to be extracted + + patch_shape : int or tuple of length arr.ndim.default=8 + Indicates the shape of the patches to be extracted. If an + integer is given, the shape will be a hypercube of + sidelength given by its value. + + extraction_step : int or tuple of length arr.ndim, default=1 + Indicates step size at which extraction shall be performed. + If integer is given, then the step is uniform in all dimensions. + + + Returns + ------- + patches : strided ndarray + 2n-dimensional array indexing patches on first n dimensions and + containing patches on the last n dimensions. These dimensions + are fake, but this way no data is copied. A simple reshape invokes + a copying operation to obtain a list of patches: + result.reshape([-1] + list(patch_shape)) + """ + + arr_ndim = arr.ndim + + if isinstance(patch_shape, Number): + patch_shape = tuple([patch_shape] * arr_ndim) + if isinstance(extraction_step, Number): + extraction_step = tuple([extraction_step] * arr_ndim) + + patch_strides = arr.strides + + slices = tuple(slice(None, None, st) for st in extraction_step) + indexing_strides = arr[slices].strides + + patch_indices_shape = ( + (np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step) + ) + 1 + + shape = tuple(list(patch_indices_shape) + list(patch_shape)) + strides = tuple(list(indexing_strides) + list(patch_strides)) + + patches = as_strided(arr, shape=shape, strides=strides) + return patches + + +@validate_params( + { + "image": [np.ndarray], + "patch_size": [tuple, list], + "max_patches": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(Integral, 1, None, closed="left"), + None, + ], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): + """Reshape a 2D image into a collection of patches. + + The resulting patches are allocated in a dedicated array. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + image : ndarray of shape (image_height, image_width) or \ + (image_height, image_width, n_channels) + The original image data. For color images, the last dimension specifies + the channel: a RGB image would have `n_channels=3`. + + patch_size : tuple of int (patch_height, patch_width) + The dimensions of one patch. + + max_patches : int or float, default=None + The maximum number of patches to extract. 
If `max_patches` is a float + between 0 and 1, it is taken to be a proportion of the total number + of patches. If `max_patches` is None it corresponds to the total number + of patches that can be extracted. + + random_state : int, RandomState instance, default=None + Determines the random number generator used for random sampling when + `max_patches` is not None. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + Returns + ------- + patches : array of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The collection of patches extracted from the image, where `n_patches` + is either `max_patches` or the total number of patches that can be + extracted. + + Examples + -------- + >>> from sklearn.datasets import load_sample_image + >>> from sklearn.feature_extraction import image + >>> # Use the array data from the first image in this dataset: + >>> one_image = load_sample_image("china.jpg") + >>> print('Image shape: {}'.format(one_image.shape)) + Image shape: (427, 640, 3) + >>> patches = image.extract_patches_2d(one_image, (2, 2)) + >>> print('Patches shape: {}'.format(patches.shape)) + Patches shape: (272214, 2, 2, 3) + >>> # Here are just two of these patches: + >>> print(patches[1]) + [[[174 201 231] + [174 201 231]] + [[173 200 230] + [173 200 230]]] + >>> print(patches[800]) + [[[187 214 243] + [188 215 244]] + [[187 214 243] + [188 215 244]]] + """ + i_h, i_w = image.shape[:2] + p_h, p_w = patch_size + + if p_h > i_h: + raise ValueError( + "Height of the patch should be less than the height of the image." + ) + + if p_w > i_w: + raise ValueError( + "Width of the patch should be less than the width of the image." + ) + + image = check_array(image, allow_nd=True) + image = image.reshape((i_h, i_w, -1)) + n_colors = image.shape[-1] + + extracted_patches = _extract_patches( + image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 + ) + + n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) + if max_patches: + rng = check_random_state(random_state) + i_s = rng.randint(i_h - p_h + 1, size=n_patches) + j_s = rng.randint(i_w - p_w + 1, size=n_patches) + patches = extracted_patches[i_s, j_s, 0] + else: + patches = extracted_patches + + patches = patches.reshape(-1, p_h, p_w, n_colors) + # remove the color dimension if useless + if patches.shape[-1] == 1: + return patches.reshape((n_patches, p_h, p_w)) + else: + return patches + + +@validate_params( + {"patches": [np.ndarray], "image_size": [tuple, Hidden(list)]}, + prefer_skip_nested_validation=True, +) +def reconstruct_from_patches_2d(patches, image_size): + """Reconstruct the image from all of its patches. + + Patches are assumed to overlap and the image is constructed by filling in + the patches from left to right, top to bottom, averaging the overlapping + regions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + patches : ndarray of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The complete set of patches. If the patches contain colour information, + channels are indexed along the last dimension: RGB patches would + have `n_channels=3`. + + image_size : tuple of int (image_height, image_width) or \ + (image_height, image_width, n_channels) + The size of the image that will be reconstructed. + + Returns + ------- + image : ndarray of shape image_size + The reconstructed image. 
+ """ + i_h, i_w = image_size[:2] + p_h, p_w = patches.shape[1:3] + img = np.zeros(image_size) + # compute the dimensions of the patches array + n_h = i_h - p_h + 1 + n_w = i_w - p_w + 1 + for p, (i, j) in zip(patches, product(range(n_h), range(n_w))): + img[i : i + p_h, j : j + p_w] += p + + for i in range(i_h): + for j in range(i_w): + # divide by the amount of overlap + # XXX: is this the most efficient way? memory-wise yes, cpu wise? + img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j)) + return img + + +class PatchExtractor(TransformerMixin, BaseEstimator): + """Extracts patches from a collection of images. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + patch_size : tuple of int (patch_height, patch_width), default=None + The dimensions of one patch. If set to None, the patch size will be + automatically set to `(img_height // 10, img_width // 10)`, where + `img_height` and `img_width` are the dimensions of the input images. + + max_patches : int or float, default=None + The maximum number of patches per image to extract. If `max_patches` is + a float in (0, 1), it is taken to mean a proportion of the total number + of patches. If set to None, extract all possible patches. + + random_state : int, RandomState instance, default=None + Determines the random number generator used for random sampling when + `max_patches is not None`. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + See Also + -------- + reconstruct_from_patches_2d : Reconstruct image from all of its patches. + + Notes + ----- + This estimator is stateless and does not need to be fitted. However, we + recommend to call :meth:`fit_transform` instead of :meth:`transform`, as + parameter validation is only performed in :meth:`fit`. + + Examples + -------- + >>> from sklearn.datasets import load_sample_images + >>> from sklearn.feature_extraction import image + >>> # Use the array data from the second image in this dataset: + >>> X = load_sample_images().images[1] + >>> X = X[None, ...] + >>> print(f"Image shape: {X.shape}") + Image shape: (1, 427, 640, 3) + >>> pe = image.PatchExtractor(patch_size=(10, 10)) + >>> pe_trans = pe.transform(X) + >>> print(f"Patches shape: {pe_trans.shape}") + Patches shape: (263758, 10, 10, 3) + >>> X_reconstructed = image.reconstruct_from_patches_2d(pe_trans, X.shape[1:]) + >>> print(f"Reconstructed shape: {X_reconstructed.shape}") + Reconstructed shape: (427, 640, 3) + """ + + _parameter_constraints: dict = { + "patch_size": [tuple, None], + "max_patches": [ + None, + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(Integral, 1, None, closed="left"), + ], + "random_state": ["random_state"], + } + + def __init__(self, *, patch_size=None, max_patches=None, random_state=None): + self.patch_size = patch_size + self.max_patches = max_patches + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validate the parameters of the estimator. + + This method allows to: (i) validate the parameters of the estimator and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : ndarray of shape (n_samples, image_height, image_width) or \ + (n_samples, image_height, image_width, n_channels) + Array of images from which to extract patches. For color images, + the last dimension specifies the channel: a RGB image would have + `n_channels=3`. 
+ + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + return self + + def transform(self, X): + """Transform the image samples in `X` into a matrix of patch data. + + Parameters + ---------- + X : ndarray of shape (n_samples, image_height, image_width) or \ + (n_samples, image_height, image_width, n_channels) + Array of images from which to extract patches. For color images, + the last dimension specifies the channel: a RGB image would have + `n_channels=3`. + + Returns + ------- + patches : array of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The collection of patches extracted from the images, where + `n_patches` is either `n_samples * max_patches` or the total + number of patches that can be extracted. + """ + X = self._validate_data( + X=X, + ensure_2d=False, + allow_nd=True, + ensure_min_samples=1, + ensure_min_features=1, + reset=False, + ) + random_state = check_random_state(self.random_state) + n_imgs, img_height, img_width = X.shape[:3] + if self.patch_size is None: + patch_size = img_height // 10, img_width // 10 + else: + if len(self.patch_size) != 2: + raise ValueError( + "patch_size must be a tuple of two integers. Got" + f" {self.patch_size} instead." + ) + patch_size = self.patch_size + + n_imgs, img_height, img_width = X.shape[:3] + X = np.reshape(X, (n_imgs, img_height, img_width, -1)) + n_channels = X.shape[-1] + + # compute the dimensions of the patches array + patch_height, patch_width = patch_size + n_patches = _compute_n_patches( + img_height, img_width, patch_height, patch_width, self.max_patches + ) + patches_shape = (n_imgs * n_patches,) + patch_size + if n_channels > 1: + patches_shape += (n_channels,) + + # extract the patches + patches = np.empty(patches_shape) + for ii, image in enumerate(X): + patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d( + image, + patch_size, + max_patches=self.max_patches, + random_state=random_state, + ) + return patches + + def _more_tags(self): + return {"X_types": ["3darray"], "stateless": True} diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2819b274e5dbe9f251a158fee5e447ba0bfd2e28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b4183de25e1f093edf0dc1d60420b496cbdce5b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b67b0a428b7ca298258a1851b8a9af66cfc6463 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c65ff4b6bc87d02da5d0c0c2cceeae5dc69d0f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06f1f37536fd910beedaa04b966a964612a3c6aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py new file mode 100644 index 0000000000000000000000000000000000000000..e9784d68d7199c7e40ad70d304c14ba8c66b04a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_dict_vectorizer.py @@ -0,0 +1,262 @@ +# Authors: Lars Buitinck +# Dan Blanchard +# License: BSD 3 clause + +from random import Random + +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.exceptions import NotFittedError +from sklearn.feature_extraction import DictVectorizer +from sklearn.feature_selection import SelectKBest, chi2 + + +@pytest.mark.parametrize("sparse", (True, False)) +@pytest.mark.parametrize("dtype", (int, np.float32, np.int16)) +@pytest.mark.parametrize("sort", (True, False)) +@pytest.mark.parametrize("iterable", (True, False)) +def test_dictvectorizer(sparse, dtype, sort, iterable): + D = [{"foo": 1, "bar": 3}, {"bar": 4, "baz": 2}, {"bar": 1, "quux": 1, "quuux": 2}] + + v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort) + X = v.fit_transform(iter(D) if iterable else D) + + assert sp.issparse(X) == sparse + assert X.shape == (3, 5) + assert X.sum() == 14 + assert v.inverse_transform(X) == D + + if sparse: + # CSR matrices can't be compared for equality + assert_array_equal( + X.toarray(), v.transform(iter(D) if iterable else D).toarray() + ) + else: + assert_array_equal(X, v.transform(iter(D) if iterable else D)) + + if sort: + assert v.feature_names_ == sorted(v.feature_names_) + + +def test_feature_selection(): + # make two feature dicts with two useful features and a bunch of useless + # ones, in terms of chi2 + d1 = dict([("useless%d" % i, 10) for i in range(20)], useful1=1, useful2=20) + d2 = dict([("useless%d" % i, 10) for i in range(20)], useful1=20, useful2=1) + + for indices in (True, False): + v = DictVectorizer().fit([d1, d2]) + X = v.transform([d1, d2]) + sel = SelectKBest(chi2, k=2).fit(X, [0, 1]) + + v.restrict(sel.get_support(indices=indices), indices=indices) + 
assert_array_equal(v.get_feature_names_out(), ["useful1", "useful2"]) + + +def test_one_of_k(): + D_in = [ + {"version": "1", "ham": 2}, + {"version": "2", "spam": 0.3}, + {"version=3": True, "spam": -1}, + ] + v = DictVectorizer() + X = v.fit_transform(D_in) + assert X.shape == (3, 5) + + D_out = v.inverse_transform(X) + assert D_out[0] == {"version=1": 1, "ham": 2} + + names = v.get_feature_names_out() + assert "version=2" in names + assert "version" not in names + + +def test_iterable_value(): + D_names = ["ham", "spam", "version=1", "version=2", "version=3"] + X_expected = [ + [2.0, 0.0, 2.0, 1.0, 0.0], + [0.0, 0.3, 0.0, 1.0, 0.0], + [0.0, -1.0, 0.0, 0.0, 1.0], + ] + D_in = [ + {"version": ["1", "2", "1"], "ham": 2}, + {"version": "2", "spam": 0.3}, + {"version=3": True, "spam": -1}, + ] + v = DictVectorizer() + X = v.fit_transform(D_in) + X = X.toarray() + assert_array_equal(X, X_expected) + + D_out = v.inverse_transform(X) + assert D_out[0] == {"version=1": 2, "version=2": 1, "ham": 2} + + names = v.get_feature_names_out() + + assert_array_equal(names, D_names) + + +def test_iterable_not_string_error(): + error_value = ( + "Unsupported type in iterable value. " + "Only iterables of string are supported." + ) + D2 = [{"foo": "1", "bar": "2"}, {"foo": "3", "baz": "1"}, {"foo": [1, "three"]}] + v = DictVectorizer(sparse=False) + with pytest.raises(TypeError) as error: + v.fit(D2) + assert str(error.value) == error_value + + +def test_mapping_error(): + error_value = ( + "Unsupported value type " + "for foo: {'one': 1, 'three': 3}.\n" + "Mapping objects are not supported." + ) + D2 = [ + {"foo": "1", "bar": "2"}, + {"foo": "3", "baz": "1"}, + {"foo": {"one": 1, "three": 3}}, + ] + v = DictVectorizer(sparse=False) + with pytest.raises(TypeError) as error: + v.fit(D2) + assert str(error.value) == error_value + + +def test_unseen_or_no_features(): + D = [{"camelot": 0, "spamalot": 1}] + for sparse in [True, False]: + v = DictVectorizer(sparse=sparse).fit(D) + + X = v.transform({"push the pram a lot": 2}) + if sparse: + X = X.toarray() + assert_array_equal(X, np.zeros((1, 2))) + + X = v.transform({}) + if sparse: + X = X.toarray() + assert_array_equal(X, np.zeros((1, 2))) + + with pytest.raises(ValueError, match="empty"): + v.transform([]) + + +def test_deterministic_vocabulary(global_random_seed): + # Generate equal dictionaries with different memory layouts + items = [("%03d" % i, i) for i in range(1000)] + rng = Random(global_random_seed) + d_sorted = dict(items) + rng.shuffle(items) + d_shuffled = dict(items) + + # check that the memory layout does not impact the resulting vocabulary + v_1 = DictVectorizer().fit([d_sorted]) + v_2 = DictVectorizer().fit([d_shuffled]) + + assert v_1.vocabulary_ == v_2.vocabulary_ + + +def test_n_features_in(): + # For vectorizers, n_features_in_ does not make sense and does not exist. + dv = DictVectorizer() + assert not hasattr(dv, "n_features_in_") + d = [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}] + dv.fit(d) + assert not hasattr(dv, "n_features_in_") + + +def test_dictvectorizer_dense_sparse_equivalence(): + """Check the equivalence between between sparse and dense DictVectorizer. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19978 + """ + movie_entry_fit = [ + {"category": ["thriller", "drama"], "year": 2003}, + {"category": ["animation", "family"], "year": 2011}, + {"year": 1974}, + ] + movie_entry_transform = [{"category": ["thriller"], "unseen_feature": "3"}] + dense_vectorizer = DictVectorizer(sparse=False) + sparse_vectorizer = DictVectorizer(sparse=True) + + dense_vector_fit = dense_vectorizer.fit_transform(movie_entry_fit) + sparse_vector_fit = sparse_vectorizer.fit_transform(movie_entry_fit) + + assert not sp.issparse(dense_vector_fit) + assert sp.issparse(sparse_vector_fit) + + assert_allclose(dense_vector_fit, sparse_vector_fit.toarray()) + + dense_vector_transform = dense_vectorizer.transform(movie_entry_transform) + sparse_vector_transform = sparse_vectorizer.transform(movie_entry_transform) + + assert not sp.issparse(dense_vector_transform) + assert sp.issparse(sparse_vector_transform) + + assert_allclose(dense_vector_transform, sparse_vector_transform.toarray()) + + dense_inverse_transform = dense_vectorizer.inverse_transform(dense_vector_transform) + sparse_inverse_transform = sparse_vectorizer.inverse_transform( + sparse_vector_transform + ) + + expected_inverse = [{"category=thriller": 1.0}] + assert dense_inverse_transform == expected_inverse + assert sparse_inverse_transform == expected_inverse + + +def test_dict_vectorizer_unsupported_value_type(): + """Check that we raise an error when the value associated to a feature + is not supported. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19489 + """ + + class A: + pass + + vectorizer = DictVectorizer(sparse=True) + X = [{"foo": A()}] + err_msg = "Unsupported value Type" + with pytest.raises(TypeError, match=err_msg): + vectorizer.fit_transform(X) + + +def test_dict_vectorizer_get_feature_names_out(): + """Check that integer feature names are converted to strings in + feature_names_out.""" + + X = [{1: 2, 3: 4}, {2: 4}] + dv = DictVectorizer(sparse=False).fit(X) + + feature_names = dv.get_feature_names_out() + assert isinstance(feature_names, np.ndarray) + assert feature_names.dtype == object + assert_array_equal(feature_names, ["1", "2", "3"]) + + +@pytest.mark.parametrize( + "method, input", + [ + ("transform", [{1: 2, 3: 4}, {2: 4}]), + ("inverse_transform", [{1: 2, 3: 4}, {2: 4}]), + ("restrict", [True, False, True]), + ], +) +def test_dict_vectorizer_not_fitted_error(method, input): + """Check that unfitted DictVectorizer instance raises NotFittedError. + + This should be part of the common test but currently they test estimator accepting + text input. 
+ """ + dv = DictVectorizer(sparse=False) + + with pytest.raises(NotFittedError): + getattr(dv, method)(input) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py new file mode 100644 index 0000000000000000000000000000000000000000..276d0d48b077022559c775eab90abf363ffc6989 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_feature_hasher.py @@ -0,0 +1,160 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.feature_extraction import FeatureHasher +from sklearn.feature_extraction._hashing_fast import transform as _hashing_transform + + +def test_feature_hasher_dicts(): + feature_hasher = FeatureHasher(n_features=16) + assert "dict" == feature_hasher.input_type + + raw_X = [{"foo": "bar", "dada": 42, "tzara": 37}, {"foo": "baz", "gaga": "string1"}] + X1 = FeatureHasher(n_features=16).transform(raw_X) + gen = (iter(d.items()) for d in raw_X) + X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen) + assert_array_equal(X1.toarray(), X2.toarray()) + + +def test_feature_hasher_strings(): + # mix byte and Unicode strings; note that "foo" is a duplicate in row 0 + raw_X = [ + ["foo", "bar", "baz", "foo".encode("ascii")], + ["bar".encode("ascii"), "baz", "quux"], + ] + + for lg_n_features in (7, 9, 11, 16, 22): + n_features = 2**lg_n_features + + it = (x for x in raw_X) # iterable + + feature_hasher = FeatureHasher( + n_features=n_features, input_type="string", alternate_sign=False + ) + X = feature_hasher.transform(it) + + assert X.shape[0] == len(raw_X) + assert X.shape[1] == n_features + + assert X[0].sum() == 4 + assert X[1].sum() == 3 + + assert X.nnz == 6 + + +@pytest.mark.parametrize( + "raw_X", + [ + ["my_string", "another_string"], + (x for x in ["my_string", "another_string"]), + ], + ids=["list", "generator"], +) +def test_feature_hasher_single_string(raw_X): + """FeatureHasher raises error when a sample is a single string. + + Non-regression test for gh-13199. 
+ """ + msg = "Samples can not be a single string" + + feature_hasher = FeatureHasher(n_features=10, input_type="string") + with pytest.raises(ValueError, match=msg): + feature_hasher.transform(raw_X) + + +def test_hashing_transform_seed(): + # check the influence of the seed when computing the hashes + raw_X = [ + ["foo", "bar", "baz", "foo".encode("ascii")], + ["bar".encode("ascii"), "baz", "quux"], + ] + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices, indptr, _ = _hashing_transform(raw_X_, 2**7, str, False) + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=0) + assert_array_equal(indices, indices_0) + assert_array_equal(indptr, indptr_0) + + raw_X_ = (((f, 1) for f in x) for x in raw_X) + indices_1, _, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=1) + with pytest.raises(AssertionError): + assert_array_equal(indices, indices_1) + + +def test_feature_hasher_pairs(): + raw_X = ( + iter(d.items()) + for d in [{"foo": 1, "bar": 2}, {"baz": 3, "quux": 4, "foo": -1}] + ) + feature_hasher = FeatureHasher(n_features=16, input_type="pair") + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = sorted(np.abs(x1[x1 != 0])) + x2_nz = sorted(np.abs(x2[x2 != 0])) + assert [1, 2] == x1_nz + assert [1, 3, 4] == x2_nz + + +def test_feature_hasher_pairs_with_string_values(): + raw_X = ( + iter(d.items()) + for d in [{"foo": 1, "bar": "a"}, {"baz": "abc", "quux": 4, "foo": -1}] + ) + feature_hasher = FeatureHasher(n_features=16, input_type="pair") + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = sorted(np.abs(x1[x1 != 0])) + x2_nz = sorted(np.abs(x2[x2 != 0])) + assert [1, 1] == x1_nz + assert [1, 1, 4] == x2_nz + + raw_X = (iter(d.items()) for d in [{"bax": "abc"}, {"bax": "abc"}]) + x1, x2 = feature_hasher.transform(raw_X).toarray() + x1_nz = np.abs(x1[x1 != 0]) + x2_nz = np.abs(x2[x2 != 0]) + assert [1] == x1_nz + assert [1] == x2_nz + assert_array_equal(x1, x2) + + +def test_hash_empty_input(): + n_features = 16 + raw_X = [[], (), iter(range(0))] + + feature_hasher = FeatureHasher(n_features=n_features, input_type="string") + X = feature_hasher.transform(raw_X) + + assert_array_equal(X.toarray(), np.zeros((len(raw_X), n_features))) + + +def test_hasher_zeros(): + # Assert that no zeros are materialized in the output. 
+ X = FeatureHasher().transform([{"foo": 0}]) + assert X.data.shape == (0,) + + +def test_hasher_alternate_sign(): + X = [list("Thequickbrownfoxjumped")] + + Xt = FeatureHasher(alternate_sign=True, input_type="string").fit_transform(X) + assert Xt.data.min() < 0 and Xt.data.max() > 0 + + Xt = FeatureHasher(alternate_sign=False, input_type="string").fit_transform(X) + assert Xt.data.min() > 0 + + +def test_hash_collisions(): + X = [list("Thequickbrownfoxjumped")] + + Xt = FeatureHasher( + alternate_sign=True, n_features=1, input_type="string" + ).fit_transform(X) + # check that some of the hashed tokens are added + # with an opposite sign and cancel out + assert abs(Xt.data[0]) < len(X[0]) + + Xt = FeatureHasher( + alternate_sign=False, n_features=1, input_type="string" + ).fit_transform(X) + assert Xt.data[0] == len(X[0]) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py new file mode 100644 index 0000000000000000000000000000000000000000..375652c848db66996f24d4d8c52d009659e8b16b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_image.py @@ -0,0 +1,356 @@ +# Authors: Emmanuelle Gouillart +# Gael Varoquaux +# License: BSD 3 clause + +import numpy as np +import pytest +from scipy import ndimage +from scipy.sparse.csgraph import connected_components + +from sklearn.feature_extraction.image import ( + PatchExtractor, + _extract_patches, + extract_patches_2d, + grid_to_graph, + img_to_graph, + reconstruct_from_patches_2d, +) + + +def test_img_to_graph(): + x, y = np.mgrid[:4, :4] - 10 + grad_x = img_to_graph(x) + grad_y = img_to_graph(y) + assert grad_x.nnz == grad_y.nnz + # Negative elements are the diagonal: the elements of the original + # image. 
Positive elements are the values of the gradient, they + # should all be equal on grad_x and grad_y + np.testing.assert_array_equal( + grad_x.data[grad_x.data > 0], grad_y.data[grad_y.data > 0] + ) + + +def test_img_to_graph_sparse(): + # Check that the edges are in the right position + # when using a sparse image with a singleton component + mask = np.zeros((2, 3), dtype=bool) + mask[0, 0] = 1 + mask[:, 2] = 1 + x = np.zeros((2, 3)) + x[0, 0] = 1 + x[0, 2] = -1 + x[1, 2] = -2 + grad_x = img_to_graph(x, mask=mask).todense() + desired = np.array([[1, 0, 0], [0, -1, 1], [0, 1, -2]]) + np.testing.assert_array_equal(grad_x, desired) + + +def test_grid_to_graph(): + # Checking that the function works with graphs containing no edges + size = 2 + roi_size = 1 + # Generating two convex parts with one vertex + # Thus, edges will be empty in _to_graph + mask = np.zeros((size, size), dtype=bool) + mask[0:roi_size, 0:roi_size] = True + mask[-roi_size:, -roi_size:] = True + mask = mask.reshape(size**2) + A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray) + assert connected_components(A)[0] == 2 + + # check ordering + mask = np.zeros((2, 3), dtype=bool) + mask[0, 0] = 1 + mask[:, 2] = 1 + graph = grid_to_graph(2, 3, 1, mask=mask.ravel()).todense() + desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) + np.testing.assert_array_equal(graph, desired) + + # Checking that the function works whatever the type of mask is + mask = np.ones((size, size), dtype=np.int16) + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask) + assert connected_components(A)[0] == 1 + + # Checking dtype of the graph + mask = np.ones((size, size)) + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool) + assert A.dtype == bool + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int) + assert A.dtype == int + A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64) + assert A.dtype == np.float64 + + +def test_connect_regions(raccoon_face_fxt): + face = raccoon_face_fxt + # subsample by 4 to reduce run time + face = face[::4, ::4] + for thr in (50, 150): + mask = face > thr + graph = img_to_graph(face, mask=mask) + assert ndimage.label(mask)[1] == connected_components(graph)[0] + + +def test_connect_regions_with_grid(raccoon_face_fxt): + face = raccoon_face_fxt + + # subsample by 4 to reduce run time + face = face[::4, ::4] + + mask = face > 50 + graph = grid_to_graph(*face.shape, mask=mask) + assert ndimage.label(mask)[1] == connected_components(graph)[0] + + mask = face > 150 + graph = grid_to_graph(*face.shape, mask=mask, dtype=None) + assert ndimage.label(mask)[1] == connected_components(graph)[0] + + +@pytest.fixture +def downsampled_face(raccoon_face_fxt): + face = raccoon_face_fxt + face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2] + face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2] + face = face.astype(np.float32) + face /= 16.0 + return face + + +@pytest.fixture +def orange_face(downsampled_face): + face = downsampled_face + face_color = np.zeros(face.shape + (3,)) + face_color[:, :, 0] = 256 - face + face_color[:, :, 1] = 256 - face / 2 + face_color[:, :, 2] = 256 - face / 4 + return face_color + + +def _make_images(face): + # make a collection of faces + images = np.zeros((3,) + face.shape) + images[0] = face + images[1] = face + 1 + images[2] = face + 2 + return images + + +@pytest.fixture +def downsampled_face_collection(downsampled_face): + return _make_images(downsampled_face) + + +def 
test_extract_patches_all(downsampled_face): + face = downsampled_face + i_h, i_w = face.shape + p_h, p_w = 16, 16 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + patches = extract_patches_2d(face, (p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_extract_patches_all_color(orange_face): + face = orange_face + i_h, i_w = face.shape[:2] + p_h, p_w = 16, 16 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + patches = extract_patches_2d(face, (p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w, 3) + + +def test_extract_patches_all_rect(downsampled_face): + face = downsampled_face + face = face[:, 32:97] + i_h, i_w = face.shape + p_h, p_w = 16, 12 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + + patches = extract_patches_2d(face, (p_h, p_w)) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_extract_patches_max_patches(downsampled_face): + face = downsampled_face + i_h, i_w = face.shape + p_h, p_w = 16, 16 + + patches = extract_patches_2d(face, (p_h, p_w), max_patches=100) + assert patches.shape == (100, p_h, p_w) + + expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1)) + patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5) + assert patches.shape == (expected_n_patches, p_h, p_w) + + with pytest.raises(ValueError): + extract_patches_2d(face, (p_h, p_w), max_patches=2.0) + with pytest.raises(ValueError): + extract_patches_2d(face, (p_h, p_w), max_patches=-1.0) + + +def test_extract_patch_same_size_image(downsampled_face): + face = downsampled_face + # Request patches of the same size as image + # Should return just the single patch a.k.a. the image + patches = extract_patches_2d(face, face.shape, max_patches=2) + assert patches.shape[0] == 1 + + +def test_extract_patches_less_than_max_patches(downsampled_face): + face = downsampled_face + i_h, i_w = face.shape + p_h, p_w = 3 * i_h // 4, 3 * i_w // 4 + # this is 3185 + expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) + + patches = extract_patches_2d(face, (p_h, p_w), max_patches=4000) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_reconstruct_patches_perfect(downsampled_face): + face = downsampled_face + p_h, p_w = 16, 16 + + patches = extract_patches_2d(face, (p_h, p_w)) + face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) + np.testing.assert_array_almost_equal(face, face_reconstructed) + + +def test_reconstruct_patches_perfect_color(orange_face): + face = orange_face + p_h, p_w = 16, 16 + + patches = extract_patches_2d(face, (p_h, p_w)) + face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) + np.testing.assert_array_almost_equal(face, face_reconstructed) + + +def test_patch_extractor_fit(downsampled_face_collection): + faces = downsampled_face_collection + extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0) + assert extr == extr.fit(faces) + + +def test_patch_extractor_max_patches(downsampled_face_collection): + faces = downsampled_face_collection + i_h, i_w = faces.shape[1:3] + p_h, p_w = 8, 8 + + max_patches = 100 + expected_n_patches = len(faces) * max_patches + extr = PatchExtractor( + patch_size=(p_h, p_w), max_patches=max_patches, random_state=0 + ) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w) + + max_patches = 0.5 + expected_n_patches = len(faces) * int( + (i_h - p_h + 1) * (i_w - p_w + 1) * max_patches + ) + extr = PatchExtractor( + patch_size=(p_h, p_w), max_patches=max_patches, 
random_state=0 + ) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_patch_extractor_max_patches_default(downsampled_face_collection): + faces = downsampled_face_collection + extr = PatchExtractor(max_patches=100, random_state=0) + patches = extr.transform(faces) + assert patches.shape == (len(faces) * 100, 19, 25) + + +def test_patch_extractor_all_patches(downsampled_face_collection): + faces = downsampled_face_collection + i_h, i_w = faces.shape[1:3] + p_h, p_w = 8, 8 + expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) + extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w) + + +def test_patch_extractor_color(orange_face): + faces = _make_images(orange_face) + i_h, i_w = faces.shape[1:3] + p_h, p_w = 8, 8 + expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) + extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) + patches = extr.transform(faces) + assert patches.shape == (expected_n_patches, p_h, p_w, 3) + + +def test_extract_patches_strided(): + image_shapes_1D = [(10,), (10,), (11,), (10,)] + patch_sizes_1D = [(1,), (2,), (3,), (8,)] + patch_steps_1D = [(1,), (1,), (4,), (2,)] + + expected_views_1D = [(10,), (9,), (3,), (2,)] + last_patch_1D = [(10,), (8,), (8,), (2,)] + + image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)] + patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)] + patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)] + + expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)] + last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)] + + image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)] + patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)] + patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)] + + expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)] + last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)] + + image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D + patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D + patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D + expected_views = expected_views_1D + expected_views_2D + expected_views_3D + last_patches = last_patch_1D + last_patch_2D + last_patch_3D + + for image_shape, patch_size, patch_step, expected_view, last_patch in zip( + image_shapes, patch_sizes, patch_steps, expected_views, last_patches + ): + image = np.arange(np.prod(image_shape)).reshape(image_shape) + patches = _extract_patches( + image, patch_shape=patch_size, extraction_step=patch_step + ) + + ndim = len(image_shape) + + assert patches.shape[:ndim] == expected_view + last_patch_slices = tuple( + slice(i, i + j, None) for i, j in zip(last_patch, patch_size) + ) + assert ( + patches[(-1, None, None) * ndim] == image[last_patch_slices].squeeze() + ).all() + + +def test_extract_patches_square(downsampled_face): + # test same patch size for all dimensions + face = downsampled_face + i_h, i_w = face.shape + p = 8 + expected_n_patches = ((i_h - p + 1), (i_w - p + 1)) + patches = _extract_patches(face, patch_shape=p) + assert patches.shape == (expected_n_patches[0], expected_n_patches[1], p, p) + + +def test_width_patch(): + # width and height of the patch should be less than the image + x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + with pytest.raises(ValueError): + extract_patches_2d(x, (4, 1)) + with pytest.raises(ValueError): + extract_patches_2d(x, (1, 4)) + + +def 
test_patch_extractor_wrong_input(orange_face): + """Check that an informative error is raised if the patch_size is not valid.""" + faces = _make_images(orange_face) + err_msg = "patch_size must be a tuple of two integers" + extractor = PatchExtractor(patch_size=(8, 8, 8)) + with pytest.raises(ValueError, match=err_msg): + extractor.transform(faces) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py new file mode 100644 index 0000000000000000000000000000000000000000..7c7cac85ccc6ba3deeec862246f2118b6131fcf2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/tests/test_text.py @@ -0,0 +1,1655 @@ +import pickle +import re +import warnings +from collections import defaultdict +from collections.abc import Mapping +from functools import partial +from io import StringIO +from itertools import product + +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from scipy import sparse + +from sklearn.base import clone +from sklearn.feature_extraction.text import ( + ENGLISH_STOP_WORDS, + CountVectorizer, + HashingVectorizer, + TfidfTransformer, + TfidfVectorizer, + strip_accents_ascii, + strip_accents_unicode, + strip_tags, +) +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.pipeline import Pipeline +from sklearn.svm import LinearSVC +from sklearn.utils import _IS_WASM, IS_PYPY +from sklearn.utils._testing import ( + assert_allclose_dense_sparse, + assert_almost_equal, + fails_if_pypy, + skip_if_32bit, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +JUNK_FOOD_DOCS = ( + "the pizza pizza beer copyright", + "the pizza burger beer copyright", + "the the pizza beer beer copyright", + "the burger beer beer copyright", + "the coke burger coke copyright", + "the coke burger burger", +) + +NOTJUNK_FOOD_DOCS = ( + "the salad celeri copyright", + "the salad salad sparkling water copyright", + "the the celeri celeri copyright", + "the tomato tomato salad water", + "the tomato salad water copyright", +) + +ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + +def uppercase(s): + return strip_accents_unicode(s).upper() + + +def strip_eacute(s): + return s.replace("é", "e") + + +def split_tokenize(s): + return s.split() + + +def lazy_analyze(s): + return ["the_ultimate_feature"] + + +def test_strip_accents(): + # check some classical latin accentuated symbols + a = "àáâãäåçèéêë" + expected = "aaaaaaceeee" + assert strip_accents_unicode(a) == expected + + a = "ìíîïñòóôõöùúûüý" + expected = "iiiinooooouuuuy" + assert strip_accents_unicode(a) == expected + + # check some arabic + a = "\u0625" # alef with a hamza below: إ + expected = "\u0627" # simple alef: ا + assert strip_accents_unicode(a) == expected + + # mix letters accentuated and not + a = "this is à test" + expected = "this is a test" + assert strip_accents_unicode(a) == expected + + # strings that are already decomposed + a = "o\u0308" # o with diaeresis + expected = "o" + assert strip_accents_unicode(a) == expected + + # combining marks by themselves + a = "\u0300\u0301\u0302\u0303" + expected = "" + assert strip_accents_unicode(a) == expected + + # Multiple combining marks on one character + a = "o\u0308\u0304" + expected = "o" + assert strip_accents_unicode(a) == expected + + +def test_to_ascii(): + # check some classical latin accentuated symbols + a = "àáâãäåçèéêë" + expected 
= "aaaaaaceeee" + assert strip_accents_ascii(a) == expected + + a = "ìíîïñòóôõöùúûüý" + expected = "iiiinooooouuuuy" + assert strip_accents_ascii(a) == expected + + # check some arabic + a = "\u0625" # halef with a hamza below + expected = "" # halef has no direct ascii match + assert strip_accents_ascii(a) == expected + + # mix letters accentuated and not + a = "this is à test" + expected = "this is a test" + assert strip_accents_ascii(a) == expected + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer)) +def test_word_analyzer_unigrams(Vectorizer): + wa = Vectorizer(strip_accents="ascii").build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "ai", + "mange", + "du", + "kangourou", + "ce", + "midi", + "etait", + "pas", + "tres", + "bon", + ] + assert wa(text) == expected + + text = "This is a test, really.\n\n I met Harry yesterday." + expected = ["this", "is", "test", "really", "met", "harry", "yesterday"] + assert wa(text) == expected + + wa = Vectorizer(input="file").build_analyzer() + text = StringIO("This is a test with a file-like object!") + expected = ["this", "is", "test", "with", "file", "like", "object"] + assert wa(text) == expected + + # with custom preprocessor + wa = Vectorizer(preprocessor=uppercase).build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "AI", + "MANGE", + "DU", + "KANGOUROU", + "CE", + "MIDI", + "ETAIT", + "PAS", + "TRES", + "BON", + ] + assert wa(text) == expected + + # with custom tokenizer + wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "j'ai", + "mange", + "du", + "kangourou", + "ce", + "midi,", + "c'etait", + "pas", + "tres", + "bon.", + ] + assert wa(text) == expected + + +def test_word_analyzer_unigrams_and_bigrams(): + wa = CountVectorizer( + analyzer="word", strip_accents="unicode", ngram_range=(1, 2) + ).build_analyzer() + + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + expected = [ + "ai", + "mange", + "du", + "kangourou", + "ce", + "midi", + "etait", + "pas", + "tres", + "bon", + "ai mange", + "mange du", + "du kangourou", + "kangourou ce", + "ce midi", + "midi etait", + "etait pas", + "pas tres", + "tres bon", + ] + assert wa(text) == expected + + +def test_unicode_decode_error(): + # decode_error default to strict, so this should fail + # First, encode (as bytes) a unicode string. + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." + text_bytes = text.encode("utf-8") + + # Then let the Analyzer try to decode it as ascii. It should fail, + # because we have given it an incorrect encoding. 
+ wa = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer() + with pytest.raises(UnicodeDecodeError): + wa(text_bytes) + + ca = CountVectorizer( + analyzer="char", ngram_range=(3, 6), encoding="ascii" + ).build_analyzer() + with pytest.raises(UnicodeDecodeError): + ca(text_bytes) + + +def test_char_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="char", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "J'ai mangé du kangourou ce midi, c'était pas très bon" + expected = ["j'a", "'ai", "ai ", "i m", " ma"] + assert cnga(text)[:5] == expected + expected = ["s tres", " tres ", "tres b", "res bo", "es bon"] + assert cnga(text)[-5:] == expected + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = ["thi", "his", "is ", "s i", " is"] + assert cnga(text)[:5] == expected + + expected = [" yeste", "yester", "esterd", "sterda", "terday"] + assert cnga(text)[-5:] == expected + + cnga = CountVectorizer( + input="file", analyzer="char", ngram_range=(3, 6) + ).build_analyzer() + text = StringIO("This is a test with a file-like object!") + expected = ["thi", "his", "is ", "s i", " is"] + assert cnga(text)[:5] == expected + + +def test_char_wb_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = [" th", "thi", "his", "is ", " thi"] + assert cnga(text)[:5] == expected + + expected = ["yester", "esterd", "sterda", "terday", "erday "] + assert cnga(text)[-5:] == expected + + cnga = CountVectorizer( + input="file", analyzer="char_wb", ngram_range=(3, 6) + ).build_analyzer() + text = StringIO("A test with a file-like object!") + expected = [" a ", " te", "tes", "est", "st ", " tes"] + assert cnga(text)[:6] == expected + + +def test_word_ngram_analyzer(): + cnga = CountVectorizer( + analyzer="word", strip_accents="unicode", ngram_range=(3, 6) + ).build_analyzer() + + text = "This \n\tis a test, really.\n\n I met Harry yesterday" + expected = ["this is test", "is test really", "test really met"] + assert cnga(text)[:3] == expected + + expected = [ + "test really met harry yesterday", + "this is test really met harry", + "is test really met harry yesterday", + ] + assert cnga(text)[-3:] == expected + + cnga_file = CountVectorizer( + input="file", analyzer="word", ngram_range=(3, 6) + ).build_analyzer() + file = StringIO(text) + assert cnga_file(file) == cnga(text) + + +def test_countvectorizer_custom_vocabulary(): + vocab = {"pizza": 0, "beer": 1} + terms = set(vocab.keys()) + + # Try a few of the supported types. 
+ for typ in [dict, list, iter, partial(defaultdict, int)]: + v = typ(vocab) + vect = CountVectorizer(vocabulary=v) + vect.fit(JUNK_FOOD_DOCS) + if isinstance(v, Mapping): + assert vect.vocabulary_ == vocab + else: + assert set(vect.vocabulary_) == terms + X = vect.transform(JUNK_FOOD_DOCS) + assert X.shape[1] == len(terms) + v = typ(vocab) + vect = CountVectorizer(vocabulary=v) + inv = vect.inverse_transform(X) + assert len(inv) == X.shape[0] + + +def test_countvectorizer_custom_vocabulary_pipeline(): + what_we_like = ["pizza", "beer"] + pipe = Pipeline( + [ + ("count", CountVectorizer(vocabulary=what_we_like)), + ("tfidf", TfidfTransformer()), + ] + ) + X = pipe.fit_transform(ALL_FOOD_DOCS) + assert set(pipe.named_steps["count"].vocabulary_) == set(what_we_like) + assert X.shape[1] == len(what_we_like) + + +def test_countvectorizer_custom_vocabulary_repeated_indices(): + vocab = {"pizza": 0, "beer": 0} + msg = "Vocabulary contains repeated indices" + with pytest.raises(ValueError, match=msg): + vect = CountVectorizer(vocabulary=vocab) + vect.fit(["pasta_siziliana"]) + + +def test_countvectorizer_custom_vocabulary_gap_index(): + vocab = {"pizza": 1, "beer": 2} + with pytest.raises(ValueError, match="doesn't contain index"): + vect = CountVectorizer(vocabulary=vocab) + vect.fit(["pasta_verdura"]) + + +def test_countvectorizer_stop_words(): + cv = CountVectorizer() + cv.set_params(stop_words="english") + assert cv.get_stop_words() == ENGLISH_STOP_WORDS + cv.set_params(stop_words="_bad_str_stop_") + with pytest.raises(ValueError): + cv.get_stop_words() + cv.set_params(stop_words="_bad_unicode_stop_") + with pytest.raises(ValueError): + cv.get_stop_words() + stoplist = ["some", "other", "words"] + cv.set_params(stop_words=stoplist) + assert cv.get_stop_words() == set(stoplist) + + +def test_countvectorizer_empty_vocabulary(): + with pytest.raises(ValueError, match="empty vocabulary"): + vect = CountVectorizer(vocabulary=[]) + vect.fit(["foo"]) + + with pytest.raises(ValueError, match="empty vocabulary"): + v = CountVectorizer(max_df=1.0, stop_words="english") + # fit on stopwords only + v.fit(["to be or not to be", "and me too", "and so do you"]) + + +def test_fit_countvectorizer_twice(): + cv = CountVectorizer() + X1 = cv.fit_transform(ALL_FOOD_DOCS[:5]) + X2 = cv.fit_transform(ALL_FOOD_DOCS[5:]) + assert X1.shape[1] != X2.shape[1] + + +def test_countvectorizer_custom_token_pattern(): + """Check `get_feature_names_out()` when a custom token pattern is passed. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12971 + """ + corpus = [ + "This is the 1st document in my corpus.", + "This document is the 2nd sample.", + "And this is the 3rd one.", + "Is this the 4th document?", + ] + token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b" + vectorizer = CountVectorizer(token_pattern=token_pattern) + vectorizer.fit_transform(corpus) + expected = ["document", "one", "sample"] + feature_names_out = vectorizer.get_feature_names_out() + assert_array_equal(feature_names_out, expected) + + +def test_countvectorizer_custom_token_pattern_with_several_group(): + """Check that we raise an error if token pattern capture several groups. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12971 + """ + corpus = [ + "This is the 1st document in my corpus.", + "This document is the 2nd sample.", + "And this is the 3rd one.", + "Is this the 4th document?", + ] + + token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b" + err_msg = "More than 1 capturing group in token pattern" + vectorizer = CountVectorizer(token_pattern=token_pattern) + with pytest.raises(ValueError, match=err_msg): + vectorizer.fit(corpus) + + +def test_countvectorizer_uppercase_in_vocab(): + # Check that the check for uppercase in the provided vocabulary is only done at fit + # time and not at transform time (#21251) + vocabulary = ["Sample", "Upper", "Case", "Vocabulary"] + message = ( + "Upper case characters found in" + " vocabulary while 'lowercase'" + " is True. These entries will not" + " be matched with any documents" + ) + + vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary) + + with pytest.warns(UserWarning, match=message): + vectorizer.fit(vocabulary) + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + vectorizer.transform(vocabulary) + + +def test_tf_transformer_feature_names_out(): + """Check get_feature_names_out for TfidfTransformer""" + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X) + + feature_names_in = ["a", "c", "b"] + feature_names_out = tr.get_feature_names_out(feature_names_in) + assert_array_equal(feature_names_in, feature_names_out) + + +def test_tf_idf_smoothing(): + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + # check normalization + assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0]) + + # this is robust to features with only zeros + X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=True, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + +@pytest.mark.xfail( + _IS_WASM, + reason=( + "no floating point exceptions, see" + " https://github.com/numpy/numpy/pull/21895#issuecomment-1311525881" + ), +) +def test_tfidf_no_smoothing(): + X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=False, norm="l2") + tfidf = tr.fit_transform(X).toarray() + assert (tfidf >= 0).all() + + # check normalization + assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0]) + + # the lack of smoothing make IDF fragile in the presence of feature with + # only zeros + X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] + tr = TfidfTransformer(smooth_idf=False, norm="l2") + + in_warning_message = "divide by zero" + with pytest.warns(RuntimeWarning, match=in_warning_message): + tr.fit_transform(X).toarray() + + +def test_sublinear_tf(): + X = [[1], [2], [3]] + tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None) + tfidf = tr.fit_transform(X).toarray() + assert tfidf[0] == 1 + assert tfidf[1] > tfidf[0] + assert tfidf[2] > tfidf[1] + assert tfidf[1] < 2 + assert tfidf[2] < 3 + + +def test_vectorizer(): + # raw documents as an iterator + train_data = iter(ALL_FOOD_DOCS[:-1]) + test_data = [ALL_FOOD_DOCS[-1]] + n_train = len(ALL_FOOD_DOCS) - 1 + + # test without vocabulary + v1 = CountVectorizer(max_df=0.5) + counts_train = v1.fit_transform(train_data) + if hasattr(counts_train, "tocsr"): + counts_train = counts_train.tocsr() + assert counts_train[0, v1.vocabulary_["pizza"]] == 2 + + # build a vectorizer v1 with the 
same vocabulary as the one fitted by v1 + v2 = CountVectorizer(vocabulary=v1.vocabulary_) + + # compare that the two vectorizer give the same output on the test sample + for v in (v1, v2): + counts_test = v.transform(test_data) + if hasattr(counts_test, "tocsr"): + counts_test = counts_test.tocsr() + + vocabulary = v.vocabulary_ + assert counts_test[0, vocabulary["salad"]] == 1 + assert counts_test[0, vocabulary["tomato"]] == 1 + assert counts_test[0, vocabulary["water"]] == 1 + + # stop word from the fixed list + assert "the" not in vocabulary + + # stop word found automatically by the vectorizer DF thresholding + # words that are high frequent across the complete corpus are likely + # to be not informative (either real stop words of extraction + # artifacts) + assert "copyright" not in vocabulary + + # not present in the sample + assert counts_test[0, vocabulary["coke"]] == 0 + assert counts_test[0, vocabulary["burger"]] == 0 + assert counts_test[0, vocabulary["beer"]] == 0 + assert counts_test[0, vocabulary["pizza"]] == 0 + + # test tf-idf + t1 = TfidfTransformer(norm="l1") + tfidf = t1.fit(counts_train).transform(counts_train).toarray() + assert len(t1.idf_) == len(v1.vocabulary_) + assert tfidf.shape == (n_train, len(v1.vocabulary_)) + + # test tf-idf with new data + tfidf_test = t1.transform(counts_test).toarray() + assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_)) + + # test tf alone + t2 = TfidfTransformer(norm="l1", use_idf=False) + tf = t2.fit(counts_train).transform(counts_train).toarray() + assert not hasattr(t2, "idf_") + + # test idf transform with unlearned idf vector + t3 = TfidfTransformer(use_idf=True) + with pytest.raises(ValueError): + t3.transform(counts_train) + + # L1-normalized term frequencies sum to one + assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train) + + # test the direct tfidf vectorizer + # (equivalent to term count vectorizer + tfidf transformer) + train_data = iter(ALL_FOOD_DOCS[:-1]) + tv = TfidfVectorizer(norm="l1") + + tv.max_df = v1.max_df + tfidf2 = tv.fit_transform(train_data).toarray() + assert not tv.fixed_vocabulary_ + assert_array_almost_equal(tfidf, tfidf2) + + # test the direct tfidf vectorizer with new data + tfidf_test2 = tv.transform(test_data).toarray() + assert_array_almost_equal(tfidf_test, tfidf_test2) + + # test transform on unfitted vectorizer with empty vocabulary + v3 = CountVectorizer(vocabulary=None) + with pytest.raises(ValueError): + v3.transform(train_data) + + # ascii preprocessor? + v3.set_params(strip_accents="ascii", lowercase=False) + processor = v3.build_preprocessor() + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." 
+ expected = strip_accents_ascii(text) + result = processor(text) + assert expected == result + + # error on bad strip_accents param + v3.set_params(strip_accents="_gabbledegook_", preprocessor=None) + with pytest.raises(ValueError): + v3.build_preprocessor() + + # error with bad analyzer type + v3.set_params = "_invalid_analyzer_type_" + with pytest.raises(ValueError): + v3.build_analyzer() + + +def test_tfidf_vectorizer_setters(): + norm, use_idf, smooth_idf, sublinear_tf = "l2", False, False, False + tv = TfidfVectorizer( + norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf + ) + tv.fit(JUNK_FOOD_DOCS) + assert tv._tfidf.norm == norm + assert tv._tfidf.use_idf == use_idf + assert tv._tfidf.smooth_idf == smooth_idf + assert tv._tfidf.sublinear_tf == sublinear_tf + + # assigning value to `TfidfTransformer` should not have any effect until + # fitting + tv.norm = "l1" + tv.use_idf = True + tv.smooth_idf = True + tv.sublinear_tf = True + assert tv._tfidf.norm == norm + assert tv._tfidf.use_idf == use_idf + assert tv._tfidf.smooth_idf == smooth_idf + assert tv._tfidf.sublinear_tf == sublinear_tf + + tv.fit(JUNK_FOOD_DOCS) + assert tv._tfidf.norm == tv.norm + assert tv._tfidf.use_idf == tv.use_idf + assert tv._tfidf.smooth_idf == tv.smooth_idf + assert tv._tfidf.sublinear_tf == tv.sublinear_tf + + +@fails_if_pypy +def test_hashing_vectorizer(): + v = HashingVectorizer() + X = v.transform(ALL_FOOD_DOCS) + token_nnz = X.nnz + assert X.shape == (len(ALL_FOOD_DOCS), v.n_features) + assert X.dtype == v.dtype + + # By default the hashed values receive a random sign and l2 normalization + # makes the feature values bounded + assert np.min(X.data) > -1 + assert np.min(X.data) < 0 + assert np.max(X.data) > 0 + assert np.max(X.data) < 1 + + # Check that the rows are normalized + for i in range(X.shape[0]): + assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0) + + # Check vectorization with some non-default parameters + v = HashingVectorizer(ngram_range=(1, 2), norm="l1") + X = v.transform(ALL_FOOD_DOCS) + assert X.shape == (len(ALL_FOOD_DOCS), v.n_features) + assert X.dtype == v.dtype + + # ngrams generate more non zeros + ngrams_nnz = X.nnz + assert ngrams_nnz > token_nnz + assert ngrams_nnz < 2 * token_nnz + + # makes the feature values bounded + assert np.min(X.data) > -1 + assert np.max(X.data) < 1 + + # Check that the rows are normalized + for i in range(X.shape[0]): + assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0) + + +def test_feature_names(): + cv = CountVectorizer(max_df=0.5) + + # test for Value error on unfitted/empty vocabulary + with pytest.raises(ValueError): + cv.get_feature_names_out() + assert not cv.fixed_vocabulary_ + + # test for vocabulary learned from data + X = cv.fit_transform(ALL_FOOD_DOCS) + n_samples, n_features = X.shape + assert len(cv.vocabulary_) == n_features + + feature_names = cv.get_feature_names_out() + assert isinstance(feature_names, np.ndarray) + assert feature_names.dtype == object + + assert len(feature_names) == n_features + assert_array_equal( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ], + feature_names, + ) + + for idx, name in enumerate(feature_names): + assert idx == cv.vocabulary_.get(name) + + # test for custom vocabulary + vocab = [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + + cv = CountVectorizer(vocabulary=vocab) + feature_names = cv.get_feature_names_out() + assert_array_equal( + [ 
+ "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ], + feature_names, + ) + assert cv.fixed_vocabulary_ + + for idx, name in enumerate(feature_names): + assert idx == cv.vocabulary_.get(name) + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer)) +def test_vectorizer_max_features(Vectorizer): + expected_vocabulary = {"burger", "beer", "salad", "pizza"} + expected_stop_words = { + "celeri", + "tomato", + "copyright", + "coke", + "sparkling", + "water", + "the", + } + + # test bounded number of extracted features + vectorizer = Vectorizer(max_df=0.6, max_features=4) + vectorizer.fit(ALL_FOOD_DOCS) + assert set(vectorizer.vocabulary_) == expected_vocabulary + assert vectorizer.stop_words_ == expected_stop_words + + +def test_count_vectorizer_max_features(): + # Regression test: max_features didn't work correctly in 0.14. + + cv_1 = CountVectorizer(max_features=1) + cv_3 = CountVectorizer(max_features=3) + cv_None = CountVectorizer(max_features=None) + + counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) + + features_1 = cv_1.get_feature_names_out() + features_3 = cv_3.get_feature_names_out() + features_None = cv_None.get_feature_names_out() + + # The most common feature is "the", with frequency 7. + assert 7 == counts_1.max() + assert 7 == counts_3.max() + assert 7 == counts_None.max() + + # The most common feature should be the same + assert "the" == features_1[np.argmax(counts_1)] + assert "the" == features_3[np.argmax(counts_3)] + assert "the" == features_None[np.argmax(counts_None)] + + +def test_vectorizer_max_df(): + test_data = ["abc", "dea", "eat"] + vect = CountVectorizer(analyzer="char", max_df=1.0) + vect.fit(test_data) + assert "a" in vect.vocabulary_.keys() + assert len(vect.vocabulary_.keys()) == 6 + assert len(vect.stop_words_) == 0 + + vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5 + vect.fit(test_data) + assert "a" not in vect.vocabulary_.keys() # {ae} ignored + assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain + assert "a" in vect.stop_words_ + assert len(vect.stop_words_) == 2 + + vect.max_df = 1 + vect.fit(test_data) + assert "a" not in vect.vocabulary_.keys() # {ae} ignored + assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain + assert "a" in vect.stop_words_ + assert len(vect.stop_words_) == 2 + + +def test_vectorizer_min_df(): + test_data = ["abc", "dea", "eat"] + vect = CountVectorizer(analyzer="char", min_df=1) + vect.fit(test_data) + assert "a" in vect.vocabulary_.keys() + assert len(vect.vocabulary_.keys()) == 6 + assert len(vect.stop_words_) == 0 + + vect.min_df = 2 + vect.fit(test_data) + assert "c" not in vect.vocabulary_.keys() # {bcdt} ignored + assert len(vect.vocabulary_.keys()) == 2 # {ae} remain + assert "c" in vect.stop_words_ + assert len(vect.stop_words_) == 4 + + vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4 + vect.fit(test_data) + assert "c" not in vect.vocabulary_.keys() # {bcdet} ignored + assert len(vect.vocabulary_.keys()) == 1 # {a} remains + assert "c" in vect.stop_words_ + assert len(vect.stop_words_) == 5 + + +def test_count_binary_occurrences(): + # by default multiple occurrences are counted as longs + test_data = ["aaabc", "abbde"] + vect = CountVectorizer(analyzer="char", max_df=1.0) + X = vect.fit_transform(test_data).toarray() + assert_array_equal(["a", "b", "c", "d", "e"], 
vect.get_feature_names_out()) + assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X) + + # using boolean features, we can fetch the binary occurrence info + # instead. + vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True) + X = vect.fit_transform(test_data).toarray() + assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X) + + # check the ability to change the dtype + vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32) + X_sparse = vect.fit_transform(test_data) + assert X_sparse.dtype == np.float32 + + +@fails_if_pypy +def test_hashed_binary_occurrences(): + # by default multiple occurrences are counted as longs + test_data = ["aaabc", "abbde"] + vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None) + X = vect.transform(test_data) + assert np.max(X[0:1].data) == 3 + assert np.max(X[1:2].data) == 2 + assert X.dtype == np.float64 + + # using boolean features, we can fetch the binary occurrence info + # instead. + vect = HashingVectorizer( + analyzer="char", alternate_sign=False, binary=True, norm=None + ) + X = vect.transform(test_data) + assert np.max(X.data) == 1 + assert X.dtype == np.float64 + + # check the ability to change the dtype + vect = HashingVectorizer( + analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64 + ) + X = vect.transform(test_data) + assert X.dtype == np.float64 + + +@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer)) +def test_vectorizer_inverse_transform(Vectorizer): + # raw documents + data = ALL_FOOD_DOCS + vectorizer = Vectorizer() + transformed_data = vectorizer.fit_transform(data) + inversed_data = vectorizer.inverse_transform(transformed_data) + assert isinstance(inversed_data, list) + + analyze = vectorizer.build_analyzer() + for doc, inversed_terms in zip(data, inversed_data): + terms = np.sort(np.unique(analyze(doc))) + inversed_terms = np.sort(np.unique(inversed_terms)) + assert_array_equal(terms, inversed_terms) + + assert sparse.issparse(transformed_data) + assert transformed_data.format == "csr" + + # Test that inverse_transform also works with numpy arrays and + # scipy + transformed_data2 = transformed_data.toarray() + inversed_data2 = vectorizer.inverse_transform(transformed_data2) + for terms, terms2 in zip(inversed_data, inversed_data2): + assert_array_equal(np.sort(terms), np.sort(terms2)) + + # Check that inverse_transform also works on non CSR sparse data: + transformed_data3 = transformed_data.tocsc() + inversed_data3 = vectorizer.inverse_transform(transformed_data3) + for terms, terms3 in zip(inversed_data, inversed_data3): + assert_array_equal(np.sort(terms), np.sort(terms3)) + + +def test_count_vectorizer_pipeline_grid_selection(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + # split the dataset for model development and final evaluation + train_data, test_data, target_train, target_test = train_test_split( + data, target, test_size=0.2, random_state=0 + ) + + pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + parameters = { + "vect__ngram_range": [(1, 1), (1, 2)], + "svc__loss": ("hinge", "squared_hinge"), + } + + # find the best parameters for both the feature extraction and the + # classifier + grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3) + + # Check that the best model found by grid search is 100% correct on the + # held out 
evaluation set. + pred = grid_search.fit(train_data, target_train).predict(test_data) + assert_array_equal(pred, target_test) + + # on this toy dataset bigram representation which is used in the last of + # the grid_search is considered the best estimator since they all converge + # to 100% accuracy models + assert grid_search.best_score_ == 1.0 + best_vectorizer = grid_search.best_estimator_.named_steps["vect"] + assert best_vectorizer.ngram_range == (1, 1) + + +def test_vectorizer_pipeline_grid_selection(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + # split the dataset for model development and final evaluation + train_data, test_data, target_train, target_test = train_test_split( + data, target, test_size=0.1, random_state=0 + ) + + pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + parameters = { + "vect__ngram_range": [(1, 1), (1, 2)], + "vect__norm": ("l1", "l2"), + "svc__loss": ("hinge", "squared_hinge"), + } + + # find the best parameters for both the feature extraction and the + # classifier + grid_search = GridSearchCV(pipeline, parameters, n_jobs=1) + + # Check that the best model found by grid search is 100% correct on the + # held out evaluation set. + pred = grid_search.fit(train_data, target_train).predict(test_data) + assert_array_equal(pred, target_test) + + # on this toy dataset bigram representation which is used in the last of + # the grid_search is considered the best estimator since they all converge + # to 100% accuracy models + assert grid_search.best_score_ == 1.0 + best_vectorizer = grid_search.best_estimator_.named_steps["vect"] + assert best_vectorizer.ngram_range == (1, 1) + assert best_vectorizer.norm == "l2" + assert not best_vectorizer.fixed_vocabulary_ + + +def test_vectorizer_pipeline_cross_validation(): + # raw documents + data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS + + # label junk food as -1, the others as +1 + target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) + + pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))]) + + cv_scores = cross_val_score(pipeline, data, target, cv=3) + assert_array_equal(cv_scores, [1.0, 1.0, 1.0]) + + +@fails_if_pypy +def test_vectorizer_unicode(): + # tests that the count vectorizer works with cyrillic. + document = ( + "Машинное обучение — обширный подраздел искусственного " + "интеллекта, изучающий методы построения алгоритмов, " + "способных обучаться." 
+ ) + + vect = CountVectorizer() + X_counted = vect.fit_transform([document]) + assert X_counted.shape == (1, 12) + + vect = HashingVectorizer(norm=None, alternate_sign=False) + X_hashed = vect.transform([document]) + assert X_hashed.shape == (1, 2**20) + + # No collisions on such a small dataset + assert X_counted.nnz == X_hashed.nnz + + # When norm is None and not alternate_sign, the tokens are counted up to + # collisions + assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data)) + + +def test_tfidf_vectorizer_with_fixed_vocabulary(): + # non regression smoke test for inheritance issues + vocabulary = ["pizza", "celeri"] + vect = TfidfVectorizer(vocabulary=vocabulary) + X_1 = vect.fit_transform(ALL_FOOD_DOCS) + X_2 = vect.transform(ALL_FOOD_DOCS) + assert_array_almost_equal(X_1.toarray(), X_2.toarray()) + assert vect.fixed_vocabulary_ + + +def test_pickling_vectorizer(): + instances = [ + HashingVectorizer(), + HashingVectorizer(norm="l1"), + HashingVectorizer(binary=True), + HashingVectorizer(ngram_range=(1, 2)), + CountVectorizer(), + CountVectorizer(preprocessor=strip_tags), + CountVectorizer(analyzer=lazy_analyze), + CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), + CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), + TfidfVectorizer(), + TfidfVectorizer(analyzer=lazy_analyze), + TfidfVectorizer().fit(JUNK_FOOD_DOCS), + ] + + for orig in instances: + s = pickle.dumps(orig) + copy = pickle.loads(s) + assert type(copy) == orig.__class__ + assert copy.get_params() == orig.get_params() + if IS_PYPY and isinstance(orig, HashingVectorizer): + continue + else: + assert_allclose_dense_sparse( + copy.fit_transform(JUNK_FOOD_DOCS), + orig.fit_transform(JUNK_FOOD_DOCS), + ) + + +@pytest.mark.parametrize( + "factory", + [ + CountVectorizer.build_analyzer, + CountVectorizer.build_preprocessor, + CountVectorizer.build_tokenizer, + ], +) +def test_pickling_built_processors(factory): + """Tokenizers cannot be pickled + https://github.com/scikit-learn/scikit-learn/issues/12833 + """ + vec = CountVectorizer() + function = factory(vec) + text = "J'ai mangé du kangourou ce midi, c'était pas très bon." 
+ roundtripped_function = pickle.loads(pickle.dumps(function)) + expected = function(text) + result = roundtripped_function(text) + assert result == expected + + +def test_countvectorizer_vocab_sets_when_pickling(): + # ensure that vocabulary of type set is coerced to a list to + # preserve iteration ordering after deserialization + rng = np.random.RandomState(0) + vocab_words = np.array( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + ) + for x in range(0, 100): + vocab_set = set(rng.choice(vocab_words, size=5, replace=False)) + cv = CountVectorizer(vocabulary=vocab_set) + unpickled_cv = pickle.loads(pickle.dumps(cv)) + cv.fit(ALL_FOOD_DOCS) + unpickled_cv.fit(ALL_FOOD_DOCS) + assert_array_equal( + cv.get_feature_names_out(), unpickled_cv.get_feature_names_out() + ) + + +def test_countvectorizer_vocab_dicts_when_pickling(): + rng = np.random.RandomState(0) + vocab_words = np.array( + [ + "beer", + "burger", + "celeri", + "coke", + "pizza", + "salad", + "sparkling", + "tomato", + "water", + ] + ) + for x in range(0, 100): + vocab_dict = dict() + words = rng.choice(vocab_words, size=5, replace=False) + for y in range(0, 5): + vocab_dict[words[y]] = y + cv = CountVectorizer(vocabulary=vocab_dict) + unpickled_cv = pickle.loads(pickle.dumps(cv)) + cv.fit(ALL_FOOD_DOCS) + unpickled_cv.fit(ALL_FOOD_DOCS) + assert_array_equal( + cv.get_feature_names_out(), unpickled_cv.get_feature_names_out() + ) + + +def test_stop_words_removal(): + # Ensure that deleting the stop_words_ attribute doesn't affect transform + + fitted_vectorizers = ( + TfidfVectorizer().fit(JUNK_FOOD_DOCS), + CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), + CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), + ) + + for vect in fitted_vectorizers: + vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + vect.stop_words_ = None + stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + delattr(vect, "stop_words_") + stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray() + + assert_array_equal(stop_None_transform, vect_transform) + assert_array_equal(stop_del_transform, vect_transform) + + +def test_pickling_transformer(): + X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) + orig = TfidfTransformer().fit(X) + s = pickle.dumps(orig) + copy = pickle.loads(s) + assert type(copy) == orig.__class__ + assert_array_equal(copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray()) + + +def test_transformer_idf_setter(): + X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) + orig = TfidfTransformer().fit(X) + copy = TfidfTransformer() + copy.idf_ = orig.idf_ + assert_array_equal(copy.transform(X).toarray(), orig.transform(X).toarray()) + + +def test_tfidf_vectorizer_setter(): + orig = TfidfVectorizer(use_idf=True) + orig.fit(JUNK_FOOD_DOCS) + copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True) + copy.idf_ = orig.idf_ + assert_array_equal( + copy.transform(JUNK_FOOD_DOCS).toarray(), + orig.transform(JUNK_FOOD_DOCS).toarray(), + ) + # `idf_` cannot be set with `use_idf=False` + copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=False) + err_msg = "`idf_` cannot be set when `user_idf=False`." 
+ with pytest.raises(ValueError, match=err_msg): + copy.idf_ = orig.idf_ + + +def test_tfidfvectorizer_invalid_idf_attr(): + vect = TfidfVectorizer(use_idf=True) + vect.fit(JUNK_FOOD_DOCS) + copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True) + expected_idf_len = len(vect.idf_) + invalid_idf = [1.0] * (expected_idf_len + 1) + with pytest.raises(ValueError): + setattr(copy, "idf_", invalid_idf) + + +def test_non_unique_vocab(): + vocab = ["a", "b", "c", "a", "a"] + vect = CountVectorizer(vocabulary=vocab) + with pytest.raises(ValueError): + vect.fit([]) + + +@fails_if_pypy +def test_hashingvectorizer_nan_in_docs(): + # np.nan can appear when using pandas to load text fields from a csv file + # with missing values. + message = "np.nan is an invalid document, expected byte or unicode string." + exception = ValueError + + def func(): + hv = HashingVectorizer() + hv.fit_transform(["hello world", np.nan, "hello hello"]) + + with pytest.raises(exception, match=message): + func() + + +def test_tfidfvectorizer_binary(): + # Non-regression test: TfidfVectorizer used to ignore its "binary" param. + v = TfidfVectorizer(binary=True, use_idf=False, norm=None) + assert v.binary + + X = v.fit_transform(["hello world", "hello hello"]).toarray() + assert_array_equal(X.ravel(), [1, 1, 1, 0]) + X2 = v.transform(["hello world", "hello hello"]).toarray() + assert_array_equal(X2.ravel(), [1, 1, 1, 0]) + + +def test_tfidfvectorizer_export_idf(): + vect = TfidfVectorizer(use_idf=True) + vect.fit(JUNK_FOOD_DOCS) + assert_array_almost_equal(vect.idf_, vect._tfidf.idf_) + + +def test_vectorizer_vocab_clone(): + vect_vocab = TfidfVectorizer(vocabulary=["the"]) + vect_vocab_clone = clone(vect_vocab) + vect_vocab.fit(ALL_FOOD_DOCS) + vect_vocab_clone.fit(ALL_FOOD_DOCS) + assert vect_vocab_clone.vocabulary_ == vect_vocab.vocabulary_ + + +@pytest.mark.parametrize( + "Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer) +) +def test_vectorizer_string_object_as_input(Vectorizer): + message = "Iterable over raw text documents expected, string object received." 
+ vec = Vectorizer() + + with pytest.raises(ValueError, match=message): + vec.fit_transform("hello world!") + + with pytest.raises(ValueError, match=message): + vec.fit("hello world!") + vec.fit(["some text", "some other text"]) + + with pytest.raises(ValueError, match=message): + vec.transform("hello world!") + + +@pytest.mark.parametrize("X_dtype", [np.float32, np.float64]) +def test_tfidf_transformer_type(X_dtype): + X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42) + X_trans = TfidfTransformer().fit_transform(X) + assert X_trans.dtype == X.dtype + + +@pytest.mark.parametrize( + "csc_container, csr_container", product(CSC_CONTAINERS, CSR_CONTAINERS) +) +def test_tfidf_transformer_sparse(csc_container, csr_container): + X = sparse.rand(10, 20000, dtype=np.float64, random_state=42) + X_csc = csc_container(X) + X_csr = csr_container(X) + + X_trans_csc = TfidfTransformer().fit_transform(X_csc) + X_trans_csr = TfidfTransformer().fit_transform(X_csr) + assert_allclose_dense_sparse(X_trans_csc, X_trans_csr) + assert X_trans_csc.format == X_trans_csr.format + + +@pytest.mark.parametrize( + "vectorizer_dtype, output_dtype, warning_expected", + [ + (np.int32, np.float64, True), + (np.int64, np.float64, True), + (np.float32, np.float32, False), + (np.float64, np.float64, False), + ], +) +def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype, warning_expected): + X = np.array(["numpy", "scipy", "sklearn"]) + vectorizer = TfidfVectorizer(dtype=vectorizer_dtype) + + warning_msg_match = "'dtype' should be used." + if warning_expected: + with pytest.warns(UserWarning, match=warning_msg_match): + X_idf = vectorizer.fit_transform(X) + else: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + X_idf = vectorizer.fit_transform(X) + assert X_idf.dtype == output_dtype + + +@pytest.mark.parametrize( + "vec", + [ + HashingVectorizer(ngram_range=(2, 1)), + CountVectorizer(ngram_range=(2, 1)), + TfidfVectorizer(ngram_range=(2, 1)), + ], +) +def test_vectorizers_invalid_ngram_range(vec): + # vectorizers could be initialized with invalid ngram range + # test for raising error message + invalid_range = vec.ngram_range + message = re.escape( + f"Invalid value for ngram_range={invalid_range} " + "lower boundary larger than the upper boundary." + ) + if isinstance(vec, HashingVectorizer) and IS_PYPY: + pytest.xfail(reason="HashingVectorizer is not supported on PyPy") + + with pytest.raises(ValueError, match=message): + vec.fit(["good news everyone"]) + + with pytest.raises(ValueError, match=message): + vec.fit_transform(["good news everyone"]) + + if isinstance(vec, HashingVectorizer): + with pytest.raises(ValueError, match=message): + vec.transform(["good news everyone"]) + + +def _check_stop_words_consistency(estimator): + stop_words = estimator.get_stop_words() + tokenize = estimator.build_tokenizer() + preprocess = estimator.build_preprocessor() + return estimator._check_stop_words_consistency(stop_words, preprocess, tokenize) + + +@fails_if_pypy +def test_vectorizer_stop_words_inconsistent(): + lstr = r"\['and', 'll', 've'\]" + message = ( + "Your stop_words may be inconsistent with your " + "preprocessing. Tokenizing the stop words generated " + "tokens %s not in stop_words." 
% lstr + ) + for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]: + vec.set_params(stop_words=["you've", "you", "you'll", "AND"]) + with pytest.warns(UserWarning, match=message): + vec.fit_transform(["hello world"]) + # reset stop word validation + del vec._stop_words_id + assert _check_stop_words_consistency(vec) is False + + # Only one warning per stop list + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + vec.fit_transform(["hello world"]) + assert _check_stop_words_consistency(vec) is None + + # Test caching of inconsistency assessment + vec.set_params(stop_words=["you've", "you", "you'll", "blah", "AND"]) + with pytest.warns(UserWarning, match=message): + vec.fit_transform(["hello world"]) + + +@skip_if_32bit +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_countvectorizer_sort_features_64bit_sparse_indices(csr_container): + """ + Check that CountVectorizer._sort_features preserves the dtype of its sparse + feature matrix. + + This test is skipped on 32bit platforms, see: + https://github.com/scikit-learn/scikit-learn/pull/11295 + for more details. + """ + + X = csr_container((5, 5), dtype=np.int64) + + # force indices and indptr to int64. + INDICES_DTYPE = np.int64 + X.indices = X.indices.astype(INDICES_DTYPE) + X.indptr = X.indptr.astype(INDICES_DTYPE) + + vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2} + + Xs = CountVectorizer()._sort_features(X, vocabulary) + + assert INDICES_DTYPE == Xs.indices.dtype + + +@fails_if_pypy +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +def test_stop_word_validation_custom_preprocessor(Estimator): + data = [{"text": "some text"}] + + vec = Estimator() + assert _check_stop_words_consistency(vec) is True + + vec = Estimator(preprocessor=lambda x: x["text"], stop_words=["and"]) + assert _check_stop_words_consistency(vec) == "error" + # checks are cached + assert _check_stop_words_consistency(vec) is None + vec.fit_transform(data) + + class CustomEstimator(Estimator): + def build_preprocessor(self): + return lambda x: x["text"] + + vec = CustomEstimator(stop_words=["and"]) + assert _check_stop_words_consistency(vec) == "error" + + vec = Estimator( + tokenizer=lambda doc: re.compile(r"\w{1,}").findall(doc), stop_words=["and"] + ) + assert _check_stop_words_consistency(vec) is True + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +@pytest.mark.parametrize( + "input_type, err_type, err_msg", + [ + ("filename", FileNotFoundError, ""), + ("file", AttributeError, "'str' object has no attribute 'read'"), + ], +) +def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg): + if issubclass(Estimator, HashingVectorizer) and IS_PYPY: + pytest.xfail("HashingVectorizer is not supported on PyPy") + data = ["this is text, not file or filename"] + with pytest.raises(err_type, match=err_msg): + Estimator(analyzer=lambda x: x.split(), input=input_type).fit_transform(data) + + +@pytest.mark.parametrize( + "Estimator", + [ + CountVectorizer, + TfidfVectorizer, + pytest.param(HashingVectorizer, marks=fails_if_pypy), + ], +) +@pytest.mark.parametrize( + "analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()] +) +@pytest.mark.parametrize("input_type", ["file", "filename"]) +def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type): + data = ["this is text, not file or filename"] + with pytest.raises((FileNotFoundError, AttributeError)): + 
Estimator(analyzer=analyzer, input=input_type).fit_transform(data) + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer] +) +def test_callable_analyzer_reraise_error(tmpdir, Estimator): + # check if a custom exception from the analyzer is shown to the user + def analyzer(doc): + raise Exception("testing") + + if issubclass(Estimator, HashingVectorizer) and IS_PYPY: + pytest.xfail("HashingVectorizer is not supported on PyPy") + + f = tmpdir.join("file.txt") + f.write("sample content\n") + + with pytest.raises(Exception, match="testing"): + Estimator(analyzer=analyzer, input="file").fit_transform([f]) + + +@pytest.mark.parametrize( + "Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer] +) +@pytest.mark.parametrize( + ( + "stop_words, tokenizer, preprocessor, ngram_range, token_pattern," + "analyzer, unused_name, ovrd_name, ovrd_msg" + ), + [ + ( + ["you've", "you'll"], + None, + None, + (1, 1), + None, + "char", + "'stop_words'", + "'analyzer'", + "!= 'word'", + ), + ( + None, + lambda s: s.split(), + None, + (1, 1), + None, + "char", + "'tokenizer'", + "'analyzer'", + "!= 'word'", + ), + ( + None, + lambda s: s.split(), + None, + (1, 1), + r"\w+", + "word", + "'token_pattern'", + "'tokenizer'", + "is not None", + ), + ( + None, + None, + lambda s: s.upper(), + (1, 1), + r"\w+", + lambda s: s.upper(), + "'preprocessor'", + "'analyzer'", + "is callable", + ), + ( + None, + None, + None, + (1, 2), + None, + lambda s: s.upper(), + "'ngram_range'", + "'analyzer'", + "is callable", + ), + ( + None, + None, + None, + (1, 1), + r"\w+", + "char", + "'token_pattern'", + "'analyzer'", + "!= 'word'", + ), + ], +) +def test_unused_parameters_warn( + Vectorizer, + stop_words, + tokenizer, + preprocessor, + ngram_range, + token_pattern, + analyzer, + unused_name, + ovrd_name, + ovrd_msg, +): + train_data = JUNK_FOOD_DOCS + # setting parameter and checking for corresponding warning messages + vect = Vectorizer() + vect.set_params( + stop_words=stop_words, + tokenizer=tokenizer, + preprocessor=preprocessor, + ngram_range=ngram_range, + token_pattern=token_pattern, + analyzer=analyzer, + ) + msg = "The parameter %s will not be used since %s %s" % ( + unused_name, + ovrd_name, + ovrd_msg, + ) + with pytest.warns(UserWarning, match=msg): + vect.fit(train_data) + + +@pytest.mark.parametrize( + "Vectorizer, X", + ( + (HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]), + (CountVectorizer, JUNK_FOOD_DOCS), + ), +) +def test_n_features_in(Vectorizer, X): + # For vectorizers, n_features_in_ does not make sense + vectorizer = Vectorizer() + assert not hasattr(vectorizer, "n_features_in_") + vectorizer.fit(X) + assert not hasattr(vectorizer, "n_features_in_") + + +def test_tie_breaking_sample_order_invariance(): + # Checks the sample order invariance when setting max_features + # non-regression test for #17939 + vec = CountVectorizer(max_features=1) + vocab1 = vec.fit(["hello", "world"]).vocabulary_ + vocab2 = vec.fit(["world", "hello"]).vocabulary_ + assert vocab1 == vocab2 + + +@fails_if_pypy +def test_nonnegative_hashing_vectorizer_result_indices(): + # add test for pr 19035 + hashing = HashingVectorizer(n_features=1000000, ngram_range=(2, 3)) + indices = hashing.transform(["22pcs efuture"]).indices + assert indices[0] >= 0 + + +@pytest.mark.parametrize( + "Estimator", [CountVectorizer, TfidfVectorizer, TfidfTransformer, HashingVectorizer] +) +def test_vectorizers_do_not_have_set_output(Estimator): + """Check that vectorizers do not define 
set_output.""" + est = Estimator() + assert not hasattr(est, "set_output") diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_extraction/text.py b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/text.py new file mode 100644 index 0000000000000000000000000000000000000000..29104c29e74acef6bcad1f1cc71c24757ea951a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_extraction/text.py @@ -0,0 +1,2166 @@ +# Authors: Olivier Grisel +# Mathieu Blondel +# Lars Buitinck +# Robert Layton +# Jochen Wersdörfer +# Roman Sinayev +# +# License: BSD 3 clause +""" +The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to +build feature vectors from text documents. +""" + +import array +import re +import unicodedata +import warnings +from collections import defaultdict +from collections.abc import Mapping +from functools import partial +from numbers import Integral +from operator import itemgetter + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context +from ..exceptions import NotFittedError +from ..preprocessing import normalize +from ..utils import _IS_32BIT +from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions +from ..utils.validation import FLOAT_DTYPES, check_array, check_is_fitted +from ._hash import FeatureHasher +from ._stop_words import ENGLISH_STOP_WORDS + +__all__ = [ + "HashingVectorizer", + "CountVectorizer", + "ENGLISH_STOP_WORDS", + "TfidfTransformer", + "TfidfVectorizer", + "strip_accents_ascii", + "strip_accents_unicode", + "strip_tags", +] + + +def _preprocess(doc, accent_function=None, lower=False): + """Chain together an optional series of text preprocessing steps to + apply to a document. + + Parameters + ---------- + doc: str + The string to preprocess + accent_function: callable, default=None + Function for handling accented characters. Common strategies include + normalizing and removing. + lower: bool, default=False + Whether to use str.lower to lowercase all of the text + + Returns + ------- + doc: str + preprocessed string + """ + if lower: + doc = doc.lower() + if accent_function is not None: + doc = accent_function(doc) + return doc + + +def _analyze( + doc, + analyzer=None, + tokenizer=None, + ngrams=None, + preprocessor=None, + decoder=None, + stop_words=None, +): + """Chain together an optional series of text processing steps to go from + a single document to ngrams, with or without tokenizing or preprocessing. + + If analyzer is used, only the decoder argument is used, as the analyzer is + intended to replace the preprocessor, tokenizer, and ngrams steps. + + Parameters + ---------- + analyzer: callable, default=None + tokenizer: callable, default=None + ngrams: callable, default=None + preprocessor: callable, default=None + decoder: callable, default=None + stop_words: list, default=None + + Returns + ------- + ngrams: list + A sequence of tokens, possibly with pairs, triples, etc. + """ + + if decoder is not None: + doc = decoder(doc) + if analyzer is not None: + doc = analyzer(doc) + else: + if preprocessor is not None: + doc = preprocessor(doc) + if tokenizer is not None: + doc = tokenizer(doc) + if ngrams is not None: + if stop_words is not None: + doc = ngrams(doc, stop_words) + else: + doc = ngrams(doc) + return doc + + +def strip_accents_unicode(s): + """Transform accentuated unicode symbols into their simple counterpart. 
+ + Warning: the python-level loop and join operations make this + implementation 20 times slower than the strip_accents_ascii basic + normalization. + + Parameters + ---------- + s : str + The string to strip. + + Returns + ------- + s : str + The stripped string. + + See Also + -------- + strip_accents_ascii : Remove accentuated char for any unicode symbol that + has a direct ASCII equivalent. + """ + try: + # If `s` is ASCII-compatible, then it does not contain any accented + # characters and we can avoid an expensive list comprehension + s.encode("ASCII", errors="strict") + return s + except UnicodeEncodeError: + normalized = unicodedata.normalize("NFKD", s) + return "".join([c for c in normalized if not unicodedata.combining(c)]) + + +def strip_accents_ascii(s): + """Transform accentuated unicode symbols into ascii or nothing. + + Warning: this solution is only suited for languages that have a direct + transliteration to ASCII symbols. + + Parameters + ---------- + s : str + The string to strip. + + Returns + ------- + s : str + The stripped string. + + See Also + -------- + strip_accents_unicode : Remove accentuated char for any unicode symbol. + """ + nkfd_form = unicodedata.normalize("NFKD", s) + return nkfd_form.encode("ASCII", "ignore").decode("ASCII") + + +def strip_tags(s): + """Basic regexp based HTML / XML tag stripper function. + + For serious HTML/XML preprocessing you should rather use an external + library such as lxml or BeautifulSoup. + + Parameters + ---------- + s : str + The string to strip. + + Returns + ------- + s : str + The stripped string. + """ + return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) + + +def _check_stop_list(stop): + if stop == "english": + return ENGLISH_STOP_WORDS + elif isinstance(stop, str): + raise ValueError("not a built-in stop list: %s" % stop) + elif stop is None: + return None + else: # assume it's a collection + return frozenset(stop) + + +class _VectorizerMixin: + """Provides common code for text vectorizers (tokenization logic).""" + + _white_spaces = re.compile(r"\s\s+") + + def decode(self, doc): + """Decode the input into a string of unicode symbols. + + The decoding strategy depends on the vectorizer parameters. + + Parameters + ---------- + doc : bytes or str + The string to decode. + + Returns + ------- + doc: str + A string of unicode symbols. + """ + if self.input == "filename": + with open(doc, "rb") as fh: + doc = fh.read() + + elif self.input == "file": + doc = doc.read() + + if isinstance(doc, bytes): + doc = doc.decode(self.encoding, self.decode_error) + + if doc is np.nan: + raise ValueError( + "np.nan is an invalid document, expected byte or unicode string." 
+ ) + + return doc + + def _word_ngrams(self, tokens, stop_words=None): + """Turn tokens into a sequence of n-grams after stop words filtering""" + # handle stop words + if stop_words is not None: + tokens = [w for w in tokens if w not in stop_words] + + # handle token n-grams + min_n, max_n = self.ngram_range + if max_n != 1: + original_tokens = tokens + if min_n == 1: + # no need to do any slicing for unigrams + # just iterate through the original tokens + tokens = list(original_tokens) + min_n += 1 + else: + tokens = [] + + n_original_tokens = len(original_tokens) + + # bind method outside of loop to reduce overhead + tokens_append = tokens.append + space_join = " ".join + + for n in range(min_n, min(max_n + 1, n_original_tokens + 1)): + for i in range(n_original_tokens - n + 1): + tokens_append(space_join(original_tokens[i : i + n])) + + return tokens + + def _char_ngrams(self, text_document): + """Tokenize text_document into a sequence of character n-grams""" + # normalize white spaces + text_document = self._white_spaces.sub(" ", text_document) + + text_len = len(text_document) + min_n, max_n = self.ngram_range + if min_n == 1: + # no need to do any slicing for unigrams + # iterate through the string + ngrams = list(text_document) + min_n += 1 + else: + ngrams = [] + + # bind method outside of loop to reduce overhead + ngrams_append = ngrams.append + + for n in range(min_n, min(max_n + 1, text_len + 1)): + for i in range(text_len - n + 1): + ngrams_append(text_document[i : i + n]) + return ngrams + + def _char_wb_ngrams(self, text_document): + """Whitespace sensitive char-n-gram tokenization. + + Tokenize text_document into a sequence of character n-grams + operating only inside word boundaries. n-grams at the edges + of words are padded with space.""" + # normalize white spaces + text_document = self._white_spaces.sub(" ", text_document) + + min_n, max_n = self.ngram_range + ngrams = [] + + # bind method outside of loop to reduce overhead + ngrams_append = ngrams.append + + for w in text_document.split(): + w = " " + w + " " + w_len = len(w) + for n in range(min_n, max_n + 1): + offset = 0 + ngrams_append(w[offset : offset + n]) + while offset + n < w_len: + offset += 1 + ngrams_append(w[offset : offset + n]) + if offset == 0: # count a short word (w_len < n) only once + break + return ngrams + + def build_preprocessor(self): + """Return a function to preprocess the text before tokenization. + + Returns + ------- + preprocessor: callable + A function to preprocess the text before tokenization. + """ + if self.preprocessor is not None: + return self.preprocessor + + # accent stripping + if not self.strip_accents: + strip_accents = None + elif callable(self.strip_accents): + strip_accents = self.strip_accents + elif self.strip_accents == "ascii": + strip_accents = strip_accents_ascii + elif self.strip_accents == "unicode": + strip_accents = strip_accents_unicode + else: + raise ValueError( + 'Invalid value for "strip_accents": %s' % self.strip_accents + ) + + return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase) + + def build_tokenizer(self): + """Return a function that splits a string into a sequence of tokens. + + Returns + ------- + tokenizer: callable + A function to split a string into a sequence of tokens. + """ + if self.tokenizer is not None: + return self.tokenizer + token_pattern = re.compile(self.token_pattern) + + if token_pattern.groups > 1: + raise ValueError( + "More than 1 capturing group in token pattern. 
Only a single " + "group should be captured." + ) + + return token_pattern.findall + + def get_stop_words(self): + """Build or fetch the effective stop words list. + + Returns + ------- + stop_words: list or None + A list of stop words. + """ + return _check_stop_list(self.stop_words) + + def _check_stop_words_consistency(self, stop_words, preprocess, tokenize): + """Check if stop words are consistent + + Returns + ------- + is_consistent : True if stop words are consistent with the preprocessor + and tokenizer, False if they are not, None if the check + was previously performed, "error" if it could not be + performed (e.g. because of the use of a custom + preprocessor / tokenizer) + """ + if id(self.stop_words) == getattr(self, "_stop_words_id", None): + # Stop words are were previously validated + return None + + # NB: stop_words is validated, unlike self.stop_words + try: + inconsistent = set() + for w in stop_words or (): + tokens = list(tokenize(preprocess(w))) + for token in tokens: + if token not in stop_words: + inconsistent.add(token) + self._stop_words_id = id(self.stop_words) + + if inconsistent: + warnings.warn( + "Your stop_words may be inconsistent with " + "your preprocessing. Tokenizing the stop " + "words generated tokens %r not in " + "stop_words." + % sorted(inconsistent) + ) + return not inconsistent + except Exception: + # Failed to check stop words consistency (e.g. because a custom + # preprocessor or tokenizer was used) + self._stop_words_id = id(self.stop_words) + return "error" + + def build_analyzer(self): + """Return a callable to process input data. + + The callable handles preprocessing, tokenization, and n-grams generation. + + Returns + ------- + analyzer: callable + A function to handle preprocessing, tokenization + and n-grams generation. + """ + + if callable(self.analyzer): + return partial(_analyze, analyzer=self.analyzer, decoder=self.decode) + + preprocess = self.build_preprocessor() + + if self.analyzer == "char": + return partial( + _analyze, + ngrams=self._char_ngrams, + preprocessor=preprocess, + decoder=self.decode, + ) + + elif self.analyzer == "char_wb": + return partial( + _analyze, + ngrams=self._char_wb_ngrams, + preprocessor=preprocess, + decoder=self.decode, + ) + + elif self.analyzer == "word": + stop_words = self.get_stop_words() + tokenize = self.build_tokenizer() + self._check_stop_words_consistency(stop_words, preprocess, tokenize) + return partial( + _analyze, + ngrams=self._word_ngrams, + tokenizer=tokenize, + preprocessor=preprocess, + decoder=self.decode, + stop_words=stop_words, + ) + + else: + raise ValueError( + "%s is not a valid tokenization scheme/analyzer" % self.analyzer + ) + + def _validate_vocabulary(self): + vocabulary = self.vocabulary + if vocabulary is not None: + if isinstance(vocabulary, set): + vocabulary = sorted(vocabulary) + if not isinstance(vocabulary, Mapping): + vocab = {} + for i, t in enumerate(vocabulary): + if vocab.setdefault(t, i) != i: + msg = "Duplicate term in vocabulary: %r" % t + raise ValueError(msg) + vocabulary = vocab + else: + indices = set(vocabulary.values()) + if len(indices) != len(vocabulary): + raise ValueError("Vocabulary contains repeated indices.") + for i in range(len(vocabulary)): + if i not in indices: + msg = "Vocabulary of size %d doesn't contain index %d." 
% ( + len(vocabulary), + i, + ) + raise ValueError(msg) + if not vocabulary: + raise ValueError("empty vocabulary passed to fit") + self.fixed_vocabulary_ = True + self.vocabulary_ = dict(vocabulary) + else: + self.fixed_vocabulary_ = False + + def _check_vocabulary(self): + """Check if vocabulary is empty or missing (not fitted)""" + if not hasattr(self, "vocabulary_"): + self._validate_vocabulary() + if not self.fixed_vocabulary_: + raise NotFittedError("Vocabulary not fitted or provided") + + if len(self.vocabulary_) == 0: + raise ValueError("Vocabulary is empty") + + def _validate_ngram_range(self): + """Check validity of ngram_range parameter""" + min_n, max_m = self.ngram_range + if min_n > max_m: + raise ValueError( + "Invalid value for ngram_range=%s " + "lower boundary larger than the upper boundary." + % str(self.ngram_range) + ) + + def _warn_for_unused_params(self): + if self.tokenizer is not None and self.token_pattern is not None: + warnings.warn( + "The parameter 'token_pattern' will not be used" + " since 'tokenizer' is not None'" + ) + + if self.preprocessor is not None and callable(self.analyzer): + warnings.warn( + "The parameter 'preprocessor' will not be used" + " since 'analyzer' is callable'" + ) + + if ( + self.ngram_range != (1, 1) + and self.ngram_range is not None + and callable(self.analyzer) + ): + warnings.warn( + "The parameter 'ngram_range' will not be used" + " since 'analyzer' is callable'" + ) + if self.analyzer != "word" or callable(self.analyzer): + if self.stop_words is not None: + warnings.warn( + "The parameter 'stop_words' will not be used" + " since 'analyzer' != 'word'" + ) + if ( + self.token_pattern is not None + and self.token_pattern != r"(?u)\b\w\w+\b" + ): + warnings.warn( + "The parameter 'token_pattern' will not be used" + " since 'analyzer' != 'word'" + ) + if self.tokenizer is not None: + warnings.warn( + "The parameter 'tokenizer' will not be used" + " since 'analyzer' != 'word'" + ) + + +class HashingVectorizer( + TransformerMixin, _VectorizerMixin, BaseEstimator, auto_wrap_output_keys=None +): + r"""Convert a collection of text documents to a matrix of token occurrences. + + It turns a collection of text documents into a scipy.sparse matrix holding + token occurrence counts (or binary occurrence information), possibly + normalized as token frequencies if norm='l1' or projected on the euclidean + unit sphere if norm='l2'. + + This text vectorizer implementation uses the hashing trick to find the + token string name to feature integer index mapping. + + This strategy has several advantages: + + - it is very low memory scalable to large datasets as there is no need to + store a vocabulary dictionary in memory. + + - it is fast to pickle and un-pickle as it holds no state besides the + constructor parameters. + + - it can be used in a streaming (partial fit) or parallel pipeline as there + is no state computed during fit. + + There are also a couple of cons (vs using a CountVectorizer with an + in-memory vocabulary): + + - there is no way to compute the inverse transform (from feature indices to + string feature names) which can be a problem when trying to introspect + which features are most important to a model. + + - there can be collisions: distinct tokens can be mapped to the same + feature index. However in practice this is rarely an issue if n_features + is large enough (e.g. 2 ** 18 for text classification problems). + + - no IDF weighting as this would render the transformer stateful. 
+ + The hash function employed is the signed 32-bit version of Murmurhash3. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + input : {'filename', 'file', 'content'}, default='content' + - If `'filename'`, the sequence passed as an argument to fit is + expected to be a list of filenames that need reading to fetch + the raw content to analyze. + + - If `'file'`, the sequence items must have a 'read' method (file-like + object) that is called to fetch the bytes in memory. + + - If `'content'`, the input is expected to be a sequence of items that + can be of type string or byte. + + encoding : str, default='utf-8' + If bytes or files are given to analyze, this encoding is used to + decode. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. By default, it is + 'strict', meaning that a UnicodeDecodeError will be raised. Other + values are 'ignore' and 'replace'. + + strip_accents : {'ascii', 'unicode'} or callable, default=None + Remove accents and perform other character normalization + during the preprocessing step. + 'ascii' is a fast method that only works on characters that have + a direct ASCII mapping. + 'unicode' is a slightly slower method that works on any character. + None (default) means no character normalization is performed. + + Both 'ascii' and 'unicode' use NFKD normalization from + :func:`unicodedata.normalize`. + + lowercase : bool, default=True + Convert all characters to lowercase before tokenizing. + + preprocessor : callable, default=None + Override the preprocessing (string transformation) stage while + preserving the tokenizing and n-grams generation steps. + Only applies if ``analyzer`` is not callable. + + tokenizer : callable, default=None + Override the string tokenization step while preserving the + preprocessing and n-grams generation steps. + Only applies if ``analyzer == 'word'``. + + stop_words : {'english'}, list, default=None + If 'english', a built-in stop word list for English is used. + There are several known issues with 'english' and you should + consider an alternative (see :ref:`stop_words`). + + If a list, that list is assumed to contain stop words, all of which + will be removed from the resulting tokens. + Only applies if ``analyzer == 'word'``. + + token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b" + Regular expression denoting what constitutes a "token", only used + if ``analyzer == 'word'``. The default regexp selects tokens of 2 + or more alphanumeric characters (punctuation is completely ignored + and always treated as a token separator). + + If there is a capturing group in token_pattern then the + captured group content, not the entire match, becomes the token. + At most one capturing group is permitted. + + ngram_range : tuple (min_n, max_n), default=(1, 1) + The lower and upper boundary of the range of n-values for different + n-grams to be extracted. All values of n such that min_n <= n <= max_n + will be used. For example an ``ngram_range`` of ``(1, 1)`` means only + unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means + only bigrams. + Only applies if ``analyzer`` is not callable. 
+ + analyzer : {'word', 'char', 'char_wb'} or callable, default='word' + Whether the feature should be made of word or character n-grams. + Option 'char_wb' creates character n-grams only from text inside + word boundaries; n-grams at the edges of words are padded with space. + + If a callable is passed it is used to extract the sequence of features + out of the raw, unprocessed input. + + .. versionchanged:: 0.21 + Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data + is first read from the file and then passed to the given callable + analyzer. + + n_features : int, default=(2 ** 20) + The number of features (columns) in the output matrices. Small numbers + of features are likely to cause hash collisions, but large numbers + will cause larger coefficient dimensions in linear learners. + + binary : bool, default=False + If True, all non zero counts are set to 1. This is useful for discrete + probabilistic models that model binary events rather than integer + counts. + + norm : {'l1', 'l2'}, default='l2' + Norm used to normalize term vectors. None for no normalization. + + alternate_sign : bool, default=True + When True, an alternating sign is added to the features as to + approximately conserve the inner product in the hashed space even for + small n_features. This approach is similar to sparse random projection. + + .. versionadded:: 0.19 + + dtype : type, default=np.float64 + Type of the matrix returned by fit_transform() or transform(). + + See Also + -------- + CountVectorizer : Convert a collection of text documents to a matrix of + token counts. + TfidfVectorizer : Convert a collection of raw documents to a matrix of + TF-IDF features. + + Notes + ----- + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.feature_extraction.text import HashingVectorizer + >>> corpus = [ + ... 'This is the first document.', + ... 'This document is the second document.', + ... 'And this is the third one.', + ... 'Is this the first document?', + ... 
] + >>> vectorizer = HashingVectorizer(n_features=2**4) + >>> X = vectorizer.fit_transform(corpus) + >>> print(X.shape) + (4, 16) + """ + + _parameter_constraints: dict = { + "input": [StrOptions({"filename", "file", "content"})], + "encoding": [str], + "decode_error": [StrOptions({"strict", "ignore", "replace"})], + "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable], + "lowercase": ["boolean"], + "preprocessor": [callable, None], + "tokenizer": [callable, None], + "stop_words": [StrOptions({"english"}), list, None], + "token_pattern": [str, None], + "ngram_range": [tuple], + "analyzer": [StrOptions({"word", "char", "char_wb"}), callable], + "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="left")], + "binary": ["boolean"], + "norm": [StrOptions({"l1", "l2"}), None], + "alternate_sign": ["boolean"], + "dtype": "no_validation", # delegate to numpy + } + + def __init__( + self, + *, + input="content", + encoding="utf-8", + decode_error="strict", + strip_accents=None, + lowercase=True, + preprocessor=None, + tokenizer=None, + stop_words=None, + token_pattern=r"(?u)\b\w\w+\b", + ngram_range=(1, 1), + analyzer="word", + n_features=(2**20), + binary=False, + norm="l2", + alternate_sign=True, + dtype=np.float64, + ): + self.input = input + self.encoding = encoding + self.decode_error = decode_error + self.strip_accents = strip_accents + self.preprocessor = preprocessor + self.tokenizer = tokenizer + self.analyzer = analyzer + self.lowercase = lowercase + self.token_pattern = token_pattern + self.stop_words = stop_words + self.n_features = n_features + self.ngram_range = ngram_range + self.binary = binary + self.norm = norm + self.alternate_sign = alternate_sign + self.dtype = dtype + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : ndarray of shape [n_samples, n_features] + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + HashingVectorizer instance. + """ + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : ndarray of shape [n_samples, n_features] + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + HashingVectorizer instance. + """ + # triggers a parameter validation + if isinstance(X, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + + self._warn_for_unused_params() + self._validate_ngram_range() + + self._get_hasher().fit(X, y=y) + return self + + def transform(self, X): + """Transform a sequence of documents to a document-term matrix. + + Parameters + ---------- + X : iterable over raw text documents, length = n_samples + Samples. Each sample must be a text document (either bytes or + unicode strings, file name or file object depending on the + constructor argument) which will be tokenized and hashed. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Document-term matrix. 
+ """ + if isinstance(X, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + + self._validate_ngram_range() + + analyzer = self.build_analyzer() + X = self._get_hasher().transform(analyzer(doc) for doc in X) + if self.binary: + X.data.fill(1) + if self.norm is not None: + X = normalize(X, norm=self.norm, copy=False) + return X + + def fit_transform(self, X, y=None): + """Transform a sequence of documents to a document-term matrix. + + Parameters + ---------- + X : iterable over raw text documents, length = n_samples + Samples. Each sample must be a text document (either bytes or + unicode strings, file name or file object depending on the + constructor argument) which will be tokenized and hashed. + y : any + Ignored. This parameter exists only for compatibility with + sklearn.pipeline.Pipeline. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Document-term matrix. + """ + return self.fit(X, y).transform(X) + + def _get_hasher(self): + return FeatureHasher( + n_features=self.n_features, + input_type="string", + dtype=self.dtype, + alternate_sign=self.alternate_sign, + ) + + def _more_tags(self): + return {"X_types": ["string"]} + + +def _document_frequency(X): + """Count the number of non-zero values for each feature in sparse X.""" + if sp.issparse(X) and X.format == "csr": + return np.bincount(X.indices, minlength=X.shape[1]) + else: + return np.diff(X.indptr) + + +class CountVectorizer(_VectorizerMixin, BaseEstimator): + r"""Convert a collection of text documents to a matrix of token counts. + + This implementation produces a sparse representation of the counts using + scipy.sparse.csr_matrix. + + If you do not provide an a-priori dictionary and you do not use an analyzer + that does some kind of feature selection then the number of features will + be equal to the vocabulary size found by analyzing the data. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + input : {'filename', 'file', 'content'}, default='content' + - If `'filename'`, the sequence passed as an argument to fit is + expected to be a list of filenames that need reading to fetch + the raw content to analyze. + + - If `'file'`, the sequence items must have a 'read' method (file-like + object) that is called to fetch the bytes in memory. + + - If `'content'`, the input is expected to be a sequence of items that + can be of type string or byte. + + encoding : str, default='utf-8' + If bytes or files are given to analyze, this encoding is used to + decode. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. By default, it is + 'strict', meaning that a UnicodeDecodeError will be raised. Other + values are 'ignore' and 'replace'. + + strip_accents : {'ascii', 'unicode'} or callable, default=None + Remove accents and perform other character normalization + during the preprocessing step. + 'ascii' is a fast method that only works on characters that have + a direct ASCII mapping. + 'unicode' is a slightly slower method that works on any characters. + None (default) means no character normalization is performed. + + Both 'ascii' and 'unicode' use NFKD normalization from + :func:`unicodedata.normalize`. 
+ + lowercase : bool, default=True + Convert all characters to lowercase before tokenizing. + + preprocessor : callable, default=None + Override the preprocessing (strip_accents and lowercase) stage while + preserving the tokenizing and n-grams generation steps. + Only applies if ``analyzer`` is not callable. + + tokenizer : callable, default=None + Override the string tokenization step while preserving the + preprocessing and n-grams generation steps. + Only applies if ``analyzer == 'word'``. + + stop_words : {'english'}, list, default=None + If 'english', a built-in stop word list for English is used. + There are several known issues with 'english' and you should + consider an alternative (see :ref:`stop_words`). + + If a list, that list is assumed to contain stop words, all of which + will be removed from the resulting tokens. + Only applies if ``analyzer == 'word'``. + + If None, no stop words will be used. In this case, setting `max_df` + to a higher value, such as in the range (0.7, 1.0), can automatically detect + and filter stop words based on intra corpus document frequency of terms. + + token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b" + Regular expression denoting what constitutes a "token", only used + if ``analyzer == 'word'``. The default regexp select tokens of 2 + or more alphanumeric characters (punctuation is completely ignored + and always treated as a token separator). + + If there is a capturing group in token_pattern then the + captured group content, not the entire match, becomes the token. + At most one capturing group is permitted. + + ngram_range : tuple (min_n, max_n), default=(1, 1) + The lower and upper boundary of the range of n-values for different + word n-grams or char n-grams to be extracted. All values of n such + such that min_n <= n <= max_n will be used. For example an + ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means + unigrams and bigrams, and ``(2, 2)`` means only bigrams. + Only applies if ``analyzer`` is not callable. + + analyzer : {'word', 'char', 'char_wb'} or callable, default='word' + Whether the feature should be made of word n-gram or character + n-grams. + Option 'char_wb' creates character n-grams only from text inside + word boundaries; n-grams at the edges of words are padded with space. + + If a callable is passed it is used to extract the sequence of features + out of the raw, unprocessed input. + + .. versionchanged:: 0.21 + + Since v0.21, if ``input`` is ``filename`` or ``file``, the data is + first read from the file and then passed to the given callable + analyzer. + + max_df : float in range [0.0, 1.0] or int, default=1.0 + When building the vocabulary ignore terms that have a document + frequency strictly higher than the given threshold (corpus-specific + stop words). + If float, the parameter represents a proportion of documents, integer + absolute counts. + This parameter is ignored if vocabulary is not None. + + min_df : float in range [0.0, 1.0] or int, default=1 + When building the vocabulary ignore terms that have a document + frequency strictly lower than the given threshold. This value is also + called cut-off in the literature. + If float, the parameter represents a proportion of documents, integer + absolute counts. + This parameter is ignored if vocabulary is not None. + + max_features : int, default=None + If not None, build a vocabulary that only consider the top + `max_features` ordered by term frequency across the corpus. + Otherwise, all features are used. 
+ + This parameter is ignored if vocabulary is not None. + + vocabulary : Mapping or iterable, default=None + Either a Mapping (e.g., a dict) where keys are terms and values are + indices in the feature matrix, or an iterable over terms. If not + given, a vocabulary is determined from the input documents. Indices + in the mapping should not be repeated and should not have any gap + between 0 and the largest index. + + binary : bool, default=False + If True, all non zero counts are set to 1. This is useful for discrete + probabilistic models that model binary events rather than integer + counts. + + dtype : dtype, default=np.int64 + Type of the matrix returned by fit_transform() or transform(). + + Attributes + ---------- + vocabulary_ : dict + A mapping of terms to feature indices. + + fixed_vocabulary_ : bool + True if a fixed vocabulary of term to indices mapping + is provided by the user. + + stop_words_ : set + Terms that were ignored because they either: + + - occurred in too many documents (`max_df`) + - occurred in too few documents (`min_df`) + - were cut off by feature selection (`max_features`). + + This is only available if no vocabulary was given. + + See Also + -------- + HashingVectorizer : Convert a collection of text documents to a + matrix of token counts. + + TfidfVectorizer : Convert a collection of raw documents to a matrix + of TF-IDF features. + + Notes + ----- + The ``stop_words_`` attribute can get large and increase the model size + when pickling. This attribute is provided only for introspection and can + be safely removed using delattr or set to None before pickling. + + Examples + -------- + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> corpus = [ + ... 'This is the first document.', + ... 'This document is the second document.', + ... 'And this is the third one.', + ... 'Is this the first document?', + ... ] + >>> vectorizer = CountVectorizer() + >>> X = vectorizer.fit_transform(corpus) + >>> vectorizer.get_feature_names_out() + array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', + 'this'], ...) + >>> print(X.toarray()) + [[0 1 1 1 0 0 1 0 1] + [0 2 0 1 0 1 1 0 1] + [1 0 0 1 1 0 1 1 1] + [0 1 1 1 0 0 1 0 1]] + >>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2)) + >>> X2 = vectorizer2.fit_transform(corpus) + >>> vectorizer2.get_feature_names_out() + array(['and this', 'document is', 'first document', 'is the', 'is this', + 'second document', 'the first', 'the second', 'the third', 'third one', + 'this document', 'this is', 'this the'], ...) 
+ >>> print(X2.toarray()) + [[0 0 1 1 0 0 1 0 0 0 0 1 0] + [0 1 0 1 0 1 0 1 0 0 1 0 0] + [1 0 0 1 0 0 0 0 1 1 0 1 0] + [0 0 1 0 1 0 1 0 0 0 0 0 1]] + """ + + _parameter_constraints: dict = { + "input": [StrOptions({"filename", "file", "content"})], + "encoding": [str], + "decode_error": [StrOptions({"strict", "ignore", "replace"})], + "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable], + "lowercase": ["boolean"], + "preprocessor": [callable, None], + "tokenizer": [callable, None], + "stop_words": [StrOptions({"english"}), list, None], + "token_pattern": [str, None], + "ngram_range": [tuple], + "analyzer": [StrOptions({"word", "char", "char_wb"}), callable], + "max_df": [ + Interval(RealNotInt, 0, 1, closed="both"), + Interval(Integral, 1, None, closed="left"), + ], + "min_df": [ + Interval(RealNotInt, 0, 1, closed="both"), + Interval(Integral, 1, None, closed="left"), + ], + "max_features": [Interval(Integral, 1, None, closed="left"), None], + "vocabulary": [Mapping, HasMethods("__iter__"), None], + "binary": ["boolean"], + "dtype": "no_validation", # delegate to numpy + } + + def __init__( + self, + *, + input="content", + encoding="utf-8", + decode_error="strict", + strip_accents=None, + lowercase=True, + preprocessor=None, + tokenizer=None, + stop_words=None, + token_pattern=r"(?u)\b\w\w+\b", + ngram_range=(1, 1), + analyzer="word", + max_df=1.0, + min_df=1, + max_features=None, + vocabulary=None, + binary=False, + dtype=np.int64, + ): + self.input = input + self.encoding = encoding + self.decode_error = decode_error + self.strip_accents = strip_accents + self.preprocessor = preprocessor + self.tokenizer = tokenizer + self.analyzer = analyzer + self.lowercase = lowercase + self.token_pattern = token_pattern + self.stop_words = stop_words + self.max_df = max_df + self.min_df = min_df + self.max_features = max_features + self.ngram_range = ngram_range + self.vocabulary = vocabulary + self.binary = binary + self.dtype = dtype + + def _sort_features(self, X, vocabulary): + """Sort features by name + + Returns a reordered matrix and modifies the vocabulary in place + """ + sorted_features = sorted(vocabulary.items()) + map_index = np.empty(len(sorted_features), dtype=X.indices.dtype) + for new_val, (term, old_val) in enumerate(sorted_features): + vocabulary[term] = new_val + map_index[old_val] = new_val + + X.indices = map_index.take(X.indices, mode="clip") + return X + + def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): + """Remove too rare or too common features. + + Prune features that are non zero in more samples than high or less + documents than low, modifying the vocabulary, and restricting it to + at most the limit most frequent. + + This does not prune samples with zero features. 
+ """ + if high is None and low is None and limit is None: + return X, set() + + # Calculate a mask based on document frequencies + dfs = _document_frequency(X) + mask = np.ones(len(dfs), dtype=bool) + if high is not None: + mask &= dfs <= high + if low is not None: + mask &= dfs >= low + if limit is not None and mask.sum() > limit: + tfs = np.asarray(X.sum(axis=0)).ravel() + mask_inds = (-tfs[mask]).argsort()[:limit] + new_mask = np.zeros(len(dfs), dtype=bool) + new_mask[np.where(mask)[0][mask_inds]] = True + mask = new_mask + + new_indices = np.cumsum(mask) - 1 # maps old indices to new + removed_terms = set() + for term, old_index in list(vocabulary.items()): + if mask[old_index]: + vocabulary[term] = new_indices[old_index] + else: + del vocabulary[term] + removed_terms.add(term) + kept_indices = np.where(mask)[0] + if len(kept_indices) == 0: + raise ValueError( + "After pruning, no terms remain. Try a lower min_df or a higher max_df." + ) + return X[:, kept_indices], removed_terms + + def _count_vocab(self, raw_documents, fixed_vocab): + """Create sparse feature matrix, and vocabulary where fixed_vocab=False""" + if fixed_vocab: + vocabulary = self.vocabulary_ + else: + # Add a new value when a new vocabulary item is seen + vocabulary = defaultdict() + vocabulary.default_factory = vocabulary.__len__ + + analyze = self.build_analyzer() + j_indices = [] + indptr = [] + + values = _make_int_array() + indptr.append(0) + for doc in raw_documents: + feature_counter = {} + for feature in analyze(doc): + try: + feature_idx = vocabulary[feature] + if feature_idx not in feature_counter: + feature_counter[feature_idx] = 1 + else: + feature_counter[feature_idx] += 1 + except KeyError: + # Ignore out-of-vocabulary items for fixed_vocab=True + continue + + j_indices.extend(feature_counter.keys()) + values.extend(feature_counter.values()) + indptr.append(len(j_indices)) + + if not fixed_vocab: + # disable defaultdict behaviour + vocabulary = dict(vocabulary) + if not vocabulary: + raise ValueError( + "empty vocabulary; perhaps the documents only contain stop words" + ) + + if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1 + if _IS_32BIT: + raise ValueError( + ( + "sparse CSR array has {} non-zero " + "elements and requires 64 bit indexing, " + "which is unsupported with 32 bit Python." + ).format(indptr[-1]) + ) + indices_dtype = np.int64 + + else: + indices_dtype = np.int32 + j_indices = np.asarray(j_indices, dtype=indices_dtype) + indptr = np.asarray(indptr, dtype=indices_dtype) + values = np.frombuffer(values, dtype=np.intc) + + X = sp.csr_matrix( + (values, j_indices, indptr), + shape=(len(indptr) - 1, len(vocabulary)), + dtype=self.dtype, + ) + X.sort_indices() + return vocabulary, X + + def fit(self, raw_documents, y=None): + """Learn a vocabulary dictionary of all tokens in the raw documents. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is ignored. + + Returns + ------- + self : object + Fitted vectorizer. + """ + self.fit_transform(raw_documents) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, raw_documents, y=None): + """Learn the vocabulary dictionary and return document-term matrix. + + This is equivalent to fit followed by transform, but more efficiently + implemented. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is ignored. 
+ + Returns + ------- + X : array of shape (n_samples, n_features) + Document-term matrix. + """ + # We intentionally don't call the transform method to make + # fit_transform overridable without unwanted side effects in + # TfidfVectorizer. + if isinstance(raw_documents, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + + self._validate_ngram_range() + self._warn_for_unused_params() + self._validate_vocabulary() + max_df = self.max_df + min_df = self.min_df + max_features = self.max_features + + if self.fixed_vocabulary_ and self.lowercase: + for term in self.vocabulary: + if any(map(str.isupper, term)): + warnings.warn( + "Upper case characters found in" + " vocabulary while 'lowercase'" + " is True. These entries will not" + " be matched with any documents" + ) + break + + vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) + + if self.binary: + X.data.fill(1) + + if not self.fixed_vocabulary_: + n_doc = X.shape[0] + max_doc_count = max_df if isinstance(max_df, Integral) else max_df * n_doc + min_doc_count = min_df if isinstance(min_df, Integral) else min_df * n_doc + if max_doc_count < min_doc_count: + raise ValueError("max_df corresponds to < documents than min_df") + if max_features is not None: + X = self._sort_features(X, vocabulary) + X, self.stop_words_ = self._limit_features( + X, vocabulary, max_doc_count, min_doc_count, max_features + ) + if max_features is None: + X = self._sort_features(X, vocabulary) + self.vocabulary_ = vocabulary + + return X + + def transform(self, raw_documents): + """Transform documents to document-term matrix. + + Extract token counts out of raw text documents using the vocabulary + fitted with fit or the one provided to the constructor. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Document-term matrix. + """ + if isinstance(raw_documents, str): + raise ValueError( + "Iterable over raw text documents expected, string object received." + ) + self._check_vocabulary() + + # use the same matrix-building strategy as fit_transform + _, X = self._count_vocab(raw_documents, fixed_vocab=True) + if self.binary: + X.data.fill(1) + return X + + def inverse_transform(self, X): + """Return terms per document with nonzero entries in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document-term matrix. + + Returns + ------- + X_inv : list of arrays of shape (n_samples,) + List of arrays of terms. + """ + self._check_vocabulary() + # We need CSR format for fast row manipulations. + X = check_array(X, accept_sparse="csr") + n_samples = X.shape[0] + + terms = np.array(list(self.vocabulary_.keys())) + indices = np.array(list(self.vocabulary_.values())) + inverse_vocabulary = terms[np.argsort(indices)] + + if sp.issparse(X): + return [ + inverse_vocabulary[X[i, :].nonzero()[1]].ravel() + for i in range(n_samples) + ] + else: + return [ + inverse_vocabulary[np.flatnonzero(X[i, :])].ravel() + for i in range(n_samples) + ] + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
+ """ + self._check_vocabulary() + return np.asarray( + [t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))], + dtype=object, + ) + + def _more_tags(self): + return {"X_types": ["string"]} + + +def _make_int_array(): + """Construct an array.array of a type suitable for scipy.sparse indices.""" + return array.array(str("i")) + + +class TfidfTransformer( + OneToOneFeatureMixin, TransformerMixin, BaseEstimator, auto_wrap_output_keys=None +): + """Transform a count matrix to a normalized tf or tf-idf representation. + + Tf means term-frequency while tf-idf means term-frequency times inverse + document-frequency. This is a common term weighting scheme in information + retrieval, that has also found good use in document classification. + + The goal of using tf-idf instead of the raw frequencies of occurrence of a + token in a given document is to scale down the impact of tokens that occur + very frequently in a given corpus and that are hence empirically less + informative than features that occur in a small fraction of the training + corpus. + + The formula that is used to compute the tf-idf for a term t of a document d + in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is + computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where + n is the total number of documents in the document set and df(t) is the + document frequency of t; the document frequency is the number of documents + in the document set that contain the term t. The effect of adding "1" to + the idf in the equation above is that terms with zero idf, i.e., terms + that occur in all documents in a training set, will not be entirely + ignored. + (Note that the idf formula above differs from the standard textbook + notation that defines the idf as + idf(t) = log [ n / (df(t) + 1) ]). + + If ``smooth_idf=True`` (the default), the constant "1" is added to the + numerator and denominator of the idf as if an extra document was seen + containing every term in the collection exactly once, which prevents + zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1. + + Furthermore, the formulas used to compute tf and idf depend + on parameter settings that correspond to the SMART notation used in IR + as follows: + + Tf is "n" (natural) by default, "l" (logarithmic) when + ``sublinear_tf=True``. + Idf is "t" when use_idf is given, "n" (none) otherwise. + Normalization is "c" (cosine) when ``norm='l2'``, "n" (none) + when ``norm=None``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + norm : {'l1', 'l2'} or None, default='l2' + Each output row will have unit norm, either: + + - 'l2': Sum of squares of vector elements is 1. The cosine + similarity between two vectors is their dot product when l2 norm has + been applied. + - 'l1': Sum of absolute values of vector elements is 1. + See :func:`~sklearn.preprocessing.normalize`. + - None: No normalization. + + use_idf : bool, default=True + Enable inverse-document-frequency reweighting. If False, idf(t) = 1. + + smooth_idf : bool, default=True + Smooth idf weights by adding one to document frequencies, as if an + extra document was seen containing every term in the collection + exactly once. Prevents zero divisions. + + sublinear_tf : bool, default=False + Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). + + Attributes + ---------- + idf_ : array of shape (n_features) + The inverse document frequency (IDF) vector; only defined + if ``use_idf`` is True. + + .. 
versionadded:: 0.20 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 1.0 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + CountVectorizer : Transforms text into a sparse matrix of n-gram counts. + + TfidfVectorizer : Convert a collection of raw documents to a matrix of + TF-IDF features. + + HashingVectorizer : Convert a collection of text documents to a matrix + of token occurrences. + + References + ---------- + .. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern + Information Retrieval. Addison Wesley, pp. 68-74. + + .. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008). + Introduction to Information Retrieval. Cambridge University + Press, pp. 118-120. + + Examples + -------- + >>> from sklearn.feature_extraction.text import TfidfTransformer + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> from sklearn.pipeline import Pipeline + >>> corpus = ['this is the first document', + ... 'this document is the second document', + ... 'and this is the third one', + ... 'is this the first document'] + >>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the', + ... 'and', 'one'] + >>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)), + ... ('tfid', TfidfTransformer())]).fit(corpus) + >>> pipe['count'].transform(corpus).toarray() + array([[1, 1, 1, 1, 0, 1, 0, 0], + [1, 2, 0, 1, 1, 1, 0, 0], + [1, 0, 0, 1, 0, 1, 1, 1], + [1, 1, 1, 1, 0, 1, 0, 0]]) + >>> pipe['tfid'].idf_ + array([1. , 1.22314355, 1.51082562, 1. , 1.91629073, + 1. , 1.91629073, 1.91629073]) + >>> pipe.transform(corpus).shape + (4, 8) + """ + + _parameter_constraints: dict = { + "norm": [StrOptions({"l1", "l2"}), None], + "use_idf": ["boolean"], + "smooth_idf": ["boolean"], + "sublinear_tf": ["boolean"], + } + + def __init__(self, *, norm="l2", use_idf=True, smooth_idf=True, sublinear_tf=False): + self.norm = norm + self.use_idf = use_idf + self.smooth_idf = smooth_idf + self.sublinear_tf = sublinear_tf + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn the idf vector (global term weights). + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + A matrix of term/token counts. + + y : None + This parameter is not needed to compute tf-idf. + + Returns + ------- + self : object + Fitted transformer. + """ + # large sparse data is not supported for 32bit platforms because + # _document_frequency uses np.bincount which works on arrays of + # dtype NPY_INTP which is int32 for 32bit platforms. See #20923 + X = self._validate_data( + X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT + ) + if not sp.issparse(X): + X = sp.csr_matrix(X) + dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64 + + if self.use_idf: + n_samples, n_features = X.shape + df = _document_frequency(X) + df = df.astype(dtype, copy=False) + + # perform idf smoothing if required + df += int(self.smooth_idf) + n_samples += int(self.smooth_idf) + + # log+1 instead of log makes sure terms with zero idf don't get + # suppressed entirely. + idf = np.log(n_samples / df) + 1 + self._idf_diag = sp.diags( + idf, + offsets=0, + shape=(n_features, n_features), + format="csr", + dtype=dtype, + ) + + return self + + def transform(self, X, copy=True): + """Transform a count matrix to a tf or tf-idf representation. 
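+
+ The idf vector applied by this method follows the smoothed formula from
+ the class docstring; a small numerical sketch (the toy count matrix is
+ purely illustrative):
+
+ >>> import numpy as np
+ >>> from sklearn.feature_extraction.text import TfidfTransformer
+ >>> counts = np.array([[3, 0, 1], [2, 0, 0], [3, 0, 0], [4, 0, 0]])
+ >>> transformer = TfidfTransformer(smooth_idf=True).fit(counts)
+ >>> n, df = counts.shape[0], (counts > 0).sum(axis=0)
+ >>> np.allclose(transformer.idf_, np.log((1 + n) / (1 + df)) + 1)
+ True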
+ + Parameters + ---------- + X : sparse matrix of (n_samples, n_features) + A matrix of term/token counts. + + copy : bool, default=True + Whether to copy X and operate on the copy or perform in-place + operations. + + Returns + ------- + vectors : sparse matrix of shape (n_samples, n_features) + Tf-idf-weighted document-term matrix. + """ + X = self._validate_data( + X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy, reset=False + ) + if not sp.issparse(X): + X = sp.csr_matrix(X, dtype=np.float64) + + if self.sublinear_tf: + np.log(X.data, X.data) + X.data += 1 + + if self.use_idf: + # idf_ being a property, the automatic attributes detection + # does not work as usual and we need to specify the attribute + # name: + check_is_fitted(self, attributes=["idf_"], msg="idf vector is not fitted") + + X = X @ self._idf_diag + + if self.norm is not None: + X = normalize(X, norm=self.norm, copy=False) + + return X + + @property + def idf_(self): + """Inverse document frequency vector, only defined if `use_idf=True`. + + Returns + ------- + ndarray of shape (n_features,) + """ + # if _idf_diag is not set, this will raise an attribute error, + # which means hasattr(self, "idf_") is False + return np.ravel(self._idf_diag.sum(axis=0)) + + @idf_.setter + def idf_(self, value): + value = np.asarray(value, dtype=np.float64) + n_features = value.shape[0] + self._idf_diag = sp.spdiags( + value, diags=0, m=n_features, n=n_features, format="csr" + ) + + def _more_tags(self): + return {"X_types": ["2darray", "sparse"]} + + +class TfidfVectorizer(CountVectorizer): + r"""Convert a collection of raw documents to a matrix of TF-IDF features. + + Equivalent to :class:`CountVectorizer` followed by + :class:`TfidfTransformer`. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + input : {'filename', 'file', 'content'}, default='content' + - If `'filename'`, the sequence passed as an argument to fit is + expected to be a list of filenames that need reading to fetch + the raw content to analyze. + + - If `'file'`, the sequence items must have a 'read' method (file-like + object) that is called to fetch the bytes in memory. + + - If `'content'`, the input is expected to be a sequence of items that + can be of type string or byte. + + encoding : str, default='utf-8' + If bytes or files are given to analyze, this encoding is used to + decode. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. By default, it is + 'strict', meaning that a UnicodeDecodeError will be raised. Other + values are 'ignore' and 'replace'. + + strip_accents : {'ascii', 'unicode'} or callable, default=None + Remove accents and perform other character normalization + during the preprocessing step. + 'ascii' is a fast method that only works on characters that have + a direct ASCII mapping. + 'unicode' is a slightly slower method that works on any characters. + None (default) means no character normalization is performed. + + Both 'ascii' and 'unicode' use NFKD normalization from + :func:`unicodedata.normalize`. + + lowercase : bool, default=True + Convert all characters to lowercase before tokenizing. 
+ + preprocessor : callable, default=None + Override the preprocessing (string transformation) stage while + preserving the tokenizing and n-grams generation steps. + Only applies if ``analyzer`` is not callable. + + tokenizer : callable, default=None + Override the string tokenization step while preserving the + preprocessing and n-grams generation steps. + Only applies if ``analyzer == 'word'``. + + analyzer : {'word', 'char', 'char_wb'} or callable, default='word' + Whether the feature should be made of word or character n-grams. + Option 'char_wb' creates character n-grams only from text inside + word boundaries; n-grams at the edges of words are padded with space. + + If a callable is passed it is used to extract the sequence of features + out of the raw, unprocessed input. + + .. versionchanged:: 0.21 + Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data + is first read from the file and then passed to the given callable + analyzer. + + stop_words : {'english'}, list, default=None + If a string, it is passed to _check_stop_list and the appropriate stop + list is returned. 'english' is currently the only supported string + value. + There are several known issues with 'english' and you should + consider an alternative (see :ref:`stop_words`). + + If a list, that list is assumed to contain stop words, all of which + will be removed from the resulting tokens. + Only applies if ``analyzer == 'word'``. + + If None, no stop words will be used. In this case, setting `max_df` + to a higher value, such as in the range (0.7, 1.0), can automatically detect + and filter stop words based on intra corpus document frequency of terms. + + token_pattern : str, default=r"(?u)\\b\\w\\w+\\b" + Regular expression denoting what constitutes a "token", only used + if ``analyzer == 'word'``. The default regexp selects tokens of 2 + or more alphanumeric characters (punctuation is completely ignored + and always treated as a token separator). + + If there is a capturing group in token_pattern then the + captured group content, not the entire match, becomes the token. + At most one capturing group is permitted. + + ngram_range : tuple (min_n, max_n), default=(1, 1) + The lower and upper boundary of the range of n-values for different + n-grams to be extracted. All values of n such that min_n <= n <= max_n + will be used. For example an ``ngram_range`` of ``(1, 1)`` means only + unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means + only bigrams. + Only applies if ``analyzer`` is not callable. + + max_df : float or int, default=1.0 + When building the vocabulary ignore terms that have a document + frequency strictly higher than the given threshold (corpus-specific + stop words). + If float in range [0.0, 1.0], the parameter represents a proportion of + documents, integer absolute counts. + This parameter is ignored if vocabulary is not None. + + min_df : float or int, default=1 + When building the vocabulary ignore terms that have a document + frequency strictly lower than the given threshold. This value is also + called cut-off in the literature. + If float in range of [0.0, 1.0], the parameter represents a proportion + of documents, integer absolute counts. + This parameter is ignored if vocabulary is not None. + + max_features : int, default=None + If not None, build a vocabulary that only consider the top + `max_features` ordered by term frequency across the corpus. + Otherwise, all features are used. + + This parameter is ignored if vocabulary is not None. 
+ + vocabulary : Mapping or iterable, default=None + Either a Mapping (e.g., a dict) where keys are terms and values are + indices in the feature matrix, or an iterable over terms. If not + given, a vocabulary is determined from the input documents. + + binary : bool, default=False + If True, all non-zero term counts are set to 1. This does not mean + outputs will have only 0/1 values, only that the tf term in tf-idf + is binary. (Set `binary` to True, `use_idf` to False and + `norm` to None to get 0/1 outputs). + + dtype : dtype, default=float64 + Type of the matrix returned by fit_transform() or transform(). + + norm : {'l1', 'l2'} or None, default='l2' + Each output row will have unit norm, either: + + - 'l2': Sum of squares of vector elements is 1. The cosine + similarity between two vectors is their dot product when l2 norm has + been applied. + - 'l1': Sum of absolute values of vector elements is 1. + See :func:`~sklearn.preprocessing.normalize`. + - None: No normalization. + + use_idf : bool, default=True + Enable inverse-document-frequency reweighting. If False, idf(t) = 1. + + smooth_idf : bool, default=True + Smooth idf weights by adding one to document frequencies, as if an + extra document was seen containing every term in the collection + exactly once. Prevents zero divisions. + + sublinear_tf : bool, default=False + Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). + + Attributes + ---------- + vocabulary_ : dict + A mapping of terms to feature indices. + + fixed_vocabulary_ : bool + True if a fixed vocabulary of term to indices mapping + is provided by the user. + + idf_ : array of shape (n_features,) + The inverse document frequency (IDF) vector; only defined + if ``use_idf`` is True. + + stop_words_ : set + Terms that were ignored because they either: + + - occurred in too many documents (`max_df`) + - occurred in too few documents (`min_df`) + - were cut off by feature selection (`max_features`). + + This is only available if no vocabulary was given. + + See Also + -------- + CountVectorizer : Transforms text into a sparse matrix of n-gram counts. + + TfidfTransformer : Performs the TF-IDF transformation from a provided + matrix of counts. + + Notes + ----- + The ``stop_words_`` attribute can get large and increase the model size + when pickling. This attribute is provided only for introspection and can + be safely removed using delattr or set to None before pickling. + + Examples + -------- + >>> from sklearn.feature_extraction.text import TfidfVectorizer + >>> corpus = [ + ... 'This is the first document.', + ... 'This document is the second document.', + ... 'And this is the third one.', + ... 'Is this the first document?', + ... ] + >>> vectorizer = TfidfVectorizer() + >>> X = vectorizer.fit_transform(corpus) + >>> vectorizer.get_feature_names_out() + array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', + 'this'], ...) 
+ >>> print(X.shape) + (4, 9) + """ + + _parameter_constraints: dict = {**CountVectorizer._parameter_constraints} + _parameter_constraints.update( + { + "norm": [StrOptions({"l1", "l2"}), None], + "use_idf": ["boolean"], + "smooth_idf": ["boolean"], + "sublinear_tf": ["boolean"], + } + ) + + def __init__( + self, + *, + input="content", + encoding="utf-8", + decode_error="strict", + strip_accents=None, + lowercase=True, + preprocessor=None, + tokenizer=None, + analyzer="word", + stop_words=None, + token_pattern=r"(?u)\b\w\w+\b", + ngram_range=(1, 1), + max_df=1.0, + min_df=1, + max_features=None, + vocabulary=None, + binary=False, + dtype=np.float64, + norm="l2", + use_idf=True, + smooth_idf=True, + sublinear_tf=False, + ): + super().__init__( + input=input, + encoding=encoding, + decode_error=decode_error, + strip_accents=strip_accents, + lowercase=lowercase, + preprocessor=preprocessor, + tokenizer=tokenizer, + analyzer=analyzer, + stop_words=stop_words, + token_pattern=token_pattern, + ngram_range=ngram_range, + max_df=max_df, + min_df=min_df, + max_features=max_features, + vocabulary=vocabulary, + binary=binary, + dtype=dtype, + ) + self.norm = norm + self.use_idf = use_idf + self.smooth_idf = smooth_idf + self.sublinear_tf = sublinear_tf + + # Broadcast the TF-IDF parameters to the underlying transformer instance + # for easy grid search and repr + + @property + def idf_(self): + """Inverse document frequency vector, only defined if `use_idf=True`. + + Returns + ------- + ndarray of shape (n_features,) + """ + if not hasattr(self, "_tfidf"): + raise NotFittedError( + f"{self.__class__.__name__} is not fitted yet. Call 'fit' with " + "appropriate arguments before using this attribute." + ) + return self._tfidf.idf_ + + @idf_.setter + def idf_(self, value): + if not self.use_idf: + raise ValueError("`idf_` cannot be set when `user_idf=False`.") + if not hasattr(self, "_tfidf"): + # We should support transferring `idf_` from another `TfidfTransformer` + # and therefore, we need to create the transformer instance it does not + # exist yet. + self._tfidf = TfidfTransformer( + norm=self.norm, + use_idf=self.use_idf, + smooth_idf=self.smooth_idf, + sublinear_tf=self.sublinear_tf, + ) + self._validate_vocabulary() + if hasattr(self, "vocabulary_"): + if len(self.vocabulary_) != len(value): + raise ValueError( + "idf length = %d must be equal to vocabulary size = %d" + % (len(value), len(self.vocabulary)) + ) + self._tfidf.idf_ = value + + def _check_params(self): + if self.dtype not in FLOAT_DTYPES: + warnings.warn( + "Only {} 'dtype' should be used. {} 'dtype' will " + "be converted to np.float64.".format(FLOAT_DTYPES, self.dtype), + UserWarning, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, raw_documents, y=None): + """Learn vocabulary and idf from training set. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is not needed to compute tfidf. + + Returns + ------- + self : object + Fitted vectorizer. + """ + self._check_params() + self._warn_for_unused_params() + self._tfidf = TfidfTransformer( + norm=self.norm, + use_idf=self.use_idf, + smooth_idf=self.smooth_idf, + sublinear_tf=self.sublinear_tf, + ) + X = super().fit_transform(raw_documents) + self._tfidf.fit(X) + return self + + def fit_transform(self, raw_documents, y=None): + """Learn vocabulary and idf, return document-term matrix. 
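+
+ The result matches running :class:`CountVectorizer` followed by
+ :class:`TfidfTransformer` with the same settings; a quick sketch on a
+ throwaway two-document corpus:
+
+ >>> import numpy as np
+ >>> from sklearn.feature_extraction.text import (
+ ... CountVectorizer, TfidfTransformer, TfidfVectorizer)
+ >>> corpus = ["one two two", "two three"]
+ >>> direct = TfidfVectorizer().fit_transform(corpus)
+ >>> staged = TfidfTransformer().fit_transform(
+ ... CountVectorizer().fit_transform(corpus))
+ >>> np.allclose(direct.toarray(), staged.toarray())
+ True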
+ + This is equivalent to fit followed by transform, but more efficiently + implemented. + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + y : None + This parameter is ignored. + + Returns + ------- + X : sparse matrix of (n_samples, n_features) + Tf-idf-weighted document-term matrix. + """ + self._check_params() + self._tfidf = TfidfTransformer( + norm=self.norm, + use_idf=self.use_idf, + smooth_idf=self.smooth_idf, + sublinear_tf=self.sublinear_tf, + ) + X = super().fit_transform(raw_documents) + self._tfidf.fit(X) + # X is already a transformed view of raw_documents so + # we set copy to False + return self._tfidf.transform(X, copy=False) + + def transform(self, raw_documents): + """Transform documents to document-term matrix. + + Uses the vocabulary and document frequencies (df) learned by fit (or + fit_transform). + + Parameters + ---------- + raw_documents : iterable + An iterable which generates either str, unicode or file objects. + + Returns + ------- + X : sparse matrix of (n_samples, n_features) + Tf-idf-weighted document-term matrix. + """ + check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted") + + X = super().transform(raw_documents) + return self._tfidf.transform(X, copy=False) + + def _more_tags(self): + return {"X_types": ["string"], "_skip_test": True} diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/__init__.py b/venv/lib/python3.10/site-packages/sklearn/svm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0d64ce24cdd634bc2afcd9357388c7404eb7edbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/svm/__init__.py @@ -0,0 +1,25 @@ +""" +The :mod:`sklearn.svm` module includes Support Vector Machine algorithms. +""" + +# See http://scikit-learn.sourceforge.net/modules/svm.html for complete +# documentation. + +# Author: Fabian Pedregosa with help from +# the scikit-learn community. LibSVM and LibLinear are copyright +# of their respective owners. 
+# License: BSD 3 clause (C) INRIA 2010 + +from ._bounds import l1_min_c +from ._classes import SVC, SVR, LinearSVC, LinearSVR, NuSVC, NuSVR, OneClassSVM + +__all__ = [ + "LinearSVC", + "LinearSVR", + "NuSVC", + "NuSVR", + "OneClassSVM", + "SVC", + "SVR", + "l1_min_c", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fde9bb7b07a811fdc9c8de632656d433c46096a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a34918ae5d7daa2f1f170ca2f8c6872eea4b8a7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_bounds.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_bounds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba91565eb3c8ad842ff9cb4f44ec71db56b2166f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_bounds.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_classes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_classes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b06d421530ae1e3247c1b47f1fb31581cf7a510 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/__pycache__/_classes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/_base.py b/venv/lib/python3.10/site-packages/sklearn/svm/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6d154c99dc6694eb38721cd411a7f68c3a074f66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/svm/_base.py @@ -0,0 +1,1249 @@ +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, ClassifierMixin, _fit_context +from ..exceptions import ConvergenceWarning, NotFittedError +from ..preprocessing import LabelEncoder +from ..utils import check_array, check_random_state, column_or_1d, compute_class_weight +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.metaestimators import available_if +from ..utils.multiclass import _ovr_decision_function, check_classification_targets +from ..utils.validation import ( + _check_large_sparse, + _check_sample_weight, + _num_samples, + check_consistent_length, + check_is_fitted, +) +from . import _liblinear as liblinear # type: ignore + +# mypy error: error: Module 'sklearn.svm' has no attribute '_libsvm' +# (and same for other imports) +from . import _libsvm as libsvm # type: ignore +from . 
import _libsvm_sparse as libsvm_sparse # type: ignore + +LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"] + + +def _one_vs_one_coef(dual_coef, n_support, support_vectors): + """Generate primal coefficients from dual coefficients + for the one-vs-one multi class LibSVM in the case + of a linear kernel.""" + + # get 1vs1 weights for all n*(n-1) classifiers. + # this is somewhat messy. + # shape of dual_coef_ is nSV * (n_classes -1) + # see docs for details + n_class = dual_coef.shape[0] + 1 + + # XXX we could do preallocation of coef but + # would have to take care in the sparse case + coef = [] + sv_locs = np.cumsum(np.hstack([[0], n_support])) + for class1 in range(n_class): + # SVs for class1: + sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :] + for class2 in range(class1 + 1, n_class): + # SVs for class1: + sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :] + + # dual coef for class1 SVs: + alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]] + # dual coef for class2 SVs: + alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]] + # build weight for class1 vs class2 + + coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2)) + return coef + + +class BaseLibSVM(BaseEstimator, metaclass=ABCMeta): + """Base class for estimators that use libsvm as backing library. + + This implements support vector machine classification and regression. + + Parameter documentation is in the derived `SVC` class. + """ + + _parameter_constraints: dict = { + "kernel": [ + StrOptions({"linear", "poly", "rbf", "sigmoid", "precomputed"}), + callable, + ], + "degree": [Interval(Integral, 0, None, closed="left")], + "gamma": [ + StrOptions({"scale", "auto"}), + Interval(Real, 0.0, None, closed="left"), + ], + "coef0": [Interval(Real, None, None, closed="neither")], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "C": [Interval(Real, 0.0, None, closed="neither")], + "nu": [Interval(Real, 0.0, 1.0, closed="right")], + "epsilon": [Interval(Real, 0.0, None, closed="left")], + "shrinking": ["boolean"], + "probability": ["boolean"], + "cache_size": [Interval(Real, 0, None, closed="neither")], + "class_weight": [StrOptions({"balanced"}), dict, None], + "verbose": ["verbose"], + "max_iter": [Interval(Integral, -1, None, closed="left")], + "random_state": ["random_state"], + } + + # The order of these must match the integer values in LibSVM. + # XXX These are actually the same in the dense case. Need to factor + # this out. + _sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"] + + @abstractmethod + def __init__( + self, + kernel, + degree, + gamma, + coef0, + tol, + C, + nu, + epsilon, + shrinking, + probability, + cache_size, + class_weight, + verbose, + max_iter, + random_state, + ): + if self._impl not in LIBSVM_IMPL: + raise ValueError( + "impl should be one of %s, %s was given" % (LIBSVM_IMPL, self._impl) + ) + + self.kernel = kernel + self.degree = degree + self.gamma = gamma + self.coef0 = coef0 + self.tol = tol + self.C = C + self.nu = nu + self.epsilon = epsilon + self.shrinking = shrinking + self.probability = probability + self.cache_size = cache_size + self.class_weight = class_weight + self.verbose = verbose + self.max_iter = max_iter + self.random_state = random_state + + def _more_tags(self): + # Used by cross_val_score. 
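+ # A "pairwise" estimator expects X to be a square (n_samples, n_samples)
+ # kernel/affinity matrix, so cross-validation utilities must slice both
+ # rows and columns of X when splitting.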
+ return {"pairwise": self.kernel == "precomputed"} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the SVM model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) \ + or (n_samples, n_samples) + Training vectors, where `n_samples` is the number of samples + and `n_features` is the number of features. + For kernel="precomputed", the expected shape of X is + (n_samples, n_samples). + + y : array-like of shape (n_samples,) + Target values (class labels in classification, real numbers in + regression). + + sample_weight : array-like of shape (n_samples,), default=None + Per-sample weights. Rescale C per sample. Higher weights + force the classifier to put more emphasis on these points. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + If X and y are not C-ordered and contiguous arrays of np.float64 and + X is not a scipy.sparse.csr_matrix, X and/or y may be copied. + + If X is a dense array, then the other methods will not support sparse + matrices as input. + """ + rnd = check_random_state(self.random_state) + + sparse = sp.issparse(X) + if sparse and self.kernel == "precomputed": + raise TypeError("Sparse precomputed kernels are not supported.") + self._sparse = sparse and not callable(self.kernel) + + if callable(self.kernel): + check_consistent_length(X, y) + else: + X, y = self._validate_data( + X, + y, + dtype=np.float64, + order="C", + accept_sparse="csr", + accept_large_sparse=False, + ) + + y = self._validate_targets(y) + + sample_weight = np.asarray( + [] if sample_weight is None else sample_weight, dtype=np.float64 + ) + solver_type = LIBSVM_IMPL.index(self._impl) + + # input validation + n_samples = _num_samples(X) + if solver_type != 2 and n_samples != y.shape[0]: + raise ValueError( + "X and y have incompatible shapes.\n" + + "X has %s samples, but y has %s." % (n_samples, y.shape[0]) + ) + + if self.kernel == "precomputed" and n_samples != X.shape[1]: + raise ValueError( + "Precomputed matrix must be a square matrix." + " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1]) + ) + + if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples: + raise ValueError( + "sample_weight and X have incompatible shapes: " + "%r vs %r\n" + "Note: Sparse matrices cannot be indexed w/" + "boolean masks (use `indices=True` in CV)." + % (sample_weight.shape, X.shape) + ) + + kernel = "precomputed" if callable(self.kernel) else self.kernel + + if kernel == "precomputed": + # unused but needs to be a float for cython code that ignores + # it anyway + self._gamma = 0.0 + elif isinstance(self.gamma, str): + if self.gamma == "scale": + # var = E[X^2] - E[X]^2 if sparse + X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var() + self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0 + elif self.gamma == "auto": + self._gamma = 1.0 / X.shape[1] + elif isinstance(self.gamma, Real): + self._gamma = self.gamma + + fit = self._sparse_fit if self._sparse else self._dense_fit + if self.verbose: + print("[LibSVM]", end="") + + seed = rnd.randint(np.iinfo("i").max) + fit(X, y, sample_weight, solver_type, kernel, random_seed=seed) + # see comment on the other call to np.iinfo in this file + + self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,) + + # In binary case, we need to flip the sign of coef, intercept and + # decision function. 
Use self._intercept_ and self._dual_coef_ + # internally. + self._intercept_ = self.intercept_.copy() + self._dual_coef_ = self.dual_coef_ + if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2: + self.intercept_ *= -1 + self.dual_coef_ = -self.dual_coef_ + + dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_ + intercept_finiteness = np.isfinite(self._intercept_).all() + dual_coef_finiteness = np.isfinite(dual_coef).all() + if not (intercept_finiteness and dual_coef_finiteness): + raise ValueError( + "The dual coefficients or intercepts are not finite." + " The input data may contain large values and need to be" + " preprocessed." + ) + + # Since, in the case of SVC and NuSVC, the number of models optimized by + # libSVM could be greater than one (depending on the input), `n_iter_` + # stores an ndarray. + # For the other sub-classes (SVR, NuSVR, and OneClassSVM), the number of + # models optimized by libSVM is always one, so `n_iter_` stores an + # integer. + if self._impl in ["c_svc", "nu_svc"]: + self.n_iter_ = self._num_iter + else: + self.n_iter_ = self._num_iter.item() + + return self + + def _validate_targets(self, y): + """Validation of y and class_weight. + + Default implementation for SVR and one-class; overridden in BaseSVC. + """ + return column_or_1d(y, warn=True).astype(np.float64, copy=False) + + def _warn_from_fit_status(self): + assert self.fit_status_ in (0, 1) + if self.fit_status_ == 1: + warnings.warn( + "Solver terminated early (max_iter=%i)." + " Consider pre-processing your data with" + " StandardScaler or MinMaxScaler." + % self.max_iter, + ConvergenceWarning, + ) + + def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed): + if callable(self.kernel): + # you must store a reference to X to compute the kernel in predict + # TODO: add keyword copy to copy on demand + self.__Xfit = X + X = self._compute_kernel(X) + + if X.shape[0] != X.shape[1]: + raise ValueError("X.shape[0] should be equal to X.shape[1]") + + libsvm.set_verbosity_wrap(self.verbose) + + # we don't pass **self.get_params() to allow subclasses to + # add other parameters to __init__ + ( + self.support_, + self.support_vectors_, + self._n_support, + self.dual_coef_, + self.intercept_, + self._probA, + self._probB, + self.fit_status_, + self._num_iter, + ) = libsvm.fit( + X, + y, + svm_type=solver_type, + sample_weight=sample_weight, + class_weight=getattr(self, "class_weight_", np.empty(0)), + kernel=kernel, + C=self.C, + nu=self.nu, + probability=self.probability, + degree=self.degree, + shrinking=self.shrinking, + tol=self.tol, + cache_size=self.cache_size, + coef0=self.coef0, + gamma=self._gamma, + epsilon=self.epsilon, + max_iter=self.max_iter, + random_seed=random_seed, + ) + + self._warn_from_fit_status() + + def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed): + X.data = np.asarray(X.data, dtype=np.float64, order="C") + X.sort_indices() + + kernel_type = self._sparse_kernels.index(kernel) + + libsvm_sparse.set_verbosity_wrap(self.verbose) + + ( + self.support_, + self.support_vectors_, + dual_coef_data, + self.intercept_, + self._n_support, + self._probA, + self._probB, + self.fit_status_, + self._num_iter, + ) = libsvm_sparse.libsvm_sparse_train( + X.shape[1], + X.data, + X.indices, + X.indptr, + y, + solver_type, + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + self.C, + getattr(self, "class_weight_", np.empty(0)), + sample_weight, + self.nu, + self.cache_size, + self.epsilon, + 
int(self.shrinking), + int(self.probability), + self.max_iter, + random_seed, + ) + + self._warn_from_fit_status() + + if hasattr(self, "classes_"): + n_class = len(self.classes_) - 1 + else: # regression + n_class = 1 + n_SV = self.support_vectors_.shape[0] + + dual_coef_indices = np.tile(np.arange(n_SV), n_class) + if not n_SV: + self.dual_coef_ = sp.csr_matrix([]) + else: + dual_coef_indptr = np.arange( + 0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class + ) + self.dual_coef_ = sp.csr_matrix( + (dual_coef_data, dual_coef_indices, dual_coef_indptr), (n_class, n_SV) + ) + + def predict(self, X): + """Perform regression on samples in X. + + For an one-class model, +1 (inlier) or -1 (outlier) is returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The predicted values. + """ + X = self._validate_for_predict(X) + predict = self._sparse_predict if self._sparse else self._dense_predict + return predict(X) + + def _dense_predict(self, X): + X = self._compute_kernel(X) + if X.ndim == 1: + X = check_array(X, order="C", accept_large_sparse=False) + + kernel = self.kernel + if callable(self.kernel): + kernel = "precomputed" + if X.shape[1] != self.shape_fit_[0]: + raise ValueError( + "X.shape[1] = %d should be equal to %d, " + "the number of samples at training time" + % (X.shape[1], self.shape_fit_[0]) + ) + + svm_type = LIBSVM_IMPL.index(self._impl) + + return libsvm.predict( + X, + self.support_, + self.support_vectors_, + self._n_support, + self._dual_coef_, + self._intercept_, + self._probA, + self._probB, + svm_type=svm_type, + kernel=kernel, + degree=self.degree, + coef0=self.coef0, + gamma=self._gamma, + cache_size=self.cache_size, + ) + + def _sparse_predict(self, X): + # Precondition: X is a csr_matrix of dtype np.float64. + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + kernel_type = self._sparse_kernels.index(kernel) + + C = 0.0 # C is not useful here + + return libsvm_sparse.libsvm_sparse_predict( + X.data, + X.indices, + X.indptr, + self.support_vectors_.data, + self.support_vectors_.indices, + self.support_vectors_.indptr, + self._dual_coef_.data, + self._intercept_, + LIBSVM_IMPL.index(self._impl), + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + C, + getattr(self, "class_weight_", np.empty(0)), + self.nu, + self.epsilon, + self.shrinking, + self.probability, + self._n_support, + self._probA, + self._probB, + ) + + def _compute_kernel(self, X): + """Return the data transformed by a callable kernel""" + if callable(self.kernel): + # in the case of precomputed kernel given as a function, we + # have to compute explicitly the kernel matrix + kernel = self.kernel(X, self.__Xfit) + if sp.issparse(kernel): + kernel = kernel.toarray() + X = np.asarray(kernel, dtype=np.float64, order="C") + return X + + def _decision_function(self, X): + """Evaluates the decision function for the samples in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + X : array-like of shape (n_samples, n_class * (n_class-1) / 2) + Returns the decision function of the sample for each class + in the model. + """ + # NOTE: _validate_for_predict contains check for is_fitted + # hence must be placed before any other attributes are used. 
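+ # For c_svc/nu_svc with more than two classes, libsvm returns one column
+ # per pair of classes, i.e. n_classes * (n_classes - 1) / 2 columns
+ # (one-vs-one); in the binary case a single column is returned and its
+ # sign is flipped below to match scikit-learn's class ordering.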
+ X = self._validate_for_predict(X) + X = self._compute_kernel(X) + + if self._sparse: + dec_func = self._sparse_decision_function(X) + else: + dec_func = self._dense_decision_function(X) + + # In binary case, we need to flip the sign of coef, intercept and + # decision function. + if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2: + return -dec_func.ravel() + + return dec_func + + def _dense_decision_function(self, X): + X = check_array(X, dtype=np.float64, order="C", accept_large_sparse=False) + + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + return libsvm.decision_function( + X, + self.support_, + self.support_vectors_, + self._n_support, + self._dual_coef_, + self._intercept_, + self._probA, + self._probB, + svm_type=LIBSVM_IMPL.index(self._impl), + kernel=kernel, + degree=self.degree, + cache_size=self.cache_size, + coef0=self.coef0, + gamma=self._gamma, + ) + + def _sparse_decision_function(self, X): + X.data = np.asarray(X.data, dtype=np.float64, order="C") + + kernel = self.kernel + if hasattr(kernel, "__call__"): + kernel = "precomputed" + + kernel_type = self._sparse_kernels.index(kernel) + + return libsvm_sparse.libsvm_sparse_decision_function( + X.data, + X.indices, + X.indptr, + self.support_vectors_.data, + self.support_vectors_.indices, + self.support_vectors_.indptr, + self._dual_coef_.data, + self._intercept_, + LIBSVM_IMPL.index(self._impl), + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + self.C, + getattr(self, "class_weight_", np.empty(0)), + self.nu, + self.epsilon, + self.shrinking, + self.probability, + self._n_support, + self._probA, + self._probB, + ) + + def _validate_for_predict(self, X): + check_is_fitted(self) + + if not callable(self.kernel): + X = self._validate_data( + X, + accept_sparse="csr", + dtype=np.float64, + order="C", + accept_large_sparse=False, + reset=False, + ) + + if self._sparse and not sp.issparse(X): + X = sp.csr_matrix(X) + if self._sparse: + X.sort_indices() + + if sp.issparse(X) and not self._sparse and not callable(self.kernel): + raise ValueError( + "cannot use sparse input in %r trained on dense data" + % type(self).__name__ + ) + + if self.kernel == "precomputed": + if X.shape[1] != self.shape_fit_[0]: + raise ValueError( + "X.shape[1] = %d should be equal to %d, " + "the number of samples at training time" + % (X.shape[1], self.shape_fit_[0]) + ) + # Fixes https://nvd.nist.gov/vuln/detail/CVE-2020-28975 + # Check that _n_support is consistent with support_vectors + sv = self.support_vectors_ + if not self._sparse and sv.size > 0 and self.n_support_.sum() != sv.shape[0]: + raise ValueError( + f"The internal representation of {self.__class__.__name__} was altered" + ) + return X + + @property + def coef_(self): + """Weights assigned to the features when `kernel="linear"`. + + Returns + ------- + ndarray of shape (n_features, n_classes) + """ + if self.kernel != "linear": + raise AttributeError("coef_ is only available when using a linear kernel") + + coef = self._get_coef() + + # coef_ being a read-only property, it's better to mark the value as + # immutable to avoid hiding potential bugs for the unsuspecting user. 
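+ # For a linear kernel the primal weights are recovered from the dual
+ # representation as dual_coef_ @ support_vectors_ (see _get_coef below).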
+ if sp.issparse(coef): + # sparse matrix do not have global flags + coef.data.flags.writeable = False + else: + # regular dense array + coef.flags.writeable = False + return coef + + def _get_coef(self): + return safe_sparse_dot(self._dual_coef_, self.support_vectors_) + + @property + def n_support_(self): + """Number of support vectors for each class.""" + try: + check_is_fitted(self) + except NotFittedError: + raise AttributeError + + svm_type = LIBSVM_IMPL.index(self._impl) + if svm_type in (0, 1): + return self._n_support + else: + # SVR and OneClass + # _n_support has size 2, we make it size 1 + return np.array([self._n_support[0]]) + + +class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta): + """ABC for LibSVM-based classifiers.""" + + _parameter_constraints: dict = { + **BaseLibSVM._parameter_constraints, + "decision_function_shape": [StrOptions({"ovr", "ovo"})], + "break_ties": ["boolean"], + } + for unused_param in ["epsilon", "nu"]: + _parameter_constraints.pop(unused_param) + + @abstractmethod + def __init__( + self, + kernel, + degree, + gamma, + coef0, + tol, + C, + nu, + shrinking, + probability, + cache_size, + class_weight, + verbose, + max_iter, + decision_function_shape, + random_state, + break_ties, + ): + self.decision_function_shape = decision_function_shape + self.break_ties = break_ties + super().__init__( + kernel=kernel, + degree=degree, + gamma=gamma, + coef0=coef0, + tol=tol, + C=C, + nu=nu, + epsilon=0.0, + shrinking=shrinking, + probability=probability, + cache_size=cache_size, + class_weight=class_weight, + verbose=verbose, + max_iter=max_iter, + random_state=random_state, + ) + + def _validate_targets(self, y): + y_ = column_or_1d(y, warn=True) + check_classification_targets(y) + cls, y = np.unique(y_, return_inverse=True) + self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_) + if len(cls) < 2: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % len(cls) + ) + + self.classes_ = cls + + return np.asarray(y, dtype=np.float64, order="C") + + def decision_function(self, X): + """Evaluate the decision function for the samples in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2) + Returns the decision function of the sample for each class + in the model. + If decision_function_shape='ovr', the shape is (n_samples, + n_classes). + + Notes + ----- + If decision_function_shape='ovo', the function values are proportional + to the distance of the samples X to the separating hyperplane. If the + exact distances are required, divide the function values by the norm of + the weight vector (``coef_``). See also `this question + `_ for further details. + If decision_function_shape='ovr', the decision function is a monotonic + transformation of ovo decision function. + """ + dec = self._decision_function(X) + if self.decision_function_shape == "ovr" and len(self.classes_) > 2: + return _ovr_decision_function(dec < 0, -dec, len(self.classes_)) + return dec + + def predict(self, X): + """Perform classification on samples in X. + + For an one-class model, +1 or -1 is returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples_test, n_samples_train) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). 
+ + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Class labels for samples in X. + """ + check_is_fitted(self) + if self.break_ties and self.decision_function_shape == "ovo": + raise ValueError( + "break_ties must be False when decision_function_shape is 'ovo'" + ) + + if ( + self.break_ties + and self.decision_function_shape == "ovr" + and len(self.classes_) > 2 + ): + y = np.argmax(self.decision_function(X), axis=1) + else: + y = super().predict(X) + return self.classes_.take(np.asarray(y, dtype=np.intp)) + + # Hacky way of getting predict_proba to raise an AttributeError when + # probability=False using properties. Do not use this in new code; when + # probabilities are not available depending on a setting, introduce two + # estimators. + def _check_proba(self): + if not self.probability: + raise AttributeError( + "predict_proba is not available when probability=False" + ) + if self._impl not in ("c_svc", "nu_svc"): + raise AttributeError("predict_proba only implemented for SVC and NuSVC") + return True + + @available_if(_check_proba) + def predict_proba(self, X): + """Compute probabilities of possible outcomes for samples in X. + + The model needs to have probability information computed at training + time: fit with attribute `probability` set to True. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). + + Returns + ------- + T : ndarray of shape (n_samples, n_classes) + Returns the probability of the sample for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + + Notes + ----- + The probability model is created using cross validation, so + the results can be slightly different than those obtained by + predict. Also, it will produce meaningless results on very small + datasets. + """ + X = self._validate_for_predict(X) + if self.probA_.size == 0 or self.probB_.size == 0: + raise NotFittedError( + "predict_proba is not available when fitted with probability=False" + ) + pred_proba = ( + self._sparse_predict_proba if self._sparse else self._dense_predict_proba + ) + return pred_proba(X) + + @available_if(_check_proba) + def predict_log_proba(self, X): + """Compute log probabilities of possible outcomes for samples in X. + + The model need to have probability information computed at training + time: fit with attribute `probability` set to True. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples_test, n_samples_train) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). + + Returns + ------- + T : ndarray of shape (n_samples, n_classes) + Returns the log-probabilities of the sample for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + + Notes + ----- + The probability model is created using cross validation, so + the results can be slightly different than those obtained by + predict. Also, it will produce meaningless results on very small + datasets. 
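+
+ As a quick sanity sketch (synthetic data, the values themselves are not
+ meaningful), the log-probabilities simply exponentiate back to
+ `predict_proba`:
+
+ >>> import numpy as np
+ >>> from sklearn.datasets import make_classification
+ >>> from sklearn.svm import SVC
+ >>> X, y = make_classification(n_samples=50, random_state=0)
+ >>> clf = SVC(probability=True, random_state=0).fit(X, y)
+ >>> clf.predict_log_proba(X).shape
+ (50, 2)
+ >>> np.allclose(np.exp(clf.predict_log_proba(X)), clf.predict_proba(X))
+ True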
+ """ + return np.log(self.predict_proba(X)) + + def _dense_predict_proba(self, X): + X = self._compute_kernel(X) + + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + svm_type = LIBSVM_IMPL.index(self._impl) + pprob = libsvm.predict_proba( + X, + self.support_, + self.support_vectors_, + self._n_support, + self._dual_coef_, + self._intercept_, + self._probA, + self._probB, + svm_type=svm_type, + kernel=kernel, + degree=self.degree, + cache_size=self.cache_size, + coef0=self.coef0, + gamma=self._gamma, + ) + + return pprob + + def _sparse_predict_proba(self, X): + X.data = np.asarray(X.data, dtype=np.float64, order="C") + + kernel = self.kernel + if callable(kernel): + kernel = "precomputed" + + kernel_type = self._sparse_kernels.index(kernel) + + return libsvm_sparse.libsvm_sparse_predict_proba( + X.data, + X.indices, + X.indptr, + self.support_vectors_.data, + self.support_vectors_.indices, + self.support_vectors_.indptr, + self._dual_coef_.data, + self._intercept_, + LIBSVM_IMPL.index(self._impl), + kernel_type, + self.degree, + self._gamma, + self.coef0, + self.tol, + self.C, + getattr(self, "class_weight_", np.empty(0)), + self.nu, + self.epsilon, + self.shrinking, + self.probability, + self._n_support, + self._probA, + self._probB, + ) + + def _get_coef(self): + if self.dual_coef_.shape[0] == 1: + # binary classifier + coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_) + else: + # 1vs1 classifier + coef = _one_vs_one_coef( + self.dual_coef_, self._n_support, self.support_vectors_ + ) + if sp.issparse(coef[0]): + coef = sp.vstack(coef).tocsr() + else: + coef = np.vstack(coef) + + return coef + + @property + def probA_(self): + """Parameter learned in Platt scaling when `probability=True`. + + Returns + ------- + ndarray of shape (n_classes * (n_classes - 1) / 2) + """ + return self._probA + + @property + def probB_(self): + """Parameter learned in Platt scaling when `probability=True`. + + Returns + ------- + ndarray of shape (n_classes * (n_classes - 1) / 2) + """ + return self._probB + + +def _get_liblinear_solver_type(multi_class, penalty, loss, dual): + """Find the liblinear magic number for the solver. + + This number depends on the values of the following attributes: + - multi_class + - penalty + - loss + - dual + + The same number is also internally used by LibLinear to determine + which solver to use. 
+ """ + # nested dicts containing level 1: available loss functions, + # level2: available penalties for the given loss function, + # level3: whether the dual solver is available for the specified + # combination of loss function and penalty + _solver_type_dict = { + "logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}}, + "hinge": {"l2": {True: 3}}, + "squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}}, + "epsilon_insensitive": {"l2": {True: 13}}, + "squared_epsilon_insensitive": {"l2": {False: 11, True: 12}}, + "crammer_singer": 4, + } + + if multi_class == "crammer_singer": + return _solver_type_dict[multi_class] + elif multi_class != "ovr": + raise ValueError( + "`multi_class` must be one of `ovr`, `crammer_singer`, got %r" % multi_class + ) + + _solver_pen = _solver_type_dict.get(loss, None) + if _solver_pen is None: + error_string = "loss='%s' is not supported" % loss + else: + _solver_dual = _solver_pen.get(penalty, None) + if _solver_dual is None: + error_string = ( + "The combination of penalty='%s' and loss='%s' is not supported" + % (penalty, loss) + ) + else: + solver_num = _solver_dual.get(dual, None) + if solver_num is None: + error_string = ( + "The combination of penalty='%s' and " + "loss='%s' are not supported when dual=%s" % (penalty, loss, dual) + ) + else: + return solver_num + raise ValueError( + "Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r" + % (error_string, penalty, loss, dual) + ) + + +def _fit_liblinear( + X, + y, + C, + fit_intercept, + intercept_scaling, + class_weight, + penalty, + dual, + verbose, + max_iter, + tol, + random_state=None, + multi_class="ovr", + loss="logistic_regression", + epsilon=0.1, + sample_weight=None, +): + """Used by Logistic Regression (and CV) and LinearSVC/LinearSVR. + + Preprocessing is done in this function before supplying it to liblinear. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X + + C : float + Inverse of cross-validation parameter. The lower the C, the higher + the penalization. + + fit_intercept : bool + Whether or not to fit an intercept. If set to True, the feature vector + is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where + 1 corresponds to the intercept. If set to False, no intercept will be + used in calculations (i.e. data is expected to be already centered). + + intercept_scaling : float + Liblinear internally penalizes the intercept, treating it like any + other term in the feature vector. To reduce the impact of the + regularization on the intercept, the `intercept_scaling` parameter can + be set to a value greater than 1; the higher the value of + `intercept_scaling`, the lower the impact of regularization on it. + Then, the weights become `[w_x_1, ..., w_x_n, + w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent + the feature weights and the intercept weight is scaled by + `intercept_scaling`. This scaling allows the intercept term to have a + different regularization behavior compared to the other features. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. 
+ + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + penalty : {'l1', 'l2'} + The norm of the penalty used in regularization. + + dual : bool + Dual or primal formulation, + + verbose : int + Set verbose to any positive number for verbosity. + + max_iter : int + Number of iterations. + + tol : float + Stopping condition. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generation for shuffling the data. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + multi_class : {'ovr', 'crammer_singer'}, default='ovr' + `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer` + optimizes a joint objective over all classes. + While `crammer_singer` is interesting from an theoretical perspective + as it is consistent it is seldom used in practice and rarely leads to + better accuracy and is more expensive to compute. + If `crammer_singer` is chosen, the options loss, penalty and dual will + be ignored. + + loss : {'logistic_regression', 'hinge', 'squared_hinge', \ + 'epsilon_insensitive', 'squared_epsilon_insensitive}, \ + default='logistic_regression' + The loss function used to fit the model. + + epsilon : float, default=0.1 + Epsilon parameter in the epsilon-insensitive loss function. Note + that the value of this parameter depends on the scale of the target + variable y. If unsure, set epsilon=0. + + sample_weight : array-like of shape (n_samples,), default=None + Weights assigned to each sample. + + Returns + ------- + coef_ : ndarray of shape (n_features, n_features + 1) + The coefficient vector got by minimizing the objective function. + + intercept_ : float + The intercept term added to the vector. + + n_iter_ : array of int + Number of iterations run across for each class. + """ + if loss not in ["epsilon_insensitive", "squared_epsilon_insensitive"]: + enc = LabelEncoder() + y_ind = enc.fit_transform(y) + classes_ = enc.classes_ + if len(classes_) < 2: + raise ValueError( + "This solver needs samples of at least 2 classes" + " in the data, but the data contains only one" + " class: %r" + % classes_[0] + ) + + class_weight_ = compute_class_weight(class_weight, classes=classes_, y=y) + else: + class_weight_ = np.empty(0, dtype=np.float64) + y_ind = y + liblinear.set_verbosity_wrap(verbose) + rnd = check_random_state(random_state) + if verbose: + print("[LibLinear]", end="") + + # LinearSVC breaks when intercept_scaling is <= 0 + bias = -1.0 + if fit_intercept: + if intercept_scaling <= 0: + raise ValueError( + "Intercept scaling is %r but needs to be greater " + "than 0. To disable fitting an intercept," + " set fit_intercept=False." 
% intercept_scaling + ) + else: + bias = intercept_scaling + + libsvm.set_verbosity_wrap(verbose) + libsvm_sparse.set_verbosity_wrap(verbose) + liblinear.set_verbosity_wrap(verbose) + + # Liblinear doesn't support 64bit sparse matrix indices yet + if sp.issparse(X): + _check_large_sparse(X) + + # LibLinear wants targets as doubles, even for classification + y_ind = np.asarray(y_ind, dtype=np.float64).ravel() + y_ind = np.require(y_ind, requirements="W") + + sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64) + + solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual) + raw_coef_, n_iter_ = liblinear.train_wrap( + X, + y_ind, + sp.issparse(X), + solver_type, + tol, + bias, + C, + class_weight_, + max_iter, + rnd.randint(np.iinfo("i").max), + epsilon, + sample_weight, + ) + # Regarding rnd.randint(..) in the above signature: + # seed for srand in range [0..INT_MAX); due to limitations in Numpy + # on 32-bit platforms, we can't get to the UINT_MAX limit that + # srand supports + n_iter_max = max(n_iter_) + if n_iter_max >= max_iter: + warnings.warn( + "Liblinear failed to converge, increase the number of iterations.", + ConvergenceWarning, + ) + + if fit_intercept: + coef_ = raw_coef_[:, :-1] + intercept_ = intercept_scaling * raw_coef_[:, -1] + else: + coef_ = raw_coef_ + intercept_ = 0.0 + + return coef_, intercept_, n_iter_ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/_bounds.py b/venv/lib/python3.10/site-packages/sklearn/svm/_bounds.py new file mode 100644 index 0000000000000000000000000000000000000000..d14297230af4cd8a73428825c7dc8f93ccc71d72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/svm/_bounds.py @@ -0,0 +1,94 @@ +"""Determination of parameter bounds""" +# Author: Paolo Losi +# License: BSD 3 clause + +from numbers import Real + +import numpy as np + +from ..preprocessing import LabelBinarizer +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import safe_sparse_dot +from ..utils.validation import check_array, check_consistent_length + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "loss": [StrOptions({"squared_hinge", "log"})], + "fit_intercept": ["boolean"], + "intercept_scaling": [Interval(Real, 0, None, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0): + """Return the lowest bound for C. + + The lower bound for C is computed such that for C in (l1_min_C, infinity) + the model is guaranteed not to be empty. This applies to l1 penalized + classifiers, such as LinearSVC with penalty='l1' and + linear_model.LogisticRegression with penalty='l1'. + + This value is valid if class_weight parameter in fit() is not set. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + loss : {'squared_hinge', 'log'}, default='squared_hinge' + Specifies the loss function. + With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss). + With 'log' it is the loss of logistic regression models. + + fit_intercept : bool, default=True + Specifies if the intercept should be fitted by the model. + It must match the fit() method parameter. 
+ + intercept_scaling : float, default=1.0 + When fit_intercept is True, instance vector x becomes + [x, intercept_scaling], + i.e. a "synthetic" feature with constant value equals to + intercept_scaling is appended to the instance vector. + It must match the fit() method parameter. + + Returns + ------- + l1_min_c : float + Minimum value for C. + + Examples + -------- + >>> from sklearn.svm import l1_min_c + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=100, n_features=20, random_state=42) + >>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}") + 0.0044 + """ + + X = check_array(X, accept_sparse="csc") + check_consistent_length(X, y) + + Y = LabelBinarizer(neg_label=-1).fit_transform(y).T + # maximum absolute value over classes and features + den = np.max(np.abs(safe_sparse_dot(Y, X))) + if fit_intercept: + bias = np.full( + (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype + ) + den = max(den, abs(np.dot(Y, bias)).max()) + + if den == 0.0: + raise ValueError( + "Ill-posed l1_min_c calculation: l1 will always " + "select zero coefficients for this data" + ) + if loss == "squared_hinge": + return 0.5 / den + else: # loss == 'log': + return 2.0 / den diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/_classes.py b/venv/lib/python3.10/site-packages/sklearn/svm/_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..00854f47d9a8425773dfa8dbe6f0981666f772ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/svm/_classes.py @@ -0,0 +1,1832 @@ +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, OutlierMixin, RegressorMixin, _fit_context +from ..linear_model._base import LinearClassifierMixin, LinearModel, SparseCoefMixin +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.multiclass import check_classification_targets +from ..utils.validation import _num_samples +from ._base import BaseLibSVM, BaseSVC, _fit_liblinear, _get_liblinear_solver_type + + +def _validate_dual_parameter(dual, loss, penalty, multi_class, X): + """Helper function to assign the value of dual parameter.""" + if dual == "auto": + if X.shape[0] < X.shape[1]: + try: + _get_liblinear_solver_type(multi_class, penalty, loss, True) + return True + except ValueError: # dual not supported for the combination + return False + else: + try: + _get_liblinear_solver_type(multi_class, penalty, loss, False) + return False + except ValueError: # primal not supported by the combination + return True + # TODO 1.5 + elif dual == "warn": + warnings.warn( + ( + "The default value of `dual` will change from `True` to `'auto'` in" + " 1.5. Set the value of `dual` explicitly to suppress the warning." + ), + FutureWarning, + ) + return True + else: + return dual + + +class LinearSVC(LinearClassifierMixin, SparseCoefMixin, BaseEstimator): + """Linear Support Vector Classification. + + Similar to SVC with parameter kernel='linear', but implemented in terms of + liblinear rather than libsvm, so it has more flexibility in the choice of + penalties and loss functions and should scale better to large numbers of + samples. + + The main differences between :class:`~sklearn.svm.LinearSVC` and + :class:`~sklearn.svm.SVC` lie in the loss function used by default, and in + the handling of intercept regularization between those two implementations. 
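# --- Editor's note: an illustrative sketch, not part of the library source. ---
# For the `l1_min_c` helper defined just above, the squared-hinge bound reduces
# to 0.5 / max_j |sum_i y_i X_ij| with the labels encoded as +/-1 and no
# intercept. The tiny X and y below are made up purely for demonstration.
import numpy as np
from sklearn.svm import l1_min_c

X = np.array([[-1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
y = np.array([0, 1, 1, 1])

signed_y = np.where(y == 1, 1.0, -1.0)   # what LabelBinarizer(neg_label=-1) yields for two classes
den = np.abs(signed_y @ X).max()         # maximum |Y @ X| over features
manual_bound = 0.5 / den                 # squared-hinge case, fit_intercept=False

assert np.isclose(
    manual_bound, l1_min_c(X, y, loss="squared_hinge", fit_intercept=False)
)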
+ + This class supports both dense and sparse input and the multiclass support + is handled according to a one-vs-the-rest scheme. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + penalty : {'l1', 'l2'}, default='l2' + Specifies the norm used in the penalization. The 'l2' + penalty is the standard used in SVC. The 'l1' leads to ``coef_`` + vectors that are sparse. + + loss : {'hinge', 'squared_hinge'}, default='squared_hinge' + Specifies the loss function. 'hinge' is the standard SVM loss + (used e.g. by the SVC class) while 'squared_hinge' is the + square of the hinge loss. The combination of ``penalty='l1'`` + and ``loss='hinge'`` is not supported. + + dual : "auto" or bool, default=True + Select the algorithm to either solve the dual or primal + optimization problem. Prefer dual=False when n_samples > n_features. + `dual="auto"` will choose the value of the parameter automatically, + based on the values of `n_samples`, `n_features`, `loss`, `multi_class` + and `penalty`. If `n_samples` < `n_features` and optimizer supports + chosen `loss`, `multi_class` and `penalty`, then dual will be set to True, + otherwise it will be set to False. + + .. versionchanged:: 1.3 + The `"auto"` option is added in version 1.3 and will be the default + in version 1.5. + + tol : float, default=1e-4 + Tolerance for stopping criteria. + + C : float, default=1.0 + Regularization parameter. The strength of the regularization is + inversely proportional to C. Must be strictly positive. + + multi_class : {'ovr', 'crammer_singer'}, default='ovr' + Determines the multi-class strategy if `y` contains more than + two classes. + ``"ovr"`` trains n_classes one-vs-rest classifiers, while + ``"crammer_singer"`` optimizes a joint objective over all classes. + While `crammer_singer` is interesting from a theoretical perspective + as it is consistent, it is seldom used in practice as it rarely leads + to better accuracy and is more expensive to compute. + If ``"crammer_singer"`` is chosen, the options loss, penalty and dual + will be ignored. + + fit_intercept : bool, default=True + Whether or not to fit an intercept. If set to True, the feature vector + is extended to include an intercept term: `[x_1, ..., x_n, 1]`, where + 1 corresponds to the intercept. If set to False, no intercept will be + used in calculations (i.e. data is expected to be already centered). + + intercept_scaling : float, default=1.0 + When `fit_intercept` is True, the instance vector x becomes ``[x_1, + ..., x_n, intercept_scaling]``, i.e. a "synthetic" feature with a + constant value equal to `intercept_scaling` is appended to the instance + vector. The intercept becomes intercept_scaling * synthetic feature + weight. Note that liblinear internally penalizes the intercept, + treating it like any other term in the feature vector. To reduce the + impact of the regularization on the intercept, the `intercept_scaling` + parameter can be set to a value greater than 1; the higher the value of + `intercept_scaling`, the lower the impact of regularization on it. + Then, the weights become `[w_x_1, ..., w_x_n, + w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent + the feature weights and the intercept weight is scaled by + `intercept_scaling`. This scaling allows the intercept term to have a + different regularization behavior compared to the other features. + + class_weight : dict or 'balanced', default=None + Set the parameter C of class i to ``class_weight[i]*C`` for + SVC. 
If not given, all classes are supposed to have + weight one. + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + verbose : int, default=0 + Enable verbose output. Note that this setting takes advantage of a + per-process runtime setting in liblinear that, if enabled, may not work + properly in a multithreaded context. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generation for shuffling the data for + the dual coordinate descent (if ``dual=True``). When ``dual=False`` the + underlying implementation of :class:`LinearSVC` is not random and + ``random_state`` has no effect on the results. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + max_iter : int, default=1000 + The maximum number of iterations to be run. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) if n_classes == 2 \ + else (n_classes, n_features) + Weights assigned to the features (coefficients in the primal + problem). + + ``coef_`` is a readonly property derived from ``raw_coef_`` that + follows the internal memory layout of liblinear. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + classes_ : ndarray of shape (n_classes,) + The unique classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Maximum number of iterations run across all classes. + + See Also + -------- + SVC : Implementation of Support Vector Machine classifier using libsvm: + the kernel can be non-linear but its SMO algorithm does not + scale to large number of samples as LinearSVC does. + + Furthermore SVC multi-class mode is implemented using one + vs one scheme while LinearSVC uses one vs the rest. It is + possible to implement one vs the rest with SVC by using the + :class:`~sklearn.multiclass.OneVsRestClassifier` wrapper. + + Finally SVC can fit dense data without memory copy if the input + is C-contiguous. Sparse data will still incur memory copy though. + + sklearn.linear_model.SGDClassifier : SGDClassifier can optimize the same + cost function as LinearSVC + by adjusting the penalty and loss parameters. In addition it requires + less memory, allows incremental (online) learning, and implements + various loss functions and regularization regimes. + + Notes + ----- + The underlying C implementation uses a random number generator to + select features when fitting the model. It is thus not uncommon + to have slightly different results for the same input data. If + that happens, try with a smaller ``tol`` parameter. + + The underlying implementation, liblinear, uses a sparse internal + representation for the data that will incur a memory copy. + + Predict output may not match that of standalone liblinear in certain + cases. See :ref:`differences from liblinear ` + in the narrative documentation. 
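# --- Editor's note: an illustrative sketch, not part of the library source. ---
# The "balanced" class_weight mode described above weights class i by
# n_samples / (n_classes * np.bincount(y))[i]. The label vector below is made
# up for demonstration; compute_class_weight is sklearn's public helper.
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

y = np.array([0, 0, 0, 0, 1, 1])          # four samples of class 0, two of class 1
manual = len(y) / (2 * np.bincount(y))    # -> array([0.75, 1.5 ])
auto = compute_class_weight("balanced", classes=np.array([0, 1]), y=y)
assert np.allclose(manual, auto)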
+ + References + ---------- + `LIBLINEAR: A Library for Large Linear Classification + `__ + + Examples + -------- + >>> from sklearn.svm import LinearSVC + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_features=4, random_state=0) + >>> clf = make_pipeline(StandardScaler(), + ... LinearSVC(dual="auto", random_state=0, tol=1e-5)) + >>> clf.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('linearsvc', LinearSVC(dual='auto', random_state=0, tol=1e-05))]) + + >>> print(clf.named_steps['linearsvc'].coef_) + [[0.141... 0.526... 0.679... 0.493...]] + + >>> print(clf.named_steps['linearsvc'].intercept_) + [0.1693...] + >>> print(clf.predict([[0, 0, 0, 0]])) + [1] + """ + + _parameter_constraints: dict = { + "penalty": [StrOptions({"l1", "l2"})], + "loss": [StrOptions({"hinge", "squared_hinge"})], + "dual": ["boolean", StrOptions({"auto"}), Hidden(StrOptions({"warn"}))], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "C": [Interval(Real, 0.0, None, closed="neither")], + "multi_class": [StrOptions({"ovr", "crammer_singer"})], + "fit_intercept": ["boolean"], + "intercept_scaling": [Interval(Real, 0, None, closed="neither")], + "class_weight": [None, dict, StrOptions({"balanced"})], + "verbose": ["verbose"], + "random_state": ["random_state"], + "max_iter": [Interval(Integral, 0, None, closed="left")], + } + + def __init__( + self, + penalty="l2", + loss="squared_hinge", + *, + dual="warn", + tol=1e-4, + C=1.0, + multi_class="ovr", + fit_intercept=True, + intercept_scaling=1, + class_weight=None, + verbose=0, + random_state=None, + max_iter=1000, + ): + self.dual = dual + self.tol = tol + self.C = C + self.multi_class = multi_class + self.fit_intercept = fit_intercept + self.intercept_scaling = intercept_scaling + self.class_weight = class_weight + self.verbose = verbose + self.random_state = random_state + self.max_iter = max_iter + self.penalty = penalty + self.loss = loss + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like of shape (n_samples,), default=None + Array of weights that are assigned to individual + samples. If not provided, + then each sample is given unit weight. + + .. versionadded:: 0.18 + + Returns + ------- + self : object + An instance of the estimator. 
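# --- Editor's note: an illustrative sketch, not part of the library source. ---
# For a fitted LinearSVC, the decision function documented above is the affine
# map X @ coef_.T + intercept_, and binary predictions threshold it at zero.
# The synthetic data set is made up for demonstration; everything else uses
# the public estimator API.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_features=4, random_state=0)
clf = LinearSVC(dual="auto", random_state=0, tol=1e-5).fit(X, y)
manual_scores = X @ clf.coef_.T + clf.intercept_   # shape (n_samples, 1) for two classes
assert np.allclose(manual_scores.ravel(), clf.decision_function(X))
assert np.array_equal(
    clf.classes_[(manual_scores.ravel() > 0).astype(int)], clf.predict(X)
)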
+ """ + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + dtype=np.float64, + order="C", + accept_large_sparse=False, + ) + check_classification_targets(y) + self.classes_ = np.unique(y) + + _dual = _validate_dual_parameter( + self.dual, self.loss, self.penalty, self.multi_class, X + ) + + self.coef_, self.intercept_, n_iter_ = _fit_liblinear( + X, + y, + self.C, + self.fit_intercept, + self.intercept_scaling, + self.class_weight, + self.penalty, + _dual, + self.verbose, + self.max_iter, + self.tol, + self.random_state, + self.multi_class, + self.loss, + sample_weight=sample_weight, + ) + # Backward compatibility: _fit_liblinear is used both by LinearSVC/R + # and LogisticRegression but LogisticRegression sets a structured + # `n_iter_` attribute with information about the underlying OvR fits + # while LinearSVC/R only reports the maximum value. + self.n_iter_ = n_iter_.max().item() + + if self.multi_class == "crammer_singer" and len(self.classes_) == 2: + self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1) + if self.fit_intercept: + intercept = self.intercept_[1] - self.intercept_[0] + self.intercept_ = np.array([intercept]) + + return self + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + +class LinearSVR(RegressorMixin, LinearModel): + """Linear Support Vector Regression. + + Similar to SVR with parameter kernel='linear', but implemented in terms of + liblinear rather than libsvm, so it has more flexibility in the choice of + penalties and loss functions and should scale better to large numbers of + samples. + + The main differences between :class:`~sklearn.svm.LinearSVR` and + :class:`~sklearn.svm.SVR` lie in the loss function used by default, and in + the handling of intercept regularization between those two implementations. + + This class supports both dense and sparse input. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.16 + + Parameters + ---------- + epsilon : float, default=0.0 + Epsilon parameter in the epsilon-insensitive loss function. Note + that the value of this parameter depends on the scale of the target + variable y. If unsure, set ``epsilon=0``. + + tol : float, default=1e-4 + Tolerance for stopping criteria. + + C : float, default=1.0 + Regularization parameter. The strength of the regularization is + inversely proportional to C. Must be strictly positive. + + loss : {'epsilon_insensitive', 'squared_epsilon_insensitive'}, \ + default='epsilon_insensitive' + Specifies the loss function. The epsilon-insensitive loss + (standard SVR) is the L1 loss, while the squared epsilon-insensitive + loss ('squared_epsilon_insensitive') is the L2 loss. + + fit_intercept : bool, default=True + Whether or not to fit an intercept. If set to True, the feature vector + is extended to include an intercept term: `[x_1, ..., x_n, 1]`, where + 1 corresponds to the intercept. If set to False, no intercept will be + used in calculations (i.e. data is expected to be already centered). + + intercept_scaling : float, default=1.0 + When `fit_intercept` is True, the instance vector x becomes `[x_1, ..., + x_n, intercept_scaling]`, i.e. a "synthetic" feature with a constant + value equal to `intercept_scaling` is appended to the instance vector. + The intercept becomes intercept_scaling * synthetic feature weight. + Note that liblinear internally penalizes the intercept, treating it + like any other term in the feature vector. 
To reduce the impact of the + regularization on the intercept, the `intercept_scaling` parameter can + be set to a value greater than 1; the higher the value of + `intercept_scaling`, the lower the impact of regularization on it. + Then, the weights become `[w_x_1, ..., w_x_n, + w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent + the feature weights and the intercept weight is scaled by + `intercept_scaling`. This scaling allows the intercept term to have a + different regularization behavior compared to the other features. + + dual : "auto" or bool, default=True + Select the algorithm to either solve the dual or primal + optimization problem. Prefer dual=False when n_samples > n_features. + `dual="auto"` will choose the value of the parameter automatically, + based on the values of `n_samples`, `n_features` and `loss`. If + `n_samples` < `n_features` and optimizer supports chosen `loss`, + then dual will be set to True, otherwise it will be set to False. + + .. versionchanged:: 1.3 + The `"auto"` option is added in version 1.3 and will be the default + in version 1.5. + + verbose : int, default=0 + Enable verbose output. Note that this setting takes advantage of a + per-process runtime setting in liblinear that, if enabled, may not work + properly in a multithreaded context. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generation for shuffling the data. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + max_iter : int, default=1000 + The maximum number of iterations to be run. + + Attributes + ---------- + coef_ : ndarray of shape (n_features) if n_classes == 2 \ + else (n_classes, n_features) + Weights assigned to the features (coefficients in the primal + problem). + + `coef_` is a readonly property derived from `raw_coef_` that + follows the internal memory layout of liblinear. + + intercept_ : ndarray of shape (1) if n_classes == 2 else (n_classes) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Maximum number of iterations run across all classes. + + See Also + -------- + LinearSVC : Implementation of Support Vector Machine classifier using the + same library as this class (liblinear). + + SVR : Implementation of Support Vector Machine regression using libsvm: + the kernel can be non-linear but its SMO algorithm does not scale to + large number of samples as :class:`~sklearn.svm.LinearSVR` does. + + sklearn.linear_model.SGDRegressor : SGDRegressor can optimize the same cost + function as LinearSVR + by adjusting the penalty and loss parameters. In addition it requires + less memory, allows incremental (online) learning, and implements + various loss functions and regularization regimes. + + Examples + -------- + >>> from sklearn.svm import LinearSVR + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_features=4, random_state=0) + >>> regr = make_pipeline(StandardScaler(), + ... 
LinearSVR(dual="auto", random_state=0, tol=1e-5)) + >>> regr.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('linearsvr', LinearSVR(dual='auto', random_state=0, tol=1e-05))]) + + >>> print(regr.named_steps['linearsvr'].coef_) + [18.582... 27.023... 44.357... 64.522...] + >>> print(regr.named_steps['linearsvr'].intercept_) + [-4...] + >>> print(regr.predict([[0, 0, 0, 0]])) + [-2.384...] + """ + + _parameter_constraints: dict = { + "epsilon": [Real], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "C": [Interval(Real, 0.0, None, closed="neither")], + "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})], + "fit_intercept": ["boolean"], + "intercept_scaling": [Interval(Real, 0, None, closed="neither")], + "dual": ["boolean", StrOptions({"auto"}), Hidden(StrOptions({"warn"}))], + "verbose": ["verbose"], + "random_state": ["random_state"], + "max_iter": [Interval(Integral, 0, None, closed="left")], + } + + def __init__( + self, + *, + epsilon=0.0, + tol=1e-4, + C=1.0, + loss="epsilon_insensitive", + fit_intercept=True, + intercept_scaling=1.0, + dual="warn", + verbose=0, + random_state=None, + max_iter=1000, + ): + self.tol = tol + self.C = C + self.epsilon = epsilon + self.fit_intercept = fit_intercept + self.intercept_scaling = intercept_scaling + self.verbose = verbose + self.random_state = random_state + self.max_iter = max_iter + self.dual = dual + self.loss = loss + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like of shape (n_samples,), default=None + Array of weights that are assigned to individual + samples. If not provided, + then each sample is given unit weight. + + .. versionadded:: 0.18 + + Returns + ------- + self : object + An instance of the estimator. + """ + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + dtype=np.float64, + order="C", + accept_large_sparse=False, + ) + penalty = "l2" # SVR only accepts l2 penalty + + _dual = _validate_dual_parameter(self.dual, self.loss, penalty, "ovr", X) + + self.coef_, self.intercept_, n_iter_ = _fit_liblinear( + X, + y, + self.C, + self.fit_intercept, + self.intercept_scaling, + None, + penalty, + _dual, + self.verbose, + self.max_iter, + self.tol, + self.random_state, + loss=self.loss, + epsilon=self.epsilon, + sample_weight=sample_weight, + ) + self.coef_ = self.coef_.ravel() + # Backward compatibility: _fit_liblinear is used both by LinearSVC/R + # and LogisticRegression but LogisticRegression sets a structured + # `n_iter_` attribute with information about the underlying OvR fits + # while LinearSVC/R only reports the maximum value. + self.n_iter_ = n_iter_.max().item() + + return self + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + +class SVC(BaseSVC): + """C-Support Vector Classification. + + The implementation is based on libsvm. The fit time scales at least + quadratically with the number of samples and may be impractical + beyond tens of thousands of samples. 
For large datasets + consider using :class:`~sklearn.svm.LinearSVC` or + :class:`~sklearn.linear_model.SGDClassifier` instead, possibly after a + :class:`~sklearn.kernel_approximation.Nystroem` transformer or + other :ref:`kernel_approximation`. + + The multiclass support is handled according to a one-vs-one scheme. + + For details on the precise mathematical formulation of the provided + kernel functions and how `gamma`, `coef0` and `degree` affect each + other, see the corresponding section in the narrative documentation: + :ref:`svm_kernels`. + + To learn how to tune SVC's hyperparameters, see the following example: + :ref:`sphx_glr_auto_examples_model_selection_plot_nested_cross_validation_iris.py` + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + C : float, default=1.0 + Regularization parameter. The strength of the regularization is + inversely proportional to C. Must be strictly positive. The penalty + is a squared l2 penalty. + + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \ + default='rbf' + Specifies the kernel type to be used in the algorithm. If + none is given, 'rbf' will be used. If a callable is given it is used to + pre-compute the kernel matrix from data matrices; that matrix should be + an array of shape ``(n_samples, n_samples)``. For an intuitive + visualization of different kernel types see + :ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`. + + degree : int, default=3 + Degree of the polynomial kernel function ('poly'). + Must be non-negative. Ignored by all other kernels. + + gamma : {'scale', 'auto'} or float, default='scale' + Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. + + - if ``gamma='scale'`` (default) is passed then it uses + 1 / (n_features * X.var()) as value of gamma, + - if 'auto', uses 1 / n_features + - if float, must be non-negative. + + .. versionchanged:: 0.22 + The default value of ``gamma`` changed from 'auto' to 'scale'. + + coef0 : float, default=0.0 + Independent term in kernel function. + It is only significant in 'poly' and 'sigmoid'. + + shrinking : bool, default=True + Whether to use the shrinking heuristic. + See the :ref:`User Guide `. + + probability : bool, default=False + Whether to enable probability estimates. This must be enabled prior + to calling `fit`, will slow down that method as it internally uses + 5-fold cross-validation, and `predict_proba` may be inconsistent with + `predict`. Read more in the :ref:`User Guide `. + + tol : float, default=1e-3 + Tolerance for stopping criterion. + + cache_size : float, default=200 + Specify the size of the kernel cache (in MB). + + class_weight : dict or 'balanced', default=None + Set the parameter C of class i to class_weight[i]*C for + SVC. If not given, all classes are supposed to have + weight one. + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + verbose : bool, default=False + Enable verbose output. Note that this setting takes advantage of a + per-process runtime setting in libsvm that, if enabled, may not work + properly in a multithreaded context. + + max_iter : int, default=-1 + Hard limit on iterations within solver, or -1 for no limit. 
+ + decision_function_shape : {'ovo', 'ovr'}, default='ovr' + Whether to return a one-vs-rest ('ovr') decision function of shape + (n_samples, n_classes) as all other classifiers, or the original + one-vs-one ('ovo') decision function of libsvm which has shape + (n_samples, n_classes * (n_classes - 1) / 2). However, note that + internally, one-vs-one ('ovo') is always used as a multi-class strategy + to train models; an ovr matrix is only constructed from the ovo matrix. + The parameter is ignored for binary classification. + + .. versionchanged:: 0.19 + decision_function_shape is 'ovr' by default. + + .. versionadded:: 0.17 + *decision_function_shape='ovr'* is recommended. + + .. versionchanged:: 0.17 + Deprecated *decision_function_shape='ovo' and None*. + + break_ties : bool, default=False + If true, ``decision_function_shape='ovr'``, and number of classes > 2, + :term:`predict` will break ties according to the confidence values of + :term:`decision_function`; otherwise the first class among the tied + classes is returned. Please note that breaking ties comes at a + relatively high computational cost compared to a simple predict. + + .. versionadded:: 0.22 + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generation for shuffling the data for + probability estimates. Ignored when `probability` is False. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + class_weight_ : ndarray of shape (n_classes,) + Multipliers of parameter C for each class. + Computed based on the ``class_weight`` parameter. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + coef_ : ndarray of shape (n_classes * (n_classes - 1) / 2, n_features) + Weights assigned to the features (coefficients in the primal + problem). This is only available in the case of a linear kernel. + + `coef_` is a readonly property derived from `dual_coef_` and + `support_vectors_`. + + dual_coef_ : ndarray of shape (n_classes -1, n_SV) + Dual coefficients of the support vector in the decision + function (see :ref:`sgd_mathematical_formulation`), multiplied by + their targets. + For multiclass, coefficient for all 1-vs-1 classifiers. + The layout of the coefficients in the multiclass case is somewhat + non-trivial. See the :ref:`multi-class section of the User Guide + ` for details. + + fit_status_ : int + 0 if correctly fitted, 1 otherwise (will raise warning) + + intercept_ : ndarray of shape (n_classes * (n_classes - 1) / 2,) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : ndarray of shape (n_classes * (n_classes - 1) // 2,) + Number of iterations run by the optimization routine to fit the model. + The shape of this attribute depends on the number of models optimized + which in turn depends on the number of classes. + + .. versionadded:: 1.1 + + support_ : ndarray of shape (n_SV) + Indices of support vectors. + + support_vectors_ : ndarray of shape (n_SV, n_features) + Support vectors. An empty array if kernel is precomputed. + + n_support_ : ndarray of shape (n_classes,), dtype=int32 + Number of support vectors for each class. 
+ + probA_ : ndarray of shape (n_classes * (n_classes - 1) / 2) + probB_ : ndarray of shape (n_classes * (n_classes - 1) / 2) + If `probability=True`, it corresponds to the parameters learned in + Platt scaling to produce probability estimates from decision values. + If `probability=False`, it's an empty array. Platt scaling uses the + logistic function + ``1 / (1 + exp(decision_value * probA_ + probB_))`` + where ``probA_`` and ``probB_`` are learned from the dataset [2]_. For + more information on the multiclass case and training procedure see + section 8 of [1]_. + + shape_fit_ : tuple of int of shape (n_dimensions_of_X,) + Array dimensions of training vector ``X``. + + See Also + -------- + SVR : Support Vector Machine for Regression implemented using libsvm. + + LinearSVC : Scalable Linear Support Vector Machine for classification + implemented using liblinear. Check the See Also section of + LinearSVC for more comparison element. + + References + ---------- + .. [1] `LIBSVM: A Library for Support Vector Machines + `_ + + .. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector + Machines and Comparisons to Regularized Likelihood Methods" + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> y = np.array([1, 1, 2, 2]) + >>> from sklearn.svm import SVC + >>> clf = make_pipeline(StandardScaler(), SVC(gamma='auto')) + >>> clf.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('svc', SVC(gamma='auto'))]) + + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _impl = "c_svc" + + def __init__( + self, + *, + C=1.0, + kernel="rbf", + degree=3, + gamma="scale", + coef0=0.0, + shrinking=True, + probability=False, + tol=1e-3, + cache_size=200, + class_weight=None, + verbose=False, + max_iter=-1, + decision_function_shape="ovr", + break_ties=False, + random_state=None, + ): + super().__init__( + kernel=kernel, + degree=degree, + gamma=gamma, + coef0=coef0, + tol=tol, + C=C, + nu=0.0, + shrinking=shrinking, + probability=probability, + cache_size=cache_size, + class_weight=class_weight, + verbose=verbose, + max_iter=max_iter, + decision_function_shape=decision_function_shape, + break_ties=break_ties, + random_state=random_state, + ) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + +class NuSVC(BaseSVC): + """Nu-Support Vector Classification. + + Similar to SVC but uses a parameter to control the number of support + vectors. + + The implementation is based on libsvm. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + nu : float, default=0.5 + An upper bound on the fraction of margin errors (see :ref:`User Guide + `) and a lower bound of the fraction of support vectors. + Should be in the interval (0, 1]. + + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \ + default='rbf' + Specifies the kernel type to be used in the algorithm. + If none is given, 'rbf' will be used. If a callable is given it is + used to precompute the kernel matrix. For an intuitive + visualization of different kernel types see + :ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`. + + degree : int, default=3 + Degree of the polynomial kernel function ('poly'). + Must be non-negative. Ignored by all other kernels. 
+ + gamma : {'scale', 'auto'} or float, default='scale' + Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. + + - if ``gamma='scale'`` (default) is passed then it uses + 1 / (n_features * X.var()) as value of gamma, + - if 'auto', uses 1 / n_features + - if float, must be non-negative. + + .. versionchanged:: 0.22 + The default value of ``gamma`` changed from 'auto' to 'scale'. + + coef0 : float, default=0.0 + Independent term in kernel function. + It is only significant in 'poly' and 'sigmoid'. + + shrinking : bool, default=True + Whether to use the shrinking heuristic. + See the :ref:`User Guide `. + + probability : bool, default=False + Whether to enable probability estimates. This must be enabled prior + to calling `fit`, will slow down that method as it internally uses + 5-fold cross-validation, and `predict_proba` may be inconsistent with + `predict`. Read more in the :ref:`User Guide `. + + tol : float, default=1e-3 + Tolerance for stopping criterion. + + cache_size : float, default=200 + Specify the size of the kernel cache (in MB). + + class_weight : {dict, 'balanced'}, default=None + Set the parameter C of class i to class_weight[i]*C for + SVC. If not given, all classes are supposed to have + weight one. The "balanced" mode uses the values of y to automatically + adjust weights inversely proportional to class frequencies as + ``n_samples / (n_classes * np.bincount(y))``. + + verbose : bool, default=False + Enable verbose output. Note that this setting takes advantage of a + per-process runtime setting in libsvm that, if enabled, may not work + properly in a multithreaded context. + + max_iter : int, default=-1 + Hard limit on iterations within solver, or -1 for no limit. + + decision_function_shape : {'ovo', 'ovr'}, default='ovr' + Whether to return a one-vs-rest ('ovr') decision function of shape + (n_samples, n_classes) as all other classifiers, or the original + one-vs-one ('ovo') decision function of libsvm which has shape + (n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one + ('ovo') is always used as multi-class strategy. The parameter is + ignored for binary classification. + + .. versionchanged:: 0.19 + decision_function_shape is 'ovr' by default. + + .. versionadded:: 0.17 + *decision_function_shape='ovr'* is recommended. + + .. versionchanged:: 0.17 + Deprecated *decision_function_shape='ovo' and None*. + + break_ties : bool, default=False + If true, ``decision_function_shape='ovr'``, and number of classes > 2, + :term:`predict` will break ties according to the confidence values of + :term:`decision_function`; otherwise the first class among the tied + classes is returned. Please note that breaking ties comes at a + relatively high computational cost compared to a simple predict. + + .. versionadded:: 0.22 + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generation for shuffling the data for + probability estimates. Ignored when `probability` is False. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + class_weight_ : ndarray of shape (n_classes,) + Multipliers of parameter C of each class. + Computed based on the ``class_weight`` parameter. + + classes_ : ndarray of shape (n_classes,) + The unique classes labels. + + coef_ : ndarray of shape (n_classes * (n_classes -1) / 2, n_features) + Weights assigned to the features (coefficients in the primal + problem). This is only available in the case of a linear kernel. 
+ + `coef_` is readonly property derived from `dual_coef_` and + `support_vectors_`. + + dual_coef_ : ndarray of shape (n_classes - 1, n_SV) + Dual coefficients of the support vector in the decision + function (see :ref:`sgd_mathematical_formulation`), multiplied by + their targets. + For multiclass, coefficient for all 1-vs-1 classifiers. + The layout of the coefficients in the multiclass case is somewhat + non-trivial. See the :ref:`multi-class section of the User Guide + ` for details. + + fit_status_ : int + 0 if correctly fitted, 1 if the algorithm did not converge. + + intercept_ : ndarray of shape (n_classes * (n_classes - 1) / 2,) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : ndarray of shape (n_classes * (n_classes - 1) // 2,) + Number of iterations run by the optimization routine to fit the model. + The shape of this attribute depends on the number of models optimized + which in turn depends on the number of classes. + + .. versionadded:: 1.1 + + support_ : ndarray of shape (n_SV,) + Indices of support vectors. + + support_vectors_ : ndarray of shape (n_SV, n_features) + Support vectors. + + n_support_ : ndarray of shape (n_classes,), dtype=int32 + Number of support vectors for each class. + + fit_status_ : int + 0 if correctly fitted, 1 if the algorithm did not converge. + + probA_ : ndarray of shape (n_classes * (n_classes - 1) / 2,) + + probB_ : ndarray of shape (n_classes * (n_classes - 1) / 2,) + If `probability=True`, it corresponds to the parameters learned in + Platt scaling to produce probability estimates from decision values. + If `probability=False`, it's an empty array. Platt scaling uses the + logistic function + ``1 / (1 + exp(decision_value * probA_ + probB_))`` + where ``probA_`` and ``probB_`` are learned from the dataset [2]_. For + more information on the multiclass case and training procedure see + section 8 of [1]_. + + shape_fit_ : tuple of int of shape (n_dimensions_of_X,) + Array dimensions of training vector ``X``. + + See Also + -------- + SVC : Support Vector Machine for classification using libsvm. + + LinearSVC : Scalable linear Support Vector Machine for classification using + liblinear. + + References + ---------- + .. [1] `LIBSVM: A Library for Support Vector Machines + `_ + + .. [2] `Platt, John (1999). 
"Probabilistic Outputs for Support Vector + Machines and Comparisons to Regularized Likelihood Methods" + `_ + + Examples + -------- + >>> import numpy as np + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> y = np.array([1, 1, 2, 2]) + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.svm import NuSVC + >>> clf = make_pipeline(StandardScaler(), NuSVC()) + >>> clf.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), ('nusvc', NuSVC())]) + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _impl = "nu_svc" + + _parameter_constraints: dict = { + **BaseSVC._parameter_constraints, + "nu": [Interval(Real, 0.0, 1.0, closed="right")], + } + _parameter_constraints.pop("C") + + def __init__( + self, + *, + nu=0.5, + kernel="rbf", + degree=3, + gamma="scale", + coef0=0.0, + shrinking=True, + probability=False, + tol=1e-3, + cache_size=200, + class_weight=None, + verbose=False, + max_iter=-1, + decision_function_shape="ovr", + break_ties=False, + random_state=None, + ): + super().__init__( + kernel=kernel, + degree=degree, + gamma=gamma, + coef0=coef0, + tol=tol, + C=0.0, + nu=nu, + shrinking=shrinking, + probability=probability, + cache_size=cache_size, + class_weight=class_weight, + verbose=verbose, + max_iter=max_iter, + decision_function_shape=decision_function_shape, + break_ties=break_ties, + random_state=random_state, + ) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_methods_subset_invariance": ( + "fails for the decision_function method" + ), + "check_class_weight_classifiers": "class_weight is ignored.", + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + "check_classifiers_one_label_sample_weights": ( + "specified nu is infeasible for the fit." + ), + } + } + + +class SVR(RegressorMixin, BaseLibSVM): + """Epsilon-Support Vector Regression. + + The free parameters in the model are C and epsilon. + + The implementation is based on libsvm. The fit time complexity + is more than quadratic with the number of samples which makes it hard + to scale to datasets with more than a couple of 10000 samples. For large + datasets consider using :class:`~sklearn.svm.LinearSVR` or + :class:`~sklearn.linear_model.SGDRegressor` instead, possibly after a + :class:`~sklearn.kernel_approximation.Nystroem` transformer or + other :ref:`kernel_approximation`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \ + default='rbf' + Specifies the kernel type to be used in the algorithm. + If none is given, 'rbf' will be used. If a callable is given it is + used to precompute the kernel matrix. + + degree : int, default=3 + Degree of the polynomial kernel function ('poly'). + Must be non-negative. Ignored by all other kernels. + + gamma : {'scale', 'auto'} or float, default='scale' + Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. + + - if ``gamma='scale'`` (default) is passed then it uses + 1 / (n_features * X.var()) as value of gamma, + - if 'auto', uses 1 / n_features + - if float, must be non-negative. + + .. versionchanged:: 0.22 + The default value of ``gamma`` changed from 'auto' to 'scale'. + + coef0 : float, default=0.0 + Independent term in kernel function. + It is only significant in 'poly' and 'sigmoid'. + + tol : float, default=1e-3 + Tolerance for stopping criterion. + + C : float, default=1.0 + Regularization parameter. 
The strength of the regularization is + inversely proportional to C. Must be strictly positive. + The penalty is a squared l2 penalty. + + epsilon : float, default=0.1 + Epsilon in the epsilon-SVR model. It specifies the epsilon-tube + within which no penalty is associated in the training loss function + with points predicted within a distance epsilon from the actual + value. Must be non-negative. + + shrinking : bool, default=True + Whether to use the shrinking heuristic. + See the :ref:`User Guide `. + + cache_size : float, default=200 + Specify the size of the kernel cache (in MB). + + verbose : bool, default=False + Enable verbose output. Note that this setting takes advantage of a + per-process runtime setting in libsvm that, if enabled, may not work + properly in a multithreaded context. + + max_iter : int, default=-1 + Hard limit on iterations within solver, or -1 for no limit. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) + Weights assigned to the features (coefficients in the primal + problem). This is only available in the case of a linear kernel. + + `coef_` is readonly property derived from `dual_coef_` and + `support_vectors_`. + + dual_coef_ : ndarray of shape (1, n_SV) + Coefficients of the support vector in the decision function. + + fit_status_ : int + 0 if correctly fitted, 1 otherwise (will raise warning) + + intercept_ : ndarray of shape (1,) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run by the optimization routine to fit the model. + + .. versionadded:: 1.1 + + n_support_ : ndarray of shape (1,), dtype=int32 + Number of support vectors. + + shape_fit_ : tuple of int of shape (n_dimensions_of_X,) + Array dimensions of training vector ``X``. + + support_ : ndarray of shape (n_SV,) + Indices of support vectors. + + support_vectors_ : ndarray of shape (n_SV, n_features) + Support vectors. + + See Also + -------- + NuSVR : Support Vector Machine for regression implemented using libsvm + using a parameter to control the number of support vectors. + + LinearSVR : Scalable Linear Support Vector Machine for regression + implemented using liblinear. + + References + ---------- + .. [1] `LIBSVM: A Library for Support Vector Machines + `_ + + .. [2] `Platt, John (1999). 
"Probabilistic Outputs for Support Vector + Machines and Comparisons to Regularized Likelihood Methods" + `_ + + Examples + -------- + >>> from sklearn.svm import SVR + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> regr = make_pipeline(StandardScaler(), SVR(C=1.0, epsilon=0.2)) + >>> regr.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('svr', SVR(epsilon=0.2))]) + """ + + _impl = "epsilon_svr" + + _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints} + for unused_param in ["class_weight", "nu", "probability", "random_state"]: + _parameter_constraints.pop(unused_param) + + def __init__( + self, + *, + kernel="rbf", + degree=3, + gamma="scale", + coef0=0.0, + tol=1e-3, + C=1.0, + epsilon=0.1, + shrinking=True, + cache_size=200, + verbose=False, + max_iter=-1, + ): + super().__init__( + kernel=kernel, + degree=degree, + gamma=gamma, + coef0=coef0, + tol=tol, + C=C, + nu=0.0, + epsilon=epsilon, + verbose=verbose, + shrinking=shrinking, + probability=False, + cache_size=cache_size, + class_weight=None, + max_iter=max_iter, + random_state=None, + ) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + +class NuSVR(RegressorMixin, BaseLibSVM): + """Nu Support Vector Regression. + + Similar to NuSVC, for regression, uses a parameter nu to control + the number of support vectors. However, unlike NuSVC, where nu + replaces C, here nu replaces the parameter epsilon of epsilon-SVR. + + The implementation is based on libsvm. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + nu : float, default=0.5 + An upper bound on the fraction of training errors and a lower bound of + the fraction of support vectors. Should be in the interval (0, 1]. By + default 0.5 will be taken. + + C : float, default=1.0 + Penalty parameter C of the error term. + + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \ + default='rbf' + Specifies the kernel type to be used in the algorithm. + If none is given, 'rbf' will be used. If a callable is given it is + used to precompute the kernel matrix. + + degree : int, default=3 + Degree of the polynomial kernel function ('poly'). + Must be non-negative. Ignored by all other kernels. + + gamma : {'scale', 'auto'} or float, default='scale' + Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. + + - if ``gamma='scale'`` (default) is passed then it uses + 1 / (n_features * X.var()) as value of gamma, + - if 'auto', uses 1 / n_features + - if float, must be non-negative. + + .. versionchanged:: 0.22 + The default value of ``gamma`` changed from 'auto' to 'scale'. + + coef0 : float, default=0.0 + Independent term in kernel function. + It is only significant in 'poly' and 'sigmoid'. + + shrinking : bool, default=True + Whether to use the shrinking heuristic. + See the :ref:`User Guide `. + + tol : float, default=1e-3 + Tolerance for stopping criterion. + + cache_size : float, default=200 + Specify the size of the kernel cache (in MB). + + verbose : bool, default=False + Enable verbose output. Note that this setting takes advantage of a + per-process runtime setting in libsvm that, if enabled, may not work + properly in a multithreaded context. 
+ + max_iter : int, default=-1 + Hard limit on iterations within solver, or -1 for no limit. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) + Weights assigned to the features (coefficients in the primal + problem). This is only available in the case of a linear kernel. + + `coef_` is readonly property derived from `dual_coef_` and + `support_vectors_`. + + dual_coef_ : ndarray of shape (1, n_SV) + Coefficients of the support vector in the decision function. + + fit_status_ : int + 0 if correctly fitted, 1 otherwise (will raise warning) + + intercept_ : ndarray of shape (1,) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run by the optimization routine to fit the model. + + .. versionadded:: 1.1 + + n_support_ : ndarray of shape (1,), dtype=int32 + Number of support vectors. + + shape_fit_ : tuple of int of shape (n_dimensions_of_X,) + Array dimensions of training vector ``X``. + + support_ : ndarray of shape (n_SV,) + Indices of support vectors. + + support_vectors_ : ndarray of shape (n_SV, n_features) + Support vectors. + + See Also + -------- + NuSVC : Support Vector Machine for classification implemented with libsvm + with a parameter to control the number of support vectors. + + SVR : Epsilon Support Vector Machine for regression implemented with + libsvm. + + References + ---------- + .. [1] `LIBSVM: A Library for Support Vector Machines + `_ + + .. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector + Machines and Comparisons to Regularized Likelihood Methods" + `_ + + Examples + -------- + >>> from sklearn.svm import NuSVR + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> np.random.seed(0) + >>> y = np.random.randn(n_samples) + >>> X = np.random.randn(n_samples, n_features) + >>> regr = make_pipeline(StandardScaler(), NuSVR(C=1.0, nu=0.1)) + >>> regr.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('nusvr', NuSVR(nu=0.1))]) + """ + + _impl = "nu_svr" + + _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints} + for unused_param in ["class_weight", "epsilon", "probability", "random_state"]: + _parameter_constraints.pop(unused_param) + + def __init__( + self, + *, + nu=0.5, + C=1.0, + kernel="rbf", + degree=3, + gamma="scale", + coef0=0.0, + shrinking=True, + tol=1e-3, + cache_size=200, + verbose=False, + max_iter=-1, + ): + super().__init__( + kernel=kernel, + degree=degree, + gamma=gamma, + coef0=coef0, + tol=tol, + C=C, + nu=nu, + epsilon=0.0, + shrinking=shrinking, + probability=False, + cache_size=cache_size, + class_weight=None, + verbose=verbose, + max_iter=max_iter, + random_state=None, + ) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + +class OneClassSVM(OutlierMixin, BaseLibSVM): + """Unsupervised Outlier Detection. + + Estimate the support of a high-dimensional distribution. + + The implementation is based on libsvm. + + Read more in the :ref:`User Guide `. 
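# --- Editor's note: an illustrative sketch, not part of the library source. ---
# For the libsvm-based estimators documented above, gamma='scale' resolves to
# 1 / (n_features * X.var()). The random data is made up for demonstration,
# and `_gamma` is a private attribute of this vendored version, read here only
# to illustrate the formula.
import numpy as np
from sklearn.svm import NuSVR

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
y = rng.randn(50)
reg = NuSVR(gamma="scale").fit(X, y)
assert np.isclose(reg._gamma, 1.0 / (X.shape[1] * X.var()))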
+ + Parameters + ---------- + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \ + default='rbf' + Specifies the kernel type to be used in the algorithm. + If none is given, 'rbf' will be used. If a callable is given it is + used to precompute the kernel matrix. + + degree : int, default=3 + Degree of the polynomial kernel function ('poly'). + Must be non-negative. Ignored by all other kernels. + + gamma : {'scale', 'auto'} or float, default='scale' + Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. + + - if ``gamma='scale'`` (default) is passed then it uses + 1 / (n_features * X.var()) as value of gamma, + - if 'auto', uses 1 / n_features + - if float, must be non-negative. + + .. versionchanged:: 0.22 + The default value of ``gamma`` changed from 'auto' to 'scale'. + + coef0 : float, default=0.0 + Independent term in kernel function. + It is only significant in 'poly' and 'sigmoid'. + + tol : float, default=1e-3 + Tolerance for stopping criterion. + + nu : float, default=0.5 + An upper bound on the fraction of training + errors and a lower bound of the fraction of support + vectors. Should be in the interval (0, 1]. By default 0.5 + will be taken. + + shrinking : bool, default=True + Whether to use the shrinking heuristic. + See the :ref:`User Guide `. + + cache_size : float, default=200 + Specify the size of the kernel cache (in MB). + + verbose : bool, default=False + Enable verbose output. Note that this setting takes advantage of a + per-process runtime setting in libsvm that, if enabled, may not work + properly in a multithreaded context. + + max_iter : int, default=-1 + Hard limit on iterations within solver, or -1 for no limit. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) + Weights assigned to the features (coefficients in the primal + problem). This is only available in the case of a linear kernel. + + `coef_` is readonly property derived from `dual_coef_` and + `support_vectors_`. + + dual_coef_ : ndarray of shape (1, n_SV) + Coefficients of the support vectors in the decision function. + + fit_status_ : int + 0 if correctly fitted, 1 otherwise (will raise warning) + + intercept_ : ndarray of shape (1,) + Constant in the decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run by the optimization routine to fit the model. + + .. versionadded:: 1.1 + + n_support_ : ndarray of shape (n_classes,), dtype=int32 + Number of support vectors for each class. + + offset_ : float + Offset used to define the decision function from the raw scores. + We have the relation: decision_function = score_samples - `offset_`. + The offset is the opposite of `intercept_` and is provided for + consistency with other outlier detection algorithms. + + .. versionadded:: 0.20 + + shape_fit_ : tuple of int of shape (n_dimensions_of_X,) + Array dimensions of training vector ``X``. + + support_ : ndarray of shape (n_SV,) + Indices of support vectors. + + support_vectors_ : ndarray of shape (n_SV, n_features) + Support vectors. + + See Also + -------- + sklearn.linear_model.SGDOneClassSVM : Solves linear One-Class SVM using + Stochastic Gradient Descent. + sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection using + Local Outlier Factor (LOF). 
+ sklearn.ensemble.IsolationForest : Isolation Forest Algorithm. + + Examples + -------- + >>> from sklearn.svm import OneClassSVM + >>> X = [[0], [0.44], [0.45], [0.46], [1]] + >>> clf = OneClassSVM(gamma='auto').fit(X) + >>> clf.predict(X) + array([-1, 1, 1, 1, -1]) + >>> clf.score_samples(X) + array([1.7798..., 2.0547..., 2.0556..., 2.0561..., 1.7332...]) + """ + + _impl = "one_class" + + _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints} + for unused_param in ["C", "class_weight", "epsilon", "probability", "random_state"]: + _parameter_constraints.pop(unused_param) + + def __init__( + self, + *, + kernel="rbf", + degree=3, + gamma="scale", + coef0=0.0, + tol=1e-3, + nu=0.5, + shrinking=True, + cache_size=200, + verbose=False, + max_iter=-1, + ): + super().__init__( + kernel, + degree, + gamma, + coef0, + tol, + 0.0, + nu, + 0.0, + shrinking, + False, + cache_size, + None, + verbose, + max_iter, + random_state=None, + ) + + def fit(self, X, y=None, sample_weight=None): + """Detect the soft boundary of the set of samples X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Set of samples, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Per-sample weights. Rescale C per sample. Higher weights + force the classifier to put more emphasis on these points. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + If X is not a C-ordered contiguous array it is copied. + """ + super().fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight) + self.offset_ = -self._intercept_ + return self + + def decision_function(self, X): + """Signed distance to the separating hyperplane. + + Signed distance is positive for an inlier and negative for an outlier. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + dec : ndarray of shape (n_samples,) + Returns the decision function of the samples. + """ + dec = self._decision_function(X).ravel() + return dec + + def score_samples(self, X): + """Raw scoring function of the samples. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + score_samples : ndarray of shape (n_samples,) + Returns the (unshifted) scoring function of the samples. + """ + return self.decision_function(X) + self.offset_ + + def predict(self, X): + """Perform classification on samples in X. + + For a one-class model, +1 or -1 is returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples_test, n_samples_train) + For kernel="precomputed", the expected shape of X is + (n_samples_test, n_samples_train). + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Class labels for samples in X. 
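# --- Editor's note: an illustrative sketch, not part of the library source. ---
# As stated in the OneClassSVM docstrings above, decision_function equals
# score_samples minus offset_, and predict returns +1 for inliers and -1 for
# outliers. The toy points are the same ones used in the class docstring.
import numpy as np
from sklearn.svm import OneClassSVM

X = [[0], [0.44], [0.45], [0.46], [1]]
clf = OneClassSVM(gamma="auto").fit(X)
assert np.allclose(clf.decision_function(X), clf.score_samples(X) - clf.offset_)
assert set(np.unique(clf.predict(X))) <= {-1, 1}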
+ """ + y = super().predict(X) + return np.asarray(y, dtype=np.intp) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9ce1513a4dce3cec377fdd6816edb7e8dc9b889d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9746860ccffde396da07680ab20d6cd78768bf26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ac62d63665d36a72cf79dac40c43b42a29ae7d8e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..efef1d1e500d6fc4de114fdbfd3ddd1c61a403e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21d4e25e6f73412c4f0acbb49db642489e238951 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc36da0f8130c4e74fdfa1dcb2c59e291d20b18b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cd62df0e4cc9d69e9340688c2415584cae76bac Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_svm.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_svm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc8e8f81fa1acd255f3ffff8d966a018e89c4f0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/svm/tests/__pycache__/test_svm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py b/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf88dde42aa0352e066692d9f7e6480f5e7c4ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_bounds.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest +from scipy import stats + +from sklearn.linear_model import LogisticRegression +from sklearn.svm import LinearSVC +from sklearn.svm._bounds import l1_min_c +from sklearn.svm._newrand import bounded_rand_int_wrap, set_seed_wrap +from sklearn.utils.fixes import CSR_CONTAINERS + +dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]] + +Y1 = [0, 1, 1, 1] +Y2 = [2, 1, 0, 0] + + +@pytest.mark.parametrize("X_container", CSR_CONTAINERS + [np.array]) +@pytest.mark.parametrize("loss", ["squared_hinge", "log"]) +@pytest.mark.parametrize("Y_label", ["two-classes", "multi-class"]) +@pytest.mark.parametrize("intercept_label", ["no-intercept", "fit-intercept"]) +def test_l1_min_c(X_container, loss, Y_label, intercept_label): + Ys = {"two-classes": Y1, "multi-class": Y2} + intercepts = { + "no-intercept": {"fit_intercept": False}, + "fit-intercept": {"fit_intercept": True, "intercept_scaling": 10}, + } + + X = X_container(dense_X) + Y = Ys[Y_label] + intercept_params = intercepts[intercept_label] + check_l1_min_c(X, Y, loss, **intercept_params) + + +def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=1.0): + min_c = l1_min_c( + X, + y, + loss=loss, + fit_intercept=fit_intercept, + intercept_scaling=intercept_scaling, + ) + + clf = { + "log": LogisticRegression(penalty="l1", solver="liblinear"), + "squared_hinge": LinearSVC(loss="squared_hinge", penalty="l1", dual=False), + }[loss] + + clf.fit_intercept = fit_intercept + clf.intercept_scaling = intercept_scaling + + clf.C = min_c + clf.fit(X, y) + assert (np.asarray(clf.coef_) == 0).all() + assert (np.asarray(clf.intercept_) == 0).all() + + clf.C = min_c * 1.01 + clf.fit(X, y) + assert (np.asarray(clf.coef_) != 0).any() or (np.asarray(clf.intercept_) != 0).any() + + +def test_ill_posed_min_c(): + X = [[0, 0], [0, 0]] + y = [0, 1] + with pytest.raises(ValueError): + l1_min_c(X, y) + + +_MAX_UNSIGNED_INT = 4294967295 + + +def test_newrand_default(): + """Test that bounded_rand_int_wrap without seeding respects the range + + Note this test should pass either if executed alone, or in conjunctions + with other tests that call set_seed explicit in any order: it checks + invariants on the RNG instead of specific values. 
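+ Concretely, the draws below must all fall in [0, 100) and must not
+ collapse to a single repeated value.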
+ """ + generated = [bounded_rand_int_wrap(100) for _ in range(10)] + assert all(0 <= x < 100 for x in generated) + assert not all(x == generated[0] for x in generated) + + +@pytest.mark.parametrize("seed, expected", [(0, 54), (_MAX_UNSIGNED_INT, 9)]) +def test_newrand_set_seed(seed, expected): + """Test that `set_seed` produces deterministic results""" + set_seed_wrap(seed) + generated = bounded_rand_int_wrap(100) + assert generated == expected + + +@pytest.mark.parametrize("seed", [-1, _MAX_UNSIGNED_INT + 1]) +def test_newrand_set_seed_overflow(seed): + """Test that `set_seed_wrap` is defined for unsigned 32bits ints""" + with pytest.raises(OverflowError): + set_seed_wrap(seed) + + +@pytest.mark.parametrize("range_, n_pts", [(_MAX_UNSIGNED_INT, 10000), (100, 25)]) +def test_newrand_bounded_rand_int(range_, n_pts): + """Test that `bounded_rand_int` follows a uniform distribution""" + # XXX: this test is very seed sensitive: either it is wrong (too strict?) + # or the wrapped RNG is not uniform enough, at least on some platforms. + set_seed_wrap(42) + n_iter = 100 + ks_pvals = [] + uniform_dist = stats.uniform(loc=0, scale=range_) + # perform multiple samplings to make chance of outlier sampling negligible + for _ in range(n_iter): + # Deterministic random sampling + sample = [bounded_rand_int_wrap(range_) for _ in range(n_pts)] + res = stats.kstest(sample, uniform_dist.cdf) + ks_pvals.append(res.pvalue) + # Null hypothesis = samples come from an uniform distribution. + # Under the null hypothesis, p-values should be uniformly distributed + # and not concentrated on low values + # (this may seem counter-intuitive but is backed by multiple refs) + # So we can do two checks: + + # (1) check uniformity of p-values + uniform_p_vals_dist = stats.uniform(loc=0, scale=1) + res_pvals = stats.kstest(ks_pvals, uniform_p_vals_dist.cdf) + assert res_pvals.pvalue > 0.05, ( + "Null hypothesis rejected: generated random numbers are not uniform." + " Details: the (meta) p-value of the test of uniform distribution" + f" of p-values is {res_pvals.pvalue} which is not > 0.05" + ) + + # (2) (safety belt) check that 90% of p-values are above 0.05 + min_10pct_pval = np.percentile(ks_pvals, q=10) + # lower 10th quantile pvalue <= 0.05 means that the test rejects the + # null hypothesis that the sample came from the uniform distribution + assert min_10pct_pval > 0.05, ( + "Null hypothesis rejected: generated random numbers are not uniform. " + f"Details: lower 10th quantile p-value of {min_10pct_pval} not > 0.05." 
+ ) + + +@pytest.mark.parametrize("range_", [-1, _MAX_UNSIGNED_INT + 1]) +def test_newrand_bounded_rand_int_limits(range_): + """Test that `bounded_rand_int_wrap` is defined for unsigned 32bits ints""" + with pytest.raises(OverflowError): + bounded_rand_int_wrap(range_) diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py b/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e517fdce8932c8161832eede7dd1aaef2a2108 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_sparse.py @@ -0,0 +1,493 @@ +import numpy as np +import pytest +from scipy import sparse + +from sklearn import base, datasets, linear_model, svm +from sklearn.datasets import load_digits, make_blobs, make_classification +from sklearn.exceptions import ConvergenceWarning +from sklearn.svm.tests import test_svm +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, + skip_if_32bit, +) +from sklearn.utils.extmath import safe_sparse_dot +from sklearn.utils.fixes import ( + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +# test sample 1 +X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) +Y = [1, 1, 1, 2, 2, 2] +T = np.array([[-1, -1], [2, 2], [3, 2]]) +true_result = [1, 2, 2] + +# test sample 2 +X2 = np.array( + [ + [0, 0, 0], + [1, 1, 1], + [2, 0, 0], + [0, 0, 2], + [3, 3, 3], + ] +) +Y2 = [1, 2, 2, 2, 3] +T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]]) +true_result2 = [1, 2, 3] + +iris = datasets.load_iris() +rng = np.random.RandomState(0) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + +X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0) + + +def check_svm_model_equal(dense_svm, X_train, y_train, X_test): + # Use the original svm model for dense fit and clone an exactly same + # svm model for sparse fit + sparse_svm = base.clone(dense_svm) + + dense_svm.fit(X_train.toarray(), y_train) + if sparse.issparse(X_test): + X_test_dense = X_test.toarray() + else: + X_test_dense = X_test + sparse_svm.fit(X_train, y_train) + assert sparse.issparse(sparse_svm.support_vectors_) + assert sparse.issparse(sparse_svm.dual_coef_) + assert_allclose(dense_svm.support_vectors_, sparse_svm.support_vectors_.toarray()) + assert_allclose(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray()) + if dense_svm.kernel == "linear": + assert sparse.issparse(sparse_svm.coef_) + assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray()) + assert_allclose(dense_svm.support_, sparse_svm.support_) + assert_allclose(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test)) + + assert_array_almost_equal( + dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test) + ) + assert_array_almost_equal( + dense_svm.decision_function(X_test_dense), + sparse_svm.decision_function(X_test_dense), + ) + if isinstance(dense_svm, svm.OneClassSVM): + msg = "cannot use sparse input in 'OneClassSVM' trained on dense data" + else: + assert_array_almost_equal( + dense_svm.predict_proba(X_test_dense), + sparse_svm.predict_proba(X_test), + decimal=4, + ) + msg = "cannot use sparse input in 'SVC' trained on dense data" + if sparse.issparse(X_test): + with pytest.raises(ValueError, match=msg): + dense_svm.predict(X_test) + + +@skip_if_32bit +@pytest.mark.parametrize( + "X_train, y_train, X_test", + [ + [X, Y, T], + [X2, Y2, T2], + 
[X_blobs[:80], y_blobs[:80], X_blobs[80:]], + [iris.data, iris.target, iris.data], + ], +) +@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS) +def test_svc(X_train, y_train, X_test, kernel, sparse_container): + """Check that sparse SVC gives the same result as SVC.""" + X_train = sparse_container(X_train) + + clf = svm.SVC( + gamma=1, + kernel=kernel, + probability=True, + random_state=0, + decision_function_shape="ovo", + ) + check_svm_model_equal(clf, X_train, y_train, X_test) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_unsorted_indices(csr_container): + # test that the result with sorted and unsorted indices in csr is the same + # we use a subset of digits as iris, blobs or make_classification didn't + # show the problem + X, y = load_digits(return_X_y=True) + X_test = csr_container(X[50:100]) + X, y = X[:50], y[:50] + + X_sparse = csr_container(X) + coef_dense = ( + svm.SVC(kernel="linear", probability=True, random_state=0).fit(X, y).coef_ + ) + sparse_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit( + X_sparse, y + ) + coef_sorted = sparse_svc.coef_ + # make sure dense and sparse SVM give the same result + assert_allclose(coef_dense, coef_sorted.toarray()) + + # reverse each row's indices + def scramble_indices(X): + new_data = [] + new_indices = [] + for i in range(1, len(X.indptr)): + row_slice = slice(*X.indptr[i - 1 : i + 1]) + new_data.extend(X.data[row_slice][::-1]) + new_indices.extend(X.indices[row_slice][::-1]) + return csr_container((new_data, new_indices, X.indptr), shape=X.shape) + + X_sparse_unsorted = scramble_indices(X_sparse) + X_test_unsorted = scramble_indices(X_test) + + assert not X_sparse_unsorted.has_sorted_indices + assert not X_test_unsorted.has_sorted_indices + + unsorted_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit( + X_sparse_unsorted, y + ) + coef_unsorted = unsorted_svc.coef_ + # make sure unsorted indices give same result + assert_allclose(coef_unsorted.toarray(), coef_sorted.toarray()) + assert_allclose( + sparse_svc.predict_proba(X_test_unsorted), sparse_svc.predict_proba(X_test) + ) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_svc_with_custom_kernel(lil_container): + def kfunc(x, y): + return safe_sparse_dot(x, y.T) + + X_sp = lil_container(X) + clf_lin = svm.SVC(kernel="linear").fit(X_sp, Y) + clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y) + assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp)) + + +@skip_if_32bit +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf"]) +def test_svc_iris(csr_container, kernel): + # Test the sparse SVC with the iris dataset + iris_data_sp = csr_container(iris.data) + + sp_clf = svm.SVC(kernel=kernel).fit(iris_data_sp, iris.target) + clf = svm.SVC(kernel=kernel).fit(iris.data, iris.target) + + assert_allclose(clf.support_vectors_, sp_clf.support_vectors_.toarray()) + assert_allclose(clf.dual_coef_, sp_clf.dual_coef_.toarray()) + assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp)) + if kernel == "linear": + assert_allclose(clf.coef_, sp_clf.coef_.toarray()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_decision_function(csr_container): + # Test decision_function + + # Sanity check, test that decision_function implemented in python + # returns the same as the one in libsvm + + # multi class: + 
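+ # For the linear ovo case below, the decision values are recomputed by hand
+ # as safe_sparse_dot(X, clf.coef_.T) + clf.intercept_ and compared against
+ # libsvm's decision_function on the sparse input.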
iris_data_sp = csr_container(iris.data) + svc = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo") + clf = svc.fit(iris_data_sp, iris.target) + + dec = safe_sparse_dot(iris_data_sp, clf.coef_.T) + clf.intercept_ + + assert_allclose(dec, clf.decision_function(iris_data_sp)) + + # binary: + clf.fit(X, Y) + dec = np.dot(X, clf.coef_.T) + clf.intercept_ + prediction = clf.predict(X) + assert_allclose(dec.ravel(), clf.decision_function(X)) + assert_allclose( + prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()] + ) + expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0]) + assert_array_almost_equal(clf.decision_function(X), expected, decimal=2) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_error(lil_container): + # Test that it gives proper exception on deficient input + clf = svm.SVC() + X_sp = lil_container(X) + + Y2 = Y[:-1] # wrong dimensions for labels + with pytest.raises(ValueError): + clf.fit(X_sp, Y2) + + clf.fit(X_sp, Y) + assert_array_equal(clf.predict(T), true_result) + + +@pytest.mark.parametrize( + "lil_container, dok_container", zip(LIL_CONTAINERS, DOK_CONTAINERS) +) +def test_linearsvc(lil_container, dok_container): + # Similar to test_SVC + X_sp = lil_container(X) + X2_sp = dok_container(X2) + + clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y) + sp_clf = svm.LinearSVC(dual="auto", random_state=0).fit(X_sp, Y) + + assert sp_clf.fit_intercept + + assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) + assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) + + assert_allclose(clf.predict(X), sp_clf.predict(X_sp)) + + clf.fit(X2, Y2) + sp_clf.fit(X2_sp, Y2) + + assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) + assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_linearsvc_iris(csr_container): + # Test the sparse LinearSVC with the iris dataset + iris_data_sp = csr_container(iris.data) + + sp_clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris_data_sp, iris.target) + clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, iris.target) + + assert clf.fit_intercept == sp_clf.fit_intercept + + assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1) + assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1) + assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp)) + + # check decision_function + pred = np.argmax(sp_clf.decision_function(iris_data_sp), axis=1) + assert_allclose(pred, clf.predict(iris.data)) + + # sparsify the coefficients on both models and check that they still + # produce the same results + clf.sparsify() + assert_array_equal(pred, clf.predict(iris_data_sp)) + sp_clf.sparsify() + assert_array_equal(pred, sp_clf.predict(iris_data_sp)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_weight(csr_container): + # Test class weights + X_, y_ = make_classification( + n_samples=200, n_features=100, weights=[0.833, 0.167], random_state=0 + ) + + X_ = csr_container(X_) + for clf in ( + linear_model.LogisticRegression(), + svm.LinearSVC(dual="auto", random_state=0), + svm.SVC(), + ): + clf.set_params(class_weight={0: 5}) + clf.fit(X_[:180], y_[:180]) + y_pred = clf.predict(X_[180:]) + assert np.sum(y_pred == y_[180:]) >= 11 + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_sample_weights(lil_container): + # Test weights on individual samples + X_sp = lil_container(X) + + clf = 
svm.SVC() + clf.fit(X_sp, Y) + assert_array_equal(clf.predict([X[2]]), [1.0]) + + sample_weight = [0.1] * 3 + [10] * 3 + clf.fit(X_sp, Y, sample_weight=sample_weight) + assert_array_equal(clf.predict([X[2]]), [2.0]) + + +def test_sparse_liblinear_intercept_handling(): + # Test that sparse liblinear honours intercept_scaling param + test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC) + + +@pytest.mark.parametrize( + "X_train, y_train, X_test", + [ + [X, None, T], + [X2, None, T2], + [X_blobs[:80], None, X_blobs[80:]], + [iris.data, None, iris.data], + ], +) +@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS) +@skip_if_32bit +def test_sparse_oneclasssvm(X_train, y_train, X_test, kernel, sparse_container): + # Check that sparse OneClassSVM gives the same result as dense OneClassSVM + X_train = sparse_container(X_train) + + clf = svm.OneClassSVM(gamma=1, kernel=kernel) + check_svm_model_equal(clf, X_train, y_train, X_test) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_realdata(csr_container): + # Test on a subset from the 20newsgroups dataset. + # This catches some bugs if input is not correctly converted into + # sparse format or weights are not correctly initialized. + data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069]) + + # SVC does not support large sparse, so we specify int32 indices + # In this case, `csr_matrix` automatically uses int32 regardless of the dtypes of + # `indices` and `indptr` but `csr_array` may or may not use the same dtype as + # `indices` and `indptr`, which would be int64 if not specified + indices = np.array([6, 5, 35, 31], dtype=np.int32) + indptr = np.array([0] * 8 + [1] * 32 + [2] * 38 + [4] * 3, dtype=np.int32) + + X = csr_container((data, indices, indptr)) + y = np.array( + [ + 1.0, + 0.0, + 2.0, + 2.0, + 1.0, + 1.0, + 1.0, + 2.0, + 2.0, + 0.0, + 1.0, + 2.0, + 2.0, + 0.0, + 2.0, + 0.0, + 3.0, + 0.0, + 3.0, + 0.0, + 1.0, + 1.0, + 3.0, + 2.0, + 3.0, + 2.0, + 0.0, + 3.0, + 1.0, + 0.0, + 2.0, + 1.0, + 2.0, + 0.0, + 1.0, + 0.0, + 2.0, + 3.0, + 1.0, + 3.0, + 0.0, + 1.0, + 0.0, + 0.0, + 2.0, + 0.0, + 1.0, + 2.0, + 2.0, + 2.0, + 3.0, + 2.0, + 0.0, + 3.0, + 2.0, + 1.0, + 2.0, + 3.0, + 2.0, + 2.0, + 0.0, + 1.0, + 0.0, + 1.0, + 2.0, + 3.0, + 0.0, + 0.0, + 2.0, + 2.0, + 1.0, + 3.0, + 1.0, + 1.0, + 0.0, + 1.0, + 2.0, + 1.0, + 1.0, + 3.0, + ] + ) + + clf = svm.SVC(kernel="linear").fit(X.toarray(), y) + sp_clf = svm.SVC(kernel="linear").fit(X.tocoo(), y) + + assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) + assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_sparse_svc_clone_with_callable_kernel(lil_container): + # Test that the "dense_fit" is called even though we use sparse input + # meaning that everything works fine. 
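+ # Outline of the check: clone the callable-kernel SVC, fit the clone on the
+ # LIL container, and verify that its predictions match a dense fit below.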
+ a = svm.SVC(C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0) + b = base.clone(a) + + X_sp = lil_container(X) + b.fit(X_sp, Y) + pred = b.predict(X_sp) + b.predict_proba(X_sp) + + dense_svm = svm.SVC( + C=1, kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0 + ) + pred_dense = dense_svm.fit(X, Y).predict(X) + assert_array_equal(pred_dense, pred) + # b.decision_function(X_sp) # XXX : should be supported + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_timeout(lil_container): + sp = svm.SVC( + C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0, max_iter=1 + ) + warning_msg = ( + r"Solver terminated early \(max_iter=1\). Consider pre-processing " + r"your data with StandardScaler or MinMaxScaler." + ) + with pytest.warns(ConvergenceWarning, match=warning_msg): + sp.fit(lil_container(X), Y) + + +def test_consistent_proba(): + a = svm.SVC(probability=True, max_iter=1, random_state=0) + with ignore_warnings(category=ConvergenceWarning): + proba_1 = a.fit(X, Y).predict_proba(X) + a = svm.SVC(probability=True, max_iter=1, random_state=0) + with ignore_warnings(category=ConvergenceWarning): + proba_2 = a.fit(X, Y).predict_proba(X) + assert_allclose(proba_1, proba_2) diff --git a/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py b/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py new file mode 100644 index 0000000000000000000000000000000000000000..e1c6e36af28fb5da770fe6aa77191ec76a66b99b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/svm/tests/test_svm.py @@ -0,0 +1,1434 @@ +""" +Testing for Support Vector Machine module (sklearn.svm) + +TODO: remove hard coded numerical results when possible +""" +import re + +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +from sklearn import base, datasets, linear_model, metrics, svm +from sklearn.datasets import make_blobs, make_classification +from sklearn.exceptions import ( + ConvergenceWarning, + NotFittedError, + UndefinedMetricWarning, +) +from sklearn.metrics import f1_score +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import train_test_split +from sklearn.multiclass import OneVsRestClassifier + +# mypy error: Module 'sklearn.svm' has no attribute '_libsvm' +from sklearn.svm import ( # type: ignore + SVR, + LinearSVC, + LinearSVR, + NuSVR, + OneClassSVM, + _libsvm, +) +from sklearn.svm._classes import _validate_dual_parameter +from sklearn.utils import check_random_state, shuffle +from sklearn.utils._testing import ignore_warnings +from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS +from sklearn.utils.validation import _num_samples + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +Y = [1, 1, 1, 2, 2, 2] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [1, 2, 2] + +# also load the iris dataset +iris = datasets.load_iris() +rng = check_random_state(42) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + + +def test_libsvm_parameters(): + # Test parameters on classes that make use of libsvm. 
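+ # On this toy problem the linear SVC keeps one support vector per class
+ # (indices 1 and 3), which the hard-coded expectations below rely on.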
+ clf = svm.SVC(kernel="linear").fit(X, Y) + assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]]) + assert_array_equal(clf.support_, [1, 3]) + assert_array_equal(clf.support_vectors_, (X[1], X[3])) + assert_array_equal(clf.intercept_, [0.0]) + assert_array_equal(clf.predict(X), Y) + + +def test_libsvm_iris(): + # Check consistency on dataset iris. + + # shuffle the dataset so that labels are not ordered + for k in ("linear", "rbf"): + clf = svm.SVC(kernel=k).fit(iris.data, iris.target) + assert np.mean(clf.predict(iris.data) == iris.target) > 0.9 + assert hasattr(clf, "coef_") == (k == "linear") + + assert_array_equal(clf.classes_, np.sort(clf.classes_)) + + # check also the low-level API + # We unpack the values to create a dictionary with some of the return values + # from Libsvm's fit. + ( + libsvm_support, + libsvm_support_vectors, + libsvm_n_class_SV, + libsvm_sv_coef, + libsvm_intercept, + libsvm_probA, + libsvm_probB, + # libsvm_fit_status and libsvm_n_iter won't be used below. + libsvm_fit_status, + libsvm_n_iter, + ) = _libsvm.fit(iris.data, iris.target.astype(np.float64)) + + model_params = { + "support": libsvm_support, + "SV": libsvm_support_vectors, + "nSV": libsvm_n_class_SV, + "sv_coef": libsvm_sv_coef, + "intercept": libsvm_intercept, + "probA": libsvm_probA, + "probB": libsvm_probB, + } + pred = _libsvm.predict(iris.data, **model_params) + assert np.mean(pred == iris.target) > 0.95 + + # We unpack the values to create a dictionary with some of the return values + # from Libsvm's fit. + ( + libsvm_support, + libsvm_support_vectors, + libsvm_n_class_SV, + libsvm_sv_coef, + libsvm_intercept, + libsvm_probA, + libsvm_probB, + # libsvm_fit_status and libsvm_n_iter won't be used below. + libsvm_fit_status, + libsvm_n_iter, + ) = _libsvm.fit(iris.data, iris.target.astype(np.float64), kernel="linear") + + model_params = { + "support": libsvm_support, + "SV": libsvm_support_vectors, + "nSV": libsvm_n_class_SV, + "sv_coef": libsvm_sv_coef, + "intercept": libsvm_intercept, + "probA": libsvm_probA, + "probB": libsvm_probB, + } + pred = _libsvm.predict(iris.data, **model_params, kernel="linear") + assert np.mean(pred == iris.target) > 0.95 + + pred = _libsvm.cross_validation( + iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0 + ) + assert np.mean(pred == iris.target) > 0.95 + + # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence + # we should get deterministic results (assuming that there is no other + # thread calling this wrapper calling `srand` concurrently). + pred2 = _libsvm.cross_validation( + iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0 + ) + assert_array_equal(pred, pred2) + + +def test_precomputed(): + # SVC with a precomputed kernel. + # We test it with a toy dataset and with iris. + clf = svm.SVC(kernel="precomputed") + # Gram matrix for train data (square matrix) + # (we use just a linear kernel) + K = np.dot(X, np.array(X).T) + clf.fit(K, Y) + # Gram matrix for test data (rectangular matrix) + KT = np.dot(T, np.array(X).T) + pred = clf.predict(KT) + with pytest.raises(ValueError): + clf.predict(KT.T) + + assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]]) + assert_array_equal(clf.support_, [1, 3]) + assert_array_equal(clf.intercept_, [0]) + assert_array_almost_equal(clf.support_, [1, 3]) + assert_array_equal(pred, true_result) + + # Gram matrix for test data but compute KT[i,j] + # for support vectors j only. 
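+ # The remaining entries are left at zero; the prediction below still matches
+ # true_result, showing that only the support-vector columns matter here.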
+ KT = np.zeros_like(KT) + for i in range(len(T)): + for j in clf.support_: + KT[i, j] = np.dot(T[i], X[j]) + + pred = clf.predict(KT) + assert_array_equal(pred, true_result) + + # same as before, but using a callable function instead of the kernel + # matrix. kernel is just a linear kernel + + def kfunc(x, y): + return np.dot(x, y.T) + + clf = svm.SVC(kernel=kfunc) + clf.fit(np.array(X), Y) + pred = clf.predict(T) + + assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]]) + assert_array_equal(clf.intercept_, [0]) + assert_array_almost_equal(clf.support_, [1, 3]) + assert_array_equal(pred, true_result) + + # test a precomputed kernel with the iris dataset + # and check parameters against a linear SVC + clf = svm.SVC(kernel="precomputed") + clf2 = svm.SVC(kernel="linear") + K = np.dot(iris.data, iris.data.T) + clf.fit(K, iris.target) + clf2.fit(iris.data, iris.target) + pred = clf.predict(K) + assert_array_almost_equal(clf.support_, clf2.support_) + assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) + assert_array_almost_equal(clf.intercept_, clf2.intercept_) + assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2) + + # Gram matrix for test data but compute KT[i,j] + # for support vectors j only. + K = np.zeros_like(K) + for i in range(len(iris.data)): + for j in clf.support_: + K[i, j] = np.dot(iris.data[i], iris.data[j]) + + pred = clf.predict(K) + assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2) + + clf = svm.SVC(kernel=kfunc) + clf.fit(iris.data, iris.target) + assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2) + + +def test_svr(): + # Test Support Vector Regression + + diabetes = datasets.load_diabetes() + for clf in ( + svm.NuSVR(kernel="linear", nu=0.4, C=1.0), + svm.NuSVR(kernel="linear", nu=0.4, C=10.0), + svm.SVR(kernel="linear", C=10.0), + svm.LinearSVR(dual="auto", C=10.0), + svm.LinearSVR(dual="auto", C=10.0), + ): + clf.fit(diabetes.data, diabetes.target) + assert clf.score(diabetes.data, diabetes.target) > 0.02 + + # non-regression test; previously, BaseLibSVM would check that + # len(np.unique(y)) < 2, which must only be done for SVC + svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data))) + svm.LinearSVR(dual="auto").fit(diabetes.data, np.ones(len(diabetes.data))) + + +def test_linearsvr(): + # check that SVR(kernel='linear') and LinearSVC() give + # comparable results + diabetes = datasets.load_diabetes() + lsvr = svm.LinearSVR(C=1e3, dual="auto").fit(diabetes.data, diabetes.target) + score1 = lsvr.score(diabetes.data, diabetes.target) + + svr = svm.SVR(kernel="linear", C=1e3).fit(diabetes.data, diabetes.target) + score2 = svr.score(diabetes.data, diabetes.target) + + assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) + assert_almost_equal(score1, score2, 2) + + +def test_linearsvr_fit_sampleweight(): + # check correct result when sample_weight is 1 + # check that SVR(kernel='linear') and LinearSVC() give + # comparable results + diabetes = datasets.load_diabetes() + n_samples = len(diabetes.target) + unit_weight = np.ones(n_samples) + lsvr = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit( + diabetes.data, diabetes.target, sample_weight=unit_weight + ) + score1 = lsvr.score(diabetes.data, diabetes.target) + + lsvr_no_weight = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit( + diabetes.data, diabetes.target + ) + score2 = lsvr_no_weight.score(diabetes.data, diabetes.target) + + assert_allclose( + np.linalg.norm(lsvr.coef_), 
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001 + ) + assert_almost_equal(score1, score2, 2) + + # check that fit(X) = fit([X1, X2, X3], sample_weight = [n1, n2, n3]) where + # X = X1 repeated n1 times, X2 repeated n2 times and so forth + random_state = check_random_state(0) + random_weight = random_state.randint(0, 10, n_samples) + lsvr_unflat = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit( + diabetes.data, diabetes.target, sample_weight=random_weight + ) + score3 = lsvr_unflat.score( + diabetes.data, diabetes.target, sample_weight=random_weight + ) + + X_flat = np.repeat(diabetes.data, random_weight, axis=0) + y_flat = np.repeat(diabetes.target, random_weight, axis=0) + lsvr_flat = svm.LinearSVR(dual="auto", C=1e3, tol=1e-12, max_iter=10000).fit( + X_flat, y_flat + ) + score4 = lsvr_flat.score(X_flat, y_flat) + + assert_almost_equal(score3, score4, 2) + + +def test_svr_errors(): + X = [[0.0], [1.0]] + y = [0.0, 0.5] + + # Bad kernel + clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]])) + clf.fit(X, y) + with pytest.raises(ValueError): + clf.predict(X) + + +def test_oneclass(): + # Test OneClassSVM + clf = svm.OneClassSVM() + clf.fit(X) + pred = clf.predict(T) + + assert_array_equal(pred, [1, -1, -1]) + assert pred.dtype == np.dtype("intp") + assert_array_almost_equal(clf.intercept_, [-1.218], decimal=3) + assert_array_almost_equal(clf.dual_coef_, [[0.750, 0.750, 0.750, 0.750]], decimal=3) + with pytest.raises(AttributeError): + (lambda: clf.coef_)() + + +def test_oneclass_decision_function(): + # Test OneClassSVM decision function + clf = svm.OneClassSVM() + rnd = check_random_state(2) + + # Generate train data + X = 0.3 * rnd.randn(100, 2) + X_train = np.r_[X + 2, X - 2] + + # Generate some regular novel observations + X = 0.3 * rnd.randn(20, 2) + X_test = np.r_[X + 2, X - 2] + # Generate some abnormal novel observations + X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) + + # fit the model + clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) + clf.fit(X_train) + + # predict things + y_pred_test = clf.predict(X_test) + assert np.mean(y_pred_test == 1) > 0.9 + y_pred_outliers = clf.predict(X_outliers) + assert np.mean(y_pred_outliers == -1) > 0.9 + dec_func_test = clf.decision_function(X_test) + assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) + dec_func_outliers = clf.decision_function(X_outliers) + assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) + + +def test_oneclass_score_samples(): + X_train = [[1, 1], [1, 2], [2, 1]] + clf = svm.OneClassSVM(gamma=1).fit(X_train) + assert_array_equal( + clf.score_samples([[2.0, 2.0]]), + clf.decision_function([[2.0, 2.0]]) + clf.offset_, + ) + + +def test_tweak_params(): + # Make sure some tweaking of parameters works. + # We change clf.dual_coef_ at run time and expect .predict() to change + # accordingly. Notice that this is not trivial since it involves a lot + # of C/Python copying in the libsvm bindings. + # The success of this test ensures that the mapping between libsvm and + # the python classifier is complete. + clf = svm.SVC(kernel="linear", C=1.0) + clf.fit(X, Y) + assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]]) + assert_array_equal(clf.predict([[-0.1, -0.1]]), [1]) + clf._dual_coef_ = np.array([[0.0, 1.0]]) + assert_array_equal(clf.predict([[-0.1, -0.1]]), [2]) + + +def test_probability(): + # Predict probabilities using SVC + # This uses cross validation, so we use a slightly bigger testing set. 
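+ # Checked below: each row of predict_proba sums to one, its argmax agrees
+ # with predict on most samples, and predict_log_proba is the log of
+ # predict_proba.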
+ + for clf in ( + svm.SVC(probability=True, random_state=0, C=1.0), + svm.NuSVC(probability=True, random_state=0), + ): + clf.fit(iris.data, iris.target) + + prob_predict = clf.predict_proba(iris.data) + assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) + assert np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9 + + assert_almost_equal( + clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8 + ) + + +def test_decision_function(): + # Test decision_function + # Sanity check, test that decision_function implemented in python + # returns the same as the one in libsvm + # multi class: + clf = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo").fit( + iris.data, iris.target + ) + + dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ + + assert_array_almost_equal(dec, clf.decision_function(iris.data)) + + # binary: + clf.fit(X, Y) + dec = np.dot(X, clf.coef_.T) + clf.intercept_ + prediction = clf.predict(X) + assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) + assert_array_almost_equal( + prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int)] + ) + expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0]) + assert_array_almost_equal(clf.decision_function(X), expected, 2) + + # kernel binary: + clf = svm.SVC(kernel="rbf", gamma=1, decision_function_shape="ovo") + clf.fit(X, Y) + + rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) + dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ + assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) + + +@pytest.mark.parametrize("SVM", (svm.SVC, svm.NuSVC)) +def test_decision_function_shape(SVM): + # check that decision_function_shape='ovr' or 'ovo' gives + # correct shape and is consistent with predict + + clf = SVM(kernel="linear", decision_function_shape="ovr").fit( + iris.data, iris.target + ) + dec = clf.decision_function(iris.data) + assert dec.shape == (len(iris.data), 3) + assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) + + # with five classes: + X, y = make_blobs(n_samples=80, centers=5, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + clf = SVM(kernel="linear", decision_function_shape="ovr").fit(X_train, y_train) + dec = clf.decision_function(X_test) + assert dec.shape == (len(X_test), 5) + assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) + + # check shape of ovo_decition_function=True + clf = SVM(kernel="linear", decision_function_shape="ovo").fit(X_train, y_train) + dec = clf.decision_function(X_train) + assert dec.shape == (len(X_train), 10) + + +def test_svr_predict(): + # Test SVR's decision_function + # Sanity check, test that predict implemented in python + # returns the same as the one in libsvm + + X = iris.data + y = iris.target + + # linear kernel + reg = svm.SVR(kernel="linear", C=0.1).fit(X, y) + + dec = np.dot(X, reg.coef_.T) + reg.intercept_ + assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) + + # rbf kernel + reg = svm.SVR(kernel="rbf", gamma=1).fit(X, y) + + rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) + dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ + assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) + + +def test_weight(): + # Test class weights + clf = svm.SVC(class_weight={1: 0.1}) + # we give a small weights to class 1 + clf.fit(X, Y) + # so all predicted values belong to class 2 + assert_array_almost_equal(clf.predict(X), [2] * 6) + + X_, y_ = make_classification( + 
n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2 + ) + + for clf in ( + linear_model.LogisticRegression(), + svm.LinearSVC(dual="auto", random_state=0), + svm.SVC(), + ): + clf.set_params(class_weight={0: 0.1, 1: 10}) + clf.fit(X_[:100], y_[:100]) + y_pred = clf.predict(X_[100:]) + assert f1_score(y_[100:], y_pred) > 0.3 + + +@pytest.mark.parametrize("estimator", [svm.SVC(C=1e-2), svm.NuSVC()]) +def test_svm_classifier_sided_sample_weight(estimator): + # fit a linear SVM and check that giving more weight to opposed samples + # in the space will flip the decision toward these samples. + X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]] + estimator.set_params(kernel="linear") + + # check that with unit weights, a sample is supposed to be predicted on + # the boundary + sample_weight = [1] * 6 + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.decision_function([[-1.0, 1.0]]) + assert y_pred == pytest.approx(0) + + # give more weights to opposed samples + sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.decision_function([[-1.0, 1.0]]) + assert y_pred < 0 + + sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.decision_function([[-1.0, 1.0]]) + assert y_pred > 0 + + +@pytest.mark.parametrize("estimator", [svm.SVR(C=1e-2), svm.NuSVR(C=1e-2)]) +def test_svm_regressor_sided_sample_weight(estimator): + # similar test to test_svm_classifier_sided_sample_weight but for + # SVM regressors + X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]] + estimator.set_params(kernel="linear") + + # check that with unit weights, a sample is supposed to be predicted on + # the boundary + sample_weight = [1] * 6 + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.predict([[-1.0, 1.0]]) + assert y_pred == pytest.approx(1.5) + + # give more weights to opposed samples + sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.predict([[-1.0, 1.0]]) + assert y_pred < 1.5 + + sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1] + estimator.fit(X, Y, sample_weight=sample_weight) + y_pred = estimator.predict([[-1.0, 1.0]]) + assert y_pred > 1.5 + + +def test_svm_equivalence_sample_weight_C(): + # test that rescaling all samples is the same as changing C + clf = svm.SVC() + clf.fit(X, Y) + dual_coef_no_weight = clf.dual_coef_ + clf.set_params(C=100) + clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) + assert_allclose(dual_coef_no_weight, clf.dual_coef_) + + +@pytest.mark.parametrize( + "Estimator, err_msg", + [ + (svm.SVC, "Invalid input - all samples have zero or negative weights."), + (svm.NuSVC, "(negative dimensions are not allowed|nu is infeasible)"), + (svm.SVR, "Invalid input - all samples have zero or negative weights."), + (svm.NuSVR, "Invalid input - all samples have zero or negative weights."), + (svm.OneClassSVM, "Invalid input - all samples have zero or negative weights."), + ], + ids=["SVC", "NuSVC", "SVR", "NuSVR", "OneClassSVM"], +) +@pytest.mark.parametrize( + "sample_weight", + [[0] * len(Y), [-0.3] * len(Y)], + ids=["weights-are-zero", "weights-are-negative"], +) +def test_negative_sample_weights_mask_all_samples(Estimator, err_msg, sample_weight): + est = Estimator(kernel="linear") + with pytest.raises(ValueError, match=err_msg): + est.fit(X, Y, sample_weight=sample_weight) + + +@pytest.mark.parametrize( + "Classifier, err_msg", + [ 
+ ( + svm.SVC, + ( + "Invalid input - all samples with positive weights belong to the same" + " class" + ), + ), + (svm.NuSVC, "specified nu is infeasible"), + ], + ids=["SVC", "NuSVC"], +) +@pytest.mark.parametrize( + "sample_weight", + [[0, -0.5, 0, 1, 1, 1], [1, 1, 1, 0, -0.1, -0.3]], + ids=["mask-label-1", "mask-label-2"], +) +def test_negative_weights_svc_leave_just_one_label(Classifier, err_msg, sample_weight): + clf = Classifier(kernel="linear") + with pytest.raises(ValueError, match=err_msg): + clf.fit(X, Y, sample_weight=sample_weight) + + +@pytest.mark.parametrize( + "Classifier, model", + [ + (svm.SVC, {"when-left": [0.3998, 0.4], "when-right": [0.4, 0.3999]}), + (svm.NuSVC, {"when-left": [0.3333, 0.3333], "when-right": [0.3333, 0.3333]}), + ], + ids=["SVC", "NuSVC"], +) +@pytest.mark.parametrize( + "sample_weight, mask_side", + [([1, -0.5, 1, 1, 1, 1], "when-left"), ([1, 1, 1, 0, 1, 1], "when-right")], + ids=["partial-mask-label-1", "partial-mask-label-2"], +) +def test_negative_weights_svc_leave_two_labels( + Classifier, model, sample_weight, mask_side +): + clf = Classifier(kernel="linear") + clf.fit(X, Y, sample_weight=sample_weight) + assert_allclose(clf.coef_, [model[mask_side]], rtol=1e-3) + + +@pytest.mark.parametrize( + "Estimator", [svm.SVC, svm.NuSVC, svm.NuSVR], ids=["SVC", "NuSVC", "NuSVR"] +) +@pytest.mark.parametrize( + "sample_weight", + [[1, -0.5, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1]], + ids=["partial-mask-label-1", "partial-mask-label-2"], +) +def test_negative_weight_equal_coeffs(Estimator, sample_weight): + # model generates equal coefficients + est = Estimator(kernel="linear") + est.fit(X, Y, sample_weight=sample_weight) + coef = np.abs(est.coef_).ravel() + assert coef[0] == pytest.approx(coef[1], rel=1e-3) + + +@ignore_warnings(category=UndefinedMetricWarning) +def test_auto_weight(): + # Test class weights for imbalanced data + from sklearn.linear_model import LogisticRegression + + # We take as dataset the two-dimensional projection of iris so + # that it is not separable and remove half of predictors from + # class 1. + # We add one to the targets as a non-regression test: + # class_weight="balanced" + # used to work only when the labels where a range [0..K). + from sklearn.utils import compute_class_weight + + X, y = iris.data[:, :2], iris.target + 1 + unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) + + classes = np.unique(y[unbalanced]) + class_weights = compute_class_weight("balanced", classes=classes, y=y[unbalanced]) + assert np.argmax(class_weights) == 2 + + for clf in ( + svm.SVC(kernel="linear"), + svm.LinearSVC(dual="auto", random_state=0), + LogisticRegression(), + ): + # check that score is better when class='balanced' is set. + y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) + clf.set_params(class_weight="balanced") + y_pred_balanced = clf.fit( + X[unbalanced], + y[unbalanced], + ).predict(X) + assert metrics.f1_score(y, y_pred, average="macro") <= metrics.f1_score( + y, y_pred_balanced, average="macro" + ) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_bad_input(lil_container): + # Test dimensions for labels + Y2 = Y[:-1] # wrong dimensions for labels + with pytest.raises(ValueError): + svm.SVC().fit(X, Y2) + + # Test with arrays that are non-contiguous. 
+ for clf in (svm.SVC(), svm.LinearSVC(dual="auto", random_state=0)): + Xf = np.asfortranarray(X) + assert not Xf.flags["C_CONTIGUOUS"] + yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) + yf = yf[:, -1] + assert not yf.flags["F_CONTIGUOUS"] + assert not yf.flags["C_CONTIGUOUS"] + clf.fit(Xf, yf) + assert_array_equal(clf.predict(T), true_result) + + # error for precomputed kernelsx + clf = svm.SVC(kernel="precomputed") + with pytest.raises(ValueError): + clf.fit(X, Y) + + # predict with sparse input when trained with dense + clf = svm.SVC().fit(X, Y) + with pytest.raises(ValueError): + clf.predict(lil_container(X)) + + Xt = np.array(X).T + clf.fit(np.dot(X, Xt), Y) + with pytest.raises(ValueError): + clf.predict(X) + + clf = svm.SVC() + clf.fit(X, Y) + with pytest.raises(ValueError): + clf.predict(Xt) + + +def test_svc_nonfinite_params(): + # Check SVC throws ValueError when dealing with non-finite parameter values + rng = np.random.RandomState(0) + n_samples = 10 + fmax = np.finfo(np.float64).max + X = fmax * rng.uniform(size=(n_samples, 2)) + y = rng.randint(0, 2, size=n_samples) + + clf = svm.SVC() + msg = "The dual coefficients or intercepts are not finite" + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_unicode_kernel(): + # Test that a unicode kernel name does not cause a TypeError + clf = svm.SVC(kernel="linear", probability=True) + clf.fit(X, Y) + clf.predict_proba(T) + _libsvm.cross_validation( + iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0 + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_precomputed(csr_container): + clf = svm.SVC(kernel="precomputed") + sparse_gram = csr_container([[1, 0], [0, 1]]) + with pytest.raises(TypeError, match="Sparse precomputed"): + clf.fit(sparse_gram, [0, 1]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_fit_support_vectors_empty(csr_container): + # Regression test for #14893 + X_train = csr_container([[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]]) + y_train = np.array([0.04, 0.04, 0.10, 0.16]) + model = svm.SVR(kernel="linear") + model.fit(X_train, y_train) + assert not model.support_vectors_.data.size + assert not model.dual_coef_.data.size + + +@pytest.mark.parametrize("loss", ["hinge", "squared_hinge"]) +@pytest.mark.parametrize("penalty", ["l1", "l2"]) +@pytest.mark.parametrize("dual", [True, False]) +def test_linearsvc_parameters(loss, penalty, dual): + # Test possible parameter combinations in LinearSVC + # Generate list of possible parameter combinations + X, y = make_classification(n_samples=5, n_features=5, random_state=0) + + clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual, random_state=0) + if ( + (loss, penalty) == ("hinge", "l1") + or (loss, penalty, dual) == ("hinge", "l2", False) + or (penalty, dual) == ("l1", True) + ): + with pytest.raises( + ValueError, + match="Unsupported set of arguments.*penalty='%s.*loss='%s.*dual=%s" + % (penalty, loss, dual), + ): + clf.fit(X, y) + else: + clf.fit(X, y) + + +def test_linearsvc(): + # Test basic routines using LinearSVC + clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y) + + # by default should have intercept + assert clf.fit_intercept + + assert_array_equal(clf.predict(T), true_result) + assert_array_almost_equal(clf.intercept_, [0], decimal=3) + + # the same with l1 penalty + clf = svm.LinearSVC( + penalty="l1", loss="squared_hinge", dual=False, random_state=0 + ).fit(X, Y) + assert_array_equal(clf.predict(T), true_result) + + # l2 
penalty with dual formulation + clf = svm.LinearSVC(penalty="l2", dual=True, random_state=0).fit(X, Y) + assert_array_equal(clf.predict(T), true_result) + + # l2 penalty, l1 loss + clf = svm.LinearSVC(penalty="l2", loss="hinge", dual=True, random_state=0) + clf.fit(X, Y) + assert_array_equal(clf.predict(T), true_result) + + # test also decision function + dec = clf.decision_function(T) + res = (dec > 0).astype(int) + 1 + assert_array_equal(res, true_result) + + +def test_linearsvc_crammer_singer(): + # Test LinearSVC with crammer_singer multi-class svm + ovr_clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, iris.target) + cs_clf = svm.LinearSVC(dual="auto", multi_class="crammer_singer", random_state=0) + cs_clf.fit(iris.data, iris.target) + + # similar prediction for ovr and crammer-singer: + assert (ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > 0.9 + + # classifiers shouldn't be the same + assert (ovr_clf.coef_ != cs_clf.coef_).all() + + # test decision function + assert_array_equal( + cs_clf.predict(iris.data), + np.argmax(cs_clf.decision_function(iris.data), axis=1), + ) + dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ + assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) + + +def test_linearsvc_fit_sampleweight(): + # check correct result when sample_weight is 1 + n_samples = len(X) + unit_weight = np.ones(n_samples) + clf = svm.LinearSVC(dual="auto", random_state=0).fit(X, Y) + clf_unitweight = svm.LinearSVC( + dual="auto", random_state=0, tol=1e-12, max_iter=1000 + ).fit(X, Y, sample_weight=unit_weight) + + # check if same as sample_weight=None + assert_array_equal(clf_unitweight.predict(T), clf.predict(T)) + assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001) + + # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where + # X = X1 repeated n1 times, X2 repeated n2 times and so forth + + random_state = check_random_state(0) + random_weight = random_state.randint(0, 10, n_samples) + lsvc_unflat = svm.LinearSVC( + dual="auto", random_state=0, tol=1e-12, max_iter=1000 + ).fit(X, Y, sample_weight=random_weight) + + pred1 = lsvc_unflat.predict(T) + + X_flat = np.repeat(X, random_weight, axis=0) + y_flat = np.repeat(Y, random_weight, axis=0) + lsvc_flat = svm.LinearSVC( + dual="auto", random_state=0, tol=1e-12, max_iter=1000 + ).fit(X_flat, y_flat) + pred2 = lsvc_flat.predict(T) + + assert_array_equal(pred1, pred2) + assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001) + + +def test_crammer_singer_binary(): + # Test Crammer-Singer formulation in the binary case + X, y = make_classification(n_classes=2, random_state=0) + + for fit_intercept in (True, False): + acc = ( + svm.LinearSVC( + dual="auto", + fit_intercept=fit_intercept, + multi_class="crammer_singer", + random_state=0, + ) + .fit(X, y) + .score(X, y) + ) + assert acc > 0.9 + + +def test_linearsvc_iris(): + # Test that LinearSVC gives plausible predictions on the iris dataset + # Also, test symbolic class names (classes_). 
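+ # Fitting on the string labels below keeps the original target names in
+ # classes_, and predict must agree with the argmax of decision_function.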
+ target = iris.target_names[iris.target] + clf = svm.LinearSVC(dual="auto", random_state=0).fit(iris.data, target) + assert set(clf.classes_) == set(iris.target_names) + assert np.mean(clf.predict(iris.data) == target) > 0.8 + + dec = clf.decision_function(iris.data) + pred = iris.target_names[np.argmax(dec, 1)] + assert_array_equal(pred, clf.predict(iris.data)) + + +def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): + # Test that dense liblinear honours intercept_scaling param + X = [[2, 1], [3, 1], [1, 3], [2, 3]] + y = [0, 0, 1, 1] + clf = classifier( + fit_intercept=True, + penalty="l1", + loss="squared_hinge", + dual=False, + C=4, + tol=1e-7, + random_state=0, + ) + assert clf.intercept_scaling == 1, clf.intercept_scaling + assert clf.fit_intercept + + # when intercept_scaling is low the intercept value is highly "penalized" + # by regularization + clf.intercept_scaling = 1 + clf.fit(X, y) + assert_almost_equal(clf.intercept_, 0, decimal=5) + + # when intercept_scaling is sufficiently high, the intercept value + # is not affected by regularization + clf.intercept_scaling = 100 + clf.fit(X, y) + intercept1 = clf.intercept_ + assert intercept1 < -1 + + # when intercept_scaling is sufficiently high, the intercept value + # doesn't depend on intercept_scaling value + clf.intercept_scaling = 1000 + clf.fit(X, y) + intercept2 = clf.intercept_ + assert_array_almost_equal(intercept1, intercept2, decimal=2) + + +def test_liblinear_set_coef(): + # multi-class case + clf = svm.LinearSVC(dual="auto").fit(iris.data, iris.target) + values = clf.decision_function(iris.data) + clf.coef_ = clf.coef_.copy() + clf.intercept_ = clf.intercept_.copy() + values2 = clf.decision_function(iris.data) + assert_array_almost_equal(values, values2) + + # binary-class case + X = [[2, 1], [3, 1], [1, 3], [2, 3]] + y = [0, 0, 1, 1] + + clf = svm.LinearSVC(dual="auto").fit(X, y) + values = clf.decision_function(X) + clf.coef_ = clf.coef_.copy() + clf.intercept_ = clf.intercept_.copy() + values2 = clf.decision_function(X) + assert_array_equal(values, values2) + + +def test_immutable_coef_property(): + # Check that primal coef modification are not silently ignored + svms = [ + svm.SVC(kernel="linear").fit(iris.data, iris.target), + svm.NuSVC(kernel="linear").fit(iris.data, iris.target), + svm.SVR(kernel="linear").fit(iris.data, iris.target), + svm.NuSVR(kernel="linear").fit(iris.data, iris.target), + svm.OneClassSVM(kernel="linear").fit(iris.data), + ] + for clf in svms: + with pytest.raises(AttributeError): + clf.__setattr__("coef_", np.arange(3)) + with pytest.raises((RuntimeError, ValueError)): + clf.coef_.__setitem__((0, 0), 0) + + +def test_linearsvc_verbose(): + # stdout: redirect + import os + + stdout = os.dup(1) # save original stdout + os.dup2(os.pipe()[1], 1) # replace it + + # actual call + clf = svm.LinearSVC(dual="auto", verbose=1) + clf.fit(X, Y) + + # stdout: restore + os.dup2(stdout, 1) # restore original stdout + + +def test_svc_clone_with_callable_kernel(): + # create SVM with callable linear kernel, check that results are the same + # as with built-in linear kernel + svm_callable = svm.SVC( + kernel=lambda x, y: np.dot(x, y.T), + probability=True, + random_state=0, + decision_function_shape="ovr", + ) + # clone for checking clonability with lambda functions.. 
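+ # After refitting, the clone is compared against the built-in linear kernel
+ # on dual coefficients, intercepts, predictions, probabilities and decision
+ # values.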
+ svm_cloned = base.clone(svm_callable) + svm_cloned.fit(iris.data, iris.target) + + svm_builtin = svm.SVC( + kernel="linear", probability=True, random_state=0, decision_function_shape="ovr" + ) + svm_builtin.fit(iris.data, iris.target) + + assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) + assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) + assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) + + assert_array_almost_equal( + svm_cloned.predict_proba(iris.data), + svm_builtin.predict_proba(iris.data), + decimal=4, + ) + assert_array_almost_equal( + svm_cloned.decision_function(iris.data), + svm_builtin.decision_function(iris.data), + ) + + +def test_svc_bad_kernel(): + svc = svm.SVC(kernel=lambda x, y: x) + with pytest.raises(ValueError): + svc.fit(X, Y) + + +def test_libsvm_convergence_warnings(): + a = svm.SVC( + kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=2 + ) + warning_msg = ( + r"Solver terminated early \(max_iter=2\). Consider pre-processing " + r"your data with StandardScaler or MinMaxScaler." + ) + with pytest.warns(ConvergenceWarning, match=warning_msg): + a.fit(np.array(X), Y) + assert np.all(a.n_iter_ == 2) + + +def test_unfitted(): + X = "foo!" # input validation not required when SVM not fitted + + clf = svm.SVC() + with pytest.raises(Exception, match=r".*\bSVC\b.*\bnot\b.*\bfitted\b"): + clf.predict(X) + + clf = svm.NuSVR() + with pytest.raises(Exception, match=r".*\bNuSVR\b.*\bnot\b.*\bfitted\b"): + clf.predict(X) + + +# ignore convergence warnings from max_iter=1 +@ignore_warnings +def test_consistent_proba(): + a = svm.SVC(probability=True, max_iter=1, random_state=0) + proba_1 = a.fit(X, Y).predict_proba(X) + a = svm.SVC(probability=True, max_iter=1, random_state=0) + proba_2 = a.fit(X, Y).predict_proba(X) + assert_array_almost_equal(proba_1, proba_2) + + +def test_linear_svm_convergence_warnings(): + # Test that warnings are raised if model does not converge + + lsvc = svm.LinearSVC(dual="auto", random_state=0, max_iter=2) + warning_msg = "Liblinear failed to converge, increase the number of iterations." + with pytest.warns(ConvergenceWarning, match=warning_msg): + lsvc.fit(X, Y) + # Check that we have an n_iter_ attribute with int type as opposed to a + # numpy array or an np.int32 so as to match the docstring. + assert isinstance(lsvc.n_iter_, int) + assert lsvc.n_iter_ == 2 + + lsvr = svm.LinearSVR(dual="auto", random_state=0, max_iter=2) + with pytest.warns(ConvergenceWarning, match=warning_msg): + lsvr.fit(iris.data, iris.target) + assert isinstance(lsvr.n_iter_, int) + assert lsvr.n_iter_ == 2 + + +def test_svr_coef_sign(): + # Test that SVR(kernel="linear") has coef_ with the right sign. + # Non-regression test for #2933. 
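+ # For each linear regressor below, predict(X) must equal
+ # np.dot(X, svr.coef_.ravel()) + svr.intercept_, which fixes the sign of coef_.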
+ X = np.random.RandomState(21).randn(10, 3) + y = np.random.RandomState(12).randn(10) + + for svr in [ + svm.SVR(kernel="linear"), + svm.NuSVR(kernel="linear"), + svm.LinearSVR(dual="auto"), + ]: + svr.fit(X, y) + assert_array_almost_equal( + svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_ + ) + + +def test_lsvc_intercept_scaling_zero(): + # Test that intercept_scaling is ignored when fit_intercept is False + + lsvc = svm.LinearSVC(dual="auto", fit_intercept=False) + lsvc.fit(X, Y) + assert lsvc.intercept_ == 0.0 + + +def test_hasattr_predict_proba(): + # Method must be (un)available before or after fit, switched by + # `probability` param + + G = svm.SVC(probability=True) + assert hasattr(G, "predict_proba") + G.fit(iris.data, iris.target) + assert hasattr(G, "predict_proba") + + G = svm.SVC(probability=False) + assert not hasattr(G, "predict_proba") + G.fit(iris.data, iris.target) + assert not hasattr(G, "predict_proba") + + # Switching to `probability=True` after fitting should make + # predict_proba available, but calling it must not work: + G.probability = True + assert hasattr(G, "predict_proba") + msg = "predict_proba is not available when fitted with probability=False" + + with pytest.raises(NotFittedError, match=msg): + G.predict_proba(iris.data) + + +def test_decision_function_shape_two_class(): + for n_classes in [2, 3]: + X, y = make_blobs(centers=n_classes, random_state=0) + for estimator in [svm.SVC, svm.NuSVC]: + clf = OneVsRestClassifier(estimator(decision_function_shape="ovr")).fit( + X, y + ) + assert len(clf.predict(X)) == len(y) + + +def test_ovr_decision_function(): + # One point from each quadrant represents one class + X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]]) + y_train = [0, 1, 2, 3] + + # First point is closer to the decision boundaries than the second point + base_points = np.array([[5, 5], [10, 10]]) + + # For all the quadrants (classes) + X_test = np.vstack( + ( + base_points * [1, 1], # Q1 + base_points * [-1, 1], # Q2 + base_points * [-1, -1], # Q3 + base_points * [1, -1], # Q4 + ) + ) + + y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2 + + clf = svm.SVC(kernel="linear", decision_function_shape="ovr") + clf.fit(X_train, y_train) + + y_pred = clf.predict(X_test) + + # Test if the prediction is the same as y + assert_array_equal(y_pred, y_test) + + deci_val = clf.decision_function(X_test) + + # Assert that the predicted class has the maximum value + assert_array_equal(np.argmax(deci_val, axis=1), y_pred) + + # Get decision value at test points for the predicted class + pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2)) + + # Assert pred_class_deci_val > 0 here + assert np.min(pred_class_deci_val) > 0.0 + + # Test if the first point has lower decision value on every quadrant + # compared to the second point + assert np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]) + + +@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC]) +def test_svc_invalid_break_ties_param(SVCClass): + X, y = make_blobs(random_state=42) + + svm = SVCClass( + kernel="linear", decision_function_shape="ovo", break_ties=True, random_state=42 + ).fit(X, y) + + with pytest.raises(ValueError, match="break_ties must be False"): + svm.predict(y) + + +@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC]) +def test_svc_ovr_tie_breaking(SVCClass): + """Test if predict breaks ties in OVR mode. 
+ Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277 + """ + X, y = make_blobs(random_state=0, n_samples=20, n_features=2) + + xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100) + ys = np.linspace(X[:, 1].min(), X[:, 1].max(), 100) + xx, yy = np.meshgrid(xs, ys) + + common_params = dict( + kernel="rbf", gamma=1e6, random_state=42, decision_function_shape="ovr" + ) + svm = SVCClass( + break_ties=False, + **common_params, + ).fit(X, y) + pred = svm.predict(np.c_[xx.ravel(), yy.ravel()]) + dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()]) + assert not np.all(pred == np.argmax(dv, axis=1)) + + svm = SVCClass( + break_ties=True, + **common_params, + ).fit(X, y) + pred = svm.predict(np.c_[xx.ravel(), yy.ravel()]) + dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()]) + assert np.all(pred == np.argmax(dv, axis=1)) + + +def test_gamma_scale(): + X, y = [[0.0], [1.0]], [0, 1] + + clf = svm.SVC() + clf.fit(X, y) + assert_almost_equal(clf._gamma, 4) + + +@pytest.mark.parametrize( + "SVM, params", + [ + (LinearSVC, {"penalty": "l1", "loss": "squared_hinge", "dual": False}), + (LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": True}), + (LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": False}), + (LinearSVC, {"penalty": "l2", "loss": "hinge", "dual": True}), + (LinearSVR, {"loss": "epsilon_insensitive", "dual": True}), + (LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": True}), + (LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": True}), + ], +) +def test_linearsvm_liblinear_sample_weight(SVM, params): + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.dtype("float"), + ) + y = np.array( + [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int") + ) + + X2 = np.vstack([X, X]) + y2 = np.hstack([y, 3 - y]) + sample_weight = np.ones(shape=len(y) * 2) + sample_weight[len(y) :] = 0 + X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0) + + base_estimator = SVM(random_state=42) + base_estimator.set_params(**params) + base_estimator.set_params(tol=1e-12, max_iter=1000) + est_no_weight = base.clone(base_estimator).fit(X, y) + est_with_weight = base.clone(base_estimator).fit( + X2, y2, sample_weight=sample_weight + ) + + for method in ("predict", "decision_function"): + if hasattr(base_estimator, method): + X_est_no_weight = getattr(est_no_weight, method)(X) + X_est_with_weight = getattr(est_with_weight, method)(X) + assert_allclose(X_est_no_weight, X_est_with_weight) + + +@pytest.mark.parametrize("Klass", (OneClassSVM, SVR, NuSVR)) +def test_n_support(Klass): + # Make n_support is correct for oneclass and SVR (used to be + # non-initialized) + # this is a non regression test for issue #14774 + X = np.array([[0], [0.44], [0.45], [0.46], [1]]) + y = np.arange(X.shape[0]) + est = Klass() + assert not hasattr(est, "n_support_") + est.fit(X, y) + assert est.n_support_[0] == est.support_vectors_.shape[0] + assert est.n_support_.size == 1 + + +@pytest.mark.parametrize("Estimator", [svm.SVC, svm.SVR]) +def test_custom_kernel_not_array_input(Estimator): + """Test using a custom kernel that is not fed with array-like for floats""" + data = ["A A", "A", "B", "B B", "A B"] + X = np.array([[2, 0], [1, 0], [0, 1], [0, 2], [1, 1]]) # count encoding + y = np.array([1, 1, 2, 2, 1]) + + def string_kernel(X1, X2): + assert isinstance(X1[0], str) + n_samples1 = _num_samples(X1) 
+ n_samples2 = _num_samples(X2) + K = np.zeros((n_samples1, n_samples2)) + for ii in range(n_samples1): + for jj in range(ii, n_samples2): + K[ii, jj] = X1[ii].count("A") * X2[jj].count("A") + K[ii, jj] += X1[ii].count("B") * X2[jj].count("B") + K[jj, ii] = K[ii, jj] + return K + + K = string_kernel(data, data) + assert_array_equal(np.dot(X, X.T), K) + + svc1 = Estimator(kernel=string_kernel).fit(data, y) + svc2 = Estimator(kernel="linear").fit(X, y) + svc3 = Estimator(kernel="precomputed").fit(K, y) + + assert svc1.score(data, y) == svc3.score(K, y) + assert svc1.score(data, y) == svc2.score(X, y) + if hasattr(svc1, "decision_function"): # classifier + assert_allclose(svc1.decision_function(data), svc2.decision_function(X)) + assert_allclose(svc1.decision_function(data), svc3.decision_function(K)) + assert_array_equal(svc1.predict(data), svc2.predict(X)) + assert_array_equal(svc1.predict(data), svc3.predict(K)) + else: # regressor + assert_allclose(svc1.predict(data), svc2.predict(X)) + assert_allclose(svc1.predict(data), svc3.predict(K)) + + +def test_svc_raises_error_internal_representation(): + """Check that SVC raises error when internal representation is altered. + + Non-regression test for #18891 and https://nvd.nist.gov/vuln/detail/CVE-2020-28975 + """ + clf = svm.SVC(kernel="linear").fit(X, Y) + clf._n_support[0] = 1000000 + + msg = "The internal representation of SVC was altered" + with pytest.raises(ValueError, match=msg): + clf.predict(X) + + +@pytest.mark.parametrize( + "estimator, expected_n_iter_type", + [ + (svm.SVC, np.ndarray), + (svm.NuSVC, np.ndarray), + (svm.SVR, int), + (svm.NuSVR, int), + (svm.OneClassSVM, int), + ], +) +@pytest.mark.parametrize( + "dataset", + [ + make_classification(n_classes=2, n_informative=2, random_state=0), + make_classification(n_classes=3, n_informative=3, random_state=0), + make_classification(n_classes=4, n_informative=4, random_state=0), + ], +) +def test_n_iter_libsvm(estimator, expected_n_iter_type, dataset): + # Check that the type of n_iter_ is correct for the classes that inherit + # from BaseSVC. + # Note that for SVC, and NuSVC this is an ndarray; while for SVR, NuSVR, and + # OneClassSVM, it is an int. + # For SVC and NuSVC also check the shape of n_iter_. + X, y = dataset + n_iter = estimator(kernel="linear").fit(X, y).n_iter_ + assert type(n_iter) == expected_n_iter_type + if estimator in [svm.SVC, svm.NuSVC]: + n_classes = len(np.unique(y)) + assert n_iter.shape == (n_classes * (n_classes - 1) // 2,) + + +# TODO(1.5): Remove +@pytest.mark.parametrize("Estimator", [LinearSVR, LinearSVC]) +def test_dual_auto_deprecation_warning(Estimator): + svm = Estimator() + msg = ( + "The default value of `dual` will change from `True` to `'auto'` in" + " 1.5. Set the value of `dual` explicitly to suppress the warning." 
+ ) + with pytest.warns(FutureWarning, match=re.escape(msg)): + svm.fit(X, Y) + + +@pytest.mark.parametrize("loss", ["squared_hinge", "squared_epsilon_insensitive"]) +def test_dual_auto(loss): + # OvR, L2, N > M (6,2) + dual = _validate_dual_parameter("auto", loss, "l2", "ovr", np.asarray(X)) + assert dual is False + # OvR, L2, N < M (2,6) + dual = _validate_dual_parameter("auto", loss, "l2", "ovr", np.asarray(X).T) + assert dual is True + + +def test_dual_auto_edge_cases(): + # Hinge, OvR, L2, N > M (6,2) + dual = _validate_dual_parameter("auto", "hinge", "l2", "ovr", np.asarray(X)) + assert dual is True # only supports True + dual = _validate_dual_parameter( + "auto", "epsilon_insensitive", "l2", "ovr", np.asarray(X) + ) + assert dual is True # only supports True + # SqHinge, OvR, L1, N < M (2,6) + dual = _validate_dual_parameter( + "auto", "squared_hinge", "l1", "ovr", np.asarray(X).T + ) + assert dual is False # only supports False diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__init__.py b/venv/lib/python3.10/site-packages/sklearn/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d8cc67c5322e260ac281e0768a90842449a97519 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/__init__.py @@ -0,0 +1,1299 @@ +""" +The :mod:`sklearn.utils` module includes various utilities. +""" + +import math +import numbers +import platform +import struct +import timeit +import warnings +from collections.abc import Sequence +from contextlib import contextmanager, suppress +from itertools import compress, islice + +import numpy as np +from scipy.sparse import issparse + +from .. import get_config +from ..exceptions import DataConversionWarning +from . import _joblib, metadata_routing +from ._bunch import Bunch +from ._estimator_html_repr import estimator_html_repr +from ._param_validation import Integral, Interval, validate_params +from .class_weight import compute_class_weight, compute_sample_weight +from .deprecation import deprecated +from .discovery import all_estimators +from .fixes import parse_version, threadpool_info +from .murmurhash import murmurhash3_32 +from .validation import ( + _is_arraylike_not_scalar, + _is_pandas_df, + _is_polars_df, + _use_interchange_protocol, + as_float_array, + assert_all_finite, + check_array, + check_consistent_length, + check_random_state, + check_scalar, + check_symmetric, + check_X_y, + column_or_1d, + indexable, +) + +# Do not deprecate parallel_backend and register_parallel_backend as they are +# needed to tune `scikit-learn` behavior and have different effect if called +# from the vendored version or or the site-package version. The other are +# utilities that are independent of scikit-learn so they are not part of +# scikit-learn public API. 
+parallel_backend = _joblib.parallel_backend +register_parallel_backend = _joblib.register_parallel_backend + +__all__ = [ + "murmurhash3_32", + "as_float_array", + "assert_all_finite", + "check_array", + "check_random_state", + "compute_class_weight", + "compute_sample_weight", + "column_or_1d", + "check_consistent_length", + "check_X_y", + "check_scalar", + "indexable", + "check_symmetric", + "indices_to_mask", + "deprecated", + "parallel_backend", + "register_parallel_backend", + "resample", + "shuffle", + "check_matplotlib_support", + "all_estimators", + "DataConversionWarning", + "estimator_html_repr", + "Bunch", + "metadata_routing", +] + +IS_PYPY = platform.python_implementation() == "PyPy" +_IS_32BIT = 8 * struct.calcsize("P") == 32 +_IS_WASM = platform.machine() in ["wasm32", "wasm64"] + + +def _in_unstable_openblas_configuration(): + """Return True if in an unstable configuration for OpenBLAS""" + + # Import libraries which might load OpenBLAS. + import numpy # noqa + import scipy # noqa + + modules_info = threadpool_info() + + open_blas_used = any(info["internal_api"] == "openblas" for info in modules_info) + if not open_blas_used: + return False + + # OpenBLAS 0.3.16 fixed instability for arm64, see: + # https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58 # noqa + openblas_arm64_stable_version = parse_version("0.3.16") + for info in modules_info: + if info["internal_api"] != "openblas": + continue + openblas_version = info.get("version") + openblas_architecture = info.get("architecture") + if openblas_version is None or openblas_architecture is None: + # Cannot be sure that OpenBLAS is good enough. Assume unstable: + return True + if ( + openblas_architecture == "neoversen1" + and parse_version(openblas_version) < openblas_arm64_stable_version + ): + # See discussions in https://github.com/numpy/numpy/issues/19411 + return True + return False + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "mask": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def safe_mask(X, mask): + """Return a mask which is safe to use on X. + + Parameters + ---------- + X : {array-like, sparse matrix} + Data on which to apply mask. + + mask : array-like + Mask to be used on X. + + Returns + ------- + mask : ndarray + Array that is safe to use on X. + + Examples + -------- + >>> from sklearn.utils import safe_mask + >>> from scipy.sparse import csr_matrix + >>> data = csr_matrix([[1], [2], [3], [4], [5]]) + >>> condition = [False, True, True, False, True] + >>> mask = safe_mask(data, condition) + >>> data[mask].toarray() + array([[2], + [3], + [5]]) + """ + mask = np.asarray(mask) + if np.issubdtype(mask.dtype, np.signedinteger): + return mask + + if hasattr(X, "toarray"): + ind = np.arange(mask.shape[0]) + mask = ind[mask] + return mask + + +def axis0_safe_slice(X, mask, len_mask): + """Return a mask which is safer to use on X than safe_mask. + + This mask is safer than safe_mask since it returns an + empty array, when a sparse matrix is sliced with a boolean mask + with all False, instead of raising an unhelpful error in older + versions of SciPy. + + See: https://github.com/scipy/scipy/issues/5361 + + Also note that we can avoid doing the dot product by checking if + the len_mask is not zero in _huber_loss_and_gradient but this + is not going to be the bottleneck, since the number of outliers + and non_outliers are typically non-zero and it makes the code + tougher to follow. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} + Data on which to apply mask. + + mask : ndarray + Mask to be used on X. + + len_mask : int + The length of the mask. + + Returns + ------- + mask : ndarray + Array that is safe to use on X. + """ + if len_mask != 0: + return X[safe_mask(X, mask), :] + return np.zeros(shape=(0, X.shape[1])) + + +def _array_indexing(array, key, key_dtype, axis): + """Index an array or scipy.sparse consistently across NumPy version.""" + if issparse(array) and key_dtype == "bool": + key = np.asarray(key) + if isinstance(key, tuple): + key = list(key) + return array[key, ...] if axis == 0 else array[:, key] + + +def _pandas_indexing(X, key, key_dtype, axis): + """Index a pandas dataframe or a series.""" + if _is_arraylike_not_scalar(key): + key = np.asarray(key) + + if key_dtype == "int" and not (isinstance(key, slice) or np.isscalar(key)): + # using take() instead of iloc[] ensures the return value is a "proper" + # copy that will not raise SettingWithCopyWarning + return X.take(key, axis=axis) + else: + # check whether we should index with loc or iloc + indexer = X.iloc if key_dtype == "int" else X.loc + return indexer[:, key] if axis else indexer[key] + + +def _list_indexing(X, key, key_dtype): + """Index a Python list.""" + if np.isscalar(key) or isinstance(key, slice): + # key is a slice or a scalar + return X[key] + if key_dtype == "bool": + # key is a boolean array-like + return list(compress(X, key)) + # key is a integer array-like of key + return [X[idx] for idx in key] + + +def _polars_indexing(X, key, key_dtype, axis): + """Indexing X with polars interchange protocol.""" + # Polars behavior is more consistent with lists + if isinstance(key, np.ndarray): + key = key.tolist() + + if axis == 1: + return X[:, key] + else: + return X[key] + + +def _determine_key_type(key, accept_slice=True): + """Determine the data type of key. + + Parameters + ---------- + key : scalar, slice or array-like + The key from which we want to infer the data type. + + accept_slice : bool, default=True + Whether or not to raise an error if the key is a slice. + + Returns + ------- + dtype : {'int', 'str', 'bool', None} + Returns the data type of key. + """ + err_msg = ( + "No valid specification of the columns. Only a scalar, list or " + "slice of all integers or all strings, or boolean mask is " + "allowed" + ) + + dtype_to_str = {int: "int", str: "str", bool: "bool", np.bool_: "bool"} + array_dtype_to_str = { + "i": "int", + "u": "int", + "b": "bool", + "O": "str", + "U": "str", + "S": "str", + } + + if key is None: + return None + if isinstance(key, tuple(dtype_to_str.keys())): + try: + return dtype_to_str[type(key)] + except KeyError: + raise ValueError(err_msg) + if isinstance(key, slice): + if not accept_slice: + raise TypeError( + "Only array-like or scalar are supported. A Python slice was given." 
+ ) + if key.start is None and key.stop is None: + return None + key_start_type = _determine_key_type(key.start) + key_stop_type = _determine_key_type(key.stop) + if key_start_type is not None and key_stop_type is not None: + if key_start_type != key_stop_type: + raise ValueError(err_msg) + if key_start_type is not None: + return key_start_type + return key_stop_type + if isinstance(key, (list, tuple)): + unique_key = set(key) + key_type = {_determine_key_type(elt) for elt in unique_key} + if not key_type: + return None + if len(key_type) != 1: + raise ValueError(err_msg) + return key_type.pop() + if hasattr(key, "dtype"): + try: + return array_dtype_to_str[key.dtype.kind] + except KeyError: + raise ValueError(err_msg) + raise ValueError(err_msg) + + +def _safe_indexing(X, indices, *, axis=0): + """Return rows, items or columns of X using indices. + + .. warning:: + + This utility is documented, but **private**. This means that + backward compatibility might be broken without any deprecation + cycle. + + Parameters + ---------- + X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series + Data from which to sample rows, items or columns. `list` are only + supported when `axis=0`. + indices : bool, int, str, slice, array-like + - If `axis=0`, boolean and integer array-like, integer slice, + and scalar integer are supported. + - If `axis=1`: + - to select a single column, `indices` can be of `int` type for + all `X` types and `str` only for dataframe. The selected subset + will be 1D, unless `X` is a sparse matrix in which case it will + be 2D. + - to select multiples columns, `indices` can be one of the + following: `list`, `array`, `slice`. The type used in + these containers can be one of the following: `int`, 'bool' and + `str`. However, `str` is only supported when `X` is a dataframe. + The selected subset will be 2D. + axis : int, default=0 + The axis along which `X` will be subsampled. `axis=0` will select + rows while `axis=1` will select columns. + + Returns + ------- + subset + Subset of X on axis 0 or 1. + + Notes + ----- + CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are + not supported. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import _safe_indexing + >>> data = np.array([[1, 2], [3, 4], [5, 6]]) + >>> _safe_indexing(data, 0, axis=0) # select the first row + array([1, 2]) + >>> _safe_indexing(data, 0, axis=1) # select the first column + array([1, 3, 5]) + """ + if indices is None: + return X + + if axis not in (0, 1): + raise ValueError( + "'axis' should be either 0 (to index rows) or 1 (to index " + " column). Got {} instead.".format(axis) + ) + + indices_dtype = _determine_key_type(indices) + + if axis == 0 and indices_dtype == "str": + raise ValueError("String indexing is not supported with 'axis=0'") + + if axis == 1 and isinstance(X, list): + raise ValueError("axis=1 is not supported for lists") + + if axis == 1 and hasattr(X, "ndim") and X.ndim != 2: + raise ValueError( + "'X' should be a 2D NumPy array, 2D sparse matrix or pandas " + "dataframe when indexing the columns (i.e. 'axis=1'). " + "Got {} instead with {} dimension(s).".format(type(X), X.ndim) + ) + + if ( + axis == 1 + and indices_dtype == "str" + and not (_is_pandas_df(X) or _use_interchange_protocol(X)) + ): + raise ValueError( + "Specifying the columns using strings is only supported for dataframes." 
+ ) + + if hasattr(X, "iloc"): + # TODO: we should probably use _is_pandas_df(X) instead but this would + # require updating some tests such as test_train_test_split_mock_pandas. + return _pandas_indexing(X, indices, indices_dtype, axis=axis) + elif _is_polars_df(X): + return _polars_indexing(X, indices, indices_dtype, axis=axis) + elif hasattr(X, "shape"): + return _array_indexing(X, indices, indices_dtype, axis=axis) + else: + return _list_indexing(X, indices, indices_dtype) + + +def _safe_assign(X, values, *, row_indexer=None, column_indexer=None): + """Safe assignment to a numpy array, sparse matrix, or pandas dataframe. + + Parameters + ---------- + X : {ndarray, sparse-matrix, dataframe} + Array to be modified. It is expected to be 2-dimensional. + + values : ndarray + The values to be assigned to `X`. + + row_indexer : array-like, dtype={int, bool}, default=None + A 1-dimensional array to select the rows of interest. If `None`, all + rows are selected. + + column_indexer : array-like, dtype={int, bool}, default=None + A 1-dimensional array to select the columns of interest. If `None`, all + columns are selected. + """ + row_indexer = slice(None, None, None) if row_indexer is None else row_indexer + column_indexer = ( + slice(None, None, None) if column_indexer is None else column_indexer + ) + + if hasattr(X, "iloc"): # pandas dataframe + with warnings.catch_warnings(): + # pandas >= 1.5 raises a warning when using iloc to set values in a column + # that does not have the same type as the column being set. It happens + # for instance when setting a categorical column with a string. + # In the future the behavior won't change and the warning should disappear. + # TODO(1.3): check if the warning is still raised or remove the filter. + warnings.simplefilter("ignore", FutureWarning) + X.iloc[row_indexer, column_indexer] = values + else: # numpy array or sparse matrix + X[row_indexer, column_indexer] = values + + +def _get_column_indices_for_bool_or_int(key, n_columns): + # Convert key into list of positive integer indexes + try: + idx = _safe_indexing(np.arange(n_columns), key) + except IndexError as e: + raise ValueError( + f"all features must be in [0, {n_columns - 1}] or [-{n_columns}, 0]" + ) from e + return np.atleast_1d(idx).tolist() + + +def _get_column_indices(X, key): + """Get feature column indices for input data X and key. + + For accepted values of `key`, see the docstring of + :func:`_safe_indexing`. + """ + key_dtype = _determine_key_type(key) + if _use_interchange_protocol(X): + return _get_column_indices_interchange(X.__dataframe__(), key, key_dtype) + + n_columns = X.shape[1] + if isinstance(key, (list, tuple)) and not key: + # we get an empty list + return [] + elif key_dtype in ("bool", "int"): + return _get_column_indices_for_bool_or_int(key, n_columns) + else: + try: + all_columns = X.columns + except AttributeError: + raise ValueError( + "Specifying the columns using strings is only supported for dataframes." 
+ ) + if isinstance(key, str): + columns = [key] + elif isinstance(key, slice): + start, stop = key.start, key.stop + if start is not None: + start = all_columns.get_loc(start) + if stop is not None: + # pandas indexing with strings is endpoint included + stop = all_columns.get_loc(stop) + 1 + else: + stop = n_columns + 1 + return list(islice(range(n_columns), start, stop)) + else: + columns = list(key) + + try: + column_indices = [] + for col in columns: + col_idx = all_columns.get_loc(col) + if not isinstance(col_idx, numbers.Integral): + raise ValueError( + f"Selected columns, {columns}, are not unique in dataframe" + ) + column_indices.append(col_idx) + + except KeyError as e: + raise ValueError("A given column is not a column of the dataframe") from e + + return column_indices + + +def _get_column_indices_interchange(X_interchange, key, key_dtype): + """Same as _get_column_indices but for X with __dataframe__ protocol.""" + + n_columns = X_interchange.num_columns() + + if isinstance(key, (list, tuple)) and not key: + # we get an empty list + return [] + elif key_dtype in ("bool", "int"): + return _get_column_indices_for_bool_or_int(key, n_columns) + else: + column_names = list(X_interchange.column_names()) + + if isinstance(key, slice): + if key.step not in [1, None]: + raise NotImplementedError("key.step must be 1 or None") + start, stop = key.start, key.stop + if start is not None: + start = column_names.index(start) + + if stop is not None: + stop = column_names.index(stop) + 1 + else: + stop = n_columns + 1 + return list(islice(range(n_columns), start, stop)) + + selected_columns = [key] if np.isscalar(key) else key + + try: + return [column_names.index(col) for col in selected_columns] + except ValueError as e: + raise ValueError("A given column is not a column of the dataframe") from e + + +@validate_params( + { + "replace": ["boolean"], + "n_samples": [Interval(numbers.Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + "stratify": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=None): + """Resample arrays or sparse matrices in a consistent way. + + The default strategy implements one step of the bootstrapping + procedure. + + Parameters + ---------- + *arrays : sequence of array-like of shape (n_samples,) or \ + (n_samples, n_outputs) + Indexable data-structures can be arrays, lists, dataframes or scipy + sparse matrices with consistent first dimension. + + replace : bool, default=True + Implements resampling with replacement. If False, this will implement + (sliced) random permutations. + + n_samples : int, default=None + Number of samples to generate. If left to None this is + automatically set to the first dimension of the arrays. + If replace is False it should not be larger than the length of + arrays. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for shuffling + the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + stratify : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + If not None, data is split in a stratified fashion, using this as + the class labels. + + Returns + ------- + resampled_arrays : sequence of array-like of shape (n_samples,) or \ + (n_samples, n_outputs) + Sequence of resampled copies of the collections. The original arrays + are not impacted. 
+ + See Also + -------- + shuffle : Shuffle arrays or sparse matrices in a consistent way. + + Examples + -------- + It is possible to mix sparse and dense arrays in the same run:: + + >>> import numpy as np + >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) + >>> y = np.array([0, 1, 2]) + + >>> from scipy.sparse import coo_matrix + >>> X_sparse = coo_matrix(X) + + >>> from sklearn.utils import resample + >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) + >>> X + array([[1., 0.], + [2., 1.], + [1., 0.]]) + + >>> X_sparse + <3x2 sparse matrix of type '<... 'numpy.float64'>' + with 4 stored elements in Compressed Sparse Row format> + + >>> X_sparse.toarray() + array([[1., 0.], + [2., 1.], + [1., 0.]]) + + >>> y + array([0, 1, 0]) + + >>> resample(y, n_samples=2, random_state=0) + array([0, 1]) + + Example using stratification:: + + >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1] + >>> resample(y, n_samples=5, replace=False, stratify=y, + ... random_state=0) + [1, 1, 1, 0, 1] + """ + max_n_samples = n_samples + random_state = check_random_state(random_state) + + if len(arrays) == 0: + return None + + first = arrays[0] + n_samples = first.shape[0] if hasattr(first, "shape") else len(first) + + if max_n_samples is None: + max_n_samples = n_samples + elif (max_n_samples > n_samples) and (not replace): + raise ValueError( + "Cannot sample %d out of arrays with dim %d when replace is False" + % (max_n_samples, n_samples) + ) + + check_consistent_length(*arrays) + + if stratify is None: + if replace: + indices = random_state.randint(0, n_samples, size=(max_n_samples,)) + else: + indices = np.arange(n_samples) + random_state.shuffle(indices) + indices = indices[:max_n_samples] + else: + # Code adapted from StratifiedShuffleSplit() + y = check_array(stratify, ensure_2d=False, dtype=None) + if y.ndim == 2: + # for multi-label y, map each distinct row to a string repr + # using join because str(row) uses an ellipsis if len(row) > 1000 + y = np.array([" ".join(row.astype("str")) for row in y]) + + classes, y_indices = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + + class_counts = np.bincount(y_indices) + + # Find the sorted list of instances for each class: + # (np.unique above performs a sort, so code is O(n logn) already) + class_indices = np.split( + np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1] + ) + + n_i = _approximate_mode(class_counts, max_n_samples, random_state) + + indices = [] + + for i in range(n_classes): + indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace) + indices.extend(indices_i) + + indices = random_state.permutation(indices) + + # convert sparse matrices to CSR for row-based indexing + arrays = [a.tocsr() if issparse(a) else a for a in arrays] + resampled_arrays = [_safe_indexing(a, indices) for a in arrays] + if len(resampled_arrays) == 1: + # syntactic sugar for the unit argument case + return resampled_arrays[0] + else: + return resampled_arrays + + +def shuffle(*arrays, random_state=None, n_samples=None): + """Shuffle arrays or sparse matrices in a consistent way. + + This is a convenience alias to ``resample(*arrays, replace=False)`` to do + random permutations of the collections. + + Parameters + ---------- + *arrays : sequence of indexable data-structures + Indexable data-structures can be arrays, lists, dataframes or scipy + sparse matrices with consistent first dimension. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for shuffling + the data. 
+ Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + n_samples : int, default=None + Number of samples to generate. If left to None this is + automatically set to the first dimension of the arrays. It should + not be larger than the length of arrays. + + Returns + ------- + shuffled_arrays : sequence of indexable data-structures + Sequence of shuffled copies of the collections. The original arrays + are not impacted. + + See Also + -------- + resample : Resample arrays or sparse matrices in a consistent way. + + Examples + -------- + It is possible to mix sparse and dense arrays in the same run:: + + >>> import numpy as np + >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) + >>> y = np.array([0, 1, 2]) + + >>> from scipy.sparse import coo_matrix + >>> X_sparse = coo_matrix(X) + + >>> from sklearn.utils import shuffle + >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) + >>> X + array([[0., 0.], + [2., 1.], + [1., 0.]]) + + >>> X_sparse + <3x2 sparse matrix of type '<... 'numpy.float64'>' + with 3 stored elements in Compressed Sparse Row format> + + >>> X_sparse.toarray() + array([[0., 0.], + [2., 1.], + [1., 0.]]) + + >>> y + array([2, 1, 0]) + + >>> shuffle(y, n_samples=2, random_state=0) + array([0, 1]) + """ + return resample( + *arrays, replace=False, n_samples=n_samples, random_state=random_state + ) + + +def safe_sqr(X, *, copy=True): + """Element wise squaring of array-likes and sparse matrices. + + Parameters + ---------- + X : {array-like, ndarray, sparse matrix} + + copy : bool, default=True + Whether to create a copy of X and operate on it or to perform + inplace computation (default behaviour). + + Returns + ------- + X ** 2 : element wise square + Return the element-wise square of the input. + + Examples + -------- + >>> from sklearn.utils import safe_sqr + >>> safe_sqr([1, 2, 3]) + array([1, 4, 9]) + """ + X = check_array(X, accept_sparse=["csr", "csc", "coo"], ensure_2d=False) + if issparse(X): + if copy: + X = X.copy() + X.data **= 2 + else: + if copy: + X = X**2 + else: + X **= 2 + return X + + +def _chunk_generator(gen, chunksize): + """Chunk generator, ``gen`` into lists of length ``chunksize``. The last + chunk may have a length less than ``chunksize``.""" + while True: + chunk = list(islice(gen, chunksize)) + if chunk: + yield chunk + else: + return + + +@validate_params( + { + "n": [Interval(numbers.Integral, 1, None, closed="left")], + "batch_size": [Interval(numbers.Integral, 1, None, closed="left")], + "min_batch_size": [Interval(numbers.Integral, 0, None, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def gen_batches(n, batch_size, *, min_batch_size=0): + """Generator to create slices containing `batch_size` elements from 0 to `n`. + + The last slice may contain less than `batch_size` elements, when + `batch_size` does not divide `n`. + + Parameters + ---------- + n : int + Size of the sequence. + batch_size : int + Number of elements in each batch. + min_batch_size : int, default=0 + Minimum number of elements in each batch. + + Yields + ------ + slice of `batch_size` elements + + See Also + -------- + gen_even_slices: Generator to create n_packs slices going up to n. 
+ + Examples + -------- + >>> from sklearn.utils import gen_batches + >>> list(gen_batches(7, 3)) + [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] + >>> list(gen_batches(6, 3)) + [slice(0, 3, None), slice(3, 6, None)] + >>> list(gen_batches(2, 3)) + [slice(0, 2, None)] + >>> list(gen_batches(7, 3, min_batch_size=0)) + [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] + >>> list(gen_batches(7, 3, min_batch_size=2)) + [slice(0, 3, None), slice(3, 7, None)] + """ + start = 0 + for _ in range(int(n // batch_size)): + end = start + batch_size + if end + min_batch_size > n: + continue + yield slice(start, end) + start = end + if start < n: + yield slice(start, n) + + +@validate_params( + { + "n": [Interval(Integral, 1, None, closed="left")], + "n_packs": [Interval(Integral, 1, None, closed="left")], + "n_samples": [Interval(Integral, 1, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def gen_even_slices(n, n_packs, *, n_samples=None): + """Generator to create `n_packs` evenly spaced slices going up to `n`. + + If `n_packs` does not divide `n`, except for the first `n % n_packs` + slices, remaining slices may contain fewer elements. + + Parameters + ---------- + n : int + Size of the sequence. + n_packs : int + Number of slices to generate. + n_samples : int, default=None + Number of samples. Pass `n_samples` when the slices are to be used for + sparse matrix indexing; slicing off-the-end raises an exception, while + it works for NumPy arrays. + + Yields + ------ + `slice` representing a set of indices from 0 to n. + + See Also + -------- + gen_batches: Generator to create slices containing batch_size elements + from 0 to n. + + Examples + -------- + >>> from sklearn.utils import gen_even_slices + >>> list(gen_even_slices(10, 1)) + [slice(0, 10, None)] + >>> list(gen_even_slices(10, 10)) + [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] + >>> list(gen_even_slices(10, 5)) + [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] + >>> list(gen_even_slices(10, 3)) + [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] + """ + start = 0 + for pack_num in range(n_packs): + this_n = n // n_packs + if pack_num < n % n_packs: + this_n += 1 + if this_n > 0: + end = start + this_n + if n_samples is not None: + end = min(n_samples, end) + yield slice(start, end, None) + start = end + + +def tosequence(x): + """Cast iterable x to a Sequence, avoiding a copy if possible. + + Parameters + ---------- + x : iterable + The iterable to be converted. + + Returns + ------- + x : Sequence + If `x` is a NumPy array, it returns it as a `ndarray`. If `x` + is a `Sequence`, `x` is returned as-is. If `x` is from any other + type, `x` is returned casted as a list. + """ + if isinstance(x, np.ndarray): + return np.asarray(x) + elif isinstance(x, Sequence): + return x + else: + return list(x) + + +def _to_object_array(sequence): + """Convert sequence to a 1-D NumPy array of object dtype. + + numpy.array constructor has a similar use but it's output + is ambiguous. It can be 1-D NumPy array of object dtype if + the input is a ragged array, but if the input is a list of + equal length arrays, then the output is a 2D numpy.array. + _to_object_array solves this ambiguity by guarantying that + the output is a 1-D NumPy array of objects for any input. + + Parameters + ---------- + sequence : array-like of shape (n_elements,) + The sequence to be converted. 
+ + Returns + ------- + out : ndarray of shape (n_elements,), dtype=object + The converted sequence into a 1-D NumPy array of object dtype. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import _to_object_array + >>> _to_object_array([np.array([0]), np.array([1])]) + array([array([0]), array([1])], dtype=object) + >>> _to_object_array([np.array([0]), np.array([1, 2])]) + array([array([0]), array([1, 2])], dtype=object) + >>> _to_object_array([np.array([0]), np.array([1, 2])]) + array([array([0]), array([1, 2])], dtype=object) + """ + out = np.empty(len(sequence), dtype=object) + out[:] = sequence + return out + + +def indices_to_mask(indices, mask_length): + """Convert list of indices to boolean mask. + + Parameters + ---------- + indices : list-like + List of integers treated as indices. + mask_length : int + Length of boolean mask to be generated. + This parameter must be greater than max(indices). + + Returns + ------- + mask : 1d boolean nd-array + Boolean array that is True where indices are present, else False. + + Examples + -------- + >>> from sklearn.utils import indices_to_mask + >>> indices = [1, 2 , 3, 4] + >>> indices_to_mask(indices, 5) + array([False, True, True, True, True]) + """ + if mask_length <= np.max(indices): + raise ValueError("mask_length must be greater than max(indices)") + + mask = np.zeros(mask_length, dtype=bool) + mask[indices] = True + + return mask + + +def _message_with_time(source, message, time): + """Create one line message for logging purposes. + + Parameters + ---------- + source : str + String indicating the source or the reference of the message. + + message : str + Short message. + + time : int + Time in seconds. + """ + start_message = "[%s] " % source + + # adapted from joblib.logger.short_format_time without the Windows -.1s + # adjustment + if time > 60: + time_str = "%4.1fmin" % (time / 60) + else: + time_str = " %5.1fs" % time + end_message = " %s, total=%s" % (message, time_str) + dots_len = 70 - len(start_message) - len(end_message) + return "%s%s%s" % (start_message, dots_len * ".", end_message) + + +@contextmanager +def _print_elapsed_time(source, message=None): + """Log elapsed time to stdout when the context is exited. + + Parameters + ---------- + source : str + String indicating the source or the reference of the message. + + message : str, default=None + Short message. If None, nothing will be printed. + + Returns + ------- + context_manager + Prints elapsed time upon exit if verbose. + """ + if message is None: + yield + else: + start = timeit.default_timer() + yield + print(_message_with_time(source, message, timeit.default_timer() - start)) + + +def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None): + """Calculate how many rows can be processed within `working_memory`. + + Parameters + ---------- + row_bytes : int + The expected number of bytes of memory that will be consumed + during the processing of each row. + max_n_rows : int, default=None + The maximum return value. + working_memory : int or float, default=None + The number of rows to fit inside this number of MiB will be + returned. When None (default), the value of + ``sklearn.get_config()['working_memory']`` is used. + + Returns + ------- + int + The number of rows which can be processed within `working_memory`. + + Warns + ----- + Issues a UserWarning if `row_bytes exceeds `working_memory` MiB. 
+ """ + + if working_memory is None: + working_memory = get_config()["working_memory"] + + chunk_n_rows = int(working_memory * (2**20) // row_bytes) + if max_n_rows is not None: + chunk_n_rows = min(chunk_n_rows, max_n_rows) + if chunk_n_rows < 1: + warnings.warn( + "Could not adhere to working_memory config. " + "Currently %.0fMiB, %.0fMiB required." + % (working_memory, np.ceil(row_bytes * 2**-20)) + ) + chunk_n_rows = 1 + return chunk_n_rows + + +def _is_pandas_na(x): + """Test if x is pandas.NA. + + We intentionally do not use this function to return `True` for `pd.NA` in + `is_scalar_nan`, because estimators that support `pd.NA` are the exception + rather than the rule at the moment. When `pd.NA` is more universally + supported, we may reconsider this decision. + + Parameters + ---------- + x : any type + + Returns + ------- + boolean + """ + with suppress(ImportError): + from pandas import NA + + return x is NA + + return False + + +def is_scalar_nan(x): + """Test if x is NaN. + + This function is meant to overcome the issue that np.isnan does not allow + non-numerical types as input, and that np.nan is not float('nan'). + + Parameters + ---------- + x : any type + Any scalar value. + + Returns + ------- + bool + Returns true if x is NaN, and false otherwise. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import is_scalar_nan + >>> is_scalar_nan(np.nan) + True + >>> is_scalar_nan(float("nan")) + True + >>> is_scalar_nan(None) + False + >>> is_scalar_nan("") + False + >>> is_scalar_nan([np.nan]) + False + """ + return ( + not isinstance(x, numbers.Integral) + and isinstance(x, numbers.Real) + and math.isnan(x) + ) + + +def _approximate_mode(class_counts, n_draws, rng): + """Computes approximate mode of multivariate hypergeometric. + + This is an approximation to the mode of the multivariate + hypergeometric given by class_counts and n_draws. + It shouldn't be off by more than one. + + It is the mostly likely outcome of drawing n_draws many + samples from the population given by class_counts. + + Parameters + ---------- + class_counts : ndarray of int + Population per class. + n_draws : int + Number of draws (samples to draw) from the overall population. + rng : random state + Used to break ties. + + Returns + ------- + sampled_classes : ndarray of int + Number of samples drawn from each class. + np.sum(sampled_classes) == n_draws + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import _approximate_mode + >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0) + array([2, 1]) + >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0) + array([3, 1]) + >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), + ... n_draws=2, rng=0) + array([0, 1, 1, 0]) + >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), + ... 
n_draws=2, rng=42) + array([1, 1, 0, 0]) + """ + rng = check_random_state(rng) + # this computes a bad approximation to the mode of the + # multivariate hypergeometric given by class_counts and n_draws + continuous = class_counts / class_counts.sum() * n_draws + # floored means we don't overshoot n_samples, but probably undershoot + floored = np.floor(continuous) + # we add samples according to how much "left over" probability + # they had, until we arrive at n_samples + need_to_add = int(n_draws - floored.sum()) + if need_to_add > 0: + remainder = continuous - floored + values = np.sort(np.unique(remainder))[::-1] + # add according to remainder, but break ties + # randomly to avoid biases + for value in values: + (inds,) = np.where(remainder == value) + # if we need_to_add less than what's in inds + # we draw randomly from them. + # if we need to add more, we add them all and + # go to the next value + add_now = min(len(inds), need_to_add) + inds = rng.choice(inds, size=add_now, replace=False) + floored[inds] += 1 + need_to_add -= add_now + if need_to_add == 0: + break + return floored.astype(int) + + +def check_matplotlib_support(caller_name): + """Raise ImportError with detailed error message if mpl is not installed. + + Plot utilities like any of the Display's plotting functions should lazily import + matplotlib and call this helper before any computation. + + Parameters + ---------- + caller_name : str + The name of the caller that requires matplotlib. + """ + try: + import matplotlib # noqa + except ImportError as e: + raise ImportError( + "{} requires matplotlib. You can install matplotlib with " + "`pip install matplotlib`".format(caller_name) + ) from e + + +def check_pandas_support(caller_name): + """Raise ImportError with detailed error message if pandas is not installed. + + Plot utilities like :func:`fetch_openml` should lazily import + pandas and call this helper before any computation. + + Parameters + ---------- + caller_name : str + The name of the caller that requires pandas. + + Returns + ------- + pandas + The pandas package. + """ + try: + import pandas # noqa + + return pandas + except ImportError as e: + raise ImportError("{} requires pandas.".format(caller_name)) from e diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_arpack.py b/venv/lib/python3.10/site-packages/sklearn/utils/_arpack.py new file mode 100644 index 0000000000000000000000000000000000000000..3465ac98c2e81a2fb1ad0c971e6494867d2cec1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_arpack.py @@ -0,0 +1,30 @@ +from .validation import check_random_state + + +def _init_arpack_v0(size, random_state): + """Initialize the starting vector for iteration in ARPACK functions. + + Initialize a ndarray with values sampled from the uniform distribution on + [-1, 1]. This initialization model has been chosen to be consistent with + the ARPACK one as another initialization can lead to convergence issues. + + Parameters + ---------- + size : int + The size of the eigenvalue vector to be initialized. + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator used to generate a + uniform distribution. If int, random_state is the seed used by the + random number generator; If RandomState instance, random_state is the + random number generator; If None, the random number generator is the + RandomState instance used by `np.random`. + + Returns + ------- + v0 : ndarray of shape (size,) + The initialized vector. 
+ """ + random_state = check_random_state(random_state) + v0 = random_state.uniform(-1, 1, size) + return v0 diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_array_api.py b/venv/lib/python3.10/site-packages/sklearn/utils/_array_api.py new file mode 100644 index 0000000000000000000000000000000000000000..0c386a843bffb782eaa2586b5fe8f41bb4096198 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_array_api.py @@ -0,0 +1,575 @@ +"""Tools to support array_api.""" +import itertools +import math +from functools import wraps + +import numpy +import scipy.special as special + +from .._config import get_config +from .fixes import parse_version + + +def yield_namespace_device_dtype_combinations(): + """Yield supported namespace, device, dtype tuples for testing. + + Use this to test that an estimator works with all combinations. + + Returns + ------- + array_namespace : str + The name of the Array API namespace. + + device : str + The name of the device on which to allocate the arrays. Can be None to + indicate that the default value should be used. + + dtype_name : str + The name of the data type to use for arrays. Can be None to indicate + that the default value should be used. + """ + for array_namespace in [ + # The following is used to test the array_api_compat wrapper when + # array_api_dispatch is enabled: in particular, the arrays used in the + # tests are regular numpy arrays without any "device" attribute. + "numpy", + # Stricter NumPy-based Array API implementation. The + # numpy.array_api.Array instances always a dummy "device" attribute. + "numpy.array_api", + "cupy", + "cupy.array_api", + "torch", + ]: + if array_namespace == "torch": + for device, dtype in itertools.product( + ("cpu", "cuda"), ("float64", "float32") + ): + yield array_namespace, device, dtype + yield array_namespace, "mps", "float32" + else: + yield array_namespace, None, None + + +def _check_array_api_dispatch(array_api_dispatch): + """Check that array_api_compat is installed and NumPy version is compatible. + + array_api_compat follows NEP29, which has a higher minimum NumPy version than + scikit-learn. + """ + if array_api_dispatch: + try: + import array_api_compat # noqa + except ImportError: + raise ImportError( + "array_api_compat is required to dispatch arrays using the API" + " specification" + ) + + numpy_version = parse_version(numpy.__version__) + min_numpy_version = "1.21" + if numpy_version < parse_version(min_numpy_version): + raise ImportError( + f"NumPy must be {min_numpy_version} or newer to dispatch array using" + " the API specification" + ) + + +def device(x): + """Hardware device the array data resides on. + + Parameters + ---------- + x : array + Array instance from NumPy or an array API compatible library. + + Returns + ------- + out : device + `device` object (see the "Device Support" section of the array API spec). + """ + if isinstance(x, (numpy.ndarray, numpy.generic)): + return "cpu" + return x.device + + +def size(x): + """Return the total number of elements of x. + + Parameters + ---------- + x : array + Array instance from NumPy or an array API compatible library. + + Returns + ------- + out : int + Total number of elements. 
+ """ + return math.prod(x.shape) + + +def _is_numpy_namespace(xp): + """Return True if xp is backed by NumPy.""" + return xp.__name__ in {"numpy", "array_api_compat.numpy", "numpy.array_api"} + + +def _union1d(a, b, xp): + if _is_numpy_namespace(xp): + return xp.asarray(numpy.union1d(a, b)) + assert a.ndim == b.ndim == 1 + return xp.unique_values(xp.concat([xp.unique_values(a), xp.unique_values(b)])) + + +def isdtype(dtype, kind, *, xp): + """Returns a boolean indicating whether a provided dtype is of type "kind". + + Included in the v2022.12 of the Array API spec. + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + """ + if isinstance(kind, tuple): + return any(_isdtype_single(dtype, k, xp=xp) for k in kind) + else: + return _isdtype_single(dtype, kind, xp=xp) + + +def _isdtype_single(dtype, kind, *, xp): + if isinstance(kind, str): + if kind == "bool": + return dtype == xp.bool + elif kind == "signed integer": + return dtype in {xp.int8, xp.int16, xp.int32, xp.int64} + elif kind == "unsigned integer": + return dtype in {xp.uint8, xp.uint16, xp.uint32, xp.uint64} + elif kind == "integral": + return any( + _isdtype_single(dtype, k, xp=xp) + for k in ("signed integer", "unsigned integer") + ) + elif kind == "real floating": + return dtype in supported_float_dtypes(xp) + elif kind == "complex floating": + # Some name spaces do not have complex, such as cupy.array_api + # and numpy.array_api + complex_dtypes = set() + if hasattr(xp, "complex64"): + complex_dtypes.add(xp.complex64) + if hasattr(xp, "complex128"): + complex_dtypes.add(xp.complex128) + return dtype in complex_dtypes + elif kind == "numeric": + return any( + _isdtype_single(dtype, k, xp=xp) + for k in ("integral", "real floating", "complex floating") + ) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + return dtype == kind + + +def supported_float_dtypes(xp): + """Supported floating point types for the namespace + + Note: float16 is not officially part of the Array API spec at the + time of writing but scikit-learn estimators and functions can choose + to accept it when xp.float16 is defined. + + https://data-apis.org/array-api/latest/API_specification/data_types.html + """ + if hasattr(xp, "float16"): + return (xp.float64, xp.float32, xp.float16) + else: + return (xp.float64, xp.float32) + + +class _ArrayAPIWrapper: + """sklearn specific Array API compatibility wrapper + + This wrapper makes it possible for scikit-learn maintainers to + deal with discrepancies between different implementations of the + Python Array API standard and its evolution over time. 
+ + The Python Array API standard specification: + https://data-apis.org/array-api/latest/ + + Documentation of the NumPy implementation: + https://numpy.org/neps/nep-0047-array-api-standard.html + """ + + def __init__(self, array_namespace): + self._namespace = array_namespace + + def __getattr__(self, name): + return getattr(self._namespace, name) + + def __eq__(self, other): + return self._namespace == other._namespace + + def isdtype(self, dtype, kind): + return isdtype(dtype, kind, xp=self._namespace) + + +def _check_device_cpu(device): # noqa + if device not in {"cpu", None}: + raise ValueError(f"Unsupported device for NumPy: {device!r}") + + +def _accept_device_cpu(func): + @wraps(func) + def wrapped_func(*args, **kwargs): + _check_device_cpu(kwargs.pop("device", None)) + return func(*args, **kwargs) + + return wrapped_func + + +class _NumPyAPIWrapper: + """Array API compat wrapper for any numpy version + + NumPy < 1.22 does not expose the numpy.array_api namespace. This + wrapper makes it possible to write code that uses the standard + Array API while working with any version of NumPy supported by + scikit-learn. + + See the `get_namespace()` public function for more details. + """ + + # Creation functions in spec: + # https://data-apis.org/array-api/latest/API_specification/creation_functions.html + _CREATION_FUNCS = { + "arange", + "empty", + "empty_like", + "eye", + "full", + "full_like", + "linspace", + "ones", + "ones_like", + "zeros", + "zeros_like", + } + # Data types in spec + # https://data-apis.org/array-api/latest/API_specification/data_types.html + _DTYPES = { + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + # XXX: float16 is not part of the Array API spec but exposed by + # some namespaces. + "float16", + "float32", + "float64", + "complex64", + "complex128", + } + + def __getattr__(self, name): + attr = getattr(numpy, name) + + # Support device kwargs and make sure they are on the CPU + if name in self._CREATION_FUNCS: + return _accept_device_cpu(attr) + + # Convert to dtype objects + if name in self._DTYPES: + return numpy.dtype(attr) + return attr + + @property + def bool(self): + return numpy.bool_ + + def astype(self, x, dtype, *, copy=True, casting="unsafe"): + # astype is not defined in the top level NumPy namespace + return x.astype(dtype, copy=copy, casting=casting) + + def asarray(self, x, *, dtype=None, device=None, copy=None): # noqa + _check_device_cpu(device) + # Support copy in NumPy namespace + if copy is True: + return numpy.array(x, copy=True, dtype=dtype) + else: + return numpy.asarray(x, dtype=dtype) + + def unique_inverse(self, x): + return numpy.unique(x, return_inverse=True) + + def unique_counts(self, x): + return numpy.unique(x, return_counts=True) + + def unique_values(self, x): + return numpy.unique(x) + + def concat(self, arrays, *, axis=None): + return numpy.concatenate(arrays, axis=axis) + + def reshape(self, x, shape, *, copy=None): + """Gives a new shape to an array without changing its data. + + The Array API specification requires shape to be a tuple. 
+ https://data-apis.org/array-api/latest/API_specification/generated/array_api.reshape.html + """ + if not isinstance(shape, tuple): + raise TypeError( + f"shape must be a tuple, got {shape!r} of type {type(shape)}" + ) + + if copy is True: + x = x.copy() + return numpy.reshape(x, shape) + + def isdtype(self, dtype, kind): + return isdtype(dtype, kind, xp=self) + + +_NUMPY_API_WRAPPER_INSTANCE = _NumPyAPIWrapper() + + +def get_namespace(*arrays): + """Get namespace of arrays. + + Introspect `arrays` arguments and return their common Array API + compatible namespace object, if any. NumPy 1.22 and later can + construct such containers using the `numpy.array_api` namespace + for instance. + + See: https://numpy.org/neps/nep-0047-array-api-standard.html + + If `arrays` are regular numpy arrays, an instance of the + `_NumPyAPIWrapper` compatibility wrapper is returned instead. + + Namespace support is not enabled by default. To enabled it + call: + + sklearn.set_config(array_api_dispatch=True) + + or: + + with sklearn.config_context(array_api_dispatch=True): + # your code here + + Otherwise an instance of the `_NumPyAPIWrapper` + compatibility wrapper is always returned irrespective of + the fact that arrays implement the `__array_namespace__` + protocol or not. + + Parameters + ---------- + *arrays : array objects + Array objects. + + Returns + ------- + namespace : module + Namespace shared by array objects. If any of the `arrays` are not arrays, + the namespace defaults to NumPy. + + is_array_api_compliant : bool + True if the arrays are containers that implement the Array API spec. + Always False when array_api_dispatch=False. + """ + array_api_dispatch = get_config()["array_api_dispatch"] + if not array_api_dispatch: + return _NUMPY_API_WRAPPER_INSTANCE, False + + _check_array_api_dispatch(array_api_dispatch) + + # array-api-compat is a required dependency of scikit-learn only when + # configuring `array_api_dispatch=True`. Its import should therefore be + # protected by _check_array_api_dispatch to display an informative error + # message in case it is missing. + import array_api_compat + + namespace, is_array_api_compliant = array_api_compat.get_namespace(*arrays), True + + # These namespaces need additional wrapping to smooth out small differences + # between implementations + if namespace.__name__ in {"numpy.array_api", "cupy.array_api"}: + namespace = _ArrayAPIWrapper(namespace) + + return namespace, is_array_api_compliant + + +def _expit(X): + xp, _ = get_namespace(X) + if _is_numpy_namespace(xp): + return xp.asarray(special.expit(numpy.asarray(X))) + + return 1.0 / (1.0 + xp.exp(-X)) + + +def _add_to_diagonal(array, value, xp): + # Workaround for the lack of support for xp.reshape(a, shape, copy=False) in + # numpy.array_api: https://github.com/numpy/numpy/issues/23410 + value = xp.asarray(value, dtype=array.dtype) + if _is_numpy_namespace(xp): + array_np = numpy.asarray(array) + array_np.flat[:: array.shape[0] + 1] += value + return xp.asarray(array_np) + elif value.ndim == 1: + for i in range(array.shape[0]): + array[i, i] += value[i] + else: + # scalar value + for i in range(array.shape[0]): + array[i, i] += value + + +def _weighted_sum(sample_score, sample_weight, normalize=False, xp=None): + # XXX: this function accepts Array API input but returns a Python scalar + # float. The call to float() is convenient because it removes the need to + # move back results from device to host memory (e.g. calling `.cpu()` on a + # torch tensor). 
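[Editor's note: illustrative sketch, not part of the diff.] A short sketch of how `get_namespace` behaves for plain NumPy inputs with the default configuration; the dispatch-enabled branch needs the optional `array-api-compat` dependency, so it is only hinted at in a comment.

import numpy

x = numpy.asarray([1.0, 2.0, 3.0])
xp, is_array_api = get_namespace(x)
# dispatch is off by default: the NumPy compatibility wrapper is returned
assert isinstance(xp, _NumPyAPIWrapper) and is_array_api is False

# with sklearn.config_context(array_api_dispatch=True):   # needs array-api-compat
#     xp, is_array_api = get_namespace(x)                  # compat namespace, True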
However, this might interact in unexpected ways (break?) + # with lazy Array API implementations. See: + # https://github.com/data-apis/array-api/issues/642 + if xp is None: + xp, _ = get_namespace(sample_score) + if normalize and _is_numpy_namespace(xp): + sample_score_np = numpy.asarray(sample_score) + if sample_weight is not None: + sample_weight_np = numpy.asarray(sample_weight) + else: + sample_weight_np = None + return float(numpy.average(sample_score_np, weights=sample_weight_np)) + + if not xp.isdtype(sample_score.dtype, "real floating"): + # We move to cpu device ahead of time since certain devices may not support + # float64, but we want the same precision for all devices and namespaces. + sample_score = xp.astype(xp.asarray(sample_score, device="cpu"), xp.float64) + + if sample_weight is not None: + sample_weight = xp.asarray( + sample_weight, dtype=sample_score.dtype, device=device(sample_score) + ) + if not xp.isdtype(sample_weight.dtype, "real floating"): + sample_weight = xp.astype(sample_weight, xp.float64) + + if normalize: + if sample_weight is not None: + scale = xp.sum(sample_weight) + else: + scale = sample_score.shape[0] + if scale != 0: + sample_score = sample_score / scale + + if sample_weight is not None: + return float(sample_score @ sample_weight) + else: + return float(xp.sum(sample_score)) + + +def _nanmin(X, axis=None): + # TODO: refactor once nan-aware reductions are standardized: + # https://github.com/data-apis/array-api/issues/621 + xp, _ = get_namespace(X) + if _is_numpy_namespace(xp): + return xp.asarray(numpy.nanmin(X, axis=axis)) + + else: + mask = xp.isnan(X) + X = xp.min(xp.where(mask, xp.asarray(+xp.inf, device=device(X)), X), axis=axis) + # Replace Infs from all NaN slices with NaN again + mask = xp.all(mask, axis=axis) + if xp.any(mask): + X = xp.where(mask, xp.asarray(xp.nan), X) + return X + + +def _nanmax(X, axis=None): + # TODO: refactor once nan-aware reductions are standardized: + # https://github.com/data-apis/array-api/issues/621 + xp, _ = get_namespace(X) + if _is_numpy_namespace(xp): + return xp.asarray(numpy.nanmax(X, axis=axis)) + + else: + mask = xp.isnan(X) + X = xp.max(xp.where(mask, xp.asarray(-xp.inf, device=device(X)), X), axis=axis) + # Replace Infs from all NaN slices with NaN again + mask = xp.all(mask, axis=axis) + if xp.any(mask): + X = xp.where(mask, xp.asarray(xp.nan), X) + return X + + +def _asarray_with_order(array, dtype=None, order=None, copy=None, *, xp=None): + """Helper to support the order kwarg only for NumPy-backed arrays + + Memory layout parameter `order` is not exposed in the Array API standard, + however some input validation code in scikit-learn needs to work both + for classes and functions that will leverage Array API only operations + and for code that inherently relies on NumPy backed data containers with + specific memory layout constraints (e.g. our own Cython code). The + purpose of this helper is to make it possible to share code for data + container validation without memory copies for both downstream use cases: + the `order` parameter is only enforced if the input array implementation + is NumPy based, otherwise `order` is just silently ignored. + """ + if xp is None: + xp, _ = get_namespace(array) + if _is_numpy_namespace(xp): + # Use NumPy API to support order + if copy is True: + array = numpy.array(array, order=order, dtype=dtype) + else: + array = numpy.asarray(array, order=order, dtype=dtype) + + # At this point array is a NumPy ndarray. 
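[Editor's note: illustrative sketch, not part of the diff; the values are arbitrary.] To make the reductions above concrete: `_weighted_sum` returns a Python float, and `_nanmin` ignores NaNs within each slice.

import numpy

scores = numpy.asarray([1.0, 0.0, 1.0])
weights = numpy.asarray([2.0, 1.0, 1.0])
# weighted average: (2*1 + 1*0 + 1*1) / (2 + 1 + 1)
assert _weighted_sum(scores, weights, normalize=True) == 0.75

X = numpy.asarray([[numpy.nan, 2.0], [3.0, numpy.nan]])
assert numpy.allclose(_nanmin(X, axis=0), [3.0, 2.0])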
We convert it to an array + # container that is consistent with the input's namespace. + return xp.asarray(array) + else: + return xp.asarray(array, dtype=dtype, copy=copy) + + +def _convert_to_numpy(array, xp): + """Convert X into a NumPy ndarray on the CPU.""" + xp_name = xp.__name__ + + if xp_name in {"array_api_compat.torch", "torch"}: + return array.cpu().numpy() + elif xp_name == "cupy.array_api": + return array._array.get() + elif xp_name in {"array_api_compat.cupy", "cupy"}: # pragma: nocover + return array.get() + + return numpy.asarray(array) + + +def _estimator_with_converted_arrays(estimator, converter): + """Create new estimator which converting all attributes that are arrays. + + The converter is called on all NumPy arrays and arrays that support the + `DLPack interface `__. + + Parameters + ---------- + estimator : Estimator + Estimator to convert + + converter : callable + Callable that takes an array attribute and returns the converted array. + + Returns + ------- + new_estimator : Estimator + Convert estimator + """ + from sklearn.base import clone + + new_estimator = clone(estimator) + for key, attribute in vars(estimator).items(): + if hasattr(attribute, "__dlpack__") or isinstance(attribute, numpy.ndarray): + attribute = converter(attribute) + setattr(new_estimator, key, attribute) + return new_estimator + + +def _atol_for_type(dtype): + """Return the absolute tolerance for a given dtype.""" + return numpy.finfo(dtype).eps * 100 diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_available_if.py b/venv/lib/python3.10/site-packages/sklearn/utils/_available_if.py new file mode 100644 index 0000000000000000000000000000000000000000..2d9598df9de7e8e1c0d85640f278b5e669302094 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_available_if.py @@ -0,0 +1,93 @@ +from functools import update_wrapper, wraps +from types import MethodType + + +class _AvailableIfDescriptor: + """Implements a conditional property using the descriptor protocol. + + Using this class to create a decorator will raise an ``AttributeError`` + if check(self) returns a falsey value. Note that if check raises an error + this will also result in hasattr returning false. + + See https://docs.python.org/3/howto/descriptor.html for an explanation of + descriptors. + """ + + def __init__(self, fn, check, attribute_name): + self.fn = fn + self.check = check + self.attribute_name = attribute_name + + # update the docstring of the descriptor + update_wrapper(self, fn) + + def _check(self, obj, owner): + attr_err_msg = ( + f"This {repr(owner.__name__)} has no attribute {repr(self.attribute_name)}" + ) + try: + check_result = self.check(obj) + except Exception as e: + raise AttributeError(attr_err_msg) from e + + if not check_result: + raise AttributeError(attr_err_msg) + + def __get__(self, obj, owner=None): + if obj is not None: + # delegate only on instances, not the classes. + # this is to allow access to the docstrings. + self._check(obj, owner=owner) + out = MethodType(self.fn, obj) + + else: + # This makes it possible to use the decorated method as an unbound method, + # for instance when monkeypatching. + @wraps(self.fn) + def out(*args, **kwargs): + self._check(args[0], owner=owner) + return self.fn(*args, **kwargs) + + return out + + +def available_if(check): + """An attribute that is available only if check returns a truthy value. 
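[Editor's note: illustrative sketch, not part of the diff.] Looking back at `_estimator_with_converted_arrays` from `_array_api.py` above, a hedged example with an arbitrary estimator and converter: the estimator is cloned and only its array-valued attributes are passed through the converter, so the original fitted object is left untouched.

import numpy
from sklearn.linear_model import LinearRegression

est = LinearRegression().fit([[0.0], [1.0], [2.0]], [0.0, 1.0, 2.0])
est32 = _estimator_with_converted_arrays(est, lambda a: a.astype(numpy.float32))
assert est32.coef_.dtype == numpy.float32
assert est.coef_.dtype == numpy.float64   # original estimator unchanged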
+ + Parameters + ---------- + check : callable + When passed the object with the decorated method, this should return + a truthy value if the attribute is available, and either return False + or raise an AttributeError if not available. + + Returns + ------- + callable + Callable makes the decorated method available if `check` returns + a truthy value, otherwise the decorated method is unavailable. + + Examples + -------- + >>> from sklearn.utils.metaestimators import available_if + >>> class HelloIfEven: + ... def __init__(self, x): + ... self.x = x + ... + ... def _x_is_even(self): + ... return self.x % 2 == 0 + ... + ... @available_if(_x_is_even) + ... def say_hello(self): + ... print("Hello") + ... + >>> obj = HelloIfEven(1) + >>> hasattr(obj, "say_hello") + False + >>> obj.x = 2 + >>> hasattr(obj, "say_hello") + True + >>> obj.say_hello() + Hello + """ + return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_encode.py b/venv/lib/python3.10/site-packages/sklearn/utils/_encode.py new file mode 100644 index 0000000000000000000000000000000000000000..b3bf1c2a317ece98f786cda08aae0ef3df2e3390 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_encode.py @@ -0,0 +1,367 @@ +from collections import Counter +from contextlib import suppress +from typing import NamedTuple + +import numpy as np + +from . import is_scalar_nan + + +def _unique(values, *, return_inverse=False, return_counts=False): + """Helper function to find unique values with support for python objects. + + Uses pure python method for object dtype, and numpy method for + all other dtypes. + + Parameters + ---------- + values : ndarray + Values to check for unknowns. + + return_inverse : bool, default=False + If True, also return the indices of the unique values. + + return_counts : bool, default=False + If True, also return the number of times each unique item appears in + values. + + Returns + ------- + unique : ndarray + The sorted unique values. + + unique_inverse : ndarray + The indices to reconstruct the original array from the unique array. + Only provided if `return_inverse` is True. + + unique_counts : ndarray + The number of times each of the unique values comes up in the original + array. Only provided if `return_counts` is True. + """ + if values.dtype == object: + return _unique_python( + values, return_inverse=return_inverse, return_counts=return_counts + ) + # numerical + return _unique_np( + values, return_inverse=return_inverse, return_counts=return_counts + ) + + +def _unique_np(values, return_inverse=False, return_counts=False): + """Helper function to find unique values for numpy arrays that correctly + accounts for nans. 
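[Editor's note: illustrative sketch, not part of the diff; the values are arbitrary.] A quick illustration of `_unique` on object-dtype input containing a missing value: missing entries are sorted to the end (`None` before `np.nan`), and the inverse indices refer to that ordering.

import numpy as np

values = np.array(["b", "a", None, "b"], dtype=object)
uniques, inverse = _unique(values, return_inverse=True)
assert list(uniques) == ["a", "b", None]
assert list(inverse) == [1, 0, 2, 1]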
See `_unique` documentation for details.""" + uniques = np.unique( + values, return_inverse=return_inverse, return_counts=return_counts + ) + + inverse, counts = None, None + + if return_counts: + *uniques, counts = uniques + + if return_inverse: + *uniques, inverse = uniques + + if return_counts or return_inverse: + uniques = uniques[0] + + # np.unique will have duplicate missing values at the end of `uniques` + # here we clip the nans and remove it from uniques + if uniques.size and is_scalar_nan(uniques[-1]): + nan_idx = np.searchsorted(uniques, np.nan) + uniques = uniques[: nan_idx + 1] + if return_inverse: + inverse[inverse > nan_idx] = nan_idx + + if return_counts: + counts[nan_idx] = np.sum(counts[nan_idx:]) + counts = counts[: nan_idx + 1] + + ret = (uniques,) + + if return_inverse: + ret += (inverse,) + + if return_counts: + ret += (counts,) + + return ret[0] if len(ret) == 1 else ret + + +class MissingValues(NamedTuple): + """Data class for missing data information""" + + nan: bool + none: bool + + def to_list(self): + """Convert tuple to a list where None is always first.""" + output = [] + if self.none: + output.append(None) + if self.nan: + output.append(np.nan) + return output + + +def _extract_missing(values): + """Extract missing values from `values`. + + Parameters + ---------- + values: set + Set of values to extract missing from. + + Returns + ------- + output: set + Set with missing values extracted. + + missing_values: MissingValues + Object with missing value information. + """ + missing_values_set = { + value for value in values if value is None or is_scalar_nan(value) + } + + if not missing_values_set: + return values, MissingValues(nan=False, none=False) + + if None in missing_values_set: + if len(missing_values_set) == 1: + output_missing_values = MissingValues(nan=False, none=True) + else: + # If there is more than one missing value, then it has to be + # float('nan') or np.nan + output_missing_values = MissingValues(nan=True, none=True) + else: + output_missing_values = MissingValues(nan=True, none=False) + + # create set without the missing values + output = values - missing_values_set + return output, output_missing_values + + +class _nandict(dict): + """Dictionary with support for nans.""" + + def __init__(self, mapping): + super().__init__(mapping) + for key, value in mapping.items(): + if is_scalar_nan(key): + self.nan_value = value + break + + def __missing__(self, key): + if hasattr(self, "nan_value") and is_scalar_nan(key): + return self.nan_value + raise KeyError(key) + + +def _map_to_integer(values, uniques): + """Map values based on its position in uniques.""" + table = _nandict({val: i for i, val in enumerate(uniques)}) + return np.array([table[v] for v in values]) + + +def _unique_python(values, *, return_inverse, return_counts): + # Only used in `_uniques`, see docstring there for details + try: + uniques_set = set(values) + uniques_set, missing_values = _extract_missing(uniques_set) + + uniques = sorted(uniques_set) + uniques.extend(missing_values.to_list()) + uniques = np.array(uniques, dtype=values.dtype) + except TypeError: + types = sorted(t.__qualname__ for t in set(type(v) for v in values)) + raise TypeError( + "Encoders require their input argument must be uniformly " + f"strings or numbers. 
Got {types}" + ) + ret = (uniques,) + + if return_inverse: + ret += (_map_to_integer(values, uniques),) + + if return_counts: + ret += (_get_counts(values, uniques),) + + return ret[0] if len(ret) == 1 else ret + + +def _encode(values, *, uniques, check_unknown=True): + """Helper function to encode values into [0, n_uniques - 1]. + + Uses pure python method for object dtype, and numpy method for + all other dtypes. + The numpy method has the limitation that the `uniques` need to + be sorted. Importantly, this is not checked but assumed to already be + the case. The calling method needs to ensure this for all non-object + values. + + Parameters + ---------- + values : ndarray + Values to encode. + uniques : ndarray + The unique values in `values`. If the dtype is not object, then + `uniques` needs to be sorted. + check_unknown : bool, default=True + If True, check for values in `values` that are not in `unique` + and raise an error. This is ignored for object dtype, and treated as + True in this case. This parameter is useful for + _BaseEncoder._transform() to avoid calling _check_unknown() + twice. + + Returns + ------- + encoded : ndarray + Encoded values + """ + if values.dtype.kind in "OUS": + try: + return _map_to_integer(values, uniques) + except KeyError as e: + raise ValueError(f"y contains previously unseen labels: {str(e)}") + else: + if check_unknown: + diff = _check_unknown(values, uniques) + if diff: + raise ValueError(f"y contains previously unseen labels: {str(diff)}") + return np.searchsorted(uniques, values) + + +def _check_unknown(values, known_values, return_mask=False): + """ + Helper function to check for unknowns in values to be encoded. + + Uses pure python method for object dtype, and numpy method for + all other dtypes. + + Parameters + ---------- + values : array + Values to check for unknowns. + known_values : array + Known values. Must be unique. + return_mask : bool, default=False + If True, return a mask of the same shape as `values` indicating + the valid values. + + Returns + ------- + diff : list + The unique values present in `values` and not in `know_values`. + valid_mask : boolean array + Additionally returned if ``return_mask=True``. 
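[Editor's note: illustrative sketch, not part of the diff; labels are arbitrary.] `_encode` and `_check_unknown` work together as follows: `_encode` maps values to their positions in `uniques`, while `_check_unknown` reports anything not present in `known_values`, optionally with a validity mask.

import numpy as np

uniques = np.array(["a", "b", "c"], dtype=object)
assert list(_encode(np.array(["b", "a"], dtype=object), uniques=uniques)) == [1, 0]

diff, valid_mask = _check_unknown(
    np.array(["a", "d"], dtype=object), uniques, return_mask=True
)
assert diff == ["d"]
assert list(valid_mask) == [True, False]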
+ + """ + valid_mask = None + + if values.dtype.kind in "OUS": + values_set = set(values) + values_set, missing_in_values = _extract_missing(values_set) + + uniques_set = set(known_values) + uniques_set, missing_in_uniques = _extract_missing(uniques_set) + diff = values_set - uniques_set + + nan_in_diff = missing_in_values.nan and not missing_in_uniques.nan + none_in_diff = missing_in_values.none and not missing_in_uniques.none + + def is_valid(value): + return ( + value in uniques_set + or missing_in_uniques.none + and value is None + or missing_in_uniques.nan + and is_scalar_nan(value) + ) + + if return_mask: + if diff or nan_in_diff or none_in_diff: + valid_mask = np.array([is_valid(value) for value in values]) + else: + valid_mask = np.ones(len(values), dtype=bool) + + diff = list(diff) + if none_in_diff: + diff.append(None) + if nan_in_diff: + diff.append(np.nan) + else: + unique_values = np.unique(values) + diff = np.setdiff1d(unique_values, known_values, assume_unique=True) + if return_mask: + if diff.size: + valid_mask = np.isin(values, known_values) + else: + valid_mask = np.ones(len(values), dtype=bool) + + # check for nans in the known_values + if np.isnan(known_values).any(): + diff_is_nan = np.isnan(diff) + if diff_is_nan.any(): + # removes nan from valid_mask + if diff.size and return_mask: + is_nan = np.isnan(values) + valid_mask[is_nan] = 1 + + # remove nan from diff + diff = diff[~diff_is_nan] + diff = list(diff) + + if return_mask: + return diff, valid_mask + return diff + + +class _NaNCounter(Counter): + """Counter with support for nan values.""" + + def __init__(self, items): + super().__init__(self._generate_items(items)) + + def _generate_items(self, items): + """Generate items without nans. Stores the nan counts separately.""" + for item in items: + if not is_scalar_nan(item): + yield item + continue + if not hasattr(self, "nan_count"): + self.nan_count = 0 + self.nan_count += 1 + + def __missing__(self, key): + if hasattr(self, "nan_count") and is_scalar_nan(key): + return self.nan_count + raise KeyError(key) + + +def _get_counts(values, uniques): + """Get the count of each of the `uniques` in `values`. + + The counts will use the order passed in by `uniques`. For non-object dtypes, + `uniques` is assumed to be sorted and `np.nan` is at the end. 
+ """ + if values.dtype.kind in "OU": + counter = _NaNCounter(values) + output = np.zeros(len(uniques), dtype=np.int64) + for i, item in enumerate(uniques): + with suppress(KeyError): + output[i] = counter[item] + return output + + unique_values, counts = _unique_np(values, return_counts=True) + + # Recorder unique_values based on input: `uniques` + uniques_in_values = np.isin(uniques, unique_values, assume_unique=True) + if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]): + uniques_in_values[-1] = True + + unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values]) + output = np.zeros_like(uniques, dtype=np.int64) + output[uniques_in_values] = counts[unique_valid_indices] + return output diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css b/venv/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css new file mode 100644 index 0000000000000000000000000000000000000000..3f29c70eddefc51c25d30d4f5472e3b848d60632 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css @@ -0,0 +1,404 @@ +#$id { + /* Definition of color scheme common for light and dark mode */ + --sklearn-color-text: black; + --sklearn-color-line: gray; + /* Definition of color scheme for unfitted estimators */ + --sklearn-color-unfitted-level-0: #fff5e6; + --sklearn-color-unfitted-level-1: #f6e4d2; + --sklearn-color-unfitted-level-2: #ffe0b3; + --sklearn-color-unfitted-level-3: chocolate; + /* Definition of color scheme for fitted estimators */ + --sklearn-color-fitted-level-0: #f0f8ff; + --sklearn-color-fitted-level-1: #d4ebff; + --sklearn-color-fitted-level-2: #b3dbfd; + --sklearn-color-fitted-level-3: cornflowerblue; + + /* Specific color for light theme */ + --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black))); + --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, white))); + --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black))); + --sklearn-color-icon: #696969; + + @media (prefers-color-scheme: dark) { + /* Redefinition of color scheme for dark theme */ + --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white))); + --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, #111))); + --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white))); + --sklearn-color-icon: #878787; + } +} + +#$id { + color: var(--sklearn-color-text); +} + +#$id pre { + padding: 0; +} + +#$id input.sk-hidden--visually { + border: 0; + clip: rect(1px 1px 1px 1px); + clip: rect(1px, 1px, 1px, 1px); + height: 1px; + margin: -1px; + overflow: hidden; + padding: 0; + position: absolute; + width: 1px; +} + +#$id div.sk-dashed-wrapped { + border: 1px dashed var(--sklearn-color-line); + margin: 0 0.4em 0.5em 0.4em; + box-sizing: border-box; + padding-bottom: 0.4em; + background-color: var(--sklearn-color-background); +} + +#$id div.sk-container { + /* jupyter's `normalize.less` sets `[hidden] { display: none; }` + but bootstrap.min.css set `[hidden] { display: none !important; }` + so we also need the `!important` here to be able to override the + default hidden behavior on the sphinx rendered scikit-learn.org. 
+ See: https://github.com/scikit-learn/scikit-learn/issues/21755 */ + display: inline-block !important; + position: relative; +} + +#$id div.sk-text-repr-fallback { + display: none; +} + +div.sk-parallel-item, +div.sk-serial, +div.sk-item { + /* draw centered vertical line to link estimators */ + background-image: linear-gradient(var(--sklearn-color-text-on-default-background), var(--sklearn-color-text-on-default-background)); + background-size: 2px 100%; + background-repeat: no-repeat; + background-position: center center; +} + +/* Parallel-specific style estimator block */ + +#$id div.sk-parallel-item::after { + content: ""; + width: 100%; + border-bottom: 2px solid var(--sklearn-color-text-on-default-background); + flex-grow: 1; +} + +#$id div.sk-parallel { + display: flex; + align-items: stretch; + justify-content: center; + background-color: var(--sklearn-color-background); + position: relative; +} + +#$id div.sk-parallel-item { + display: flex; + flex-direction: column; +} + +#$id div.sk-parallel-item:first-child::after { + align-self: flex-end; + width: 50%; +} + +#$id div.sk-parallel-item:last-child::after { + align-self: flex-start; + width: 50%; +} + +#$id div.sk-parallel-item:only-child::after { + width: 0; +} + +/* Serial-specific style estimator block */ + +#$id div.sk-serial { + display: flex; + flex-direction: column; + align-items: center; + background-color: var(--sklearn-color-background); + padding-right: 1em; + padding-left: 1em; +} + + +/* Toggleable style: style used for estimator/Pipeline/ColumnTransformer box that is +clickable and can be expanded/collapsed. +- Pipeline and ColumnTransformer use this feature and define the default style +- Estimators will overwrite some part of the style using the `sk-estimator` class +*/ + +/* Pipeline and ColumnTransformer style (default) */ + +#$id div.sk-toggleable { + /* Default theme specific background. 
It is overwritten whether we have a + specific estimator or a Pipeline/ColumnTransformer */ + background-color: var(--sklearn-color-background); +} + +/* Toggleable label */ +#$id label.sk-toggleable__label { + cursor: pointer; + display: block; + width: 100%; + margin-bottom: 0; + padding: 0.5em; + box-sizing: border-box; + text-align: center; +} + +#$id label.sk-toggleable__label-arrow:before { + /* Arrow on the left of the label */ + content: "▸"; + float: left; + margin-right: 0.25em; + color: var(--sklearn-color-icon); +} + +#$id label.sk-toggleable__label-arrow:hover:before { + color: var(--sklearn-color-text); +} + +/* Toggleable content - dropdown */ + +#$id div.sk-toggleable__content { + max-height: 0; + max-width: 0; + overflow: hidden; + text-align: left; + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-0); +} + +#$id div.sk-toggleable__content.fitted { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-0); +} + +#$id div.sk-toggleable__content pre { + margin: 0.2em; + border-radius: 0.25em; + color: var(--sklearn-color-text); + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-0); +} + +#$id div.sk-toggleable__content.fitted pre { + /* unfitted */ + background-color: var(--sklearn-color-fitted-level-0); +} + +#$id input.sk-toggleable__control:checked~div.sk-toggleable__content { + /* Expand drop-down */ + max-height: 200px; + max-width: 100%; + overflow: auto; +} + +#$id input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before { + content: "▾"; +} + +/* Pipeline/ColumnTransformer-specific style */ + +#$id div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label { + color: var(--sklearn-color-text); + background-color: var(--sklearn-color-unfitted-level-2); +} + +#$id div.sk-label.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label { + background-color: var(--sklearn-color-fitted-level-2); +} + +/* Estimator-specific style */ + +/* Colorize estimator box */ +#$id div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-2); +} + +#$id div.sk-estimator.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-2); +} + +#$id div.sk-label label.sk-toggleable__label, +#$id div.sk-label label { + /* The background is the default theme color */ + color: var(--sklearn-color-text-on-default-background); +} + +/* On hover, darken the color of the background */ +#$id div.sk-label:hover label.sk-toggleable__label { + color: var(--sklearn-color-text); + background-color: var(--sklearn-color-unfitted-level-2); +} + +/* Label box, darken color on hover, fitted */ +#$id div.sk-label.fitted:hover label.sk-toggleable__label.fitted { + color: var(--sklearn-color-text); + background-color: var(--sklearn-color-fitted-level-2); +} + +/* Estimator label */ + +#$id div.sk-label label { + font-family: monospace; + font-weight: bold; + display: inline-block; + line-height: 1.2em; +} + +#$id div.sk-label-container { + text-align: center; +} + +/* Estimator-specific */ +#$id div.sk-estimator { + font-family: monospace; + border: 1px dotted var(--sklearn-color-border-box); + border-radius: 0.25em; + box-sizing: border-box; + margin-bottom: 0.5em; + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-0); +} + +#$id div.sk-estimator.fitted { + /* fitted */ + background-color: 
var(--sklearn-color-fitted-level-0); +} + +/* on hover */ +#$id div.sk-estimator:hover { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-2); +} + +#$id div.sk-estimator.fitted:hover { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-2); +} + +/* Specification for estimator info (e.g. "i" and "?") */ + +/* Common style for "i" and "?" */ + +.sk-estimator-doc-link, +a:link.sk-estimator-doc-link, +a:visited.sk-estimator-doc-link { + float: right; + font-size: smaller; + line-height: 1em; + font-family: monospace; + background-color: var(--sklearn-color-background); + border-radius: 1em; + height: 1em; + width: 1em; + text-decoration: none !important; + margin-left: 1ex; + /* unfitted */ + border: var(--sklearn-color-unfitted-level-1) 1pt solid; + color: var(--sklearn-color-unfitted-level-1); +} + +.sk-estimator-doc-link.fitted, +a:link.sk-estimator-doc-link.fitted, +a:visited.sk-estimator-doc-link.fitted { + /* fitted */ + border: var(--sklearn-color-fitted-level-1) 1pt solid; + color: var(--sklearn-color-fitted-level-1); +} + +/* On hover */ +div.sk-estimator:hover .sk-estimator-doc-link:hover, +.sk-estimator-doc-link:hover, +div.sk-label-container:hover .sk-estimator-doc-link:hover, +.sk-estimator-doc-link:hover { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-3); + color: var(--sklearn-color-background); + text-decoration: none; +} + +div.sk-estimator.fitted:hover .sk-estimator-doc-link.fitted:hover, +.sk-estimator-doc-link.fitted:hover, +div.sk-label-container:hover .sk-estimator-doc-link.fitted:hover, +.sk-estimator-doc-link.fitted:hover { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-3); + color: var(--sklearn-color-background); + text-decoration: none; +} + +/* Span, style for the box shown on hovering the info icon */ +.sk-estimator-doc-link span { + display: none; + z-index: 9999; + position: relative; + font-weight: normal; + right: .2ex; + padding: .5ex; + margin: .5ex; + width: min-content; + min-width: 20ex; + max-width: 50ex; + color: var(--sklearn-color-text); + box-shadow: 2pt 2pt 4pt #999; + /* unfitted */ + background: var(--sklearn-color-unfitted-level-0); + border: .5pt solid var(--sklearn-color-unfitted-level-3); +} + +.sk-estimator-doc-link.fitted span { + /* fitted */ + background: var(--sklearn-color-fitted-level-0); + border: var(--sklearn-color-fitted-level-3); +} + +.sk-estimator-doc-link:hover span { + display: block; +} + +/* "?"-specific style due to the `` HTML tag */ + +#$id a.estimator_doc_link { + float: right; + font-size: 1rem; + line-height: 1em; + font-family: monospace; + background-color: var(--sklearn-color-background); + border-radius: 1rem; + height: 1rem; + width: 1rem; + text-decoration: none; + /* unfitted */ + color: var(--sklearn-color-unfitted-level-1); + border: var(--sklearn-color-unfitted-level-1) 1pt solid; +} + +#$id a.estimator_doc_link.fitted { + /* fitted */ + border: var(--sklearn-color-fitted-level-1) 1pt solid; + color: var(--sklearn-color-fitted-level-1); +} + +/* On hover */ +#$id a.estimator_doc_link:hover { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-3); + color: var(--sklearn-color-background); + text-decoration: none; +} + +#$id a.estimator_doc_link.fitted:hover { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-3); +} diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py b/venv/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py new file mode 
100644 index 0000000000000000000000000000000000000000..5e465234f516bd032b4214c85cf6e50d1573cd5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py @@ -0,0 +1,496 @@ +import html +import itertools +from contextlib import closing +from inspect import isclass +from io import StringIO +from pathlib import Path +from string import Template + +from .. import __version__, config_context +from .fixes import parse_version + + +class _IDCounter: + """Generate sequential ids with a prefix.""" + + def __init__(self, prefix): + self.prefix = prefix + self.count = 0 + + def get_id(self): + self.count += 1 + return f"{self.prefix}-{self.count}" + + +def _get_css_style(): + return Path(__file__).with_suffix(".css").read_text(encoding="utf-8") + + +_CONTAINER_ID_COUNTER = _IDCounter("sk-container-id") +_ESTIMATOR_ID_COUNTER = _IDCounter("sk-estimator-id") +_CSS_STYLE = _get_css_style() + + +class _VisualBlock: + """HTML Representation of Estimator + + Parameters + ---------- + kind : {'serial', 'parallel', 'single'} + kind of HTML block + + estimators : list of estimators or `_VisualBlock`s or a single estimator + If kind != 'single', then `estimators` is a list of + estimators. + If kind == 'single', then `estimators` is a single estimator. + + names : list of str, default=None + If kind != 'single', then `names` corresponds to estimators. + If kind == 'single', then `names` is a single string corresponding to + the single estimator. + + name_details : list of str, str, or None, default=None + If kind != 'single', then `name_details` corresponds to `names`. + If kind == 'single', then `name_details` is a single string + corresponding to the single estimator. + + dash_wrapped : bool, default=True + If true, wrapped HTML element will be wrapped with a dashed border. + Only active when kind != 'single'. + """ + + def __init__( + self, kind, estimators, *, names=None, name_details=None, dash_wrapped=True + ): + self.kind = kind + self.estimators = estimators + self.dash_wrapped = dash_wrapped + + if self.kind in ("parallel", "serial"): + if names is None: + names = (None,) * len(estimators) + if name_details is None: + name_details = (None,) * len(estimators) + + self.names = names + self.name_details = name_details + + def _sk_visual_block_(self): + return self + + +def _write_label_html( + out, + name, + name_details, + outer_class="sk-label-container", + inner_class="sk-label", + checked=False, + doc_link="", + is_fitted_css_class="", + is_fitted_icon="", +): + """Write labeled html with or without a dropdown with named details. + + Parameters + ---------- + out : file-like object + The file to write the HTML representation to. + name : str + The label for the estimator. It corresponds either to the estimator class name + for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`, + it corresponds to the name of the step. + name_details : str + The details to show as content in the dropdown part of the toggleable label. It + can contain information such as non-default parameters or column information for + `ColumnTransformer`. + outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container" + The CSS class for the outer container. + inner_class : {"sk-label", "sk-estimator"}, default="sk-label" + The CSS class for the inner container. + checked : bool, default=False + Whether the dropdown is folded or not. With a single estimator, we intend to + unfold the content. 
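[Editor's note: illustrative sketch, not part of the diff.] How the pieces above fit together: `_IDCounter` hands out a unique container id, and `string.Template` substitutes it into every `#$id` selector of the CSS file shown earlier, which is how each rendered diagram gets its own scoped stylesheet. This mirrors what `estimator_html_repr` does further down.

from string import Template

counter = _IDCounter("sk-container-id")
container_id = counter.get_id()                  # e.g. "sk-container-id-1"
scoped_css = Template(_CSS_STYLE).substitute(id=container_id)
assert f"#{container_id} " in scoped_css and "$id" not in scoped_css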
+ doc_link : str, default="" + The link to the documentation for the estimator. If an empty string, no link is + added to the diagram. This can be generated for an estimator if it uses the + `_HTMLDocumentationLinkMixin`. + is_fitted_css_class : {"", "fitted"} + The CSS class to indicate whether or not the estimator is fitted. The + empty string means that the estimator is not fitted and "fitted" means that the + estimator is fitted. + is_fitted_icon : str, default="" + The HTML representation to show the fitted information in the diagram. An empty + string means that no information is shown. + """ + # we need to add some padding to the left of the label to be sure it is centered + padding_label = " " if is_fitted_icon else "" # add padding for the "i" char + + out.write( + f'") # outer_class inner_class + + +def _get_visual_block(estimator): + """Generate information about how to display an estimator.""" + if hasattr(estimator, "_sk_visual_block_"): + try: + return estimator._sk_visual_block_() + except Exception: + return _VisualBlock( + "single", + estimator, + names=estimator.__class__.__name__, + name_details=str(estimator), + ) + + if isinstance(estimator, str): + return _VisualBlock( + "single", estimator, names=estimator, name_details=estimator + ) + elif estimator is None: + return _VisualBlock("single", estimator, names="None", name_details="None") + + # check if estimator looks like a meta estimator (wraps estimators) + if hasattr(estimator, "get_params") and not isclass(estimator): + estimators = [ + (key, est) + for key, est in estimator.get_params(deep=False).items() + if hasattr(est, "get_params") and hasattr(est, "fit") and not isclass(est) + ] + if estimators: + return _VisualBlock( + "parallel", + [est for _, est in estimators], + names=[f"{key}: {est.__class__.__name__}" for key, est in estimators], + name_details=[str(est) for _, est in estimators], + ) + + return _VisualBlock( + "single", + estimator, + names=estimator.__class__.__name__, + name_details=str(estimator), + ) + + +def _write_estimator_html( + out, + estimator, + estimator_label, + estimator_label_details, + is_fitted_css_class, + is_fitted_icon="", + first_call=False, +): + """Write estimator to html in serial, parallel, or by itself (single). + + For multiple estimators, this function is called recursively. + + Parameters + ---------- + out : file-like object + The file to write the HTML representation to. + estimator : estimator object + The estimator to visualize. + estimator_label : str + The label for the estimator. It corresponds either to the estimator class name + for simple estimator or in the case of `Pipeline` and `ColumnTransformer`, it + corresponds to the name of the step. + estimator_label_details : str + The details to show as content in the dropdown part of the toggleable label. + It can contain information as non-default parameters or column information for + `ColumnTransformer`. + is_fitted_css_class : {"", "fitted"} + The CSS class to indicate whether or not the estimator is fitted or not. The + empty string means that the estimator is not fitted and "fitted" means that the + estimator is fitted. + is_fitted_icon : str, default="" + The HTML representation to show the fitted information in the diagram. An empty + string means that no information is shown. If the estimator to be shown is not + the first estimator (i.e. `first_call=False`), `is_fitted_icon` is always an + empty string. + first_call : bool, default=False + Whether this is the first time this function is called. 
+ """ + if first_call: + est_block = _get_visual_block(estimator) + else: + is_fitted_icon = "" + with config_context(print_changed_only=True): + est_block = _get_visual_block(estimator) + # `estimator` can also be an instance of `_VisualBlock` + if hasattr(estimator, "_get_doc_link"): + doc_link = estimator._get_doc_link() + else: + doc_link = "" + if est_block.kind in ("serial", "parallel"): + dashed_wrapped = first_call or est_block.dash_wrapped + dash_cls = " sk-dashed-wrapped" if dashed_wrapped else "" + out.write(f'
') + + if estimator_label: + _write_label_html( + out, + estimator_label, + estimator_label_details, + doc_link=doc_link, + is_fitted_css_class=is_fitted_css_class, + is_fitted_icon=is_fitted_icon, + ) + + kind = est_block.kind + out.write(f'
') + est_infos = zip(est_block.estimators, est_block.names, est_block.name_details) + + for est, name, name_details in est_infos: + if kind == "serial": + _write_estimator_html( + out, + est, + name, + name_details, + is_fitted_css_class=is_fitted_css_class, + ) + else: # parallel + out.write('
') + # wrap element in a serial visualblock + serial_block = _VisualBlock("serial", [est], dash_wrapped=False) + _write_estimator_html( + out, + serial_block, + name, + name_details, + is_fitted_css_class=is_fitted_css_class, + ) + out.write("
") # sk-parallel-item + + out.write("
") + elif est_block.kind == "single": + _write_label_html( + out, + est_block.names, + est_block.name_details, + outer_class="sk-item", + inner_class="sk-estimator", + checked=first_call, + doc_link=doc_link, + is_fitted_css_class=is_fitted_css_class, + is_fitted_icon=is_fitted_icon, + ) + + +def estimator_html_repr(estimator): + """Build a HTML representation of an estimator. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + The estimator to visualize. + + Returns + ------- + html: str + HTML representation of estimator. + + Examples + -------- + >>> from sklearn.utils._estimator_html_repr import estimator_html_repr + >>> from sklearn.linear_model import LogisticRegression + >>> estimator_html_repr(LogisticRegression()) + '" + f'
' + '
' + f"
{html.escape(estimator_str)}
{fallback_msg}" + "
" + '
") + + html_output = out.getvalue() + return html_output + + +class _HTMLDocumentationLinkMixin: + """Mixin class allowing to generate a link to the API documentation. + + This mixin relies on three attributes: + - `_doc_link_module`: it corresponds to the root module (e.g. `sklearn`). Using this + mixin, the default value is `sklearn`. + - `_doc_link_template`: it corresponds to the template used to generate the + link to the API documentation. Using this mixin, the default value is + `"https://scikit-learn.org/{version_url}/modules/generated/ + {estimator_module}.{estimator_name}.html"`. + - `_doc_link_url_param_generator`: it corresponds to a function that generates the + parameters to be used in the template when the estimator module and name are not + sufficient. + + The method :meth:`_get_doc_link` generates the link to the API documentation for a + given estimator. + + This useful provides all the necessary states for + :func:`sklearn.utils.estimator_html_repr` to generate a link to the API + documentation for the estimator HTML diagram. + + Examples + -------- + If the default values for `_doc_link_module`, `_doc_link_template` are not suitable, + then you can override them: + >>> from sklearn.base import BaseEstimator + >>> estimator = BaseEstimator() + >>> estimator._doc_link_template = "https://website.com/{single_param}.html" + >>> def url_param_generator(estimator): + ... return {"single_param": estimator.__class__.__name__} + >>> estimator._doc_link_url_param_generator = url_param_generator + >>> estimator._get_doc_link() + 'https://website.com/BaseEstimator.html' + """ + + _doc_link_module = "sklearn" + _doc_link_url_param_generator = None + + @property + def _doc_link_template(self): + sklearn_version = parse_version(__version__) + if sklearn_version.dev is None: + version_url = f"{sklearn_version.major}.{sklearn_version.minor}" + else: + version_url = "dev" + return getattr( + self, + "__doc_link_template", + ( + f"https://scikit-learn.org/{version_url}/modules/generated/" + "{estimator_module}.{estimator_name}.html" + ), + ) + + @_doc_link_template.setter + def _doc_link_template(self, value): + setattr(self, "__doc_link_template", value) + + def _get_doc_link(self): + """Generates a link to the API documentation for a given estimator. + + This method generates the link to the estimator's documentation page + by using the template defined by the attribute `_doc_link_template`. + + Returns + ------- + url : str + The URL to the API documentation for this estimator. If the estimator does + not belong to module `_doc_link_module`, the empty string (i.e. `""`) is + returned. + """ + if self.__class__.__module__.split(".")[0] != self._doc_link_module: + return "" + + if self._doc_link_url_param_generator is None: + estimator_name = self.__class__.__name__ + # Construct the estimator's module name, up to the first private submodule. + # This works because in scikit-learn all public estimators are exposed at + # that level, even if they actually live in a private sub-module. 
+ estimator_module = ".".join( + itertools.takewhile( + lambda part: not part.startswith("_"), + self.__class__.__module__.split("."), + ) + ) + return self._doc_link_template.format( + estimator_module=estimator_module, estimator_name=estimator_name + ) + return self._doc_link_template.format( + **self._doc_link_url_param_generator(self) + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd b/venv/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4a9d6ef4eb7b74a7cba19ec5d62ccc748dbeb768 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd @@ -0,0 +1,18 @@ +# Author: Gael Varoquaux +# License: BSD +""" +Uses C++ map containers for fast dict-like behavior with keys being +integers, and values float. +""" + +from libcpp.map cimport map as cpp_map + +from ._typedefs cimport float64_t, intp_t + + +############################################################################### +# An object to be used in Python + +cdef class IntFloatDict: + cdef cpp_map[intp_t, float64_t] my_map + cdef _to_arrays(self, intp_t [:] keys, float64_t [:] values) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..951995a44e1ca06d3af4db07d35737112f2f2fc0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f12e3990fa1cfd36fbbcafabe5aadefc3216168b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_joblib.py b/venv/lib/python3.10/site-packages/sklearn/utils/_joblib.py new file mode 100644 index 0000000000000000000000000000000000000000..590fdc6170c64210917f0bb811fe65fc92b3ff36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_joblib.py @@ -0,0 +1,38 @@ +import warnings as _warnings + +with _warnings.catch_warnings(): + _warnings.simplefilter("ignore") + # joblib imports may raise DeprecationWarning on certain Python + # versions + import joblib + from joblib import ( + Memory, + Parallel, + __version__, + cpu_count, + delayed, + dump, + effective_n_jobs, + hash, + load, + logger, + parallel_backend, + register_parallel_backend, + ) + + +__all__ = [ + "parallel_backend", + "register_parallel_backend", + "cpu_count", + "Parallel", + "Memory", + "delayed", + "effective_n_jobs", + "hash", + "logger", + "dump", + "load", + "joblib", + "__version__", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py b/venv/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py new file mode 100644 index 0000000000000000000000000000000000000000..8b99012d7b0fbc7759f3f50d746d96aa355b757d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py @@ -0,0 +1,1563 @@ +""" +Metadata Routing Utility + +In order to better understand the components implemented in this file, one +needs to understand their relationship to one another. 
+ +The only relevant public API for end users are the ``set_{method}_request``, +e.g. ``estimator.set_fit_request(sample_weight=True)``. However, third-party +developers and users who implement custom meta-estimators, need to deal with +the objects implemented in this file. + +All estimators (should) implement a ``get_metadata_routing`` method, returning +the routing requests set for the estimator. This method is automatically +implemented via ``BaseEstimator`` for all simple estimators, but needs a custom +implementation for meta-estimators. + +In non-routing consumers, i.e. the simplest case, e.g. ``SVM``, +``get_metadata_routing`` returns a ``MetadataRequest`` object. + +In routers, e.g. meta-estimators and a multi metric scorer, +``get_metadata_routing`` returns a ``MetadataRouter`` object. + +An object which is both a router and a consumer, e.g. a meta-estimator which +consumes ``sample_weight`` and routes ``sample_weight`` to its sub-estimators, +routing information includes both information about the object itself (added +via ``MetadataRouter.add_self_request``), as well as the routing information +for its sub-estimators. + +A ``MetadataRequest`` instance includes one ``MethodMetadataRequest`` per +method in ``METHODS``, which includes ``fit``, ``score``, etc. + +Request values are added to the routing mechanism by adding them to +``MethodMetadataRequest`` instances, e.g. +``metadatarequest.fit.add(param="sample_weight", alias="my_weights")``. This is +used in ``set_{method}_request`` which are automatically generated, so users +and developers almost never need to directly call methods on a +``MethodMetadataRequest``. + +The ``alias`` above in the ``add`` method has to be either a string (an alias), +or a {True (requested), False (unrequested), None (error if passed)}``. There +are some other special values such as ``UNUSED`` and ``WARN`` which are used +for purposes such as warning of removing a metadata in a child class, but not +used by the end users. + +``MetadataRouter`` includes information about sub-objects' routing and how +methods are mapped together. For instance, the information about which methods +of a sub-estimator are called in which methods of the meta-estimator are all +stored here. Conceptually, this information looks like: + +``` +{ + "sub_estimator1": ( + mapping=[(caller="fit", callee="transform"), ...], + router=MetadataRequest(...), # or another MetadataRouter + ), + ... +} +``` + +To give the above representation some structure, we use the following objects: + +- ``(caller, callee)`` is a namedtuple called ``MethodPair`` + +- The list of ``MethodPair`` stored in the ``mapping`` field is a + ``MethodMapping`` object + +- ``(mapping=..., router=...)`` is a namedtuple called ``RouterMappingPair`` + +The ``set_{method}_request`` methods are dynamically generated for estimators +which inherit from the ``BaseEstimator``. This is done by attaching instances +of the ``RequestMethod`` descriptor to classes, which is done in the +``_MetadataRequester`` class, and ``BaseEstimator`` inherits from this mixin. +This mixin also implements the ``get_metadata_routing``, which meta-estimators +need to override, but it works for simple consumers as is. +""" + +# Author: Adrin Jalali +# License: BSD 3 clause + +import inspect +from collections import namedtuple +from copy import deepcopy +from typing import TYPE_CHECKING, Optional, Union +from warnings import warn + +from .. 
import get_config +from ..exceptions import UnsetMetadataPassedError +from ._bunch import Bunch + +# Only the following methods are supported in the routing mechanism. Adding new +# methods at the moment involves monkeypatching this list. +# Note that if this list is changed or monkeypatched, the corresponding method +# needs to be added under a TYPE_CHECKING condition like the one done here in +# _MetadataRequester +SIMPLE_METHODS = [ + "fit", + "partial_fit", + "predict", + "predict_proba", + "predict_log_proba", + "decision_function", + "score", + "split", + "transform", + "inverse_transform", +] + +# These methods are a composite of other methods and one cannot set their +# requests directly. Instead they should be set by setting the requests of the +# simple methods which make the composite ones. +COMPOSITE_METHODS = { + "fit_transform": ["fit", "transform"], + "fit_predict": ["fit", "predict"], +} + +METHODS = SIMPLE_METHODS + list(COMPOSITE_METHODS.keys()) + + +def _routing_enabled(): + """Return whether metadata routing is enabled. + + .. versionadded:: 1.3 + + Returns + ------- + enabled : bool + Whether metadata routing is enabled. If the config is not set, it + defaults to False. + """ + return get_config().get("enable_metadata_routing", False) + + +def _raise_for_params(params, owner, method): + """Raise an error if metadata routing is not enabled and params are passed. + + .. versionadded:: 1.4 + + Parameters + ---------- + params : dict + The metadata passed to a method. + + owner : object + The object to which the method belongs. + + method : str + The name of the method, e.g. "fit". + + Raises + ------ + ValueError + If metadata routing is not enabled and params are passed. + """ + caller = ( + f"{owner.__class__.__name__}.{method}" if method else owner.__class__.__name__ + ) + if not _routing_enabled() and params: + raise ValueError( + f"Passing extra keyword arguments to {caller} is only supported if" + " enable_metadata_routing=True, which you can set using" + " `sklearn.set_config`. See the User Guide" + " for more" + f" details. Extra parameters passed are: {set(params)}" + ) + + +def _raise_for_unsupported_routing(obj, method, **kwargs): + """Raise when metadata routing is enabled and metadata is passed. + + This is used in meta-estimators which have not implemented metadata routing + to prevent silent bugs. There is no need to use this function if the + meta-estimator is not accepting any metadata, especially in `fit`, since + if a meta-estimator accepts any metadata, they would do that in `fit` as + well. + + Parameters + ---------- + obj : estimator + The estimator for which we're raising the error. + + method : str + The method where the error is raised. + + **kwargs : dict + The metadata passed to the method. + """ + kwargs = {key: value for key, value in kwargs.items() if value is not None} + if _routing_enabled() and kwargs: + cls_name = obj.__class__.__name__ + raise NotImplementedError( + f"{cls_name}.{method} cannot accept given metadata ({set(kwargs.keys())})" + f" since metadata routing is not yet implemented for {cls_name}." + ) + + +class _RoutingNotSupportedMixin: + """A mixin to be used to remove the default `get_metadata_routing`. + + This is used in meta-estimators where metadata routing is not yet + implemented. + + This also makes it clear in our rendered documentation that this method + cannot be used. + """ + + def get_metadata_routing(self): + """Raise `NotImplementedError`. 
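[Editor's note: illustrative sketch, not part of the diff; `owner=object()` is just a stand-in consumer.] `_routing_enabled` simply reflects the global config, and `_raise_for_params` refuses extra metadata while routing is disabled.

import sklearn

assert not _routing_enabled()                    # disabled by default
with sklearn.config_context(enable_metadata_routing=True):
    assert _routing_enabled()

try:
    _raise_for_params({"sample_weight": [1.0]}, owner=object(), method="fit")
except ValueError as exc:
    print(exc)                                   # points at enable_metadata_routing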
+ + This estimator does not support metadata routing yet.""" + raise NotImplementedError( + f"{self.__class__.__name__} has not implemented metadata routing yet." + ) + + +# Request values +# ============== +# Each request value needs to be one of the following values, or an alias. + +# this is used in `__metadata_request__*` attributes to indicate that a +# metadata is not present even though it may be present in the +# corresponding method's signature. +UNUSED = "$UNUSED$" + +# this is used whenever a default value is changed, and therefore the user +# should explicitly set the value, otherwise a warning is shown. An example +# is when a meta-estimator is only a router, but then becomes also a +# consumer in a new release. +WARN = "$WARN$" + +# this is the default used in `set_{method}_request` methods to indicate no +# change requested by the user. +UNCHANGED = "$UNCHANGED$" + +VALID_REQUEST_VALUES = [False, True, None, UNUSED, WARN] + + +def request_is_alias(item): + """Check if an item is a valid alias. + + Values in ``VALID_REQUEST_VALUES`` are not considered aliases in this + context. Only a string which is a valid identifier is. + + Parameters + ---------- + item : object + The given item to be checked if it can be an alias. + + Returns + ------- + result : bool + Whether the given item is a valid alias. + """ + if item in VALID_REQUEST_VALUES: + return False + + # item is only an alias if it's a valid identifier + return isinstance(item, str) and item.isidentifier() + + +def request_is_valid(item): + """Check if an item is a valid request value (and not an alias). + + Parameters + ---------- + item : object + The given item to be checked. + + Returns + ------- + result : bool + Whether the given item is valid. + """ + return item in VALID_REQUEST_VALUES + + +# Metadata Request for Simple Consumers +# ===================================== +# This section includes MethodMetadataRequest and MetadataRequest which are +# used in simple consumers. + + +class MethodMetadataRequest: + """A prescription of how metadata is to be passed to a single method. + + Refer to :class:`MetadataRequest` for how this class is used. + + .. versionadded:: 1.3 + + Parameters + ---------- + owner : str + A display name for the object owning these requests. + + method : str + The name of the method to which these requests belong. + + requests : dict of {str: bool, None or str}, default=None + The initial requests for this method. + """ + + def __init__(self, owner, method, requests=None): + self._requests = requests or dict() + self.owner = owner + self.method = method + + @property + def requests(self): + """Dictionary of the form: ``{key: alias}``.""" + return self._requests + + def add_request( + self, + *, + param, + alias, + ): + """Add request info for a metadata. + + Parameters + ---------- + param : str + The property for which a request is set. + + alias : str, or {True, False, None} + Specifies which metadata should be routed to `param` + + - str: the name (or alias) of metadata given to a meta-estimator that + should be routed to this parameter. 
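[Editor's note: illustrative sketch, not part of the diff; the parameter names are arbitrary.] How request values and aliases interact at this level: plain `True`/`False`/`None` are request values, while any other valid identifier string is treated as an alias.

assert request_is_alias("my_weights") and not request_is_alias(True)

mmr = MethodMetadataRequest(owner="MyEstimator", method="fit")
mmr.add_request(param="sample_weight", alias=True)
mmr.add_request(param="groups", alias="my_groups")
assert mmr.requests == {"sample_weight": True, "groups": "my_groups"}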
+ + - True: requested + + - False: not requested + + - None: error if passed + """ + if not request_is_alias(alias) and not request_is_valid(alias): + raise ValueError( + f"The alias you're setting for `{param}` should be either a " + "valid identifier or one of {None, True, False}, but given " + f"value is: `{alias}`" + ) + + if alias == param: + alias = True + + if alias == UNUSED: + if param in self._requests: + del self._requests[param] + else: + raise ValueError( + f"Trying to remove parameter {param} with UNUSED which doesn't" + " exist." + ) + else: + self._requests[param] = alias + + return self + + def _get_param_names(self, return_alias): + """Get names of all metadata that can be consumed or routed by this method. + + This method returns the names of all metadata, even the ``False`` + ones. + + Parameters + ---------- + return_alias : bool + Controls whether original or aliased names should be returned. If + ``False``, aliases are ignored and original names are returned. + + Returns + ------- + names : set of str + A set of strings with the names of all parameters. + """ + return set( + alias if return_alias and not request_is_valid(alias) else prop + for prop, alias in self._requests.items() + if not request_is_valid(alias) or alias is not False + ) + + def _check_warnings(self, *, params): + """Check whether metadata is passed which is marked as WARN. + + If any metadata is passed which is marked as WARN, a warning is raised. + + Parameters + ---------- + params : dict + The metadata passed to a method. + """ + params = {} if params is None else params + warn_params = { + prop + for prop, alias in self._requests.items() + if alias == WARN and prop in params + } + for param in warn_params: + warn( + f"Support for {param} has recently been added to this class. " + "To maintain backward compatibility, it is ignored now. " + "You can set the request value to False to silence this " + "warning, or to True to consume and use the metadata." + ) + + def _route_params(self, params): + """Prepare the given parameters to be passed to the method. + + The output of this method can be used directly as the input to the + corresponding method as extra props. + + Parameters + ---------- + params : dict + A dictionary of provided metadata. + + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the + corresponding method. + """ + self._check_warnings(params=params) + unrequested = dict() + args = {arg: value for arg, value in params.items() if value is not None} + res = Bunch() + for prop, alias in self._requests.items(): + if alias is False or alias == WARN: + continue + elif alias is True and prop in args: + res[prop] = args[prop] + elif alias is None and prop in args: + unrequested[prop] = args[prop] + elif alias in args: + res[prop] = args[alias] + if unrequested: + raise UnsetMetadataPassedError( + message=( + f"[{', '.join([key for key in unrequested])}] are passed but are" + " not explicitly set as requested or not for" + f" {self.owner}.{self.method}" + ), + unrequested_params=unrequested, + routed_params=res, + ) + return res + + def _consumes(self, params): + """Check whether the given parameters are consumed by this method. + + Parameters + ---------- + params : iterable of str + An iterable of parameters to check. + + Returns + ------- + consumed : set of str + A set of parameters which are consumed by this method. 
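+
+        Notes
+        -----
+        A minimal illustrative sketch of this private helper (the owner name
+        ``"MyEstimator"`` below is made up)::
+
+            mmr = MethodMetadataRequest(owner="MyEstimator", method="fit")
+            mmr.add_request(param="sample_weight", alias=True)
+            mmr._consumes(params={"sample_weight", "groups"})
+            # -> {"sample_weight"}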
+ """ + params = set(params) + res = set() + for prop, alias in self._requests.items(): + if alias is True and prop in params: + res.add(prop) + elif isinstance(alias, str) and alias in params: + res.add(alias) + return res + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : dict + A serialized version of the instance in the form of a dictionary. + """ + return self._requests + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +class MetadataRequest: + """Contains the metadata request info of a consumer. + + Instances of `MethodMetadataRequest` are used in this class for each + available method under `metadatarequest.{method}`. + + Consumer-only classes such as simple estimators return a serialized + version of this class as the output of `get_metadata_routing()`. + + .. versionadded:: 1.3 + + Parameters + ---------- + owner : str + The name of the object to which these requests belong. + """ + + # this is here for us to use this attribute's value instead of doing + # `isinstance` in our checks, so that we avoid issues when people vendor + # this file instead of using it directly from scikit-learn. + _type = "metadata_request" + + def __init__(self, owner): + self.owner = owner + for method in SIMPLE_METHODS: + setattr( + self, + method, + MethodMetadataRequest(owner=owner, method=method), + ) + + def consumes(self, method, params): + """Check whether the given parameters are consumed by the given method. + + .. versionadded:: 1.4 + + Parameters + ---------- + method : str + The name of the method to check. + + params : iterable of str + An iterable of parameters to check. + + Returns + ------- + consumed : set of str + A set of parameters which are consumed by the given method. + """ + return getattr(self, method)._consumes(params=params) + + def __getattr__(self, name): + # Called when the default attribute access fails with an AttributeError + # (either __getattribute__() raises an AttributeError because name is + # not an instance attribute or an attribute in the class tree for self; + # or __get__() of a name property raises AttributeError). This method + # should either return the (computed) attribute value or raise an + # AttributeError exception. + # https://docs.python.org/3/reference/datamodel.html#object.__getattr__ + if name not in COMPOSITE_METHODS: + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{name}'" + ) + + requests = {} + for method in COMPOSITE_METHODS[name]: + mmr = getattr(self, method) + existing = set(requests.keys()) + upcoming = set(mmr.requests.keys()) + common = existing & upcoming + conflicts = [key for key in common if requests[key] != mmr._requests[key]] + if conflicts: + raise ValueError( + f"Conflicting metadata requests for {', '.join(conflicts)} while" + f" composing the requests for {name}. Metadata with the same name" + f" for methods {', '.join(COMPOSITE_METHODS[name])} should have the" + " same request value." + ) + requests.update(mmr._requests) + return MethodMetadataRequest(owner=self.owner, method=name, requests=requests) + + def _get_param_names(self, method, return_alias, ignore_self_request=None): + """Get names of all metadata that can be consumed or routed by specified \ + method. + + This method returns the names of all metadata, even the ``False`` + ones. + + Parameters + ---------- + method : str + The name of the method for which metadata names are requested. 
+ + return_alias : bool + Controls whether original or aliased names should be returned. If + ``False``, aliases are ignored and original names are returned. + + ignore_self_request : bool + Ignored. Present for API compatibility. + + Returns + ------- + names : set of str + A set of strings with the names of all parameters. + """ + return getattr(self, method)._get_param_names(return_alias=return_alias) + + def _route_params(self, *, method, params): + """Prepare the given parameters to be passed to the method. + + The output of this method can be used directly as the input to the + corresponding method as extra keyword arguments to pass metadata. + + Parameters + ---------- + method : str + The name of the method for which the parameters are requested and + routed. + + params : dict + A dictionary of provided metadata. + + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the + corresponding method. + """ + return getattr(self, method)._route_params(params=params) + + def _check_warnings(self, *, method, params): + """Check whether metadata is passed which is marked as WARN. + + If any metadata is passed which is marked as WARN, a warning is raised. + + Parameters + ---------- + method : str + The name of the method for which the warnings should be checked. + + params : dict + The metadata passed to a method. + """ + getattr(self, method)._check_warnings(params=params) + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : dict + A serialized version of the instance in the form of a dictionary. + """ + output = dict() + for method in SIMPLE_METHODS: + mmr = getattr(self, method) + if len(mmr.requests): + output[method] = mmr._serialize() + return output + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +# Metadata Request for Routers +# ============================ +# This section includes all objects required for MetadataRouter which is used +# in routers, returned by their ``get_metadata_routing``. + +# This namedtuple is used to store a (mapping, routing) pair. Mapping is a +# MethodMapping object, and routing is the output of `get_metadata_routing`. +# MetadataRouter stores a collection of these namedtuples. +RouterMappingPair = namedtuple("RouterMappingPair", ["mapping", "router"]) + +# A namedtuple storing a single method route. A collection of these namedtuples +# is stored in a MetadataRouter. +MethodPair = namedtuple("MethodPair", ["callee", "caller"]) + + +class MethodMapping: + """Stores the mapping between callee and caller methods for a router. + + This class is primarily used in a ``get_metadata_routing()`` of a router + object when defining the mapping between a sub-object (a sub-estimator or a + scorer) to the router's methods. It stores a collection of ``Route`` + namedtuples. + + Iterating through an instance of this class will yield named + ``MethodPair(callee, caller)`` tuples. + + .. versionadded:: 1.3 + """ + + def __init__(self): + self._routes = [] + + def __iter__(self): + return iter(self._routes) + + def add(self, *, callee, caller): + """Add a method mapping. + + Parameters + ---------- + callee : str + Child object's method name. This method is called in ``caller``. + + caller : str + Parent estimator's method name in which the ``callee`` is called. + + Returns + ------- + self : MethodMapping + Returns self. + """ + if callee not in METHODS: + raise ValueError( + f"Given callee:{callee} is not a valid method. 
Valid methods are:" + f" {METHODS}" + ) + if caller not in METHODS: + raise ValueError( + f"Given caller:{caller} is not a valid method. Valid methods are:" + f" {METHODS}" + ) + self._routes.append(MethodPair(callee=callee, caller=caller)) + return self + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : list + A serialized version of the instance in the form of a list. + """ + result = list() + for route in self._routes: + result.append({"callee": route.callee, "caller": route.caller}) + return result + + @classmethod + def from_str(cls, route): + """Construct an instance from a string. + + Parameters + ---------- + route : str + A string representing the mapping, it can be: + + - `"one-to-one"`: a one to one mapping for all methods. + - `"method"`: the name of a single method, such as ``fit``, + ``transform``, ``score``, etc. + + Returns + ------- + obj : MethodMapping + A :class:`~sklearn.utils.metadata_routing.MethodMapping` instance + constructed from the given string. + """ + routing = cls() + if route == "one-to-one": + for method in METHODS: + routing.add(callee=method, caller=method) + elif route in METHODS: + routing.add(callee=route, caller=route) + else: + raise ValueError("route should be 'one-to-one' or a single method!") + return routing + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +class MetadataRouter: + """Stores and handles metadata routing for a router object. + + This class is used by router objects to store and handle metadata routing. + Routing information is stored as a dictionary of the form ``{"object_name": + RouteMappingPair(method_mapping, routing_info)}``, where ``method_mapping`` + is an instance of :class:`~sklearn.utils.metadata_routing.MethodMapping` and + ``routing_info`` is either a + :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a + :class:`~sklearn.utils.metadata_routing.MetadataRouter` instance. + + .. versionadded:: 1.3 + + Parameters + ---------- + owner : str + The name of the object to which these requests belong. + """ + + # this is here for us to use this attribute's value instead of doing + # `isinstance`` in our checks, so that we avoid issues when people vendor + # this file instead of using it directly from scikit-learn. + _type = "metadata_router" + + def __init__(self, owner): + self._route_mappings = dict() + # `_self_request` is used if the router is also a consumer. + # _self_request, (added using `add_self_request()`) is treated + # differently from the other objects which are stored in + # _route_mappings. + self._self_request = None + self.owner = owner + + def add_self_request(self, obj): + """Add `self` (as a consumer) to the routing. + + This method is used if the router is also a consumer, and hence the + router itself needs to be included in the routing. The passed object + can be an estimator or a + :class:`~sklearn.utils.metadata_routing.MetadataRequest`. + + A router should add itself using this method instead of `add` since it + should be treated differently than the other objects to which metadata + is routed by the router. + + Parameters + ---------- + obj : object + This is typically the router instance, i.e. `self` in a + ``get_metadata_routing()`` implementation. It can also be a + ``MetadataRequest`` instance. + + Returns + ------- + self : MetadataRouter + Returns `self`. 
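+
+        Notes
+        -----
+        A hypothetical sketch of the usual pattern inside a meta-estimator's
+        ``get_metadata_routing``, assuming the meta-estimator stores a
+        sub-estimator as ``self.estimator``::
+
+            def get_metadata_routing(self):
+                return (
+                    MetadataRouter(owner=self.__class__.__name__)
+                    .add_self_request(self)
+                    .add(estimator=self.estimator, method_mapping="fit")
+                )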
+ """ + if getattr(obj, "_type", None) == "metadata_request": + self._self_request = deepcopy(obj) + elif hasattr(obj, "_get_metadata_request"): + self._self_request = deepcopy(obj._get_metadata_request()) + else: + raise ValueError( + "Given `obj` is neither a `MetadataRequest` nor does it implement the" + " required API. Inheriting from `BaseEstimator` implements the required" + " API." + ) + return self + + def add(self, *, method_mapping, **objs): + """Add named objects with their corresponding method mapping. + + Parameters + ---------- + method_mapping : MethodMapping or str + The mapping between the child and the parent's methods. If str, the + output of :func:`~sklearn.utils.metadata_routing.MethodMapping.from_str` + is used. + + **objs : dict + A dictionary of objects from which metadata is extracted by calling + :func:`~sklearn.utils.metadata_routing.get_routing_for_object` on them. + + Returns + ------- + self : MetadataRouter + Returns `self`. + """ + if isinstance(method_mapping, str): + method_mapping = MethodMapping.from_str(method_mapping) + else: + method_mapping = deepcopy(method_mapping) + + for name, obj in objs.items(): + self._route_mappings[name] = RouterMappingPair( + mapping=method_mapping, router=get_routing_for_object(obj) + ) + return self + + def consumes(self, method, params): + """Check whether the given parameters are consumed by the given method. + + .. versionadded:: 1.4 + + Parameters + ---------- + method : str + The name of the method to check. + + params : iterable of str + An iterable of parameters to check. + + Returns + ------- + consumed : set of str + A set of parameters which are consumed by the given method. + """ + res = set() + if self._self_request: + res = res | self._self_request.consumes(method=method, params=params) + + for _, route_mapping in self._route_mappings.items(): + for callee, caller in route_mapping.mapping: + if caller == method: + res = res | route_mapping.router.consumes( + method=callee, params=params + ) + + return res + + def _get_param_names(self, *, method, return_alias, ignore_self_request): + """Get names of all metadata that can be consumed or routed by specified \ + method. + + This method returns the names of all metadata, even the ``False`` + ones. + + Parameters + ---------- + method : str + The name of the method for which metadata names are requested. + + return_alias : bool + Controls whether original or aliased names should be returned, + which only applies to the stored `self`. If no `self` routing + object is stored, this parameter has no effect. + + ignore_self_request : bool + If `self._self_request` should be ignored. This is used in `_route_params`. + If ``True``, ``return_alias`` has no effect. + + Returns + ------- + names : set of str + A set of strings with the names of all parameters. + """ + res = set() + if self._self_request and not ignore_self_request: + res = res.union( + self._self_request._get_param_names( + method=method, return_alias=return_alias + ) + ) + + for name, route_mapping in self._route_mappings.items(): + for callee, caller in route_mapping.mapping: + if caller == method: + res = res.union( + route_mapping.router._get_param_names( + method=callee, return_alias=True, ignore_self_request=False + ) + ) + return res + + def _route_params(self, *, params, method): + """Prepare the given parameters to be passed to the method. + + This is used when a router is used as a child object of another router. 
+ The parent router then passes all parameters understood by the child + object to it and delegates their validation to the child. + + The output of this method can be used directly as the input to the + corresponding method as extra props. + + Parameters + ---------- + method : str + The name of the method for which the parameters are requested and + routed. + + params : dict + A dictionary of provided metadata. + + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the + corresponding method. + """ + res = Bunch() + if self._self_request: + res.update(self._self_request._route_params(params=params, method=method)) + + param_names = self._get_param_names( + method=method, return_alias=True, ignore_self_request=True + ) + child_params = { + key: value for key, value in params.items() if key in param_names + } + for key in set(res.keys()).intersection(child_params.keys()): + # conflicts are okay if the passed objects are the same, but it's + # an issue if they're different objects. + if child_params[key] is not res[key]: + raise ValueError( + f"In {self.owner}, there is a conflict on {key} between what is" + " requested for this estimator and what is requested by its" + " children. You can resolve this conflict by using an alias for" + " the child estimator(s) requested metadata." + ) + + res.update(child_params) + return res + + def route_params(self, *, caller, params): + """Return the input parameters requested by child objects. + + The output of this method is a bunch, which includes the inputs for all + methods of each child object that are used in the router's `caller` + method. + + If the router is also a consumer, it also checks for warnings of + `self`'s/consumer's requested metadata. + + Parameters + ---------- + caller : str + The name of the method for which the parameters are requested and + routed. If called inside the :term:`fit` method of a router, it + would be `"fit"`. + + params : dict + A dictionary of provided metadata. + + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of the form + ``{"object_name": {"method_name": {prop: value}}}`` which can be + used to pass the required metadata to corresponding methods or + corresponding child objects. + """ + if self._self_request: + self._self_request._check_warnings(params=params, method=caller) + + res = Bunch() + for name, route_mapping in self._route_mappings.items(): + router, mapping = route_mapping.router, route_mapping.mapping + + res[name] = Bunch() + for _callee, _caller in mapping: + if _caller == caller: + res[name][_callee] = router._route_params( + params=params, method=_callee + ) + return res + + def validate_metadata(self, *, method, params): + """Validate given metadata for a method. + + This raises a ``TypeError`` if some of the passed metadata are not + understood by child objects. + + Parameters + ---------- + method : str + The name of the method for which the parameters are requested and + routed. If called inside the :term:`fit` method of a router, it + would be `"fit"`. + + params : dict + A dictionary of provided metadata. 
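+
+        Notes
+        -----
+        This is typically called just before :meth:`route_params`, mirroring
+        what :func:`process_routing` does. A hypothetical sketch, where
+        ``meta_estimator`` is any router object and ``fit_params`` is a dict
+        of user-provided metadata::
+
+            router = meta_estimator.get_metadata_routing()
+            router.validate_metadata(params=fit_params, method="fit")
+            routed = router.route_params(params=fit_params, caller="fit")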
+ """ + param_names = self._get_param_names( + method=method, return_alias=False, ignore_self_request=False + ) + if self._self_request: + self_params = self._self_request._get_param_names( + method=method, return_alias=False + ) + else: + self_params = set() + extra_keys = set(params.keys()) - param_names - self_params + if extra_keys: + raise TypeError( + f"{self.owner}.{method} got unexpected argument(s) {extra_keys}, which" + " are not requested metadata in any object." + ) + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : dict + A serialized version of the instance in the form of a dictionary. + """ + res = dict() + if self._self_request: + res["$self_request"] = self._self_request._serialize() + for name, route_mapping in self._route_mappings.items(): + res[name] = dict() + res[name]["mapping"] = route_mapping.mapping._serialize() + res[name]["router"] = route_mapping.router._serialize() + + return res + + def __iter__(self): + if self._self_request: + yield ( + "$self_request", + RouterMappingPair( + mapping=MethodMapping.from_str("one-to-one"), + router=self._self_request, + ), + ) + for name, route_mapping in self._route_mappings.items(): + yield (name, route_mapping) + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +def get_routing_for_object(obj=None): + """Get a ``Metadata{Router, Request}`` instance from the given object. + + This function returns a + :class:`~sklearn.utils.metadata_routing.MetadataRouter` or a + :class:`~sklearn.utils.metadata_routing.MetadataRequest` from the given input. + + This function always returns a copy or an instance constructed from the + input, such that changing the output of this function will not change the + original object. + + .. versionadded:: 1.3 + + Parameters + ---------- + obj : object + - If the object is already a + :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a + :class:`~sklearn.utils.metadata_routing.MetadataRouter`, return a copy + of that. + - If the object provides a `get_metadata_routing` method, return a copy + of the output of that method. + - Returns an empty :class:`~sklearn.utils.metadata_routing.MetadataRequest` + otherwise. + + Returns + ------- + obj : MetadataRequest or MetadataRouting + A ``MetadataRequest`` or a ``MetadataRouting`` taken or created from + the given object. + """ + # doing this instead of a try/except since an AttributeError could be raised + # for other reasons. + if hasattr(obj, "get_metadata_routing"): + return deepcopy(obj.get_metadata_routing()) + + elif getattr(obj, "_type", None) in ["metadata_request", "metadata_router"]: + return deepcopy(obj) + + return MetadataRequest(owner=None) + + +# Request method +# ============== +# This section includes what's needed for the request method descriptor and +# their dynamic generation in a meta class. + +# These strings are used to dynamically generate the docstrings for +# set_{method}_request methods. +REQUESTER_DOC = """ Request metadata passed to the ``{method}`` method. + + Note that this method is only relevant if + ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). + Please see :ref:`User Guide ` on how the routing + mechanism works. + + The options for each parameter are: + + - ``True``: metadata is requested, and \ +passed to ``{method}`` if provided. The request is ignored if \ +metadata is not provided. + + - ``False``: metadata is not requested and the meta-estimator \ +will not pass it to ``{method}``. 
+ + - ``None``: metadata is not requested, and the meta-estimator \ +will raise an error if the user provides it. + + - ``str``: metadata should be passed to the meta-estimator with \ +this given alias instead of the original name. + + The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the + existing request. This allows you to change the request for some + parameters and not others. + + .. versionadded:: 1.3 + + .. note:: + This method is only relevant if this estimator is used as a + sub-estimator of a meta-estimator, e.g. used inside a + :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. + + Parameters + ---------- +""" +REQUESTER_DOC_PARAM = """ {metadata} : str, True, False, or None, \ + default=sklearn.utils.metadata_routing.UNCHANGED + Metadata routing for ``{metadata}`` parameter in ``{method}``. + +""" +REQUESTER_DOC_RETURN = """ Returns + ------- + self : object + The updated object. +""" + + +class RequestMethod: + """ + A descriptor for request methods. + + .. versionadded:: 1.3 + + Parameters + ---------- + name : str + The name of the method for which the request function should be + created, e.g. ``"fit"`` would create a ``set_fit_request`` function. + + keys : list of str + A list of strings which are accepted parameters by the created + function, e.g. ``["sample_weight"]`` if the corresponding method + accepts it as a metadata. + + validate_keys : bool, default=True + Whether to check if the requested parameters fit the actual parameters + of the method. + + Notes + ----- + This class is a descriptor [1]_ and uses PEP-362 to set the signature of + the returned function [2]_. + + References + ---------- + .. [1] https://docs.python.org/3/howto/descriptor.html + + .. [2] https://www.python.org/dev/peps/pep-0362/ + """ + + def __init__(self, name, keys, validate_keys=True): + self.name = name + self.keys = keys + self.validate_keys = validate_keys + + def __get__(self, instance, owner): + # we would want to have a method which accepts only the expected args + def func(**kw): + """Updates the request for provided parameters + + This docstring is overwritten below. + See REQUESTER_DOC for expected functionality + """ + if not _routing_enabled(): + raise RuntimeError( + "This method is only available when metadata routing is enabled." + " You can enable it using" + " sklearn.set_config(enable_metadata_routing=True)." + ) + + if self.validate_keys and (set(kw) - set(self.keys)): + raise TypeError( + f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" + f" are: {set(self.keys)}" + ) + + requests = instance._get_metadata_request() + method_metadata_request = getattr(requests, self.name) + + for prop, alias in kw.items(): + if alias is not UNCHANGED: + method_metadata_request.add_request(param=prop, alias=alias) + instance._metadata_request = requests + + return instance + + # Now we set the relevant attributes of the function so that it seems + # like a normal method to the end user, with known expected arguments. 
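+        # For example (hypothetical), with name="fit" and keys=["sample_weight"],
+        # the generated method is exposed to the user roughly as:
+        #
+        #     def set_fit_request(self, *, sample_weight=UNCHANGED):
+        #         ...
+        #         return self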
+ func.__name__ = f"set_{self.name}_request" + params = [ + inspect.Parameter( + name="self", + kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, + annotation=owner, + ) + ] + params.extend( + [ + inspect.Parameter( + k, + inspect.Parameter.KEYWORD_ONLY, + default=UNCHANGED, + annotation=Optional[Union[bool, None, str]], + ) + for k in self.keys + ] + ) + func.__signature__ = inspect.Signature( + params, + return_annotation=owner, + ) + doc = REQUESTER_DOC.format(method=self.name) + for metadata in self.keys: + doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) + doc += REQUESTER_DOC_RETURN + func.__doc__ = doc + return func + + +class _MetadataRequester: + """Mixin class for adding metadata request functionality. + + ``BaseEstimator`` inherits from this Mixin. + + .. versionadded:: 1.3 + """ + + if TYPE_CHECKING: # pragma: no cover + # This code is never run in runtime, but it's here for type checking. + # Type checkers fail to understand that the `set_{method}_request` + # methods are dynamically generated, and they complain that they are + # not defined. We define them here to make type checkers happy. + # During type checking analyzers assume this to be True. + # The following list of defined methods mirrors the list of methods + # in SIMPLE_METHODS. + # fmt: off + def set_fit_request(self, **kwargs): pass + def set_partial_fit_request(self, **kwargs): pass + def set_predict_request(self, **kwargs): pass + def set_predict_proba_request(self, **kwargs): pass + def set_predict_log_proba_request(self, **kwargs): pass + def set_decision_function_request(self, **kwargs): pass + def set_score_request(self, **kwargs): pass + def set_split_request(self, **kwargs): pass + def set_transform_request(self, **kwargs): pass + def set_inverse_transform_request(self, **kwargs): pass + # fmt: on + + def __init_subclass__(cls, **kwargs): + """Set the ``set_{method}_request`` methods. + + This uses PEP-487 [1]_ to set the ``set_{method}_request`` methods. It + looks for the information available in the set default values which are + set using ``__metadata_request__*`` class attributes, or inferred + from method signatures. + + The ``__metadata_request__*`` class attributes are used when a method + does not explicitly accept a metadata through its arguments or if the + developer would like to specify a request value for those metadata + which are different from the default ``None``. + + References + ---------- + .. [1] https://www.python.org/dev/peps/pep-0487 + """ + try: + requests = cls._get_default_requests() + except Exception: + # if there are any issues in the default values, it will be raised + # when ``get_metadata_routing`` is called. Here we are going to + # ignore all the issues such as bad defaults etc. + super().__init_subclass__(**kwargs) + return + + for method in SIMPLE_METHODS: + mmr = getattr(requests, method) + # set ``set_{method}_request``` methods + if not len(mmr.requests): + continue + setattr( + cls, + f"set_{method}_request", + RequestMethod(method, sorted(mmr.requests.keys())), + ) + super().__init_subclass__(**kwargs) + + @classmethod + def _build_request_for_signature(cls, router, method): + """Build the `MethodMetadataRequest` for a method using its signature. + + This method takes all arguments from the method signature and uses + ``None`` as their default request value, except ``X``, ``y``, ``Y``, + ``Xt``, ``yt``, ``*args``, and ``**kwargs``. + + Parameters + ---------- + router : MetadataRequest + The parent object for the created `MethodMetadataRequest`. 
+ method : str + The name of the method. + + Returns + ------- + method_request : MethodMetadataRequest + The prepared request using the method's signature. + """ + mmr = MethodMetadataRequest(owner=cls.__name__, method=method) + # Here we use `isfunction` instead of `ismethod` because calling `getattr` + # on a class instead of an instance returns an unbound function. + if not hasattr(cls, method) or not inspect.isfunction(getattr(cls, method)): + return mmr + # ignore the first parameter of the method, which is usually "self" + params = list(inspect.signature(getattr(cls, method)).parameters.items())[1:] + for pname, param in params: + if pname in {"X", "y", "Y", "Xt", "yt"}: + continue + if param.kind in {param.VAR_POSITIONAL, param.VAR_KEYWORD}: + continue + mmr.add_request( + param=pname, + alias=None, + ) + return mmr + + @classmethod + def _get_default_requests(cls): + """Collect default request values. + + This method combines the information present in ``__metadata_request__*`` + class attributes, as well as determining request keys from method + signatures. + """ + requests = MetadataRequest(owner=cls.__name__) + + for method in SIMPLE_METHODS: + setattr( + requests, + method, + cls._build_request_for_signature(router=requests, method=method), + ) + + # Then overwrite those defaults with the ones provided in + # __metadata_request__* attributes. Defaults set in + # __metadata_request__* attributes take precedence over signature + # sniffing. + + # need to go through the MRO since this is a class attribute and + # ``vars`` doesn't report the parent class attributes. We go through + # the reverse of the MRO so that child classes have precedence over + # their parents. + defaults = dict() + for base_class in reversed(inspect.getmro(cls)): + base_defaults = { + attr: value + for attr, value in vars(base_class).items() + if "__metadata_request__" in attr + } + defaults.update(base_defaults) + defaults = dict(sorted(defaults.items())) + + for attr, value in defaults.items(): + # we don't check for attr.startswith() since python prefixes attrs + # starting with __ with the `_ClassName`. + substr = "__metadata_request__" + method = attr[attr.index(substr) + len(substr) :] + for prop, alias in value.items(): + getattr(requests, method).add_request(param=prop, alias=alias) + + return requests + + def _get_metadata_request(self): + """Get requested data properties. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + request : MetadataRequest + A :class:`~sklearn.utils.metadata_routing.MetadataRequest` instance. + """ + if hasattr(self, "_metadata_request"): + requests = get_routing_for_object(self._metadata_request) + else: + requests = self._get_default_requests() + + return requests + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + routing : MetadataRequest + A :class:`~sklearn.utils.metadata_routing.MetadataRequest` encapsulating + routing information. + """ + return self._get_metadata_request() + + +# Process Routing in Routers +# ========================== +# This is almost always the only method used in routers to process and route +# given metadata. This is to minimize the boilerplate required in routers. + + +# Here the first two arguments are positional only which makes everything +# passed as keyword argument a metadata. 
The first two args also have an `_` +# prefix to reduce the chances of name collisions with the passed metadata, and +# since they're positional only, users will never type those underscores. +def process_routing(_obj, _method, /, **kwargs): + """Validate and route input parameters. + + This function is used inside a router's method, e.g. :term:`fit`, + to validate the metadata and handle the routing. + + Assuming this signature: ``fit(self, X, y, sample_weight=None, **fit_params)``, + a call to this function would be: + ``process_routing(self, sample_weight=sample_weight, **fit_params)``. + + Note that if routing is not enabled and ``kwargs`` is empty, then it + returns an empty routing where ``process_routing(...).ANYTHING.ANY_METHOD`` + is always an empty dictionary. + + .. versionadded:: 1.3 + + Parameters + ---------- + _obj : object + An object implementing ``get_metadata_routing``. Typically a + meta-estimator. + + _method : str + The name of the router's method in which this function is called. + + **kwargs : dict + Metadata to be routed. + + Returns + ------- + routed_params : Bunch + A :class:`~sklearn.utils.Bunch` of the form ``{"object_name": {"method_name": + {prop: value}}}`` which can be used to pass the required metadata to + corresponding methods or corresponding child objects. The object names + are those defined in `obj.get_metadata_routing()`. + """ + if not kwargs: + # If routing is not enabled and kwargs are empty, then we don't have to + # try doing any routing, we can simply return a structure which returns + # an empty dict on routed_params.ANYTHING.ANY_METHOD. + class EmptyRequest: + def get(self, name, default=None): + return Bunch(**{method: dict() for method in METHODS}) + + def __getitem__(self, name): + return Bunch(**{method: dict() for method in METHODS}) + + def __getattr__(self, name): + return Bunch(**{method: dict() for method in METHODS}) + + return EmptyRequest() + + if not (hasattr(_obj, "get_metadata_routing") or isinstance(_obj, MetadataRouter)): + raise AttributeError( + f"The given object ({repr(_obj.__class__.__name__)}) needs to either" + " implement the routing method `get_metadata_routing` or be a" + " `MetadataRouter` instance." + ) + if _method not in METHODS: + raise TypeError( + f"Can only route and process input on these methods: {METHODS}, " + f"while the passed method is: {_method}." + ) + + request_routing = get_routing_for_object(_obj) + request_routing.validate_metadata(params=kwargs, method=_method) + routed_params = request_routing.route_params(params=kwargs, caller=_method) + + return routed_params diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd b/venv/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a7694d0be2d93b77fc89e7c8eb8d15338fe3ebb4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd @@ -0,0 +1,33 @@ +# Helpers to safely access OpenMP routines +# +# no-op implementations are provided for the case where OpenMP is not available. +# +# All calls to OpenMP routines should be cimported from this module. 
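+#
+# A hypothetical usage sketch from a .pyx module (the importing module shown
+# here is illustrative only):
+#
+#     from sklearn.utils._openmp_helpers cimport omp_get_max_threads
+#
+#     cdef int n_threads = omp_get_max_threads()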
+ +cdef extern from *: + """ + #ifdef _OPENMP + #include + #define SKLEARN_OPENMP_PARALLELISM_ENABLED 1 + #else + #define SKLEARN_OPENMP_PARALLELISM_ENABLED 0 + #define omp_lock_t int + #define omp_init_lock(l) (void)0 + #define omp_destroy_lock(l) (void)0 + #define omp_set_lock(l) (void)0 + #define omp_unset_lock(l) (void)0 + #define omp_get_thread_num() 0 + #define omp_get_max_threads() 1 + #endif + """ + bint SKLEARN_OPENMP_PARALLELISM_ENABLED + + ctypedef struct omp_lock_t: + pass + + void omp_init_lock(omp_lock_t*) noexcept nogil + void omp_destroy_lock(omp_lock_t*) noexcept nogil + void omp_set_lock(omp_lock_t*) noexcept nogil + void omp_unset_lock(omp_lock_t*) noexcept nogil + int omp_get_thread_num() noexcept nogil + int omp_get_max_threads() noexcept nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_param_validation.py b/venv/lib/python3.10/site-packages/sklearn/utils/_param_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..ae2e9648a4ccb195fd3e14bede1359e161d30846 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_param_validation.py @@ -0,0 +1,905 @@ +import functools +import math +import operator +import re +from abc import ABC, abstractmethod +from collections.abc import Iterable +from inspect import signature +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import csr_matrix, issparse + +from .._config import config_context, get_config +from .validation import _is_arraylike_not_scalar + + +class InvalidParameterError(ValueError, TypeError): + """Custom exception to be raised when the parameter of a class/method/function + does not have a valid type or value. + """ + + # Inherits from ValueError and TypeError to keep backward compatibility. + + +def validate_parameter_constraints(parameter_constraints, params, caller_name): + """Validate types and values of given parameters. + + Parameters + ---------- + parameter_constraints : dict or {"no_validation"} + If "no_validation", validation is skipped for this parameter. + + If a dict, it must be a dictionary `param_name: list of constraints`. + A parameter is valid if it satisfies one of the constraints from the list. + Constraints can be: + - an Interval object, representing a continuous or discrete range of numbers + - the string "array-like" + - the string "sparse matrix" + - the string "random_state" + - callable + - None, meaning that None is a valid value for the parameter + - any type, meaning that any instance of this type is valid + - an Options object, representing a set of elements of a given type + - a StrOptions object, representing a set of strings + - the string "boolean" + - the string "verbose" + - the string "cv_object" + - the string "nan" + - a MissingValues object representing markers for missing values + - a HasMethods object, representing method(s) an object must have + - a Hidden object, representing a constraint not meant to be exposed to the user + + params : dict + A dictionary `param_name: param_value`. The parameters to validate against the + constraints. + + caller_name : str + The name of the estimator or function or method that called this function. + """ + for param_name, param_val in params.items(): + # We allow parameters to not have a constraint so that third party estimators + # can inherit from sklearn estimators without having to necessarily use the + # validation tools. 
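+        # For instance (hypothetical), if ``params`` contains a key
+        # ``"custom_opt"`` that has no entry in ``parameter_constraints``,
+        # it is simply skipped rather than rejected.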
+ if param_name not in parameter_constraints: + continue + + constraints = parameter_constraints[param_name] + + if constraints == "no_validation": + continue + + constraints = [make_constraint(constraint) for constraint in constraints] + + for constraint in constraints: + if constraint.is_satisfied_by(param_val): + # this constraint is satisfied, no need to check further. + break + else: + # No constraint is satisfied, raise with an informative message. + + # Ignore constraints that we don't want to expose in the error message, + # i.e. options that are for internal purpose or not officially supported. + constraints = [ + constraint for constraint in constraints if not constraint.hidden + ] + + if len(constraints) == 1: + constraints_str = f"{constraints[0]}" + else: + constraints_str = ( + f"{', '.join([str(c) for c in constraints[:-1]])} or" + f" {constraints[-1]}" + ) + + raise InvalidParameterError( + f"The {param_name!r} parameter of {caller_name} must be" + f" {constraints_str}. Got {param_val!r} instead." + ) + + +def make_constraint(constraint): + """Convert the constraint into the appropriate Constraint object. + + Parameters + ---------- + constraint : object + The constraint to convert. + + Returns + ------- + constraint : instance of _Constraint + The converted constraint. + """ + if isinstance(constraint, str) and constraint == "array-like": + return _ArrayLikes() + if isinstance(constraint, str) and constraint == "sparse matrix": + return _SparseMatrices() + if isinstance(constraint, str) and constraint == "random_state": + return _RandomStates() + if constraint is callable: + return _Callables() + if constraint is None: + return _NoneConstraint() + if isinstance(constraint, type): + return _InstancesOf(constraint) + if isinstance( + constraint, (Interval, StrOptions, Options, HasMethods, MissingValues) + ): + return constraint + if isinstance(constraint, str) and constraint == "boolean": + return _Booleans() + if isinstance(constraint, str) and constraint == "verbose": + return _VerboseHelper() + if isinstance(constraint, str) and constraint == "cv_object": + return _CVObjects() + if isinstance(constraint, Hidden): + constraint = make_constraint(constraint.constraint) + constraint.hidden = True + return constraint + if isinstance(constraint, str) and constraint == "nan": + return _NanConstraint() + raise ValueError(f"Unknown constraint type: {constraint}") + + +def validate_params(parameter_constraints, *, prefer_skip_nested_validation): + """Decorator to validate types and values of functions and methods. + + Parameters + ---------- + parameter_constraints : dict + A dictionary `param_name: list of constraints`. See the docstring of + `validate_parameter_constraints` for a description of the accepted constraints. + + Note that the *args and **kwargs parameters are not validated and must not be + present in the parameter_constraints dictionary. + + prefer_skip_nested_validation : bool + If True, the validation of parameters of inner estimators or functions + called by the decorated function will be skipped. + + This is useful to avoid validating many times the parameters passed by the + user from the public facing API. It's also useful to avoid validating + parameters that we pass internally to inner functions that are guaranteed to + be valid by the test suite. + + It should be set to True for most functions, except for those that receive + non-validated objects as parameters or that are just wrappers around classes + because they only perform a partial validation. 
+ + Returns + ------- + decorated_function : function or method + The decorated function. + """ + + def decorator(func): + # The dict of parameter constraints is set as an attribute of the function + # to make it possible to dynamically introspect the constraints for + # automatic testing. + setattr(func, "_skl_parameter_constraints", parameter_constraints) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + global_skip_validation = get_config()["skip_parameter_validation"] + if global_skip_validation: + return func(*args, **kwargs) + + func_sig = signature(func) + + # Map *args/**kwargs to the function signature + params = func_sig.bind(*args, **kwargs) + params.apply_defaults() + + # ignore self/cls and positional/keyword markers + to_ignore = [ + p.name + for p in func_sig.parameters.values() + if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD) + ] + to_ignore += ["self", "cls"] + params = {k: v for k, v in params.arguments.items() if k not in to_ignore} + + validate_parameter_constraints( + parameter_constraints, params, caller_name=func.__qualname__ + ) + + try: + with config_context( + skip_parameter_validation=( + prefer_skip_nested_validation or global_skip_validation + ) + ): + return func(*args, **kwargs) + except InvalidParameterError as e: + # When the function is just a wrapper around an estimator, we allow + # the function to delegate validation to the estimator, but we replace + # the name of the estimator by the name of the function in the error + # message to avoid confusion. + msg = re.sub( + r"parameter of \w+ must be", + f"parameter of {func.__qualname__} must be", + str(e), + ) + raise InvalidParameterError(msg) from e + + return wrapper + + return decorator + + +class RealNotInt(Real): + """A type that represents reals that are not instances of int. + + Behaves like float, but also works with values extracted from numpy arrays. + isintance(1, RealNotInt) -> False + isinstance(1.0, RealNotInt) -> True + """ + + +RealNotInt.register(float) + + +def _type_name(t): + """Convert type into human readable string.""" + module = t.__module__ + qualname = t.__qualname__ + if module == "builtins": + return qualname + elif t == Real: + return "float" + elif t == Integral: + return "int" + return f"{module}.{qualname}" + + +class _Constraint(ABC): + """Base class for the constraint objects.""" + + def __init__(self): + self.hidden = False + + @abstractmethod + def is_satisfied_by(self, val): + """Whether or not a value satisfies the constraint. + + Parameters + ---------- + val : object + The value to check. + + Returns + ------- + is_satisfied : bool + Whether or not the constraint is satisfied by this value. + """ + + @abstractmethod + def __str__(self): + """A human readable representational string of the constraint.""" + + +class _InstancesOf(_Constraint): + """Constraint representing instances of a given type. + + Parameters + ---------- + type : type + The valid type. 
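+
+    Notes
+    -----
+    A quick illustrative check of this private helper::
+
+        c = _InstancesOf(int)
+        c.is_satisfied_by(3)     # True
+        c.is_satisfied_by("3")   # False
+        str(c)                   # "an instance of 'int'"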
+ """ + + def __init__(self, type): + super().__init__() + self.type = type + + def is_satisfied_by(self, val): + return isinstance(val, self.type) + + def __str__(self): + return f"an instance of {_type_name(self.type)!r}" + + +class _NoneConstraint(_Constraint): + """Constraint representing the None singleton.""" + + def is_satisfied_by(self, val): + return val is None + + def __str__(self): + return "None" + + +class _NanConstraint(_Constraint): + """Constraint representing the indicator `np.nan`.""" + + def is_satisfied_by(self, val): + return ( + not isinstance(val, Integral) and isinstance(val, Real) and math.isnan(val) + ) + + def __str__(self): + return "numpy.nan" + + +class _PandasNAConstraint(_Constraint): + """Constraint representing the indicator `pd.NA`.""" + + def is_satisfied_by(self, val): + try: + import pandas as pd + + return isinstance(val, type(pd.NA)) and pd.isna(val) + except ImportError: + return False + + def __str__(self): + return "pandas.NA" + + +class Options(_Constraint): + """Constraint representing a finite set of instances of a given type. + + Parameters + ---------- + type : type + + options : set + The set of valid scalars. + + deprecated : set or None, default=None + A subset of the `options` to mark as deprecated in the string + representation of the constraint. + """ + + def __init__(self, type, options, *, deprecated=None): + super().__init__() + self.type = type + self.options = options + self.deprecated = deprecated or set() + + if self.deprecated - self.options: + raise ValueError("The deprecated options must be a subset of the options.") + + def is_satisfied_by(self, val): + return isinstance(val, self.type) and val in self.options + + def _mark_if_deprecated(self, option): + """Add a deprecated mark to an option if needed.""" + option_str = f"{option!r}" + if option in self.deprecated: + option_str = f"{option_str} (deprecated)" + return option_str + + def __str__(self): + options_str = ( + f"{', '.join([self._mark_if_deprecated(o) for o in self.options])}" + ) + return f"a {_type_name(self.type)} among {{{options_str}}}" + + +class StrOptions(Options): + """Constraint representing a finite set of strings. + + Parameters + ---------- + options : set of str + The set of valid strings. + + deprecated : set of str or None, default=None + A subset of the `options` to mark as deprecated in the string + representation of the constraint. + """ + + def __init__(self, options, *, deprecated=None): + super().__init__(type=str, options=options, deprecated=deprecated) + + +class Interval(_Constraint): + """Constraint representing a typed interval. + + Parameters + ---------- + type : {numbers.Integral, numbers.Real, RealNotInt} + The set of numbers in which to set the interval. + + If RealNotInt, only reals that don't have the integer type + are allowed. For example 1.0 is allowed but 1 is not. + + left : float or int or None + The left bound of the interval. None means left bound is -∞. + + right : float, int or None + The right bound of the interval. None means right bound is +∞. + + closed : {"left", "right", "both", "neither"} + Whether the interval is open or closed. Possible choices are: + + - `"left"`: the interval is closed on the left and open on the right. + It is equivalent to the interval `[ left, right )`. + - `"right"`: the interval is closed on the right and open on the left. + It is equivalent to the interval `( left, right ]`. + - `"both"`: the interval is closed. + It is equivalent to the interval `[ left, right ]`. 
+ - `"neither"`: the interval is open. + It is equivalent to the interval `( left, right )`. + + Notes + ----- + Setting a bound to `None` and setting the interval closed is valid. For instance, + strictly speaking, `Interval(Real, 0, None, closed="both")` corresponds to + `[0, +∞) U {+∞}`. + """ + + def __init__(self, type, left, right, *, closed): + super().__init__() + self.type = type + self.left = left + self.right = right + self.closed = closed + + self._check_params() + + def _check_params(self): + if self.type not in (Integral, Real, RealNotInt): + raise ValueError( + "type must be either numbers.Integral, numbers.Real or RealNotInt." + f" Got {self.type} instead." + ) + + if self.closed not in ("left", "right", "both", "neither"): + raise ValueError( + "closed must be either 'left', 'right', 'both' or 'neither'. " + f"Got {self.closed} instead." + ) + + if self.type is Integral: + suffix = "for an interval over the integers." + if self.left is not None and not isinstance(self.left, Integral): + raise TypeError(f"Expecting left to be an int {suffix}") + if self.right is not None and not isinstance(self.right, Integral): + raise TypeError(f"Expecting right to be an int {suffix}") + if self.left is None and self.closed in ("left", "both"): + raise ValueError( + f"left can't be None when closed == {self.closed} {suffix}" + ) + if self.right is None and self.closed in ("right", "both"): + raise ValueError( + f"right can't be None when closed == {self.closed} {suffix}" + ) + else: + if self.left is not None and not isinstance(self.left, Real): + raise TypeError("Expecting left to be a real number.") + if self.right is not None and not isinstance(self.right, Real): + raise TypeError("Expecting right to be a real number.") + + if self.right is not None and self.left is not None and self.right <= self.left: + raise ValueError( + f"right can't be less than left. 
Got left={self.left} and " + f"right={self.right}" + ) + + def __contains__(self, val): + if not isinstance(val, Integral) and np.isnan(val): + return False + + left_cmp = operator.lt if self.closed in ("left", "both") else operator.le + right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge + + left = -np.inf if self.left is None else self.left + right = np.inf if self.right is None else self.right + + if left_cmp(val, left): + return False + if right_cmp(val, right): + return False + return True + + def is_satisfied_by(self, val): + if not isinstance(val, self.type): + return False + + return val in self + + def __str__(self): + type_str = "an int" if self.type is Integral else "a float" + left_bracket = "[" if self.closed in ("left", "both") else "(" + left_bound = "-inf" if self.left is None else self.left + right_bound = "inf" if self.right is None else self.right + right_bracket = "]" if self.closed in ("right", "both") else ")" + + # better repr if the bounds were given as integers + if not self.type == Integral and isinstance(self.left, Real): + left_bound = float(left_bound) + if not self.type == Integral and isinstance(self.right, Real): + right_bound = float(right_bound) + + return ( + f"{type_str} in the range " + f"{left_bracket}{left_bound}, {right_bound}{right_bracket}" + ) + + +class _ArrayLikes(_Constraint): + """Constraint representing array-likes""" + + def is_satisfied_by(self, val): + return _is_arraylike_not_scalar(val) + + def __str__(self): + return "an array-like" + + +class _SparseMatrices(_Constraint): + """Constraint representing sparse matrices.""" + + def is_satisfied_by(self, val): + return issparse(val) + + def __str__(self): + return "a sparse matrix" + + +class _Callables(_Constraint): + """Constraint representing callables.""" + + def is_satisfied_by(self, val): + return callable(val) + + def __str__(self): + return "a callable" + + +class _RandomStates(_Constraint): + """Constraint representing random states. + + Convenience class for + [Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + Interval(Integral, 0, 2**32 - 1, closed="both"), + _InstancesOf(np.random.RandomState), + _NoneConstraint(), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class _Booleans(_Constraint): + """Constraint representing boolean likes. + + Convenience class for + [bool, np.bool_, Integral (deprecated)] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + _InstancesOf(bool), + _InstancesOf(np.bool_), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class _VerboseHelper(_Constraint): + """Helper constraint for the verbose parameter. 
+ + Convenience class for + [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + Interval(Integral, 0, None, closed="left"), + _InstancesOf(bool), + _InstancesOf(np.bool_), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class MissingValues(_Constraint): + """Helper constraint for the `missing_values` parameters. + + Convenience for + [ + Integral, + Interval(Real, None, None, closed="both"), + str, # when numeric_only is False + None, # when numeric_only is False + _NanConstraint(), + _PandasNAConstraint(), + ] + + Parameters + ---------- + numeric_only : bool, default=False + Whether to consider only numeric missing value markers. + + """ + + def __init__(self, numeric_only=False): + super().__init__() + + self.numeric_only = numeric_only + + self._constraints = [ + _InstancesOf(Integral), + # we use an interval of Real to ignore np.nan that has its own constraint + Interval(Real, None, None, closed="both"), + _NanConstraint(), + _PandasNAConstraint(), + ] + if not self.numeric_only: + self._constraints.extend([_InstancesOf(str), _NoneConstraint()]) + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class HasMethods(_Constraint): + """Constraint representing objects that expose specific methods. + + It is useful for parameters following a protocol and where we don't want to impose + an affiliation to a specific module or class. + + Parameters + ---------- + methods : str or list of str + The method(s) that the object is expected to expose. + """ + + @validate_params( + {"methods": [str, list]}, + prefer_skip_nested_validation=True, + ) + def __init__(self, methods): + super().__init__() + if isinstance(methods, str): + methods = [methods] + self.methods = methods + + def is_satisfied_by(self, val): + return all(callable(getattr(val, method, None)) for method in self.methods) + + def __str__(self): + if len(self.methods) == 1: + methods = f"{self.methods[0]!r}" + else: + methods = ( + f"{', '.join([repr(m) for m in self.methods[:-1]])} and" + f" {self.methods[-1]!r}" + ) + return f"an object implementing {methods}" + + +class _IterablesNotString(_Constraint): + """Constraint representing iterables that are not strings.""" + + def is_satisfied_by(self, val): + return isinstance(val, Iterable) and not isinstance(val, str) + + def __str__(self): + return "an iterable" + + +class _CVObjects(_Constraint): + """Constraint representing cv objects. + + Convenient class for + [ + Interval(Integral, 2, None, closed="left"), + HasMethods(["split", "get_n_splits"]), + _IterablesNotString(), + None, + ] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + Interval(Integral, 2, None, closed="left"), + HasMethods(["split", "get_n_splits"]), + _IterablesNotString(), + _NoneConstraint(), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class Hidden: + """Class encapsulating a constraint not meant to be exposed to the user. 
+ + Parameters + ---------- + constraint : str or _Constraint instance + The constraint to be used internally. + """ + + def __init__(self, constraint): + self.constraint = constraint + + +def generate_invalid_param_val(constraint): + """Return a value that does not satisfy the constraint. + + Raises a NotImplementedError if there exists no invalid value for this constraint. + + This is only useful for testing purpose. + + Parameters + ---------- + constraint : _Constraint instance + The constraint to generate a value for. + + Returns + ------- + val : object + A value that does not satisfy the constraint. + """ + if isinstance(constraint, StrOptions): + return f"not {' or '.join(constraint.options)}" + + if isinstance(constraint, MissingValues): + return np.array([1, 2, 3]) + + if isinstance(constraint, _VerboseHelper): + return -1 + + if isinstance(constraint, HasMethods): + return type("HasNotMethods", (), {})() + + if isinstance(constraint, _IterablesNotString): + return "a string" + + if isinstance(constraint, _CVObjects): + return "not a cv object" + + if isinstance(constraint, Interval) and constraint.type is Integral: + if constraint.left is not None: + return constraint.left - 1 + if constraint.right is not None: + return constraint.right + 1 + + # There's no integer outside (-inf, +inf) + raise NotImplementedError + + if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt): + if constraint.left is not None: + return constraint.left - 1e-6 + if constraint.right is not None: + return constraint.right + 1e-6 + + # bounds are -inf, +inf + if constraint.closed in ("right", "neither"): + return -np.inf + if constraint.closed in ("left", "neither"): + return np.inf + + # interval is [-inf, +inf] + return np.nan + + raise NotImplementedError + + +def generate_valid_param(constraint): + """Return a value that does satisfy a constraint. + + This is only useful for testing purpose. + + Parameters + ---------- + constraint : Constraint instance + The constraint to generate a value for. + + Returns + ------- + val : object + A value that does satisfy the constraint. 
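+
+    Examples
+    --------
+    Illustrative calls; the returned values follow directly from the branches
+    implemented below:
+
+    >>> generate_valid_param(Interval(Integral, 2, None, closed="left"))
+    3
+    >>> generate_valid_param(Interval(Real, None, None, closed="both"))
+    0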
+ """ + if isinstance(constraint, _ArrayLikes): + return np.array([1, 2, 3]) + + if isinstance(constraint, _SparseMatrices): + return csr_matrix([[0, 1], [1, 0]]) + + if isinstance(constraint, _RandomStates): + return np.random.RandomState(42) + + if isinstance(constraint, _Callables): + return lambda x: x + + if isinstance(constraint, _NoneConstraint): + return None + + if isinstance(constraint, _InstancesOf): + if constraint.type is np.ndarray: + # special case for ndarray since it can't be instantiated without arguments + return np.array([1, 2, 3]) + + if constraint.type in (Integral, Real): + # special case for Integral and Real since they are abstract classes + return 1 + + return constraint.type() + + if isinstance(constraint, _Booleans): + return True + + if isinstance(constraint, _VerboseHelper): + return 1 + + if isinstance(constraint, MissingValues) and constraint.numeric_only: + return np.nan + + if isinstance(constraint, MissingValues) and not constraint.numeric_only: + return "missing" + + if isinstance(constraint, HasMethods): + return type( + "ValidHasMethods", (), {m: lambda self: None for m in constraint.methods} + )() + + if isinstance(constraint, _IterablesNotString): + return [1, 2, 3] + + if isinstance(constraint, _CVObjects): + return 5 + + if isinstance(constraint, Options): # includes StrOptions + for option in constraint.options: + return option + + if isinstance(constraint, Interval): + interval = constraint + if interval.left is None and interval.right is None: + return 0 + elif interval.left is None: + return interval.right - 1 + elif interval.right is None: + return interval.left + 1 + else: + if interval.type is Real: + return (interval.left + interval.right) / 2 + else: + return interval.left + 1 + + raise ValueError(f"Unknown constraint type: {constraint}") diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_pprint.py b/venv/lib/python3.10/site-packages/sklearn/utils/_pprint.py new file mode 100644 index 0000000000000000000000000000000000000000..cea1510746cbed06e708ad939e507aa84ba733f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_pprint.py @@ -0,0 +1,463 @@ +"""This module contains the _EstimatorPrettyPrinter class used in +BaseEstimator.__repr__ for pretty-printing estimators""" + +# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation; +# All Rights Reserved + +# Authors: Fred L. Drake, Jr. (built-in CPython pprint module) +# Nicolas Hug (scikit-learn specific changes) + +# License: PSF License version 2 (see below) + +# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +# -------------------------------------------- + +# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), +# and the Individual or Organization ("Licensee") accessing and otherwise +# using this software ("Python") in source or binary form and its associated +# documentation. + +# 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +# grants Licensee a nonexclusive, royalty-free, world-wide license to +# reproduce, analyze, test, perform and/or display publicly, prepare +# derivative works, distribute, and otherwise use Python alone or in any +# derivative version, provided, however, that PSF's License Agreement and +# PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, +# 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, +# 2017, 2018 Python Software Foundation; All Rights Reserved" are retained in +# Python alone or in any derivative version prepared by Licensee. + +# 3. In the event Licensee prepares a derivative work that is based on or +# incorporates Python or any part thereof, and wants to make the derivative +# work available to others as provided herein, then Licensee hereby agrees to +# include in any such work a brief summary of the changes made to Python. + +# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES +# NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT +# NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF +# MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF +# PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY +# INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF +# MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE +# THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +# 6. This License Agreement will automatically terminate upon a material +# breach of its terms and conditions. + +# 7. Nothing in this License Agreement shall be deemed to create any +# relationship of agency, partnership, or joint venture between PSF and +# Licensee. This License Agreement does not grant permission to use PSF +# trademarks or trade name in a trademark sense to endorse or promote products +# or services of Licensee, or any third party. + +# 8. By copying, installing or otherwise using Python, Licensee agrees to be +# bound by the terms and conditions of this License Agreement. + + +# Brief summary of changes to original code: +# - "compact" parameter is supported for dicts, not just lists or tuples +# - estimators have a custom handler, they're not just treated as objects +# - long sequences (lists, tuples, dict items) with more than N elements are +# shortened using ellipsis (', ...') at the end. + +import inspect +import pprint +from collections import OrderedDict + +from .._config import get_config +from ..base import BaseEstimator +from . 
import is_scalar_nan + + +class KeyValTuple(tuple): + """Dummy class for correctly rendering key-value tuples from dicts.""" + + def __repr__(self): + # needed for _dispatch[tuple.__repr__] not to be overridden + return super().__repr__() + + +class KeyValTupleParam(KeyValTuple): + """Dummy class for correctly rendering key-value tuples from parameters.""" + + pass + + +def _changed_params(estimator): + """Return dict (param_name: value) of parameters that were given to + estimator with non-default values.""" + + params = estimator.get_params(deep=False) + init_func = getattr(estimator.__init__, "deprecated_original", estimator.__init__) + init_params = inspect.signature(init_func).parameters + init_params = {name: param.default for name, param in init_params.items()} + + def has_changed(k, v): + if k not in init_params: # happens if k is part of a **kwargs + return True + if init_params[k] == inspect._empty: # k has no default value + return True + # try to avoid calling repr on nested estimators + if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__: + return True + # Use repr as a last resort. It may be expensive. + if repr(v) != repr(init_params[k]) and not ( + is_scalar_nan(init_params[k]) and is_scalar_nan(v) + ): + return True + return False + + return {k: v for k, v in params.items() if has_changed(k, v)} + + +class _EstimatorPrettyPrinter(pprint.PrettyPrinter): + """Pretty Printer class for estimator objects. + + This extends the pprint.PrettyPrinter class, because: + - we need estimators to be printed with their parameters, e.g. + Estimator(param1=value1, ...) which is not supported by default. + - the 'compact' parameter of PrettyPrinter is ignored for dicts, which + may lead to very long representations that we want to avoid. + + Quick overview of pprint.PrettyPrinter (see also + https://stackoverflow.com/questions/49565047/pprint-with-hex-numbers): + + - the entry point is the _format() method which calls format() (overridden + here) + - format() directly calls _safe_repr() for a first try at rendering the + object + - _safe_repr formats the whole object recursively, only calling itself, + not caring about line length or anything + - back to _format(), if the output string is too long, _format() then calls + the appropriate _pprint_TYPE() method (e.g. _pprint_list()) depending on + the type of the object. This where the line length and the compact + parameters are taken into account. + - those _pprint_TYPE() methods will internally use the format() method for + rendering the nested objects of an object (e.g. the elements of a list) + + In the end, everything has to be implemented twice: in _safe_repr and in + the custom _pprint_TYPE methods. Unfortunately PrettyPrinter is really not + straightforward to extend (especially when we want a compact output), so + the code is a bit convoluted. + + This class overrides: + - format() to support the changed_only parameter + - _safe_repr to support printing of estimators (for when they fit on a + single line) + - _format_dict_items so that dict are correctly 'compacted' + - _format_items so that ellipsis is used on long lists and tuples + + When estimators cannot be printed on a single line, the builtin _format() + will call _pprint_estimator() because it was registered to do so (see + _dispatch[BaseEstimator.__repr__] = _pprint_estimator). 
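+
+    The net effect, with ``print_changed_only=True`` (the current default), is
+    that only parameters differing from their defaults are rendered, e.g.::
+
+        >>> from sklearn.linear_model import LogisticRegression
+        >>> LogisticRegression(C=10)
+        LogisticRegression(C=10)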
+ + both _format_dict_items() and _pprint_estimator() use the + _format_params_or_dict_items() method that will format parameters and + key-value pairs respecting the compact parameter. This method needs another + subroutine _pprint_key_val_tuple() used when a parameter or a key-value + pair is too long to fit on a single line. This subroutine is called in + _format() and is registered as well in the _dispatch dict (just like + _pprint_estimator). We had to create the two classes KeyValTuple and + KeyValTupleParam for this. + """ + + def __init__( + self, + indent=1, + width=80, + depth=None, + stream=None, + *, + compact=False, + indent_at_name=True, + n_max_elements_to_show=None, + ): + super().__init__(indent, width, depth, stream, compact=compact) + self._indent_at_name = indent_at_name + if self._indent_at_name: + self._indent_per_level = 1 # ignore indent param + self._changed_only = get_config()["print_changed_only"] + # Max number of elements in a list, dict, tuple until we start using + # ellipsis. This also affects the number of arguments of an estimators + # (they are treated as dicts) + self.n_max_elements_to_show = n_max_elements_to_show + + def format(self, object, context, maxlevels, level): + return _safe_repr( + object, context, maxlevels, level, changed_only=self._changed_only + ) + + def _pprint_estimator(self, object, stream, indent, allowance, context, level): + stream.write(object.__class__.__name__ + "(") + if self._indent_at_name: + indent += len(object.__class__.__name__) + + if self._changed_only: + params = _changed_params(object) + else: + params = object.get_params(deep=False) + + params = OrderedDict((name, val) for (name, val) in sorted(params.items())) + + self._format_params( + params.items(), stream, indent, allowance + 1, context, level + ) + stream.write(")") + + def _format_dict_items(self, items, stream, indent, allowance, context, level): + return self._format_params_or_dict_items( + items, stream, indent, allowance, context, level, is_dict=True + ) + + def _format_params(self, items, stream, indent, allowance, context, level): + return self._format_params_or_dict_items( + items, stream, indent, allowance, context, level, is_dict=False + ) + + def _format_params_or_dict_items( + self, object, stream, indent, allowance, context, level, is_dict + ): + """Format dict items or parameters respecting the compact=True + parameter. For some reason, the builtin rendering of dict items doesn't + respect compact=True and will use one line per key-value if all cannot + fit in a single line. + Dict items will be rendered as <'key': value> while params will be + rendered as . The implementation is mostly copy/pasting from + the builtin _format_items(). + This also adds ellipsis if the number of items is greater than + self.n_max_elements_to_show. 
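+
+        For instance, dict items come out as ``'key': value`` and parameters
+        as ``key=value``; with ``n_max_elements_to_show=2`` a long parameter
+        list is rendered roughly as ``alpha=1, beta=2, ...`` (illustrative
+        names).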
+ """ + write = stream.write + indent += self._indent_per_level + delimnl = ",\n" + " " * indent + delim = "" + width = max_width = self._width - indent + 1 + it = iter(object) + try: + next_ent = next(it) + except StopIteration: + return + last = False + n_items = 0 + while not last: + if n_items == self.n_max_elements_to_show: + write(", ...") + break + n_items += 1 + ent = next_ent + try: + next_ent = next(it) + except StopIteration: + last = True + max_width -= allowance + width -= allowance + if self._compact: + k, v = ent + krepr = self._repr(k, context, level) + vrepr = self._repr(v, context, level) + if not is_dict: + krepr = krepr.strip("'") + middle = ": " if is_dict else "=" + rep = krepr + middle + vrepr + w = len(rep) + 2 + if width < w: + width = max_width + if delim: + delim = delimnl + if width >= w: + width -= w + write(delim) + delim = ", " + write(rep) + continue + write(delim) + delim = delimnl + class_ = KeyValTuple if is_dict else KeyValTupleParam + self._format( + class_(ent), stream, indent, allowance if last else 1, context, level + ) + + def _format_items(self, items, stream, indent, allowance, context, level): + """Format the items of an iterable (list, tuple...). Same as the + built-in _format_items, with support for ellipsis if the number of + elements is greater than self.n_max_elements_to_show. + """ + write = stream.write + indent += self._indent_per_level + if self._indent_per_level > 1: + write((self._indent_per_level - 1) * " ") + delimnl = ",\n" + " " * indent + delim = "" + width = max_width = self._width - indent + 1 + it = iter(items) + try: + next_ent = next(it) + except StopIteration: + return + last = False + n_items = 0 + while not last: + if n_items == self.n_max_elements_to_show: + write(", ...") + break + n_items += 1 + ent = next_ent + try: + next_ent = next(it) + except StopIteration: + last = True + max_width -= allowance + width -= allowance + if self._compact: + rep = self._repr(ent, context, level) + w = len(rep) + 2 + if width < w: + width = max_width + if delim: + delim = delimnl + if width >= w: + width -= w + write(delim) + delim = ", " + write(rep) + continue + write(delim) + delim = delimnl + self._format(ent, stream, indent, allowance if last else 1, context, level) + + def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level): + """Pretty printing for key-value tuples from dict or parameters.""" + k, v = object + rep = self._repr(k, context, level) + if isinstance(object, KeyValTupleParam): + rep = rep.strip("'") + middle = "=" + else: + middle = ": " + stream.write(rep) + stream.write(middle) + self._format( + v, stream, indent + len(rep) + len(middle), allowance, context, level + ) + + # Note: need to copy _dispatch to prevent instances of the builtin + # PrettyPrinter class to call methods of _EstimatorPrettyPrinter (see issue + # 12906) + # mypy error: "Type[PrettyPrinter]" has no attribute "_dispatch" + _dispatch = pprint.PrettyPrinter._dispatch.copy() # type: ignore + _dispatch[BaseEstimator.__repr__] = _pprint_estimator + _dispatch[KeyValTuple.__repr__] = _pprint_key_val_tuple + + +def _safe_repr(object, context, maxlevels, level, changed_only=False): + """Same as the builtin _safe_repr, with added support for Estimator + objects.""" + typ = type(object) + + if typ in pprint._builtin_scalars: + return repr(object), True, False + + r = getattr(typ, "__repr__", None) + if issubclass(typ, dict) and r is dict.__repr__: + if not object: + return "{}", True, False + objid = id(object) + if maxlevels and 
level >= maxlevels: + return "{...}", False, objid in context + if objid in context: + return pprint._recursion(object), False, True + context[objid] = 1 + readable = True + recursive = False + components = [] + append = components.append + level += 1 + saferepr = _safe_repr + items = sorted(object.items(), key=pprint._safe_tuple) + for k, v in items: + krepr, kreadable, krecur = saferepr( + k, context, maxlevels, level, changed_only=changed_only + ) + vrepr, vreadable, vrecur = saferepr( + v, context, maxlevels, level, changed_only=changed_only + ) + append("%s: %s" % (krepr, vrepr)) + readable = readable and kreadable and vreadable + if krecur or vrecur: + recursive = True + del context[objid] + return "{%s}" % ", ".join(components), readable, recursive + + if (issubclass(typ, list) and r is list.__repr__) or ( + issubclass(typ, tuple) and r is tuple.__repr__ + ): + if issubclass(typ, list): + if not object: + return "[]", True, False + format = "[%s]" + elif len(object) == 1: + format = "(%s,)" + else: + if not object: + return "()", True, False + format = "(%s)" + objid = id(object) + if maxlevels and level >= maxlevels: + return format % "...", False, objid in context + if objid in context: + return pprint._recursion(object), False, True + context[objid] = 1 + readable = True + recursive = False + components = [] + append = components.append + level += 1 + for o in object: + orepr, oreadable, orecur = _safe_repr( + o, context, maxlevels, level, changed_only=changed_only + ) + append(orepr) + if not oreadable: + readable = False + if orecur: + recursive = True + del context[objid] + return format % ", ".join(components), readable, recursive + + if issubclass(typ, BaseEstimator): + objid = id(object) + if maxlevels and level >= maxlevels: + return "{...}", False, objid in context + if objid in context: + return pprint._recursion(object), False, True + context[objid] = 1 + readable = True + recursive = False + if changed_only: + params = _changed_params(object) + else: + params = object.get_params(deep=False) + components = [] + append = components.append + level += 1 + saferepr = _safe_repr + items = sorted(params.items(), key=pprint._safe_tuple) + for k, v in items: + krepr, kreadable, krecur = saferepr( + k, context, maxlevels, level, changed_only=changed_only + ) + vrepr, vreadable, vrecur = saferepr( + v, context, maxlevels, level, changed_only=changed_only + ) + append("%s=%s" % (krepr.strip("'"), vrepr)) + readable = readable and kreadable and vreadable + if krecur or vrecur: + recursive = True + del context[objid] + return ("%s(%s)" % (typ.__name__, ", ".join(components)), readable, recursive) + + rep = repr(object) + return rep, (rep and not rep.startswith("<")), False diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_random.pxd b/venv/lib/python3.10/site-packages/sklearn/utils/_random.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0ebcc1de0cce69cde50615c2e9f7a8d51f2559da --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_random.pxd @@ -0,0 +1,36 @@ +# Authors: Arnaud Joly +# +# License: BSD 3 clause + + +cimport numpy as cnp +ctypedef cnp.npy_uint32 UINT32_t + +cdef inline UINT32_t DEFAULT_SEED = 1 + +cdef enum: + # Max value for our rand_r replacement (near the bottom). + # We don't use RAND_MAX because it's different across platforms and + # particularly tiny on Windows/MSVC. + # It corresponds to the maximum representable value for + # 32-bit signed integers (i.e. 2^31 - 1). 
+ RAND_R_MAX = 2147483647 + + +# rand_r replacement using a 32bit XorShift generator +# See http://www.jstatsoft.org/v08/i14/paper for details +cdef inline UINT32_t our_rand_r(UINT32_t* seed) nogil: + """Generate a pseudo-random np.uint32 from a np.uint32 seed""" + # seed shouldn't ever be 0. + if (seed[0] == 0): + seed[0] = DEFAULT_SEED + + seed[0] ^= (seed[0] << 13) + seed[0] ^= (seed[0] >> 17) + seed[0] ^= (seed[0] << 5) + + # Use the modulo to make sure that we don't return a values greater than the + # maximum representable value for signed 32bit integers (i.e. 2^31 - 1). + # Note that the parenthesis are needed to avoid overflow: here + # RAND_R_MAX is cast to UINT32_t before 1 is added. + return seed[0] % ((RAND_R_MAX) + 1) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..04fc2fd38886317ad58921b28a8359824613acec Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_show_versions.py b/venv/lib/python3.10/site-packages/sklearn/utils/_show_versions.py new file mode 100644 index 0000000000000000000000000000000000000000..89052e88b65fe8dd27a5c63181693581827a8110 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_show_versions.py @@ -0,0 +1,112 @@ +""" +Utility methods to print system info for debugging + +adapted from :func:`pandas.show_versions` +""" +# License: BSD 3 clause + +import platform +import sys + +from .. import __version__ +from ..utils.fixes import threadpool_info +from ._openmp_helpers import _openmp_parallelism_enabled + + +def _get_sys_info(): + """System information + + Returns + ------- + sys_info : dict + system and Python version information + + """ + python = sys.version.replace("\n", " ") + + blob = [ + ("python", python), + ("executable", sys.executable), + ("machine", platform.platform()), + ] + + return dict(blob) + + +def _get_deps_info(): + """Overview of the installed version of main dependencies + + This function does not import the modules to collect the version numbers + but instead relies on standard Python package metadata. + + Returns + ------- + deps_info: dict + version information on relevant Python libraries + + """ + deps = [ + "pip", + "setuptools", + "numpy", + "scipy", + "Cython", + "pandas", + "matplotlib", + "joblib", + "threadpoolctl", + ] + + deps_info = { + "sklearn": __version__, + } + + from importlib.metadata import PackageNotFoundError, version + + for modname in deps: + try: + deps_info[modname] = version(modname) + except PackageNotFoundError: + deps_info[modname] = None + return deps_info + + +def show_versions(): + """Print useful debugging information" + + .. 
versionadded:: 0.20 + + Examples + -------- + >>> from sklearn import show_versions + >>> show_versions() # doctest: +SKIP + """ + + sys_info = _get_sys_info() + deps_info = _get_deps_info() + + print("\nSystem:") + for k, stat in sys_info.items(): + print("{k:>10}: {stat}".format(k=k, stat=stat)) + + print("\nPython dependencies:") + for k, stat in deps_info.items(): + print("{k:>13}: {stat}".format(k=k, stat=stat)) + + print( + "\n{k}: {stat}".format( + k="Built with OpenMP", stat=_openmp_parallelism_enabled() + ) + ) + + # show threadpoolctl results + threadpool_results = threadpool_info() + if threadpool_results: + print() + print("threadpoolctl info:") + + for i, result in enumerate(threadpool_results): + for key, val in result.items(): + print(f"{key:>15}: {val}") + if i != len(threadpool_results) - 1: + print() diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_sorting.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/utils/_sorting.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..76592d0a20e9d4d8782e98c7b0ea7301429df1a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/_sorting.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_typedefs.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/utils/_typedefs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cd65d20e367fd5e1dc0b2d0aba1cb07cc0d34383 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/_typedefs.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd b/venv/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd new file mode 100644 index 0000000000000000000000000000000000000000..64de6c18830b5e24c77bfed38cfffccc3b62955a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd @@ -0,0 +1,12 @@ +cimport numpy as cnp + +from libcpp.vector cimport vector +from ..utils._typedefs cimport intp_t, float64_t, int32_t, int64_t + +ctypedef fused vector_typed: + vector[float64_t] + vector[intp_t] + vector[int32_t] + vector[int64_t] + +cdef cnp.ndarray vector_to_nd_array(vector_typed * vect_ptr) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/arrayfuncs.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/utils/arrayfuncs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..69d04db31d222b55a767ae976e7302a2b773274b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/arrayfuncs.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/discovery.py b/venv/lib/python3.10/site-packages/sklearn/utils/discovery.py new file mode 100644 index 0000000000000000000000000000000000000000..c1fdca3beafb27ce74432ab1170ae718d94eb8ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/discovery.py @@ -0,0 +1,265 @@ +""" +The :mod:`sklearn.utils.discovery` module includes utilities to discover +objects (i.e. estimators, displays, functions) from the `sklearn` package. 
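+It exposes :func:`all_estimators`, :func:`all_displays` and
+:func:`all_functions`.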
+""" + +import inspect +import pkgutil +from importlib import import_module +from operator import itemgetter +from pathlib import Path + +_MODULE_TO_IGNORE = { + "tests", + "externals", + "setup", + "conftest", + "experimental", + "estimator_checks", +} + + +def all_estimators(type_filter=None): + """Get a list of all estimators from `sklearn`. + + This function crawls the module and gets all classes that inherit + from BaseEstimator. Classes that are defined in test-modules are not + included. + + Parameters + ---------- + type_filter : {"classifier", "regressor", "cluster", "transformer"} \ + or list of such str, default=None + Which kind of estimators should be returned. If None, no filter is + applied and all estimators are returned. Possible values are + 'classifier', 'regressor', 'cluster' and 'transformer' to get + estimators only of these specific types, or a list of these to + get the estimators that fit at least one of the types. + + Returns + ------- + estimators : list of tuples + List of (name, class), where ``name`` is the class name as string + and ``class`` is the actual type of the class. + + Examples + -------- + >>> from sklearn.utils.discovery import all_estimators + >>> estimators = all_estimators() + >>> type(estimators) + + >>> type(estimators[0]) + + >>> estimators[:2] + [('ARDRegression', ), + ('AdaBoostClassifier', + )] + >>> classifiers = all_estimators(type_filter="classifier") + >>> classifiers[:2] + [('AdaBoostClassifier', + ), + ('BaggingClassifier', )] + >>> regressors = all_estimators(type_filter="regressor") + >>> regressors[:2] + [('ARDRegression', ), + ('AdaBoostRegressor', + )] + >>> both = all_estimators(type_filter=["classifier", "regressor"]) + >>> both[:2] + [('ARDRegression', ), + ('AdaBoostClassifier', + )] + """ + # lazy import to avoid circular imports from sklearn.base + from ..base import ( + BaseEstimator, + ClassifierMixin, + ClusterMixin, + RegressorMixin, + TransformerMixin, + ) + from . 
import IS_PYPY + from ._testing import ignore_warnings + + def is_abstract(c): + if not (hasattr(c, "__abstractmethods__")): + return False + if not len(c.__abstractmethods__): + return False + return True + + all_classes = [] + root = str(Path(__file__).parent.parent) # sklearn package + # Ignore deprecation warnings triggered at import time and from walking + # packages + with ignore_warnings(category=FutureWarning): + for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."): + module_parts = module_name.split(".") + if ( + any(part in _MODULE_TO_IGNORE for part in module_parts) + or "._" in module_name + ): + continue + module = import_module(module_name) + classes = inspect.getmembers(module, inspect.isclass) + classes = [ + (name, est_cls) for name, est_cls in classes if not name.startswith("_") + ] + + # TODO: Remove when FeatureHasher is implemented in PYPY + # Skips FeatureHasher for PYPY + if IS_PYPY and "feature_extraction" in module_name: + classes = [ + (name, est_cls) + for name, est_cls in classes + if name == "FeatureHasher" + ] + + all_classes.extend(classes) + + all_classes = set(all_classes) + + estimators = [ + c + for c in all_classes + if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator") + ] + # get rid of abstract base classes + estimators = [c for c in estimators if not is_abstract(c[1])] + + if type_filter is not None: + if not isinstance(type_filter, list): + type_filter = [type_filter] + else: + type_filter = list(type_filter) # copy + filtered_estimators = [] + filters = { + "classifier": ClassifierMixin, + "regressor": RegressorMixin, + "transformer": TransformerMixin, + "cluster": ClusterMixin, + } + for name, mixin in filters.items(): + if name in type_filter: + type_filter.remove(name) + filtered_estimators.extend( + [est for est in estimators if issubclass(est[1], mixin)] + ) + estimators = filtered_estimators + if type_filter: + raise ValueError( + "Parameter type_filter must be 'classifier', " + "'regressor', 'transformer', 'cluster' or " + "None, got" + f" {repr(type_filter)}." + ) + + # drop duplicates, sort for reproducibility + # itemgetter is used to ensure the sort does not extend to the 2nd item of + # the tuple + return sorted(set(estimators), key=itemgetter(0)) + + +def all_displays(): + """Get a list of all displays from `sklearn`. + + Returns + ------- + displays : list of tuples + List of (name, class), where ``name`` is the display class name as + string and ``class`` is the actual type of the class. 
+ + Examples + -------- + >>> from sklearn.utils.discovery import all_displays + >>> displays = all_displays() + >>> displays[0] + ('CalibrationDisplay', ) + """ + # lazy import to avoid circular imports from sklearn.base + from ._testing import ignore_warnings + + all_classes = [] + root = str(Path(__file__).parent.parent) # sklearn package + # Ignore deprecation warnings triggered at import time and from walking + # packages + with ignore_warnings(category=FutureWarning): + for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."): + module_parts = module_name.split(".") + if ( + any(part in _MODULE_TO_IGNORE for part in module_parts) + or "._" in module_name + ): + continue + module = import_module(module_name) + classes = inspect.getmembers(module, inspect.isclass) + classes = [ + (name, display_class) + for name, display_class in classes + if not name.startswith("_") and name.endswith("Display") + ] + all_classes.extend(classes) + + return sorted(set(all_classes), key=itemgetter(0)) + + +def _is_checked_function(item): + if not inspect.isfunction(item): + return False + + if item.__name__.startswith("_"): + return False + + mod = item.__module__ + if not mod.startswith("sklearn.") or mod.endswith("estimator_checks"): + return False + + return True + + +def all_functions(): + """Get a list of all functions from `sklearn`. + + Returns + ------- + functions : list of tuples + List of (name, function), where ``name`` is the function name as + string and ``function`` is the actual function. + + Examples + -------- + >>> from sklearn.utils.discovery import all_functions + >>> functions = all_functions() + >>> name, function = functions[0] + >>> name + 'accuracy_score' + """ + # lazy import to avoid circular imports from sklearn.base + from ._testing import ignore_warnings + + all_functions = [] + root = str(Path(__file__).parent.parent) # sklearn package + # Ignore deprecation warnings triggered at import time and from walking + # packages + with ignore_warnings(category=FutureWarning): + for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."): + module_parts = module_name.split(".") + if ( + any(part in _MODULE_TO_IGNORE for part in module_parts) + or "._" in module_name + ): + continue + + module = import_module(module_name) + functions = inspect.getmembers(module, _is_checked_function) + functions = [ + (func.__name__, func) + for name, func in functions + if not name.startswith("_") + ] + all_functions.extend(functions) + + # drop duplicates, sort for reproducibility + # itemgetter is used to ensure the sort does not extend to the 2nd item of + # the tuple + return sorted(set(all_functions), key=itemgetter(0)) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/estimator_checks.py b/venv/lib/python3.10/site-packages/sklearn/utils/estimator_checks.py new file mode 100644 index 0000000000000000000000000000000000000000..535862fcd8f1cdad9a0bfb36468eb4f0e244d647 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/estimator_checks.py @@ -0,0 +1,4728 @@ +""" +The :mod:`sklearn.utils.estimator_checks` module includes various utilities to +check the compatibility of estimators with the scikit-learn API. +""" + +import pickle +import re +import warnings +from contextlib import nullcontext +from copy import deepcopy +from functools import partial, wraps +from inspect import signature +from numbers import Integral, Real + +import joblib +import numpy as np +from scipy import sparse +from scipy.stats import rankdata + +from .. 
import config_context +from ..base import ( + ClusterMixin, + RegressorMixin, + clone, + is_classifier, + is_outlier_detector, + is_regressor, +) +from ..datasets import ( + load_iris, + make_blobs, + make_classification, + make_multilabel_classification, + make_regression, +) +from ..exceptions import DataConversionWarning, NotFittedError, SkipTestWarning +from ..feature_selection import SelectFromModel, SelectKBest +from ..linear_model import ( + LinearRegression, + LogisticRegression, + RANSACRegressor, + Ridge, + SGDRegressor, +) +from ..metrics import accuracy_score, adjusted_rand_score, f1_score +from ..metrics.pairwise import linear_kernel, pairwise_distances, rbf_kernel +from ..model_selection import ShuffleSplit, train_test_split +from ..model_selection._validation import _safe_split +from ..pipeline import make_pipeline +from ..preprocessing import StandardScaler, scale +from ..random_projection import BaseRandomProjection +from ..tree import DecisionTreeClassifier, DecisionTreeRegressor +from ..utils._array_api import ( + _convert_to_numpy, + get_namespace, + yield_namespace_device_dtype_combinations, +) +from ..utils._array_api import ( + device as array_device, +) +from ..utils._param_validation import ( + InvalidParameterError, + generate_invalid_param_val, + make_constraint, +) +from ..utils.fixes import parse_version, sp_version +from ..utils.validation import check_is_fitted +from . import IS_PYPY, is_scalar_nan, shuffle +from ._param_validation import Interval +from ._tags import ( + _DEFAULT_TAGS, + _safe_tags, +) +from ._testing import ( + SkipTest, + _array_api_for_tests, + _get_args, + assert_allclose, + assert_allclose_dense_sparse, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + assert_raise_message, + create_memmap_backed_data, + ignore_warnings, + raises, + set_random_state, +) +from .validation import _num_samples, has_fit_parameter + +REGRESSION_DATASET = None +CROSS_DECOMPOSITION = ["PLSCanonical", "PLSRegression", "CCA", "PLSSVD"] + + +def _yield_checks(estimator): + name = estimator.__class__.__name__ + tags = _safe_tags(estimator) + + yield check_no_attributes_set_in_init + yield check_estimators_dtypes + yield check_fit_score_takes_y + if has_fit_parameter(estimator, "sample_weight"): + yield check_sample_weights_pandas_series + yield check_sample_weights_not_an_array + yield check_sample_weights_list + if not tags["pairwise"]: + # We skip pairwise because the data is not pairwise + yield check_sample_weights_shape + yield check_sample_weights_not_overwritten + yield partial(check_sample_weights_invariance, kind="ones") + yield partial(check_sample_weights_invariance, kind="zeros") + yield check_estimators_fit_returns_self + yield partial(check_estimators_fit_returns_self, readonly_memmap=True) + + # Check that all estimator yield informative messages when + # trained on empty datasets + if not tags["no_validation"]: + yield check_complex_data + yield check_dtype_object + yield check_estimators_empty_data_messages + + if name not in CROSS_DECOMPOSITION: + # cross-decomposition's "transform" returns X and Y + yield check_pipeline_consistency + + if not tags["allow_nan"] and not tags["no_validation"]: + # Test that all estimators check their input for NaN's and infs + yield check_estimators_nan_inf + + if tags["pairwise"]: + # Check that pairwise estimator throws error on non-square input + yield check_nonsquare_error + + yield check_estimators_overwrite_params + if hasattr(estimator, "sparsify"): + yield check_sparsify_coefficients 
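+
+    # Note: each check yielded by this generator is later bound to the
+    # estimator's class name and called as `check(name, estimator)`; see
+    # `check_estimator` below, which wraps it with `partial(check, name)`.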
+ + yield check_estimator_sparse_data + + # Test that estimators can be pickled, and once pickled + # give the same answer as before. + yield check_estimators_pickle + yield partial(check_estimators_pickle, readonly_memmap=True) + + yield check_estimator_get_tags_default_keys + + if tags["array_api_support"]: + for check in _yield_array_api_checks(estimator): + yield check + + +def _yield_classifier_checks(classifier): + tags = _safe_tags(classifier) + + # test classifiers can handle non-array data and pandas objects + yield check_classifier_data_not_an_array + # test classifiers trained on a single label always return this label + yield check_classifiers_one_label + yield check_classifiers_one_label_sample_weights + yield check_classifiers_classes + yield check_estimators_partial_fit_n_features + if tags["multioutput"]: + yield check_classifier_multioutput + # basic consistency testing + yield check_classifiers_train + yield partial(check_classifiers_train, readonly_memmap=True) + yield partial(check_classifiers_train, readonly_memmap=True, X_dtype="float32") + yield check_classifiers_regression_target + if tags["multilabel"]: + yield check_classifiers_multilabel_representation_invariance + yield check_classifiers_multilabel_output_format_predict + yield check_classifiers_multilabel_output_format_predict_proba + yield check_classifiers_multilabel_output_format_decision_function + if not tags["no_validation"]: + yield check_supervised_y_no_nan + if not tags["multioutput_only"]: + yield check_supervised_y_2d + if tags["requires_fit"]: + yield check_estimators_unfitted + if "class_weight" in classifier.get_params().keys(): + yield check_class_weight_classifiers + + yield check_non_transformer_estimators_n_iter + # test if predict_proba is a monotonic transformation of decision_function + yield check_decision_proba_consistency + + +@ignore_warnings(category=FutureWarning) +def check_supervised_y_no_nan(name, estimator_orig): + # Checks that the Estimator targets are not NaN. + estimator = clone(estimator_orig) + rng = np.random.RandomState(888) + X = rng.standard_normal(size=(10, 5)) + + for value in [np.nan, np.inf]: + y = np.full(10, value) + y = _enforce_estimator_tags_y(estimator, y) + + module_name = estimator.__module__ + if module_name.startswith("sklearn.") and not ( + "test_" in module_name or module_name.endswith("_testing") + ): + # In scikit-learn we want the error message to mention the input + # name and be specific about the kind of unexpected value. + if np.isinf(value): + match = ( + r"Input (y|Y) contains infinity or a value too large for" + r" dtype\('float64'\)." + ) + else: + match = r"Input (y|Y) contains NaN." + else: + # Do not impose a particular error message to third-party libraries. + match = None + err_msg = ( + f"Estimator {name} should have raised error on fitting array y with inf" + " value." 
+ ) + with raises(ValueError, match=match, err_msg=err_msg): + estimator.fit(X, y) + + +def _yield_regressor_checks(regressor): + tags = _safe_tags(regressor) + # TODO: test with intercept + # TODO: test with multiple responses + # basic testing + yield check_regressors_train + yield partial(check_regressors_train, readonly_memmap=True) + yield partial(check_regressors_train, readonly_memmap=True, X_dtype="float32") + yield check_regressor_data_not_an_array + yield check_estimators_partial_fit_n_features + if tags["multioutput"]: + yield check_regressor_multioutput + yield check_regressors_no_decision_function + if not tags["no_validation"] and not tags["multioutput_only"]: + yield check_supervised_y_2d + yield check_supervised_y_no_nan + name = regressor.__class__.__name__ + if name != "CCA": + # check that the regressor handles int input + yield check_regressors_int + if tags["requires_fit"]: + yield check_estimators_unfitted + yield check_non_transformer_estimators_n_iter + + +def _yield_transformer_checks(transformer): + tags = _safe_tags(transformer) + # All transformers should either deal with sparse data or raise an + # exception with type TypeError and an intelligible error message + if not tags["no_validation"]: + yield check_transformer_data_not_an_array + # these don't actually fit the data, so don't raise errors + yield check_transformer_general + if tags["preserves_dtype"]: + yield check_transformer_preserve_dtypes + yield partial(check_transformer_general, readonly_memmap=True) + if not _safe_tags(transformer, key="stateless"): + yield check_transformers_unfitted + else: + yield check_transformers_unfitted_stateless + # Dependent on external solvers and hence accessing the iter + # param is non-trivial. + external_solver = [ + "Isomap", + "KernelPCA", + "LocallyLinearEmbedding", + "RandomizedLasso", + "LogisticRegressionCV", + "BisectingKMeans", + ] + + name = transformer.__class__.__name__ + if name not in external_solver: + yield check_transformer_n_iter + + +def _yield_clustering_checks(clusterer): + yield check_clusterer_compute_labels_predict + name = clusterer.__class__.__name__ + if name not in ("WardAgglomeration", "FeatureAgglomeration"): + # this is clustering on the features + # let's not test that here. 
+ yield check_clustering + yield partial(check_clustering, readonly_memmap=True) + yield check_estimators_partial_fit_n_features + if not hasattr(clusterer, "transform"): + yield check_non_transformer_estimators_n_iter + + +def _yield_outliers_checks(estimator): + # checks for the contamination parameter + if hasattr(estimator, "contamination"): + yield check_outlier_contamination + + # checks for outlier detectors that have a fit_predict method + if hasattr(estimator, "fit_predict"): + yield check_outliers_fit_predict + + # checks for estimators that can be used on a test set + if hasattr(estimator, "predict"): + yield check_outliers_train + yield partial(check_outliers_train, readonly_memmap=True) + # test outlier detectors can handle non-array data + yield check_classifier_data_not_an_array + # test if NotFittedError is raised + if _safe_tags(estimator, key="requires_fit"): + yield check_estimators_unfitted + yield check_non_transformer_estimators_n_iter + + +def _yield_array_api_checks(estimator): + for ( + array_namespace, + device, + dtype_name, + ) in yield_namespace_device_dtype_combinations(): + yield partial( + check_array_api_input, + array_namespace=array_namespace, + dtype_name=dtype_name, + device=device, + ) + + +def _yield_all_checks(estimator): + name = estimator.__class__.__name__ + tags = _safe_tags(estimator) + if "2darray" not in tags["X_types"]: + warnings.warn( + "Can't test estimator {} which requires input of type {}".format( + name, tags["X_types"] + ), + SkipTestWarning, + ) + return + if tags["_skip_test"]: + warnings.warn( + "Explicit SKIP via _skip_test tag for estimator {}.".format(name), + SkipTestWarning, + ) + return + + for check in _yield_checks(estimator): + yield check + if is_classifier(estimator): + for check in _yield_classifier_checks(estimator): + yield check + if is_regressor(estimator): + for check in _yield_regressor_checks(estimator): + yield check + if hasattr(estimator, "transform"): + for check in _yield_transformer_checks(estimator): + yield check + if isinstance(estimator, ClusterMixin): + for check in _yield_clustering_checks(estimator): + yield check + if is_outlier_detector(estimator): + for check in _yield_outliers_checks(estimator): + yield check + yield check_parameters_default_constructible + if not tags["non_deterministic"]: + yield check_methods_sample_order_invariance + yield check_methods_subset_invariance + yield check_fit2d_1sample + yield check_fit2d_1feature + yield check_get_params_invariance + yield check_set_params + yield check_dict_unchanged + yield check_dont_overwrite_parameters + yield check_fit_idempotent + yield check_fit_check_is_fitted + if not tags["no_validation"]: + yield check_n_features_in + yield check_fit1d + yield check_fit2d_predict1d + if tags["requires_y"]: + yield check_requires_y_none + if tags["requires_positive_X"]: + yield check_fit_non_negative + + +def _get_check_estimator_ids(obj): + """Create pytest ids for checks. + + When `obj` is an estimator, this returns the pprint version of the + estimator (with `print_changed_only=True`). When `obj` is a function, the + name of the function is returned with its keyword arguments. + + `_get_check_estimator_ids` is designed to be used as the `id` in + `pytest.mark.parametrize` where `check_estimator(..., generate_only=True)` + is yielding estimators and checks. + + Parameters + ---------- + obj : estimator or function + Items generated by `check_estimator`. 
+ + Returns + ------- + id : str or None + + See Also + -------- + check_estimator + """ + if callable(obj): + if not isinstance(obj, partial): + return obj.__name__ + + if not obj.keywords: + return obj.func.__name__ + + kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()]) + return "{}({})".format(obj.func.__name__, kwstring) + if hasattr(obj, "get_params"): + with config_context(print_changed_only=True): + return re.sub(r"\s", "", str(obj)) + + +def _construct_instance(Estimator): + """Construct Estimator instance if possible.""" + required_parameters = getattr(Estimator, "_required_parameters", []) + if len(required_parameters): + if required_parameters in (["estimator"], ["base_estimator"]): + # `RANSACRegressor` will raise an error with any model other + # than `LinearRegression` if we don't fix `min_samples` parameter. + # For common test, we can enforce using `LinearRegression` that + # is the default estimator in `RANSACRegressor` instead of `Ridge`. + if issubclass(Estimator, RANSACRegressor): + estimator = Estimator(LinearRegression()) + elif issubclass(Estimator, RegressorMixin): + estimator = Estimator(Ridge()) + elif issubclass(Estimator, SelectFromModel): + # Increases coverage because SGDRegressor has partial_fit + estimator = Estimator(SGDRegressor(random_state=0)) + else: + estimator = Estimator(LogisticRegression(C=1)) + elif required_parameters in (["estimators"],): + # Heterogeneous ensemble classes (i.e. stacking, voting) + if issubclass(Estimator, RegressorMixin): + estimator = Estimator( + estimators=[ + ("est1", DecisionTreeRegressor(max_depth=3, random_state=0)), + ("est2", DecisionTreeRegressor(max_depth=3, random_state=1)), + ] + ) + else: + estimator = Estimator( + estimators=[ + ("est1", DecisionTreeClassifier(max_depth=3, random_state=0)), + ("est2", DecisionTreeClassifier(max_depth=3, random_state=1)), + ] + ) + else: + msg = ( + f"Can't instantiate estimator {Estimator.__name__} " + f"parameters {required_parameters}" + ) + # raise additional warning to be shown by pytest + warnings.warn(msg, SkipTestWarning) + raise SkipTest(msg) + else: + estimator = Estimator() + return estimator + + +def _maybe_mark_xfail(estimator, check, pytest): + # Mark (estimator, check) pairs as XFAIL if needed (see conditions in + # _should_be_skipped_or_marked()) + # This is similar to _maybe_skip(), but this one is used by + # @parametrize_with_checks() instead of check_estimator() + + should_be_marked, reason = _should_be_skipped_or_marked(estimator, check) + if not should_be_marked: + return estimator, check + else: + return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason)) + + +def _maybe_skip(estimator, check): + # Wrap a check so that it's skipped if needed (see conditions in + # _should_be_skipped_or_marked()) + # This is similar to _maybe_mark_xfail(), but this one is used by + # check_estimator() instead of @parametrize_with_checks which requires + # pytest + should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check) + if not should_be_skipped: + return check + + check_name = check.func.__name__ if isinstance(check, partial) else check.__name__ + + @wraps(check) + def wrapped(*args, **kwargs): + raise SkipTest( + f"Skipping {check_name} for {estimator.__class__.__name__}: {reason}" + ) + + return wrapped + + +def _should_be_skipped_or_marked(estimator, check): + # Return whether a check should be skipped (when using check_estimator()) + # or marked as XFAIL (when using @parametrize_with_checks()), along with a 
+ # reason. + # Currently, a check should be skipped or marked if + # the check is in the _xfail_checks tag of the estimator + + check_name = check.func.__name__ if isinstance(check, partial) else check.__name__ + + xfail_checks = _safe_tags(estimator, key="_xfail_checks") or {} + if check_name in xfail_checks: + return True, xfail_checks[check_name] + + return False, "placeholder reason that will never be used" + + +def parametrize_with_checks(estimators): + """Pytest specific decorator for parametrizing estimator checks. + + The `id` of each check is set to be a pprint version of the estimator + and the name of the check with its keyword arguments. + This allows to use `pytest -k` to specify which tests to run:: + + pytest test_check_estimators.py -k check_estimators_fit_returns_self + + Parameters + ---------- + estimators : list of estimators instances + Estimators to generated checks for. + + .. versionchanged:: 0.24 + Passing a class was deprecated in version 0.23, and support for + classes was removed in 0.24. Pass an instance instead. + + .. versionadded:: 0.24 + + Returns + ------- + decorator : `pytest.mark.parametrize` + + See Also + -------- + check_estimator : Check if estimator adheres to scikit-learn conventions. + + Examples + -------- + >>> from sklearn.utils.estimator_checks import parametrize_with_checks + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.tree import DecisionTreeRegressor + + >>> @parametrize_with_checks([LogisticRegression(), + ... DecisionTreeRegressor()]) + ... def test_sklearn_compatible_estimator(estimator, check): + ... check(estimator) + + """ + import pytest + + if any(isinstance(est, type) for est in estimators): + msg = ( + "Passing a class was deprecated in version 0.23 " + "and isn't supported anymore from 0.24." + "Please pass an instance instead." + ) + raise TypeError(msg) + + def checks_generator(): + for estimator in estimators: + name = type(estimator).__name__ + for check in _yield_all_checks(estimator): + check = partial(check, name) + yield _maybe_mark_xfail(estimator, check, pytest) + + return pytest.mark.parametrize( + "estimator, check", checks_generator(), ids=_get_check_estimator_ids + ) + + +def check_estimator(estimator=None, generate_only=False): + """Check if estimator adheres to scikit-learn conventions. + + This function will run an extensive test-suite for input validation, + shapes, etc, making sure that the estimator complies with `scikit-learn` + conventions as detailed in :ref:`rolling_your_own_estimator`. + Additional tests for classifiers, regressors, clustering or transformers + will be run if the Estimator class inherits from the corresponding mixin + from sklearn.base. + + Setting `generate_only=True` returns a generator that yields (estimator, + check) tuples where the check can be called independently from each + other, i.e. `check(estimator)`. This allows all checks to be run + independently and report the checks that are failing. + + scikit-learn provides a pytest specific decorator, + :func:`~sklearn.utils.estimator_checks.parametrize_with_checks`, making it + easier to test multiple estimators. + + Parameters + ---------- + estimator : estimator object + Estimator instance to check. + + .. versionadded:: 1.1 + Passing a class was deprecated in version 0.23, and support for + classes was removed in 0.24. + + generate_only : bool, default=False + When `False`, checks are evaluated when `check_estimator` is called. 
+ When `True`, `check_estimator` returns a generator that yields + (estimator, check) tuples. The check is run by calling + `check(estimator)`. + + .. versionadded:: 0.22 + + Returns + ------- + checks_generator : generator + Generator that yields (estimator, check) tuples. Returned when + `generate_only=True`. + + See Also + -------- + parametrize_with_checks : Pytest specific decorator for parametrizing estimator + checks. + + Examples + -------- + >>> from sklearn.utils.estimator_checks import check_estimator + >>> from sklearn.linear_model import LogisticRegression + >>> check_estimator(LogisticRegression(), generate_only=True) + + """ + if isinstance(estimator, type): + msg = ( + "Passing a class was deprecated in version 0.23 " + "and isn't supported anymore from 0.24." + "Please pass an instance instead." + ) + raise TypeError(msg) + + name = type(estimator).__name__ + + def checks_generator(): + for check in _yield_all_checks(estimator): + check = _maybe_skip(estimator, check) + yield estimator, partial(check, name) + + if generate_only: + return checks_generator() + + for estimator, check in checks_generator(): + try: + check(estimator) + except SkipTest as exception: + # SkipTest is thrown when pandas can't be imported, or by checks + # that are in the xfail_checks tag + warnings.warn(str(exception), SkipTestWarning) + + +def _regression_dataset(): + global REGRESSION_DATASET + if REGRESSION_DATASET is None: + X, y = make_regression( + n_samples=200, + n_features=10, + n_informative=1, + bias=5.0, + noise=20, + random_state=42, + ) + X = StandardScaler().fit_transform(X) + REGRESSION_DATASET = X, y + return REGRESSION_DATASET + + +def _set_checking_parameters(estimator): + # set parameters to speed up some estimators and + # avoid deprecated behaviour + params = estimator.get_params() + name = estimator.__class__.__name__ + if name == "TSNE": + estimator.set_params(perplexity=2) + if "n_iter" in params and name != "TSNE": + estimator.set_params(n_iter=5) + if "max_iter" in params: + if estimator.max_iter is not None: + estimator.set_params(max_iter=min(5, estimator.max_iter)) + # LinearSVR, LinearSVC + if name in ["LinearSVR", "LinearSVC"]: + estimator.set_params(max_iter=20) + # NMF + if name == "NMF": + estimator.set_params(max_iter=500) + # DictionaryLearning + if name == "DictionaryLearning": + estimator.set_params(max_iter=20, transform_algorithm="lasso_lars") + # MiniBatchNMF + if estimator.__class__.__name__ == "MiniBatchNMF": + estimator.set_params(max_iter=20, fresh_restarts=True) + # MLP + if name in ["MLPClassifier", "MLPRegressor"]: + estimator.set_params(max_iter=100) + # MiniBatchDictionaryLearning + if name == "MiniBatchDictionaryLearning": + estimator.set_params(max_iter=5) + + if "n_resampling" in params: + # randomized lasso + estimator.set_params(n_resampling=5) + if "n_estimators" in params: + estimator.set_params(n_estimators=min(5, estimator.n_estimators)) + if "max_trials" in params: + # RANSAC + estimator.set_params(max_trials=10) + if "n_init" in params: + # K-Means + estimator.set_params(n_init=2) + if "batch_size" in params and not name.startswith("MLP"): + estimator.set_params(batch_size=10) + + if name == "MeanShift": + # In the case of check_fit2d_1sample, bandwidth is set to None and + # is thus estimated. De facto it is 0.0 as a single sample is provided + # and this makes the test fails. Hence we give it a placeholder value. 
+ estimator.set_params(bandwidth=1.0) + + if name == "TruncatedSVD": + # TruncatedSVD doesn't run with n_components = n_features + # This is ugly :-/ + estimator.n_components = 1 + + if name == "LassoLarsIC": + # Noise variance estimation does not work when `n_samples < n_features`. + # We need to provide the noise variance explicitly. + estimator.set_params(noise_variance=1.0) + + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = min(estimator.n_clusters, 2) + + if hasattr(estimator, "n_best"): + estimator.n_best = 1 + + if name == "SelectFdr": + # be tolerant of noisy datasets (not actually speed) + estimator.set_params(alpha=0.5) + + if name == "TheilSenRegressor": + estimator.max_subpopulation = 100 + + if isinstance(estimator, BaseRandomProjection): + # Due to the jl lemma and often very few samples, the number + # of components of the random matrix projection will be probably + # greater than the number of features. + # So we impose a smaller number (avoid "auto" mode) + estimator.set_params(n_components=2) + + if isinstance(estimator, SelectKBest): + # SelectKBest has a default of k=10 + # which is more feature than we have in most case. + estimator.set_params(k=1) + + if name in ("HistGradientBoostingClassifier", "HistGradientBoostingRegressor"): + # The default min_samples_leaf (20) isn't appropriate for small + # datasets (only very shallow trees are built) that the checks use. + estimator.set_params(min_samples_leaf=5) + + if name == "DummyClassifier": + # the default strategy prior would output constant predictions and fail + # for check_classifiers_predictions + estimator.set_params(strategy="stratified") + + # Speed-up by reducing the number of CV or splits for CV estimators + loo_cv = ["RidgeCV", "RidgeClassifierCV"] + if name not in loo_cv and hasattr(estimator, "cv"): + estimator.set_params(cv=3) + if hasattr(estimator, "n_splits"): + estimator.set_params(n_splits=3) + + if name == "OneHotEncoder": + estimator.set_params(handle_unknown="ignore") + + if name == "QuantileRegressor": + # Avoid warning due to Scipy deprecating interior-point solver + solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point" + estimator.set_params(solver=solver) + + if name in CROSS_DECOMPOSITION: + estimator.set_params(n_components=1) + + # Default "auto" parameter can lead to different ordering of eigenvalues on + # windows: #24105 + if name == "SpectralEmbedding": + estimator.set_params(eigen_tol=1e-5) + + if name == "HDBSCAN": + estimator.set_params(min_samples=1) + + +class _NotAnArray: + """An object that is convertible to an array. + + Parameters + ---------- + data : array-like + The data. + """ + + def __init__(self, data): + self.data = np.asarray(data) + + def __array__(self, dtype=None): + return self.data + + def __array_function__(self, func, types, args, kwargs): + if func.__name__ == "may_share_memory": + return True + raise TypeError("Don't want to call array_function {}!".format(func.__name__)) + + +def _is_pairwise_metric(estimator): + """Returns True if estimator accepts pairwise metric. + + Parameters + ---------- + estimator : object + Estimator object to test. + + Returns + ------- + out : bool + True if _pairwise is set to True and False otherwise. + """ + metric = getattr(estimator, "metric", None) + + return bool(metric == "precomputed") + + +def _generate_sparse_matrix(X_csr): + """Generate sparse matrices with {32,64}bit indices of diverse format. + + Parameters + ---------- + X_csr: CSR Matrix + Input matrix in CSR format. 
+
+    Returns
+    -------
+    out : iterator of (str, matrix) tuples
+        Matrices in the formats 'dok', 'lil', 'dia', 'bsr', 'csr', 'csc',
+        'coo', 'coo_64', 'csc_64' and 'csr_64'.
+    """
+
+    assert X_csr.format == "csr"
+    yield "csr", X_csr.copy()
+    for sparse_format in ["dok", "lil", "dia", "bsr", "csc", "coo"]:
+        yield sparse_format, X_csr.asformat(sparse_format)
+
+    # Generate a large-indices matrix only if it is supported by scipy
+    X_coo = X_csr.asformat("coo")
+    X_coo.row = X_coo.row.astype("int64")
+    X_coo.col = X_coo.col.astype("int64")
+    yield "coo_64", X_coo
+
+    for sparse_format in ["csc", "csr"]:
+        X = X_csr.asformat(sparse_format)
+        X.indices = X.indices.astype("int64")
+        X.indptr = X.indptr.astype("int64")
+        yield sparse_format + "_64", X
+
+
+def check_array_api_input(
+    name,
+    estimator_orig,
+    array_namespace,
+    device=None,
+    dtype_name="float64",
+    check_values=False,
+):
+    """Check that the estimator can work consistently with the Array API.
+
+    By default, this just checks that the types and shapes of the arrays are
+    consistent with calling the same estimator with numpy arrays.
+
+    When check_values is True, it also checks that calling the estimator on the
+    array_api Array gives the same results as ndarrays.
+    """
+    xp = _array_api_for_tests(array_namespace, device)
+
+    X, y = make_classification(random_state=42)
+    X = X.astype(dtype_name, copy=False)
+
+    X = _enforce_estimator_tags_X(estimator_orig, X)
+    y = _enforce_estimator_tags_y(estimator_orig, y)
+
+    est = clone(estimator_orig)
+
+    X_xp = xp.asarray(X, device=device)
+    y_xp = xp.asarray(y, device=device)
+
+    est.fit(X, y)
+
+    array_attributes = {
+        key: value for key, value in vars(est).items() if isinstance(value, np.ndarray)
+    }
+
+    est_xp = clone(est)
+    with config_context(array_api_dispatch=True):
+        est_xp.fit(X_xp, y_xp)
+        input_ns = get_namespace(X_xp)[0].__name__
+
+    # Fitted attributes which are arrays must have the same
+    # namespace as the one of the training data.
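+    # A minimal sketch of the property verified below (assuming the optional
+    # array_api_strict package and an estimator that supports Array API
+    # dispatch; the names used here are illustrative only):
+    #
+    #   import array_api_strict as xp_demo
+    #   with config_context(array_api_dispatch=True):
+    #       est_demo = clone(estimator_orig).fit(xp_demo.asarray(X), xp_demo.asarray(y))
+    #   # every fitted ndarray attribute of est_demo should now live in the
+    #   # array_api_strict namespace, on the same device as the inputs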
+ for key, attribute in array_attributes.items(): + est_xp_param = getattr(est_xp, key) + with config_context(array_api_dispatch=True): + attribute_ns = get_namespace(est_xp_param)[0].__name__ + assert attribute_ns == input_ns, ( + f"'{key}' attribute is in wrong namespace, expected {input_ns} " + f"got {attribute_ns}" + ) + + assert array_device(est_xp_param) == array_device(X_xp) + + est_xp_param_np = _convert_to_numpy(est_xp_param, xp=xp) + if check_values: + assert_allclose( + attribute, + est_xp_param_np, + err_msg=f"{key} not the same", + atol=np.finfo(X.dtype).eps * 100, + ) + else: + assert attribute.shape == est_xp_param_np.shape + assert attribute.dtype == est_xp_param_np.dtype + + # Check estimator methods, if supported, give the same results + methods = ( + "score", + "score_samples", + "decision_function", + "predict", + "predict_log_proba", + "predict_proba", + "transform", + ) + + for method_name in methods: + method = getattr(est, method_name, None) + if method is None: + continue + + if method_name == "score": + result = method(X, y) + with config_context(array_api_dispatch=True): + result_xp = getattr(est_xp, method_name)(X_xp, y_xp) + # score typically returns a Python float + assert isinstance(result, float) + assert isinstance(result_xp, float) + if check_values: + assert abs(result - result_xp) < np.finfo(X.dtype).eps * 100 + continue + else: + result = method(X) + with config_context(array_api_dispatch=True): + result_xp = getattr(est_xp, method_name)(X_xp) + + with config_context(array_api_dispatch=True): + result_ns = get_namespace(result_xp)[0].__name__ + assert result_ns == input_ns, ( + f"'{method}' output is in wrong namespace, expected {input_ns}, " + f"got {result_ns}." + ) + + assert array_device(result_xp) == array_device(X_xp) + result_xp_np = _convert_to_numpy(result_xp, xp=xp) + + if check_values: + assert_allclose( + result, + result_xp_np, + err_msg=f"{method} did not the return the same result", + atol=np.finfo(X.dtype).eps * 100, + ) + else: + if hasattr(result, "shape"): + assert result.shape == result_xp_np.shape + assert result.dtype == result_xp_np.dtype + + if method_name == "transform" and hasattr(est, "inverse_transform"): + inverse_result = est.inverse_transform(result) + with config_context(array_api_dispatch=True): + invese_result_xp = est_xp.inverse_transform(result_xp) + inverse_result_ns = get_namespace(invese_result_xp)[0].__name__ + assert inverse_result_ns == input_ns, ( + "'inverse_transform' output is in wrong namespace, expected" + f" {input_ns}, got {inverse_result_ns}." 
+ ) + + assert array_device(invese_result_xp) == array_device(X_xp) + + invese_result_xp_np = _convert_to_numpy(invese_result_xp, xp=xp) + if check_values: + assert_allclose( + inverse_result, + invese_result_xp_np, + err_msg="inverse_transform did not the return the same result", + atol=np.finfo(X.dtype).eps * 100, + ) + else: + assert inverse_result.shape == invese_result_xp_np.shape + assert inverse_result.dtype == invese_result_xp_np.dtype + + +def check_array_api_input_and_values( + name, + estimator_orig, + array_namespace, + device=None, + dtype_name="float64", +): + return check_array_api_input( + name, + estimator_orig, + array_namespace=array_namespace, + device=device, + dtype_name=dtype_name, + check_values=True, + ) + + +def check_estimator_sparse_data(name, estimator_orig): + rng = np.random.RandomState(0) + X = rng.uniform(size=(40, 3)) + X[X < 0.8] = 0 + X = _enforce_estimator_tags_X(estimator_orig, X) + X_csr = sparse.csr_matrix(X) + y = (4 * rng.uniform(size=40)).astype(int) + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + tags = _safe_tags(estimator_orig) + for matrix_format, X in _generate_sparse_matrix(X_csr): + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + estimator = clone(estimator_orig) + if name in ["Scaler", "StandardScaler"]: + estimator.set_params(with_mean=False) + # fit and predict + if "64" in matrix_format: + err_msg = ( + f"Estimator {name} doesn't seem to support {matrix_format} " + "matrix, and is not failing gracefully, e.g. by using " + "check_array(X, accept_large_sparse=False)" + ) + else: + err_msg = ( + f"Estimator {name} doesn't seem to fail gracefully on sparse " + "data: error message should state explicitly that sparse " + "input is not supported if this is not the case." + ) + with raises( + (TypeError, ValueError), + match=["sparse", "Sparse"], + may_pass=True, + err_msg=err_msg, + ): + with ignore_warnings(category=FutureWarning): + estimator.fit(X, y) + if hasattr(estimator, "predict"): + pred = estimator.predict(X) + if tags["multioutput_only"]: + assert pred.shape == (X.shape[0], 1) + else: + assert pred.shape == (X.shape[0],) + if hasattr(estimator, "predict_proba"): + probs = estimator.predict_proba(X) + if tags["binary_only"]: + expected_probs_shape = (X.shape[0], 2) + else: + expected_probs_shape = (X.shape[0], 4) + assert probs.shape == expected_probs_shape + + +@ignore_warnings(category=FutureWarning) +def check_sample_weights_pandas_series(name, estimator_orig): + # check that estimators will accept a 'sample_weight' parameter of + # type pandas.Series in the 'fit' function. + estimator = clone(estimator_orig) + try: + import pandas as pd + + X = np.array( + [ + [1, 1], + [1, 2], + [1, 3], + [1, 4], + [2, 1], + [2, 2], + [2, 3], + [2, 4], + [3, 1], + [3, 2], + [3, 3], + [3, 4], + ] + ) + X = pd.DataFrame(_enforce_estimator_tags_X(estimator_orig, X), copy=False) + y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2]) + weights = pd.Series([1] * 12) + if _safe_tags(estimator, key="multioutput_only"): + y = pd.DataFrame(y, copy=False) + try: + estimator.fit(X, y, sample_weight=weights) + except ValueError: + raise ValueError( + "Estimator {0} raises error if " + "'sample_weight' parameter is of " + "type pandas.Series".format(name) + ) + except ImportError: + raise SkipTest( + "pandas is not installed: not testing for " + "input of type pandas.Series to class weight." 
+ ) + + +@ignore_warnings(category=(FutureWarning)) +def check_sample_weights_not_an_array(name, estimator_orig): + # check that estimators will accept a 'sample_weight' parameter of + # type _NotAnArray in the 'fit' function. + estimator = clone(estimator_orig) + X = np.array( + [ + [1, 1], + [1, 2], + [1, 3], + [1, 4], + [2, 1], + [2, 2], + [2, 3], + [2, 4], + [3, 1], + [3, 2], + [3, 3], + [3, 4], + ] + ) + X = _NotAnArray(_enforce_estimator_tags_X(estimator_orig, X)) + y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2]) + weights = _NotAnArray([1] * 12) + if _safe_tags(estimator, key="multioutput_only"): + y = _NotAnArray(y.data.reshape(-1, 1)) + estimator.fit(X, y, sample_weight=weights) + + +@ignore_warnings(category=(FutureWarning)) +def check_sample_weights_list(name, estimator_orig): + # check that estimators will accept a 'sample_weight' parameter of + # type list in the 'fit' function. + estimator = clone(estimator_orig) + rnd = np.random.RandomState(0) + n_samples = 30 + X = _enforce_estimator_tags_X(estimator_orig, rnd.uniform(size=(n_samples, 3))) + y = np.arange(n_samples) % 3 + y = _enforce_estimator_tags_y(estimator, y) + sample_weight = [3] * n_samples + # Test that estimators don't raise any exception + estimator.fit(X, y, sample_weight=sample_weight) + + +@ignore_warnings(category=FutureWarning) +def check_sample_weights_shape(name, estimator_orig): + # check that estimators raise an error if sample_weight + # shape mismatches the input + estimator = clone(estimator_orig) + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ] + ) + y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2]) + y = _enforce_estimator_tags_y(estimator, y) + + estimator.fit(X, y, sample_weight=np.ones(len(y))) + + with raises(ValueError): + estimator.fit(X, y, sample_weight=np.ones(2 * len(y))) + + with raises(ValueError): + estimator.fit(X, y, sample_weight=np.ones((len(y), 2))) + + +@ignore_warnings(category=FutureWarning) +def check_sample_weights_invariance(name, estimator_orig, kind="ones"): + # For kind="ones" check that the estimators yield same results for + # unit weights and no weights + # For kind="zeros" check that setting sample_weight to 0 is equivalent + # to removing corresponding samples. + estimator1 = clone(estimator_orig) + estimator2 = clone(estimator_orig) + set_random_state(estimator1, random_state=0) + set_random_state(estimator2, random_state=0) + + X1 = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.float64, + ) + y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int) + + if kind == "ones": + X2 = X1 + y2 = y1 + sw2 = np.ones(shape=len(y1)) + err_msg = ( + f"For {name} sample_weight=None is not equivalent to sample_weight=ones" + ) + elif kind == "zeros": + # Construct a dataset that is very different to (X, y) if weights + # are disregarded, but identical to (X, y) given weights. 
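+        # A small illustration of the construction below (a sketch, not part
+        # of the check itself): the data is duplicated with shifted features
+        # and flipped labels, and the duplicates get zero weight, e.g.
+        # [1, 3] with label 1 -> appended copy [2, 4] with label 2 and
+        # sample weight 0, so an estimator that honours sample_weight must
+        # behave exactly as if it had been fitted on (X1, y1) alone.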
+ X2 = np.vstack([X1, X1 + 1]) + y2 = np.hstack([y1, 3 - y1]) + sw2 = np.ones(shape=len(y1) * 2) + sw2[len(y1) :] = 0 + X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0) + + err_msg = ( + f"For {name}, a zero sample_weight is not equivalent to removing the sample" + ) + else: # pragma: no cover + raise ValueError + + y1 = _enforce_estimator_tags_y(estimator1, y1) + y2 = _enforce_estimator_tags_y(estimator2, y2) + + estimator1.fit(X1, y=y1, sample_weight=None) + estimator2.fit(X2, y=y2, sample_weight=sw2) + + for method in ["predict", "predict_proba", "decision_function", "transform"]: + if hasattr(estimator_orig, method): + X_pred1 = getattr(estimator1, method)(X1) + X_pred2 = getattr(estimator2, method)(X1) + assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg) + + +def check_sample_weights_not_overwritten(name, estimator_orig): + # check that estimators don't override the passed sample_weight parameter + estimator = clone(estimator_orig) + set_random_state(estimator, random_state=0) + + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.float64, + ) + y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int) + y = _enforce_estimator_tags_y(estimator, y) + + sample_weight_original = np.ones(y.shape[0]) + sample_weight_original[0] = 10.0 + + sample_weight_fit = sample_weight_original.copy() + + estimator.fit(X, y, sample_weight=sample_weight_fit) + + err_msg = f"{name} overwrote the original `sample_weight` given during fit" + assert_allclose(sample_weight_fit, sample_weight_original, err_msg=err_msg) + + +@ignore_warnings(category=(FutureWarning, UserWarning)) +def check_dtype_object(name, estimator_orig): + # check that estimators treat dtype object as numeric if possible + rng = np.random.RandomState(0) + X = _enforce_estimator_tags_X(estimator_orig, rng.uniform(size=(40, 10))) + X = X.astype(object) + tags = _safe_tags(estimator_orig) + y = (X[:, 0] * 4).astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + estimator.fit(X, y) + if hasattr(estimator, "predict"): + estimator.predict(X) + + if hasattr(estimator, "transform"): + estimator.transform(X) + + with raises(Exception, match="Unknown label type", may_pass=True): + estimator.fit(X, y.astype(object)) + + if "string" not in tags["X_types"]: + X[0, 0] = {"foo": "bar"} + # This error is raised by: + # - `np.asarray` in `check_array` + # - `_unique_python` for encoders + msg = "argument must be .* string.* number" + with raises(TypeError, match=msg): + estimator.fit(X, y) + else: + # Estimators supporting string will not call np.asarray to convert the + # data to numeric and therefore, the error will not be raised. + # Checking for each element dtype in the input array will be costly. + # Refer to #11401 for full discussion. 
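+        # Sketch of the distinction being tested (illustrative): estimators
+        # tagged with "string" in X_types (e.g. text vectorizers) accept raw
+        # Python objects, so the dict placed in X[0, 0] above is not rejected
+        # here, whereas purely numeric estimators fail inside check_array in
+        # the branch above.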
+ estimator.fit(X, y) + + +def check_complex_data(name, estimator_orig): + rng = np.random.RandomState(42) + # check that estimators raise an exception on providing complex data + X = rng.uniform(size=10) + 1j * rng.uniform(size=10) + X = X.reshape(-1, 1) + + # Something both valid for classification and regression + y = rng.randint(low=0, high=2, size=10) + 1j + estimator = clone(estimator_orig) + set_random_state(estimator, random_state=0) + with raises(ValueError, match="Complex data not supported"): + estimator.fit(X, y) + + +@ignore_warnings +def check_dict_unchanged(name, estimator_orig): + # this estimator raises + # ValueError: Found array with 0 feature(s) (shape=(23, 0)) + # while a minimum of 1 is required. + # error + if name in ["SpectralCoclustering"]: + return + rnd = np.random.RandomState(0) + if name in ["RANSACRegressor"]: + X = 3 * rnd.uniform(size=(20, 3)) + else: + X = 2 * rnd.uniform(size=(20, 3)) + + X = _enforce_estimator_tags_X(estimator_orig, X) + + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + if hasattr(estimator, "n_best"): + estimator.n_best = 1 + + set_random_state(estimator, 1) + + estimator.fit(X, y) + for method in ["predict", "transform", "decision_function", "predict_proba"]: + if hasattr(estimator, method): + dict_before = estimator.__dict__.copy() + getattr(estimator, method)(X) + assert estimator.__dict__ == dict_before, ( + "Estimator changes __dict__ during %s" % method + ) + + +def _is_public_parameter(attr): + return not (attr.startswith("_") or attr.endswith("_")) + + +@ignore_warnings(category=FutureWarning) +def check_dont_overwrite_parameters(name, estimator_orig): + # check that fit method only changes or sets private attributes + if hasattr(estimator_orig.__init__, "deprecated_original"): + # to not check deprecated classes + return + estimator = clone(estimator_orig) + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + dict_before_fit = estimator.__dict__.copy() + estimator.fit(X, y) + + dict_after_fit = estimator.__dict__ + + public_keys_after_fit = [ + key for key in dict_after_fit.keys() if _is_public_parameter(key) + ] + + attrs_added_by_fit = [ + key for key in public_keys_after_fit if key not in dict_before_fit.keys() + ] + + # check that fit doesn't add any public attribute + assert not attrs_added_by_fit, ( + "Estimator adds public attribute(s) during" + " the fit method." + " Estimators are only allowed to add private attributes" + " either started with _ or ended" + " with _ but %s added" + % ", ".join(attrs_added_by_fit) + ) + + # check that fit doesn't change any public attribute + attrs_changed_by_fit = [ + key + for key in public_keys_after_fit + if (dict_before_fit[key] is not dict_after_fit[key]) + ] + + assert not attrs_changed_by_fit, ( + "Estimator changes public attribute(s) during" + " the fit method. 
Estimators are only allowed" + " to change attributes started" + " or ended with _, but" + " %s changed" + % ", ".join(attrs_changed_by_fit) + ) + + +@ignore_warnings(category=FutureWarning) +def check_fit2d_predict1d(name, estimator_orig): + # check by fitting a 2d array and predicting with a 1d array + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + estimator.fit(X, y) + + for method in ["predict", "transform", "decision_function", "predict_proba"]: + if hasattr(estimator, method): + assert_raise_message( + ValueError, "Reshape your data", getattr(estimator, method), X[0] + ) + + +def _apply_on_subsets(func, X): + # apply function on the whole set and on mini batches + result_full = func(X) + n_features = X.shape[1] + result_by_batch = [func(batch.reshape(1, n_features)) for batch in X] + + # func can output tuple (e.g. score_samples) + if type(result_full) == tuple: + result_full = result_full[0] + result_by_batch = list(map(lambda x: x[0], result_by_batch)) + + if sparse.issparse(result_full): + result_full = result_full.toarray() + result_by_batch = [x.toarray() for x in result_by_batch] + + return np.ravel(result_full), np.ravel(result_by_batch) + + +@ignore_warnings(category=FutureWarning) +def check_methods_subset_invariance(name, estimator_orig): + # check that method gives invariant results if applied + # on mini batches or the whole set + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + estimator.fit(X, y) + + for method in [ + "predict", + "transform", + "decision_function", + "score_samples", + "predict_proba", + ]: + msg = ("{method} of {name} is not invariant when applied to a subset.").format( + method=method, name=name + ) + + if hasattr(estimator, method): + result_full, result_by_batch = _apply_on_subsets( + getattr(estimator, method), X + ) + assert_allclose(result_full, result_by_batch, atol=1e-7, err_msg=msg) + + +@ignore_warnings(category=FutureWarning) +def check_methods_sample_order_invariance(name, estimator_orig): + # check that method gives invariant results if applied + # on a subset with different sample order + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(np.int64) + if _safe_tags(estimator_orig, key="binary_only"): + y[y == 2] = 1 + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 2 + + set_random_state(estimator, 1) + estimator.fit(X, y) + + idx = np.random.permutation(X.shape[0]) + + for method in [ + "predict", + "transform", + "decision_function", + "score_samples", + "predict_proba", + ]: + msg = ( + "{method} of {name} is not invariant when applied to a dataset" + "with different sample order." 
+ ).format(method=method, name=name) + + if hasattr(estimator, method): + assert_allclose_dense_sparse( + getattr(estimator, method)(X)[idx], + getattr(estimator, method)(X[idx]), + atol=1e-9, + err_msg=msg, + ) + + +@ignore_warnings +def check_fit2d_1sample(name, estimator_orig): + # Check that fitting a 2d array with only one sample either works or + # returns an informative message. The error message should either mention + # the number of samples or the number of classes. + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(1, 10)) + X = _enforce_estimator_tags_X(estimator_orig, X) + + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + + # min_cluster_size cannot be less than the data size for OPTICS. + if name == "OPTICS": + estimator.set_params(min_samples=1.0) + + # perplexity cannot be more than the number of samples for TSNE. + if name == "TSNE": + estimator.set_params(perplexity=0.5) + + msgs = [ + "1 sample", + "n_samples = 1", + "n_samples=1", + "one sample", + "1 class", + "one class", + ] + + with raises(ValueError, match=msgs, may_pass=True): + estimator.fit(X, y) + + +@ignore_warnings +def check_fit2d_1feature(name, estimator_orig): + # check fitting a 2d array with only 1 feature either works or returns + # informative message + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(10, 1)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + # ensure two labels in subsample for RandomizedLogisticRegression + if name == "RandomizedLogisticRegression": + estimator.sample_fraction = 1 + # ensure non skipped trials for RANSACRegressor + if name == "RANSACRegressor": + estimator.residual_threshold = 0.5 + + y = _enforce_estimator_tags_y(estimator, y) + set_random_state(estimator, 1) + + msgs = [r"1 feature\(s\)", "n_features = 1", "n_features=1"] + + with raises(ValueError, match=msgs, may_pass=True): + estimator.fit(X, y) + + +@ignore_warnings +def check_fit1d(name, estimator_orig): + # check fitting 1d X array raises a ValueError + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20)) + y = X.astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + with raises(ValueError): + estimator.fit(X, y) + + +@ignore_warnings(category=FutureWarning) +def check_transformer_general(name, transformer, readonly_memmap=False): + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + X = _enforce_estimator_tags_X(transformer, X) + + if readonly_memmap: + X, y = create_memmap_backed_data([X, y]) + + _check_transformer(name, transformer, X, y) + + +@ignore_warnings(category=FutureWarning) +def check_transformer_data_not_an_array(name, transformer): + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = 
StandardScaler().fit_transform(X) + X = _enforce_estimator_tags_X(transformer, X) + this_X = _NotAnArray(X) + this_y = _NotAnArray(np.asarray(y)) + _check_transformer(name, transformer, this_X, this_y) + # try the same with some list + _check_transformer(name, transformer, X.tolist(), y.tolist()) + + +@ignore_warnings(category=FutureWarning) +def check_transformers_unfitted(name, transformer): + X, y = _regression_dataset() + + transformer = clone(transformer) + with raises( + (AttributeError, ValueError), + err_msg=( + "The unfitted " + f"transformer {name} does not raise an error when " + "transform is called. Perhaps use " + "check_is_fitted in transform." + ), + ): + transformer.transform(X) + + +@ignore_warnings(category=FutureWarning) +def check_transformers_unfitted_stateless(name, transformer): + """Check that using transform without prior fitting + doesn't raise a NotFittedError for stateless transformers. + """ + rng = np.random.RandomState(0) + X = rng.uniform(size=(20, 5)) + X = _enforce_estimator_tags_X(transformer, X) + + transformer = clone(transformer) + X_trans = transformer.transform(X) + + assert X_trans.shape[0] == X.shape[0] + + +def _check_transformer(name, transformer_orig, X, y): + n_samples, n_features = np.asarray(X).shape + transformer = clone(transformer_orig) + set_random_state(transformer) + + # fit + + if name in CROSS_DECOMPOSITION: + y_ = np.c_[np.asarray(y), np.asarray(y)] + y_[::2, 1] *= 2 + if isinstance(X, _NotAnArray): + y_ = _NotAnArray(y_) + else: + y_ = y + + transformer.fit(X, y_) + # fit_transform method should work on non fitted estimator + transformer_clone = clone(transformer) + X_pred = transformer_clone.fit_transform(X, y=y_) + + if isinstance(X_pred, tuple): + for x_pred in X_pred: + assert x_pred.shape[0] == n_samples + else: + # check for consistent n_samples + assert X_pred.shape[0] == n_samples + + if hasattr(transformer, "transform"): + if name in CROSS_DECOMPOSITION: + X_pred2 = transformer.transform(X, y_) + X_pred3 = transformer.fit_transform(X, y=y_) + else: + X_pred2 = transformer.transform(X) + X_pred3 = transformer.fit_transform(X, y=y_) + + if _safe_tags(transformer_orig, key="non_deterministic"): + msg = name + " is non deterministic" + raise SkipTest(msg) + if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple): + for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3): + assert_allclose_dense_sparse( + x_pred, + x_pred2, + atol=1e-2, + err_msg="fit_transform and transform outcomes not consistent in %s" + % transformer, + ) + assert_allclose_dense_sparse( + x_pred, + x_pred3, + atol=1e-2, + err_msg="consecutive fit_transform outcomes not consistent in %s" + % transformer, + ) + else: + assert_allclose_dense_sparse( + X_pred, + X_pred2, + err_msg="fit_transform and transform outcomes not consistent in %s" + % transformer, + atol=1e-2, + ) + assert_allclose_dense_sparse( + X_pred, + X_pred3, + atol=1e-2, + err_msg="consecutive fit_transform outcomes not consistent in %s" + % transformer, + ) + assert _num_samples(X_pred2) == n_samples + assert _num_samples(X_pred3) == n_samples + + # raises error on malformed input for transform + if ( + hasattr(X, "shape") + and not _safe_tags(transformer, key="stateless") + and X.ndim == 2 + and X.shape[1] > 1 + ): + # If it's not an array, it does not have a 'T' property + with raises( + ValueError, + err_msg=( + f"The transformer {name} does not raise an error " + "when the number of features in transform is different from " + "the number of features in fit." 
+ ), + ): + transformer.transform(X[:, :-1]) + + +@ignore_warnings +def check_pipeline_consistency(name, estimator_orig): + if _safe_tags(estimator_orig, key="non_deterministic"): + msg = name + " is non deterministic" + raise SkipTest(msg) + + # check that make_pipeline(est) gives same score as est + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + set_random_state(estimator) + pipeline = make_pipeline(estimator) + estimator.fit(X, y) + pipeline.fit(X, y) + + funcs = ["score", "fit_transform"] + + for func_name in funcs: + func = getattr(estimator, func_name, None) + if func is not None: + func_pipeline = getattr(pipeline, func_name) + result = func(X, y) + result_pipe = func_pipeline(X, y) + assert_allclose_dense_sparse(result, result_pipe) + + +@ignore_warnings +def check_fit_score_takes_y(name, estimator_orig): + # check that all estimators accept an optional y + # in fit and score so they can be used in pipelines + rnd = np.random.RandomState(0) + n_samples = 30 + X = rnd.uniform(size=(n_samples, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = np.arange(n_samples) % 3 + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + set_random_state(estimator) + + funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"] + for func_name in funcs: + func = getattr(estimator, func_name, None) + if func is not None: + func(X, y) + args = [p.name for p in signature(func).parameters.values()] + if args[0] == "self": + # available_if makes methods into functions + # with an explicit "self", so need to shift arguments + args = args[1:] + assert args[1] in ["y", "Y"], ( + "Expected y or Y as second argument for method " + "%s of %s. Got arguments: %r." + % (func_name, type(estimator).__name__, args) + ) + + +@ignore_warnings +def check_estimators_dtypes(name, estimator_orig): + rnd = np.random.RandomState(0) + X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32) + X_train_32 = _enforce_estimator_tags_X(estimator_orig, X_train_32) + X_train_64 = X_train_32.astype(np.float64) + X_train_int_64 = X_train_32.astype(np.int64) + X_train_int_32 = X_train_32.astype(np.int32) + y = X_train_int_64[:, 0] + y = _enforce_estimator_tags_y(estimator_orig, y) + + methods = ["predict", "transform", "decision_function", "predict_proba"] + + for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]: + estimator = clone(estimator_orig) + set_random_state(estimator, 1) + estimator.fit(X_train, y) + + for method in methods: + if hasattr(estimator, method): + getattr(estimator, method)(X_train) + + +def check_transformer_preserve_dtypes(name, transformer_orig): + # check that dtype are preserved meaning if input X is of some dtype + # X_transformed should be from the same dtype. 
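+    # A minimal sketch of the contract exercised here (assuming a transformer
+    # whose `preserves_dtype` tag lists np.float32; SomeTransformer is a
+    # hypothetical name):
+    #
+    #   Xt = SomeTransformer().fit_transform(X.astype(np.float32))
+    #   assert Xt.dtype == np.float32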
+ X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + X = _enforce_estimator_tags_X(transformer_orig, X) + + for dtype in _safe_tags(transformer_orig, key="preserves_dtype"): + X_cast = X.astype(dtype) + transformer = clone(transformer_orig) + set_random_state(transformer) + X_trans1 = transformer.fit_transform(X_cast, y) + X_trans2 = transformer.fit(X_cast, y).transform(X_cast) + + for Xt, method in zip([X_trans1, X_trans2], ["fit_transform", "transform"]): + if isinstance(Xt, tuple): + # cross-decompostion returns a tuple of (x_scores, y_scores) + # when given y with fit_transform; only check the first element + Xt = Xt[0] + + # check that the output dtype is preserved + assert Xt.dtype == dtype, ( + f"{name} (method={method}) does not preserve dtype. " + f"Original/Expected dtype={dtype.__name__}, got dtype={Xt.dtype}." + ) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_empty_data_messages(name, estimator_orig): + e = clone(estimator_orig) + set_random_state(e, 1) + + X_zero_samples = np.empty(0).reshape(0, 3) + # The precise message can change depending on whether X or y is + # validated first. Let us test the type of exception only: + err_msg = ( + f"The estimator {name} does not raise a ValueError when an " + "empty data is used to train. Perhaps use check_array in train." + ) + with raises(ValueError, err_msg=err_msg): + e.fit(X_zero_samples, []) + + X_zero_features = np.empty(0).reshape(12, 0) + # the following y should be accepted by both classifiers and regressors + # and ignored by unsupervised models + y = _enforce_estimator_tags_y(e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])) + msg = r"0 feature\(s\) \(shape=\(\d*, 0\)\) while a minimum of \d* " "is required." + with raises(ValueError, match=msg): + e.fit(X_zero_features, y) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_nan_inf(name, estimator_orig): + # Checks that Estimator X's do not contain NaN or inf. + rnd = np.random.RandomState(0) + X_train_finite = _enforce_estimator_tags_X( + estimator_orig, rnd.uniform(size=(10, 3)) + ) + X_train_nan = rnd.uniform(size=(10, 3)) + X_train_nan[0, 0] = np.nan + X_train_inf = rnd.uniform(size=(10, 3)) + X_train_inf[0, 0] = np.inf + y = np.ones(10) + y[:5] = 0 + y = _enforce_estimator_tags_y(estimator_orig, y) + error_string_fit = f"Estimator {name} doesn't check for NaN and inf in fit." + error_string_predict = f"Estimator {name} doesn't check for NaN and inf in predict." + error_string_transform = ( + f"Estimator {name} doesn't check for NaN and inf in transform." 
+ ) + for X_train in [X_train_nan, X_train_inf]: + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + estimator = clone(estimator_orig) + set_random_state(estimator, 1) + # try to fit + with raises(ValueError, match=["inf", "NaN"], err_msg=error_string_fit): + estimator.fit(X_train, y) + # actually fit + estimator.fit(X_train_finite, y) + + # predict + if hasattr(estimator, "predict"): + with raises( + ValueError, + match=["inf", "NaN"], + err_msg=error_string_predict, + ): + estimator.predict(X_train) + + # transform + if hasattr(estimator, "transform"): + with raises( + ValueError, + match=["inf", "NaN"], + err_msg=error_string_transform, + ): + estimator.transform(X_train) + + +@ignore_warnings +def check_nonsquare_error(name, estimator_orig): + """Test that error is thrown when non-square data provided.""" + + X, y = make_blobs(n_samples=20, n_features=10) + estimator = clone(estimator_orig) + + with raises( + ValueError, + err_msg=( + f"The pairwise estimator {name} does not raise an error on non-square data" + ), + ): + estimator.fit(X, y) + + +@ignore_warnings +def check_estimators_pickle(name, estimator_orig, readonly_memmap=False): + """Test that we can pickle all estimators.""" + check_methods = ["predict", "transform", "decision_function", "predict_proba"] + + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + + X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel) + + tags = _safe_tags(estimator_orig) + # include NaN values when the estimator should deal with them + if tags["allow_nan"]: + # set randomly 10 elements to np.nan + rng = np.random.RandomState(42) + mask = rng.choice(X.size, 10, replace=False) + X.reshape(-1)[mask] = np.nan + + estimator = clone(estimator_orig) + + y = _enforce_estimator_tags_y(estimator, y) + + set_random_state(estimator) + estimator.fit(X, y) + + if readonly_memmap: + unpickled_estimator = create_memmap_backed_data(estimator) + else: + # No need to touch the file system in that case. + pickled_estimator = pickle.dumps(estimator) + module_name = estimator.__module__ + if module_name.startswith("sklearn.") and not ( + "test_" in module_name or module_name.endswith("_testing") + ): + # strict check for sklearn estimators that are not implemented in test + # modules. + assert b"_sklearn_version" in pickled_estimator + unpickled_estimator = pickle.loads(pickled_estimator) + + result = dict() + for method in check_methods: + if hasattr(estimator, method): + result[method] = getattr(estimator, method)(X) + + for method in result: + unpickled_result = getattr(unpickled_estimator, method)(X) + assert_allclose_dense_sparse(result[method], unpickled_result) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_partial_fit_n_features(name, estimator_orig): + # check if number of features changes between calls to partial_fit. + if not hasattr(estimator_orig, "partial_fit"): + return + estimator = clone(estimator_orig) + X, y = make_blobs(n_samples=50, random_state=1) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = _enforce_estimator_tags_y(estimator_orig, y) + + try: + if is_classifier(estimator): + classes = np.unique(y) + estimator.partial_fit(X, y, classes=classes) + else: + estimator.partial_fit(X, y) + except NotImplementedError: + return + + with raises( + ValueError, + err_msg=( + f"The estimator {name} does not raise an error when the " + "number of features changes between calls to partial_fit." 
+ ), + ): + estimator.partial_fit(X[:, :-1], y) + + +@ignore_warnings(category=FutureWarning) +def check_classifier_multioutput(name, estimator): + n_samples, n_labels, n_classes = 42, 5, 3 + tags = _safe_tags(estimator) + estimator = clone(estimator) + X, y = make_multilabel_classification( + random_state=42, n_samples=n_samples, n_labels=n_labels, n_classes=n_classes + ) + estimator.fit(X, y) + y_pred = estimator.predict(X) + + assert y_pred.shape == (n_samples, n_classes), ( + "The shape of the prediction for multioutput data is " + "incorrect. Expected {}, got {}.".format((n_samples, n_labels), y_pred.shape) + ) + assert y_pred.dtype.kind == "i" + + if hasattr(estimator, "decision_function"): + decision = estimator.decision_function(X) + assert isinstance(decision, np.ndarray) + assert decision.shape == (n_samples, n_classes), ( + "The shape of the decision function output for " + "multioutput data is incorrect. Expected {}, got {}.".format( + (n_samples, n_classes), decision.shape + ) + ) + + dec_pred = (decision > 0).astype(int) + dec_exp = estimator.classes_[dec_pred] + assert_array_equal(dec_exp, y_pred) + + if hasattr(estimator, "predict_proba"): + y_prob = estimator.predict_proba(X) + + if isinstance(y_prob, list) and not tags["poor_score"]: + for i in range(n_classes): + assert y_prob[i].shape == (n_samples, 2), ( + "The shape of the probability for multioutput data is" + " incorrect. Expected {}, got {}.".format( + (n_samples, 2), y_prob[i].shape + ) + ) + assert_array_equal( + np.argmax(y_prob[i], axis=1).astype(int), y_pred[:, i] + ) + elif not tags["poor_score"]: + assert y_prob.shape == (n_samples, n_classes), ( + "The shape of the probability for multioutput data is" + " incorrect. Expected {}, got {}.".format( + (n_samples, n_classes), y_prob.shape + ) + ) + assert_array_equal(y_prob.round().astype(int), y_pred) + + if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"): + for i in range(n_classes): + y_proba = estimator.predict_proba(X)[:, i] + y_decision = estimator.decision_function(X) + assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i])) + + +@ignore_warnings(category=FutureWarning) +def check_regressor_multioutput(name, estimator): + estimator = clone(estimator) + n_samples = n_features = 10 + + if not _is_pairwise_metric(estimator): + n_samples = n_samples + 1 + + X, y = make_regression( + random_state=42, n_targets=5, n_samples=n_samples, n_features=n_features + ) + X = _enforce_estimator_tags_X(estimator, X) + + estimator.fit(X, y) + y_pred = estimator.predict(X) + + assert y_pred.dtype == np.dtype("float64"), ( + "Multioutput predictions by a regressor are expected to be" + " floating-point precision. Got {} instead".format(y_pred.dtype) + ) + assert y_pred.shape == y.shape, ( + "The shape of the prediction for multioutput data is incorrect." + " Expected {}, got {}." 
+ ) + + +@ignore_warnings(category=FutureWarning) +def check_clustering(name, clusterer_orig, readonly_memmap=False): + clusterer = clone(clusterer_orig) + X, y = make_blobs(n_samples=50, random_state=1) + X, y = shuffle(X, y, random_state=7) + X = StandardScaler().fit_transform(X) + rng = np.random.RandomState(7) + X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))]) + + if readonly_memmap: + X, y, X_noise = create_memmap_backed_data([X, y, X_noise]) + + n_samples, n_features = X.shape + # catch deprecation and neighbors warnings + if hasattr(clusterer, "n_clusters"): + clusterer.set_params(n_clusters=3) + set_random_state(clusterer) + if name == "AffinityPropagation": + clusterer.set_params(preference=-100) + clusterer.set_params(max_iter=100) + + # fit + clusterer.fit(X) + # with lists + clusterer.fit(X.tolist()) + + pred = clusterer.labels_ + assert pred.shape == (n_samples,) + assert adjusted_rand_score(pred, y) > 0.4 + if _safe_tags(clusterer, key="non_deterministic"): + return + set_random_state(clusterer) + with warnings.catch_warnings(record=True): + pred2 = clusterer.fit_predict(X) + assert_array_equal(pred, pred2) + + # fit_predict(X) and labels_ should be of type int + assert pred.dtype in [np.dtype("int32"), np.dtype("int64")] + assert pred2.dtype in [np.dtype("int32"), np.dtype("int64")] + + # Add noise to X to test the possible values of the labels + labels = clusterer.fit_predict(X_noise) + + # There should be at least one sample in every cluster. Equivalently + # labels_ should contain all the consecutive values between its + # min and its max. + labels_sorted = np.unique(labels) + assert_array_equal( + labels_sorted, np.arange(labels_sorted[0], labels_sorted[-1] + 1) + ) + + # Labels are expected to start at 0 (no noise) or -1 (if noise) + assert labels_sorted[0] in [0, -1] + # Labels should be less than n_clusters - 1 + if hasattr(clusterer, "n_clusters"): + n_clusters = getattr(clusterer, "n_clusters") + assert n_clusters - 1 >= labels_sorted[-1] + # else labels should be less than max(labels_) which is necessarily true + + +@ignore_warnings(category=FutureWarning) +def check_clusterer_compute_labels_predict(name, clusterer_orig): + """Check that predict is invariant of compute_labels.""" + X, y = make_blobs(n_samples=20, random_state=0) + clusterer = clone(clusterer_orig) + set_random_state(clusterer) + + if hasattr(clusterer, "compute_labels"): + # MiniBatchKMeans + X_pred1 = clusterer.fit(X).predict(X) + clusterer.set_params(compute_labels=False) + X_pred2 = clusterer.fit(X).predict(X) + assert_array_equal(X_pred1, X_pred2) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_one_label(name, classifier_orig): + error_string_fit = "Classifier can't train when only one class is present." + error_string_predict = "Classifier can't predict when only one class is present." 
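+    # Sketch of the two acceptable behaviours probed below (illustrative):
+    # either classifier.fit(X_train, np.ones(10)) raises a ValueError whose
+    # message mentions "class", or the fit succeeds and predict returns the
+    # single seen label for every test sample.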
+ rnd = np.random.RandomState(0) + X_train = rnd.uniform(size=(10, 3)) + X_test = rnd.uniform(size=(10, 3)) + y = np.ones(10) + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + classifier = clone(classifier_orig) + with raises( + ValueError, match="class", may_pass=True, err_msg=error_string_fit + ) as cm: + classifier.fit(X_train, y) + + if cm.raised_and_matched: + # ValueError was raised with proper error message + return + + assert_array_equal(classifier.predict(X_test), y, err_msg=error_string_predict) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_one_label_sample_weights(name, classifier_orig): + """Check that classifiers accepting sample_weight fit or throws a ValueError with + an explicit message if the problem is reduced to one class. + """ + error_fit = ( + f"{name} failed when fitted on one label after sample_weight trimming. Error " + "message is not explicit, it should have 'class'." + ) + error_predict = f"{name} prediction results should only output the remaining class." + rnd = np.random.RandomState(0) + # X should be square for test on SVC with precomputed kernel + X_train = rnd.uniform(size=(10, 10)) + X_test = rnd.uniform(size=(10, 10)) + y = np.arange(10) % 2 + sample_weight = y.copy() # select a single class + classifier = clone(classifier_orig) + + if has_fit_parameter(classifier, "sample_weight"): + match = [r"\bclass(es)?\b", error_predict] + err_type, err_msg = (AssertionError, ValueError), error_fit + else: + match = r"\bsample_weight\b" + err_type, err_msg = (TypeError, ValueError), None + + with raises(err_type, match=match, may_pass=True, err_msg=err_msg) as cm: + classifier.fit(X_train, y, sample_weight=sample_weight) + if cm.raised_and_matched: + # raise the proper error type with the proper error message + return + # for estimators that do not fail, they should be able to predict the only + # class remaining during fit + assert_array_equal( + classifier.predict(X_test), np.ones(10), err_msg=error_predict + ) + + +@ignore_warnings # Warnings are raised by decision function +def check_classifiers_train( + name, classifier_orig, readonly_memmap=False, X_dtype="float64" +): + X_m, y_m = make_blobs(n_samples=300, random_state=0) + X_m = X_m.astype(X_dtype) + X_m, y_m = shuffle(X_m, y_m, random_state=7) + X_m = StandardScaler().fit_transform(X_m) + # generate binary problem from multi-class one + y_b = y_m[y_m != 2] + X_b = X_m[y_m != 2] + + if name in ["BernoulliNB", "MultinomialNB", "ComplementNB", "CategoricalNB"]: + X_m -= X_m.min() + X_b -= X_b.min() + + if readonly_memmap: + X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b]) + + problems = [(X_b, y_b)] + tags = _safe_tags(classifier_orig) + if not tags["binary_only"]: + problems.append((X_m, y_m)) + + for X, y in problems: + classes = np.unique(y) + n_classes = len(classes) + n_samples, n_features = X.shape + classifier = clone(classifier_orig) + X = _enforce_estimator_tags_X(classifier, X) + y = _enforce_estimator_tags_y(classifier, y) + + set_random_state(classifier) + # raises error on malformed input for fit + if not tags["no_validation"]: + with raises( + ValueError, + err_msg=( + f"The classifier {name} does not raise an error when " + "incorrect/malformed input data for fit is passed. The number " + "of training examples is not the same as the number of " + "labels. Perhaps use check_X_y in fit." 
+ ), + ): + classifier.fit(X, y[:-1]) + + # fit + classifier.fit(X, y) + # with lists + classifier.fit(X.tolist(), y.tolist()) + assert hasattr(classifier, "classes_") + y_pred = classifier.predict(X) + + assert y_pred.shape == (n_samples,) + # training set performance + if not tags["poor_score"]: + assert accuracy_score(y, y_pred) > 0.83 + + # raises error on malformed input for predict + msg_pairwise = ( + "The classifier {} does not raise an error when shape of X in " + " {} is not equal to (n_test_samples, n_training_samples)" + ) + msg = ( + "The classifier {} does not raise an error when the number of " + "features in {} is different from the number of features in " + "fit." + ) + + if not tags["no_validation"]: + if tags["pairwise"]: + with raises( + ValueError, + err_msg=msg_pairwise.format(name, "predict"), + ): + classifier.predict(X.reshape(-1, 1)) + else: + with raises(ValueError, err_msg=msg.format(name, "predict")): + classifier.predict(X.T) + if hasattr(classifier, "decision_function"): + try: + # decision_function agrees with predict + decision = classifier.decision_function(X) + if n_classes == 2: + if not tags["multioutput_only"]: + assert decision.shape == (n_samples,) + else: + assert decision.shape == (n_samples, 1) + dec_pred = (decision.ravel() > 0).astype(int) + assert_array_equal(dec_pred, y_pred) + else: + assert decision.shape == (n_samples, n_classes) + assert_array_equal(np.argmax(decision, axis=1), y_pred) + + # raises error on malformed input for decision_function + if not tags["no_validation"]: + if tags["pairwise"]: + with raises( + ValueError, + err_msg=msg_pairwise.format(name, "decision_function"), + ): + classifier.decision_function(X.reshape(-1, 1)) + else: + with raises( + ValueError, + err_msg=msg.format(name, "decision_function"), + ): + classifier.decision_function(X.T) + except NotImplementedError: + pass + + if hasattr(classifier, "predict_proba"): + # predict_proba agrees with predict + y_prob = classifier.predict_proba(X) + assert y_prob.shape == (n_samples, n_classes) + assert_array_equal(np.argmax(y_prob, axis=1), y_pred) + # check that probas for all classes sum to one + assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples)) + if not tags["no_validation"]: + # raises error on malformed input for predict_proba + if tags["pairwise"]: + with raises( + ValueError, + err_msg=msg_pairwise.format(name, "predict_proba"), + ): + classifier.predict_proba(X.reshape(-1, 1)) + else: + with raises( + ValueError, + err_msg=msg.format(name, "predict_proba"), + ): + classifier.predict_proba(X.T) + if hasattr(classifier, "predict_log_proba"): + # predict_log_proba is a transformation of predict_proba + y_log_prob = classifier.predict_log_proba(X) + assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9) + assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob)) + + +def check_outlier_corruption(num_outliers, expected_outliers, decision): + # Check for deviation from the precise given contamination level that may + # be due to ties in the anomaly scores. + if num_outliers < expected_outliers: + start = num_outliers + end = expected_outliers + 1 + else: + start = expected_outliers + end = num_outliers + 1 + + # ensure that all values in the 'critical area' are tied, + # leading to the observed discrepancy between provided + # and actual contamination levels. 
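+    # A tiny worked example (a sketch, not data from the check): with
+    # decision = [-1, 0, 0, 0, 3] and expected_outliers = 2 but
+    # num_outliers = 1 (the three samples tied at the threshold are all
+    # predicted as inliers), the critical area is sorted_decision[1:3],
+    # i.e. [0, 0]; it is fully tied, so the assertion below passes.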
+ sorted_decision = np.sort(decision) + msg = ( + "The number of predicted outliers is not equal to the expected " + "number of outliers and this difference is not explained by the " + "number of ties in the decision_function values" + ) + assert len(np.unique(sorted_decision[start:end])) == 1, msg + + +def check_outliers_train(name, estimator_orig, readonly_memmap=True): + n_samples = 300 + X, _ = make_blobs(n_samples=n_samples, random_state=0) + X = shuffle(X, random_state=7) + + if readonly_memmap: + X = create_memmap_backed_data(X) + + n_samples, n_features = X.shape + estimator = clone(estimator_orig) + set_random_state(estimator) + + # fit + estimator.fit(X) + # with lists + estimator.fit(X.tolist()) + + y_pred = estimator.predict(X) + assert y_pred.shape == (n_samples,) + assert y_pred.dtype.kind == "i" + assert_array_equal(np.unique(y_pred), np.array([-1, 1])) + + decision = estimator.decision_function(X) + scores = estimator.score_samples(X) + for output in [decision, scores]: + assert output.dtype == np.dtype("float") + assert output.shape == (n_samples,) + + # raises error on malformed input for predict + with raises(ValueError): + estimator.predict(X.T) + + # decision_function agrees with predict + dec_pred = (decision >= 0).astype(int) + dec_pred[dec_pred == 0] = -1 + assert_array_equal(dec_pred, y_pred) + + # raises error on malformed input for decision_function + with raises(ValueError): + estimator.decision_function(X.T) + + # decision_function is a translation of score_samples + y_dec = scores - estimator.offset_ + assert_allclose(y_dec, decision) + + # raises error on malformed input for score_samples + with raises(ValueError): + estimator.score_samples(X.T) + + # contamination parameter (not for OneClassSVM which has the nu parameter) + if hasattr(estimator, "contamination") and not hasattr(estimator, "novelty"): + # proportion of outliers equal to contamination parameter when not + # set to 'auto'. This is true for the training set and cannot thus be + # checked as follows for estimators with a novelty parameter such as + # LocalOutlierFactor (tested in check_outliers_fit_predict) + expected_outliers = 30 + contamination = expected_outliers / n_samples + estimator.set_params(contamination=contamination) + estimator.fit(X) + y_pred = estimator.predict(X) + + num_outliers = np.sum(y_pred != 1) + # num_outliers should be equal to expected_outliers unless + # there are ties in the decision_function values. this can + # only be tested for estimators with a decision_function + # method, i.e. all estimators except LOF which is already + # excluded from this if branch. + if num_outliers != expected_outliers: + decision = estimator.decision_function(X) + check_outlier_corruption(num_outliers, expected_outliers, decision) + + +def check_outlier_contamination(name, estimator_orig): + # Check that the contamination parameter is in (0.0, 0.5] when it is an + # interval constraint. + + if not hasattr(estimator_orig, "_parameter_constraints"): + # Only estimator implementing parameter constraints will be checked + return + + if "contamination" not in estimator_orig._parameter_constraints: + return + + contamination_constraints = estimator_orig._parameter_constraints["contamination"] + if not any([isinstance(c, Interval) for c in contamination_constraints]): + raise AssertionError( + "contamination constraints should contain a Real Interval constraint." 
+ ) + + for constraint in contamination_constraints: + if isinstance(constraint, Interval): + assert ( + constraint.type == Real + and constraint.left >= 0.0 + and constraint.right <= 0.5 + and (constraint.left > 0 or constraint.closed in {"right", "neither"}) + ), "contamination constraint should be an interval in (0, 0.5]" + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_representation_invariance(name, classifier_orig): + X, y = make_multilabel_classification( + n_samples=100, + n_features=2, + n_classes=5, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, y_train = X[:80], y[:80] + X_test = X[80:] + + y_train_list_of_lists = y_train.tolist() + y_train_list_of_arrays = list(y_train) + + classifier = clone(classifier_orig) + set_random_state(classifier) + + y_pred = classifier.fit(X_train, y_train).predict(X_test) + + y_pred_list_of_lists = classifier.fit(X_train, y_train_list_of_lists).predict( + X_test + ) + + y_pred_list_of_arrays = classifier.fit(X_train, y_train_list_of_arrays).predict( + X_test + ) + + assert_array_equal(y_pred, y_pred_list_of_arrays) + assert_array_equal(y_pred, y_pred_list_of_lists) + + assert y_pred.dtype == y_pred_list_of_arrays.dtype + assert y_pred.dtype == y_pred_list_of_lists.dtype + assert type(y_pred) == type(y_pred_list_of_arrays) + assert type(y_pred) == type(y_pred_list_of_lists) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_output_format_predict(name, classifier_orig): + """Check the output of the `predict` method for classifiers supporting + multilabel-indicator targets.""" + classifier = clone(classifier_orig) + set_random_state(classifier) + + n_samples, test_size, n_outputs = 100, 25, 5 + X, y = make_multilabel_classification( + n_samples=n_samples, + n_features=2, + n_classes=n_outputs, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, X_test = X[:-test_size], X[-test_size:] + y_train, y_test = y[:-test_size], y[-test_size:] + classifier.fit(X_train, y_train) + + response_method_name = "predict" + predict_method = getattr(classifier, response_method_name, None) + if predict_method is None: + raise SkipTest(f"{name} does not have a {response_method_name} method.") + + y_pred = predict_method(X_test) + + # y_pred.shape -> y_test.shape with the same dtype + assert isinstance(y_pred, np.ndarray), ( + f"{name}.predict is expected to output a NumPy array. Got " + f"{type(y_pred)} instead." + ) + assert y_pred.shape == y_test.shape, ( + f"{name}.predict outputs a NumPy array of shape {y_pred.shape} " + f"instead of {y_test.shape}." + ) + assert y_pred.dtype == y_test.dtype, ( + f"{name}.predict does not output the same dtype than the targets. " + f"Got {y_pred.dtype} instead of {y_test.dtype}." 
+ ) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_output_format_predict_proba(name, classifier_orig): + """Check the output of the `predict_proba` method for classifiers supporting + multilabel-indicator targets.""" + classifier = clone(classifier_orig) + set_random_state(classifier) + + n_samples, test_size, n_outputs = 100, 25, 5 + X, y = make_multilabel_classification( + n_samples=n_samples, + n_features=2, + n_classes=n_outputs, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, X_test = X[:-test_size], X[-test_size:] + y_train = y[:-test_size] + classifier.fit(X_train, y_train) + + response_method_name = "predict_proba" + predict_proba_method = getattr(classifier, response_method_name, None) + if predict_proba_method is None: + raise SkipTest(f"{name} does not have a {response_method_name} method.") + + y_pred = predict_proba_method(X_test) + + # y_pred.shape -> 2 possibilities: + # - list of length n_outputs of shape (n_samples, 2); + # - ndarray of shape (n_samples, n_outputs). + # dtype should be floating + if isinstance(y_pred, list): + assert len(y_pred) == n_outputs, ( + f"When {name}.predict_proba returns a list, the list should " + "be of length n_outputs and contain NumPy arrays. Got length " + f"of {len(y_pred)} instead of {n_outputs}." + ) + for pred in y_pred: + assert pred.shape == (test_size, 2), ( + f"When {name}.predict_proba returns a list, this list " + "should contain NumPy arrays of shape (n_samples, 2). Got " + f"NumPy arrays of shape {pred.shape} instead of " + f"{(test_size, 2)}." + ) + assert pred.dtype.kind == "f", ( + f"When {name}.predict_proba returns a list, it should " + "contain NumPy arrays with floating dtype. Got " + f"{pred.dtype} instead." + ) + # check that we have the correct probabilities + err_msg = ( + f"When {name}.predict_proba returns a list, each NumPy " + "array should contain probabilities for each class and " + "thus each row should sum to 1 (or close to 1 due to " + "numerical errors)." + ) + assert_allclose(pred.sum(axis=1), 1, err_msg=err_msg) + elif isinstance(y_pred, np.ndarray): + assert y_pred.shape == (test_size, n_outputs), ( + f"When {name}.predict_proba returns a NumPy array, the " + f"expected shape is (n_samples, n_outputs). Got {y_pred.shape}" + f" instead of {(test_size, n_outputs)}." + ) + assert y_pred.dtype.kind == "f", ( + f"When {name}.predict_proba returns a NumPy array, the " + f"expected data type is floating. Got {y_pred.dtype} instead." + ) + err_msg = ( + f"When {name}.predict_proba returns a NumPy array, this array " + "is expected to provide probabilities of the positive class " + "and should therefore contain values between 0 and 1." + ) + assert_array_less(0, y_pred, err_msg=err_msg) + assert_array_less(y_pred, 1, err_msg=err_msg) + else: + raise ValueError( + f"Unknown returned type {type(y_pred)} by {name}." + "predict_proba. A list or a Numpy array is expected." 
+ ) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_output_format_decision_function(name, classifier_orig): + """Check the output of the `decision_function` method for classifiers supporting + multilabel-indicator targets.""" + classifier = clone(classifier_orig) + set_random_state(classifier) + + n_samples, test_size, n_outputs = 100, 25, 5 + X, y = make_multilabel_classification( + n_samples=n_samples, + n_features=2, + n_classes=n_outputs, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, X_test = X[:-test_size], X[-test_size:] + y_train = y[:-test_size] + classifier.fit(X_train, y_train) + + response_method_name = "decision_function" + decision_function_method = getattr(classifier, response_method_name, None) + if decision_function_method is None: + raise SkipTest(f"{name} does not have a {response_method_name} method.") + + y_pred = decision_function_method(X_test) + + # y_pred.shape -> y_test.shape with floating dtype + assert isinstance(y_pred, np.ndarray), ( + f"{name}.decision_function is expected to output a NumPy array." + f" Got {type(y_pred)} instead." + ) + assert y_pred.shape == (test_size, n_outputs), ( + f"{name}.decision_function is expected to provide a NumPy array " + f"of shape (n_samples, n_outputs). Got {y_pred.shape} instead of " + f"{(test_size, n_outputs)}." + ) + assert y_pred.dtype.kind == "f", ( + f"{name}.decision_function is expected to output a floating dtype." + f" Got {y_pred.dtype} instead." + ) + + +@ignore_warnings(category=FutureWarning) +def check_get_feature_names_out_error(name, estimator_orig): + """Check the error raised by get_feature_names_out when called before fit. + + Unfitted estimators with get_feature_names_out should raise a NotFittedError. + """ + + estimator = clone(estimator_orig) + err_msg = ( + f"Estimator {name} should have raised a NotFitted error when fit is called" + " before get_feature_names_out" + ) + with raises(NotFittedError, err_msg=err_msg): + estimator.get_feature_names_out() + + +@ignore_warnings(category=FutureWarning) +def check_estimators_fit_returns_self(name, estimator_orig, readonly_memmap=False): + """Check if self is returned when calling fit.""" + X, y = make_blobs(random_state=0, n_samples=21) + X = _enforce_estimator_tags_X(estimator_orig, X) + + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if readonly_memmap: + X, y = create_memmap_backed_data([X, y]) + + set_random_state(estimator) + assert estimator.fit(X, y) is estimator + + +@ignore_warnings +def check_estimators_unfitted(name, estimator_orig): + """Check that predict raises an exception in an unfitted estimator. + + Unfitted estimators should raise a NotFittedError. 
+ """ + # Common test for Regressors, Classifiers and Outlier detection estimators + X, y = _regression_dataset() + + estimator = clone(estimator_orig) + for method in ( + "decision_function", + "predict", + "predict_proba", + "predict_log_proba", + ): + if hasattr(estimator, method): + with raises(NotFittedError): + getattr(estimator, method)(X) + + +@ignore_warnings(category=FutureWarning) +def check_supervised_y_2d(name, estimator_orig): + tags = _safe_tags(estimator_orig) + rnd = np.random.RandomState(0) + n_samples = 30 + X = _enforce_estimator_tags_X(estimator_orig, rnd.uniform(size=(n_samples, 3))) + y = np.arange(n_samples) % 3 + y = _enforce_estimator_tags_y(estimator_orig, y) + estimator = clone(estimator_orig) + set_random_state(estimator) + # fit + estimator.fit(X, y) + y_pred = estimator.predict(X) + + set_random_state(estimator) + # Check that when a 2D y is given, a DataConversionWarning is + # raised + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", DataConversionWarning) + warnings.simplefilter("ignore", RuntimeWarning) + estimator.fit(X, y[:, np.newaxis]) + y_pred_2d = estimator.predict(X) + msg = "expected 1 DataConversionWarning, got: %s" % ", ".join( + [str(w_x) for w_x in w] + ) + if not tags["multioutput"]: + # check that we warned if we don't support multi-output + assert len(w) > 0, msg + assert ( + "DataConversionWarning('A column-vector y" + " was passed when a 1d array was expected" + in msg + ) + assert_allclose(y_pred.ravel(), y_pred_2d.ravel()) + + +@ignore_warnings +def check_classifiers_predictions(X, y, name, classifier_orig): + classes = np.unique(y) + classifier = clone(classifier_orig) + if name == "BernoulliNB": + X = X > X.mean() + set_random_state(classifier) + + classifier.fit(X, y) + y_pred = classifier.predict(X) + + if hasattr(classifier, "decision_function"): + decision = classifier.decision_function(X) + assert isinstance(decision, np.ndarray) + if len(classes) == 2: + dec_pred = (decision.ravel() > 0).astype(int) + dec_exp = classifier.classes_[dec_pred] + assert_array_equal( + dec_exp, + y_pred, + err_msg=( + "decision_function does not match " + "classifier for %r: expected '%s', got '%s'" + ) + % ( + classifier, + ", ".join(map(str, dec_exp)), + ", ".join(map(str, y_pred)), + ), + ) + elif getattr(classifier, "decision_function_shape", "ovr") == "ovr": + decision_y = np.argmax(decision, axis=1).astype(int) + y_exp = classifier.classes_[decision_y] + assert_array_equal( + y_exp, + y_pred, + err_msg=( + "decision_function does not match " + "classifier for %r: expected '%s', got '%s'" + ) + % ( + classifier, + ", ".join(map(str, y_exp)), + ", ".join(map(str, y_pred)), + ), + ) + + # training set performance + if name != "ComplementNB": + # This is a pathological data set for ComplementNB. + # For some specific cases 'ComplementNB' predicts less classes + # than expected + assert_array_equal(np.unique(y), np.unique(y_pred)) + assert_array_equal( + classes, + classifier.classes_, + err_msg="Unexpected classes_ attribute for %r: expected '%s', got '%s'" + % ( + classifier, + ", ".join(map(str, classes)), + ", ".join(map(str, classifier.classes_)), + ), + ) + + +def _choose_check_classifiers_labels(name, y, y_names): + # Semisupervised classifiers use -1 as the indicator for an unlabeled + # sample. 
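+    # For instance, LabelPropagation is tested with the raw integer labels so
+    # that -1 can still mark unlabeled points, while every other classifier is
+    # exercised with the string labels in `y_names`.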
+ return ( + y + if name in ["LabelPropagation", "LabelSpreading", "SelfTrainingClassifier"] + else y_names + ) + + +def check_classifiers_classes(name, classifier_orig): + X_multiclass, y_multiclass = make_blobs( + n_samples=30, random_state=0, cluster_std=0.1 + ) + X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass, random_state=7) + X_multiclass = StandardScaler().fit_transform(X_multiclass) + + X_binary = X_multiclass[y_multiclass != 2] + y_binary = y_multiclass[y_multiclass != 2] + + X_multiclass = _enforce_estimator_tags_X(classifier_orig, X_multiclass) + X_binary = _enforce_estimator_tags_X(classifier_orig, X_binary) + + labels_multiclass = ["one", "two", "three"] + labels_binary = ["one", "two"] + + y_names_multiclass = np.take(labels_multiclass, y_multiclass) + y_names_binary = np.take(labels_binary, y_binary) + + problems = [(X_binary, y_binary, y_names_binary)] + if not _safe_tags(classifier_orig, key="binary_only"): + problems.append((X_multiclass, y_multiclass, y_names_multiclass)) + + for X, y, y_names in problems: + for y_names_i in [y_names, y_names.astype("O")]: + y_ = _choose_check_classifiers_labels(name, y, y_names_i) + check_classifiers_predictions(X, y_, name, classifier_orig) + + labels_binary = [-1, 1] + y_names_binary = np.take(labels_binary, y_binary) + y_binary = _choose_check_classifiers_labels(name, y_binary, y_names_binary) + check_classifiers_predictions(X_binary, y_binary, name, classifier_orig) + + +@ignore_warnings(category=FutureWarning) +def check_regressors_int(name, regressor_orig): + X, _ = _regression_dataset() + X = _enforce_estimator_tags_X(regressor_orig, X[:50]) + rnd = np.random.RandomState(0) + y = rnd.randint(3, size=X.shape[0]) + y = _enforce_estimator_tags_y(regressor_orig, y) + rnd = np.random.RandomState(0) + # separate estimators to control random seeds + regressor_1 = clone(regressor_orig) + regressor_2 = clone(regressor_orig) + set_random_state(regressor_1) + set_random_state(regressor_2) + + if name in CROSS_DECOMPOSITION: + y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) + y_ = y_.T + else: + y_ = y + + # fit + regressor_1.fit(X, y_) + pred1 = regressor_1.predict(X) + regressor_2.fit(X, y_.astype(float)) + pred2 = regressor_2.predict(X) + assert_allclose(pred1, pred2, atol=1e-2, err_msg=name) + + +@ignore_warnings(category=FutureWarning) +def check_regressors_train( + name, regressor_orig, readonly_memmap=False, X_dtype=np.float64 +): + X, y = _regression_dataset() + X = X.astype(X_dtype) + y = scale(y) # X is already scaled + regressor = clone(regressor_orig) + X = _enforce_estimator_tags_X(regressor, X) + y = _enforce_estimator_tags_y(regressor, y) + if name in CROSS_DECOMPOSITION: + rnd = np.random.RandomState(0) + y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) + y_ = y_.T + else: + y_ = y + + if readonly_memmap: + X, y, y_ = create_memmap_backed_data([X, y, y_]) + + if not hasattr(regressor, "alphas") and hasattr(regressor, "alpha"): + # linear regressors need to set alpha, but not generalized CV ones + regressor.alpha = 0.01 + if name == "PassiveAggressiveRegressor": + regressor.C = 0.01 + + # raises error on malformed input for fit + with raises( + ValueError, + err_msg=( + f"The classifier {name} does not raise an error when " + "incorrect/malformed input data for fit is passed. The number of " + "training examples is not the same as the number of labels. Perhaps " + "use check_X_y in fit." 
+ ), + ): + regressor.fit(X, y[:-1]) + # fit + set_random_state(regressor) + regressor.fit(X, y_) + regressor.fit(X.tolist(), y_.tolist()) + y_pred = regressor.predict(X) + assert y_pred.shape == y_.shape + + # TODO: find out why PLS and CCA fail. RANSAC is random + # and furthermore assumes the presence of outliers, hence + # skipped + if not _safe_tags(regressor, key="poor_score"): + assert regressor.score(X, y_) > 0.5 + + +@ignore_warnings +def check_regressors_no_decision_function(name, regressor_orig): + # check that regressors don't have a decision_function, predict_proba, or + # predict_log_proba method. + rng = np.random.RandomState(0) + regressor = clone(regressor_orig) + + X = rng.normal(size=(10, 4)) + X = _enforce_estimator_tags_X(regressor_orig, X) + y = _enforce_estimator_tags_y(regressor, X[:, 0]) + + regressor.fit(X, y) + funcs = ["decision_function", "predict_proba", "predict_log_proba"] + for func_name in funcs: + assert not hasattr(regressor, func_name) + + +@ignore_warnings(category=FutureWarning) +def check_class_weight_classifiers(name, classifier_orig): + if _safe_tags(classifier_orig, key="binary_only"): + problems = [2] + else: + problems = [2, 3] + + for n_centers in problems: + # create a very noisy dataset + X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.5, random_state=0 + ) + + # can't use gram_if_pairwise() here, setting up gram matrix manually + if _safe_tags(classifier_orig, key="pairwise"): + X_test = rbf_kernel(X_test, X_train) + X_train = rbf_kernel(X_train, X_train) + + n_centers = len(np.unique(y_train)) + + if n_centers == 2: + class_weight = {0: 1000, 1: 0.0001} + else: + class_weight = {0: 1000, 1: 0.0001, 2: 0.0001} + + classifier = clone(classifier_orig).set_params(class_weight=class_weight) + if hasattr(classifier, "n_iter"): + classifier.set_params(n_iter=100) + if hasattr(classifier, "max_iter"): + classifier.set_params(max_iter=1000) + if hasattr(classifier, "min_weight_fraction_leaf"): + classifier.set_params(min_weight_fraction_leaf=0.01) + if hasattr(classifier, "n_iter_no_change"): + classifier.set_params(n_iter_no_change=20) + + set_random_state(classifier) + classifier.fit(X_train, y_train) + y_pred = classifier.predict(X_test) + # XXX: Generally can use 0.89 here. 
On Windows, LinearSVC gets + # 0.88 (Issue #9111) + if not _safe_tags(classifier_orig, key="poor_score"): + assert np.mean(y_pred == 0) > 0.87 + + +@ignore_warnings(category=FutureWarning) +def check_class_weight_balanced_classifiers( + name, classifier_orig, X_train, y_train, X_test, y_test, weights +): + classifier = clone(classifier_orig) + if hasattr(classifier, "n_iter"): + classifier.set_params(n_iter=100) + if hasattr(classifier, "max_iter"): + classifier.set_params(max_iter=1000) + + set_random_state(classifier) + classifier.fit(X_train, y_train) + y_pred = classifier.predict(X_test) + + classifier.set_params(class_weight="balanced") + classifier.fit(X_train, y_train) + y_pred_balanced = classifier.predict(X_test) + assert f1_score(y_test, y_pred_balanced, average="weighted") > f1_score( + y_test, y_pred, average="weighted" + ) + + +@ignore_warnings(category=FutureWarning) +def check_class_weight_balanced_linear_classifier(name, Classifier): + """Test class weights with non-contiguous class labels.""" + # this is run on classes, not instances, though this should be changed + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = np.array([1, 1, 1, -1, -1]) + + classifier = Classifier() + + if hasattr(classifier, "n_iter"): + # This is a very small dataset, default n_iter are likely to prevent + # convergence + classifier.set_params(n_iter=1000) + if hasattr(classifier, "max_iter"): + classifier.set_params(max_iter=1000) + if hasattr(classifier, "cv"): + classifier.set_params(cv=3) + set_random_state(classifier) + + # Let the model compute the class frequencies + classifier.set_params(class_weight="balanced") + coef_balanced = classifier.fit(X, y).coef_.copy() + + # Count each label occurrence to reweight manually + n_samples = len(y) + n_classes = float(len(np.unique(y))) + + class_weight = { + 1: n_samples / (np.sum(y == 1) * n_classes), + -1: n_samples / (np.sum(y == -1) * n_classes), + } + classifier.set_params(class_weight=class_weight) + coef_manual = classifier.fit(X, y).coef_.copy() + + assert_allclose( + coef_balanced, + coef_manual, + err_msg="Classifier %s is not computing class_weight=balanced properly." % name, + ) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_overwrite_params(name, estimator_orig): + X, y = make_blobs(random_state=0, n_samples=21) + X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + set_random_state(estimator) + + # Make a physical copy of the original estimator parameters before fitting. + params = estimator.get_params() + original_params = deepcopy(params) + + # Fit the model + estimator.fit(X, y) + + # Compare the state of the model parameters with the original parameters + new_params = estimator.get_params() + for param_name, original_value in original_params.items(): + new_value = new_params[param_name] + + # We should never change or mutate the internal state of input + # parameters by default. To check this we use the joblib.hash function + # that introspects recursively any subobjects to compute a checksum. + # The only exception to this rule of immutable constructor parameters + # is possible RandomState instance but in this check we explicitly + # fixed the random_state params recursively to be integer seeds. + assert joblib.hash(new_value) == joblib.hash(original_value), ( + "Estimator %s should not change or mutate " + " the parameter %s from %s to %s during fit." 
+ % (name, param_name, original_value, new_value) + ) + + +@ignore_warnings(category=FutureWarning) +def check_no_attributes_set_in_init(name, estimator_orig): + """Check setting during init.""" + try: + # Clone fails if the estimator does not store + # all parameters as an attribute during init + estimator = clone(estimator_orig) + except AttributeError: + raise AttributeError( + f"Estimator {name} should store all parameters as an attribute during init." + ) + + if hasattr(type(estimator).__init__, "deprecated_original"): + return + + init_params = _get_args(type(estimator).__init__) + if IS_PYPY: + # __init__ signature has additional objects in PyPy + for key in ["obj"]: + if key in init_params: + init_params.remove(key) + parents_init_params = [ + param + for params_parent in (_get_args(parent) for parent in type(estimator).__mro__) + for param in params_parent + ] + + # Test for no setting apart from parameters during init + invalid_attr = set(vars(estimator)) - set(init_params) - set(parents_init_params) + # Ignore private attributes + invalid_attr = set([attr for attr in invalid_attr if not attr.startswith("_")]) + assert not invalid_attr, ( + "Estimator %s should not set any attribute apart" + " from parameters during init. Found attributes %s." + % (name, sorted(invalid_attr)) + ) + + +@ignore_warnings(category=FutureWarning) +def check_sparsify_coefficients(name, estimator_orig): + X = np.array( + [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-1, -2], + [2, 2], + [-2, -2], + ] + ) + y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + y = _enforce_estimator_tags_y(estimator_orig, y) + est = clone(estimator_orig) + + est.fit(X, y) + pred_orig = est.predict(X) + + # test sparsify with dense inputs + est.sparsify() + assert sparse.issparse(est.coef_) + pred = est.predict(X) + assert_array_equal(pred, pred_orig) + + # pickle and unpickle with sparse coef_ + est = pickle.loads(pickle.dumps(est)) + assert sparse.issparse(est.coef_) + pred = est.predict(X) + assert_array_equal(pred, pred_orig) + + +@ignore_warnings(category=FutureWarning) +def check_classifier_data_not_an_array(name, estimator_orig): + X = np.array( + [ + [3, 0], + [0, 1], + [0, 2], + [1, 1], + [1, 2], + [2, 1], + [0, 3], + [1, 0], + [2, 0], + [4, 4], + [2, 3], + [3, 2], + ] + ) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2]) + y = _enforce_estimator_tags_y(estimator_orig, y) + for obj_type in ["NotAnArray", "PandasDataframe"]: + check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type) + + +@ignore_warnings(category=FutureWarning) +def check_regressor_data_not_an_array(name, estimator_orig): + X, y = _regression_dataset() + X = _enforce_estimator_tags_X(estimator_orig, X) + y = _enforce_estimator_tags_y(estimator_orig, y) + for obj_type in ["NotAnArray", "PandasDataframe"]: + check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type): + if name in CROSS_DECOMPOSITION: + raise SkipTest( + "Skipping check_estimators_data_not_an_array " + "for cross decomposition module as estimators " + "are not deterministic." 
+ ) + # separate estimators to control random seeds + estimator_1 = clone(estimator_orig) + estimator_2 = clone(estimator_orig) + set_random_state(estimator_1) + set_random_state(estimator_2) + + if obj_type not in ["NotAnArray", "PandasDataframe"]: + raise ValueError("Data type {0} not supported".format(obj_type)) + + if obj_type == "NotAnArray": + y_ = _NotAnArray(np.asarray(y)) + X_ = _NotAnArray(np.asarray(X)) + else: + # Here pandas objects (Series and DataFrame) are tested explicitly + # because some estimators may handle them (especially their indexing) + # specially. + try: + import pandas as pd + + y_ = np.asarray(y) + if y_.ndim == 1: + y_ = pd.Series(y_, copy=False) + else: + y_ = pd.DataFrame(y_, copy=False) + X_ = pd.DataFrame(np.asarray(X), copy=False) + + except ImportError: + raise SkipTest( + "pandas is not installed: not checking estimators for pandas objects." + ) + + # fit + estimator_1.fit(X_, y_) + pred1 = estimator_1.predict(X_) + estimator_2.fit(X, y) + pred2 = estimator_2.predict(X) + assert_allclose(pred1, pred2, atol=1e-2, err_msg=name) + + +def check_parameters_default_constructible(name, Estimator): + # test default-constructibility + # get rid of deprecation warnings + + Estimator = Estimator.__class__ + + with ignore_warnings(category=FutureWarning): + estimator = _construct_instance(Estimator) + # test cloning + clone(estimator) + # test __repr__ + repr(estimator) + # test that set_params returns self + assert estimator.set_params() is estimator + + # test if init does nothing but set parameters + # this is important for grid_search etc. + # We get the default parameters from init and then + # compare these against the actual values of the attributes. + + # this comes from getattr. Gets rid of deprecation decorator. + init = getattr(estimator.__init__, "deprecated_original", estimator.__init__) + + try: + + def param_filter(p): + """Identify hyper parameters of an estimator.""" + return ( + p.name != "self" + and p.kind != p.VAR_KEYWORD + and p.kind != p.VAR_POSITIONAL + ) + + init_params = [ + p for p in signature(init).parameters.values() if param_filter(p) + ] + + except (TypeError, ValueError): + # init is not a python function. + # true for mixins + return + params = estimator.get_params() + # they can need a non-default argument + init_params = init_params[len(getattr(estimator, "_required_parameters", [])) :] + + for init_param in init_params: + assert ( + init_param.default != init_param.empty + ), "parameter %s for %s has no default value" % ( + init_param.name, + type(estimator).__name__, + ) + allowed_types = { + str, + int, + float, + bool, + tuple, + type(None), + type, + } + # Any numpy numeric such as np.int32. + allowed_types.update(np.sctypeDict.values()) + + allowed_value = ( + type(init_param.default) in allowed_types + or + # Although callables are mutable, we accept them as argument + # default value and trust that neither the implementation of + # the callable nor of the estimator changes the state of the + # callable. + callable(init_param.default) + ) + + assert allowed_value, ( + f"Parameter '{init_param.name}' of estimator " + f"'{Estimator.__name__}' is of type " + f"{type(init_param.default).__name__} which is not allowed. " + f"'{init_param.name}' must be a callable or must be of type " + f"{set(type.__name__ for type in allowed_types)}." 
+ ) + if init_param.name not in params.keys(): + # deprecated parameter, not in get_params + assert init_param.default is None, ( + f"Estimator parameter '{init_param.name}' of estimator " + f"'{Estimator.__name__}' is not returned by get_params. " + "If it is deprecated, set its default value to None." + ) + continue + + param_value = params[init_param.name] + if isinstance(param_value, np.ndarray): + assert_array_equal(param_value, init_param.default) + else: + failure_text = ( + f"Parameter {init_param.name} was mutated on init. All " + "parameters must be stored unchanged." + ) + if is_scalar_nan(param_value): + # Allows to set default parameters to np.nan + assert param_value is init_param.default, failure_text + else: + assert param_value == init_param.default, failure_text + + +def _enforce_estimator_tags_y(estimator, y): + # Estimators with a `requires_positive_y` tag only accept strictly positive + # data + if _safe_tags(estimator, key="requires_positive_y"): + # Create strictly positive y. The minimal increment above 0 is 1, as + # y could be of integer dtype. + y += 1 + abs(y.min()) + if _safe_tags(estimator, key="binary_only") and y.size > 0: + y = np.where(y == y.flat[0], y, y.flat[0] + 1) + # Estimators in mono_output_task_error raise ValueError if y is of 1-D + # Convert into a 2-D y for those estimators. + if _safe_tags(estimator, key="multioutput_only"): + return np.reshape(y, (-1, 1)) + return y + + +def _enforce_estimator_tags_X(estimator, X, kernel=linear_kernel): + # Estimators with `1darray` in `X_types` tag only accept + # X of shape (`n_samples`,) + if "1darray" in _safe_tags(estimator, key="X_types"): + X = X[:, 0] + # Estimators with a `requires_positive_X` tag only accept + # strictly positive data + if _safe_tags(estimator, key="requires_positive_X"): + X = X - X.min() + if "categorical" in _safe_tags(estimator, key="X_types"): + dtype = np.float64 if _safe_tags(estimator, key="allow_nan") else np.int32 + X = np.round((X - X.min())).astype(dtype) + + if estimator.__class__.__name__ == "SkewedChi2Sampler": + # SkewedChi2Sampler requires X > -skewdness in transform + X = X - X.min() + + # Pairwise estimators only accept + # X of shape (`n_samples`, `n_samples`) + if _is_pairwise_metric(estimator): + X = pairwise_distances(X, metric="euclidean") + elif _safe_tags(estimator, key="pairwise"): + X = kernel(X, X) + return X + + +@ignore_warnings(category=FutureWarning) +def check_non_transformer_estimators_n_iter(name, estimator_orig): + # Test that estimators that are not transformers with a parameter + # max_iter, return the attribute of n_iter_ at least 1. + + # These models are dependent on external solvers like + # libsvm and accessing the iter parameter is non-trivial. + # SelfTrainingClassifier does not perform an iteration if all samples are + # labeled, hence n_iter_ = 0 is valid. + not_run_check_n_iter = [ + "Ridge", + "RidgeClassifier", + "RandomizedLasso", + "LogisticRegressionCV", + "LinearSVC", + "LogisticRegression", + "SelfTrainingClassifier", + ] + + # Tested in test_transformer_n_iter + not_run_check_n_iter += CROSS_DECOMPOSITION + if name in not_run_check_n_iter: + return + + # LassoLars stops early for the default alpha=1.0 the iris dataset. 
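+    # Forcing alpha=0.0 below makes LassoLars take at least one step along the
+    # path, so the `n_iter_ >= 1` assertion at the end of this check remains
+    # meaningful for it.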
+ if name == "LassoLars": + estimator = clone(estimator_orig).set_params(alpha=0.0) + else: + estimator = clone(estimator_orig) + if hasattr(estimator, "max_iter"): + iris = load_iris() + X, y_ = iris.data, iris.target + y_ = _enforce_estimator_tags_y(estimator, y_) + + set_random_state(estimator, 0) + + X = _enforce_estimator_tags_X(estimator_orig, X) + + estimator.fit(X, y_) + + assert np.all(estimator.n_iter_ >= 1) + + +@ignore_warnings(category=FutureWarning) +def check_transformer_n_iter(name, estimator_orig): + # Test that transformers with a parameter max_iter, return the + # attribute of n_iter_ at least 1. + estimator = clone(estimator_orig) + if hasattr(estimator, "max_iter"): + if name in CROSS_DECOMPOSITION: + # Check using default data + X = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [2.0, 5.0, 4.0]] + y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]] + + else: + X, y_ = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = _enforce_estimator_tags_X(estimator_orig, X) + set_random_state(estimator, 0) + estimator.fit(X, y_) + + # These return a n_iter per component. + if name in CROSS_DECOMPOSITION: + for iter_ in estimator.n_iter_: + assert iter_ >= 1 + else: + assert estimator.n_iter_ >= 1 + + +@ignore_warnings(category=FutureWarning) +def check_get_params_invariance(name, estimator_orig): + # Checks if get_params(deep=False) is a subset of get_params(deep=True) + e = clone(estimator_orig) + + shallow_params = e.get_params(deep=False) + deep_params = e.get_params(deep=True) + + assert all(item in deep_params.items() for item in shallow_params.items()) + + +@ignore_warnings(category=FutureWarning) +def check_set_params(name, estimator_orig): + # Check that get_params() returns the same thing + # before and after set_params() with some fuzz + estimator = clone(estimator_orig) + + orig_params = estimator.get_params(deep=False) + msg = "get_params result does not match what was passed to set_params" + + estimator.set_params(**orig_params) + curr_params = estimator.get_params(deep=False) + assert set(orig_params.keys()) == set(curr_params.keys()), msg + for k, v in curr_params.items(): + assert orig_params[k] is v, msg + + # some fuzz values + test_values = [-np.inf, np.inf, None] + + test_params = deepcopy(orig_params) + for param_name in orig_params.keys(): + default_value = orig_params[param_name] + for value in test_values: + test_params[param_name] = value + try: + estimator.set_params(**test_params) + except (TypeError, ValueError) as e: + e_type = e.__class__.__name__ + # Exception occurred, possibly parameter validation + warnings.warn( + "{0} occurred during set_params of param {1} on " + "{2}. 
It is recommended to delay parameter " + "validation until fit.".format(e_type, param_name, name) + ) + + change_warning_msg = ( + "Estimator's parameters changed after set_params raised {}".format( + e_type + ) + ) + params_before_exception = curr_params + curr_params = estimator.get_params(deep=False) + try: + assert set(params_before_exception.keys()) == set( + curr_params.keys() + ) + for k, v in curr_params.items(): + assert params_before_exception[k] is v + except AssertionError: + warnings.warn(change_warning_msg) + else: + curr_params = estimator.get_params(deep=False) + assert set(test_params.keys()) == set(curr_params.keys()), msg + for k, v in curr_params.items(): + assert test_params[k] is v, msg + test_params[param_name] = default_value + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_regression_target(name, estimator_orig): + # Check if classifier throws an exception when fed regression targets + + X, y = _regression_dataset() + + X = _enforce_estimator_tags_X(estimator_orig, X) + e = clone(estimator_orig) + msg = "Unknown label type: " + if not _safe_tags(e, key="no_validation"): + with raises(ValueError, match=msg): + e.fit(X, y) + + +@ignore_warnings(category=FutureWarning) +def check_decision_proba_consistency(name, estimator_orig): + # Check whether an estimator having both decision_function and + # predict_proba methods has outputs with perfect rank correlation. + + centers = [(2, 2), (4, 4)] + X, y = make_blobs( + n_samples=100, + random_state=0, + n_features=4, + centers=centers, + cluster_std=1.0, + shuffle=True, + ) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=0 + ) + estimator = clone(estimator_orig) + + if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"): + estimator.fit(X_train, y_train) + # Since the link function from decision_function() to predict_proba() + # is sometimes not precise enough (typically expit), we round to the + # 10th decimal to avoid numerical issues: we compare the rank + # with deterministic ties rather than get platform specific rank + # inversions in case of machine level differences. + a = estimator.predict_proba(X_test)[:, 1].round(decimals=10) + b = estimator.decision_function(X_test).round(decimals=10) + + rank_proba, rank_score = rankdata(a), rankdata(b) + try: + assert_array_almost_equal(rank_proba, rank_score) + except AssertionError: + # Sometimes, the rounding applied on the probabilities will have + # ties that are not present in the scores because it is + # numerically more precise. In this case, we relax the test by + # grouping the decision function scores based on the probability + # rank and check that the score is monotonically increasing. + grouped_y_score = np.array( + [b[rank_proba == group].mean() for group in np.unique(rank_proba)] + ) + sorted_idx = np.argsort(grouped_y_score) + assert_array_equal(sorted_idx, np.arange(len(sorted_idx))) + + +def check_outliers_fit_predict(name, estimator_orig): + # Check fit_predict for outlier detectors. + + n_samples = 300 + X, _ = make_blobs(n_samples=n_samples, random_state=0) + X = shuffle(X, random_state=7) + n_samples, n_features = X.shape + estimator = clone(estimator_orig) + + set_random_state(estimator) + + y_pred = estimator.fit_predict(X) + assert y_pred.shape == (n_samples,) + assert y_pred.dtype.kind == "i" + assert_array_equal(np.unique(y_pred), np.array([-1, 1])) + + # check fit_predict = fit.predict when the estimator has both a predict and + # a fit_predict method. 
recall that it is already assumed here that the + # estimator has a fit_predict method + if hasattr(estimator, "predict"): + y_pred_2 = estimator.fit(X).predict(X) + assert_array_equal(y_pred, y_pred_2) + + if hasattr(estimator, "contamination"): + # proportion of outliers equal to contamination parameter when not + # set to 'auto' + expected_outliers = 30 + contamination = float(expected_outliers) / n_samples + estimator.set_params(contamination=contamination) + y_pred = estimator.fit_predict(X) + + num_outliers = np.sum(y_pred != 1) + # num_outliers should be equal to expected_outliers unless + # there are ties in the decision_function values. this can + # only be tested for estimators with a decision_function + # method + if num_outliers != expected_outliers and hasattr( + estimator, "decision_function" + ): + decision = estimator.decision_function(X) + check_outlier_corruption(num_outliers, expected_outliers, decision) + + +def check_fit_non_negative(name, estimator_orig): + # Check that proper warning is raised for non-negative X + # when tag requires_positive_X is present + X = np.array([[-1.0, 1], [-1.0, 1]]) + y = np.array([1, 2]) + estimator = clone(estimator_orig) + with raises(ValueError): + estimator.fit(X, y) + + +def check_fit_idempotent(name, estimator_orig): + # Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would + # check that the estimated parameters during training (e.g. coefs_) are + # the same, but having a universal comparison function for those + # attributes is difficult and full of edge cases. So instead we check that + # predict(), predict_proba(), decision_function() and transform() return + # the same results. + + check_methods = ["predict", "transform", "decision_function", "predict_proba"] + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params().keys(): + estimator.set_params(warm_start=False) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + if is_regressor(estimator_orig): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + train, test = next(ShuffleSplit(test_size=0.2, random_state=rng).split(X)) + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + + # Fit for the first time + estimator.fit(X_train, y_train) + + result = { + method: getattr(estimator, method)(X_test) + for method in check_methods + if hasattr(estimator, method) + } + + # Fit again + set_random_state(estimator) + estimator.fit(X_train, y_train) + + for method in check_methods: + if hasattr(estimator, method): + new_result = getattr(estimator, method)(X_test) + if np.issubdtype(new_result.dtype, np.floating): + tol = 2 * np.finfo(new_result.dtype).eps + else: + tol = 2 * np.finfo(np.float64).eps + assert_allclose_dense_sparse( + result[method], + new_result, + atol=max(tol, 1e-9), + rtol=max(tol, 1e-7), + err_msg="Idempotency check failed for method {}".format(method), + ) + + +def check_fit_check_is_fitted(name, estimator_orig): + # Make sure that estimator doesn't pass check_is_fitted before calling fit + # and that passes check_is_fitted once it's fit. 
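+    # For instance, check_is_fitted(StandardScaler()) is expected to raise
+    # NotFittedError, whereas it passes after StandardScaler().fit(X); stateless
+    # estimators such as FunctionTransformer are exempt from the first part.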
+ + rng = np.random.RandomState(42) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params(): + estimator.set_params(warm_start=False) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + if is_regressor(estimator_orig): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + if not _safe_tags(estimator).get("stateless", False): + # stateless estimators (such as FunctionTransformer) are always "fit"! + try: + check_is_fitted(estimator) + raise AssertionError( + f"{estimator.__class__.__name__} passes check_is_fitted before being" + " fit!" + ) + except NotFittedError: + pass + estimator.fit(X, y) + try: + check_is_fitted(estimator) + except NotFittedError as e: + raise NotFittedError( + "Estimator fails to pass `check_is_fitted` even though it has been fit." + ) from e + + +def check_n_features_in(name, estimator_orig): + # Make sure that n_features_in_ attribute doesn't exist until fit is + # called, and that its value is correct. + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params(): + estimator.set_params(warm_start=False) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + if is_regressor(estimator_orig): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + assert not hasattr(estimator, "n_features_in_") + estimator.fit(X, y) + assert hasattr(estimator, "n_features_in_") + assert estimator.n_features_in_ == X.shape[1] + + +def check_requires_y_none(name, estimator_orig): + # Make sure that an estimator with requires_y=True fails gracefully when + # given y=None + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + + expected_err_msgs = ( + "requires y to be passed, but the target y is None", + "Expected array-like (array or non-string sequence), got None", + "y should be a 1d array", + ) + + try: + estimator.fit(X, None) + except ValueError as ve: + if not any(msg in str(ve) for msg in expected_err_msgs): + raise ve + + +@ignore_warnings(category=FutureWarning) +def check_n_features_in_after_fitting(name, estimator_orig): + # Make sure that n_features_in are checked after fitting + tags = _safe_tags(estimator_orig) + + is_supported_X_types = ( + "2darray" in tags["X_types"] or "categorical" in tags["X_types"] + ) + + if not is_supported_X_types or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params(): + estimator.set_params(warm_start=False) + + n_samples = 150 + X = rng.normal(size=(n_samples, 8)) + X = _enforce_estimator_tags_X(estimator, X) + + if is_regressor(estimator): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + estimator.fit(X, y) + assert estimator.n_features_in_ == X.shape[1] + + # check methods will check n_features_in_ + check_methods = [ + "predict", + "transform", + "decision_function", + "predict_proba", + "score", + ] + X_bad = X[:, [1]] + + msg = f"X has 1 
features, but \\w+ is expecting {X.shape[1]} features as input" + for method in check_methods: + if not hasattr(estimator, method): + continue + + callable_method = getattr(estimator, method) + if method == "score": + callable_method = partial(callable_method, y=y) + + with raises(ValueError, match=msg): + callable_method(X_bad) + + # partial_fit will check in the second call + if not hasattr(estimator, "partial_fit"): + return + + estimator = clone(estimator_orig) + if is_classifier(estimator): + estimator.partial_fit(X, y, classes=np.unique(y)) + else: + estimator.partial_fit(X, y) + assert estimator.n_features_in_ == X.shape[1] + + with raises(ValueError, match=msg): + estimator.partial_fit(X_bad, y) + + +def check_estimator_get_tags_default_keys(name, estimator_orig): + # check that if _get_tags is implemented, it contains all keys from + # _DEFAULT_KEYS + estimator = clone(estimator_orig) + if not hasattr(estimator, "_get_tags"): + return + + tags_keys = set(estimator._get_tags().keys()) + default_tags_keys = set(_DEFAULT_TAGS.keys()) + assert tags_keys.intersection(default_tags_keys) == default_tags_keys, ( + f"{name}._get_tags() is missing entries for the following default tags" + f": {default_tags_keys - tags_keys.intersection(default_tags_keys)}" + ) + + +def check_dataframe_column_names_consistency(name, estimator_orig): + try: + import pandas as pd + except ImportError: + raise SkipTest( + "pandas is not installed: not checking column name consistency for pandas" + ) + + tags = _safe_tags(estimator_orig) + is_supported_X_types = ( + "2darray" in tags["X_types"] or "categorical" in tags["X_types"] + ) + + if not is_supported_X_types or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + + X_orig = rng.normal(size=(150, 8)) + + X_orig = _enforce_estimator_tags_X(estimator, X_orig) + n_samples, n_features = X_orig.shape + + names = np.array([f"col_{i}" for i in range(n_features)]) + X = pd.DataFrame(X_orig, columns=names, copy=False) + + if is_regressor(estimator): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + # Check that calling `fit` does not raise any warnings about feature names. 
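+    # The "X does not have valid feature names" UserWarning is escalated to an
+    # error inside the warnings context below, so a fit that emits it makes the
+    # check fail immediately.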
+ with warnings.catch_warnings(): + warnings.filterwarnings( + "error", + message="X does not have valid feature names", + category=UserWarning, + module="sklearn", + ) + estimator.fit(X, y) + + if not hasattr(estimator, "feature_names_in_"): + raise ValueError( + "Estimator does not have a feature_names_in_ " + "attribute after fitting with a dataframe" + ) + assert isinstance(estimator.feature_names_in_, np.ndarray) + assert estimator.feature_names_in_.dtype == object + assert_array_equal(estimator.feature_names_in_, names) + + # Only check sklearn estimators for feature_names_in_ in docstring + module_name = estimator_orig.__module__ + if ( + module_name.startswith("sklearn.") + and not ("test_" in module_name or module_name.endswith("_testing")) + and ("feature_names_in_" not in (estimator_orig.__doc__)) + ): + raise ValueError( + f"Estimator {name} does not document its feature_names_in_ attribute" + ) + + check_methods = [] + for method in ( + "predict", + "transform", + "decision_function", + "predict_proba", + "score", + "score_samples", + "predict_log_proba", + ): + if not hasattr(estimator, method): + continue + + callable_method = getattr(estimator, method) + if method == "score": + callable_method = partial(callable_method, y=y) + check_methods.append((method, callable_method)) + + for _, method in check_methods: + with warnings.catch_warnings(): + warnings.filterwarnings( + "error", + message="X does not have valid feature names", + category=UserWarning, + module="sklearn", + ) + method(X) # works without UserWarning for valid features + + invalid_names = [ + (names[::-1], "Feature names must be in the same order as they were in fit."), + ( + [f"another_prefix_{i}" for i in range(n_features)], + ( + "Feature names unseen at fit time:\n- another_prefix_0\n-" + " another_prefix_1\n" + ), + ), + ( + names[:3], + f"Feature names seen at fit time, yet now missing:\n- {min(names[3:])}\n", + ), + ] + params = { + key: value + for key, value in estimator.get_params().items() + if "early_stopping" in key + } + early_stopping_enabled = any(value is True for value in params.values()) + + for invalid_name, additional_message in invalid_names: + X_bad = pd.DataFrame(X, columns=invalid_name, copy=False) + + expected_msg = re.escape( + "The feature names should match those that were passed during fit.\n" + f"{additional_message}" + ) + for name, method in check_methods: + with raises( + ValueError, match=expected_msg, err_msg=f"{name} did not raise" + ): + method(X_bad) + + # partial_fit checks on second call + # Do not call partial fit if early_stopping is on + if not hasattr(estimator, "partial_fit") or early_stopping_enabled: + continue + + estimator = clone(estimator_orig) + if is_classifier(estimator): + classes = np.unique(y) + estimator.partial_fit(X, y, classes=classes) + else: + estimator.partial_fit(X, y) + + with raises(ValueError, match=expected_msg): + estimator.partial_fit(X_bad, y) + + +def check_transformer_get_feature_names_out(name, transformer_orig): + tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + + transformer = clone(transformer_orig) + X = _enforce_estimator_tags_X(transformer, X) + + n_features = X.shape[1] + set_random_state(transformer) + + y_ = y + if name in CROSS_DECOMPOSITION: + y_ = np.c_[np.asarray(y), np.asarray(y)] + y_[::2, 1] 
*= 2 + + X_transform = transformer.fit_transform(X, y=y_) + input_features = [f"feature{i}" for i in range(n_features)] + + # input_features names is not the same length as n_features_in_ + with raises(ValueError, match="input_features should have length equal"): + transformer.get_feature_names_out(input_features[::2]) + + feature_names_out = transformer.get_feature_names_out(input_features) + assert feature_names_out is not None + assert isinstance(feature_names_out, np.ndarray) + assert feature_names_out.dtype == object + assert all(isinstance(name, str) for name in feature_names_out) + + if isinstance(X_transform, tuple): + n_features_out = X_transform[0].shape[1] + else: + n_features_out = X_transform.shape[1] + + assert ( + len(feature_names_out) == n_features_out + ), f"Expected {n_features_out} feature names, got {len(feature_names_out)}" + + +def check_transformer_get_feature_names_out_pandas(name, transformer_orig): + try: + import pandas as pd + except ImportError: + raise SkipTest( + "pandas is not installed: not checking column name consistency for pandas" + ) + + tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + + transformer = clone(transformer_orig) + X = _enforce_estimator_tags_X(transformer, X) + + n_features = X.shape[1] + set_random_state(transformer) + + y_ = y + if name in CROSS_DECOMPOSITION: + y_ = np.c_[np.asarray(y), np.asarray(y)] + y_[::2, 1] *= 2 + + feature_names_in = [f"col{i}" for i in range(n_features)] + df = pd.DataFrame(X, columns=feature_names_in, copy=False) + X_transform = transformer.fit_transform(df, y=y_) + + # error is raised when `input_features` do not match feature_names_in + invalid_feature_names = [f"bad{i}" for i in range(n_features)] + with raises(ValueError, match="input_features is not equal to feature_names_in_"): + transformer.get_feature_names_out(invalid_feature_names) + + feature_names_out_default = transformer.get_feature_names_out() + feature_names_in_explicit_names = transformer.get_feature_names_out( + feature_names_in + ) + assert_array_equal(feature_names_out_default, feature_names_in_explicit_names) + + if isinstance(X_transform, tuple): + n_features_out = X_transform[0].shape[1] + else: + n_features_out = X_transform.shape[1] + + assert ( + len(feature_names_out_default) == n_features_out + ), f"Expected {n_features_out} feature names, got {len(feature_names_out_default)}" + + +def check_param_validation(name, estimator_orig): + # Check that an informative error is raised when the value of a constructor + # parameter does not have an appropriate type or value. + rng = np.random.RandomState(0) + X = rng.uniform(size=(20, 5)) + y = rng.randint(0, 2, size=20) + y = _enforce_estimator_tags_y(estimator_orig, y) + + estimator_params = estimator_orig.get_params(deep=False).keys() + + # check that there is a constraint for each parameter + if estimator_params: + validation_params = estimator_orig._parameter_constraints.keys() + unexpected_params = set(validation_params) - set(estimator_params) + missing_params = set(estimator_params) - set(validation_params) + err_msg = ( + f"Mismatch between _parameter_constraints and the parameters of {name}." 
+ f"\nConsider the unexpected parameters {unexpected_params} and expected but" + f" missing parameters {missing_params}" + ) + assert validation_params == estimator_params, err_msg + + # this object does not have a valid type for sure for all params + param_with_bad_type = type("BadType", (), {})() + + fit_methods = ["fit", "partial_fit", "fit_transform", "fit_predict"] + + for param_name in estimator_params: + constraints = estimator_orig._parameter_constraints[param_name] + + if constraints == "no_validation": + # This parameter is not validated + continue + + # Mixing an interval of reals and an interval of integers must be avoided. + if any( + isinstance(constraint, Interval) and constraint.type == Integral + for constraint in constraints + ) and any( + isinstance(constraint, Interval) and constraint.type == Real + for constraint in constraints + ): + raise ValueError( + f"The constraint for parameter {param_name} of {name} can't have a mix" + " of intervals of Integral and Real types. Use the type RealNotInt" + " instead of Real." + ) + + match = rf"The '{param_name}' parameter of {name} must be .* Got .* instead." + err_msg = ( + f"{name} does not raise an informative error message when the " + f"parameter {param_name} does not have a valid type or value." + ) + + estimator = clone(estimator_orig) + + # First, check that the error is raised if param doesn't match any valid type. + estimator.set_params(**{param_name: param_with_bad_type}) + + for method in fit_methods: + if not hasattr(estimator, method): + # the method is not accessible with the current set of parameters + continue + + err_msg = ( + f"{name} does not raise an informative error message when the parameter" + f" {param_name} does not have a valid type. If any Python type is" + " valid, the constraint should be 'no_validation'." + ) + + with raises(InvalidParameterError, match=match, err_msg=err_msg): + if any( + isinstance(X_type, str) and X_type.endswith("labels") + for X_type in _safe_tags(estimator, key="X_types") + ): + # The estimator is a label transformer and take only `y` + getattr(estimator, method)(y) + else: + getattr(estimator, method)(X, y) + + # Then, for constraints that are more than a type constraint, check that the + # error is raised if param does match a valid type but does not match any valid + # value for this type. + constraints = [make_constraint(constraint) for constraint in constraints] + + for constraint in constraints: + try: + bad_value = generate_invalid_param_val(constraint) + except NotImplementedError: + continue + + estimator.set_params(**{param_name: bad_value}) + + for method in fit_methods: + if not hasattr(estimator, method): + # the method is not accessible with the current set of parameters + continue + + err_msg = ( + f"{name} does not raise an informative error message when the " + f"parameter {param_name} does not have a valid value.\n" + "Constraints should be disjoint. For instance " + "[StrOptions({'a_string'}), str] is not a acceptable set of " + "constraint because generating an invalid string for the first " + "constraint will always produce a valid string for the second " + "constraint." 
+ ) + + with raises(InvalidParameterError, match=match, err_msg=err_msg): + if any( + X_type.endswith("labels") + for X_type in _safe_tags(estimator, key="X_types") + ): + # The estimator is a label transformer and take only `y` + getattr(estimator, method)(y) + else: + getattr(estimator, method)(X, y) + + +def check_set_output_transform(name, transformer_orig): + # Check transformer.set_output with the default configuration does not + # change the transform output. + tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + transformer = clone(transformer_orig) + + X = rng.uniform(size=(20, 5)) + X = _enforce_estimator_tags_X(transformer_orig, X) + y = rng.randint(0, 2, size=20) + y = _enforce_estimator_tags_y(transformer_orig, y) + set_random_state(transformer) + + def fit_then_transform(est): + if name in CROSS_DECOMPOSITION: + return est.fit(X, y).transform(X, y) + return est.fit(X, y).transform(X) + + def fit_transform(est): + return est.fit_transform(X, y) + + transform_methods = { + "transform": fit_then_transform, + "fit_transform": fit_transform, + } + for name, transform_method in transform_methods.items(): + transformer = clone(transformer) + if not hasattr(transformer, name): + continue + X_trans_no_setting = transform_method(transformer) + + # Auto wrapping only wraps the first array + if name in CROSS_DECOMPOSITION: + X_trans_no_setting = X_trans_no_setting[0] + + transformer.set_output(transform="default") + X_trans_default = transform_method(transformer) + + if name in CROSS_DECOMPOSITION: + X_trans_default = X_trans_default[0] + + # Default and no setting -> returns the same transformation + assert_allclose_dense_sparse(X_trans_no_setting, X_trans_default) + + +def _output_from_fit_transform(transformer, name, X, df, y): + """Generate output to test `set_output` for different configuration: + + - calling either `fit.transform` or `fit_transform`; + - passing either a dataframe or a numpy array to fit; + - passing either a dataframe or a numpy array to transform. + """ + outputs = {} + + # fit then transform case: + cases = [ + ("fit.transform/df/df", df, df), + ("fit.transform/df/array", df, X), + ("fit.transform/array/df", X, df), + ("fit.transform/array/array", X, X), + ] + if all(hasattr(transformer, meth) for meth in ["fit", "transform"]): + for ( + case, + data_fit, + data_transform, + ) in cases: + transformer.fit(data_fit, y) + if name in CROSS_DECOMPOSITION: + X_trans, _ = transformer.transform(data_transform, y) + else: + X_trans = transformer.transform(data_transform) + outputs[case] = (X_trans, transformer.get_feature_names_out()) + + # fit_transform case: + cases = [ + ("fit_transform/df", df), + ("fit_transform/array", X), + ] + if hasattr(transformer, "fit_transform"): + for case, data in cases: + if name in CROSS_DECOMPOSITION: + X_trans, _ = transformer.fit_transform(data, y) + else: + X_trans = transformer.fit_transform(data, y) + outputs[case] = (X_trans, transformer.get_feature_names_out()) + + return outputs + + +def _check_generated_dataframe( + name, + case, + index, + outputs_default, + outputs_dataframe_lib, + is_supported_dataframe, + create_dataframe, + assert_frame_equal, +): + """Check if the generated DataFrame by the transformer is valid. + + The DataFrame implementation is specified through the parameters of this function. + + Parameters + ---------- + name : str + The name of the transformer. 
+    case : str
+        A single case from the cases generated by `_output_from_fit_transform`.
+    index : index or None
+        The index of the DataFrame. `None` if the library does not implement a
+        DataFrame with an index.
+    outputs_default : tuple
+        A tuple containing the output data and feature names for the default output.
+    outputs_dataframe_lib : tuple
+        A tuple containing the output data and feature names for the dataframe
+        library case.
+    is_supported_dataframe : callable
+        A callable that takes a DataFrame instance as input and returns whether or
+        not it is supported by the dataframe library.
+        E.g. `lambda X: isinstance(X, pd.DataFrame)`.
+    create_dataframe : callable
+        A callable taking as parameters `data`, `columns`, and `index` and returning
+        a dataframe. Be aware that `index` can be ignored. For example, polars
+        dataframes will ignore the index.
+    assert_frame_equal : callable
+        A callable taking 2 dataframes to compare if they are equal.
+    """
+    X_trans, feature_names_default = outputs_default
+    df_trans, feature_names_dataframe_lib = outputs_dataframe_lib
+
+    assert is_supported_dataframe(df_trans)
+    # We always rely on the output of `get_feature_names_out` of the
+    # transformer used to generate the dataframe as a ground-truth of the
+    # columns.
+    # If a dataframe is passed into transform, then the output should have the same
+    # index
+    expected_index = index if case.endswith("df") else None
+    expected_dataframe = create_dataframe(
+        X_trans, columns=feature_names_dataframe_lib, index=expected_index
+    )
+
+    try:
+        assert_frame_equal(df_trans, expected_dataframe)
+    except AssertionError as e:
+        raise AssertionError(
+            f"{name} does not generate a valid dataframe in the {case} "
+            "case. The generated dataframe is not equal to the expected "
+            f"dataframe. The error message is: {e}"
+        ) from e
+
+
+def _check_set_output_transform_dataframe(
+    name,
+    transformer_orig,
+    *,
+    dataframe_lib,
+    is_supported_dataframe,
+    create_dataframe,
+    assert_frame_equal,
+    context,
+):
+    """Check that a transformer can output a DataFrame when requested.
+
+    The DataFrame implementation is specified through the parameters of this function.
+
+    Parameters
+    ----------
+    name : str
+        The name of the transformer.
+    transformer_orig : estimator
+        The original transformer instance.
+    dataframe_lib : str
+        The name of the library implementing the DataFrame.
+    is_supported_dataframe : callable
+        A callable that takes a DataFrame instance as input and returns whether or
+        not it is supported by the dataframe library.
+        E.g. `lambda X: isinstance(X, pd.DataFrame)`.
+    create_dataframe : callable
+        A callable taking as parameters `data`, `columns`, and `index` and returning
+        a dataframe. Be aware that `index` can be ignored. For example, polars
+        dataframes will ignore the index.
+    assert_frame_equal : callable
+        A callable taking 2 dataframes to compare if they are equal.
+    context : {"local", "global"}
+        Whether to use a local context by setting `set_output(...)` on the transformer
+        or a global context by using `with config_context(...)`.
+    """
+    # Check transformer.set_output configures the output of transform=dataframe_lib.
+ tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + transformer = clone(transformer_orig) + + X = rng.uniform(size=(20, 5)) + X = _enforce_estimator_tags_X(transformer_orig, X) + y = rng.randint(0, 2, size=20) + y = _enforce_estimator_tags_y(transformer_orig, y) + set_random_state(transformer) + + feature_names_in = [f"col{i}" for i in range(X.shape[1])] + index = [f"index{i}" for i in range(X.shape[0])] + df = create_dataframe(X, columns=feature_names_in, index=index) + + transformer_default = clone(transformer).set_output(transform="default") + outputs_default = _output_from_fit_transform(transformer_default, name, X, df, y) + + if context == "local": + transformer_df = clone(transformer).set_output(transform=dataframe_lib) + context_to_use = nullcontext() + else: # global + transformer_df = clone(transformer) + context_to_use = config_context(transform_output=dataframe_lib) + + try: + with context_to_use: + outputs_df = _output_from_fit_transform(transformer_df, name, X, df, y) + except ValueError as e: + # transformer does not support sparse data + capitalized_lib = dataframe_lib.capitalize() + error_message = str(e) + assert ( + f"{capitalized_lib} output does not support sparse data." in error_message + or "The transformer outputs a scipy sparse matrix." in error_message + ), e + return + + for case in outputs_default: + _check_generated_dataframe( + name, + case, + index, + outputs_default[case], + outputs_df[case], + is_supported_dataframe, + create_dataframe, + assert_frame_equal, + ) + + +def _check_set_output_transform_pandas_context(name, transformer_orig, context): + try: + import pandas as pd + except ImportError: # pragma: no cover + raise SkipTest("pandas is not installed: not checking set output") + + _check_set_output_transform_dataframe( + name, + transformer_orig, + dataframe_lib="pandas", + is_supported_dataframe=lambda X: isinstance(X, pd.DataFrame), + create_dataframe=lambda X, columns, index: pd.DataFrame( + X, columns=columns, copy=False, index=index + ), + assert_frame_equal=pd.testing.assert_frame_equal, + context=context, + ) + + +def check_set_output_transform_pandas(name, transformer_orig): + _check_set_output_transform_pandas_context(name, transformer_orig, "local") + + +def check_global_output_transform_pandas(name, transformer_orig): + _check_set_output_transform_pandas_context(name, transformer_orig, "global") + + +def _check_set_output_transform_polars_context(name, transformer_orig, context): + try: + import polars as pl + from polars.testing import assert_frame_equal + except ImportError: # pragma: no cover + raise SkipTest("polars is not installed: not checking set output") + + def create_dataframe(X, columns, index): + if isinstance(columns, np.ndarray): + columns = columns.tolist() + + return pl.DataFrame(X, schema=columns, orient="row") + + _check_set_output_transform_dataframe( + name, + transformer_orig, + dataframe_lib="polars", + is_supported_dataframe=lambda X: isinstance(X, pl.DataFrame), + create_dataframe=create_dataframe, + assert_frame_equal=assert_frame_equal, + context=context, + ) + + +def check_set_output_transform_polars(name, transformer_orig): + _check_set_output_transform_polars_context(name, transformer_orig, "local") + + +def check_global_set_output_transform_polars(name, transformer_orig): + _check_set_output_transform_polars_context(name, transformer_orig, "global") diff --git 
a/venv/lib/python3.10/site-packages/sklearn/utils/fixes.py b/venv/lib/python3.10/site-packages/sklearn/utils/fixes.py new file mode 100644 index 0000000000000000000000000000000000000000..e97062390920d4ab0306dc946ef9d434f84e1628 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/fixes.py @@ -0,0 +1,394 @@ +"""Compatibility fixes for older version of python, numpy and scipy + +If you add content to this file, please give the version of the package +at which the fix is no longer needed. +""" +# Authors: Emmanuelle Gouillart +# Gael Varoquaux +# Fabian Pedregosa +# Lars Buitinck +# +# License: BSD 3 clause + + +import numpy as np +import scipy +import scipy.sparse.linalg +import scipy.stats +import threadpoolctl + +import sklearn + +from ..externals._packaging.version import parse as parse_version +from .deprecation import deprecated + +np_version = parse_version(np.__version__) +np_base_version = parse_version(np_version.base_version) +sp_version = parse_version(scipy.__version__) +sp_base_version = parse_version(sp_version.base_version) + +# TODO: We can consider removing the containers and importing +# directly from SciPy when sparse matrices will be deprecated. +CSR_CONTAINERS = [scipy.sparse.csr_matrix] +CSC_CONTAINERS = [scipy.sparse.csc_matrix] +COO_CONTAINERS = [scipy.sparse.coo_matrix] +LIL_CONTAINERS = [scipy.sparse.lil_matrix] +DOK_CONTAINERS = [scipy.sparse.dok_matrix] +BSR_CONTAINERS = [scipy.sparse.bsr_matrix] +DIA_CONTAINERS = [scipy.sparse.dia_matrix] + +if parse_version(scipy.__version__) >= parse_version("1.8"): + # Sparse Arrays have been added in SciPy 1.8 + # TODO: When SciPy 1.8 is the minimum supported version, + # those list can be created directly without this condition. + # See: https://github.com/scikit-learn/scikit-learn/issues/27090 + CSR_CONTAINERS.append(scipy.sparse.csr_array) + CSC_CONTAINERS.append(scipy.sparse.csc_array) + COO_CONTAINERS.append(scipy.sparse.coo_array) + LIL_CONTAINERS.append(scipy.sparse.lil_array) + DOK_CONTAINERS.append(scipy.sparse.dok_array) + BSR_CONTAINERS.append(scipy.sparse.bsr_array) + DIA_CONTAINERS.append(scipy.sparse.dia_array) + +try: + from scipy.optimize._linesearch import line_search_wolfe1, line_search_wolfe2 +except ImportError: # SciPy < 1.8 + from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1 # type: ignore # noqa + + +def _object_dtype_isnan(X): + return X != X + + +# Rename the `method` kwarg to `interpolation` for NumPy < 1.22, because +# `interpolation` kwarg was deprecated in favor of `method` in NumPy >= 1.22. +def _percentile(a, q, *, method="linear", **kwargs): + return np.percentile(a, q, interpolation=method, **kwargs) + + +if np_version < parse_version("1.22"): + percentile = _percentile +else: # >= 1.22 + from numpy import percentile # type: ignore # noqa + + +# compatibility fix for threadpoolctl >= 3.0.0 +# since version 3 it's possible to setup a global threadpool controller to avoid +# looping through all loaded shared libraries each time. +# the global controller is created during the first call to threadpoolctl. 
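# [Editor's illustration, not part of the patch] The compatibility helpers
# defined just below (`_get_threadpool_controller`, `threadpool_limits`,
# `threadpool_info`) expose one API whether or not threadpoolctl >= 3 (which
# adds the global ThreadpoolController) is installed. A minimal usage sketch,
# assuming `sklearn.utils.fixes` is importable:
#
#     from sklearn.utils.fixes import threadpool_limits, threadpool_info
#
#     with threadpool_limits(limits=1, user_api="blas"):
#         ...  # BLAS-backed calls in this block are restricted to one thread
#     print(threadpool_info())  # list of dicts describing loaded threadpools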
+def _get_threadpool_controller(): + if not hasattr(threadpoolctl, "ThreadpoolController"): + return None + + if not hasattr(sklearn, "_sklearn_threadpool_controller"): + sklearn._sklearn_threadpool_controller = threadpoolctl.ThreadpoolController() + + return sklearn._sklearn_threadpool_controller + + +def threadpool_limits(limits=None, user_api=None): + controller = _get_threadpool_controller() + if controller is not None: + return controller.limit(limits=limits, user_api=user_api) + else: + return threadpoolctl.threadpool_limits(limits=limits, user_api=user_api) + + +threadpool_limits.__doc__ = threadpoolctl.threadpool_limits.__doc__ + + +def threadpool_info(): + controller = _get_threadpool_controller() + if controller is not None: + return controller.info() + else: + return threadpoolctl.threadpool_info() + + +threadpool_info.__doc__ = threadpoolctl.threadpool_info.__doc__ + + +@deprecated( + "The function `delayed` has been moved from `sklearn.utils.fixes` to " + "`sklearn.utils.parallel`. This import path will be removed in 1.5." +) +def delayed(function): + from sklearn.utils.parallel import delayed + + return delayed(function) + + +# TODO: Remove when SciPy 1.11 is the minimum supported version +def _mode(a, axis=0): + if sp_version >= parse_version("1.9.0"): + mode = scipy.stats.mode(a, axis=axis, keepdims=True) + if sp_version >= parse_version("1.10.999"): + # scipy.stats.mode has changed returned array shape with axis=None + # and keepdims=True, see https://github.com/scipy/scipy/pull/17561 + if axis is None: + mode = np.ravel(mode) + return mode + return scipy.stats.mode(a, axis=axis) + + +# TODO: Remove when Scipy 1.12 is the minimum supported version +if sp_base_version >= parse_version("1.12.0"): + _sparse_linalg_cg = scipy.sparse.linalg.cg +else: + + def _sparse_linalg_cg(A, b, **kwargs): + if "rtol" in kwargs: + kwargs["tol"] = kwargs.pop("rtol") + if "atol" not in kwargs: + kwargs["atol"] = "legacy" + return scipy.sparse.linalg.cg(A, b, **kwargs) + + +# TODO: Fuse the modern implementations of _sparse_min_max and _sparse_nan_min_max +# into the public min_max_axis function when Scipy 1.11 is the minimum supported +# version and delete the backport in the else branch below. +if sp_base_version >= parse_version("1.11.0"): + + def _sparse_min_max(X, axis): + the_min = X.min(axis=axis) + the_max = X.max(axis=axis) + + if axis is not None: + the_min = the_min.toarray().ravel() + the_max = the_max.toarray().ravel() + + return the_min, the_max + + def _sparse_nan_min_max(X, axis): + the_min = X.nanmin(axis=axis) + the_max = X.nanmax(axis=axis) + + if axis is not None: + the_min = the_min.toarray().ravel() + the_max = the_max.toarray().ravel() + + return the_min, the_max + +else: + # This code is mostly taken from scipy 0.14 and extended to handle nans, see + # https://github.com/scikit-learn/scikit-learn/pull/11196 + def _minor_reduce(X, ufunc): + major_index = np.flatnonzero(np.diff(X.indptr)) + + # reduceat tries casts X.indptr to intp, which errors + # if it is int64 on a 32 bit system. 
+ # Reinitializing prevents this where possible, see #13737 + X = type(X)((X.data, X.indices, X.indptr), shape=X.shape) + value = ufunc.reduceat(X.data, X.indptr[major_index]) + return major_index, value + + def _min_or_max_axis(X, axis, min_or_max): + N = X.shape[axis] + if N == 0: + raise ValueError("zero-size array to reduction operation") + M = X.shape[1 - axis] + mat = X.tocsc() if axis == 0 else X.tocsr() + mat.sum_duplicates() + major_index, value = _minor_reduce(mat, min_or_max) + not_full = np.diff(mat.indptr)[major_index] < N + value[not_full] = min_or_max(value[not_full], 0) + mask = value != 0 + major_index = np.compress(mask, major_index) + value = np.compress(mask, value) + + if axis == 0: + res = scipy.sparse.coo_matrix( + (value, (np.zeros(len(value)), major_index)), + dtype=X.dtype, + shape=(1, M), + ) + else: + res = scipy.sparse.coo_matrix( + (value, (major_index, np.zeros(len(value)))), + dtype=X.dtype, + shape=(M, 1), + ) + return res.A.ravel() + + def _sparse_min_or_max(X, axis, min_or_max): + if axis is None: + if 0 in X.shape: + raise ValueError("zero-size array to reduction operation") + zero = X.dtype.type(0) + if X.nnz == 0: + return zero + m = min_or_max.reduce(X.data.ravel()) + if X.nnz != np.prod(X.shape): + m = min_or_max(zero, m) + return m + if axis < 0: + axis += 2 + if (axis == 0) or (axis == 1): + return _min_or_max_axis(X, axis, min_or_max) + else: + raise ValueError("invalid axis, use 0 for rows, or 1 for columns") + + def _sparse_min_max(X, axis): + return ( + _sparse_min_or_max(X, axis, np.minimum), + _sparse_min_or_max(X, axis, np.maximum), + ) + + def _sparse_nan_min_max(X, axis): + return ( + _sparse_min_or_max(X, axis, np.fmin), + _sparse_min_or_max(X, axis, np.fmax), + ) + + +# For +1.25 NumPy versions exceptions and warnings are being moved +# to a dedicated submodule. +if np_version >= parse_version("1.25.0"): + from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning +else: + from numpy import ComplexWarning, VisibleDeprecationWarning # type: ignore # noqa + + +# TODO: Remove when Scipy 1.6 is the minimum supported version +try: + from scipy.integrate import trapezoid # type: ignore # noqa +except ImportError: + from scipy.integrate import trapz as trapezoid # type: ignore # noqa + + +# TODO: Adapt when Pandas > 2.2 is the minimum supported version +def pd_fillna(pd, frame): + pd_version = parse_version(pd.__version__).base_version + if parse_version(pd_version) < parse_version("2.2"): + frame = frame.fillna(value=np.nan) + else: + infer_objects_kwargs = ( + {} if parse_version(pd_version) >= parse_version("3") else {"copy": False} + ) + with pd.option_context("future.no_silent_downcasting", True): + frame = frame.fillna(value=np.nan).infer_objects(**infer_objects_kwargs) + return frame + + +# TODO: remove when SciPy 1.12 is the minimum supported version +def _preserve_dia_indices_dtype( + sparse_container, original_container_format, requested_sparse_format +): + """Preserve indices dtype for SciPy < 1.12 when converting from DIA to CSR/CSC. + + For SciPy < 1.12, DIA arrays indices are upcasted to `np.int64` that is + inconsistent with DIA matrices. We downcast the indices dtype to `np.int32` to + be consistent with DIA matrices. + + The converted indices arrays are affected back inplace to the sparse container. + + Parameters + ---------- + sparse_container : sparse container + Sparse container to be checked. + requested_sparse_format : str or bool + The type of format of `sparse_container`. 
+ + Notes + ----- + See https://github.com/scipy/scipy/issues/19245 for more details. + """ + if original_container_format == "dia_array" and requested_sparse_format in ( + "csr", + "coo", + ): + if requested_sparse_format == "csr": + index_dtype = _smallest_admissible_index_dtype( + arrays=(sparse_container.indptr, sparse_container.indices), + maxval=max(sparse_container.nnz, sparse_container.shape[1]), + check_contents=True, + ) + sparse_container.indices = sparse_container.indices.astype( + index_dtype, copy=False + ) + sparse_container.indptr = sparse_container.indptr.astype( + index_dtype, copy=False + ) + else: # requested_sparse_format == "coo" + index_dtype = _smallest_admissible_index_dtype( + maxval=max(sparse_container.shape) + ) + sparse_container.row = sparse_container.row.astype(index_dtype, copy=False) + sparse_container.col = sparse_container.col.astype(index_dtype, copy=False) + + +# TODO: remove when SciPy 1.12 is the minimum supported version +def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False): + """Based on input (integer) arrays `a`, determine a suitable index data + type that can hold the data in the arrays. + + This function returns `np.int64` if it either required by `maxval` or based on the + largest precision of the dtype of the arrays passed as argument, or by the their + contents (when `check_contents is True`). If none of the condition requires + `np.int64` then this function returns `np.int32`. + + Parameters + ---------- + arrays : ndarray or tuple of ndarrays, default=() + Input arrays whose types/contents to check. + + maxval : float, default=None + Maximum value needed. + + check_contents : bool, default=False + Whether to check the values in the arrays and not just their types. + By default, check only the types. + + Returns + ------- + dtype : {np.int32, np.int64} + Suitable index data type (int32 or int64). + """ + + int32min = np.int32(np.iinfo(np.int32).min) + int32max = np.int32(np.iinfo(np.int32).max) + + if maxval is not None: + if maxval > np.iinfo(np.int64).max: + raise ValueError( + f"maxval={maxval} is to large to be represented as np.int64." + ) + if maxval > int32max: + return np.int64 + + if isinstance(arrays, np.ndarray): + arrays = (arrays,) + + for arr in arrays: + if not isinstance(arr, np.ndarray): + raise TypeError( + f"Arrays should be of type np.ndarray, got {type(arr)} instead." + ) + if not np.issubdtype(arr.dtype, np.integer): + raise ValueError( + f"Array dtype {arr.dtype} is not supported for index dtype. We expect " + "integral values." + ) + if not np.can_cast(arr.dtype, np.int32): + if not check_contents: + # when `check_contents` is False, we stay on the safe side and return + # np.int64. 
+ return np.int64 + if arr.size == 0: + # a bigger type not needed yet, let's look at the next array + continue + else: + maxval = arr.max() + minval = arr.min() + if minval < int32min or maxval > int32max: + # a big index type is actually needed + return np.int64 + + return np.int32 + + +# TODO: Remove when Scipy 1.12 is the minimum supported version +if sp_version < parse_version("1.12"): + from ..externals._scipy.sparse.csgraph import laplacian # type: ignore # noqa +else: + from scipy.sparse.csgraph import laplacian # type: ignore # noqa # pragma: no cover diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/graph.py b/venv/lib/python3.10/site-packages/sklearn/utils/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..06b2e152101a9c2fe843a0704ea17080ba73a21b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/graph.py @@ -0,0 +1,166 @@ +""" +The :mod:`sklearn.utils.graph` module includes graph utilities and algorithms. +""" + +# Authors: Aric Hagberg +# Gael Varoquaux +# Jake Vanderplas +# License: BSD 3 clause + +import numpy as np +from scipy import sparse + +from ..metrics.pairwise import pairwise_distances +from ._param_validation import Integral, Interval, validate_params + + +############################################################################### +# Path and connected component analysis. +# Code adapted from networkx +@validate_params( + { + "graph": ["array-like", "sparse matrix"], + "source": [Interval(Integral, 0, None, closed="left")], + "cutoff": [Interval(Integral, 0, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def single_source_shortest_path_length(graph, source, *, cutoff=None): + """Return the length of the shortest path from source to all reachable nodes. + + Parameters + ---------- + graph : {array-like, sparse matrix} of shape (n_nodes, n_nodes) + Adjacency matrix of the graph. Sparse matrix of format LIL is + preferred. + + source : int + Start node for path. + + cutoff : int, default=None + Depth to stop the search - only paths of length <= cutoff are returned. + + Returns + ------- + paths : dict + Reachable end nodes mapped to length of path from source, + i.e. `{end: path_length}`. + + Examples + -------- + >>> from sklearn.utils.graph import single_source_shortest_path_length + >>> import numpy as np + >>> graph = np.array([[ 0, 1, 0, 0], + ... [ 1, 0, 1, 0], + ... [ 0, 1, 0, 0], + ... [ 0, 0, 0, 0]]) + >>> single_source_shortest_path_length(graph, 0) + {0: 0, 1: 1, 2: 2} + >>> graph = np.ones((6, 6)) + >>> sorted(single_source_shortest_path_length(graph, 2).items()) + [(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)] + """ + if sparse.issparse(graph): + graph = graph.tolil() + else: + graph = sparse.lil_matrix(graph) + seen = {} # level (number of hops) when seen in BFS + level = 0 # the current level + next_level = [source] # dict of nodes to check at next level + while next_level: + this_level = next_level # advance to next level + next_level = set() # and start a new list (fringe) + for v in this_level: + if v not in seen: + seen[v] = level # set the level of vertex v + next_level.update(graph.rows[v]) + if cutoff is not None and cutoff <= level: + break + level += 1 + return seen # return all path lengths as dictionary + + +def _fix_connected_components( + X, + graph, + n_connected_components, + component_labels, + mode="distance", + metric="euclidean", + **kwargs, +): + """Add connections to sparse graph to connect unconnected components. 
+ + For each pair of unconnected components, compute all pairwise distances + from one component to the other, and add a connection on the closest pair + of samples. This is a hacky way to get a graph with a single connected + component, which is necessary for example to compute a shortest path + between all pairs of samples in the graph. + + Parameters + ---------- + X : array of shape (n_samples, n_features) or (n_samples, n_samples) + Features to compute the pairwise distances. If `metric = + "precomputed"`, X is the matrix of pairwise distances. + + graph : sparse matrix of shape (n_samples, n_samples) + Graph of connection between samples. + + n_connected_components : int + Number of connected components, as computed by + `scipy.sparse.csgraph.connected_components`. + + component_labels : array of shape (n_samples) + Labels of connected components, as computed by + `scipy.sparse.csgraph.connected_components`. + + mode : {'connectivity', 'distance'}, default='distance' + Type of graph matrix: 'connectivity' corresponds to the connectivity + matrix with ones and zeros, and 'distance' corresponds to the distances + between neighbors according to the given metric. + + metric : str + Metric used in `sklearn.metrics.pairwise.pairwise_distances`. + + kwargs : kwargs + Keyword arguments passed to + `sklearn.metrics.pairwise.pairwise_distances`. + + Returns + ------- + graph : sparse matrix of shape (n_samples, n_samples) + Graph of connection between samples, with a single connected component. + """ + if metric == "precomputed" and sparse.issparse(X): + raise RuntimeError( + "_fix_connected_components with metric='precomputed' requires the " + "full distance matrix in X, and does not work with a sparse " + "neighbors graph." + ) + + for i in range(n_connected_components): + idx_i = np.flatnonzero(component_labels == i) + Xi = X[idx_i] + for j in range(i): + idx_j = np.flatnonzero(component_labels == j) + Xj = X[idx_j] + + if metric == "precomputed": + D = X[np.ix_(idx_i, idx_j)] + else: + D = pairwise_distances(Xi, Xj, metric=metric, **kwargs) + + ii, jj = np.unravel_index(D.argmin(axis=None), D.shape) + if mode == "connectivity": + graph[idx_i[ii], idx_j[jj]] = 1 + graph[idx_j[jj], idx_i[ii]] = 1 + elif mode == "distance": + graph[idx_i[ii], idx_j[jj]] = D[ii, jj] + graph[idx_j[jj], idx_i[ii]] = D[ii, jj] + else: + raise ValueError( + "Unknown mode=%r, should be one of ['connectivity', 'distance']." + % mode + ) + + return graph diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py b/venv/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py new file mode 100644 index 0000000000000000000000000000000000000000..bb98d2f08b93e4498d55f813c460fb4cfffe26fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py @@ -0,0 +1,22 @@ +""" +The :mod:`sklearn.utils.metadata_routing` module includes utilities to route +metadata within scikit-learn estimators. +""" + +# This module is not a separate sub-folder since that would result in a circular +# import issue. 
+# +# Author: Adrin Jalali +# License: BSD 3 clause + +from ._metadata_requests import WARN, UNUSED, UNCHANGED # noqa +from ._metadata_requests import get_routing_for_object # noqa +from ._metadata_requests import MetadataRouter # noqa +from ._metadata_requests import MetadataRequest # noqa +from ._metadata_requests import MethodMapping # noqa +from ._metadata_requests import process_routing # noqa +from ._metadata_requests import _MetadataRequester # noqa +from ._metadata_requests import _routing_enabled # noqa +from ._metadata_requests import _raise_for_params # noqa +from ._metadata_requests import _RoutingNotSupportedMixin # noqa +from ._metadata_requests import _raise_for_unsupported_routing # noqa diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/metaestimators.py b/venv/lib/python3.10/site-packages/sklearn/utils/metaestimators.py new file mode 100644 index 0000000000000000000000000000000000000000..639e000dd77a7a8908d64235a5b2ff78111888b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/metaestimators.py @@ -0,0 +1,165 @@ +""" +The :mod:`sklearn.utils.metaestimators` module includes utilities for meta-estimators. +""" + +# Author: Joel Nothman +# Andreas Mueller +# License: BSD +from abc import ABCMeta, abstractmethod +from contextlib import suppress +from typing import Any, List + +import numpy as np + +from ..base import BaseEstimator +from ..utils import _safe_indexing +from ..utils._tags import _safe_tags +from ._available_if import available_if + +__all__ = ["available_if"] + + +class _BaseComposition(BaseEstimator, metaclass=ABCMeta): + """Handles parameter management for classifiers composed of named estimators.""" + + steps: List[Any] + + @abstractmethod + def __init__(self): + pass + + def _get_params(self, attr, deep=True): + out = super().get_params(deep=deep) + if not deep: + return out + + estimators = getattr(self, attr) + try: + out.update(estimators) + except (TypeError, ValueError): + # Ignore TypeError for cases where estimators is not a list of + # (name, estimator) and ignore ValueError when the list is not + # formatted correctly. This is to prevent errors when calling + # `set_params`. `BaseEstimator.set_params` calls `get_params` which + # can error for invalid values for `estimators`. + return out + + for name, estimator in estimators: + if hasattr(estimator, "get_params"): + for key, value in estimator.get_params(deep=True).items(): + out["%s__%s" % (name, key)] = value + return out + + def _set_params(self, attr, **params): + # Ensure strict ordering of parameter setting: + # 1. All steps + if attr in params: + setattr(self, attr, params.pop(attr)) + # 2. Replace items with estimators in params + items = getattr(self, attr) + if isinstance(items, list) and items: + # Get item names used to identify valid names in params + # `zip` raises a TypeError when `items` does not contains + # elements of length 2 + with suppress(TypeError): + item_names, _ = zip(*items) + for name in list(params.keys()): + if "__" not in name and name in item_names: + self._replace_estimator(attr, name, params.pop(name)) + + # 3. 
Step parameters and other initialisation arguments + super().set_params(**params) + return self + + def _replace_estimator(self, attr, name, new_val): + # assumes `name` is a valid estimator name + new_estimators = list(getattr(self, attr)) + for i, (estimator_name, _) in enumerate(new_estimators): + if estimator_name == name: + new_estimators[i] = (name, new_val) + break + setattr(self, attr, new_estimators) + + def _validate_names(self, names): + if len(set(names)) != len(names): + raise ValueError("Names provided are not unique: {0!r}".format(list(names))) + invalid_names = set(names).intersection(self.get_params(deep=False)) + if invalid_names: + raise ValueError( + "Estimator names conflict with constructor arguments: {0!r}".format( + sorted(invalid_names) + ) + ) + invalid_names = [name for name in names if "__" in name] + if invalid_names: + raise ValueError( + "Estimator names must not contain __: got {0!r}".format(invalid_names) + ) + + +def _safe_split(estimator, X, y, indices, train_indices=None): + """Create subset of dataset and properly handle kernels. + + Slice X, y according to indices for cross-validation, but take care of + precomputed kernel-matrices or pairwise affinities / distances. + + If ``estimator._pairwise is True``, X needs to be square and + we slice rows and columns. If ``train_indices`` is not None, + we slice rows using ``indices`` (assumed the test set) and columns + using ``train_indices``, indicating the training set. + + Labels y will always be indexed only along the first axis. + + Parameters + ---------- + estimator : object + Estimator to determine whether we should slice only rows or rows and + columns. + + X : array-like, sparse matrix or iterable + Data to be indexed. If ``estimator._pairwise is True``, + this needs to be a square array-like or sparse matrix. + + y : array-like, sparse matrix or iterable + Targets to be indexed. + + indices : array of int + Rows to select from X and y. + If ``estimator._pairwise is True`` and ``train_indices is None`` + then ``indices`` will also be used to slice columns. + + train_indices : array of int or None, default=None + If ``estimator._pairwise is True`` and ``train_indices is not None``, + then ``train_indices`` will be use to slice the columns of X. + + Returns + ------- + X_subset : array-like, sparse matrix or list + Indexed data. + + y_subset : array-like, sparse matrix or list + Indexed targets. + + """ + if _safe_tags(estimator, key="pairwise"): + if not hasattr(X, "shape"): + raise ValueError( + "Precomputed kernels or affinity matrices have " + "to be passed as arrays or sparse matrices." + ) + # X is a precomputed square kernel matrix + if X.shape[0] != X.shape[1]: + raise ValueError("X should be a square kernel matrix") + if train_indices is None: + X_subset = X[np.ix_(indices, indices)] + else: + X_subset = X[np.ix_(indices, train_indices)] + else: + X_subset = _safe_indexing(X, indices) + + if y is not None: + y_subset = _safe_indexing(y, indices) + else: + y_subset = None + + return X_subset, y_subset diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/multiclass.py b/venv/lib/python3.10/site-packages/sklearn/utils/multiclass.py new file mode 100644 index 0000000000000000000000000000000000000000..a4b23427e5b7053c397f32b71b265870405e67e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/multiclass.py @@ -0,0 +1,553 @@ +""" +The :mod:`sklearn.utils.multiclass` module includes utilities to handle +multiclass/multioutput target in classifiers. 
+""" + +# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi +# +# License: BSD 3 clause +import warnings +from collections.abc import Sequence +from itertools import chain + +import numpy as np +from scipy.sparse import issparse + +from ..utils._array_api import get_namespace +from ..utils.fixes import VisibleDeprecationWarning +from .validation import _assert_all_finite, check_array + + +def _unique_multiclass(y): + xp, is_array_api_compliant = get_namespace(y) + if hasattr(y, "__array__") or is_array_api_compliant: + return xp.unique_values(xp.asarray(y)) + else: + return set(y) + + +def _unique_indicator(y): + xp, _ = get_namespace(y) + return xp.arange( + check_array(y, input_name="y", accept_sparse=["csr", "csc", "coo"]).shape[1] + ) + + +_FN_UNIQUE_LABELS = { + "binary": _unique_multiclass, + "multiclass": _unique_multiclass, + "multilabel-indicator": _unique_indicator, +} + + +def unique_labels(*ys): + """Extract an ordered array of unique labels. + + We don't allow: + - mix of multilabel and multiclass (single label) targets + - mix of label indicator matrix and anything else, + because there are no explicit labels) + - mix of label indicator matrices of different sizes + - mix of string and integer labels + + At the moment, we also don't allow "multiclass-multioutput" input type. + + Parameters + ---------- + *ys : array-likes + Label values. + + Returns + ------- + out : ndarray of shape (n_unique_labels,) + An ordered array of unique labels. + + Examples + -------- + >>> from sklearn.utils.multiclass import unique_labels + >>> unique_labels([3, 5, 5, 5, 7, 7]) + array([3, 5, 7]) + >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4]) + array([1, 2, 3, 4]) + >>> unique_labels([1, 2, 10], [5, 11]) + array([ 1, 2, 5, 10, 11]) + """ + xp, is_array_api_compliant = get_namespace(*ys) + if not ys: + raise ValueError("No argument has been passed.") + # Check that we don't mix label format + + ys_types = set(type_of_target(x) for x in ys) + if ys_types == {"binary", "multiclass"}: + ys_types = {"multiclass"} + + if len(ys_types) > 1: + raise ValueError("Mix type of y not allowed, got types %s" % ys_types) + + label_type = ys_types.pop() + + # Check consistency for the indicator format + if ( + label_type == "multilabel-indicator" + and len( + set( + check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys + ) + ) + > 1 + ): + raise ValueError( + "Multi-label binary indicator input with different numbers of labels" + ) + + # Get the unique set of labels + _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None) + if not _unique_labels: + raise ValueError("Unknown label type: %s" % repr(ys)) + + if is_array_api_compliant: + # array_api does not allow for mixed dtypes + unique_ys = xp.concat([_unique_labels(y) for y in ys]) + return xp.unique_values(unique_ys) + + ys_labels = set(chain.from_iterable((i for i in _unique_labels(y)) for y in ys)) + # Check that we don't mix string type with number type + if len(set(isinstance(label, str) for label in ys_labels)) > 1: + raise ValueError("Mix of label input types (string and number)") + + return xp.asarray(sorted(ys_labels)) + + +def _is_integral_float(y): + xp, is_array_api_compliant = get_namespace(y) + return xp.isdtype(y.dtype, "real floating") and bool( + xp.all(xp.astype((xp.astype(y, xp.int64)), y.dtype) == y) + ) + + +def is_multilabel(y): + """Check if ``y`` is in a multilabel format. + + Parameters + ---------- + y : ndarray of shape (n_samples,) + Target values. 
+ + Returns + ------- + out : bool + Return ``True``, if ``y`` is in a multilabel format, else ```False``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils.multiclass import is_multilabel + >>> is_multilabel([0, 1, 0, 1]) + False + >>> is_multilabel([[1], [0, 2], []]) + False + >>> is_multilabel(np.array([[1, 0], [0, 0]])) + True + >>> is_multilabel(np.array([[1], [0], [0]])) + False + >>> is_multilabel(np.array([[1, 0, 0]])) + True + """ + xp, is_array_api_compliant = get_namespace(y) + if hasattr(y, "__array__") or isinstance(y, Sequence) or is_array_api_compliant: + # DeprecationWarning will be replaced by ValueError, see NEP 34 + # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html + check_y_kwargs = dict( + accept_sparse=True, + allow_nd=True, + force_all_finite=False, + ensure_2d=False, + ensure_min_samples=0, + ensure_min_features=0, + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", VisibleDeprecationWarning) + try: + y = check_array(y, dtype=None, **check_y_kwargs) + except (VisibleDeprecationWarning, ValueError) as e: + if str(e).startswith("Complex data not supported"): + raise + + # dtype=object should be provided explicitly for ragged arrays, + # see NEP 34 + y = check_array(y, dtype=object, **check_y_kwargs) + + if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1): + return False + + if issparse(y): + if y.format in ("dok", "lil"): + y = y.tocsr() + labels = xp.unique_values(y.data) + return ( + len(y.data) == 0 + or (labels.size == 1 or (labels.size == 2) and (0 in labels)) + and (y.dtype.kind in "biu" or _is_integral_float(labels)) # bool, int, uint + ) + else: + labels = xp.unique_values(y) + + return labels.shape[0] < 3 and ( + xp.isdtype(y.dtype, ("bool", "signed integer", "unsigned integer")) + or _is_integral_float(labels) + ) + + +def check_classification_targets(y): + """Ensure that target y is of a non-regression type. + + Only the following target types (as defined in type_of_target) are allowed: + 'binary', 'multiclass', 'multiclass-multioutput', + 'multilabel-indicator', 'multilabel-sequences' + + Parameters + ---------- + y : array-like + Target values. + """ + y_type = type_of_target(y, input_name="y") + if y_type not in [ + "binary", + "multiclass", + "multiclass-multioutput", + "multilabel-indicator", + "multilabel-sequences", + ]: + raise ValueError( + f"Unknown label type: {y_type}. Maybe you are trying to fit a " + "classifier, which expects discrete classes on a " + "regression target with continuous values." + ) + + +def type_of_target(y, input_name=""): + """Determine the type of data indicated by the target. + + Note that this type is the most specific type that can be inferred. + For example: + + * ``binary`` is more specific but compatible with ``multiclass``. + * ``multiclass`` of integers is more specific but compatible with + ``continuous``. + * ``multilabel-indicator`` is more specific but compatible with + ``multiclass-multioutput``. + + Parameters + ---------- + y : {array-like, sparse matrix} + Target values. If a sparse matrix, `y` is expected to be a + CSR/CSC matrix. + + input_name : str, default="" + The data name used to construct the error message. + + .. versionadded:: 1.1.0 + + Returns + ------- + target_type : str + One of: + + * 'continuous': `y` is an array-like of floats that are not all + integers, and is 1d or a column vector. + * 'continuous-multioutput': `y` is a 2d array of floats that are + not all integers, and both dimensions are of size > 1. 
+ * 'binary': `y` contains <= 2 discrete values and is 1d or a column + vector. + * 'multiclass': `y` contains more than two discrete values, is not a + sequence of sequences, and is 1d or a column vector. + * 'multiclass-multioutput': `y` is a 2d array that contains more + than two discrete values, is not a sequence of sequences, and both + dimensions are of size > 1. + * 'multilabel-indicator': `y` is a label indicator matrix, an array + of two dimensions with at least two columns, and at most 2 unique + values. + * 'unknown': `y` is array-like but none of the above, such as a 3d + array, sequence of sequences, or an array of non-sequence objects. + + Examples + -------- + >>> from sklearn.utils.multiclass import type_of_target + >>> import numpy as np + >>> type_of_target([0.1, 0.6]) + 'continuous' + >>> type_of_target([1, -1, -1, 1]) + 'binary' + >>> type_of_target(['a', 'b', 'a']) + 'binary' + >>> type_of_target([1.0, 2.0]) + 'binary' + >>> type_of_target([1, 0, 2]) + 'multiclass' + >>> type_of_target([1.0, 0.0, 3.0]) + 'multiclass' + >>> type_of_target(['a', 'b', 'c']) + 'multiclass' + >>> type_of_target(np.array([[1, 2], [3, 1]])) + 'multiclass-multioutput' + >>> type_of_target([[1, 2]]) + 'multilabel-indicator' + >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]])) + 'continuous-multioutput' + >>> type_of_target(np.array([[0, 1], [1, 1]])) + 'multilabel-indicator' + """ + xp, is_array_api_compliant = get_namespace(y) + valid = ( + (isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__")) + and not isinstance(y, str) + or is_array_api_compliant + ) + + if not valid: + raise ValueError( + "Expected array-like (array or non-string sequence), got %r" % y + ) + + sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"] + if sparse_pandas: + raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'") + + if is_multilabel(y): + return "multilabel-indicator" + + # DeprecationWarning will be replaced by ValueError, see NEP 34 + # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html + # We therefore catch both deprecation (NumPy < 1.24) warning and + # value error (NumPy >= 1.24). + check_y_kwargs = dict( + accept_sparse=True, + allow_nd=True, + force_all_finite=False, + ensure_2d=False, + ensure_min_samples=0, + ensure_min_features=0, + ) + + with warnings.catch_warnings(): + warnings.simplefilter("error", VisibleDeprecationWarning) + if not issparse(y): + try: + y = check_array(y, dtype=None, **check_y_kwargs) + except (VisibleDeprecationWarning, ValueError) as e: + if str(e).startswith("Complex data not supported"): + raise + + # dtype=object should be provided explicitly for ragged arrays, + # see NEP 34 + y = check_array(y, dtype=object, **check_y_kwargs) + + # The old sequence of sequences format + try: + first_row = y[[0], :] if issparse(y) else y[0] + if ( + not hasattr(first_row, "__array__") + and isinstance(first_row, Sequence) + and not isinstance(first_row, str) + ): + raise ValueError( + "You appear to be using a legacy multi-label data" + " representation. Sequence of sequences are no" + " longer supported; use a binary array or sparse" + " matrix instead - the MultiLabelBinarizer" + " transformer can convert to this format." 
+ ) + except IndexError: + pass + + # Invalid inputs + if y.ndim not in (1, 2): + # Number of dimension greater than 2: [[[1, 2]]] + return "unknown" + if not min(y.shape): + # Empty ndarray: []/[[]] + if y.ndim == 1: + # 1-D empty array: [] + return "binary" # [] + # 2-D empty array: [[]] + return "unknown" + if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str): + # [obj_1] and not ["label_1"] + return "unknown" + + # Check if multioutput + if y.ndim == 2 and y.shape[1] > 1: + suffix = "-multioutput" # [[1, 2], [1, 2]] + else: + suffix = "" # [1, 2, 3] or [[1], [2], [3]] + + # Check float and contains non-integer float values + if xp.isdtype(y.dtype, "real floating"): + # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.] + data = y.data if issparse(y) else y + if xp.any(data != xp.astype(data, int)): + _assert_all_finite(data, input_name=input_name) + return "continuous" + suffix + + # Check multiclass + if issparse(first_row): + first_row = first_row.data + if xp.unique_values(y).shape[0] > 2 or (y.ndim == 2 and len(first_row) > 1): + # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]] + return "multiclass" + suffix + else: + return "binary" # [1, 2] or [["a"], ["b"]] + + +def _check_partial_fit_first_call(clf, classes=None): + """Private helper function for factorizing common classes param logic. + + Estimators that implement the ``partial_fit`` API need to be provided with + the list of possible classes at the first call to partial_fit. + + Subsequent calls to partial_fit should check that ``classes`` is still + consistent with a previous value of ``clf.classes_`` when provided. + + This function returns True if it detects that this was the first call to + ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also + set on ``clf``. + + """ + if getattr(clf, "classes_", None) is None and classes is None: + raise ValueError("classes must be passed on the first call to partial_fit.") + + elif classes is not None: + if getattr(clf, "classes_", None) is not None: + if not np.array_equal(clf.classes_, unique_labels(classes)): + raise ValueError( + "`classes=%r` is not the same as on last call " + "to partial_fit, was: %r" % (classes, clf.classes_) + ) + + else: + # This is the first call to partial_fit + clf.classes_ = unique_labels(classes) + return True + + # classes is None and clf.classes_ has already previously been set: + # nothing to do + return False + + +def class_distribution(y, sample_weight=None): + """Compute class priors from multioutput-multiclass target data. + + Parameters + ---------- + y : {array-like, sparse matrix} of size (n_samples, n_outputs) + The labels for each example. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + classes : list of size n_outputs of ndarray of size (n_classes,) + List of classes for each column. + + n_classes : list of int of size n_outputs + Number of classes in each column. + + class_prior : list of size n_outputs of ndarray of size (n_classes,) + Class distribution of each column. 
+ """ + classes = [] + n_classes = [] + class_prior = [] + + n_samples, n_outputs = y.shape + if sample_weight is not None: + sample_weight = np.asarray(sample_weight) + + if issparse(y): + y = y.tocsc() + y_nnz = np.diff(y.indptr) + + for k in range(n_outputs): + col_nonzero = y.indices[y.indptr[k] : y.indptr[k + 1]] + # separate sample weights for zero and non-zero elements + if sample_weight is not None: + nz_samp_weight = sample_weight[col_nonzero] + zeros_samp_weight_sum = np.sum(sample_weight) - np.sum(nz_samp_weight) + else: + nz_samp_weight = None + zeros_samp_weight_sum = y.shape[0] - y_nnz[k] + + classes_k, y_k = np.unique( + y.data[y.indptr[k] : y.indptr[k + 1]], return_inverse=True + ) + class_prior_k = np.bincount(y_k, weights=nz_samp_weight) + + # An explicit zero was found, combine its weight with the weight + # of the implicit zeros + if 0 in classes_k: + class_prior_k[classes_k == 0] += zeros_samp_weight_sum + + # If an there is an implicit zero and it is not in classes and + # class_prior, make an entry for it + if 0 not in classes_k and y_nnz[k] < y.shape[0]: + classes_k = np.insert(classes_k, 0, 0) + class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum) + + classes.append(classes_k) + n_classes.append(classes_k.shape[0]) + class_prior.append(class_prior_k / class_prior_k.sum()) + else: + for k in range(n_outputs): + classes_k, y_k = np.unique(y[:, k], return_inverse=True) + classes.append(classes_k) + n_classes.append(classes_k.shape[0]) + class_prior_k = np.bincount(y_k, weights=sample_weight) + class_prior.append(class_prior_k / class_prior_k.sum()) + + return (classes, n_classes, class_prior) + + +def _ovr_decision_function(predictions, confidences, n_classes): + """Compute a continuous, tie-breaking OvR decision function from OvO. + + It is important to include a continuous value, not only votes, + to make computing AUC or calibration meaningful. + + Parameters + ---------- + predictions : array-like of shape (n_samples, n_classifiers) + Predicted classes for each binary classifier. + + confidences : array-like of shape (n_samples, n_classifiers) + Decision functions or predicted probabilities for positive class + for each binary classifier. + + n_classes : int + Number of classes. n_classifiers must be + ``n_classes * (n_classes - 1 ) / 2``. + """ + n_samples = predictions.shape[0] + votes = np.zeros((n_samples, n_classes)) + sum_of_confidences = np.zeros((n_samples, n_classes)) + + k = 0 + for i in range(n_classes): + for j in range(i + 1, n_classes): + sum_of_confidences[:, i] -= confidences[:, k] + sum_of_confidences[:, j] += confidences[:, k] + votes[predictions[:, k] == 0, i] += 1 + votes[predictions[:, k] == 1, j] += 1 + k += 1 + + # Monotonically transform the sum_of_confidences to (-1/3, 1/3) + # and add it with votes. The monotonic transformation is + # f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2 + # to ensure that we won't reach the limits and change vote order. + # The motivation is to use confidence levels as a way to break ties in + # the votes without switching any decision made based on a difference + # of 1 vote. 
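# [Editor's note, not part of the patch] Worked example for the transformation
# used below: f(x) = x / (3 * (|x| + 1)) satisfies |f(x)| < 1/3 for any finite
# x, e.g. f(10) = 10 / 33 ~ 0.303 and f(-2) = -2 / 9 ~ -0.222. Two classes with
# different vote counts differ by at least 1, while their added confidence
# terms differ by less than 2/3, so the term below can only break ties and
# never reverses an ordering already decided by the votes.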
+ transformed_confidences = sum_of_confidences / ( + 3 * (np.abs(sum_of_confidences) + 1) + ) + return votes + transformed_confidences diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..dbc6e5f409b173d45084a36fc9feea9cf169df2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/murmurhash.pxd b/venv/lib/python3.10/site-packages/sklearn/utils/murmurhash.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1844be154b39de7d1ec0b069df6bdc2ecb1fa13b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/murmurhash.pxd @@ -0,0 +1,21 @@ +"""Export fast murmurhash C/C++ routines + cython wrappers""" + +cimport numpy as cnp + +# The C API is disabled for now, since it requires -I flags to get +# compilation to work even when these functions are not used. +# cdef extern from "MurmurHash3.h": +# void MurmurHash3_x86_32(void* key, int len, unsigned int seed, +# void* out) +# +# void MurmurHash3_x86_128(void* key, int len, unsigned int seed, +# void* out) +# +# void MurmurHash3_x64_128(void* key, int len, unsigned int seed, +# void* out) + + +cpdef cnp.uint32_t murmurhash3_int_u32(int key, unsigned int seed) +cpdef cnp.int32_t murmurhash3_int_s32(int key, unsigned int seed) +cpdef cnp.uint32_t murmurhash3_bytes_u32(bytes key, unsigned int seed) +cpdef cnp.int32_t murmurhash3_bytes_s32(bytes key, unsigned int seed) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/random.py b/venv/lib/python3.10/site-packages/sklearn/utils/random.py new file mode 100644 index 0000000000000000000000000000000000000000..1dfe8d83a94b354d86ad9c7e6049d9940f13ec00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/random.py @@ -0,0 +1,103 @@ +""" +The mod:`sklearn.utils.random` module includes utilities for random sampling. +""" + +# Author: Hamzeh Alsalhi +# +# License: BSD 3 clause +import array + +import numpy as np +import scipy.sparse as sp + +from . import check_random_state +from ._random import sample_without_replacement + +__all__ = ["sample_without_replacement"] + + +def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None): + """Generate a sparse random matrix given column class distributions + + Parameters + ---------- + n_samples : int, + Number of samples to draw in each column. + + classes : list of size n_outputs of arrays of size (n_classes,) + List of classes for each column. + + class_probability : list of size n_outputs of arrays of \ + shape (n_classes,), default=None + Class distribution of each column. If None, uniform distribution is + assumed. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the sampled classes. + See :term:`Glossary `. 
+ + Returns + ------- + random_matrix : sparse csc matrix of size (n_samples, n_outputs) + + """ + data = array.array("i") + indices = array.array("i") + indptr = array.array("i", [0]) + + for j in range(len(classes)): + classes[j] = np.asarray(classes[j]) + if classes[j].dtype.kind != "i": + raise ValueError("class dtype %s is not supported" % classes[j].dtype) + classes[j] = classes[j].astype(np.int64, copy=False) + + # use uniform distribution if no class_probability is given + if class_probability is None: + class_prob_j = np.empty(shape=classes[j].shape[0]) + class_prob_j.fill(1 / classes[j].shape[0]) + else: + class_prob_j = np.asarray(class_probability[j]) + + if not np.isclose(np.sum(class_prob_j), 1.0): + raise ValueError( + "Probability array at index {0} does not sum to one".format(j) + ) + + if class_prob_j.shape[0] != classes[j].shape[0]: + raise ValueError( + "classes[{0}] (length {1}) and " + "class_probability[{0}] (length {2}) have " + "different length.".format( + j, classes[j].shape[0], class_prob_j.shape[0] + ) + ) + + # If 0 is not present in the classes insert it with a probability 0.0 + if 0 not in classes[j]: + classes[j] = np.insert(classes[j], 0, 0) + class_prob_j = np.insert(class_prob_j, 0, 0.0) + + # If there are nonzero classes choose randomly using class_probability + rng = check_random_state(random_state) + if classes[j].shape[0] > 1: + index_class_0 = np.flatnonzero(classes[j] == 0).item() + p_nonzero = 1 - class_prob_j[index_class_0] + nnz = int(n_samples * p_nonzero) + ind_sample = sample_without_replacement( + n_population=n_samples, n_samples=nnz, random_state=random_state + ) + indices.extend(ind_sample) + + # Normalize probabilities for the nonzero elements + classes_j_nonzero = classes[j] != 0 + class_probability_nz = class_prob_j[classes_j_nonzero] + class_probability_nz_norm = class_probability_nz / np.sum( + class_probability_nz + ) + classes_ind = np.searchsorted( + class_probability_nz_norm.cumsum(), rng.uniform(size=nnz) + ) + data.extend(classes[j][classes_j_nonzero][classes_ind]) + indptr.append(len(indices)) + + return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
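# [Editor's illustration, not part of the patch] A minimal sketch of how the
# private helper `_random_choice_csc` added in sklearn/utils/random.py above
# can be exercised; being private, its signature may change between versions.
import numpy as np

from sklearn.utils.random import _random_choice_csc

classes = [np.array([0, 1, 2]), np.array([0, 5])]          # classes per output column
probs = [np.array([0.5, 0.3, 0.2]), np.array([0.8, 0.2])]  # each must sum to 1
out = _random_choice_csc(
    n_samples=10, classes=classes, class_probability=probs, random_state=0
)
print(out.shape)      # (10, 2) -- a scipy.sparse CSC matrix
print(out.toarray())  # zeros stay implicit; nonzeros drawn from each column's classes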