diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9e4f0a62136e53d2d7307e637604e7d76fb83d5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26d968041733d7223b9d35604897e94200cfb561 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bbc9d8d0d50e6ba281c96f8e5c03a384cc3275e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aadafe4a19ac8a287d5882322dbdec5cec958f0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b49f19b1f1b5045b4010400e08278f598e8e628 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e9ef22e38352ba07684a2354041ebc8b9d64128 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaf6c0977ac935190daa13b6b1687cc06e02aa04 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..886946e7f9ee7d582a5324dc5274bcf4018ed588 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bcde4ea4a185fb14186d219150b9ec6de310820 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6565864f89a277c8918b403262802a7807672c40 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4ef8bb5f61316cc61c9874fbf81aa875661886d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7c7e8be85c8e67e7b52bca36c1a9686aee090d0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a540de0d1a42b6205a25b6735d0dc2764a70e6ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24b03442fcd41084de22fa0f5f163fa3c9b291ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5055581dc338584d07dd5e2faa469ca66ad2824d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84e17d5894370f473859731abf9653e5e817383a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cba98d0467532989aabfb741fb72ab36b75a1e5f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6c4c90b72f3e0f150b45b1b45ba8c062520dce96 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..d1293bb62f262739bd784713c0e0059d486c7e24 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py @@ -0,0 +1,3182 @@ +# Author: Alexandre Gramfort +# Fabian Pedregosa +# Olivier Grisel +# Gael Varoquaux +# +# License: BSD 3 clause + +import numbers +import sys +import warnings +from abc import ABC, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy import sparse + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..model_selection import check_cv +from ..utils import Bunch, check_array, check_scalar +from ..utils._metadata_requests import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + get_routing_for_object, +) +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import safe_sparse_dot +from ..utils.metadata_routing import ( + _routing_enabled, + process_routing, +) +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_sample_weight, + check_consistent_length, + check_is_fitted, + check_random_state, + column_or_1d, + has_fit_parameter, +) + +# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast' +from . import _cd_fast as cd_fast # type: ignore +from ._base import LinearModel, _pre_fit, _preprocess_data + + +def _set_order(X, y, order="C"): + """Change the order of X and y if necessary. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + order : {None, 'C', 'F'} + If 'C', dense arrays are returned as C-ordered, sparse matrices in csr + format. If 'F', dense arrays are return as F-ordered, sparse matrices + in csc format. 
+ + Returns + ------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data with guaranteed order. + + y : ndarray of shape (n_samples,) + Target values with guaranteed order. + """ + if order not in [None, "C", "F"]: + raise ValueError( + "Unknown value for order. Got {} instead of None, 'C' or 'F'.".format(order) + ) + sparse_X = sparse.issparse(X) + sparse_y = sparse.issparse(y) + if order is not None: + sparse_format = "csc" if order == "F" else "csr" + if sparse_X: + X = X.asformat(sparse_format, copy=False) + else: + X = np.asarray(X, order=order) + if sparse_y: + y = y.asformat(sparse_format) + else: + y = np.asarray(y, order=order) + return X, y + + +############################################################################### +# Paths functions + + +def _alpha_grid( + X, + y, + Xy=None, + l1_ratio=1.0, + fit_intercept=True, + eps=1e-3, + n_alphas=100, + copy_X=True, +): + """Compute the grid of alpha values for elastic net parameter search + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication + + y : ndarray of shape (n_samples,) or (n_samples, n_outputs) + Target values + + Xy : array-like of shape (n_features,) or (n_features, n_outputs),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. + + l1_ratio : float, default=1.0 + The elastic net mixing parameter, with ``0 < l1_ratio <= 1``. + For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not + supported) ``For l1_ratio = 1`` it is an L1 penalty. For + ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3`` + + n_alphas : int, default=100 + Number of alphas along the regularization path + + fit_intercept : bool, default=True + Whether to fit an intercept or not + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + """ + if l1_ratio == 0: + raise ValueError( + "Automatic alpha grid generation is not supported for" + " l1_ratio=0. Please supply a grid by providing " + "your estimator with the appropriate `alphas=` " + "argument." + ) + n_samples = len(y) + + sparse_center = False + if Xy is None: + X_sparse = sparse.issparse(X) + sparse_center = X_sparse and fit_intercept + X = check_array( + X, accept_sparse="csc", copy=(copy_X and fit_intercept and not X_sparse) + ) + if not X_sparse: + # X can be touched inplace thanks to the above line + X, y, _, _, _ = _preprocess_data( + X, y, fit_intercept=fit_intercept, copy=False + ) + Xy = safe_sparse_dot(X.T, y, dense_output=True) + + if sparse_center: + # Workaround to find alpha_max for sparse matrices. + # since we should not destroy the sparsity of such matrices. 
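# Independently of the sparse workaround above: for dense mono-output data with
# fit_intercept=True, the grid computed below reduces to
#     alpha_max = max_j |x_j^T (y - mean(y))| / (n_samples * l1_ratio)
#     alphas    = np.geomspace(alpha_max, alpha_max * eps, num=n_alphas)
# i.e. alpha_max is the smallest alpha for which all coefficients are exactly
# zero (in the l1_ratio=1 / Lasso case), and the grid is log-spaced from there
# down to alpha_max * eps.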
+ _, _, X_offset, _, X_scale = _preprocess_data( + X, y, fit_intercept=fit_intercept + ) + mean_dot = X_offset * np.sum(y) + + if Xy.ndim == 1: + Xy = Xy[:, np.newaxis] + + if sparse_center: + if fit_intercept: + Xy -= mean_dot[:, np.newaxis] + + alpha_max = np.sqrt(np.sum(Xy**2, axis=1)).max() / (n_samples * l1_ratio) + + if alpha_max <= np.finfo(float).resolution: + alphas = np.empty(n_alphas) + alphas.fill(np.finfo(float).resolution) + return alphas + + return np.geomspace(alpha_max, alpha_max * eps, num=n_alphas) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "eps": [Interval(Real, 0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "precompute": [StrOptions({"auto"}), "boolean", "array-like"], + "Xy": ["array-like", None], + "copy_X": ["boolean"], + "coef_init": ["array-like", None], + "verbose": ["verbose"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lasso_path( + X, + y, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + precompute="auto", + Xy=None, + copy_X=True, + coef_init=None, + verbose=False, + return_n_iter=False, + positive=False, + **params, +): + """Compute Lasso path with coordinate descent. + + The Lasso optimization function varies for mono and multi-outputs. + + For mono-output tasks it is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + For multi-output tasks it is:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication. If ``y`` is mono-output then ``X`` + can be sparse. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_targets) + Target values. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If ``None`` alphas are set automatically. + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + Xy : array-like of shape (n_features,) or (n_features, n_targets),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + coef_init : array-like of shape (n_features, ), default=None + The initial values of the coefficients. + + verbose : bool or int, default=False + Amount of verbosity. + + return_n_iter : bool, default=False + Whether to return the number of iterations or not. + + positive : bool, default=False + If set to True, forces coefficients to be positive. + (Only allowed when ``y.ndim == 1``). + + **params : kwargs + Keyword arguments passed to the coordinate descent solver. + + Returns + ------- + alphas : ndarray of shape (n_alphas,) + The alphas along the path where models are computed. 
+ + coefs : ndarray of shape (n_features, n_alphas) or \ + (n_targets, n_features, n_alphas) + Coefficients along the path. + + dual_gaps : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + n_iters : list of int + The number of iterations taken by the coordinate descent optimizer to + reach the specified tolerance for each alpha. + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso path using LARS + algorithm. + Lasso : The Lasso is a linear model that estimates sparse coefficients. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoCV : Lasso linear model with iterative fitting along a regularization + path. + LassoLarsCV : Cross-validated Lasso using the LARS algorithm. + sklearn.decomposition.sparse_encode : Estimator that can be used to + transform signals into sparse linear combination of atoms from a fixed. + + Notes + ----- + For an example, see + :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py + `. + + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + Note that in certain cases, the Lars solver may be significantly + faster to implement this functionality. In particular, linear + interpolation can be used to retrieve model coefficients between the + values output by lars_path + + Examples + -------- + + Comparing lasso_path and lars_path with interpolation: + + >>> import numpy as np + >>> from sklearn.linear_model import lasso_path + >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T + >>> y = np.array([1, 2, 3.1]) + >>> # Use lasso_path to compute a coefficient path + >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5]) + >>> print(coef_path) + [[0. 0. 0.46874778] + [0.2159048 0.4425765 0.23689075]] + + >>> # Now use lars_path and 1D linear interpolation to compute the + >>> # same path + >>> from sklearn.linear_model import lars_path + >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso') + >>> from scipy import interpolate + >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], + ... coef_path_lars[:, ::-1]) + >>> print(coef_path_continuous([5., 1., .5])) + [[0. 0. 0.46915237] + [0.2159048 0.4425765 0.23668876]] + """ + return enet_path( + X, + y, + l1_ratio=1.0, + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + precompute=precompute, + Xy=Xy, + copy_X=copy_X, + coef_init=coef_init, + verbose=verbose, + positive=positive, + return_n_iter=return_n_iter, + **params, + ) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "l1_ratio": [Interval(Real, 0.0, 1.0, closed="both")], + "eps": [Interval(Real, 0.0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "precompute": [StrOptions({"auto"}), "boolean", "array-like"], + "Xy": ["array-like", None], + "copy_X": ["boolean"], + "coef_init": ["array-like", None], + "verbose": ["verbose"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + "check_input": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def enet_path( + X, + y, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + precompute="auto", + Xy=None, + copy_X=True, + coef_init=None, + verbose=False, + return_n_iter=False, + positive=False, + check_input=True, + **params, +): + """Compute elastic net path with coordinate descent. 
+ + The elastic net optimization function varies for mono and multi-outputs. + + For mono-output tasks it is:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + For multi-output tasks it is:: + + (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication. If ``y`` is mono-output then ``X`` + can be sparse. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_targets) + Target values. + + l1_ratio : float, default=0.5 + Number between 0 and 1 passed to elastic net (scaling between + l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If None alphas are set automatically. + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + Xy : array-like of shape (n_features,) or (n_features, n_targets),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + coef_init : array-like of shape (n_features, ), default=None + The initial values of the coefficients. + + verbose : bool or int, default=False + Amount of verbosity. + + return_n_iter : bool, default=False + Whether to return the number of iterations or not. + + positive : bool, default=False + If set to True, forces coefficients to be positive. + (Only allowed when ``y.ndim == 1``). + + check_input : bool, default=True + If set to False, the input validation checks are skipped (including the + Gram matrix when provided). It is assumed that they are handled + by the caller. + + **params : kwargs + Keyword arguments passed to the coordinate descent solver. + + Returns + ------- + alphas : ndarray of shape (n_alphas,) + The alphas along the path where models are computed. + + coefs : ndarray of shape (n_features, n_alphas) or \ + (n_targets, n_features, n_alphas) + Coefficients along the path. + + dual_gaps : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + n_iters : list of int + The number of iterations taken by the coordinate descent optimizer to + reach the specified tolerance for each alpha. + (Is returned when ``return_n_iter`` is set to True). + + See Also + -------- + MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 mixed-norm \ + as regularizer. + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in cross-validation. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + ElasticNetCV : Elastic Net model with iterative fitting along a regularization path. 
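As a quick orientation for the signature documented above, here is a minimal, self-contained call to enet_path on synthetic data; the dataset and every literal value below are purely illustrative, and the returned shapes are the point of the sketch.

from sklearn.datasets import make_regression
from sklearn.linear_model import enet_path

X, y = make_regression(n_samples=100, n_features=10, noise=1.0, random_state=0)

# 50 alphas on an automatically generated grid, mixing L1 and L2 penalties evenly.
alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=50)

print(alphas.shape)     # (50,)    - the decreasing grid of alphas
print(coefs.shape)      # (10, 50) - one coefficient vector per alpha
print(dual_gaps.shape)  # (50,)    - final dual gap for each alpha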
+ + Notes + ----- + For an example, see + :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py + `. + """ + X_offset_param = params.pop("X_offset", None) + X_scale_param = params.pop("X_scale", None) + sample_weight = params.pop("sample_weight", None) + tol = params.pop("tol", 1e-4) + max_iter = params.pop("max_iter", 1000) + random_state = params.pop("random_state", None) + selection = params.pop("selection", "cyclic") + + if len(params) > 0: + raise ValueError("Unexpected parameters in params", params.keys()) + + # We expect X and y to be already Fortran ordered when bypassing + # checks + if check_input: + X = check_array( + X, + accept_sparse="csc", + dtype=[np.float64, np.float32], + order="F", + copy=copy_X, + ) + y = check_array( + y, + accept_sparse="csc", + dtype=X.dtype.type, + order="F", + copy=False, + ensure_2d=False, + ) + if Xy is not None: + # Xy should be a 1d contiguous array or a 2D C ordered array + Xy = check_array( + Xy, dtype=X.dtype.type, order="C", copy=False, ensure_2d=False + ) + + n_samples, n_features = X.shape + + multi_output = False + if y.ndim != 1: + multi_output = True + n_targets = y.shape[1] + + if multi_output and positive: + raise ValueError("positive=True is not allowed for multi-output (y.ndim != 1)") + + # MultiTaskElasticNet does not support sparse matrices + if not multi_output and sparse.issparse(X): + if X_offset_param is not None: + # As sparse matrices are not actually centered we need this to be passed to + # the CD solver. + X_sparse_scaling = X_offset_param / X_scale_param + X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype) + else: + X_sparse_scaling = np.zeros(n_features, dtype=X.dtype) + + # X should have been passed through _pre_fit already if function is called + # from ElasticNet.fit + if check_input: + X, y, _, _, _, precompute, Xy = _pre_fit( + X, + y, + Xy, + precompute, + fit_intercept=False, + copy=False, + check_input=check_input, + ) + if alphas is None: + # No need to normalize of fit_intercept: it has been done + # above + alphas = _alpha_grid( + X, + y, + Xy=Xy, + l1_ratio=l1_ratio, + fit_intercept=False, + eps=eps, + n_alphas=n_alphas, + copy_X=False, + ) + elif len(alphas) > 1: + alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered + + n_alphas = len(alphas) + dual_gaps = np.empty(n_alphas) + n_iters = [] + + rng = check_random_state(random_state) + if selection not in ["random", "cyclic"]: + raise ValueError("selection should be either random or cyclic.") + random = selection == "random" + + if not multi_output: + coefs = np.empty((n_features, n_alphas), dtype=X.dtype) + else: + coefs = np.empty((n_targets, n_features, n_alphas), dtype=X.dtype) + + if coef_init is None: + coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order="F") + else: + coef_ = np.asfortranarray(coef_init, dtype=X.dtype) + + for i, alpha in enumerate(alphas): + # account for n_samples scaling in objectives between here and cd_fast + l1_reg = alpha * l1_ratio * n_samples + l2_reg = alpha * (1.0 - l1_ratio) * n_samples + if not multi_output and sparse.issparse(X): + model = cd_fast.sparse_enet_coordinate_descent( + w=coef_, + alpha=l1_reg, + beta=l2_reg, + X_data=X.data, + X_indices=X.indices, + X_indptr=X.indptr, + y=y, + sample_weight=sample_weight, + X_mean=X_sparse_scaling, + max_iter=max_iter, + tol=tol, + rng=rng, + random=random, + positive=positive, + ) + elif multi_output: + model = cd_fast.enet_coordinate_descent_multi_task( + coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random + ) + elif 
isinstance(precompute, np.ndarray): + # We expect precompute to be already Fortran ordered when bypassing + # checks + if check_input: + precompute = check_array(precompute, dtype=X.dtype.type, order="C") + model = cd_fast.enet_coordinate_descent_gram( + coef_, + l1_reg, + l2_reg, + precompute, + Xy, + y, + max_iter, + tol, + rng, + random, + positive, + ) + elif precompute is False: + model = cd_fast.enet_coordinate_descent( + coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive + ) + else: + raise ValueError( + "Precompute should be one of True, False, 'auto' or array-like. Got %r" + % precompute + ) + coef_, dual_gap_, eps_, n_iter_ = model + coefs[..., i] = coef_ + # we correct the scale of the returned dual gap, as the objective + # in cd_fast is n_samples * the objective in this docstring. + dual_gaps[i] = dual_gap_ / n_samples + n_iters.append(n_iter_) + + if verbose: + if verbose > 2: + print(model) + elif verbose > 1: + print("Path: %03i out of %03i" % (i, n_alphas)) + else: + sys.stderr.write(".") + + if return_n_iter: + return alphas, coefs, dual_gaps, n_iters + return alphas, coefs, dual_gaps + + +############################################################################### +# ElasticNet model + + +class ElasticNet(MultiOutputMixin, RegressorMixin, LinearModel): + """Linear regression with combined L1 and L2 priors as regularizer. + + Minimizes the objective function:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + If you are interested in controlling the L1 and L2 penalty + separately, keep in mind that this is equivalent to:: + + a * ||w||_1 + 0.5 * b * ||w||_2^2 + + where:: + + alpha = a + b and l1_ratio = a / (a + b) + + The parameter l1_ratio corresponds to alpha in the glmnet R package while + alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio + = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable, + unless you supply your own sequence of alpha. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the penalty terms. Defaults to 1.0. + See the notes for the exact mathematical meaning of this + parameter. ``alpha = 0`` is equivalent to an ordinary least square, + solved by the :class:`LinearRegression` object. For numerical + reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised. + Given this, you should use the :class:`LinearRegression` object. + + l1_ratio : float, default=0.5 + The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For + ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it + is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a + combination of L1 and L2. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If ``False``, the + data is assumed to be already centered. + + precompute : bool or array-like of shape (n_features, n_features),\ + default=False + Whether to use a precomputed Gram matrix to speed up + calculations. The Gram matrix can also be passed as argument. + For sparse input this option is always ``False`` to preserve sparsity. + + max_iter : int, default=1000 + The maximum number of iterations. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. 
+ + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``, see Notes below. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : list of int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : float or ndarray of shape (n_targets,) + Given param alpha, the dual gaps at the end of the optimization, + same shape as each observation of y. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + SGDRegressor : Implements elastic net regression with incremental training. + SGDClassifier : Implements logistic regression with elastic net penalty + (``SGDClassifier(loss="log_loss", penalty="elasticnet")``). + + Notes + ----- + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + The precise stopping criteria based on `tol` are the following: First, check that + that maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|` + is smaller than `tol` times the maximum absolute coefficient, :math:`\\max_j |w_j|`. + If so, then additionally check whether the dual gap is smaller than `tol` times + :math:`||y||_2^2 / n_{\text{samples}}`. + + Examples + -------- + >>> from sklearn.linear_model import ElasticNet + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=2, random_state=0) + >>> regr = ElasticNet(random_state=0) + >>> regr.fit(X, y) + ElasticNet(random_state=0) + >>> print(regr.coef_) + [18.83816048 64.55968825] + >>> print(regr.intercept_) + 1.451... + >>> print(regr.predict([[0, 0]])) + [1.451...] 
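The remark above about controlling the two penalty terms separately, i.e. writing the penalty as a * ||w||_1 + 0.5 * b * ||w||_2^2 with alpha = a + b and l1_ratio = a / (a + b), is easy to apply backwards; the small helper below (the helper name is ours, not part of the API) makes the conversion explicit.

def l1_l2_to_elastic_net(a, b):
    """Map separate L1/L2 strengths (a, b) to ElasticNet's (alpha, l1_ratio)."""
    alpha = a + b
    l1_ratio = a / (a + b)
    return alpha, l1_ratio

# An L1 weight of a=0.5 and an L2 weight of b=1.5 ...
alpha, l1_ratio = l1_l2_to_elastic_net(0.5, 1.5)
print(alpha, l1_ratio)  # 2.0 0.25
# ... correspond to ElasticNet(alpha=2.0, l1_ratio=0.25).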
+ """ + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "fit_intercept": ["boolean"], + "precompute": ["boolean", "array-like"], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "copy_X": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "warm_start": ["boolean"], + "positive": ["boolean"], + "random_state": ["random_state"], + "selection": [StrOptions({"cyclic", "random"})], + } + + path = staticmethod(enet_path) + + def __init__( + self, + alpha=1.0, + *, + l1_ratio=0.5, + fit_intercept=True, + precompute=False, + max_iter=1000, + copy_X=True, + tol=1e-4, + warm_start=False, + positive=False, + random_state=None, + selection="cyclic", + ): + self.alpha = alpha + self.l1_ratio = l1_ratio + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.positive = positive + self.random_state = random_state + self.selection = selection + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, check_input=True): + """Fit model with coordinate descent. + + Parameters + ---------- + X : {ndarray, sparse matrix} of (n_samples, n_features) + Data. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + sample_weight : float or array-like of shape (n_samples,), default=None + Sample weights. Internally, the `sample_weight` vector will be + rescaled to sum to `n_samples`. + + .. versionadded:: 0.23 + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you do. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + Coordinate descent is an algorithm that considers each column of + data at a time hence it will automatically convert the X input + as a Fortran-contiguous numpy array if necessary. + + To avoid memory re-allocation it is advised to allocate the + initial data in memory directly using that format. + """ + if self.alpha == 0: + warnings.warn( + ( + "With alpha=0, this algorithm does not converge " + "well. You are advised to use the LinearRegression " + "estimator" + ), + stacklevel=2, + ) + + # Remember if X is copied + X_copied = False + # We expect X and y to be float64 or float32 Fortran ordered arrays + # when bypassing checks + if check_input: + X_copied = self.copy_X and self.fit_intercept + X, y = self._validate_data( + X, + y, + accept_sparse="csc", + order="F", + dtype=[np.float64, np.float32], + copy=X_copied, + multi_output=True, + y_numeric=True, + ) + y = check_array( + y, order="F", copy=False, dtype=X.dtype.type, ensure_2d=False + ) + + n_samples, n_features = X.shape + alpha = self.alpha + + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + if check_input: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + # TLDR: Rescale sw to sum up to n_samples. + # Long: The objective function of Enet + # + # 1/2 * np.average(squared error, weights=sw) + # + alpha * penalty (1) + # + # is invariant under rescaling of sw. 
+ # But enet_path coordinate descent minimizes + # + # 1/2 * sum(squared error) + alpha' * penalty (2) + # + # and therefore sets + # + # alpha' = n_samples * alpha (3) + # + # inside its function body, which results in objective (2) being + # equivalent to (1) in case of no sw. + # With sw, however, enet_path should set + # + # alpha' = sum(sw) * alpha (4) + # + # Therefore, we use the freedom of Eq. (1) to rescale sw before + # calling enet_path, i.e. + # + # sw *= n_samples / sum(sw) + # + # such that sum(sw) = n_samples. This way, (3) and (4) are the same. + sample_weight = sample_weight * (n_samples / np.sum(sample_weight)) + # Note: Alternatively, we could also have rescaled alpha instead + # of sample_weight: + # + # alpha *= np.sum(sample_weight) / n_samples + + # Ensure copying happens only once, don't do it again if done above. + # X and y will be rescaled if sample_weight is not None, order='F' + # ensures that the returned X and y are still F-contiguous. + should_copy = self.copy_X and not X_copied + X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit( + X, + y, + None, + self.precompute, + fit_intercept=self.fit_intercept, + copy=should_copy, + check_input=check_input, + sample_weight=sample_weight, + ) + # coordinate descent needs F-ordered arrays and _pre_fit might have + # called _rescale_data + if check_input or sample_weight is not None: + X, y = _set_order(X, y, order="F") + if y.ndim == 1: + y = y[:, np.newaxis] + if Xy is not None and Xy.ndim == 1: + Xy = Xy[:, np.newaxis] + + n_targets = y.shape[1] + + if not self.warm_start or not hasattr(self, "coef_"): + coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order="F") + else: + coef_ = self.coef_ + if coef_.ndim == 1: + coef_ = coef_[np.newaxis, :] + + dual_gaps_ = np.zeros(n_targets, dtype=X.dtype) + self.n_iter_ = [] + + for k in range(n_targets): + if Xy is not None: + this_Xy = Xy[:, k] + else: + this_Xy = None + _, this_coef, this_dual_gap, this_iter = self.path( + X, + y[:, k], + l1_ratio=self.l1_ratio, + eps=None, + n_alphas=None, + alphas=[alpha], + precompute=precompute, + Xy=this_Xy, + copy_X=True, + coef_init=coef_[k], + verbose=False, + return_n_iter=True, + positive=self.positive, + check_input=False, + # from here on **params + tol=self.tol, + X_offset=X_offset, + X_scale=X_scale, + max_iter=self.max_iter, + random_state=self.random_state, + selection=self.selection, + sample_weight=sample_weight, + ) + coef_[k] = this_coef[:, 0] + dual_gaps_[k] = this_dual_gap[0] + self.n_iter_.append(this_iter[0]) + + if n_targets == 1: + self.n_iter_ = self.n_iter_[0] + self.coef_ = coef_[0] + self.dual_gap_ = dual_gaps_[0] + else: + self.coef_ = coef_ + self.dual_gap_ = dual_gaps_ + + self._set_intercept(X_offset, y_offset, X_scale) + + # check for finiteness of coefficients + if not all(np.isfinite(w).all() for w in [self.coef_, self.intercept_]): + raise ValueError( + "Coordinate descent iterations resulted in non-finite parameter" + " values. The input data may contain large values and need to" + " be preprocessed." + ) + + # return self for chaining fit and predict calls + return self + + @property + def sparse_coef_(self): + """Sparse representation of the fitted `coef_`.""" + return sparse.csr_matrix(self.coef_) + + def _decision_function(self, X): + """Decision function of the linear model. + + Parameters + ---------- + X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) + + Returns + ------- + T : ndarray of shape (n_samples,) + The predicted decision function. 
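The long comment in fit above argues that objective (1) is invariant under a global rescaling of sample_weight, which is why the weights are renormalized to sum to n_samples before calling enet_path. Below is a quick empirical check of that invariance on toy data; the dataset, alpha and the factor 10 are illustrative only.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNet

X, y = make_regression(n_samples=80, n_features=5, noise=2.0, random_state=0)
sw = np.random.RandomState(0).uniform(0.5, 2.0, size=80)

# Fitting with sw and with an arbitrarily rescaled 10 * sw should give the same
# coefficients, because fit() renormalizes the weights internally.
coef_a = ElasticNet(alpha=0.1).fit(X, y, sample_weight=sw).coef_
coef_b = ElasticNet(alpha=0.1).fit(X, y, sample_weight=10 * sw).coef_

print(np.allclose(coef_a, coef_b))  # expected: True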
+ """ + check_is_fitted(self) + if sparse.issparse(X): + return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + else: + return super()._decision_function(X) + + +############################################################################### +# Lasso model + + +class Lasso(ElasticNet): + """Linear Model trained with L1 prior as regularizer (aka the Lasso). + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Technically the Lasso model is optimizing the same objective function as + the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Lasso` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to False, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : bool or array-like of shape (n_features, n_features),\ + default=False + Whether to use a precomputed Gram matrix to speed up + calculations. The Gram matrix can also be passed as argument. + For sparse input this option is always ``False`` to preserve sparsity. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``, see Notes below. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + dual_gap_ : float or ndarray of shape (n_targets,) + Given param alpha, the dual gaps at the end of the optimization, + same shape as each observation of y. + + sparse_coef_ : sparse matrix of shape (n_features, 1) or \ + (n_targets, n_features) + Readonly property derived from ``coef_``. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. 
+ + n_iter_ : int or list of int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Regularization path using LARS. + lasso_path : Regularization path using Lasso. + LassoLars : Lasso Path along the regularization parameter using LARS algorithm. + LassoCV : Lasso alpha parameter by cross-validation. + LassoLarsCV : Lasso least angle parameter algorithm by cross-validation. + sklearn.decomposition.sparse_encode : Sparse coding array estimator. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to `1 / (2C)` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. + + The precise stopping criteria based on `tol` are the following: First, check that + that maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|` + is smaller than `tol` times the maximum absolute coefficient, :math:`\\max_j |w_j|`. + If so, then additionally check whether the dual gap is smaller than `tol` times + :math:`||y||_2^2 / n_{\\text{samples}}`. + + The target can be a 2-dimensional array, resulting in the optimization of the + following objective:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_F + alpha * ||W||_11 + + where :math:`||W||_{1,1}` is the sum of the magnitude of the matrix coefficients. + It should not be confused with :class:`~sklearn.linear_model.MultiTaskLasso` which + instead penalizes the :math:`L_{2,1}` norm of the coefficients, yielding row-wise + sparsity in the coefficients. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.Lasso(alpha=0.1) + >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) + Lasso(alpha=0.1) + >>> print(clf.coef_) + [0.85 0. ] + >>> print(clf.intercept_) + 0.15... + """ + + _parameter_constraints: dict = { + **ElasticNet._parameter_constraints, + } + _parameter_constraints.pop("l1_ratio") + + path = staticmethod(enet_path) + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + precompute=False, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + positive=False, + random_state=None, + selection="cyclic", + ): + super().__init__( + alpha=alpha, + l1_ratio=1.0, + fit_intercept=fit_intercept, + precompute=precompute, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + positive=positive, + random_state=random_state, + selection=selection, + ) + + +############################################################################### +# Functions for CV with paths functions + + +def _path_residuals( + X, + y, + sample_weight, + train, + test, + fit_intercept, + path, + path_params, + alphas=None, + l1_ratio=1, + X_order=None, + dtype=None, +): + """Returns the MSE for the models computed by 'path'. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : None or array-like of shape (n_samples,) + Sample weights. + + train : list of indices + The indices of the train set. + + test : list of indices + The indices of the test set. + + path : callable + Function returning a list of models on the path. See + enet_path for an example of signature. + + path_params : dictionary + Parameters passed to the path function. + + alphas : array-like, default=None + Array of float that is used for cross-validation. If not + provided, computed using 'path'. + + l1_ratio : float, default=1 + float between 0 and 1 passed to ElasticNet (scaling between + l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an + L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 + < l1_ratio < 1``, the penalty is a combination of L1 and L2. + + X_order : {'F', 'C'}, default=None + The order of the arrays expected by the path function to + avoid memory copies. + + dtype : a numpy dtype, default=None + The dtype of the arrays expected by the path function to + avoid memory copies. + """ + X_train = X[train] + y_train = y[train] + X_test = X[test] + y_test = y[test] + if sample_weight is None: + sw_train, sw_test = None, None + else: + sw_train = sample_weight[train] + sw_test = sample_weight[test] + n_samples = X_train.shape[0] + # TLDR: Rescale sw_train to sum up to n_samples on the training set. + # See TLDR and long comment inside ElasticNet.fit. + sw_train *= n_samples / np.sum(sw_train) + # Note: Alternatively, we could also have rescaled alpha instead + # of sample_weight: + # + # alpha *= np.sum(sample_weight) / n_samples + + if not sparse.issparse(X): + for array, array_input in ( + (X_train, X), + (y_train, y), + (X_test, X), + (y_test, y), + ): + if array.base is not array_input and not array.flags["WRITEABLE"]: + # fancy indexing should create a writable copy but it doesn't + # for read-only memmaps (cf. numpy#14132). + array.setflags(write=True) + + if y.ndim == 1: + precompute = path_params["precompute"] + else: + # No Gram variant of multi-task exists right now. + # Fall back to default enet_multitask + precompute = False + + X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit( + X_train, + y_train, + None, + precompute, + fit_intercept=fit_intercept, + copy=False, + sample_weight=sw_train, + ) + + path_params = path_params.copy() + path_params["Xy"] = Xy + path_params["X_offset"] = X_offset + path_params["X_scale"] = X_scale + path_params["precompute"] = precompute + path_params["copy_X"] = False + path_params["alphas"] = alphas + # needed for sparse cd solver + path_params["sample_weight"] = sw_train + + if "l1_ratio" in path_params: + path_params["l1_ratio"] = l1_ratio + + # Do the ordering and type casting here, as if it is done in the path, + # X is copied and a reference is kept here + X_train = check_array(X_train, accept_sparse="csc", dtype=dtype, order=X_order) + alphas, coefs, _ = path(X_train, y_train, **path_params) + del X_train, y_train + + if y.ndim == 1: + # Doing this so that it becomes coherent with multioutput. 
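# In the mono-output branch, `coefs` returned by the path has shape
# (n_features, n_alphas) and is promoted to (1, n_features, n_alphas) so that
# the broadcasting below works identically for the single- and multi-target
# cases: the dot product of X_test with coefs then has shape
# (n_samples, n_targets, n_alphas), y_test[:, :, np.newaxis] broadcasts against
# it, and the squared residuals (intercepts included) are averaged over samples
# and then over targets, giving one MSE value per alpha.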
+ coefs = coefs[np.newaxis, :, :] + y_offset = np.atleast_1d(y_offset) + y_test = y_test[:, np.newaxis] + + intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs) + X_test_coefs = safe_sparse_dot(X_test, coefs) + residues = X_test_coefs - y_test[:, :, np.newaxis] + residues += intercepts + if sample_weight is None: + this_mse = (residues**2).mean(axis=0) + else: + this_mse = np.average(residues**2, weights=sw_test, axis=0) + + return this_mse.mean(axis=0) + + +class LinearModelCV(MultiOutputMixin, LinearModel, ABC): + """Base class for iterative model fitting along a regularization path.""" + + _parameter_constraints: dict = { + "eps": [Interval(Real, 0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "fit_intercept": ["boolean"], + "precompute": [StrOptions({"auto"}), "array-like", "boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "copy_X": ["boolean"], + "cv": ["cv_object"], + "verbose": ["verbose"], + "n_jobs": [Integral, None], + "positive": ["boolean"], + "random_state": ["random_state"], + "selection": [StrOptions({"cyclic", "random"})], + } + + @abstractmethod + def __init__( + self, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.tol = tol + self.copy_X = copy_X + self.cv = cv + self.verbose = verbose + self.n_jobs = n_jobs + self.positive = positive + self.random_state = random_state + self.selection = selection + + @abstractmethod + def _get_estimator(self): + """Model to be fitted after the best alpha has been determined.""" + + @abstractmethod + def _is_multitask(self): + """Bool indicating if class is meant for multidimensional target.""" + + @staticmethod + @abstractmethod + def path(X, y, **kwargs): + """Compute path with coordinate descent.""" + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, **params): + """Fit linear model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data + to avoid unnecessary memory duplication. If y is mono-output, + X can be sparse. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or array-like of shape (n_samples,), \ + default=None + Sample weights used for fitting and evaluation of the weighted + mean squared error of each cv-fold. Note that the cross validated + MSE that is finally used to find the best model is the unweighted + mean over the (weighted) MSEs of each test fold. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. 
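Stripped of input validation, metadata routing and parallel dispatch, the model-selection logic in the body of fit below reduces to: evaluate the per-fold MSE for every (l1_ratio, alpha) pair, average over folds, and keep the minimizer before refitting on the full data. Here is a compact sketch of that selection step, with illustrative shapes and a single shared alpha grid for simplicity.

import numpy as np

n_l1_ratio, n_folds, n_alphas = 3, 5, 100
l1_ratios = np.array([0.1, 0.5, 0.9])
alphas = np.geomspace(1.0, 1e-3, num=n_alphas)

# Stand-in for the cross-validation errors computed by _path_residuals,
# arranged as (n_l1_ratio, n_folds, n_alphas).
mse_paths = np.random.RandomState(0).rand(n_l1_ratio, n_folds, n_alphas)

mean_mse = mse_paths.mean(axis=1)  # average over folds
best_l1_idx, best_alpha_idx = np.unravel_index(np.argmin(mean_mse), mean_mse.shape)

best_l1_ratio = l1_ratios[best_l1_idx]
best_alpha = alphas[best_alpha_idx]
print(best_l1_ratio, best_alpha)  # the pair the estimator would refit with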
+ """ + _raise_for_params(params, self, "fit") + + # This makes sure that there is no duplication in memory. + # Dealing right with copy_X is important in the following: + # Multiple functions touch X and subsamples of X and can induce a + # lot of duplication of memory + copy_X = self.copy_X and self.fit_intercept + + check_y_params = dict( + copy=False, dtype=[np.float64, np.float32], ensure_2d=False + ) + if isinstance(X, np.ndarray) or sparse.issparse(X): + # Keep a reference to X + reference_to_old_X = X + # Let us not impose fortran ordering so far: it is + # not useful for the cross-validation loop and will be done + # by the model fitting itself + + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. We also want to allow y to be 64 or 32 but check_X_y only + # allows to convert for 64. + check_X_params = dict( + accept_sparse="csc", dtype=[np.float64, np.float32], copy=False + ) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + if sparse.issparse(X): + if hasattr(reference_to_old_X, "data") and not np.may_share_memory( + reference_to_old_X.data, X.data + ): + # X is a sparse matrix and has been copied + copy_X = False + elif not np.may_share_memory(reference_to_old_X, X): + # X has been copied + copy_X = False + del reference_to_old_X + else: + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. We also want to allow y to be 64 or 32 but check_X_y only + # allows to convert for 64. + check_X_params = dict( + accept_sparse="csc", + dtype=[np.float64, np.float32], + order="F", + copy=copy_X, + ) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + copy_X = False + + check_consistent_length(X, y) + + if not self._is_multitask(): + if y.ndim > 1 and y.shape[1] > 1: + raise ValueError( + "For multi-task outputs, use MultiTask%s" % self.__class__.__name__ + ) + y = column_or_1d(y, warn=True) + else: + if sparse.issparse(X): + raise TypeError("X should be dense but a sparse matrix waspassed") + elif y.ndim == 1: + raise ValueError( + "For mono-task outputs, use %sCV" % self.__class__.__name__[9:] + ) + + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + model = self._get_estimator() + + # All LinearModelCV parameters except 'cv' are acceptable + path_params = self.get_params() + + # Pop `intercept` that is not parameter of the path function + path_params.pop("fit_intercept", None) + + if "l1_ratio" in path_params: + l1_ratios = np.atleast_1d(path_params["l1_ratio"]) + # For the first path, we need to set l1_ratio + path_params["l1_ratio"] = l1_ratios[0] + else: + l1_ratios = [ + 1, + ] + path_params.pop("cv", None) + path_params.pop("n_jobs", None) + + alphas = self.alphas + n_l1_ratio = len(l1_ratios) + + check_scalar_alpha = partial( + check_scalar, + target_type=Real, + min_val=0.0, + include_boundaries="left", + ) + + if alphas is None: + alphas = [ + _alpha_grid( + X, + y, + l1_ratio=l1_ratio, + fit_intercept=self.fit_intercept, + eps=self.eps, + n_alphas=self.n_alphas, + copy_X=self.copy_X, + ) + for l1_ratio in l1_ratios + ] + else: + # Making sure alphas entries are scalars. + for index, alpha in enumerate(alphas): + check_scalar_alpha(alpha, f"alphas[{index}]") + # Making sure alphas is properly ordered. 
+ alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1)) + + # We want n_alphas to be the number of alphas used for each l1_ratio. + n_alphas = len(alphas[0]) + path_params.update({"n_alphas": n_alphas}) + + path_params["copy_X"] = copy_X + # We are not computing in parallel, we can modify X + # inplace in the folds + if effective_n_jobs(self.n_jobs) > 1: + path_params["copy_X"] = False + + # init cross-validation generator + cv = check_cv(self.cv) + + if _routing_enabled(): + splitter_supports_sample_weight = get_routing_for_object(cv).consumes( + method="split", params=["sample_weight"] + ) + if ( + sample_weight is not None + and not splitter_supports_sample_weight + and not has_fit_parameter(self, "sample_weight") + ): + raise ValueError( + "The CV splitter and underlying estimator do not support" + " sample weights." + ) + + if splitter_supports_sample_weight: + params["sample_weight"] = sample_weight + + routed_params = process_routing(self, "fit", **params) + + if sample_weight is not None and not has_fit_parameter( + self, "sample_weight" + ): + # MultiTaskElasticNetCV does not (yet) support sample_weight + sample_weight = None + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split=Bunch()) + + # Compute path for all folds and compute MSE to get the best alpha + folds = list(cv.split(X, y, **routed_params.splitter.split)) + best_mse = np.inf + + # We do a double for loop folded in one, in order to be able to + # iterate in parallel on l1_ratio and folds + jobs = ( + delayed(_path_residuals)( + X, + y, + sample_weight, + train, + test, + self.fit_intercept, + self.path, + path_params, + alphas=this_alphas, + l1_ratio=this_l1_ratio, + X_order="F", + dtype=X.dtype.type, + ) + for this_l1_ratio, this_alphas in zip(l1_ratios, alphas) + for train, test in folds + ) + mse_paths = Parallel( + n_jobs=self.n_jobs, + verbose=self.verbose, + prefer="threads", + )(jobs) + mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1)) + # The mean is computed over folds. + mean_mse = np.mean(mse_paths, axis=1) + self.mse_path_ = np.squeeze(np.moveaxis(mse_paths, 2, 1)) + for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse): + i_best_alpha = np.argmin(mse_alphas) + this_best_mse = mse_alphas[i_best_alpha] + if this_best_mse < best_mse: + best_alpha = l1_alphas[i_best_alpha] + best_l1_ratio = l1_ratio + best_mse = this_best_mse + + self.l1_ratio_ = best_l1_ratio + self.alpha_ = best_alpha + if self.alphas is None: + self.alphas_ = np.asarray(alphas) + if n_l1_ratio == 1: + self.alphas_ = self.alphas_[0] + # Remove duplicate alphas in case alphas is provided. + else: + self.alphas_ = np.asarray(alphas[0]) + + # Refit the model with the parameters selected + common_params = { + name: value + for name, value in self.get_params().items() + if name in model.get_params() + } + model.set_params(**common_params) + model.alpha = best_alpha + model.l1_ratio = best_l1_ratio + model.copy_X = copy_X + precompute = getattr(self, "precompute", None) + if isinstance(precompute, str) and precompute == "auto": + model.precompute = False + + if sample_weight is None: + # MultiTaskElasticNetCV does not (yet) support sample_weight, even + # not sample_weight=None. 
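+             # Refit on the full training set with the selected parameters,
+             # passing sample_weight only when the underlying estimator
+             # accepts it.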
+ model.fit(X, y) + else: + model.fit(X, y, sample_weight=sample_weight) + if not hasattr(self, "l1_ratio"): + del self.l1_ratio_ + self.coef_ = model.coef_ + self.intercept_ = model.intercept_ + self.dual_gap_ = model.dual_gap_ + self.n_iter_ = model.n_iter_ + return self + + def _more_tags(self): + # Note: check_sample_weights_invariance(kind='ones') should work, but + # currently we can only mark a whole test as xfail. + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + splitter=check_cv(self.cv), + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + ) + return router + + +class LassoCV(RegressorMixin, LinearModelCV): + """Lasso linear model with iterative fitting along a regularization path. + + See glossary entry for :term:`cross-validation estimator`. + + The best model is selected by cross-validation. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If ``None`` alphas are set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : bool or int, default=False + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. 
See :term:`Glossary ` + for more details. + + positive : bool, default=False + If positive, restrict regression coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + alpha_ : float + The amount of penalization chosen by cross validation. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + mse_path_ : ndarray of shape (n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) + The grid of alphas used for fitting. + + dual_gap_ : float or ndarray of shape (n_targets,) + The dual gap at the end of the optimization for the optimal alpha + (``alpha_``). + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso path using LARS + algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : The Lasso is a linear model that estimates sparse coefficients. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoCV : Lasso linear model with iterative fitting along a regularization + path. + LassoLarsCV : Cross-validated Lasso using the LARS algorithm. + + Notes + ----- + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` argument of the `fit` + method should be directly passed as a Fortran-contiguous numpy array. + + For an example, see + :ref:`examples/linear_model/plot_lasso_model_selection.py + `. + + :class:`LassoCV` leads to different results than a hyperparameter + search using :class:`~sklearn.model_selection.GridSearchCV` with a + :class:`Lasso` model. In :class:`LassoCV`, a model for a given + penalty `alpha` is warm started using the coefficients of the + closest model (trained at the previous iteration) on the + regularization path. It tends to speed up the hyperparameter + search. + + Examples + -------- + >>> from sklearn.linear_model import LassoCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4, random_state=0) + >>> reg = LassoCV(cv=5, random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9993... 
+ >>> reg.predict(X[:1,]) + array([-78.4951...]) + """ + + path = staticmethod(lasso_path) + + def __init__( + self, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + super().__init__( + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + fit_intercept=fit_intercept, + precompute=precompute, + max_iter=max_iter, + tol=tol, + copy_X=copy_X, + cv=cv, + verbose=verbose, + n_jobs=n_jobs, + positive=positive, + random_state=random_state, + selection=selection, + ) + + def _get_estimator(self): + return Lasso() + + def _is_multitask(self): + return False + + def _more_tags(self): + return {"multioutput": False} + + +class ElasticNetCV(RegressorMixin, LinearModelCV): + """Elastic Net model with iterative fitting along a regularization path. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + l1_ratio : float or list of float, default=0.5 + Float between 0 and 1 passed to ElasticNet (scaling between + l1 and l2 penalties). For ``l1_ratio = 0`` + the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 + This parameter can be a list, in which case the different + values are tested by cross-validation and the one giving the best + prediction score is used. Note that a good choice of list of + values for l1_ratio is often to put more values close to 1 + (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, + .9, .95, .99, 1]``. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path, used for each l1_ratio. + + alphas : array-like, default=None + List of alphas where to compute the models. + If None alphas are set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + verbose : bool or int, default=0 + Amount of verbosity. 
+ + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + alpha_ : float + The amount of penalization chosen by cross validation. + + l1_ratio_ : float + The compromise between l1 and l2 penalization chosen by + cross validation. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + intercept_ : float or ndarray of shape (n_targets, n_features) + Independent term in the decision function. + + mse_path_ : ndarray of shape (n_l1_ratio, n_alpha, n_folds) + Mean square error for the test set on each fold, varying l1_ratio and + alpha. + + alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas) + The grid of alphas used for fitting, for each l1_ratio. + + dual_gap_ : float + The dual gaps at the end of the optimization for the optimal alpha. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + enet_path : Compute elastic net path with coordinate descent. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + + Notes + ----- + In `fit`, once the best parameters `l1_ratio` and `alpha` are found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` argument of the `fit` + method should be directly passed as a Fortran-contiguous numpy array. + + The parameter `l1_ratio` corresponds to alpha in the glmnet R package + while alpha corresponds to the lambda parameter in glmnet. + More specifically, the optimization objective is:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + If you are interested in controlling the L1 and L2 penalty + separately, keep in mind that this is equivalent to:: + + a * L1 + b * L2 + + for:: + + alpha = a + b and l1_ratio = a / (a + b). + + For an example, see + :ref:`examples/linear_model/plot_lasso_model_selection.py + `. 
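+
+     A minimal sketch of that conversion, with hypothetical penalty
+     strengths ``a`` (L1) and ``b`` (L2)::
+
+         a, b = 1.0, 3.0
+         alpha = a + b            # 4.0
+         l1_ratio = a / (a + b)   # 0.25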
+ + Examples + -------- + >>> from sklearn.linear_model import ElasticNetCV + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=2, random_state=0) + >>> regr = ElasticNetCV(cv=5, random_state=0) + >>> regr.fit(X, y) + ElasticNetCV(cv=5, random_state=0) + >>> print(regr.alpha_) + 0.199... + >>> print(regr.intercept_) + 0.398... + >>> print(regr.predict([[0, 0]])) + [0.398...] + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + "l1_ratio": [Interval(Real, 0, 1, closed="both"), "array-like"], + } + + path = staticmethod(enet_path) + + def __init__( + self, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + cv=None, + copy_X=True, + verbose=0, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.tol = tol + self.cv = cv + self.copy_X = copy_X + self.verbose = verbose + self.n_jobs = n_jobs + self.positive = positive + self.random_state = random_state + self.selection = selection + + def _get_estimator(self): + return ElasticNet() + + def _is_multitask(self): + return False + + def _more_tags(self): + return {"multioutput": False} + + +############################################################################### +# Multi Task ElasticNet and Lasso models (with joint feature selection) + + +class MultiTaskElasticNet(Lasso): + """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer. + + The optimization objective for MultiTaskElasticNet is:: + + (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = sum_i sqrt(sum_j W_ij ^ 2) + + i.e. the sum of norms of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1/L2 term. Defaults to 1.0. + + l1_ratio : float, default=0.5 + The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. + For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it + is an L2 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
+ + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). If a 1D y is + passed in at fit (non multi-task usage), ``coef_`` is then a 1D array. + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : float + The dual gaps at the end of the optimization. + + eps_ : float + The tolerance scaled scaled by the variance of the target `y`. + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in + cross-validation. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + MultiTaskLasso : Multi-task Lasso model trained with L1/L2 + mixed-norm as regularizer. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X and y arguments of the fit + method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1) + >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) + MultiTaskElasticNet(alpha=0.1) + >>> print(clf.coef_) + [[0.45663524 0.45612256] + [0.45663524 0.45612256]] + >>> print(clf.intercept_) + [0.0872422 0.0872422] + """ + + _parameter_constraints: dict = { + **ElasticNet._parameter_constraints, + } + for param in ("precompute", "positive"): + _parameter_constraints.pop(param) + + def __init__( + self, + alpha=1.0, + *, + l1_ratio=0.5, + fit_intercept=True, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.random_state = random_state + self.selection = selection + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit MultiTaskElasticNet model with coordinate descent. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data. + y : ndarray of shape (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + Coordinate descent is an algorithm that considers each column of + data at a time hence it will automatically convert the X input + as a Fortran-contiguous numpy array if necessary. + + To avoid memory re-allocation it is advised to allocate the + initial data in memory directly using that format. 
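+
+         A minimal sketch of that pre-conversion, assuming ``X`` and ``y``
+         are already held as NumPy arrays::
+
+             import numpy as np
+             X = np.asfortranarray(X, dtype=np.float64)
+             y = np.asfortranarray(y, dtype=np.float64)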
+ """ + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be csr. + check_X_params = dict( + dtype=[np.float64, np.float32], + order="F", + copy=self.copy_X and self.fit_intercept, + ) + check_y_params = dict(ensure_2d=False, order="F") + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + check_consistent_length(X, y) + y = y.astype(X.dtype) + + if hasattr(self, "l1_ratio"): + model_str = "ElasticNet" + else: + model_str = "Lasso" + if y.ndim == 1: + raise ValueError("For mono-task outputs, use %s" % model_str) + + n_samples, n_features = X.shape + n_targets = y.shape[1] + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=False + ) + + if not self.warm_start or not hasattr(self, "coef_"): + self.coef_ = np.zeros( + (n_targets, n_features), dtype=X.dtype.type, order="F" + ) + + l1_reg = self.alpha * self.l1_ratio * n_samples + l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples + + self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory + + random = self.selection == "random" + + ( + self.coef_, + self.dual_gap_, + self.eps_, + self.n_iter_, + ) = cd_fast.enet_coordinate_descent_multi_task( + self.coef_, + l1_reg, + l2_reg, + X, + y, + self.max_iter, + self.tol, + check_random_state(self.random_state), + random, + ) + + # account for different objective scaling here and in cd_fast + self.dual_gap_ /= n_samples + + self._set_intercept(X_offset, y_offset, X_scale) + + # return self for chaining fit and predict calls + return self + + def _more_tags(self): + return {"multioutput_only": True} + + +class MultiTaskLasso(MultiTaskElasticNet): + """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1/L2 term. Defaults to 1.0. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. 
+ + Attributes + ---------- + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + eps_ : float + The tolerance scaled scaled by the variance of the target `y`. + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Lasso: Linear Model trained with L1 prior as regularizer (aka the Lasso). + MultiTaskLassoCV: Multi-task L1 regularized linear model with built-in + cross-validation. + MultiTaskElasticNetCV: Multi-task L1/L2 ElasticNet with built-in cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X and y arguments of the fit + method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskLasso(alpha=0.1) + >>> clf.fit([[0, 1], [1, 2], [2, 4]], [[0, 0], [1, 1], [2, 3]]) + MultiTaskLasso(alpha=0.1) + >>> print(clf.coef_) + [[0. 0.60809415] + [0. 0.94592424]] + >>> print(clf.intercept_) + [-0.41888636 -0.87382323] + """ + + _parameter_constraints: dict = { + **MultiTaskElasticNet._parameter_constraints, + } + _parameter_constraints.pop("l1_ratio") + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + random_state=None, + selection="cyclic", + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.l1_ratio = 1.0 + self.random_state = random_state + self.selection = selection + + +class MultiTaskElasticNetCV(RegressorMixin, LinearModelCV): + """Multi-task L1/L2 ElasticNet with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + The optimization objective for MultiTaskElasticNet is:: + + (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.15 + + Parameters + ---------- + l1_ratio : float or list of float, default=0.5 + The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. + For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it + is an L2 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. + This parameter can be a list, in which case the different + values are tested by cross-validation and the one giving the best + prediction score is used. Note that a good choice of list of + values for l1_ratio is often to put more values close to 1 + (i.e. Lasso) and less close to 0 (i.e. 
Ridge), as in ``[.1, .5, .7, + .9, .95, .99, 1]``. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If not provided, set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + verbose : bool or int, default=0 + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. Note that this is + used only if multiple values for l1_ratio are given. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + alpha_ : float + The amount of penalization chosen by cross validation. + + mse_path_ : ndarray of shape (n_alphas, n_folds) or \ + (n_l1_ratio, n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas) + The grid of alphas used for fitting, for each l1_ratio. + + l1_ratio_ : float + Best l1_ratio obtained by cross-validation. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + dual_gap_ : float + The dual gap at the end of the optimization for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNet : Multi-task L1/L2 ElasticNet with built-in cross-validation. + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + MultiTaskLassoCV : Multi-task Lasso model trained with L1 norm + as regularizer and built-in cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + In `fit`, once the best parameters `l1_ratio` and `alpha` are found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` and `y` arguments of the + `fit` method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskElasticNetCV(cv=3) + >>> clf.fit([[0,0], [1, 1], [2, 2]], + ... [[0, 0], [1, 1], [2, 2]]) + MultiTaskElasticNetCV(cv=3) + >>> print(clf.coef_) + [[0.52875032 0.46958558] + [0.52875032 0.46958558]] + >>> print(clf.intercept_) + [0.00166409 0.00166409] + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + "l1_ratio": [Interval(Real, 0, 1, closed="both"), "array-like"], + } + _parameter_constraints.pop("precompute") + _parameter_constraints.pop("positive") + + path = staticmethod(enet_path) + + def __init__( + self, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + max_iter=1000, + tol=1e-4, + cv=None, + copy_X=True, + verbose=0, + n_jobs=None, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.tol = tol + self.cv = cv + self.copy_X = copy_X + self.verbose = verbose + self.n_jobs = n_jobs + self.random_state = random_state + self.selection = selection + + def _get_estimator(self): + return MultiTaskElasticNet() + + def _is_multitask(self): + return True + + def _more_tags(self): + return {"multioutput_only": True} + + # This is necessary as LinearModelCV now supports sample_weight while + # MultiTaskElasticNet does not (yet). + def fit(self, X, y, **params): + """Fit MultiTaskElasticNet model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + y : ndarray of shape (n_samples, n_targets) + Training target variable. Will be cast to X's dtype if necessary. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns MultiTaskElasticNet instance. + """ + return super().fit(X, y, **params) + + +class MultiTaskLassoCV(RegressorMixin, LinearModelCV): + """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer. + + See glossary entry for :term:`cross-validation estimator`. 
+ + The optimization objective for MultiTaskLasso is:: + + (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.15 + + Parameters + ---------- + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If not provided, set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : bool or int, default=False + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. Note that this is + used only if multiple values for l1_ratio are given. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + alpha_ : float + The amount of penalization chosen by cross validation. + + mse_path_ : ndarray of shape (n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) + The grid of alphas used for fitting. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + dual_gap_ : float + The dual gap at the end of the optimization for the optimal alpha. 
+ + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 + mixed-norm as regularizer. + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in + cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` and `y` arguments of the + `fit` method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn.linear_model import MultiTaskLassoCV + >>> from sklearn.datasets import make_regression + >>> from sklearn.metrics import r2_score + >>> X, y = make_regression(n_targets=2, noise=4, random_state=0) + >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y) + >>> r2_score(y, reg.predict(X)) + 0.9994... + >>> reg.alpha_ + 0.5713... + >>> reg.predict(X[:1,]) + array([[153.7971..., 94.9015...]]) + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + } + _parameter_constraints.pop("precompute") + _parameter_constraints.pop("positive") + + path = staticmethod(lasso_path) + + def __init__( + self, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + random_state=None, + selection="cyclic", + ): + super().__init__( + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + copy_X=copy_X, + cv=cv, + verbose=verbose, + n_jobs=n_jobs, + random_state=random_state, + selection=selection, + ) + + def _get_estimator(self): + return MultiTaskLasso() + + def _is_multitask(self): + return True + + def _more_tags(self): + return {"multioutput_only": True} + + # This is necessary as LinearModelCV now supports sample_weight while + # MultiTaskElasticNet does not (yet). + def fit(self, X, y, **params): + """Fit MultiTaskLasso model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data. + y : ndarray of shape (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. 
+ """ + return super().fit(X, y, **params) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b82bbd77bcf9a16040ac2cebb3f655811bbff84 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py @@ -0,0 +1,15 @@ +# License: BSD 3 clause + +from .glm import ( + GammaRegressor, + PoissonRegressor, + TweedieRegressor, + _GeneralizedLinearRegressor, +) + +__all__ = [ + "_GeneralizedLinearRegressor", + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8f942351010db820e56a0ca6b473e8cba2c6f07 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf558bc6508565f77bebd7837d076ada6873322d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..014ff4a9445eb6a8f87068694b3e441074d30efa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9b431fd2377dba50a6fabd703ae7c0334033e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py @@ -0,0 +1,525 @@ +""" +Newton solver for Generalized Linear Models +""" + +# Author: Christian Lorentzen +# License: BSD 3 clause + +import warnings +from abc import ABC, abstractmethod + +import numpy as np +import scipy.linalg +import scipy.optimize + +from ..._loss.loss import HalfSquaredError +from ...exceptions import ConvergenceWarning +from ...utils.optimize import _check_optimize_result +from .._linear_loss import LinearModelLoss + + +class NewtonSolver(ABC): + """Newton solver for GLMs. + + This class implements Newton/2nd-order optimization routines for GLMs. Each Newton + iteration aims at finding the Newton step which is done by the inner solver. 
With + Hessian H, gradient g and coefficients coef, one step solves: + + H @ coef_newton = -g + + For our GLM / LinearModelLoss, we have gradient g and Hessian H: + + g = X.T @ loss.gradient + l2_reg_strength * coef + H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity + + Backtracking line search updates coef = coef_old + t * coef_newton for some t in + (0, 1]. + + This is a base class, actual implementations (child classes) may deviate from the + above pattern and use structure specific tricks. + + Usage pattern: + - initialize solver: sol = NewtonSolver(...) + - solve the problem: sol.solve(X, y, sample_weight) + + References + ---------- + - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization" + 2nd edition + https://doi.org/10.1007/978-0-387-40065-5 + + - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization." + Cambridge University Press, 2004. + https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Initial coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + + linear_loss : LinearModelLoss + The loss to be minimized. + + l2_reg_strength : float, default=0.0 + L2 regularization strength. + + tol : float, default=1e-4 + The optimization problem is solved when each of the following condition is + fulfilled: + 1. maximum |gradient| <= tol + 2. Newton decrement d: 1/2 * d^2 <= tol + + max_iter : int, default=100 + Maximum number of Newton steps allowed. + + n_threads : int, default=1 + Number of OpenMP threads to use for the computation of the Hessian and gradient + of the loss function. + + Attributes + ---------- + coef_old : ndarray of shape coef.shape + Coefficient of previous iteration. + + coef_newton : ndarray of shape coef.shape + Newton step. + + gradient : ndarray of shape coef.shape + Gradient of the loss w.r.t. the coefficients. + + gradient_old : ndarray of shape coef.shape + Gradient of previous iteration. + + loss_value : float + Value of objective function = loss + penalty. + + loss_value_old : float + Value of objective function of previous itertion. + + raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes) + + converged : bool + Indicator for convergence of the solver. + + iteration : int + Number of Newton steps, i.e. calls to inner_solve + + use_fallback_lbfgs_solve : bool + If set to True, the solver will resort to call LBFGS to finish the optimisation + procedure in case of convergence issues. + + gradient_times_newton : float + gradient @ coef_newton, set in inner_solve and used by line_search. If the + Newton step is a descent direction, this is negative. 
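+
+     A minimal sketch of one such Newton step on a toy quadratic, with a
+     hypothetical Hessian ``H`` and gradient ``g`` (outside of any GLM
+     machinery)::
+
+         import numpy as np
+         from scipy.linalg import solve
+
+         H = np.array([[2.0, 0.0], [0.0, 4.0]])
+         g = np.array([1.0, -2.0])
+         coef_newton = solve(H, -g, assume_a="sym")  # array([-0.5, 0.5])
+         g @ coef_newton  # -1.5 < 0, i.e. a descent direction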
+ """ + + def __init__( + self, + *, + coef, + linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True), + l2_reg_strength=0.0, + tol=1e-4, + max_iter=100, + n_threads=1, + verbose=0, + ): + self.coef = coef + self.linear_loss = linear_loss + self.l2_reg_strength = l2_reg_strength + self.tol = tol + self.max_iter = max_iter + self.n_threads = n_threads + self.verbose = verbose + + def setup(self, X, y, sample_weight): + """Precomputations + + If None, initializes: + - self.coef + Sets: + - self.raw_prediction + - self.loss_value + """ + _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X) + self.loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=self.raw_prediction, + ) + + @abstractmethod + def update_gradient_hessian(self, X, y, sample_weight): + """Update gradient and Hessian.""" + + @abstractmethod + def inner_solve(self, X, y, sample_weight): + """Compute Newton step. + + Sets: + - self.coef_newton + - self.gradient_times_newton + """ + + def fallback_lbfgs_solve(self, X, y, sample_weight): + """Fallback solver in case of emergency. + + If a solver detects convergence problems, it may fall back to this methods in + the hope to exit with success instead of raising an error. + + Sets: + - self.coef + - self.converged + """ + opt_res = scipy.optimize.minimize( + self.linear_loss.loss_gradient, + self.coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + "ftol": 64 * np.finfo(np.float64).eps, + }, + args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res) + self.coef = opt_res.x + self.converged = opt_res.status == 0 + + def line_search(self, X, y, sample_weight): + """Backtracking line search. + + Sets: + - self.coef_old + - self.coef + - self.loss_value_old + - self.loss_value + - self.gradient_old + - self.gradient + - self.raw_prediction + """ + # line search parameters + beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11 + eps = 16 * np.finfo(self.loss_value.dtype).eps + t = 1 # step size + + # gradient_times_newton = self.gradient @ self.coef_newton + # was computed in inner_solve. + armijo_term = sigma * self.gradient_times_newton + _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw( + self.coef_newton, X + ) + + self.coef_old = self.coef + self.loss_value_old = self.loss_value + self.gradient_old = self.gradient + + # np.sum(np.abs(self.gradient_old)) + sum_abs_grad_old = -1 + + is_verbose = self.verbose >= 2 + if is_verbose: + print(" Backtracking Line Search") + print(f" eps=10 * finfo.eps={eps}") + + for i in range(21): # until and including t = beta**20 ~ 1e-6 + self.coef = self.coef_old + t * self.coef_newton + raw = self.raw_prediction + t * raw_prediction_newton + self.loss_value, self.gradient = self.linear_loss.loss_gradient( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=raw, + ) + # Note: If coef_newton is too large, loss_gradient may produce inf values, + # potentially accompanied by a RuntimeWarning. + # This case will be captured by the Armijo condition. + + # 1. Check Armijo / sufficient decrease condition. + # The smaller (more negative) the better. 
+ loss_improvement = self.loss_value - self.loss_value_old + check = loss_improvement <= t * armijo_term + if is_verbose: + print( + f" line search iteration={i+1}, step size={t}\n" + f" check loss improvement <= armijo term: {loss_improvement} " + f"<= {t * armijo_term} {check}" + ) + if check: + break + # 2. Deal with relative loss differences around machine precision. + tiny_loss = np.abs(self.loss_value_old * eps) + check = np.abs(loss_improvement) <= tiny_loss + if is_verbose: + print( + " check loss |improvement| <= eps * |loss_old|:" + f" {np.abs(loss_improvement)} <= {tiny_loss} {check}" + ) + if check: + if sum_abs_grad_old < 0: + sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1) + # 2.1 Check sum of absolute gradients as alternative condition. + sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1) + check = sum_abs_grad < sum_abs_grad_old + if is_verbose: + print( + " check sum(|gradient|) < sum(|gradient_old|): " + f"{sum_abs_grad} < {sum_abs_grad_old} {check}" + ) + if check: + break + + t *= beta + else: + warnings.warn( + ( + f"Line search of Newton solver {self.__class__.__name__} at" + f" iteration #{self.iteration} did no converge after 21 line search" + " refinement iterations. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print(" Line search did not converge and resorts to lbfgs instead.") + self.use_fallback_lbfgs_solve = True + return + + self.raw_prediction = raw + + def check_convergence(self, X, y, sample_weight): + """Check for convergence. + + Sets self.converged. + """ + if self.verbose: + print(" Check Convergence") + # Note: Checking maximum relative change of coefficient <= tol is a bad + # convergence criterion because even a large step could have brought us close + # to the true minimum. + # coef_step = self.coef - self.coef_old + # check = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old))) + + # 1. Criterion: maximum |gradient| <= tol + # The gradient was already updated in line_search() + check = np.max(np.abs(self.gradient)) + if self.verbose: + print(f" 1. max |gradient| {check} <= {self.tol}") + if check > self.tol: + return + + # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol + # d = sqrt(grad @ hessian^-1 @ grad) + # = sqrt(coef_newton @ hessian @ coef_newton) + # See Boyd, Vanderberghe (2009) "Convex Optimization" Chapter 9.5.1. + d2 = self.coef_newton @ self.hessian @ self.coef_newton + if self.verbose: + print(f" 2. Newton decrement {0.5 * d2} <= {self.tol}") + if 0.5 * d2 > self.tol: + return + + if self.verbose: + loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + ) + print(f" Solver did converge at loss = {loss_value}.") + self.converged = True + + def finalize(self, X, y, sample_weight): + """Finalize the solvers results. + + Some solvers may need this, others not. + """ + pass + + def solve(self, X, y, sample_weight): + """Solve the optimization problem. + + This is the main routine. + + Order of calls: + self.setup() + while iteration: + self.update_gradient_hessian() + self.inner_solve() + self.line_search() + self.check_convergence() + self.finalize() + + Returns + ------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Solution of the optimization problem. 
+ """ + # setup usually: + # - initializes self.coef if needed + # - initializes and calculates self.raw_predictions, self.loss_value + self.setup(X=X, y=y, sample_weight=sample_weight) + + self.iteration = 1 + self.converged = False + self.use_fallback_lbfgs_solve = False + + while self.iteration <= self.max_iter and not self.converged: + if self.verbose: + print(f"Newton iter={self.iteration}") + + self.use_fallback_lbfgs_solve = False # Fallback solver. + + # 1. Update Hessian and gradient + self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight) + + # TODO: + # if iteration == 1: + # We might stop early, e.g. we already are close to the optimum, + # usually detected by zero gradients at this stage. + + # 2. Inner solver + # Calculate Newton step/direction + # This usually sets self.coef_newton and self.gradient_times_newton. + self.inner_solve(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 3. Backtracking line search + # This usually sets self.coef_old, self.coef, self.loss_value_old + # self.loss_value, self.gradient_old, self.gradient, + # self.raw_prediction. + self.line_search(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 4. Check convergence + # Sets self.converged. + self.check_convergence(X=X, y=y, sample_weight=sample_weight) + + # 5. Next iteration + self.iteration += 1 + + if not self.converged: + if self.use_fallback_lbfgs_solve: + # Note: The fallback solver circumvents check_convergence and relies on + # the convergence checks of lbfgs instead. Enough warnings have been + # raised on the way. + self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight) + else: + warnings.warn( + ( + f"Newton solver did not converge after {self.iteration - 1} " + "iterations." + ), + ConvergenceWarning, + ) + + self.iteration -= 1 + self.finalize(X=X, y=y, sample_weight=sample_weight) + return self.coef + + +class NewtonCholeskySolver(NewtonSolver): + """Cholesky based Newton solver. + + Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear + solver. + """ + + def setup(self, X, y, sample_weight): + super().setup(X=X, y=y, sample_weight=sample_weight) + n_dof = X.shape[1] + if self.linear_loss.fit_intercept: + n_dof += 1 + self.gradient = np.empty_like(self.coef) + self.hessian = np.empty_like(self.coef, shape=(n_dof, n_dof)) + + def update_gradient_hessian(self, X, y, sample_weight): + _, _, self.hessian_warning = self.linear_loss.gradient_hessian( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + gradient_out=self.gradient, + hessian_out=self.hessian, + raw_prediction=self.raw_prediction, # this was updated in line_search + ) + + def inner_solve(self, X, y, sample_weight): + if self.hessian_warning: + warnings.warn( + ( + f"The inner solver of {self.__class__.__name__} detected a " + "pointwise hessian with many negative values at iteration " + f"#{self.iteration}. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print( + " The inner solver detected a pointwise Hessian with many " + "negative values and resorts to lbfgs instead." 
+ ) + self.use_fallback_lbfgs_solve = True + return + + try: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + self.coef_newton = scipy.linalg.solve( + self.hessian, -self.gradient, check_finite=False, assume_a="sym" + ) + self.gradient_times_newton = self.gradient @ self.coef_newton + if self.gradient_times_newton > 0: + if self.verbose: + print( + " The inner solver found a Newton step that is not a " + "descent direction and resorts to LBFGS steps instead." + ) + self.use_fallback_lbfgs_solve = True + return + except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e: + warnings.warn( + f"The inner solver of {self.__class__.__name__} stumbled upon a " + "singular or very ill-conditioned Hessian matrix at iteration " + f"#{self.iteration}. It will now resort to lbfgs instead.\n" + "Further options are to use another solver or to avoid such situation " + "in the first place. Possible remedies are removing collinear features" + " of X or increasing the penalization strengths.\n" + "The original Linear Algebra message was:\n" + + str(e), + scipy.linalg.LinAlgWarning, + ) + # Possible causes: + # 1. hess_pointwise is negative. But this is already taken care in + # LinearModelLoss.gradient_hessian. + # 2. X is singular or ill-conditioned + # This might be the most probable cause. + # + # There are many possible ways to deal with this situation. Most of them + # add, explicitly or implicitly, a matrix to the hessian to make it + # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed. + # Instead, we resort to lbfgs. + if self.verbose: + print( + " The inner solver stumbled upon an singular or ill-conditioned " + "Hessian matrix and resorts to LBFGS instead." + ) + self.use_fallback_lbfgs_solve = True + return diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py new file mode 100644 index 0000000000000000000000000000000000000000..4cac889a4da518e3116c3243be5d3701c34d1b68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py @@ -0,0 +1,904 @@ +""" +Generalized Linear Models with Exponential Dispersion Family +""" + +# Author: Christian Lorentzen +# some parts and tricks stolen from other sklearn files. +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np +import scipy.optimize + +from ..._loss.loss import ( + HalfGammaLoss, + HalfPoissonLoss, + HalfSquaredError, + HalfTweedieLoss, + HalfTweedieLossIdentity, +) +from ...base import BaseEstimator, RegressorMixin, _fit_context +from ...utils import check_array +from ...utils._openmp_helpers import _openmp_effective_n_threads +from ...utils._param_validation import Hidden, Interval, StrOptions +from ...utils.optimize import _check_optimize_result +from ...utils.validation import _check_sample_weight, check_is_fitted +from .._linear_loss import LinearModelLoss +from ._newton_solver import NewtonCholeskySolver, NewtonSolver + + +class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator): + """Regression via a penalized Generalized Linear Model (GLM). + + GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and + predicting the mean of the target y as y_pred=h(X*w) with coefficients w. 
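inner_solve() above obtains the Newton step from scipy.linalg.solve(hessian, -gradient, assume_a="sym") and falls back to lbfgs when the Hessian is singular/ill-conditioned or the step is not a descent direction. A minimal sketch of that pattern in isolation; the error handling is simplified and the arrays are made up:

import warnings
import numpy as np
import scipy.linalg

def newton_step(hessian, gradient):
    """Return (step, ok). ok=False signals that a fallback solver should be used."""
    try:
        with warnings.catch_warnings():
            # Treat ill-conditioning warnings as hard errors, as the solver does.
            warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
            step = scipy.linalg.solve(hessian, -gradient, assume_a="sym")
    except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning):
        return None, False
    if gradient @ step > 0:
        # Not a descent direction (possible for an indefinite Hessian).
        return None, False
    return step, True

# Made-up, well-conditioned example: the step should solve H @ step = -g.
H = np.array([[4.0, 1.0], [1.0, 3.0]])
g = np.array([1.0, -2.0])
step, ok = newton_step(H, g)
print(ok, np.allclose(H @ step, -g))  # True True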
+ Therefore, the fit minimizes the following objective function with L2 priors as + regularizer:: + + 1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w)) + 1/2 * alpha * ||w||_2^2 + + with inverse link function h, s=sample_weight and per observation (unit) deviance + deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative + log-likelihood up to a constant (in w) term. + The parameter ``alpha`` corresponds to the lambda parameter in glmnet. + + Instead of implementing the EDM family and a link function separately, we directly + use the loss functions `from sklearn._loss` which have the link functions included + in them for performance reasons. We pick the loss functions that implement + (1/2 times) EDM deviances. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the penalty term and thus determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (X @ coef + intercept). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_``. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + _base_loss : BaseLoss, default=HalfSquaredError() + This is set during fit via `self._get_loss()`. + A `_base_loss` contains a specific loss function as well as the link + function. The loss to be minimized specifies the distributional assumption of + the GLM, i.e. the distribution from the EDM. 
Here are some examples: + + ======================= ======== ========================== + _base_loss Link Target Domain + ======================= ======== ========================== + HalfSquaredError identity y any real number + HalfPoissonLoss log 0 <= y + HalfGammaLoss log 0 < y + HalfTweedieLoss log dependent on tweedie power + HalfTweedieLossIdentity identity dependent on tweedie power + ======================= ======== ========================== + + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link, + we have `y_pred = exp(X @ coeff + intercept)`. + """ + + # We allow for NewtonSolver classes for the "solver" parameter but do not + # make them public in the docstrings. This facilitates testing and + # benchmarking. + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0.0, None, closed="left")], + "fit_intercept": ["boolean"], + "solver": [ + StrOptions({"lbfgs", "newton-cholesky"}), + Hidden(type), + ], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "warm_start": ["boolean"], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.solver = solver + self.max_iter = max_iter + self.tol = tol + self.warm_start = warm_start + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit a Generalized Linear Model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Fitted model. + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csc", "csr"], + dtype=[np.float64, np.float32], + y_numeric=True, + multi_output=False, + ) + + # required by losses + if self.solver == "lbfgs": + # lbfgs will force coef and therefore raw_prediction to be float64. The + # base_loss needs y, X @ coef and sample_weight all of same dtype + # (and contiguous). + loss_dtype = np.float64 + else: + loss_dtype = min(max(y.dtype, X.dtype), np.float64) + y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. + sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype) + + n_samples, n_features = X.shape + self._base_loss = self._get_loss() + + linear_loss = LinearModelLoss( + base_loss=self._base_loss, + fit_intercept=self.fit_intercept, + ) + + if not linear_loss.base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {self._base_loss.__class__.__name__!r}." + ) + + # TODO: if alpha=0 check that X is not rank deficient + + # NOTE: Rescaling of sample_weight: + # We want to minimize + # obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance) + # + 1/2 * alpha * L2, + # with + # deviance = 2 * loss. + # The objective is invariant to multiplying sample_weight by a constant. We + # could choose this constant such that sum(sample_weight) = 1 in order to end + # up with + # obj = sum(sample_weight * loss) + 1/2 * alpha * L2. 
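Because the objective averages the loss with sample_weight as weights, multiplying all weights by the same constant leaves the minimizer unchanged (the sample-weight consistency test later in this patch exercises the same invariance). A quick illustration with made-up data; the estimator name is the public scikit-learn API:

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.rand(20, 3)
y = rng.poisson(lam=2.0, size=20).astype(float)

sw = np.ones(20)
coef_1 = PoissonRegressor(alpha=1.0, tol=1e-10, max_iter=1000).fit(X, y, sample_weight=sw).coef_
# Scaling every weight by the same constant does not change the averaged objective.
coef_2 = PoissonRegressor(alpha=1.0, tol=1e-10, max_iter=1000).fit(X, y, sample_weight=3 * sw).coef_
print(np.allclose(coef_1, coef_2))  # True (up to solver tolerance)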
+ # But LinearModelLoss.loss() already computes + # average(loss, weights=sample_weight) + # Thus, without rescaling, we have + # obj = LinearModelLoss.loss(...) + + if self.warm_start and hasattr(self, "coef_"): + if self.fit_intercept: + # LinearModelLoss needs intercept at the end of coefficient array. + coef = np.concatenate((self.coef_, np.array([self.intercept_]))) + else: + coef = self.coef_ + coef = coef.astype(loss_dtype, copy=False) + else: + coef = linear_loss.init_zero_coef(X, dtype=loss_dtype) + if self.fit_intercept: + coef[-1] = linear_loss.base_loss.link.link( + np.average(y, weights=sample_weight) + ) + + l2_reg_strength = self.alpha + n_threads = _openmp_effective_n_threads() + + # Algorithms for optimization: + # Note again that our losses implement 1/2 * deviance. + if self.solver == "lbfgs": + func = linear_loss.loss_gradient + + opt_res = scipy.optimize.minimize( + func, + coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + # The constant 64 was found empirically to pass the test suite. + # The point is that ftol is very small, but a bit larger than + # machine precision for float64, which is the dtype used by lbfgs. + "ftol": 64 * np.finfo(float).eps, + }, + args=(X, y, sample_weight, l2_reg_strength, n_threads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res) + coef = opt_res.x + elif self.solver == "newton-cholesky": + sol = NewtonCholeskySolver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + verbose=self.verbose, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + elif issubclass(self.solver, NewtonSolver): + sol = self.solver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + else: + raise ValueError(f"Invalid solver={self.solver}.") + + if self.fit_intercept: + self.intercept_ = coef[-1] + self.coef_ = coef[:-1] + else: + # set intercept to zero as the other linear models do + self.intercept_ = 0.0 + self.coef_ = coef + + return self + + def _linear_predictor(self, X): + """Compute the linear_predictor = `X @ coef_ + intercept_`. + + Note that we often use the term raw_prediction instead of linear predictor. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values of linear predictor. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "coo"], + dtype=[np.float64, np.float32], + ensure_2d=True, + allow_nd=False, + reset=False, + ) + return X @ self.coef_ + self.intercept_ + + def predict(self, X): + """Predict using GLM with feature matrix X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values. + """ + # check_array is done in _linear_predictor + raw_prediction = self._linear_predictor(X) + y_pred = self._base_loss.link.inverse(raw_prediction) + return y_pred + + def score(self, X, y, sample_weight=None): + """Compute D^2, the percentage of deviance explained. + + D^2 is a generalization of the coefficient of determination R^2. 
+ R^2 uses squared error and D^2 uses the deviance of this GLM, see the + :ref:`User Guide `. + + D^2 is defined as + :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, + :math:`D_{null}` is the null deviance, i.e. the deviance of a model + with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. + The mean :math:`\\bar{y}` is averaged by sample_weight. + Best possible score is 1.0 and it can be negative (because the model + can be arbitrarily worse). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) + True values of target. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + D^2 of self.predict(X) w.r.t. y. + """ + # TODO: Adapt link to User Guide in the docstring, once + # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. + # + # Note, default score defined in RegressorMixin is R^2 score. + # TODO: make D^2 a score function in module metrics (and thereby get + # input validation and so on) + raw_prediction = self._linear_predictor(X) # validates X + # required by losses + y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. + sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) + + base_loss = self._base_loss + + if not base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {base_loss.__name__}." + ) + + constant = np.average( + base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None), + weights=sample_weight, + ) + + # Missing factor of 2 in deviance cancels out. + deviance = base_loss( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=1, + ) + y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) + deviance_null = base_loss( + y_true=y, + raw_prediction=np.tile(y_mean, y.shape[0]), + sample_weight=sample_weight, + n_threads=1, + ) + return 1 - (deviance + constant) / (deviance_null + constant) + + def _more_tags(self): + try: + # Create instance of BaseLoss if fit wasn't called yet. This is necessary as + # TweedieRegressor might set the used loss during fit different from + # self._base_loss. + base_loss = self._get_loss() + return {"requires_positive_y": not base_loss.in_y_true_range(-1.0)} + except (ValueError, AttributeError, TypeError): + # This happens when the link or power parameter of TweedieRegressor is + # invalid. We fallback on the default tags in that case. + return {} + + def _get_loss(self): + """This is only necessary because of the link and power arguments of the + TweedieRegressor. + + Note that we do not need to pass sample_weight to the loss class as this is + only needed to set loss.constant_hessian on which GLMs do not rely. + """ + return HalfSquaredError() + + +class PoissonRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Poisson distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). 
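score() above returns D^2, i.e. one minus the ratio of the model deviance to the null deviance of an intercept-only model predicting the weighted mean of y. As a rough sketch of the same definition computed by hand for a Poisson GLM in the unweighted case, using the public mean_poisson_deviance metric and made-up data:

import numpy as np
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import mean_poisson_deviance

rng = np.random.RandomState(42)
X = rng.rand(50, 2)
y = rng.poisson(lam=3.0, size=50).astype(float)

model = PoissonRegressor(alpha=0.1).fit(X, y)
y_pred = model.predict(X)

# D^2 = 1 - D(y, y_pred) / D(y, y_mean); the common factor of 2 in the deviance cancels.
deviance = mean_poisson_deviance(y, y_pred)
deviance_null = mean_poisson_deviance(y, np.full_like(y, y.mean()))
d2_by_hand = 1 - deviance / deviance_null

print(d2_by_hand, model.score(X, y))  # should agree closely in this unweighted setting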
+ Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Actual number of iterations used in the solver. + + See Also + -------- + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.PoissonRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [12, 17, 22, 21] + >>> clf.fit(X, y) + PoissonRegressor() + >>> clf.score(X, y) + 0.990... + >>> clf.coef_ + array([0.121..., 0.158...]) + >>> clf.intercept_ + 2.088... + >>> clf.predict([[1, 1], [3, 4]]) + array([10.676..., 21.875...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfPoissonLoss() + + +class GammaRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Gamma distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. 
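Both solvers minimize the same objective, so on a well-conditioned problem 'lbfgs' and 'newton-cholesky' should agree to within the tolerance; the parametrized regression tests later in this patch check exactly that. A small illustration with made-up data:

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 4)
y = rng.poisson(lam=np.exp(X @ np.array([0.2, -0.1, 0.3, 0.0]))).astype(float)

coef = {}
for solver in ("lbfgs", "newton-cholesky"):
    reg = PoissonRegressor(alpha=1.0, solver=solver, tol=1e-10, max_iter=1000).fit(X, y)
    coef[solver] = np.r_[reg.intercept_, reg.coef_]

print(np.allclose(coef["lbfgs"], coef["newton-cholesky"], rtol=1e-5))  # expected: True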
``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor `X @ coef_ + intercept_`. + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for `coef_` and `intercept_`. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_iter_ : int + Actual number of iterations used in the solver. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.GammaRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [19, 26, 33, 30] + >>> clf.fit(X, y) + GammaRegressor() + >>> clf.score(X, y) + 0.773... + >>> clf.coef_ + array([0.072..., 0.066...]) + >>> clf.intercept_ + 2.896... + >>> clf.predict([[1, 0], [2, 8]]) + array([19.483..., 35.795...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfGammaLoss() + + +class TweedieRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Tweedie distribution. 
+ + This estimator can be used to model different GLMs depending on the + ``power`` parameter, which determines the underlying distribution. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + power : float, default=0 + The power determines the underlying target distribution according + to the following table: + + +-------+------------------------+ + | Power | Distribution | + +=======+========================+ + | 0 | Normal | + +-------+------------------------+ + | 1 | Poisson | + +-------+------------------------+ + | (1,2) | Compound Poisson Gamma | + +-------+------------------------+ + | 2 | Gamma | + +-------+------------------------+ + | 3 | Inverse Gaussian | + +-------+------------------------+ + + For ``0 < power < 1``, no distribution exists. + + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). + + link : {'auto', 'identity', 'log'}, default='auto' + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets + the link depending on the chosen `power` parameter as follows: + + - 'identity' for ``power <= 0``, e.g. for the Normal distribution + - 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian + distributions + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + GammaRegressor : Generalized Linear Model with a Gamma distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.TweedieRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [2, 3.5, 5, 5.5] + >>> clf.fit(X, y) + TweedieRegressor() + >>> clf.score(X, y) + 0.839... + >>> clf.coef_ + array([0.599..., 0.299...]) + >>> clf.intercept_ + 1.600... + >>> clf.predict([[1, 1], [3, 4]]) + array([2.500..., 4.599...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints, + "power": [Interval(Real, None, None, closed="neither")], + "link": [StrOptions({"auto", "identity", "log"})], + } + + def __init__( + self, + *, + power=0.0, + alpha=1.0, + fit_intercept=True, + link="auto", + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + self.link = link + self.power = power + + def _get_loss(self): + if self.link == "auto": + if self.power <= 0: + # identity link + return HalfTweedieLossIdentity(power=self.power) + else: + # log link + return HalfTweedieLoss(power=self.power) + + if self.link == "log": + return HalfTweedieLoss(power=self.power) + + if self.link == "identity": + return HalfTweedieLossIdentity(power=self.power) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..588cf7e93eef01b82eaf24c87c36df22ea21dade --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py @@ -0,0 +1 @@ +# License: BSD 3 clause diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d04d8fa074621a0c2c31f5e70ca6873a8484f3ee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3fdb1fa2877c90bc4c3f24c2e163dbebe57b4dd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py new file mode 100644 index 0000000000000000000000000000000000000000..5256a5f37027294bf0e3545d5a42bd77715e4177 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py @@ -0,0 +1,1112 @@ +# Authors: Christian Lorentzen +# +# License: BSD 3 clause + +import itertools +import warnings +from functools import partial + +import numpy as np +import pytest +import scipy +from numpy.testing import assert_allclose +from scipy import linalg +from scipy.optimize import minimize, root + +from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss +from sklearn._loss.link import IdentityLink, LogLink +from sklearn.base import clone +from sklearn.datasets import make_low_rank_matrix, make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + GammaRegressor, + PoissonRegressor, + Ridge, + TweedieRegressor, +) +from sklearn.linear_model._glm import _GeneralizedLinearRegressor +from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver +from sklearn.linear_model._linear_loss import LinearModelLoss +from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance +from sklearn.model_selection import train_test_split + +SOLVERS = ["lbfgs", "newton-cholesky"] + + +class BinomialRegressor(_GeneralizedLinearRegressor): + def _get_loss(self): + return HalfBinomialLoss() + + +def _special_minimize(fun, grad, x, tol_NM, tol): + # Find good starting point by Nelder-Mead + res_NM = minimize( + fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM} + ) + # Now refine via root finding on the gradient of the function, which is + # more precise than minimizing the function itself. + res = root( + grad, + res_NM.x, + method="lm", + options={"ftol": tol, "xtol": tol, "gtol": tol}, + ) + return res.x + + +@pytest.fixture(scope="module") +def regression_data(): + X, y = make_regression( + n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2 + ) + return X, y + + +@pytest.fixture( + params=itertools.product( + ["long", "wide"], + [ + BinomialRegressor(), + PoissonRegressor(), + GammaRegressor(), + # TweedieRegressor(power=3.0), # too difficult + # TweedieRegressor(power=0, link="log"), # too difficult + TweedieRegressor(power=1.5), + ], + ), + ids=lambda param: f"{param[0]}-{param[1]}", +) +def glm_dataset(global_random_seed, request): + """Dataset with GLM solutions, well conditioned X. + + This is inspired by ols_ridge_dataset in test_ridge.py. + + The construction is based on the SVD decomposition of X = U S V'. + + Parameters + ---------- + type : {"long", "wide"} + If "long", then n_samples > n_features. + If "wide", then n_features > n_samples. + model : a GLM model + + For "wide", we return the minimum norm solution: + + min ||w||_2 subject to w = argmin deviance(X, y, w) + + Note that the deviance is always minimized if y = inverse_link(X w) is possible to + achieve, which it is in the wide data case. Therefore, we can construct the + solution with minimum norm like (wide) OLS: + + min ||w||_2 subject to link(y) = raw_prediction = X w + + Returns + ------- + model : GLM model + X : ndarray + Last column of 1, i.e. intercept. + y : ndarray + coef_unpenalized : ndarray + Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in + case of ambiguity) + Last coefficient is intercept. + coef_penalized : ndarray + GLM solution with alpha=l2_reg_strength=1, i.e. + min 1/n * sum(loss) + ||w[:-1]||_2^2. + Last coefficient is intercept. + l2_reg_strength : float + Always equal 1. 
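The fixture docstring above describes constructing, for the wide case, the minimum-norm coefficient vector satisfying raw_prediction = X w. A standalone sketch of that construction with the pseudoinverse, which is equivalent to the V s^-1 U' expression used in the fixture body; shapes and data here are made up:

import numpy as np

rng = np.random.RandomState(0)
n_samples, n_features = 4, 12  # wide: more features than samples
X = rng.rand(n_samples, n_features)
raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)

# Minimum-norm w with X @ w == raw_prediction: w = pinv(X) @ raw_prediction = V s^-1 U' raw_prediction.
w = np.linalg.pinv(X) @ raw_prediction

print(np.allclose(X @ w, raw_prediction))  # the underdetermined system is satisfied exactly
# Any other solution differs by a null-space component, which can only increase ||w||_2.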
+ """ + data_type, model = request.param + # Make larger dim more than double as big as the smaller one. + # This helps when constructing singular matrices like (X, X). + if data_type == "long": + n_samples, n_features = 12, 4 + else: + n_samples, n_features = 4, 12 + k = min(n_samples, n_features) + rng = np.random.RandomState(global_random_seed) + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features, + effective_rank=k, + tail_strength=0.1, + random_state=rng, + ) + X[:, -1] = 1 # last columns acts as intercept + U, s, Vt = linalg.svd(X, full_matrices=False) + assert np.all(s > 1e-3) # to be sure + assert np.max(s) / np.min(s) < 100 # condition number of X + + if data_type == "long": + coef_unpenalized = rng.uniform(low=1, high=3, size=n_features) + coef_unpenalized *= rng.choice([-1, 1], size=n_features) + raw_prediction = X @ coef_unpenalized + else: + raw_prediction = rng.uniform(low=-3, high=3, size=n_samples) + # minimum norm solution min ||w||_2 such that raw_prediction = X w: + # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction + coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True) + sw = np.full(shape=n_samples, fill_value=1 / n_samples) + y = linear_loss.base_loss.link.inverse(raw_prediction) + + # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with + # optimizer. Note that the problem is well conditioned such that we get accurate + # results. + l2_reg_strength = 1 + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_with_intercept = _special_minimize( + fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14 + ) + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False) + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_without_intercept = _special_minimize( + fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14 + ) + + # To be sure + assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm( + coef_unpenalized + ) + + return ( + model, + X, + y, + coef_unpenalized, + coef_penalized_with_intercept, + coef_penalized_without_intercept, + l2_reg_strength, + ) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_regression(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + model.fit(X, y) + + rtol = 5e-5 if solver == "lbfgs" else 1e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + # Same with sample_weight. 
+ model = ( + clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) + ) + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on hstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2. + For long X, [X, X] is still a long but singular matrix. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha / 2, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1) + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + with warnings.catch_warnings(): + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.simplefilter("ignore", ConvergenceWarning) + model.fit(X, y) + + rtol = 2e-4 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on vstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X], [y] + [X], [y] with 1 * alpha. + It is the same alpha as the average loss stays the same. + For wide X, [X', X'] is a singular matrix. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + model.fit(X, y) + + rtol = 3e-5 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + Note: This checks the minimum norm solution for wide X, i.e. 
+ n_samples < n_features: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails + # for the wide/fat case with n_features > n_samples. Most current GLM solvers do + # NOT return the minimum norm solution with fit_intercept=True. + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-7 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 5e-5 + if solver == "newton-cholesky": + rtol = 5e-4 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if norm_model < (1 + 1e-12) * norm_solution: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + elif solver == "lbfgs" and fit_intercept: + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + assert norm_model > (1 + 1e-12) * norm_solution + + # See https://github.com/scikit-learn/scikit-learn/issues/23670. + # Note: Even adding a tiny penalty does not give the minimal norm solution. + # XXX: We could have naively expected LBFGS to find the minimal norm + # solution by adding a very small penalty. Even that fails for a reason we + # do not properly understand at this point. + else: + # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm + # solution on this problem. + # XXX: Do we have any theoretical guarantees why this should be the case? + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X, X]/2. 
+ For long X, [X, X] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + intercept = coef[-1] + coef = coef[:-1] + if n_samples > n_features: + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + else: + # To know the minimum norm solution, we keep one intercept column and do + # not divide by 2. Later on, we must take special care. + X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] + else: + intercept = 0 + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + + with warnings.catch_warnings(): + if solver.startswith("newton"): + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if fit_intercept and n_samples < n_features: + # Here we take special care. + model_intercept = 2 * model.intercept_ + model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + model_intercept = model.intercept_ + model_coef = model.coef_ + + if n_samples > n_features: + assert model_intercept == pytest.approx(intercept) + rtol = 1e-4 + assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky": + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + norm_solution = np.linalg.norm( + 0.5 * np.r_[intercept, intercept, coef, coef] + ) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + assert norm_model > (1 + 1e-12) * norm_solution + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + assert model_intercept == pytest.approx(intercept, rel=5e-6) + assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X], [y] + [X], [y]. 
+ For wide X, [X', X'] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-6 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if not (norm_model > (1 + 1e-12) * norm_solution): + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=1e-4) + elif solver == "lbfgs" and fit_intercept: + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + assert norm_model > (1 + 1e-12) * norm_solution + else: + rtol = 1e-5 if solver == "newton-cholesky" else 1e-4 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +def test_sample_weights_validation(): + """Test the raised errors in the validation of sample_weight.""" + # scalar value but not positive + X = [[1]] + y = [1] + weights = 0 + glm = _GeneralizedLinearRegressor() + + # Positive weights are accepted + glm.fit(X, y, sample_weight=1) + + # 2d array + weights = [[0]] + with pytest.raises(ValueError, match="must be 1D array or scalar"): + glm.fit(X, y, weights) + + # 1d but wrong length + weights = [1, 0] + msg = r"sample_weight.shape == \(2,\), expected \(1,\)!" 
+ with pytest.raises(ValueError, match=msg): + glm.fit(X, y, weights) + + +@pytest.mark.parametrize( + "glm", + [ + TweedieRegressor(power=3), + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=1.5), + ], +) +def test_glm_wrong_y_range(glm): + y = np.array([-1, 2]) + X = np.array([[1], [1]]) + msg = r"Some value\(s\) of y are out of the valid range of the loss" + with pytest.raises(ValueError, match=msg): + glm.fit(X, y) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_identity_regression(fit_intercept): + """Test GLM regression with identity link on a simple dataset.""" + coef = [1.0, 2.0] + X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T + y = np.dot(X, coef) + glm = _GeneralizedLinearRegressor( + alpha=0, + fit_intercept=fit_intercept, + tol=1e-12, + ) + if fit_intercept: + glm.fit(X[:, 1:], y) + assert_allclose(glm.coef_, coef[1:], rtol=1e-10) + assert_allclose(glm.intercept_, coef[0], rtol=1e-10) + else: + glm.fit(X, y) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("alpha", [0.0, 1.0]) +@pytest.mark.parametrize( + "GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor] +) +def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator): + """Test that the impact of sample_weight is consistent""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + glm_params = dict(alpha=alpha, fit_intercept=fit_intercept) + + glm = GLMEstimator(**glm_params).fit(X, y) + coef = glm.coef_.copy() + + # sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # sample_weight are normalized to 1 so, scaling them has no effect + sample_weight = 2 * np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # setting one element of sample_weight to 0 is equivalent to removing + # the corresponding sample + sample_weight = np.ones(y.shape) + sample_weight[-1] = 0 + glm.fit(X, y, sample_weight=sample_weight) + coef1 = glm.coef_.copy() + glm.fit(X[:-1], y[:-1]) + assert_allclose(glm.coef_, coef1, rtol=1e-12) + + # check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = np.ones(len(y)) + sample_weight_1[: n_samples // 2] = 2 + + glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1) + + glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None) + assert_allclose(glm1.coef_, glm2.coef_) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize( + "estimator", + [ + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=3.0), + TweedieRegressor(power=0, link="log"), + TweedieRegressor(power=1.5), + TweedieRegressor(power=4.5), + ], +) +def test_glm_log_regression(solver, fit_intercept, estimator): + """Test GLM regression with log link on a simple dataset.""" + coef = [0.2, -0.1] + X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T + y = np.exp(np.dot(X, coef)) + glm = clone(estimator).set_params( + alpha=0, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-8, + ) + if fit_intercept: + res = 
glm.fit(X[:, :-1], y) + assert_allclose(res.coef_, coef[:-1], rtol=1e-6) + assert_allclose(res.intercept_, coef[-1], rtol=1e-6) + else: + res = glm.fit(X, y) + assert_allclose(res.coef_, coef, rtol=2e-6) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_warm_start(solver, fit_intercept, global_random_seed): + n_samples, n_features = 100, 10 + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features - 2, + bias=fit_intercept * 1.0, + noise=1.0, + random_state=global_random_seed, + ) + y = np.abs(y) # Poisson requires non-negative targets. + alpha = 1 + params = { + "solver": solver, + "fit_intercept": fit_intercept, + "tol": 1e-10, + } + + glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params) + glm1.fit(X, y) + + glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params) + # As we intentionally set max_iter=1 such that the solver should raise a + # ConvergenceWarning. + with pytest.warns(ConvergenceWarning): + glm2.fit(X, y) + + linear_loss = LinearModelLoss( + base_loss=glm1._get_loss(), + fit_intercept=fit_intercept, + ) + sw = np.full_like(y, fill_value=1 / n_samples) + + objective_glm1 = linear_loss.loss( + coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_, + X=X, + y=y, + sample_weight=sw, + l2_reg_strength=alpha, + ) + objective_glm2 = linear_loss.loss( + coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_, + X=X, + y=y, + sample_weight=sw, + l2_reg_strength=alpha, + ) + assert objective_glm1 < objective_glm2 + + glm2.set_params(max_iter=1000) + glm2.fit(X, y) + # The two models are not exactly identical since the lbfgs solver + # computes the approximate hessian from previous iterations, which + # will not be strictly identical in the case of a warm start. 
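Editorial aside, not part of the diffed test module: before the final assertions of test_warm_start (which continue just below), here is a minimal sketch of the warm-start pattern that test exercises. The synthetic dataset, alpha, and tolerance are illustrative assumptions, not the values used in the test.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import PoissonRegressor

X, y = make_regression(n_samples=100, n_features=10, noise=1.0, random_state=0)
y = np.abs(y)  # Poisson regression needs non-negative targets.

# Cold start: a single fit run to convergence from a zero initialization.
cold = PoissonRegressor(alpha=1.0, warm_start=False, max_iter=1000, tol=1e-8).fit(X, y)

# Warm start: the second call to fit() reuses coef_ / intercept_ from the
# first call as the initial point instead of starting from zeros again.
warm = PoissonRegressor(alpha=1.0, warm_start=True, max_iter=1000, tol=1e-8)
warm.fit(X, y)
warm.fit(X, y)

# Both runs should land on essentially the same penalized maximum-likelihood
# solution; any difference reflects solver tolerance, not the warm start.
print(np.max(np.abs(cold.coef_ - warm.coef_)))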
+ rtol = 2e-4 if solver == "lbfgs" else 1e-9 + assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol) + assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5) + + +@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sample_weight", [None, True]) +def test_normal_ridge_comparison( + n_samples, n_features, fit_intercept, sample_weight, request +): + """Compare with Ridge regression for Normal distributions.""" + test_size = 10 + X, y = make_regression( + n_samples=n_samples + test_size, + n_features=n_features, + n_informative=n_features - 2, + noise=0.5, + random_state=42, + ) + + if n_samples > n_features: + ridge_params = {"solver": "svd"} + else: + ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7} + + ( + X_train, + X_test, + y_train, + y_test, + ) = train_test_split(X, y, test_size=test_size, random_state=0) + + alpha = 1.0 + if sample_weight is None: + sw_train = None + alpha_ridge = alpha * n_samples + else: + sw_train = np.random.RandomState(0).rand(len(y_train)) + alpha_ridge = alpha * sw_train.sum() + + # GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2 + ridge = Ridge( + alpha=alpha_ridge, + random_state=42, + fit_intercept=fit_intercept, + **ridge_params, + ) + ridge.fit(X_train, y_train, sample_weight=sw_train) + + glm = _GeneralizedLinearRegressor( + alpha=alpha, + fit_intercept=fit_intercept, + max_iter=300, + tol=1e-5, + ) + glm.fit(X_train, y_train, sample_weight=sw_train) + assert glm.coef_.shape == (X.shape[1],) + assert_allclose(glm.coef_, ridge.coef_, atol=5e-5) + assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5) + assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4) + assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4) + + +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"]) +def test_poisson_glmnet(solver): + """Compare Poisson regression with L2 regularization and LogLink to glmnet""" + # library("glmnet") + # options(digits=10) + # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2)) + # x <- data.matrix(df[,c("a", "b")]) + # y <- df$y + # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson", + # standardize=F, thresh=1e-10, nlambda=10000) + # coef(fit, s=1) + # (Intercept) -0.12889386979 + # a 0.29019207995 + # b 0.03741173122 + X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T + y = np.array([0, 1, 1, 2]) + glm = PoissonRegressor( + alpha=1, + fit_intercept=True, + tol=1e-7, + max_iter=300, + solver=solver, + ) + glm.fit(X, y) + assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5) + assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5) + + +def test_convergence_warning(regression_data): + X, y = regression_data + + est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20) + with pytest.warns(ConvergenceWarning): + est.fit(X, y) + + +@pytest.mark.parametrize( + "name, link_class", [("identity", IdentityLink), ("log", LogLink)] +) +def test_tweedie_link_argument(name, link_class): + """Test GLM link argument set as string.""" + y = np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(power=1, link=name).fit(X, y) + assert isinstance(glm._base_loss.link, link_class) + + +@pytest.mark.parametrize( + "power, expected_link_class", + [ + (0, IdentityLink), # normal + (1, LogLink), # poisson + (2, LogLink), # gamma + (3, LogLink), # inverse-gaussian + ], +) +def test_tweedie_link_auto(power, 
expected_link_class): + """Test that link='auto' delivers the expected link function""" + y = np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(link="auto", power=power).fit(X, y) + assert isinstance(glm._base_loss.link, expected_link_class) + + +@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3]) +@pytest.mark.parametrize("link", ["log", "identity"]) +def test_tweedie_score(regression_data, power, link): + """Test that GLM score equals d2_tweedie_score for Tweedie losses.""" + X, y = regression_data + # make y positive + y = np.abs(y) + 1.0 + glm = TweedieRegressor(power=power, link=link).fit(X, y) + assert glm.score(X, y) == pytest.approx( + d2_tweedie_score(y, glm.predict(X), power=power) + ) + + +@pytest.mark.parametrize( + "estimator, value", + [ + (PoissonRegressor(), True), + (GammaRegressor(), True), + (TweedieRegressor(power=1.5), True), + (TweedieRegressor(power=0), False), + ], +) +def test_tags(estimator, value): + assert estimator._get_tags()["requires_positive_y"] is value + + +def test_linalg_warning_with_newton_solver(global_random_seed): + newton_solver = "newton-cholesky" + rng = np.random.RandomState(global_random_seed) + # Use at least 20 samples to reduce the likelihood of getting a degenerate + # dataset for any global_random_seed. + X_orig = rng.normal(size=(20, 3)) + y = rng.poisson( + np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0] + ).astype(np.float64) + + # Collinear variation of the same input features. + X_collinear = np.hstack([X_orig] * 10) + + # Let's consider the deviance of a constant baseline on this problem. + baseline_pred = np.full_like(y, y.mean()) + constant_model_deviance = mean_poisson_deviance(y, baseline_pred) + assert constant_model_deviance > 1.0 + + # No warning raised on well-conditioned design, even without regularization. + tol = 1e-10 + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y) + original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig)) + + # On this dataset, we should have enough data points to make it impossible + # to get a near-zero deviance (for any of the admissible random seeds). + # This will make it easier to interpret the meaning of rtol in the + # subsequent assertions: + assert original_newton_deviance > 0.2 + + # We check that the model could successfully fit information in X_orig to + # improve upon the constant baseline by a large margin (when evaluated on + # the training set). + assert constant_model_deviance - original_newton_deviance > 0.1 + + # LBFGS is robust to a collinear design because its approximation of the + # Hessian is Symmetric Positive Definite by construction. Let's record its + # solution. + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y) + collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + + # The LBFGS solution on the collinear data is expected to reach a solution + # comparable to the Newton solution on the original data. + rtol = 1e-6 + assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol) + + # Fitting a Newton solver on the collinear version of the training data + # without regularization should raise an informative warning and fall back + # to the LBFGS solver.
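Editorial aside (the diffed test continues below with the warning check): the assertions above hinge on comparing the mean Poisson deviance of a fitted model against a constant-mean baseline. A small illustrative sketch of that comparison on synthetic data, not the arrays used by the test, looks like this.

import numpy as np
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import mean_poisson_deviance

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 3))
y = rng.poisson(np.exp(X @ np.array([0.5, -0.2, 0.1]))).astype(float)

# Deviance of a constant baseline that always predicts the empirical mean.
baseline = np.full_like(y, y.mean())
print(mean_poisson_deviance(y, baseline))

# Deviance of a (lightly penalized) Poisson GLM fit on the same data;
# this is expected to be clearly smaller than the baseline deviance.
model = PoissonRegressor(alpha=1e-6).fit(X, y)
print(mean_poisson_deviance(y, model.predict(X)))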
+ msg = ( + "The inner solver of .*Newton.*Solver stumbled upon a singular or very " + "ill-conditioned Hessian matrix" + ) + with pytest.warns(scipy.linalg.LinAlgWarning, match=msg): + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit( + X_collinear, y + ) + # As a result we should still automatically converge to a good solution. + collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + assert collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + # Increasing the regularization slightly should make the problem go away: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y) + + # The slightly penalized model on the collinear data should be close enough + # to the unpenalized model on the original data. + penalized_collinear_newton_deviance = mean_poisson_deviance( + y, reg.predict(X_collinear) + ) + assert penalized_collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + +@pytest.mark.parametrize("verbose", [0, 1, 2]) +def test_newton_solver_verbosity(capsys, verbose): + """Test the std output of verbose newton solvers.""" + y = np.array([1, 2], dtype=float) + X = np.array([[1.0, 0], [0, 1]], dtype=float) + linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.solve(X, y, None) # returns array([0., 0.69314758]) + captured = capsys.readouterr() + + if verbose == 0: + assert captured.out == "" + else: + msg = [ + "Newton iter=1", + "Check Convergence", + "1. max |gradient|", + "2. Newton decrement", + "Solver did converge at loss = ", + ] + for m in msg: + assert m in captured.out + + if verbose >= 2: + msg = ["Backtracking Line Search", "line search iteration="] + for m in msg: + assert m in captured.out + + # Set the Newton solver to a state with a completely wrong Newton step. + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1.0, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "Line search did not converge and resorts to lbfgs instead." in captured.out + ) + + # Set the Newton solver to a state with bad Newton step such that the loss + # improvement in line search is tiny. 
+ sol = NewtonCholeskySolver( + coef=np.array([1e-12, 0.69314758]), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1e-6, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 2: + msg = [ + "line search iteration=", + "check loss improvement <= armijo term:", + "check loss |improvement| <= eps * |loss_old|:", + "check sum(|gradient|) < sum(|gradient_old|):", + ] + for m in msg: + assert m in captured.out + + # Test for a case with negative hessian. We badly initialize coef for a Tweedie + # loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link. + linear_loss = LinearModelLoss( + base_loss=HalfTweedieLoss(power=3), fit_intercept=False + ) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X) + 1, + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.solve(X, y, None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "The inner solver detected a pointwise Hessian with many negative values" + " and resorts to lbfgs instead." + in captured.out + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py new file mode 100644 index 0000000000000000000000000000000000000000..f5766ac0d6154a854bcbe4fa0519930fdfb267bd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py @@ -0,0 +1,2306 @@ +""" +Least Angle Regression algorithm. See the documentation on the +Generalized Linear Model for a complete discussion. 
+""" +# Author: Fabian Pedregosa +# Alexandre Gramfort +# Gael Varoquaux +# +# License: BSD 3 clause + +import sys +import warnings +from math import log +from numbers import Integral, Real + +import numpy as np +from scipy import interpolate, linalg +from scipy.linalg.lapack import get_lapack_funcs + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..model_selection import check_cv + +# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs' +from ..utils import ( # type: ignore + Bunch, + arrayfuncs, + as_float_array, + check_random_state, +) +from ..utils._metadata_requests import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.parallel import Parallel, delayed +from ._base import LinearModel, LinearRegression, _preprocess_data + +SOLVE_TRIANGULAR_ARGS = {"check_finite": False} + + +@validate_params( + { + "X": [np.ndarray, None], + "y": [np.ndarray, None], + "Xy": [np.ndarray, None], + "Gram": [StrOptions({"auto"}), "boolean", np.ndarray, None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha_min": [Interval(Real, 0, None, closed="left")], + "method": [StrOptions({"lar", "lasso"})], + "copy_X": ["boolean"], + "eps": [Interval(Real, 0, None, closed="neither"), None], + "copy_Gram": ["boolean"], + "verbose": ["verbose"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lars_path( + X, + y, + Xy=None, + *, + Gram=None, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """Compute Least Angle Regression or Lasso path using the LARS algorithm [1]. + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lar', the objective function is only known in + the form of an implicit equation (see discussion in [1]). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : None or ndarray of shape (n_samples, n_features) + Input data. Note that if X is `None` then the Gram matrix must be + specified, i.e., cannot be `None` or `False`. + + y : None or ndarray of shape (n_samples,) + Input targets. + + Xy : array-like of shape (n_features,) or (n_features, n_targets), \ + default=None + `Xy = X.T @ y` that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Gram : None, 'auto', bool, ndarray of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix `X.T @ X`, if `'auto'`, the Gram + matrix is precomputed from the given X, if there are more samples + than features. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter `alpha` in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select `'lar'` for Least Angle + Regression, `'lasso'` for the Lasso. + + copy_X : bool, default=True + If `False`, `X` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. 
Increase this for very ill-conditioned + systems. Unlike the `tol` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If `False`, `Gram` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If `True`, returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (`alphas_[alphas_ > 0.].min()` when fit_path=True) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent `lasso_path` function. + + Returns + ------- + alphas : ndarray of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. + `n_alphas` is either `max_iter`, `n_features`, or the + number of nodes in the path with `alpha >= alpha_min`, whichever + is smaller. + + active : ndarray of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : ndarray of shape (n_features, n_alphas + 1) + Coefficients along the path. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is set + to True. + + See Also + -------- + lars_path_gram : Compute LARS path in the sufficient stats mode. + lasso_path : Compute Lasso path with coordinate descent. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLarsCV : Cross-validated Lasso, using the LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. [3] `Wikipedia entry on the Lasso + `_ + """ + if X is None and Gram is not None: + raise ValueError( + "X cannot be None if Gram is not None" + "Use lars_path_gram to avoid passing X and y." 
+ ) + return _lars_path_solver( + X=X, + y=y, + Xy=Xy, + Gram=Gram, + n_samples=None, + max_iter=max_iter, + alpha_min=alpha_min, + method=method, + copy_X=copy_X, + eps=eps, + copy_Gram=copy_Gram, + verbose=verbose, + return_path=return_path, + return_n_iter=return_n_iter, + positive=positive, + ) + + +@validate_params( + { + "Xy": [np.ndarray], + "Gram": [np.ndarray], + "n_samples": [Interval(Integral, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha_min": [Interval(Real, 0, None, closed="left")], + "method": [StrOptions({"lar", "lasso"})], + "copy_X": ["boolean"], + "eps": [Interval(Real, 0, None, closed="neither"), None], + "copy_Gram": ["boolean"], + "verbose": ["verbose"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lars_path_gram( + Xy, + Gram, + *, + n_samples, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """The lars_path in the sufficient stats mode [1]. + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lars', the objective function is only known in + the form of an implicit equation (see discussion in [1]) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + Xy : ndarray of shape (n_features,) or (n_features, n_targets) + `Xy = X.T @ y`. + + Gram : ndarray of shape (n_features, n_features) + `Gram = X.T @ X`. + + n_samples : int + Equivalent size of sample. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter alpha parameter in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select `'lar'` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + copy_X : bool, default=True + If `False`, `X` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the `tol` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If `False`, `Gram` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If `return_path==True` returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (`alphas_[alphas_ > 0.].min()` when `fit_path=True`) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent lasso_path function. + + Returns + ------- + alphas : ndarray of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. 
+ `n_alphas` is either `max_iter`, `n_features` or the + number of nodes in the path with `alpha >= alpha_min`, whichever + is smaller. + + active : ndarray of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : ndarray of shape (n_features, n_alphas + 1) + Coefficients along the path. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is set + to True. + + See Also + -------- + lars_path_gram : Compute LARS path. + lasso_path : Compute Lasso path with coordinate descent. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLarsCV : Cross-validated Lasso, using the LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. [3] `Wikipedia entry on the Lasso + `_ + """ + return _lars_path_solver( + X=None, + y=None, + Xy=Xy, + Gram=Gram, + n_samples=n_samples, + max_iter=max_iter, + alpha_min=alpha_min, + method=method, + copy_X=copy_X, + eps=eps, + copy_Gram=copy_Gram, + verbose=verbose, + return_path=return_path, + return_n_iter=return_n_iter, + positive=positive, + ) + + +def _lars_path_solver( + X, + y, + Xy=None, + Gram=None, + n_samples=None, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """Compute Least Angle Regression or Lasso path using LARS algorithm [1] + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lars', the objective function is only known in + the form of an implicit equation (see discussion in [1]) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : None or ndarray of shape (n_samples, n_features) + Input data. Note that if X is None then Gram must be specified, + i.e., cannot be None or False. + + y : None or ndarray of shape (n_samples,) + Input targets. + + Xy : array-like of shape (n_features,) or (n_features, n_targets), \ + default=None + `Xy = np.dot(X.T, y)` that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Gram : None, 'auto' or array-like of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram + matrix is precomputed from the given X, if there are more samples + than features. + + n_samples : int or float, default=None + Equivalent size of sample. If `None`, it will be `n_samples`. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter alpha parameter in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select ``'lar'`` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + copy_X : bool, default=True + If ``False``, ``X`` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. 
Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If ``False``, ``Gram`` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If ``return_path==True`` returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent lasso_path function. + + Returns + ------- + alphas : array-like of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. + + active : array-like of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : array-like of shape (n_features, n_alphas + 1) + Coefficients along the path + + n_iter : int + Number of iterations run. Returned only if return_n_iter is set + to True. + + See Also + -------- + lasso_path + LassoLars + Lars + LassoLarsCV + LarsCV + sklearn.decomposition.sparse_encode + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. [3] `Wikipedia entry on the Lasso + `_ + + """ + if method == "lar" and positive: + raise ValueError("Positive constraint not supported for 'lar' coding method.") + + n_samples = n_samples if n_samples is not None else y.size + + if Xy is None: + Cov = np.dot(X.T, y) + else: + Cov = Xy.copy() + + if Gram is None or Gram is False: + Gram = None + if X is None: + raise ValueError("X and Gram cannot both be unspecified.") + elif isinstance(Gram, str) and Gram == "auto" or Gram is True: + if Gram is True or X.shape[0] > X.shape[1]: + Gram = np.dot(X.T, X) + else: + Gram = None + elif copy_Gram: + Gram = Gram.copy() + + if Gram is None: + n_features = X.shape[1] + else: + n_features = Cov.shape[0] + if Gram.shape != (n_features, n_features): + raise ValueError("The shapes of the inputs Gram and Xy do not match.") + + if copy_X and X is not None and Gram is None: + # force copy. 
setting the array to be fortran-ordered + # speeds up the calculation of the (partial) Gram matrix + # and allows to easily swap columns + X = X.copy("F") + + max_features = min(max_iter, n_features) + + dtypes = set(a.dtype for a in (X, y, Xy, Gram) if a is not None) + if len(dtypes) == 1: + # use the precision level of input data if it is consistent + return_dtype = next(iter(dtypes)) + else: + # fallback to double precision otherwise + return_dtype = np.float64 + + if return_path: + coefs = np.zeros((max_features + 1, n_features), dtype=return_dtype) + alphas = np.zeros(max_features + 1, dtype=return_dtype) + else: + coef, prev_coef = ( + np.zeros(n_features, dtype=return_dtype), + np.zeros(n_features, dtype=return_dtype), + ) + alpha, prev_alpha = ( + np.array([0.0], dtype=return_dtype), + np.array([0.0], dtype=return_dtype), + ) + # above better ideas? + + n_iter, n_active = 0, 0 + active, indices = list(), np.arange(n_features) + # holds the sign of covariance + sign_active = np.empty(max_features, dtype=np.int8) + drop = False + + # will hold the cholesky factorization. Only lower part is + # referenced. + if Gram is None: + L = np.empty((max_features, max_features), dtype=X.dtype) + swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (X,)) + else: + L = np.empty((max_features, max_features), dtype=Gram.dtype) + swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (Cov,)) + (solve_cholesky,) = get_lapack_funcs(("potrs",), (L,)) + + if verbose: + if verbose > 1: + print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC") + else: + sys.stdout.write(".") + sys.stdout.flush() + + tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning + cov_precision = np.finfo(Cov.dtype).precision + equality_tolerance = np.finfo(np.float32).eps + + if Gram is not None: + Gram_copy = Gram.copy() + Cov_copy = Cov.copy() + + while True: + if Cov.size: + if positive: + C_idx = np.argmax(Cov) + else: + C_idx = np.argmax(np.abs(Cov)) + + C_ = Cov[C_idx] + + if positive: + C = C_ + else: + C = np.fabs(C_) + else: + C = 0.0 + + if return_path: + alpha = alphas[n_iter, np.newaxis] + coef = coefs[n_iter] + prev_alpha = alphas[n_iter - 1, np.newaxis] + prev_coef = coefs[n_iter - 1] + + alpha[0] = C / n_samples + if alpha[0] <= alpha_min + equality_tolerance: # early stopping + if abs(alpha[0] - alpha_min) > equality_tolerance: + # interpolation factor 0 <= ss < 1 + if n_iter > 0: + # In the first iteration, all alphas are zero, the formula + # below would make ss a NaN + ss = (prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0]) + coef[:] = prev_coef + ss * (coef - prev_coef) + alpha[0] = alpha_min + if return_path: + coefs[n_iter] = coef + break + + if n_iter >= max_iter or n_active >= n_features: + break + if not drop: + ########################################################## + # Append x_j to the Cholesky factorization of (Xa * Xa') # + # # + # ( L 0 ) # + # L -> ( ) , where L * w = Xa' x_j # + # ( w z ) and z = ||x_j|| # + # # + ########################################################## + + if positive: + sign_active[n_active] = np.ones_like(C_) + else: + sign_active[n_active] = np.sign(C_) + m, n = n_active, C_idx + n_active + + Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) + indices[n], indices[m] = indices[m], indices[n] + Cov_not_shortened = Cov + Cov = Cov[1:] # remove Cov[0] + + if Gram is None: + X.T[n], X.T[m] = swap(X.T[n], X.T[m]) + c = nrm2(X.T[n_active]) ** 2 + L[n_active, :n_active] = np.dot(X.T[n_active], X.T[:n_active].T) + else: + # swap does only work inplace if matrix is 
fortran + # contiguous ... + Gram[m], Gram[n] = swap(Gram[m], Gram[n]) + Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n]) + c = Gram[n_active, n_active] + L[n_active, :n_active] = Gram[n_active, :n_active] + + # Update the cholesky decomposition for the Gram matrix + if n_active: + linalg.solve_triangular( + L[:n_active, :n_active], + L[n_active, :n_active], + trans=0, + lower=1, + overwrite_b=True, + **SOLVE_TRIANGULAR_ARGS, + ) + + v = np.dot(L[n_active, :n_active], L[n_active, :n_active]) + diag = max(np.sqrt(np.abs(c - v)), eps) + L[n_active, n_active] = diag + + if diag < 1e-7: + # The system is becoming too ill-conditioned. + # We have degenerate vectors in our active set. + # We'll 'drop for good' the last regressor added. + warnings.warn( + "Regressors in active set degenerate. " + "Dropping a regressor, after %i iterations, " + "i.e. alpha=%.3e, " + "with an active set of %i regressors, and " + "the smallest cholesky pivot element being %.3e." + " Reduce max_iter or increase eps parameters." + % (n_iter, alpha.item(), n_active, diag), + ConvergenceWarning, + ) + + # XXX: need to figure a 'drop for good' way + Cov = Cov_not_shortened + Cov[0] = 0 + Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) + continue + + active.append(indices[n_active]) + n_active += 1 + + if verbose > 1: + print( + "%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], "", n_active, C) + ) + + if method == "lasso" and n_iter > 0 and prev_alpha[0] < alpha[0]: + # alpha is increasing. This is because the updates of Cov are + # bringing in too much numerical error that is greater than + # than the remaining correlation with the + # regressors. Time to bail out + warnings.warn( + "Early stopping the lars path, as the residues " + "are small and the current value of alpha is no " + "longer well controlled. %i iterations, alpha=%.3e, " + "previous alpha=%.3e, with an active set of %i " + "regressors." % (n_iter, alpha.item(), prev_alpha.item(), n_active), + ConvergenceWarning, + ) + break + + # least squares solution + least_squares, _ = solve_cholesky( + L[:n_active, :n_active], sign_active[:n_active], lower=True + ) + + if least_squares.size == 1 and least_squares == 0: + # This happens because sign_active[:n_active] = 0 + least_squares[...] = 1 + AA = 1.0 + else: + # is this really needed ? + AA = 1.0 / np.sqrt(np.sum(least_squares * sign_active[:n_active])) + + if not np.isfinite(AA): + # L is too ill-conditioned + i = 0 + L_ = L[:n_active, :n_active].copy() + while not np.isfinite(AA): + L_.flat[:: n_active + 1] += (2**i) * eps + least_squares, _ = solve_cholesky( + L_, sign_active[:n_active], lower=True + ) + tmp = max(np.sum(least_squares * sign_active[:n_active]), eps) + AA = 1.0 / np.sqrt(tmp) + i += 1 + least_squares *= AA + + if Gram is None: + # equiangular direction of variables in the active set + eq_dir = np.dot(X.T[:n_active].T, least_squares) + # correlation between each unactive variables and + # eqiangular vector + corr_eq_dir = np.dot(X.T[n_active:], eq_dir) + else: + # if huge number of features, this takes 50% of time, I + # think could be avoided if we just update it using an + # orthogonal (QR) decomposition of X + corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares) + + # Explicit rounding can be necessary to avoid `np.argmax(Cov)` yielding + # unstable results because of rounding errors. 
+ np.around(corr_eq_dir, decimals=cov_precision, out=corr_eq_dir) + + g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32)) + if positive: + gamma_ = min(g1, C / AA) + else: + g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32)) + gamma_ = min(g1, g2, C / AA) + + # TODO: better names for these variables: z + drop = False + z = -coef[active] / (least_squares + tiny32) + z_pos = arrayfuncs.min_pos(z) + if z_pos < gamma_: + # some coefficients have changed sign + idx = np.where(z == z_pos)[0][::-1] + + # update the sign, important for LAR + sign_active[idx] = -sign_active[idx] + + if method == "lasso": + gamma_ = z_pos + drop = True + + n_iter += 1 + + if return_path: + if n_iter >= coefs.shape[0]: + del coef, alpha, prev_alpha, prev_coef + # resize the coefs and alphas array + add_features = 2 * max(1, (max_features - n_active)) + coefs = np.resize(coefs, (n_iter + add_features, n_features)) + coefs[-add_features:] = 0 + alphas = np.resize(alphas, n_iter + add_features) + alphas[-add_features:] = 0 + coef = coefs[n_iter] + prev_coef = coefs[n_iter - 1] + else: + # mimic the effect of incrementing n_iter on the array references + prev_coef = coef + prev_alpha[0] = alpha[0] + coef = np.zeros_like(coef) + + coef[active] = prev_coef[active] + gamma_ * least_squares + + # update correlations + Cov -= gamma_ * corr_eq_dir + + # See if any coefficient has changed sign + if drop and method == "lasso": + # handle the case when idx is not length of 1 + for ii in idx: + arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) + + n_active -= 1 + # handle the case when idx is not length of 1 + drop_idx = [active.pop(ii) for ii in idx] + + if Gram is None: + # propagate dropped variable + for ii in idx: + for i in range(ii, n_active): + X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1]) + # yeah this is stupid + indices[i], indices[i + 1] = indices[i + 1], indices[i] + + # TODO: this could be updated + residual = y - np.dot(X[:, :n_active], coef[active]) + temp = np.dot(X.T[n_active], residual) + + Cov = np.r_[temp, Cov] + else: + for ii in idx: + for i in range(ii, n_active): + indices[i], indices[i + 1] = indices[i + 1], indices[i] + Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1]) + Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1]) + + # Cov_n = Cov_j + x_j * X + increment(betas) TODO: + # will this still work with multiple drops ? + + # recompute covariance. Probably could be done better + # wrong as Xy is not swapped with the rest of variables + + # TODO: this could be updated + temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef) + Cov = np.r_[temp, Cov] + + sign_active = np.delete(sign_active, idx) + sign_active = np.append(sign_active, 0.0) # just to maintain size + if verbose > 1: + print( + "%s\t\t%s\t\t%s\t\t%s\t\t%s" + % (n_iter, "", drop_idx, n_active, abs(temp)) + ) + + if return_path: + # resize coefs in case of early stop + alphas = alphas[: n_iter + 1] + coefs = coefs[: n_iter + 1] + + if return_n_iter: + return alphas, active, coefs.T, n_iter + else: + return alphas, active, coefs.T + else: + if return_n_iter: + return alpha, active, coef, n_iter + else: + return alpha, active, coef + + +############################################################################### +# Estimator classes + + +class Lars(MultiOutputMixin, RegressorMixin, LinearModel): + """Least Angle Regression model a.k.a. LAR. + + Read more in the :ref:`User Guide `. 
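Editorial aside (the Lars class docstring continues below): the private _lars_path_solver above backs both public entry points. A brief usage sketch with made-up data, showing lars_path and its sufficient-statistics counterpart lars_path_gram computing the same Lasso path, could look like the following; the dataset is an illustrative assumption.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path, lars_path_gram

X, y = make_regression(n_samples=100, n_features=10, random_state=0)

# Standard mode: pass the raw design matrix and targets.
alphas, active, coefs = lars_path(X, y, method="lasso")

# Sufficient-statistics mode: only X.T @ y, X.T @ X and n_samples are needed.
alphas_g, active_g, coefs_g = lars_path_gram(
    Xy=X.T @ y, Gram=X.T @ X, n_samples=len(y), method="lasso"
)

# One column of coefs per knot of the piecewise-linear path; both modes are
# expected to agree on this data.
print(coefs.shape, np.allclose(coefs, coefs_g))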
+ + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + n_nonzero_coefs : int, default=500 + Target number of non-zero coefficients. Use ``np.inf`` for no limit. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + fit_path : bool, default=True + If True the full path is stored in the ``coef_path_`` attribute. + If you compute the solution for a large problem or many targets, + setting ``fit_path`` to ``False`` will lead to a speedup, especially + with a small alpha. + + jitter : float, default=None + Upper bound on a uniform noise parameter to be added to the + `y` values, to satisfy the model's assumption of + one-at-a-time computations. Might help with stability. + + .. versionadded:: 0.23 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for jittering. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. Ignored if `jitter` is None. + + .. versionadded:: 0.23 + + Attributes + ---------- + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If this is a list of array-like, the length of the outer + list is `n_targets`. + + active_ : list of shape (n_alphas,) or list of such lists + Indices of active variables at the end of the path. + If this is a list of list, the length of the outer list is `n_targets`. + + coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \ + of such arrays + The varying values of the coefficients along the path. It is not + present if the ``fit_path`` parameter is ``False``. If this is a list + of array-like, the length of the outer list is `n_targets`. + + coef_ : array-like of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formulation formula). + + intercept_ : float or array-like of shape (n_targets,) + Independent term in decision function. + + n_iter_ : array-like or int + The number of iterations taken by lars_path to find the + grid of alphas for each target. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path: Compute Least Angle Regression or Lasso + path using LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. 
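Editorial aside, preceding the doctest Examples below: the precompute parameter documented above can also be a Gram matrix built by the caller, which is the branch the _get_gram helper otherwise handles internally. A sketch under illustrative assumptions follows; fit_intercept is disabled so that the Gram matrix of the uncentered X remains valid.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lars

X, y = make_regression(n_samples=200, n_features=5, random_state=0)

# Precompute the Gram matrix once, e.g. to reuse it across several fits.
gram = X.T @ X

reg = Lars(n_nonzero_coefs=3, precompute=gram, fit_intercept=False).fit(X, y)
print(reg.active_)  # indices of the selected features, in order of entry
print(reg.coef_)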
+ + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.Lars(n_nonzero_coefs=1) + >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) + Lars(n_nonzero_coefs=1) + >>> print(reg.coef_) + [ 0. -1.11...] + """ + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "verbose": ["verbose"], + "precompute": ["boolean", StrOptions({"auto"}), np.ndarray, Hidden(None)], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")], + "eps": [Interval(Real, 0, None, closed="left")], + "copy_X": ["boolean"], + "fit_path": ["boolean"], + "jitter": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + } + + method = "lar" + positive = False + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + precompute="auto", + n_nonzero_coefs=500, + eps=np.finfo(float).eps, + copy_X=True, + fit_path=True, + jitter=None, + random_state=None, + ): + self.fit_intercept = fit_intercept + self.verbose = verbose + self.precompute = precompute + self.n_nonzero_coefs = n_nonzero_coefs + self.eps = eps + self.copy_X = copy_X + self.fit_path = fit_path + self.jitter = jitter + self.random_state = random_state + + @staticmethod + def _get_gram(precompute, X, y): + if (not hasattr(precompute, "__array__")) and ( + (precompute is True) + or (precompute == "auto" and X.shape[0] > X.shape[1]) + or (precompute == "auto" and y.shape[1] > 1) + ): + precompute = np.dot(X.T, X) + + return precompute + + def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None): + """Auxiliary method to fit the model using X, y as training data""" + n_features = X.shape[1] + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=self.copy_X + ) + + if y.ndim == 1: + y = y[:, np.newaxis] + + n_targets = y.shape[1] + + Gram = self._get_gram(self.precompute, X, y) + + self.alphas_ = [] + self.n_iter_ = [] + self.coef_ = np.empty((n_targets, n_features), dtype=X.dtype) + + if fit_path: + self.active_ = [] + self.coef_path_ = [] + for k in range(n_targets): + this_Xy = None if Xy is None else Xy[:, k] + alphas, active, coef_path, n_iter_ = lars_path( + X, + y[:, k], + Gram=Gram, + Xy=this_Xy, + copy_X=self.copy_X, + copy_Gram=True, + alpha_min=alpha, + method=self.method, + verbose=max(0, self.verbose - 1), + max_iter=max_iter, + eps=self.eps, + return_path=True, + return_n_iter=True, + positive=self.positive, + ) + self.alphas_.append(alphas) + self.active_.append(active) + self.n_iter_.append(n_iter_) + self.coef_path_.append(coef_path) + self.coef_[k] = coef_path[:, -1] + + if n_targets == 1: + self.alphas_, self.active_, self.coef_path_, self.coef_ = [ + a[0] + for a in (self.alphas_, self.active_, self.coef_path_, self.coef_) + ] + self.n_iter_ = self.n_iter_[0] + else: + for k in range(n_targets): + this_Xy = None if Xy is None else Xy[:, k] + alphas, _, self.coef_[k], n_iter_ = lars_path( + X, + y[:, k], + Gram=Gram, + Xy=this_Xy, + copy_X=self.copy_X, + copy_Gram=True, + alpha_min=alpha, + method=self.method, + verbose=max(0, self.verbose - 1), + max_iter=max_iter, + eps=self.eps, + return_path=False, + return_n_iter=True, + positive=self.positive, + ) + self.alphas_.append(alphas) + self.n_iter_.append(n_iter_) + if n_targets == 1: + self.alphas_ = self.alphas_[0] + self.n_iter_ = self.n_iter_[0] + + self._set_intercept(X_offset, y_offset, X_scale) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, Xy=None): + """Fit the model using X, y as training data. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Xy : array-like of shape (n_features,) or (n_features, n_targets), \ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Returns + ------- + self : object + Returns an instance of self. + """ + X, y = self._validate_data(X, y, y_numeric=True, multi_output=True) + + alpha = getattr(self, "alpha", 0.0) + if hasattr(self, "n_nonzero_coefs"): + alpha = 0.0 # n_nonzero_coefs parametrization takes priority + max_iter = self.n_nonzero_coefs + else: + max_iter = self.max_iter + + if self.jitter is not None: + rng = check_random_state(self.random_state) + + noise = rng.uniform(high=self.jitter, size=len(y)) + y = y + noise + + self._fit( + X, + y, + max_iter=max_iter, + alpha=alpha, + fit_path=self.fit_path, + Xy=Xy, + ) + + return self + + +class LassoLars(Lars): + """Lasso model fit with Least Angle Regression a.k.a. Lars. + + It is a Linear Model trained with an L1 prior as regularizer. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the penalty term. Defaults to 1.0. + ``alpha = 0`` is equivalent to an ordinary least square, solved + by :class:`LinearRegression`. For numerical reasons, using + ``alpha = 0`` with the LassoLars object is not advised and you + should prefer the LinearRegression object. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like, default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + fit_path : bool, default=True + If ``True`` the full path is stored in the ``coef_path_`` attribute. + If you compute the solution for a large problem or many targets, + setting ``fit_path`` to ``False`` will lead to a speedup, especially + with a small alpha. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients will not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. 
+ + jitter : float, default=None + Upper bound on a uniform noise parameter to be added to the + `y` values, to satisfy the model's assumption of + one-at-a-time computations. Might help with stability. + + .. versionadded:: 0.23 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for jittering. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. Ignored if `jitter` is None. + + .. versionadded:: 0.23 + + Attributes + ---------- + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If this is a list of array-like, the length of the outer + list is `n_targets`. + + active_ : list of length n_alphas or list of such lists + Indices of active variables at the end of the path. + If this is a list of list, the length of the outer list is `n_targets`. + + coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \ + of such arrays + If a list is passed it's expected to be one of n_targets such arrays. + The varying values of the coefficients along the path. It is not + present if the ``fit_path`` parameter is ``False``. If this is a list + of array-like, the length of the outer list is `n_targets`. + + coef_ : array-like of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formulation formula). + + intercept_ : float or array-like of shape (n_targets,) + Independent term in decision function. + + n_iter_ : array-like or int + The number of iterations taken by lars_path to find the + grid of alphas for each target. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.LassoLars(alpha=0.01) + >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1]) + LassoLars(alpha=0.01) + >>> print(reg.coef_) + [ 0. -0.955...] 
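Editorial aside (the LassoLars docstring continues below): the objective quoted above is the same one minimized by the coordinate-descent Lasso estimator, so away from degenerate cases the two should agree closely for the same alpha. A small illustrative check on synthetic data, not part of the patch:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, LassoLars

X, y = make_regression(n_samples=100, n_features=20, noise=1.0, random_state=0)

alpha = 0.5
lars = LassoLars(alpha=alpha).fit(X, y)
cd = Lasso(alpha=alpha, tol=1e-10, max_iter=100_000).fit(X, y)

# Same penalized least-squares objective, so the solutions should match closely.
print(np.max(np.abs(lars.coef_ - cd.coef_)))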
+ """ + + _parameter_constraints: dict = { + **Lars._parameter_constraints, + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "positive": ["boolean"], + } + _parameter_constraints.pop("n_nonzero_coefs") + + method = "lasso" + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + verbose=False, + precompute="auto", + max_iter=500, + eps=np.finfo(float).eps, + copy_X=True, + fit_path=True, + positive=False, + jitter=None, + random_state=None, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.verbose = verbose + self.positive = positive + self.precompute = precompute + self.copy_X = copy_X + self.eps = eps + self.fit_path = fit_path + self.jitter = jitter + self.random_state = random_state + + +############################################################################### +# Cross-validated estimator classes + + +def _check_copy_and_writeable(array, copy=False): + if copy or not array.flags.writeable: + return array.copy() + return array + + +def _lars_path_residues( + X_train, + y_train, + X_test, + y_test, + Gram=None, + copy=True, + method="lar", + verbose=False, + fit_intercept=True, + max_iter=500, + eps=np.finfo(float).eps, + positive=False, +): + """Compute the residues on left-out data for a full LARS path + + Parameters + ----------- + X_train : array-like of shape (n_samples, n_features) + The data to fit the LARS on + + y_train : array-like of shape (n_samples,) + The target variable to fit LARS on + + X_test : array-like of shape (n_samples, n_features) + The data to compute the residues on + + y_test : array-like of shape (n_samples,) + The target variable to compute the residues on + + Gram : None, 'auto' or array-like of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram + matrix is precomputed from the given X, if there are more samples + than features + + copy : bool, default=True + Whether X_train, X_test, y_train and y_test should be copied; + if False, they may be overwritten. + + method : {'lar' , 'lasso'}, default='lar' + Specifies the returned model. Select ``'lar'`` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + verbose : bool or int, default=False + Sets the amount of verbosity + + fit_intercept : bool, default=True + whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + See reservations for using this option in combination with method + 'lasso' for expected small values of alpha in the doc of LassoLarsCV + and LassoLarsIC. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + Returns + -------- + alphas : array-like of shape (n_alphas,) + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever + is smaller. + + active : list + Indices of active variables at the end of the path. 
+ + coefs : array-like of shape (n_features, n_alphas) + Coefficients along the path + + residues : array-like of shape (n_alphas, n_samples) + Residues of the prediction on the test data + """ + X_train = _check_copy_and_writeable(X_train, copy) + y_train = _check_copy_and_writeable(y_train, copy) + X_test = _check_copy_and_writeable(X_test, copy) + y_test = _check_copy_and_writeable(y_test, copy) + + if fit_intercept: + X_mean = X_train.mean(axis=0) + X_train -= X_mean + X_test -= X_mean + y_mean = y_train.mean(axis=0) + y_train = as_float_array(y_train, copy=False) + y_train -= y_mean + y_test = as_float_array(y_test, copy=False) + y_test -= y_mean + + alphas, active, coefs = lars_path( + X_train, + y_train, + Gram=Gram, + copy_X=False, + copy_Gram=False, + method=method, + verbose=max(0, verbose - 1), + max_iter=max_iter, + eps=eps, + positive=positive, + ) + residues = np.dot(X_test, coefs) - y_test[:, np.newaxis] + return alphas, active, coefs, residues.T + + +class LarsCV(Lars): + """Cross-validated Least Angle Regression model. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + precompute : bool, 'auto' or array-like , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram matrix + cannot be passed as argument since we will use only subsets of X. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + max_n_alphas : int, default=1000 + The maximum number of points on the path used to compute the + residuals in the cross-validation. + + n_jobs : int or None, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + Attributes + ---------- + active_ : list of length n_alphas or list of such lists + Indices of active variables at the end of the path. + If this is a list of lists, the outer list length is `n_targets`. 
+ + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function + + coef_path_ : array-like of shape (n_features, n_alphas) + the varying values of the coefficients along the path + + alpha_ : float + the estimated regularization parameter alpha + + alphas_ : array-like of shape (n_alphas,) + the different values of alpha along the path + + cv_alphas_ : array-like of shape (n_cv_alphas,) + all the values of alpha along the path for the different folds + + mse_path_ : array-like of shape (n_folds, n_cv_alphas) + the mean square error on left-out for each fold along the path + (alpha values given by ``cv_alphas``) + + n_iter_ : array-like or int + the number of iterations run by Lars with the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import LarsCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0) + >>> reg = LarsCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9996... + >>> reg.alpha_ + 0.2961... + >>> reg.predict(X[:1,]) + array([154.3996...]) + """ + + _parameter_constraints: dict = { + **Lars._parameter_constraints, + "max_iter": [Interval(Integral, 0, None, closed="left")], + "cv": ["cv_object"], + "max_n_alphas": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + } + + for parameter in ["n_nonzero_coefs", "jitter", "fit_path", "random_state"]: + _parameter_constraints.pop(parameter) + + method = "lar" + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + max_iter=500, + precompute="auto", + cv=None, + max_n_alphas=1000, + n_jobs=None, + eps=np.finfo(float).eps, + copy_X=True, + ): + self.max_iter = max_iter + self.cv = cv + self.max_n_alphas = max_n_alphas + self.n_jobs = n_jobs + super().__init__( + fit_intercept=fit_intercept, + verbose=verbose, + precompute=precompute, + n_nonzero_coefs=500, + eps=eps, + copy_X=copy_X, + fit_path=True, + ) + + def _more_tags(self): + return {"multioutput": False} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, **params): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. 
versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of self. + """ + _raise_for_params(params, self, "fit") + + X, y = self._validate_data(X, y, y_numeric=True) + X = as_float_array(X, copy=self.copy_X) + y = as_float_array(y, copy=self.copy_X) + + # init cross-validation generator + cv = check_cv(self.cv, classifier=False) + + if _routing_enabled(): + routed_params = process_routing(self, "fit", **params) + else: + routed_params = Bunch(splitter=Bunch(split={})) + + # As we use cross-validation, the Gram matrix is not precomputed here + Gram = self.precompute + if hasattr(Gram, "__array__"): + warnings.warn( + 'Parameter "precompute" cannot be an array in ' + '%s. Automatically switch to "auto" instead.' + % self.__class__.__name__ + ) + Gram = "auto" + + cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(_lars_path_residues)( + X[train], + y[train], + X[test], + y[test], + Gram=Gram, + copy=False, + method=self.method, + verbose=max(0, self.verbose - 1), + fit_intercept=self.fit_intercept, + max_iter=self.max_iter, + eps=self.eps, + positive=self.positive, + ) + for train, test in cv.split(X, y, **routed_params.splitter.split) + ) + all_alphas = np.concatenate(list(zip(*cv_paths))[0]) + # Unique also sorts + all_alphas = np.unique(all_alphas) + # Take at most max_n_alphas values + stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas)))) + all_alphas = all_alphas[::stride] + + mse_path = np.empty((len(all_alphas), len(cv_paths))) + for index, (alphas, _, _, residues) in enumerate(cv_paths): + alphas = alphas[::-1] + residues = residues[::-1] + if alphas[0] != 0: + alphas = np.r_[0, alphas] + residues = np.r_[residues[0, np.newaxis], residues] + if alphas[-1] != all_alphas[-1]: + alphas = np.r_[alphas, all_alphas[-1]] + residues = np.r_[residues, residues[-1, np.newaxis]] + this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas) + this_residues **= 2 + mse_path[:, index] = np.mean(this_residues, axis=-1) + + mask = np.all(np.isfinite(mse_path), axis=-1) + all_alphas = all_alphas[mask] + mse_path = mse_path[mask] + # Select the alpha that minimizes left-out error + i_best_alpha = np.argmin(mse_path.mean(axis=-1)) + best_alpha = all_alphas[i_best_alpha] + + # Store our parameters + self.alpha_ = best_alpha + self.cv_alphas_ = all_alphas + self.mse_path_ = mse_path + + # Now compute the full model using best_alpha + # it will call a lasso internally when self if LassoLarsCV + # as self.method == 'lasso' + self._fit( + X, + y, + max_iter=self.max_iter, + alpha=best_alpha, + Xy=None, + fit_path=True, + ) + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + splitter=check_cv(self.cv), + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + return router + + +class LassoLarsCV(LarsCV): + """Cross-validated Lasso, using the LARS algorithm. + + See glossary entry for :term:`cross-validation estimator`. 
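
The alpha-grid construction and selection step in `LarsCV.fit` above can be illustrated in isolation. The sketch below uses made-up per-fold MSE curves (one value per alpha instead of per-sample residues) purely to show the interpolation onto a common grid and the argmin selection::

    import numpy as np
    from scipy import interpolate

    # Toy per-fold paths: (alphas in decreasing order, MSE at each alpha).
    fold_paths = [
        (np.array([1.0, 0.5, 0.1]), np.array([9.0, 4.0, 1.0])),
        (np.array([0.8, 0.3, 0.05]), np.array([8.0, 3.0, 2.0])),
    ]

    # Common grid: np.unique sorts the union of all per-fold alphas.
    all_alphas = np.unique(np.concatenate([a for a, _ in fold_paths]))

    mse_path = np.empty((len(all_alphas), len(fold_paths)))
    for j, (alphas, mse) in enumerate(fold_paths):
        # interp1d needs increasing x: reverse the path and pad it so that it
        # covers the whole grid, just as LarsCV.fit does with the residues.
        alphas, mse = alphas[::-1], mse[::-1]
        if alphas[0] != 0:
            alphas, mse = np.r_[0, alphas], np.r_[mse[0], mse]
        if alphas[-1] != all_alphas[-1]:
            alphas, mse = np.r_[alphas, all_alphas[-1]], np.r_[mse, mse[-1]]
        mse_path[:, j] = interpolate.interp1d(alphas, mse)(all_alphas)

    best_alpha = all_alphas[np.argmin(mse_path.mean(axis=1))]
    print(best_alpha)
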
+ + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + precompute : bool or 'auto' , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram matrix + cannot be passed as argument since we will use only subsets of X. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + max_n_alphas : int, default=1000 + The maximum number of points on the path used to compute the + residuals in the cross-validation. + + n_jobs : int or None, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients do not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. + As a consequence using LassoLarsCV only makes sense for problems where + a sparse solution is expected and/or reached. + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function. 
+ + coef_path_ : array-like of shape (n_features, n_alphas) + the varying values of the coefficients along the path + + alpha_ : float + the estimated regularization parameter alpha + + alphas_ : array-like of shape (n_alphas,) + the different values of alpha along the path + + cv_alphas_ : array-like of shape (n_cv_alphas,) + all the values of alpha along the path for the different folds + + mse_path_ : array-like of shape (n_folds, n_cv_alphas) + the mean square error on left-out for each fold along the path + (alpha values given by ``cv_alphas``) + + n_iter_ : array-like or int + the number of iterations run by Lars with the optimal alpha. + + active_ : list of int + Indices of active variables at the end of the path. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + The object solves the same problem as the + :class:`~sklearn.linear_model.LassoCV` object. However, unlike the + :class:`~sklearn.linear_model.LassoCV`, it find the relevant alphas values + by itself. In general, because of this property, it will be more stable. + However, it is more fragile to heavily multicollinear datasets. + + It is more efficient than the :class:`~sklearn.linear_model.LassoCV` if + only a small number of features are selected compared to the total number, + for instance if there are very few samples compared to the number of + features. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import LassoLarsCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4.0, random_state=0) + >>> reg = LassoLarsCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9993... + >>> reg.alpha_ + 0.3972... + >>> reg.predict(X[:1,]) + array([-78.4831...]) + """ + + _parameter_constraints = { + **LarsCV._parameter_constraints, + "positive": ["boolean"], + } + + method = "lasso" + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + max_iter=500, + precompute="auto", + cv=None, + max_n_alphas=1000, + n_jobs=None, + eps=np.finfo(float).eps, + copy_X=True, + positive=False, + ): + self.fit_intercept = fit_intercept + self.verbose = verbose + self.max_iter = max_iter + self.precompute = precompute + self.cv = cv + self.max_n_alphas = max_n_alphas + self.n_jobs = n_jobs + self.eps = eps + self.copy_X = copy_X + self.positive = positive + # XXX : we don't use super().__init__ + # to avoid setting n_nonzero_coefs + + +class LassoLarsIC(LassoLars): + """Lasso model fit with Lars using BIC or AIC for model selection. 
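
As a usage sketch of the trade-off noted above for `LassoLarsCV` versus `LassoCV` (toy data and settings are illustrative; the comparison depends on the problem)::

    from sklearn.datasets import make_regression
    from sklearn.linear_model import LassoCV, LassoLarsCV

    # Sparse ground truth: few informative features, many noise features.
    X, y = make_regression(n_samples=50, n_features=200, n_informative=5,
                           noise=1.0, random_state=0)

    lars_cv = LassoLarsCV(cv=5).fit(X, y)
    lasso_cv = LassoCV(cv=5, random_state=0).fit(X, y)

    print(lars_cv.alpha_, lasso_cv.alpha_)                            # selected strengths
    print((lars_cv.coef_ != 0).sum(), (lasso_cv.coef_ != 0).sum())    # sparsity of each fit
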
+ + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + AIC is the Akaike information criterion [2]_ and BIC is the Bayes + Information criterion [3]_. Such criteria are useful to select the value + of the regularization parameter by making a trade-off between the + goodness of fit and the complexity of the model. A good model should + explain well the data while being simple. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {'aic', 'bic'}, default='aic' + The type of criterion to use. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like, default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=500 + Maximum number of iterations to perform. Can be used for + early stopping. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients do not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. + As a consequence using LassoLarsIC only makes sense for problems where + a sparse solution is expected and/or reached. + + noise_variance : float, default=None + The estimated noise variance of the data. If `None`, an unbiased + estimate is computed by an OLS model. However, it is only possible + in the case where `n_samples > n_features + fit_intercept`. + + .. versionadded:: 1.1 + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function. + + alpha_ : float + the alpha parameter chosen by the information criterion + + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If a list, it will be of length `n_targets`. + + n_iter_ : int + number of iterations run by lars_path to find the grid of + alphas. + + criterion_ : array-like of shape (n_alphas,) + The value of the information criteria ('aic', 'bic') across all + alphas. The alpha which has the smallest information criterion is + chosen, as specified in [1]_. + + noise_variance_ : float + The estimated noise variance from the data used to compute the + criterion. + + .. 
versionadded:: 1.1 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + The number of degrees of freedom is computed as in [1]_. + + To have more details regarding the mathematical formulation of the + AIC and BIC criteria, please refer to :ref:`User Guide `. + + References + ---------- + .. [1] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani. + "On the degrees of freedom of the lasso." + The Annals of Statistics 35.5 (2007): 2173-2192. + <0712.0881>` + + .. [2] `Wikipedia entry on the Akaike information criterion + `_ + + .. [3] `Wikipedia entry on the Bayesian information criterion + `_ + + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.LassoLarsIC(criterion='bic') + >>> X = [[-2, 2], [-1, 1], [0, 0], [1, 1], [2, 2]] + >>> y = [-2.2222, -1.1111, 0, -1.1111, -2.2222] + >>> reg.fit(X, y) + LassoLarsIC(criterion='bic') + >>> print(reg.coef_) + [ 0. -1.11...] + """ + + _parameter_constraints: dict = { + **LassoLars._parameter_constraints, + "criterion": [StrOptions({"aic", "bic"})], + "noise_variance": [Interval(Real, 0, None, closed="left"), None], + } + + for parameter in ["jitter", "fit_path", "alpha", "random_state"]: + _parameter_constraints.pop(parameter) + + def __init__( + self, + criterion="aic", + *, + fit_intercept=True, + verbose=False, + precompute="auto", + max_iter=500, + eps=np.finfo(float).eps, + copy_X=True, + positive=False, + noise_variance=None, + ): + self.criterion = criterion + self.fit_intercept = fit_intercept + self.positive = positive + self.max_iter = max_iter + self.verbose = verbose + self.copy_X = copy_X + self.precompute = precompute + self.eps = eps + self.fit_path = True + self.noise_variance = noise_variance + + def _more_tags(self): + return {"multioutput": False} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, copy_X=None): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + copy_X : bool, default=None + If provided, this parameter will override the choice + of copy_X made at instance creation. + If ``True``, X will be copied; else, it may be overwritten. + + Returns + ------- + self : object + Returns an instance of self. 
+ """ + if copy_X is None: + copy_X = self.copy_X + X, y = self._validate_data(X, y, y_numeric=True) + + X, y, Xmean, ymean, Xstd = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=copy_X + ) + + Gram = self.precompute + + alphas_, _, coef_path_, self.n_iter_ = lars_path( + X, + y, + Gram=Gram, + copy_X=copy_X, + copy_Gram=True, + alpha_min=0.0, + method="lasso", + verbose=self.verbose, + max_iter=self.max_iter, + eps=self.eps, + return_n_iter=True, + positive=self.positive, + ) + + n_samples = X.shape[0] + + if self.criterion == "aic": + criterion_factor = 2 + elif self.criterion == "bic": + criterion_factor = log(n_samples) + else: + raise ValueError( + f"criterion should be either bic or aic, got {self.criterion!r}" + ) + + residuals = y[:, np.newaxis] - np.dot(X, coef_path_) + residuals_sum_squares = np.sum(residuals**2, axis=0) + degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int) + for k, coef in enumerate(coef_path_.T): + mask = np.abs(coef) > np.finfo(coef.dtype).eps + if not np.any(mask): + continue + # get the number of degrees of freedom equal to: + # Xc = X[:, mask] + # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs + degrees_of_freedom[k] = np.sum(mask) + + self.alphas_ = alphas_ + + if self.noise_variance is None: + self.noise_variance_ = self._estimate_noise_variance( + X, y, positive=self.positive + ) + else: + self.noise_variance_ = self.noise_variance + + self.criterion_ = ( + n_samples * np.log(2 * np.pi * self.noise_variance_) + + residuals_sum_squares / self.noise_variance_ + + criterion_factor * degrees_of_freedom + ) + n_best = np.argmin(self.criterion_) + + self.alpha_ = alphas_[n_best] + self.coef_ = coef_path_[:, n_best] + self._set_intercept(Xmean, ymean, Xstd) + return self + + def _estimate_noise_variance(self, X, y, positive): + """Compute an estimate of the variance with an OLS model. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data to be fitted by the OLS model. We expect the data to be + centered. + + y : ndarray of shape (n_samples,) + Associated target. + + positive : bool, default=False + Restrict coefficients to be >= 0. This should be inline with + the `positive` parameter from `LassoLarsIC`. + + Returns + ------- + noise_variance : float + An estimator of the noise variance of an OLS model. + """ + if X.shape[0] <= X.shape[1] + self.fit_intercept: + raise ValueError( + f"You are using {self.__class__.__name__} in the case where the number " + "of samples is smaller than the number of features. In this setting, " + "getting a good estimate for the variance of the noise is not " + "possible. Provide an estimate of the noise variance in the " + "constructor." 
+ ) + # X and y are already centered and we don't need to fit with an intercept + ols_model = LinearRegression(positive=positive, fit_intercept=False) + y_pred = ols_model.fit(X, y).predict(X) + return np.sum((y - y_pred) ** 2) / ( + X.shape[0] - X.shape[1] - self.fit_intercept + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..4255706e284f176c0e0103f9871ce32b9bb2b132 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py @@ -0,0 +1,671 @@ +""" +Loss functions for linear models with raw_prediction = X @ coef +""" +import numpy as np +from scipy import sparse + +from ..utils.extmath import squared_norm + + +class LinearModelLoss: + """General class for loss functions with raw_prediction = X @ coef + intercept. + + Note that raw_prediction is also known as linear predictor. + + The loss is the average of per sample losses and includes a term for L2 + regularization:: + + loss = 1 / s_sum * sum_i s_i loss(y_i, X_i @ coef + intercept) + + 1/2 * l2_reg_strength * ||coef||_2^2 + + with sample weights s_i=1 if sample_weight=None and s_sum=sum_i s_i. + + Gradient and hessian, for simplicity without intercept, are:: + + gradient = 1 / s_sum * X.T @ loss.gradient + l2_reg_strength * coef + hessian = 1 / s_sum * X.T @ diag(loss.hessian) @ X + + l2_reg_strength * identity + + Conventions: + if fit_intercept: + n_dof = n_features + 1 + else: + n_dof = n_features + + if base_loss.is_multiclass: + coef.shape = (n_classes, n_dof) or ravelled (n_classes * n_dof,) + else: + coef.shape = (n_dof,) + + The intercept term is at the end of the coef array: + if base_loss.is_multiclass: + if coef.shape (n_classes, n_dof): + intercept = coef[:, -1] + if coef.shape (n_classes * n_dof,) + intercept = coef[n_features::n_dof] = coef[(n_dof-1)::n_dof] + intercept.shape = (n_classes,) + else: + intercept = coef[-1] + + Note: If coef has shape (n_classes * n_dof,), the 2d-array can be reconstructed as + + coef.reshape((n_classes, -1), order="F") + + The option order="F" makes coef[:, i] contiguous. This, in turn, makes the + coefficients without intercept, coef[:, :-1], contiguous and speeds up + matrix-vector computations. + + Note: If the average loss per sample is wanted instead of the sum of the loss per + sample, one can simply use a rescaled sample_weight such that + sum(sample_weight) = 1. + + Parameters + ---------- + base_loss : instance of class BaseLoss from sklearn._loss. + fit_intercept : bool + """ + + def __init__(self, base_loss, fit_intercept): + self.base_loss = base_loss + self.fit_intercept = fit_intercept + + def init_zero_coef(self, X, dtype=None): + """Allocate coef of correct shape with zeros. + + Parameters: + ----------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + dtype : data-type, default=None + Overrides the data type of coef. With dtype=None, coef will have the same + dtype as X. + + Returns + ------- + coef : ndarray of shape (n_dof,) or (n_classes, n_dof) + Coefficients of a linear model. 
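
A small illustration of the coefficient layout conventions documented for `LinearModelLoss` (multiclass case with an intercept; the array contents are arbitrary)::

    import numpy as np

    n_classes, n_features = 3, 4
    n_dof = n_features + 1                     # fit_intercept=True

    # 2d layout: one row of coefficients per class, intercept in the last column.
    coef_2d = np.arange(n_classes * n_dof, dtype=float).reshape(n_classes, n_dof)
    weights, intercept = coef_2d[:, :-1], coef_2d[:, -1]

    # Ravelled layout and the order="F" round trip described above.
    coef_1d = coef_2d.ravel(order="F")
    restored = coef_1d.reshape((n_classes, -1), order="F")
    assert np.array_equal(restored, coef_2d)
    assert restored.flags.f_contiguous         # columns coef[:, i] are contiguous
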
+ """ + n_features = X.shape[1] + n_classes = self.base_loss.n_classes + if self.fit_intercept: + n_dof = n_features + 1 + else: + n_dof = n_features + if self.base_loss.is_multiclass: + coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order="F") + else: + coef = np.zeros_like(X, shape=n_dof, dtype=dtype) + return coef + + def weight_intercept(self, coef): + """Helper function to get coefficients and intercept. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + + Returns + ------- + weights : ndarray of shape (n_features,) or (n_classes, n_features) + Coefficients without intercept term. + intercept : float or ndarray of shape (n_classes,) + Intercept terms. + """ + if not self.base_loss.is_multiclass: + if self.fit_intercept: + intercept = coef[-1] + weights = coef[:-1] + else: + intercept = 0.0 + weights = coef + else: + # reshape to (n_classes, n_dof) + if coef.ndim == 1: + weights = coef.reshape((self.base_loss.n_classes, -1), order="F") + else: + weights = coef + if self.fit_intercept: + intercept = weights[:, -1] + weights = weights[:, :-1] + else: + intercept = 0.0 + + return weights, intercept + + def weight_intercept_raw(self, coef, X): + """Helper function to get coefficients, intercept and raw_prediction. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + Returns + ------- + weights : ndarray of shape (n_features,) or (n_classes, n_features) + Coefficients without intercept term. + intercept : float or ndarray of shape (n_classes,) + Intercept terms. + raw_prediction : ndarray of shape (n_samples,) or \ + (n_samples, n_classes) + """ + weights, intercept = self.weight_intercept(coef) + + if not self.base_loss.is_multiclass: + raw_prediction = X @ weights + intercept + else: + # weights has shape (n_classes, n_dof) + raw_prediction = X @ weights.T + intercept # ndarray, likely C-contiguous + + return weights, intercept, raw_prediction + + def l2_penalty(self, weights, l2_reg_strength): + """Compute L2 penalty term l2_reg_strength/2 *||w||_2^2.""" + norm2_w = weights @ weights if weights.ndim == 1 else squared_norm(weights) + return 0.5 * l2_reg_strength * norm2_w + + def loss( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Compute the loss as weighted average over point-wise losses. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. 
+ l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + loss : float + Weighted average of losses per sample, plus penalty. + """ + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + loss = self.base_loss.loss( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=None, + n_threads=n_threads, + ) + loss = np.average(loss, weights=sample_weight) + + return loss + self.l2_penalty(weights, l2_reg_strength) + + def loss_gradient( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Computes the sum of loss and gradient w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + loss : float + Weighted average of losses per sample, plus penalty. + + gradient : ndarray of shape coef.shape + The gradient of the loss. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + loss, grad_pointwise = self.base_loss.loss_gradient( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + loss = loss.sum() / sw_sum + loss += self.l2_penalty(weights, l2_reg_strength) + + grad_pointwise /= sw_sum + + if not self.base_loss.is_multiclass: + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + else: + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + # grad_pointwise.shape = (n_samples, n_classes) + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + if coef.ndim == 1: + grad = grad.ravel(order="F") + + return loss, grad + + def gradient( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Computes the gradient w.r.t. coef. 
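
The gradient assembly used in `loss_gradient` above (`X.T @ pointwise_gradient / s_sum + l2_reg_strength * weights`, with the plain pointwise sum for the intercept entry) can be checked numerically. Because the `base_loss` objects live in a private module, the sketch below substitutes a simple squared-error loss with the same structure::

    import numpy as np
    from scipy.optimize import check_grad

    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)
    l2 = 0.1

    def loss(coef):
        w, b = coef[:-1], coef[-1]             # intercept stored last, as above
        r = X @ w + b - y
        return 0.5 * np.mean(r ** 2) + 0.5 * l2 * (w @ w)

    def grad(coef):
        w, b = coef[:-1], coef[-1]
        r = X @ w + b - y                      # pointwise gradient of 0.5 * r**2
        g = np.empty_like(coef)
        g[:-1] = X.T @ r / len(y) + l2 * w     # X' @ pointwise gradient / s_sum
        g[-1] = r.mean()                       # intercept entry: sum / s_sum
        return g

    print(check_grad(loss, grad, rng.randn(4)))   # tiny, on the order of 1e-6
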
+ + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + grad_pointwise = self.base_loss.gradient( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + grad_pointwise /= sw_sum + + if not self.base_loss.is_multiclass: + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + return grad + else: + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + # gradient.shape = (n_samples, n_classes) + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + if coef.ndim == 1: + return grad.ravel(order="F") + else: + return grad + + def gradient_hessian( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + gradient_out=None, + hessian_out=None, + raw_prediction=None, + ): + """Computes gradient and hessian w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + gradient_out : None or ndarray of shape coef.shape + A location into which the gradient is stored. If None, a new array + might be created. + hessian_out : None or ndarray + A location into which the hessian is stored. If None, a new array + might be created. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. 
If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + + hessian : ndarray + Hessian matrix. + + hessian_warning : bool + True if pointwise hessian has more than half of its elements non-positive. + """ + n_samples, n_features = X.shape + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + grad_pointwise /= sw_sum + hess_pointwise /= sw_sum + + # For non-canonical link functions and far away from the optimum, the pointwise + # hessian can be negative. We take care that 75% of the hessian entries are + # positive. + hessian_warning = np.mean(hess_pointwise <= 0) > 0.25 + hess_pointwise = np.abs(hess_pointwise) + + if not self.base_loss.is_multiclass: + # gradient + if gradient_out is None: + grad = np.empty_like(coef, dtype=weights.dtype) + else: + grad = gradient_out + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + + # hessian + if hessian_out is None: + hess = np.empty(shape=(n_dof, n_dof), dtype=weights.dtype) + else: + hess = hessian_out + + if hessian_warning: + # Exit early without computing the hessian. + return grad, hess, hessian_warning + + # TODO: This "sandwich product", X' diag(W) X, is the main computational + # bottleneck for solvers. A dedicated Cython routine might improve it + # exploiting the symmetry (as opposed to, e.g., BLAS gemm). + if sparse.issparse(X): + hess[:n_features, :n_features] = ( + X.T + @ sparse.dia_matrix( + (hess_pointwise, 0), shape=(n_samples, n_samples) + ) + @ X + ).toarray() + else: + # np.einsum may use less memory but the following, using BLAS matrix + # multiplication (gemm), is by far faster. + WX = hess_pointwise[:, None] * X + hess[:n_features, :n_features] = np.dot(X.T, WX) + + if l2_reg_strength > 0: + # The L2 penalty enters the Hessian on the diagonal only. To add those + # terms, we use a flattened view on the array. + hess.reshape(-1)[ + : (n_features * n_dof) : (n_dof + 1) + ] += l2_reg_strength + + if self.fit_intercept: + # With intercept included as added column to X, the hessian becomes + # hess = (X, 1)' @ diag(h) @ (X, 1) + # = (X' @ diag(h) @ X, X' @ h) + # ( h @ X, sum(h)) + # The left upper part has already been filled, it remains to compute + # the last row and the last column. + Xh = X.T @ hess_pointwise + hess[:-1, -1] = Xh + hess[-1, :-1] = Xh + hess[-1, -1] = hess_pointwise.sum() + else: + # Here we may safely assume HalfMultinomialLoss aka categorical + # cross-entropy. + raise NotImplementedError + + return grad, hess, hessian_warning + + def gradient_hessian_product( + self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1 + ): + """Computes gradient and hessp (hessian product function) w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). 
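
The "sandwich product" X' diag(h) X discussed above can be computed either densely via a row-scaled gemm or through the sparse diagonal route; a small consistency check with arbitrary h and X::

    import numpy as np
    from scipy import sparse

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    h = rng.uniform(0.1, 1.0, size=100)        # pointwise hessian weights

    # Dense route: scale the rows of X, then one BLAS gemm.
    dense = X.T @ (h[:, None] * X)

    # Sparse route used above when X is sparse: X' @ diag(h) @ X.
    X_sp = sparse.csr_matrix(X)
    diag_h = sparse.dia_matrix((h, 0), shape=(100, 100))
    assert np.allclose(dense, (X_sp.T @ diag_h @ X_sp).toarray())

    # Equivalent einsum formulation (usually slower than the gemm above).
    assert np.allclose(dense, np.einsum("ij,i,ik->jk", X, h, X))
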
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + + hessp : callable + Function that takes in a vector input of shape of gradient and + and returns matrix-vector product with hessian. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + + if not self.base_loss.is_multiclass: + grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + hess_pointwise /= sw_sum + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + + # Precompute as much as possible: hX, hX_sum and hessian_sum + hessian_sum = hess_pointwise.sum() + if sparse.issparse(X): + hX = ( + sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples)) + @ X + ) + else: + hX = hess_pointwise[:, np.newaxis] * X + + if self.fit_intercept: + # Calculate the double derivative with respect to intercept. + # Note: In case hX is sparse, hX.sum is a matrix object. + hX_sum = np.squeeze(np.asarray(hX.sum(axis=0))) + # prevent squeezing to zero-dim array if n_features == 1 + hX_sum = np.atleast_1d(hX_sum) + + # With intercept included and l2_reg_strength = 0, hessp returns + # res = (X, 1)' @ diag(h) @ (X, 1) @ s + # = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1]) + # res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1] + # res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1] + def hessp(s): + ret = np.empty_like(s) + if sparse.issparse(X): + ret[:n_features] = X.T @ (hX @ s[:n_features]) + else: + ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]]) + ret[:n_features] += l2_reg_strength * s[:n_features] + + if self.fit_intercept: + ret[:n_features] += s[-1] * hX_sum + ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1] + return ret + + else: + # Here we may safely assume HalfMultinomialLoss aka categorical + # cross-entropy. + # HalfMultinomialLoss computes only the diagonal part of the hessian, i.e. + # diagonal in the classes. Here, we want the matrix-vector product of the + # full hessian. Therefore, we call gradient_proba. + grad_pointwise, proba = self.base_loss.gradient_proba( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + + # Full hessian-vector product, i.e. not only the diagonal part of the + # hessian. 
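
For the single-output branch above, the matrix-free `hessp` closure is equivalent to multiplying by the explicit bordered Hessian [[X' diag(h) X + l2*I, X'h], [h'X, sum(h)]]. A quick numerical check with arbitrary h (the 1/sw_sum scaling is folded into h here; names are local to the sketch)::

    import numpy as np

    rng = np.random.RandomState(0)
    n, p, l2 = 50, 4, 0.1
    X = rng.randn(n, p)
    h = rng.uniform(0.1, 1.0, size=n)          # pointwise hessian values
    s = rng.randn(p + 1)                       # direction; intercept entry last

    # Explicit bordered Hessian: L2 applies to the feature block only.
    H = np.zeros((p + 1, p + 1))
    H[:p, :p] = X.T @ (h[:, None] * X) + l2 * np.eye(p)
    H[:p, -1] = H[-1, :p] = X.T @ h
    H[-1, -1] = h.sum()

    # Matrix-free product, mirroring the hessp closure.
    hX_sum = X.T @ h
    ret = np.empty_like(s)
    ret[:p] = X.T @ (h * (X @ s[:p])) + l2 * s[:p] + s[-1] * hX_sum
    ret[-1] = hX_sum @ s[:p] + h.sum() * s[-1]

    assert np.allclose(ret, H @ s)
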
Derivation with some index battle for input vector s: + # - sample index i + # - feature indices j, m + # - class indices k, l + # - 1_{k=l} is one if k=l else 0 + # - p_i_k is the (predicted) probability that sample i belongs to class k + # for all i: sum_k p_i_k = 1 + # - s_l_m is input vector for class l and feature m + # - X' = X transposed + # + # Note: Hessian with dropping most indices is just: + # X' @ p_k (1(k=l) - p_l) @ X + # + # result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m + # = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l) + # * X_{im} s_l_m + # = sum_{i, m} (X')_{ji} * p_i_k + # * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m) + # + # See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411 # noqa + def hessp(s): + s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof) + if self.fit_intercept: + s_intercept = s[:, -1] + s = s[:, :-1] # shape = (n_classes, n_features) + else: + s_intercept = 0 + tmp = X @ s.T + s_intercept # X_{im} * s_k_m + tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l .. + tmp *= proba # * p_i_k + if sample_weight is not None: + tmp *= sample_weight[:, np.newaxis] + # hess_prod = empty_like(grad), but we ravel grad below and this + # function is run after that. + hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s + if self.fit_intercept: + hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum + if coef.ndim == 1: + return hess_prod.ravel(order="F") + else: + return hess_prod + + if coef.ndim == 1: + return grad.ravel(order="F"), hessp + + return grad, hessp diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..b97550fa52e8c7f4bce1b38be01827e8c605af96 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py @@ -0,0 +1,229 @@ +# Author: Mathieu Blondel +# License: BSD 3 clause +from numbers import Real + +from ..utils._param_validation import Interval, StrOptions +from ._stochastic_gradient import BaseSGDClassifier + + +class Perceptron(BaseSGDClassifier): + """Linear perceptron classifier. + + The implementation is a wrapper around :class:`~sklearn.linear_model.SGDClassifier` + by fixing the `loss` and `learning_rate` parameters as:: + + SGDClassifier(loss="perceptron", learning_rate="constant") + + Other available parameters are described below and are forwarded to + :class:`~sklearn.linear_model.SGDClassifier`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + penalty : {'l2','l1','elasticnet'}, default=None + The penalty (aka regularization term) to be used. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term if regularization is + used. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`. + `l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1. + Only used if `penalty='elasticnet'`. + + .. versionadded:: 0.24 + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + + .. 
versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + + eta0 : float, default=1 + Constant by which the updates are multiplied. + + n_jobs : int, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance or None, default=0 + Used to shuffle the training data, when ``shuffle`` is set to + ``True``. Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score is not improving by at least `tol` for + `n_iter_no_change` consecutive epochs. + + .. versionadded:: 0.20 + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before early stopping. + + .. versionadded:: 0.20 + + class_weight : dict, {class_label: weight} or "balanced", default=None + Preset for the class_weight fit parameter. + + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. See + :term:`the Glossary `. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The unique classes labels. + + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + loss_function_ : concrete LossFunction + The function that determines the loss, or difference between the + output of the algorithm and the target values. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + See Also + -------- + sklearn.linear_model.SGDClassifier : Linear classifiers + (SVM, logistic regression, etc.) with SGD training. 
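
The wrapper relationship described above can be exercised directly; with matching hyper-parameters and `random_state`, the two estimators are expected to learn the same weights (the dataset choice below is incidental)::

    import numpy as np
    from sklearn.datasets import load_breast_cancer
    from sklearn.linear_model import Perceptron, SGDClassifier

    X, y = load_breast_cancer(return_X_y=True)

    clf_a = Perceptron(random_state=0).fit(X, y)
    clf_b = SGDClassifier(loss="perceptron", learning_rate="constant", eta0=1.0,
                          penalty=None, random_state=0).fit(X, y)

    # Same update rule, same shuffling seed -> the learned weights should match.
    print(np.allclose(clf_a.coef_, clf_b.coef_),
          np.allclose(clf_a.intercept_, clf_b.intercept_))
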
+ + Notes + ----- + ``Perceptron`` is a classification algorithm which shares the same + underlying implementation with ``SGDClassifier``. In fact, + ``Perceptron()`` is equivalent to `SGDClassifier(loss="perceptron", + eta0=1, learning_rate="constant", penalty=None)`. + + References + ---------- + https://en.wikipedia.org/wiki/Perceptron and references therein. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.linear_model import Perceptron + >>> X, y = load_digits(return_X_y=True) + >>> clf = Perceptron(tol=1e-3, random_state=0) + >>> clf.fit(X, y) + Perceptron() + >>> clf.score(X, y) + 0.939... + """ + + _parameter_constraints: dict = {**BaseSGDClassifier._parameter_constraints} + _parameter_constraints.pop("loss") + _parameter_constraints.pop("average") + _parameter_constraints.update( + { + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "eta0": [Interval(Real, 0, None, closed="left")], + } + ) + + def __init__( + self, + *, + penalty=None, + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + eta0=1.0, + n_jobs=None, + random_state=0, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + ): + super().__init__( + loss="perceptron", + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + random_state=random_state, + learning_rate="constant", + eta0=eta0, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + power_t=0.5, + warm_start=warm_start, + class_weight=class_weight, + n_jobs=n_jobs, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..33451d8640bffd2f32310b7d7e26b3eb7ae130f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py @@ -0,0 +1,308 @@ +# Authors: David Dale +# Christian Lorentzen +# License: BSD 3 clause +import warnings +from numbers import Real + +import numpy as np +from scipy import sparse +from scipy.optimize import linprog + +from ..base import BaseEstimator, RegressorMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..utils import _safe_indexing +from ..utils._param_validation import Interval, StrOptions +from ..utils.fixes import parse_version, sp_version +from ..utils.validation import _check_sample_weight +from ._base import LinearModel + + +class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator): + """Linear regression model that predicts conditional quantiles. + + The linear :class:`QuantileRegressor` optimizes the pinball loss for a + desired `quantile` and is robust to outliers. + + This model uses an L1 regularization like + :class:`~sklearn.linear_model.Lasso`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + quantile : float, default=0.5 + The quantile that the model tries to predict. It must be strictly + between 0 and 1. If 0.5 (default), the model predicts the 50% + quantile, i.e. the median. + + alpha : float, default=1.0 + Regularization constant that multiplies the L1 penalty term. 
+ + fit_intercept : bool, default=True + Whether or not to fit the intercept. + + solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \ + 'revised simplex'}, default='highs' + Method used by :func:`scipy.optimize.linprog` to solve the linear + programming formulation. + + From `scipy>=1.6.0`, it is recommended to use the highs methods because + they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs" + support sparse input data and, in fact, always convert to sparse csc. + + From `scipy>=1.11.0`, "interior-point" is not available anymore. + + .. versionchanged:: 1.4 + The default of `solver` changed to `"highs"` in version 1.4. + + solver_options : dict, default=None + Additional parameters passed to :func:`scipy.optimize.linprog` as + options. If `None` and if `solver='interior-point'`, then + `{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the + sake of stability. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the features. + + intercept_ : float + The intercept of the model, aka bias term. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations performed by the solver. + + See Also + -------- + Lasso : The Lasso is a linear model that estimates sparse coefficients + with l1 regularization. + HuberRegressor : Linear regression model that is robust to outliers. + + Examples + -------- + >>> from sklearn.linear_model import QuantileRegressor + >>> import numpy as np + >>> n_samples, n_features = 10, 2 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> # the two following lines are optional in practice + >>> from sklearn.utils.fixes import sp_version, parse_version + >>> solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point" + >>> reg = QuantileRegressor(quantile=0.8, solver=solver).fit(X, y) + >>> np.mean(y <= reg.predict(X)) + 0.8 + """ + + _parameter_constraints: dict = { + "quantile": [Interval(Real, 0, 1, closed="neither")], + "alpha": [Interval(Real, 0, None, closed="left")], + "fit_intercept": ["boolean"], + "solver": [ + StrOptions( + { + "highs-ds", + "highs-ipm", + "highs", + "interior-point", + "revised simplex", + } + ), + ], + "solver_options": [dict, None], + } + + def __init__( + self, + *, + quantile=0.5, + alpha=1.0, + fit_intercept=True, + solver="highs", + solver_options=None, + ): + self.quantile = quantile + self.alpha = alpha + self.fit_intercept = fit_intercept + self.solver = solver + self.solver_options = solver_options + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Returns self. 
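
A short usage sketch of the quantile/pinball behaviour described above (synthetic heteroscedastic data; assumes scipy>=1.6 so the default "highs" solver is available)::

    import numpy as np
    from sklearn.linear_model import QuantileRegressor
    from sklearn.metrics import mean_pinball_loss

    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(200, 1))
    y = 2 * X.ravel() + X.ravel() * rng.standard_normal(200)   # noise grows with x

    for q in (0.1, 0.5, 0.9):
        reg = QuantileRegressor(quantile=q, alpha=0).fit(X, y)
        coverage = np.mean(y <= reg.predict(X))                # should be close to q
        loss = mean_pinball_loss(y, reg.predict(X), alpha=q)
        print(f"q={q}: coverage={coverage:.2f}, pinball loss={loss:.3f}")
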
+ """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csc", "csr", "coo"], + y_numeric=True, + multi_output=False, + ) + sample_weight = _check_sample_weight(sample_weight, X) + + n_features = X.shape[1] + n_params = n_features + + if self.fit_intercept: + n_params += 1 + # Note that centering y and X with _preprocess_data does not work + # for quantile regression. + + # The objective is defined as 1/n * sum(pinball loss) + alpha * L1. + # So we rescale the penalty term, which is equivalent. + alpha = np.sum(sample_weight) * self.alpha + + if self.solver in ( + "highs-ds", + "highs-ipm", + "highs", + ) and sp_version < parse_version("1.6.0"): + raise ValueError( + f"Solver {self.solver} is only available " + f"with scipy>=1.6.0, got {sp_version}" + ) + else: + solver = self.solver + + if solver == "interior-point" and sp_version >= parse_version("1.11.0"): + raise ValueError( + f"Solver {solver} is not anymore available in SciPy >= 1.11.0." + ) + + if sparse.issparse(X) and solver not in ["highs", "highs-ds", "highs-ipm"]: + raise ValueError( + f"Solver {self.solver} does not support sparse X. " + "Use solver 'highs' for example." + ) + # make default solver more stable + if self.solver_options is None and solver == "interior-point": + solver_options = {"lstsq": True} + else: + solver_options = self.solver_options + + # After rescaling alpha, the minimization problem is + # min sum(pinball loss) + alpha * L1 + # Use linear programming formulation of quantile regression + # min_x c x + # A_eq x = b_eq + # 0 <= x + # x = (s0, s, t0, t, u, v) = slack variables >= 0 + # intercept = s0 - t0 + # coef = s - t + # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n) + # residual = y - X@coef - intercept = u - v + # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n)) + # b_eq = y + # p = n_features + # n = n_samples + # 1_n = vector of length n with entries equal one + # see https://stats.stackexchange.com/questions/384909/ + # + # Filtering out zero sample weights from the beginning makes life + # easier for the linprog solver. + indices = np.nonzero(sample_weight)[0] + n_indices = len(indices) # use n_mask instead of n_samples + if n_indices < len(sample_weight): + sample_weight = sample_weight[indices] + X = _safe_indexing(X, indices) + y = _safe_indexing(y, indices) + c = np.concatenate( + [ + np.full(2 * n_params, fill_value=alpha), + sample_weight * self.quantile, + sample_weight * (1 - self.quantile), + ] + ) + if self.fit_intercept: + # do not penalize the intercept + c[0] = 0 + c[n_params] = 0 + + if solver in ["highs", "highs-ds", "highs-ipm"]: + # Note that highs methods always use a sparse CSC memory layout internally, + # even for optimization problems parametrized using dense numpy arrays. + # Therefore, we work with CSC matrices as early as possible to limit + # unnecessary repeated memory copies. 
+ eye = sparse.eye(n_indices, dtype=X.dtype, format="csc") + if self.fit_intercept: + ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype)) + A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc") + else: + A_eq = sparse.hstack([X, -X, eye, -eye], format="csc") + else: + eye = np.eye(n_indices) + if self.fit_intercept: + ones = np.ones((n_indices, 1)) + A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1) + else: + A_eq = np.concatenate([X, -X, eye, -eye], axis=1) + + b_eq = y + + result = linprog( + c=c, + A_eq=A_eq, + b_eq=b_eq, + method=solver, + options=solver_options, + ) + solution = result.x + if not result.success: + failure = { + 1: "Iteration limit reached.", + 2: "Problem appears to be infeasible.", + 3: "Problem appears to be unbounded.", + 4: "Numerical difficulties encountered.", + } + warnings.warn( + "Linear programming for QuantileRegressor did not succeed.\n" + f"Status is {result.status}: " + + failure.setdefault(result.status, "unknown reason") + + "\n" + + "Result message of linprog:\n" + + result.message, + ConvergenceWarning, + ) + + # positive slack - negative slack + # solution is an array with (params_pos, params_neg, u, v) + params = solution[:n_params] - solution[n_params : 2 * n_params] + + self.n_iter_ = result.nit + + if self.fit_intercept: + self.coef_ = params[1:] + self.intercept_ = params[0] + else: + self.coef_ = params + self.intercept_ = 0.0 + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py new file mode 100644 index 0000000000000000000000000000000000000000..b2c25607f91c07b327315825d64fbbee41c359d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py @@ -0,0 +1,623 @@ +# Author: Johannes Schönberger +# +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + BaseEstimator, + MetaEstimatorMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, + clone, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_consistent_length, check_random_state +from ..utils._param_validation import ( + HasMethods, + Interval, + Options, + RealNotInt, + StrOptions, +) +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.random import sample_without_replacement +from ..utils.validation import _check_sample_weight, check_is_fitted, has_fit_parameter +from ._base import LinearRegression + +_EPSILON = np.spacing(1) + + +def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability): + """Determine number trials such that at least one outlier-free subset is + sampled for the given inlier/outlier ratio. + + Parameters + ---------- + n_inliers : int + Number of inliers in the data. + + n_samples : int + Total number of samples in the data. + + min_samples : int + Minimum number of samples chosen randomly from original data. + + probability : float + Probability (confidence) that one outlier-free sample is generated. + + Returns + ------- + trials : int + Number of trials. 
+ + """ + inlier_ratio = n_inliers / float(n_samples) + nom = max(_EPSILON, 1 - probability) + denom = max(_EPSILON, 1 - inlier_ratio**min_samples) + if nom == 1: + return 0 + if denom == 1: + return float("inf") + return abs(float(np.ceil(np.log(nom) / np.log(denom)))) + + +class RANSACRegressor( + _RoutingNotSupportedMixin, + MetaEstimatorMixin, + RegressorMixin, + MultiOutputMixin, + BaseEstimator, +): + """RANSAC (RANdom SAmple Consensus) algorithm. + + RANSAC is an iterative algorithm for the robust estimation of parameters + from a subset of inliers from the complete data set. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object, default=None + Base estimator object which implements the following methods: + + * `fit(X, y)`: Fit model to given training data and target values. + * `score(X, y)`: Returns the mean accuracy on the given test data, + which is used for the stop criterion defined by `stop_score`. + Additionally, the score is used to decide which of two equally + large consensus sets is chosen as the better one. + * `predict(X)`: Returns predicted values using the linear model, + which is used to compute residual error using loss function. + + If `estimator` is None, then + :class:`~sklearn.linear_model.LinearRegression` is used for + target values of dtype float. + + Note that the current implementation only supports regression + estimators. + + min_samples : int (>= 1) or float ([0, 1]), default=None + Minimum number of samples chosen randomly from original data. Treated + as an absolute number of samples for `min_samples >= 1`, treated as a + relative number `ceil(min_samples * X.shape[0])` for + `min_samples < 1`. This is typically chosen as the minimal number of + samples necessary to estimate the given `estimator`. By default a + :class:`~sklearn.linear_model.LinearRegression` estimator is assumed and + `min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly + dependent upon the model, so if a `estimator` other than + :class:`~sklearn.linear_model.LinearRegression` is used, the user must + provide a value. + + residual_threshold : float, default=None + Maximum residual for a data sample to be classified as an inlier. + By default the threshold is chosen as the MAD (median absolute + deviation) of the target values `y`. Points whose residuals are + strictly equal to the threshold are considered as inliers. + + is_data_valid : callable, default=None + This function is called with the randomly selected data before the + model is fitted to it: `is_data_valid(X, y)`. If its return value is + False the current randomly chosen sub-sample is skipped. + + is_model_valid : callable, default=None + This function is called with the estimated model and the randomly + selected data: `is_model_valid(model, X, y)`. If its return value is + False the current randomly chosen sub-sample is skipped. + Rejecting samples with this function is computationally costlier than + with `is_data_valid`. `is_model_valid` should therefore only be used if + the estimated model is needed for making the rejection decision. + + max_trials : int, default=100 + Maximum number of iterations for random sample selection. + + max_skips : int, default=np.inf + Maximum number of iterations that can be skipped due to finding zero + inliers or invalid data defined by ``is_data_valid`` or invalid models + defined by ``is_model_valid``. + + .. versionadded:: 0.19 + + stop_n_inliers : int, default=np.inf + Stop iteration if at least this number of inliers are found. 
+ + stop_score : float, default=np.inf + Stop iteration if score is greater equal than this threshold. + + stop_probability : float in range [0, 1], default=0.99 + RANSAC iteration stops if at least one outlier-free set of the training + data is sampled in RANSAC. This requires to generate at least N + samples (iterations):: + + N >= log(1 - probability) / log(1 - e**m) + + where the probability (confidence) is typically set to high value such + as 0.99 (the default) and e is the current fraction of inliers w.r.t. + the total number of samples. + + loss : str, callable, default='absolute_error' + String inputs, 'absolute_error' and 'squared_error' are supported which + find the absolute error and squared error per sample respectively. + + If ``loss`` is a callable, then it should be a function that takes + two arrays as inputs, the true and predicted value and returns a 1-D + array with the i-th value of the array corresponding to the loss + on ``X[i]``. + + If the loss on a sample is greater than the ``residual_threshold``, + then this sample is classified as an outlier. + + .. versionadded:: 0.18 + + random_state : int, RandomState instance, default=None + The generator used to initialize the centers. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : object + Best fitted model (copy of the `estimator` object). + + n_trials_ : int + Number of random selection trials until one of the stop criteria is + met. It is always ``<= max_trials``. + + inlier_mask_ : bool array of shape [n_samples] + Boolean mask of inliers classified as ``True``. + + n_skips_no_inliers_ : int + Number of iterations skipped due to finding zero inliers. + + .. versionadded:: 0.19 + + n_skips_invalid_data_ : int + Number of iterations skipped due to invalid data defined by + ``is_data_valid``. + + .. versionadded:: 0.19 + + n_skips_invalid_model_ : int + Number of iterations skipped due to an invalid model defined by + ``is_model_valid``. + + .. versionadded:: 0.19 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. + SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/RANSAC + .. [2] https://www.sri.com/wp-content/uploads/2021/12/ransac-publication.pdf + .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf + + Examples + -------- + >>> from sklearn.linear_model import RANSACRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression( + ... n_samples=200, n_features=2, noise=4.0, random_state=0) + >>> reg = RANSACRegressor(random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9885... 
+ >>> reg.predict(X[:1,]) + array([-31.9417...]) + """ # noqa: E501 + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "score", "predict"]), None], + "min_samples": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + None, + ], + "residual_threshold": [Interval(Real, 0, None, closed="left"), None], + "is_data_valid": [callable, None], + "is_model_valid": [callable, None], + "max_trials": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "max_skips": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "stop_n_inliers": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "stop_score": [Interval(Real, None, None, closed="both")], + "stop_probability": [Interval(Real, 0, 1, closed="both")], + "loss": [StrOptions({"absolute_error", "squared_error"}), callable], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator=None, + *, + min_samples=None, + residual_threshold=None, + is_data_valid=None, + is_model_valid=None, + max_trials=100, + max_skips=np.inf, + stop_n_inliers=np.inf, + stop_score=np.inf, + stop_probability=0.99, + loss="absolute_error", + random_state=None, + ): + self.estimator = estimator + self.min_samples = min_samples + self.residual_threshold = residual_threshold + self.is_data_valid = is_data_valid + self.is_model_valid = is_model_valid + self.max_trials = max_trials + self.max_skips = max_skips + self.stop_n_inliers = stop_n_inliers + self.stop_score = stop_score + self.stop_probability = stop_probability + self.random_state = random_state + self.loss = loss + + @_fit_context( + # RansacRegressor.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Fit estimator using RANSAC algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample + raises error if sample_weight is passed and estimator + fit method does not support it. + + .. versionadded:: 0.18 + + Returns + ------- + self : object + Fitted `RANSACRegressor` estimator. + + Raises + ------ + ValueError + If no valid consensus set could be found. This occurs if + `is_data_valid` and `is_model_valid` return False for all + `max_trials` randomly chosen sub-samples. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + # Need to validate separately here. We can't pass multi_output=True + # because that would allow y to be csr. Delay expensive finiteness + # check to the estimator's own input validation. + check_X_params = dict(accept_sparse="csr", force_all_finite=False) + check_y_params = dict(ensure_2d=False) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + check_consistent_length(X, y) + + if self.estimator is not None: + estimator = clone(self.estimator) + else: + estimator = LinearRegression() + + if self.min_samples is None: + if not isinstance(estimator, LinearRegression): + raise ValueError( + "`min_samples` needs to be explicitly set when estimator " + "is not a LinearRegression." 
+ ) + min_samples = X.shape[1] + 1 + elif 0 < self.min_samples < 1: + min_samples = np.ceil(self.min_samples * X.shape[0]) + elif self.min_samples >= 1: + min_samples = self.min_samples + if min_samples > X.shape[0]: + raise ValueError( + "`min_samples` may not be larger than number " + "of samples: n_samples = %d." % (X.shape[0]) + ) + + if self.residual_threshold is None: + # MAD (median absolute deviation) + residual_threshold = np.median(np.abs(y - np.median(y))) + else: + residual_threshold = self.residual_threshold + + if self.loss == "absolute_error": + if y.ndim == 1: + loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred) + else: + loss_function = lambda y_true, y_pred: np.sum( + np.abs(y_true - y_pred), axis=1 + ) + elif self.loss == "squared_error": + if y.ndim == 1: + loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2 + else: + loss_function = lambda y_true, y_pred: np.sum( + (y_true - y_pred) ** 2, axis=1 + ) + + elif callable(self.loss): + loss_function = self.loss + + random_state = check_random_state(self.random_state) + + try: # Not all estimator accept a random_state + estimator.set_params(random_state=random_state) + except ValueError: + pass + + estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight") + estimator_name = type(estimator).__name__ + if sample_weight is not None and not estimator_fit_has_sample_weight: + raise ValueError( + "%s does not support sample_weight. Samples" + " weights are only used for the calibration" + " itself." % estimator_name + ) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + n_inliers_best = 1 + score_best = -np.inf + inlier_mask_best = None + X_inlier_best = None + y_inlier_best = None + inlier_best_idxs_subset = None + self.n_skips_no_inliers_ = 0 + self.n_skips_invalid_data_ = 0 + self.n_skips_invalid_model_ = 0 + + # number of data samples + n_samples = X.shape[0] + sample_idxs = np.arange(n_samples) + + self.n_trials_ = 0 + max_trials = self.max_trials + while self.n_trials_ < max_trials: + self.n_trials_ += 1 + + if ( + self.n_skips_no_inliers_ + + self.n_skips_invalid_data_ + + self.n_skips_invalid_model_ + ) > self.max_skips: + break + + # choose random sample set + subset_idxs = sample_without_replacement( + n_samples, min_samples, random_state=random_state + ) + X_subset = X[subset_idxs] + y_subset = y[subset_idxs] + + # check if random sample set is valid + if self.is_data_valid is not None and not self.is_data_valid( + X_subset, y_subset + ): + self.n_skips_invalid_data_ += 1 + continue + + # fit model for current random sample set + if sample_weight is None: + estimator.fit(X_subset, y_subset) + else: + estimator.fit( + X_subset, y_subset, sample_weight=sample_weight[subset_idxs] + ) + + # check if estimated model is valid + if self.is_model_valid is not None and not self.is_model_valid( + estimator, X_subset, y_subset + ): + self.n_skips_invalid_model_ += 1 + continue + + # residuals of all data for current random sample model + y_pred = estimator.predict(X) + residuals_subset = loss_function(y, y_pred) + + # classify data into inliers and outliers + inlier_mask_subset = residuals_subset <= residual_threshold + n_inliers_subset = np.sum(inlier_mask_subset) + + # less inliers -> skip current random sample + if n_inliers_subset < n_inliers_best: + self.n_skips_no_inliers_ += 1 + continue + + # extract inlier data set + inlier_idxs_subset = sample_idxs[inlier_mask_subset] + X_inlier_subset = X[inlier_idxs_subset] + y_inlier_subset = 
y[inlier_idxs_subset] + + # score of inlier data set + score_subset = estimator.score(X_inlier_subset, y_inlier_subset) + + # same number of inliers but worse score -> skip current random + # sample + if n_inliers_subset == n_inliers_best and score_subset < score_best: + continue + + # save current random sample as best sample + n_inliers_best = n_inliers_subset + score_best = score_subset + inlier_mask_best = inlier_mask_subset + X_inlier_best = X_inlier_subset + y_inlier_best = y_inlier_subset + inlier_best_idxs_subset = inlier_idxs_subset + + max_trials = min( + max_trials, + _dynamic_max_trials( + n_inliers_best, n_samples, min_samples, self.stop_probability + ), + ) + + # break if sufficient number of inliers or score is reached + if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score: + break + + # if none of the iterations met the required criteria + if inlier_mask_best is None: + if ( + self.n_skips_no_inliers_ + + self.n_skips_invalid_data_ + + self.n_skips_invalid_model_ + ) > self.max_skips: + raise ValueError( + "RANSAC skipped more iterations than `max_skips` without" + " finding a valid consensus set. Iterations were skipped" + " because each randomly chosen sub-sample failed the" + " passing criteria. See estimator attributes for" + " diagnostics (n_skips*)." + ) + else: + raise ValueError( + "RANSAC could not find a valid consensus set. All" + " `max_trials` iterations were skipped because each" + " randomly chosen sub-sample failed the passing criteria." + " See estimator attributes for diagnostics (n_skips*)." + ) + else: + if ( + self.n_skips_no_inliers_ + + self.n_skips_invalid_data_ + + self.n_skips_invalid_model_ + ) > self.max_skips: + warnings.warn( + ( + "RANSAC found a valid consensus set but exited" + " early due to skipping more iterations than" + " `max_skips`. See estimator attributes for" + " diagnostics (n_skips*)." + ), + ConvergenceWarning, + ) + + # estimate final model using all inliers + if sample_weight is None: + estimator.fit(X_inlier_best, y_inlier_best) + else: + estimator.fit( + X_inlier_best, + y_inlier_best, + sample_weight=sample_weight[inlier_best_idxs_subset], + ) + + self.estimator_ = estimator + self.inlier_mask_ = inlier_mask_best + return self + + def predict(self, X): + """Predict using the estimated model. + + This is a wrapper for `estimator_.predict(X)`. + + Parameters + ---------- + X : {array-like or sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + y : array, shape = [n_samples] or [n_samples, n_targets] + Returns predicted values. + """ + check_is_fitted(self) + X = self._validate_data( + X, + force_all_finite=False, + accept_sparse=True, + reset=False, + ) + return self.estimator_.predict(X) + + def score(self, X, y): + """Return the score of the prediction. + + This is a wrapper for `estimator_.score(X, y)`. + + Parameters + ---------- + X : (array-like or sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Returns + ------- + z : float + Score of the prediction. 
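Usage sketch (not from the library) of the loop implemented above: with a few gross outliers injected, the final model is refit on the consensus set and `inlier_mask_` flags the corrupted samples, while a plain least-squares fit is pulled away from the true slope.

import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(100, 1))
y = 2.5 * X.ravel() + rng.normal(scale=0.3, size=100)
y[:10] += 25.0                       # inject gross outliers

ols = LinearRegression().fit(X, y)
ransac = RANSACRegressor(random_state=0).fit(X, y)

print(ols.coef_)                     # biased by the outliers
print(ransac.estimator_.coef_)       # expected to stay close to 2.5
print(np.sum(~ransac.inlier_mask_))  # roughly the 10 corrupted samples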
+ """ + check_is_fitted(self) + X = self._validate_data( + X, + force_all_finite=False, + accept_sparse=True, + reset=False, + ) + return self.estimator_.score(X, y) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..84646f5aaf130a4252a6a3c300e4cf69f8779c55 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py @@ -0,0 +1,2612 @@ +""" +Ridge regression +""" + +# Author: Mathieu Blondel +# Reuben Fletcher-Costin +# Fabian Pedregosa +# Michael Eickenberg +# License: BSD 3 clause + + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from scipy import linalg, optimize, sparse +from scipy.sparse import linalg as sp_linalg + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context, is_classifier +from ..exceptions import ConvergenceWarning +from ..metrics import check_scoring, get_scorer_names +from ..model_selection import GridSearchCV +from ..preprocessing import LabelBinarizer +from ..utils import ( + check_array, + check_consistent_length, + check_scalar, + column_or_1d, + compute_sample_weight, +) +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import row_norms, safe_sparse_dot +from ..utils.fixes import _sparse_linalg_cg +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._base import LinearClassifierMixin, LinearModel, _preprocess_data, _rescale_data +from ._sag import sag_solver + + +def _get_rescaled_operator(X, X_offset, sample_weight_sqrt): + """Create LinearOperator for matrix products with implicit centering. + + Matrix product `LinearOperator @ coef` returns `(X - X_offset) @ coef`. 
+ """ + + def matvec(b): + return X.dot(b) - sample_weight_sqrt * b.dot(X_offset) + + def rmatvec(b): + return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt) + + X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec) + return X1 + + +def _solve_sparse_cg( + X, + y, + alpha, + max_iter=None, + tol=1e-4, + verbose=0, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + n_samples, n_features = X.shape + + if X_offset is None or X_scale is None: + X1 = sp_linalg.aslinearoperator(X) + else: + X_offset_scale = X_offset / X_scale + X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt) + + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + + if n_features > n_samples: + + def create_mv(curr_alpha): + def _mv(x): + return X1.matvec(X1.rmatvec(x)) + curr_alpha * x + + return _mv + + else: + + def create_mv(curr_alpha): + def _mv(x): + return X1.rmatvec(X1.matvec(x)) + curr_alpha * x + + return _mv + + for i in range(y.shape[1]): + y_column = y[:, i] + + mv = create_mv(alpha[i]) + if n_features > n_samples: + # kernel ridge + # w = X.T * inv(X X^t + alpha*Id) y + C = sp_linalg.LinearOperator( + (n_samples, n_samples), matvec=mv, dtype=X.dtype + ) + coef, info = _sparse_linalg_cg(C, y_column, rtol=tol) + coefs[i] = X1.rmatvec(coef) + else: + # linear ridge + # w = inv(X^t X + alpha*Id) * X.T y + y_column = X1.rmatvec(y_column) + C = sp_linalg.LinearOperator( + (n_features, n_features), matvec=mv, dtype=X.dtype + ) + coefs[i], info = _sparse_linalg_cg(C, y_column, maxiter=max_iter, rtol=tol) + + if info < 0: + raise ValueError("Failed with error code %d" % info) + + if max_iter is None and info > 0 and verbose: + warnings.warn( + "sparse_cg did not converge after %d iterations." % info, + ConvergenceWarning, + ) + + return coefs + + +def _solve_lsqr( + X, + y, + *, + alpha, + fit_intercept=True, + max_iter=None, + tol=1e-4, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + """Solve Ridge regression via LSQR. + + We expect that y is always mean centered. + If X is dense, we expect it to be mean centered such that we can solve + ||y - Xw||_2^2 + alpha * ||w||_2^2 + + If X is sparse, we expect X_offset to be given such that we can solve + ||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2 + + With sample weights S=diag(sample_weight), this becomes + ||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2 + and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In + this case, X_offset is the sample_weight weighted mean of X before scaling by + sqrt(S). The objective then reads + ||y - (X - sqrt(S) X_offset) w)||_2^2 + alpha * ||w||_2^2 + """ + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + if sparse.issparse(X) and fit_intercept: + X_offset_scale = X_offset / X_scale + X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt) + else: + # No need to touch anything + X1 = X + + n_samples, n_features = X.shape + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + n_iter = np.empty(y.shape[1], dtype=np.int32) + + # According to the lsqr documentation, alpha = damp^2. 
+ sqrt_alpha = np.sqrt(alpha) + + for i in range(y.shape[1]): + y_column = y[:, i] + info = sp_linalg.lsqr( + X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter + ) + coefs[i] = info[0] + n_iter[i] = info[2] + + return coefs, n_iter + + +def _solve_cholesky(X, y, alpha): + # w = inv(X^t X + alpha*Id) * X.T y + n_features = X.shape[1] + n_targets = y.shape[1] + + A = safe_sparse_dot(X.T, X, dense_output=True) + Xy = safe_sparse_dot(X.T, y, dense_output=True) + + one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]]) + + if one_alpha: + A.flat[:: n_features + 1] += alpha[0] + return linalg.solve(A, Xy, assume_a="pos", overwrite_a=True).T + else: + coefs = np.empty([n_targets, n_features], dtype=X.dtype) + for coef, target, current_alpha in zip(coefs, Xy.T, alpha): + A.flat[:: n_features + 1] += current_alpha + coef[:] = linalg.solve(A, target, assume_a="pos", overwrite_a=False).ravel() + A.flat[:: n_features + 1] -= current_alpha + return coefs + + +def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False): + # dual_coef = inv(X X^t + alpha*Id) y + n_samples = K.shape[0] + n_targets = y.shape[1] + + if copy: + K = K.copy() + + alpha = np.atleast_1d(alpha) + one_alpha = (alpha == alpha[0]).all() + has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None] + + if has_sw: + # Unlike other solvers, we need to support sample_weight directly + # because K might be a pre-computed kernel. + sw = np.sqrt(np.atleast_1d(sample_weight)) + y = y * sw[:, np.newaxis] + K *= np.outer(sw, sw) + + if one_alpha: + # Only one penalty, we can solve multi-target problems in one time. + K.flat[:: n_samples + 1] += alpha[0] + + try: + # Note: we must use overwrite_a=False in order to be able to + # use the fall-back solution below in case a LinAlgError + # is raised + dual_coef = linalg.solve(K, y, assume_a="pos", overwrite_a=False) + except np.linalg.LinAlgError: + warnings.warn( + "Singular matrix in solving dual problem. Using " + "least-squares solution instead." + ) + dual_coef = linalg.lstsq(K, y)[0] + + # K is expensive to compute and store in memory so change it back in + # case it was user-given. + K.flat[:: n_samples + 1] -= alpha[0] + + if has_sw: + dual_coef *= sw[:, np.newaxis] + + return dual_coef + else: + # One penalty per target. We need to solve each target separately. + dual_coefs = np.empty([n_targets, n_samples], K.dtype) + + for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha): + K.flat[:: n_samples + 1] += current_alpha + + dual_coef[:] = linalg.solve( + K, target, assume_a="pos", overwrite_a=False + ).ravel() + + K.flat[:: n_samples + 1] -= current_alpha + + if has_sw: + dual_coefs *= sw[np.newaxis, :] + + return dual_coefs.T + + +def _solve_svd(X, y, alpha): + U, s, Vt = linalg.svd(X, full_matrices=False) + idx = s > 1e-15 # same default value as scipy.linalg.pinv + s_nnz = s[idx][:, np.newaxis] + UTy = np.dot(U.T, y) + d = np.zeros((s.size, alpha.size), dtype=X.dtype) + d[idx] = s_nnz / (s_nnz**2 + alpha) + d_UT_y = d * UTy + return np.dot(Vt.T, d_UT_y).T + + +def _solve_lbfgs( + X, + y, + alpha, + positive=True, + max_iter=None, + tol=1e-4, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + """Solve ridge regression with LBFGS. + + The main purpose is fitting with forcing coefficients to be positive. + For unconstrained ridge regression, there are faster dedicated solver methods. + Note that with positive bounds on the coefficients, LBFGS seems faster + than scipy.optimize.lsq_linear. 
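Hedged sketch of the SVD route used by `_solve_svd` above: each singular direction is shrunk by s / (s**2 + alpha), which reproduces the normal-equation solution on a well-conditioned toy problem.

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
y = rng.randn(30)
alpha = 2.0

U, s, Vt = np.linalg.svd(X, full_matrices=False)
d = s / (s**2 + alpha)                       # per-direction shrinkage factors
w_svd = Vt.T @ (d * (U.T @ y))

w_normal = np.linalg.solve(X.T @ X + alpha * np.eye(4), X.T @ y)
print(np.allclose(w_svd, w_normal))          # True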
+ """ + n_samples, n_features = X.shape + + options = {} + if max_iter is not None: + options["maxiter"] = max_iter + config = { + "method": "L-BFGS-B", + "tol": tol, + "jac": True, + "options": options, + } + if positive: + config["bounds"] = [(0, np.inf)] * n_features + + if X_offset is not None and X_scale is not None: + X_offset_scale = X_offset / X_scale + else: + X_offset_scale = None + + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + + for i in range(y.shape[1]): + x0 = np.zeros((n_features,)) + y_column = y[:, i] + + def func(w): + residual = X.dot(w) - y_column + if X_offset_scale is not None: + residual -= sample_weight_sqrt * w.dot(X_offset_scale) + f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w) + grad = X.T @ residual + alpha[i] * w + if X_offset_scale is not None: + grad -= X_offset_scale * residual.dot(sample_weight_sqrt) + + return f, grad + + result = optimize.minimize(func, x0, **config) + if not result["success"]: + warnings.warn( + ( + "The lbfgs solver did not converge. Try increasing max_iter " + f"or tol. Currently: max_iter={max_iter} and tol={tol}" + ), + ConvergenceWarning, + ) + coefs[i] = result["x"] + + return coefs + + +def _get_valid_accept_sparse(is_X_sparse, solver): + if is_X_sparse and solver in ["auto", "sag", "saga"]: + return "csr" + else: + return ["csr", "csc", "coo"] + + +@validate_params( + { + "X": ["array-like", "sparse matrix", sp_linalg.LinearOperator], + "y": ["array-like"], + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "sample_weight": [ + Interval(Real, None, None, closed="neither"), + "array-like", + None, + ], + "solver": [ + StrOptions( + {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"} + ) + ], + "max_iter": [Interval(Integral, 0, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left")], + "verbose": ["verbose"], + "positive": ["boolean"], + "random_state": ["random_state"], + "return_n_iter": ["boolean"], + "return_intercept": ["boolean"], + "check_input": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def ridge_regression( + X, + y, + alpha, + *, + sample_weight=None, + solver="auto", + max_iter=None, + tol=1e-4, + verbose=0, + positive=False, + random_state=None, + return_n_iter=False, + return_intercept=False, + check_input=True, +): + """Solve the ridge equation by the method of normal equations. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix, LinearOperator} of shape \ + (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + alpha : float or array-like of shape (n_targets,) + Constant that multiplies the L2 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Ridge` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + If an array is passed, penalties are assumed to be specific to the + targets. Hence they must correspond in number. + + sample_weight : float or array-like of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. 
If sample_weight is not None and + solver='auto', the solver will be set to 'cholesky'. + + .. versionadded:: 0.17 + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. + + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution via a Cholesky decomposition of + dot(X.T, X) + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its improved, unbiased version named SAGA. Both methods also use an + iterative procedure, and are often faster than other solvers when + both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + All solvers except 'svd' support both dense and sparse data. However, only + 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when + `fit_intercept` is True. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + For the 'sparse_cg' and 'lsqr' solvers, the default value is determined + by scipy.sparse.linalg. For 'sag' and saga solver, the default value is + 1000. For 'lbfgs' solver, the default value is 15000. + + tol : float, default=1e-4 + Precision of the solution. Note that `tol` has no effect for solvers 'svd' and + 'cholesky'. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + verbose : int, default=0 + Verbosity level. Setting verbose > 0 will display additional + information depending on the solver used. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + return_n_iter : bool, default=False + If True, the method also returns `n_iter`, the actual number of + iteration performed by the solver. + + .. versionadded:: 0.17 + + return_intercept : bool, default=False + If True and if X is sparse, the method also returns the intercept, + and the solver is automatically changed to 'sag'. This is only a + temporary fix for fitting the intercept with sparse data. For dense + data, use sklearn.linear_model._preprocess_data before your regression. + + .. versionadded:: 0.17 + + check_input : bool, default=True + If False, the input arrays X and y will not be checked. + + .. 
versionadded:: 0.21 + + Returns + ------- + coef : ndarray of shape (n_features,) or (n_targets, n_features) + Weight vector(s). + + n_iter : int, optional + The actual number of iteration performed by the solver. + Only returned if `return_n_iter` is True. + + intercept : float or ndarray of shape (n_targets,) + The intercept of the model. Only returned if `return_intercept` + is True and if X is a scipy sparse array. + + Notes + ----- + This function won't compute the intercept. + + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to ``1 / (2C)`` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_regression + >>> from sklearn.linear_model import ridge_regression + >>> rng = np.random.RandomState(0) + >>> X = rng.randn(100, 4) + >>> y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + 0.1 * rng.standard_normal(100) + >>> coef, intercept = ridge_regression(X, y, alpha=1.0, return_intercept=True) + >>> list(coef) + [1.9..., -1.0..., -0.0..., -0.0...] + >>> intercept + -0.0... + """ + return _ridge_regression( + X, + y, + alpha, + sample_weight=sample_weight, + solver=solver, + max_iter=max_iter, + tol=tol, + verbose=verbose, + positive=positive, + random_state=random_state, + return_n_iter=return_n_iter, + return_intercept=return_intercept, + X_scale=None, + X_offset=None, + check_input=check_input, + ) + + +def _ridge_regression( + X, + y, + alpha, + sample_weight=None, + solver="auto", + max_iter=None, + tol=1e-4, + verbose=0, + positive=False, + random_state=None, + return_n_iter=False, + return_intercept=False, + X_scale=None, + X_offset=None, + check_input=True, + fit_intercept=False, +): + has_sw = sample_weight is not None + + if solver == "auto": + if positive: + solver = "lbfgs" + elif return_intercept: + # sag supports fitting intercept directly + solver = "sag" + elif not sparse.issparse(X): + solver = "cholesky" + else: + solver = "sparse_cg" + + if solver not in ("sparse_cg", "cholesky", "svd", "lsqr", "sag", "saga", "lbfgs"): + raise ValueError( + "Known solvers are 'sparse_cg', 'cholesky', 'svd'" + " 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver + ) + + if positive and solver != "lbfgs": + raise ValueError( + "When positive=True, only 'lbfgs' solver can be used. " + f"Please change solver {solver} to 'lbfgs' " + "or set positive=False." + ) + + if solver == "lbfgs" and not positive: + raise ValueError( + "'lbfgs' solver can be used only when positive=True. " + "Please use another solver." + ) + + if return_intercept and solver != "sag": + raise ValueError( + "In Ridge, only 'sag' solver can directly fit the " + "intercept. Please change solver to 'sag' or set " + "return_intercept=False." 
+ ) + + if check_input: + _dtype = [np.float64, np.float32] + _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver) + X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C") + y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None) + check_consistent_length(X, y) + + n_samples, n_features = X.shape + + if y.ndim > 2: + raise ValueError("Target y has the wrong shape %s" % str(y.shape)) + + ravel = False + if y.ndim == 1: + y = y.reshape(-1, 1) + ravel = True + + n_samples_, n_targets = y.shape + + if n_samples != n_samples_: + raise ValueError( + "Number of samples in X and y does not correspond: %d != %d" + % (n_samples, n_samples_) + ) + + if has_sw: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if solver not in ["sag", "saga"]: + # SAG supports sample_weight directly. For other solvers, + # we implement sample_weight via a simple rescaling. + X, y, sample_weight_sqrt = _rescale_data(X, y, sample_weight) + + # Some callers of this method might pass alpha as single + # element array which already has been validated. + if alpha is not None and not isinstance(alpha, np.ndarray): + alpha = check_scalar( + alpha, + "alpha", + target_type=numbers.Real, + min_val=0.0, + include_boundaries="left", + ) + + # There should be either 1 or n_targets penalties + alpha = np.asarray(alpha, dtype=X.dtype).ravel() + if alpha.size not in [1, n_targets]: + raise ValueError( + "Number of targets and number of penalties do not correspond: %d != %d" + % (alpha.size, n_targets) + ) + + if alpha.size == 1 and n_targets > 1: + alpha = np.repeat(alpha, n_targets) + + n_iter = None + if solver == "sparse_cg": + coef = _solve_sparse_cg( + X, + y, + alpha, + max_iter=max_iter, + tol=tol, + verbose=verbose, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + elif solver == "lsqr": + coef, n_iter = _solve_lsqr( + X, + y, + alpha=alpha, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + elif solver == "cholesky": + if n_features > n_samples: + K = safe_sparse_dot(X, X.T, dense_output=True) + try: + dual_coef = _solve_cholesky_kernel(K, y, alpha) + + coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T + except linalg.LinAlgError: + # use SVD solver if matrix is singular + solver = "svd" + else: + try: + coef = _solve_cholesky(X, y, alpha) + except linalg.LinAlgError: + # use SVD solver if matrix is singular + solver = "svd" + + elif solver in ["sag", "saga"]: + # precompute max_squared_sum for all targets + max_squared_sum = row_norms(X, squared=True).max() + + coef = np.empty((y.shape[1], n_features), dtype=X.dtype) + n_iter = np.empty(y.shape[1], dtype=np.int32) + intercept = np.zeros((y.shape[1],), dtype=X.dtype) + for i, (alpha_i, target) in enumerate(zip(alpha, y.T)): + init = { + "coef": np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype) + } + coef_, n_iter_, _ = sag_solver( + X, + target.ravel(), + sample_weight, + "squared", + alpha_i, + 0, + max_iter, + tol, + verbose, + random_state, + False, + max_squared_sum, + init, + is_saga=solver == "saga", + ) + if return_intercept: + coef[i] = coef_[:-1] + intercept[i] = coef_[-1] + else: + coef[i] = coef_ + n_iter[i] = n_iter_ + + if intercept.shape[0] == 1: + intercept = intercept[0] + coef = np.asarray(coef) + + elif solver == "lbfgs": + coef = _solve_lbfgs( + X, + y, + alpha, + positive=positive, + 
tol=tol, + max_iter=max_iter, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + if solver == "svd": + if sparse.issparse(X): + raise TypeError("SVD solver does not support sparse inputs currently") + coef = _solve_svd(X, y, alpha) + + if ravel: + # When y was passed as a 1d-array, we flatten the coefficients. + coef = coef.ravel() + + if return_n_iter and return_intercept: + return coef, n_iter, intercept + elif return_intercept: + return coef, intercept + elif return_n_iter: + return coef, n_iter + else: + return coef + + +class _BaseRidge(LinearModel, metaclass=ABCMeta): + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray], + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left")], + "solver": [ + StrOptions( + {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"} + ) + ], + "positive": ["boolean"], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + solver="auto", + positive=False, + random_state=None, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.max_iter = max_iter + self.tol = tol + self.solver = solver + self.positive = positive + self.random_state = random_state + + def fit(self, X, y, sample_weight=None): + if self.solver == "lbfgs" and not self.positive: + raise ValueError( + "'lbfgs' solver can be used only when positive=True. " + "Please use another solver." + ) + + if self.positive: + if self.solver not in ["auto", "lbfgs"]: + raise ValueError( + f"solver='{self.solver}' does not support positive fitting. Please" + " set the solver to 'auto' or 'lbfgs', or set `positive=False`" + ) + else: + solver = self.solver + elif sparse.issparse(X) and self.fit_intercept: + if self.solver not in ["auto", "lbfgs", "lsqr", "sag", "sparse_cg"]: + raise ValueError( + "solver='{}' does not support fitting the intercept " + "on sparse data. Please set the solver to 'auto' or " + "'lsqr', 'sparse_cg', 'sag', 'lbfgs' " + "or set `fit_intercept=False`".format(self.solver) + ) + if self.solver in ["lsqr", "lbfgs"]: + solver = self.solver + elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4: + warnings.warn( + '"sag" solver requires many iterations to fit ' + "an intercept with sparse inputs. Either set the " + 'solver to "auto" or "sparse_cg", or set a low ' + '"tol" and a high "max_iter" (especially if inputs are ' + "not standardized)." 
+ ) + solver = "sag" + else: + solver = "sparse_cg" + else: + solver = self.solver + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # when X is sparse we only remove offset from y + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + if solver == "sag" and sparse.issparse(X) and self.fit_intercept: + self.coef_, self.n_iter_, self.intercept_ = _ridge_regression( + X, + y, + alpha=self.alpha, + sample_weight=sample_weight, + max_iter=self.max_iter, + tol=self.tol, + solver="sag", + positive=self.positive, + random_state=self.random_state, + return_n_iter=True, + return_intercept=True, + check_input=False, + ) + # add the offset which was subtracted by _preprocess_data + self.intercept_ += y_offset + + else: + if sparse.issparse(X) and self.fit_intercept: + # required to fit intercept with sparse_cg and lbfgs solver + params = {"X_offset": X_offset, "X_scale": X_scale} + else: + # for dense matrices or when intercept is set to 0 + params = {} + + self.coef_, self.n_iter_ = _ridge_regression( + X, + y, + alpha=self.alpha, + sample_weight=sample_weight, + max_iter=self.max_iter, + tol=self.tol, + solver=solver, + positive=self.positive, + random_state=self.random_state, + return_n_iter=True, + return_intercept=False, + check_input=False, + fit_intercept=self.fit_intercept, + **params, + ) + self._set_intercept(X_offset, y_offset, X_scale) + + return self + + +class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge): + """Linear least squares with l2 regularization. + + Minimizes the objective function:: + + ||y - Xw||^2_2 + alpha * ||w||^2_2 + + This model solves a regression model where the loss function is + the linear least squares function and regularization is given by + the l2-norm. Also known as Ridge Regression or Tikhonov regularization. + This estimator has built-in support for multi-variate regression + (i.e., when y is a 2d-array of shape (n_samples, n_targets)). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : {float, ndarray of shape (n_targets,)}, default=1.0 + Constant that multiplies the L2 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Ridge` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + If an array is passed, penalties are assumed to be specific to the + targets. Hence they must correspond in number. + + fit_intercept : bool, default=True + Whether to fit the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. ``X`` and ``y`` are expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + For 'sparse_cg' and 'lsqr' solvers, the default value is determined + by scipy.sparse.linalg. For 'sag' solver, the default value is 1000. + For 'lbfgs' solver, the default value is 15000. + + tol : float, default=1e-4 + The precision of the solution (`coef_`) is determined by `tol` which + specifies a different convergence criterion for each solver: + + - 'svd': `tol` has no impact. + + - 'cholesky': `tol` has no impact. 
+ + - 'sparse_cg': norm of residuals smaller than `tol`. + + - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr, + which control the norm of the residual vector in terms of the norms of + matrix and coefficients. + + - 'sag' and 'saga': relative change of coef smaller than `tol`. + + - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals| + smaller than `tol`. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. + + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution. + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its improved, unbiased version named SAGA. Both methods also use an + iterative procedure, and are often faster than other solvers when + both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + All solvers except 'svd' support both dense and sparse data. However, only + 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when + `fit_intercept` is True. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + .. versionadded:: 0.17 + `random_state` to support Stochastic Average Gradient. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Weight vector(s). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + n_iter_ : None or ndarray of shape (n_targets,) + Actual number of iterations for each target. Available only for + sag and lsqr solvers. Other solvers will return None. + + .. versionadded:: 0.17 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + RidgeClassifier : Ridge classifier. + RidgeCV : Ridge regression with built-in cross validation. 
+ :class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression + combines ridge regression with the kernel trick. + + Notes + ----- + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to ``1 / (2C)`` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + + Examples + -------- + >>> from sklearn.linear_model import Ridge + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> clf = Ridge(alpha=1.0) + >>> clf.fit(X, y) + Ridge() + """ + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + solver="auto", + positive=False, + random_state=None, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + solver=solver, + positive=positive, + random_state=random_state, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + """ + _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver) + X, y = self._validate_data( + X, + y, + accept_sparse=_accept_sparse, + dtype=[np.float64, np.float32], + multi_output=True, + y_numeric=True, + ) + return super().fit(X, y, sample_weight=sample_weight) + + +class _RidgeClassifierMixin(LinearClassifierMixin): + def _prepare_data(self, X, y, sample_weight, solver): + """Validate `X` and `y` and binarize `y`. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + solver : str + The solver used in `Ridge` to know which sparse format to support. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Validated training data. + + y : ndarray of shape (n_samples,) + Validated target values. + + sample_weight : ndarray of shape (n_samples,) + Validated sample weights. + + Y : ndarray of shape (n_samples, n_classes) + The binarized version of `y`. + """ + accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver) + X, y = self._validate_data( + X, + y, + accept_sparse=accept_sparse, + multi_output=True, + y_numeric=False, + ) + + self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) + Y = self._label_binarizer.fit_transform(y) + if not self._label_binarizer.y_type_.startswith("multilabel"): + y = column_or_1d(y, warn=True) + + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + if self.class_weight: + sample_weight = sample_weight * compute_sample_weight(self.class_weight, y) + return X, y, sample_weight, Y + + def predict(self, X): + """Predict class labels for samples in `X`. 
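Hedged sketch of the mechanism prepared by `_prepare_data` above: binarize the labels to {-1, 1}, fit a multi-output `Ridge` on the binarized targets, and predict the class whose regression output is largest. On a small multiclass problem this is expected to match `RidgeClassifier`.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Ridge, RidgeClassifier
from sklearn.preprocessing import LabelBinarizer

X, y = load_iris(return_X_y=True)

lb = LabelBinarizer(pos_label=1, neg_label=-1)
Y = lb.fit_transform(y)                     # shape (n_samples, n_classes), entries in {-1, 1}
manual = lb.classes_[np.argmax(Ridge(alpha=1.0).fit(X, Y).predict(X), axis=1)]

clf = RidgeClassifier(alpha=1.0).fit(X, y)
print(np.mean(manual == clf.predict(X)))    # expected to be 1.0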
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The data matrix for which we want to predict the targets.
+
+        Returns
+        -------
+        y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
+            Vector or matrix containing the predictions. In binary and
+            multiclass problems, this is a vector containing `n_samples`. In
+            a multilabel problem, it returns a matrix of shape
+            `(n_samples, n_outputs)`.
+        """
+        check_is_fitted(self, attributes=["_label_binarizer"])
+        if self._label_binarizer.y_type_.startswith("multilabel"):
+            # Threshold such that the negative label is -1 and positive label
+            # is 1 to use the inverse transform of the label binarizer fitted
+            # during fit.
+            scores = 2 * (self.decision_function(X) > 0) - 1
+            return self._label_binarizer.inverse_transform(scores)
+        return super().predict(X)
+
+    @property
+    def classes_(self):
+        """Class labels."""
+        return self._label_binarizer.classes_
+
+    def _more_tags(self):
+        return {"multilabel": True}
+
+
+class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge):
+    """Classifier using Ridge regression.
+
+    This classifier first converts the target values into ``{-1, 1}`` and
+    then treats the problem as a regression task (multi-output regression in
+    the multiclass case).
+
+    Read more in the :ref:`User Guide <ridge_regression>`.
+
+    Parameters
+    ----------
+    alpha : float, default=1.0
+        Regularization strength; must be a positive float. Regularization
+        improves the conditioning of the problem and reduces the variance of
+        the estimates. Larger values specify stronger regularization.
+        Alpha corresponds to ``1 / (2C)`` in other linear models such as
+        :class:`~sklearn.linear_model.LogisticRegression` or
+        :class:`~sklearn.svm.LinearSVC`.
+
+    fit_intercept : bool, default=True
+        Whether to calculate the intercept for this model. If set to false, no
+        intercept will be used in calculations (e.g. data is expected to be
+        already centered).
+
+    copy_X : bool, default=True
+        If True, X will be copied; else, it may be overwritten.
+
+    max_iter : int, default=None
+        Maximum number of iterations for conjugate gradient solver.
+        The default value is determined by scipy.sparse.linalg.
+
+    tol : float, default=1e-4
+        The precision of the solution (`coef_`) is determined by `tol` which
+        specifies a different convergence criterion for each solver:
+
+        - 'svd': `tol` has no impact.
+
+        - 'cholesky': `tol` has no impact.
+
+        - 'sparse_cg': norm of residuals smaller than `tol`.
+
+        - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr,
+          which control the norm of the residual vector in terms of the norms of
+          matrix and coefficients.
+
+        - 'sag' and 'saga': relative change of coef smaller than `tol`.
+
+        - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals|
+          smaller than `tol`.
+
+        .. versionchanged:: 1.2
+           Default value changed from 1e-3 to 1e-4 for consistency with other linear
+           models.
+
+    class_weight : dict or 'balanced', default=None
+        Weights associated with classes in the form ``{class_label: weight}``.
+        If not given, all classes are supposed to have weight one.
+
+        The "balanced" mode uses the values of y to automatically adjust
+        weights inversely proportional to class frequencies in the input data
+        as ``n_samples / (n_classes * np.bincount(y))``.
+
+    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
+            'sag', 'saga', 'lbfgs'}, default='auto'
+        Solver to use in the computational routines:
+
+        - 'auto' chooses the solver automatically based on the type of data.
+ + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution. + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its unbiased and more flexible version named SAGA. Both methods + use an iterative procedure, and are often faster than other solvers + when both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) + Coefficient of the features in the decision function. + + ``coef_`` is of shape (1, n_features) when the given problem is binary. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + n_iter_ : None or ndarray of shape (n_targets,) + Actual number of iterations for each target. Available only for + sag and lsqr solvers. Other solvers will return None. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifierCV : Ridge classifier with built-in cross validation. + + Notes + ----- + For multi-class classification, n_class classifiers are trained in + a one-versus-all approach. Concretely, this is implemented by taking + advantage of the multi-variate response support in Ridge. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.linear_model import RidgeClassifier + >>> X, y = load_breast_cancer(return_X_y=True) + >>> clf = RidgeClassifier().fit(X, y) + >>> clf.score(X, y) + 0.9595... 
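+
+    A rough additional sketch (reusing the same ``X`` and ``y`` as above):
+    imbalanced classes can be reweighted with ``class_weight="balanced"``:
+
+    >>> clf_balanced = RidgeClassifier(alpha=1.0, class_weight="balanced")
+    >>> clf_balanced = clf_balanced.fit(X, y)
+    >>> clf_balanced.predict(X[:2]).shape
+    (2,)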
+ """ + + _parameter_constraints: dict = { + **_BaseRidge._parameter_constraints, + "class_weight": [dict, StrOptions({"balanced"}), None], + } + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + class_weight=None, + solver="auto", + positive=False, + random_state=None, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + solver=solver, + positive=positive, + random_state=random_state, + ) + self.class_weight = class_weight + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge classifier model. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + .. versionadded:: 0.17 + *sample_weight* support to RidgeClassifier. + + Returns + ------- + self : object + Instance of the estimator. + """ + X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver) + + super().fit(X, Y, sample_weight=sample_weight) + return self + + +def _check_gcv_mode(X, gcv_mode): + if gcv_mode in ["eigen", "svd"]: + return gcv_mode + # if X has more rows than columns, use decomposition of X^T.X, + # otherwise X.X^T + if X.shape[0] > X.shape[1]: + return "svd" + return "eigen" + + +def _find_smallest_angle(query, vectors): + """Find the column of vectors that is most aligned with the query. + + Both query and the columns of vectors must have their l2 norm equal to 1. + + Parameters + ---------- + query : ndarray of shape (n_samples,) + Normalized query vector. + + vectors : ndarray of shape (n_samples, n_features) + Vectors to which we compare query, as columns. Must be normalized. + """ + abs_cosine = np.abs(query.dot(vectors)) + index = np.argmax(abs_cosine) + return index + + +class _X_CenterStackOp(sparse.linalg.LinearOperator): + """Behaves as centered and scaled X with an added intercept column. + + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]) + """ + + def __init__(self, X, X_mean, sqrt_sw): + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_samples, n_features + 1)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + return ( + safe_sparse_dot(self.X, v[:-1], dense_output=True) + - self.sqrt_sw * self.X_mean.dot(v[:-1]) + + v[-1] * self.sqrt_sw + ) + + def _matmat(self, v): + return ( + safe_sparse_dot(self.X, v[:-1], dense_output=True) + - self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + + v[-1] * self.sqrt_sw[:, None] + ) + + def _transpose(self): + return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw) + + +class _XT_CenterStackOp(sparse.linalg.LinearOperator): + """Behaves as transposed centered and scaled X with an intercept column. 
+ + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T + """ + + def __init__(self, X, X_mean, sqrt_sw): + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_features + 1, n_samples)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + n_features = self.shape[0] + res = np.empty(n_features, dtype=self.X.dtype) + res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - ( + self.X_mean * self.sqrt_sw.dot(v) + ) + res[-1] = np.dot(v, self.sqrt_sw) + return res + + def _matmat(self, v): + n_features = self.shape[0] + res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype) + res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[ + :, None + ] * self.sqrt_sw.dot(v) + res[-1] = np.dot(self.sqrt_sw, v) + return res + + +class _IdentityRegressor: + """Fake regressor which will directly output the prediction.""" + + def decision_function(self, y_predict): + return y_predict + + def predict(self, y_predict): + return y_predict + + +class _IdentityClassifier(LinearClassifierMixin): + """Fake classifier which will directly output the prediction. + + We inherit from LinearClassifierMixin to get the proper shape for the + output `y`. + """ + + def __init__(self, classes): + self.classes_ = classes + + def decision_function(self, y_predict): + return y_predict + + +class _RidgeGCV(LinearModel): + """Ridge regression with built-in Leave-one-out Cross-Validation. + + This class is not intended to be used directly. Use RidgeCV instead. + + Notes + ----- + + We want to solve (K + alpha*Id)c = y, + where K = X X^T is the kernel matrix. + + Let G = (K + alpha*Id). + + Dual solution: c = G^-1y + Primal solution: w = X^T c + + Compute eigendecomposition K = Q V Q^T. + Then G^-1 = Q (V + alpha*Id)^-1 Q^T, + where (V + alpha*Id) is diagonal. + It is thus inexpensive to inverse for many alphas. + + Let loov be the vector of prediction values for each example + when the model was fitted with all examples but this example. + + loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1) + + Let looe be the vector of prediction errors for each example + when the model was fitted with all examples but this example. + + looe = y - loov = c / diag(G^-1) + + The best score (negative mean squared error or user-provided scoring) is + stored in the `best_score_` attribute, and the selected hyperparameter in + `alpha_`. + + References + ---------- + http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf + https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf + """ + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + copy_X=True, + gcv_mode=None, + store_cv_values=False, + is_clf=False, + alpha_per_target=False, + ): + self.alphas = alphas + self.fit_intercept = fit_intercept + self.scoring = scoring + self.copy_X = copy_X + self.gcv_mode = gcv_mode + self.store_cv_values = store_cv_values + self.is_clf = is_clf + self.alpha_per_target = alpha_per_target + + @staticmethod + def _decomp_diag(v_prime, Q): + # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T)) + return (v_prime * Q**2).sum(axis=-1) + + @staticmethod + def _diag_dot(D, B): + # compute dot(diag(D), B) + if len(B.shape) > 1: + # handle case where B is > 1-d + D = D[(slice(None),) + (np.newaxis,) * (len(B.shape) - 1)] + return D * B + + def _compute_gram(self, X, sqrt_sw): + """Computes the Gram matrix XX^T with possible centering. 
+ + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The preprocessed design matrix. + + sqrt_sw : ndarray of shape (n_samples,) + square roots of sample weights + + Returns + ------- + gram : ndarray of shape (n_samples, n_samples) + The Gram matrix. + X_mean : ndarray of shape (n_feature,) + The weighted mean of ``X`` for each feature. + + Notes + ----- + When X is dense the centering has been done in preprocessing + so the mean is 0 and we just compute XX^T. + + When X is sparse it has not been centered in preprocessing, but it has + been scaled by sqrt(sample weights). + + When self.fit_intercept is False no centering is done. + + The centered X is never actually computed because centering would break + the sparsity of X. + """ + center = self.fit_intercept and sparse.issparse(X) + if not center: + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X, X.T, dense_output=True), X_mean + # X is sparse + n_samples = X.shape[0] + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples) + ) + X_weighted = sample_weight_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean *= n_samples / sqrt_sw.dot(sqrt_sw) + X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True) + X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean) + return ( + safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, + X_mean, + ) + + def _compute_covariance(self, X, sqrt_sw): + """Computes covariance matrix X^TX with possible centering. + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + The preprocessed design matrix. + + sqrt_sw : ndarray of shape (n_samples,) + square roots of sample weights + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + The covariance matrix. + X_mean : ndarray of shape (n_feature,) + The weighted mean of ``X`` for each feature. + + Notes + ----- + Since X is sparse it has not been centered in preprocessing, but it has + been scaled by sqrt(sample weights). + + When self.fit_intercept is False no centering is done. + + The centered X is never actually computed because centering would break + the sparsity of X. + """ + if not self.fit_intercept: + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X.T, X, dense_output=True), X_mean + # this function only gets called for sparse X + n_samples = X.shape[0] + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples) + ) + X_weighted = sample_weight_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw) + weight_sum = sqrt_sw.dot(sqrt_sw) + return ( + safe_sparse_dot(X.T, X, dense_output=True) + - weight_sum * np.outer(X_mean, X_mean), + X_mean, + ) + + def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): + """Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) + without explicitly centering X nor computing X.dot(A) + when X is sparse. 
+ + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + + A : ndarray of shape (n_features, n_features) + + X_mean : ndarray of shape (n_features,) + + sqrt_sw : ndarray of shape (n_features,) + square roots of sample weights + + Returns + ------- + diag : np.ndarray, shape (n_samples,) + The computed diagonal. + """ + intercept_col = scale = sqrt_sw + batch_size = X.shape[1] + diag = np.empty(X.shape[0], dtype=X.dtype) + for start in range(0, X.shape[0], batch_size): + batch = slice(start, min(X.shape[0], start + batch_size), 1) + X_batch = np.empty( + (X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype + ) + if self.fit_intercept: + X_batch[:, :-1] = X[batch].toarray() - X_mean * scale[batch][:, None] + X_batch[:, -1] = intercept_col[batch] + else: + X_batch = X[batch].toarray() + diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) + return diag + + def _eigen_decompose_gram(self, X, y, sqrt_sw): + """Eigendecomposition of X.X^T, used when n_samples <= n_features.""" + # if X is dense it has already been centered in preprocessing + K, X_mean = self._compute_gram(X, sqrt_sw) + if self.fit_intercept: + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + K += np.outer(sqrt_sw, sqrt_sw) + eigvals, Q = linalg.eigh(K) + QT_y = np.dot(Q.T, y) + return X_mean, eigvals, Q, QT_y + + def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X.X^T (n_samples <= n_features). + """ + w = 1.0 / (eigvals + alpha) + if self.fit_intercept: + # the vector containing the square roots of the sample weights (1 + # when no sample weights) is the eigenvector of XX^T which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weight). + normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, Q) + w[intercept_dim] = 0 # cancel regularization for the intercept + + c = np.dot(Q, self._diag_dot(w, QT_y)) + G_inverse_diag = self._decomp_diag(w, Q) + # handle case where y is 2-d + if len(y.shape) != 1: + G_inverse_diag = G_inverse_diag[:, np.newaxis] + return G_inverse_diag, c + + def _eigen_decompose_covariance(self, X, y, sqrt_sw): + """Eigendecomposition of X^T.X, used when n_samples > n_features + and X is sparse. + """ + n_samples, n_features = X.shape + cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype) + cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw) + if not self.fit_intercept: + cov = cov[:-1, :-1] + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + # when all samples have the same weight we add a column of 1 + else: + cov[-1] = 0 + cov[:, -1] = 0 + cov[-1, -1] = sqrt_sw.dot(sqrt_sw) + nullspace_dim = max(0, n_features - n_samples) + eigvals, V = linalg.eigh(cov) + # remove eigenvalues and vectors in the null space of X^T.X + eigvals = eigvals[nullspace_dim:] + V = V[:, nullspace_dim:] + return X_mean, eigvals, V, X + + def _solve_eigen_covariance_no_intercept( + self, alpha, y, sqrt_sw, X_mean, eigvals, V, X + ): + """Compute dual coefficients and diagonal of G^-1. 
+ + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse), and not fitting an intercept. + """ + w = 1 / (eigvals + alpha) + A = (V * w).dot(V.T) + AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) + y_hat = safe_sparse_dot(X, AXy, dense_output=True) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _solve_eigen_covariance_intercept( + self, alpha, y, sqrt_sw, X_mean, eigvals, V, X + ): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse), + and we are fitting an intercept. + """ + # the vector [0, 0, ..., 0, 1] + # is the eigenvector of X^TX which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weight), e.g. n when uniform sample weights. + intercept_sv = np.zeros(V.shape[0]) + intercept_sv[-1] = 1 + intercept_dim = _find_smallest_angle(intercept_sv, V) + w = 1 / (eigvals + alpha) + w[intercept_dim] = 1 / eigvals[intercept_dim] + A = (V * w).dot(V.T) + # add a column to X containing the square roots of sample weights + X_op = _X_CenterStackOp(X, X_mean, sqrt_sw) + AXy = A.dot(X_op.T.dot(y)) + y_hat = X_op.dot(AXy) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) + # return (1 - hat_diag), (y - y_hat) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse). + """ + if self.fit_intercept: + return self._solve_eigen_covariance_intercept( + alpha, y, sqrt_sw, X_mean, eigvals, V, X + ) + return self._solve_eigen_covariance_no_intercept( + alpha, y, sqrt_sw, X_mean, eigvals, V, X + ) + + def _svd_decompose_design_matrix(self, X, y, sqrt_sw): + # X already centered + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + if self.fit_intercept: + # to emulate fit_intercept=True situation, add a column + # containing the square roots of the sample weights + # by centering, the other columns are orthogonal to that one + intercept_column = sqrt_sw[:, None] + X = np.hstack((X, intercept_column)) + U, singvals, _ = linalg.svd(X, full_matrices=0) + singvals_sq = singvals**2 + UT_y = np.dot(U.T, y) + return X_mean, singvals_sq, U, UT_y + + def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have an SVD decomposition of X + (n_samples > n_features and X is dense). + """ + w = ((singvals_sq + alpha) ** -1) - (alpha**-1) + if self.fit_intercept: + # detect intercept column + normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, U) + # cancel the regularization for the intercept + w[intercept_dim] = -(alpha**-1) + c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y + G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1) + if len(y.shape) != 1: + # handle case where y is 2-d + G_inverse_diag = G_inverse_diag[:, np.newaxis] + return G_inverse_diag, c + + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model with gcv. 
+ + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. Will be cast to float64 if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to float64 if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. Note that the scale of `sample_weight` + has an impact on the loss; i.e. multiplying all weights by `k` + is equivalent to setting `alpha / k`. + + Returns + ------- + self : object + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc", "coo"], + dtype=[np.float64], + multi_output=True, + y_numeric=True, + ) + + # alpha_per_target cannot be used in classifier mode. All subclasses + # of _RidgeGCV that are classifiers keep alpha_per_target at its + # default value: False, so the condition below should never happen. + assert not (self.is_clf and self.alpha_per_target) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + self.alphas = np.asarray(self.alphas) + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + gcv_mode = _check_gcv_mode(X, self.gcv_mode) + + if gcv_mode == "eigen": + decompose = self._eigen_decompose_gram + solve = self._solve_eigen_gram + elif gcv_mode == "svd": + if sparse.issparse(X): + decompose = self._eigen_decompose_covariance + solve = self._solve_eigen_covariance + else: + decompose = self._svd_decompose_design_matrix + solve = self._solve_svd_design_matrix + + n_samples = X.shape[0] + + if sample_weight is not None: + X, y, sqrt_sw = _rescale_data(X, y, sample_weight) + else: + sqrt_sw = np.ones(n_samples, dtype=X.dtype) + + X_mean, *decomposition = decompose(X, y, sqrt_sw) + + scorer = check_scoring(self, scoring=self.scoring, allow_none=True) + error = scorer is None + + n_y = 1 if len(y.shape) == 1 else y.shape[1] + n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas) + + if self.store_cv_values: + self.cv_values_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype) + + best_coef, best_score, best_alpha = None, None, None + + for i, alpha in enumerate(np.atleast_1d(self.alphas)): + G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition) + if error: + squared_errors = (c / G_inverse_diag) ** 2 + if self.alpha_per_target: + alpha_score = -squared_errors.mean(axis=0) + else: + alpha_score = -squared_errors.mean() + if self.store_cv_values: + self.cv_values_[:, i] = squared_errors.ravel() + else: + predictions = y - (c / G_inverse_diag) + if self.store_cv_values: + self.cv_values_[:, i] = predictions.ravel() + + if self.is_clf: + identity_estimator = _IdentityClassifier(classes=np.arange(n_y)) + alpha_score = scorer( + identity_estimator, predictions, y.argmax(axis=1) + ) + else: + identity_estimator = _IdentityRegressor() + if self.alpha_per_target: + alpha_score = np.array( + [ + scorer(identity_estimator, predictions[:, j], y[:, j]) + for j in range(n_y) + ] + ) + else: + alpha_score = scorer( + identity_estimator, predictions.ravel(), y.ravel() + ) + + # Keep track of the best model + if best_score is None: + # initialize + if self.alpha_per_target and n_y > 1: + best_coef = c + best_score = np.atleast_1d(alpha_score) + best_alpha = np.full(n_y, alpha) + else: + best_coef = c + best_score = alpha_score + best_alpha = 
alpha + else: + # update + if self.alpha_per_target and n_y > 1: + to_update = alpha_score > best_score + best_coef[:, to_update] = c[:, to_update] + best_score[to_update] = alpha_score[to_update] + best_alpha[to_update] = alpha + elif alpha_score > best_score: + best_coef, best_score, best_alpha = c, alpha_score, alpha + + self.alpha_ = best_alpha + self.best_score_ = best_score + self.dual_coef_ = best_coef + self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) + + if sparse.issparse(X): + X_offset = X_mean * X_scale + else: + X_offset += X_mean * X_scale + self._set_intercept(X_offset, y_offset, X_scale) + + if self.store_cv_values: + if len(y.shape) == 1: + cv_values_shape = n_samples, n_alphas + else: + cv_values_shape = n_samples, n_y, n_alphas + self.cv_values_ = self.cv_values_.reshape(cv_values_shape) + + return self + + +class _BaseRidgeCV(LinearModel): + _parameter_constraints: dict = { + "alphas": ["array-like", Interval(Real, 0, None, closed="neither")], + "fit_intercept": ["boolean"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "cv": ["cv_object"], + "gcv_mode": [StrOptions({"auto", "svd", "eigen"}), None], + "store_cv_values": ["boolean"], + "alpha_per_target": ["boolean"], + } + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + cv=None, + gcv_mode=None, + store_cv_values=False, + alpha_per_target=False, + ): + self.alphas = alphas + self.fit_intercept = fit_intercept + self.scoring = scoring + self.cv = cv + self.gcv_mode = gcv_mode + self.store_cv_values = store_cv_values + self.alpha_per_target = alpha_per_target + + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model with cv. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. If using GCV, will be cast to float64 + if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + When sample_weight is provided, the selected hyperparameter may depend + on whether we use leave-one-out cross-validation (cv=None or cv='auto') + or another form of cross-validation, because only leave-one-out + cross-validation takes the sample weights into account when computing + the validation score. 
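+
+        A rough sketch of the two code paths mentioned above (``X`` and ``y``
+        stand for any regression dataset; illustrative only)::
+
+            RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)        # cv=None: efficient LOO-CV
+            RidgeCV(alphas=[0.1, 1.0, 10.0], cv=5).fit(X, y)  # k-fold CV via GridSearchCV
+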
+ """ + cv = self.cv + + check_scalar_alpha = partial( + check_scalar, + target_type=numbers.Real, + min_val=0.0, + include_boundaries="neither", + ) + + if isinstance(self.alphas, (np.ndarray, list, tuple)): + n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas) + if n_alphas != 1: + for index, alpha in enumerate(self.alphas): + alpha = check_scalar_alpha(alpha, f"alphas[{index}]") + else: + self.alphas[0] = check_scalar_alpha(self.alphas[0], "alphas") + alphas = np.asarray(self.alphas) + + if cv is None: + estimator = _RidgeGCV( + alphas, + fit_intercept=self.fit_intercept, + scoring=self.scoring, + gcv_mode=self.gcv_mode, + store_cv_values=self.store_cv_values, + is_clf=is_classifier(self), + alpha_per_target=self.alpha_per_target, + ) + estimator.fit(X, y, sample_weight=sample_weight) + self.alpha_ = estimator.alpha_ + self.best_score_ = estimator.best_score_ + if self.store_cv_values: + self.cv_values_ = estimator.cv_values_ + else: + if self.store_cv_values: + raise ValueError("cv!=None and store_cv_values=True are incompatible") + if self.alpha_per_target: + raise ValueError("cv!=None and alpha_per_target=True are incompatible") + + parameters = {"alpha": alphas} + solver = "sparse_cg" if sparse.issparse(X) else "auto" + model = RidgeClassifier if is_classifier(self) else Ridge + gs = GridSearchCV( + model( + fit_intercept=self.fit_intercept, + solver=solver, + ), + parameters, + cv=cv, + scoring=self.scoring, + ) + gs.fit(X, y, sample_weight=sample_weight) + estimator = gs.best_estimator_ + self.alpha_ = gs.best_estimator_.alpha + self.best_score_ = gs.best_score_ + + self.coef_ = estimator.coef_ + self.intercept_ = estimator.intercept_ + self.n_features_in_ = estimator.n_features_in_ + if hasattr(estimator, "feature_names_in_"): + self.feature_names_in_ = estimator.feature_names_in_ + + return self + + +class RidgeCV( + _RoutingNotSupportedMixin, MultiOutputMixin, RegressorMixin, _BaseRidgeCV +): + """Ridge regression with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + By default, it performs efficient Leave-One-Out Cross-Validation. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0) + Array of alpha values to try. + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + If using Leave-One-Out cross-validation, alphas must be positive. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + scoring : str, callable, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + If None, the negative mean squared error if cv is 'auto' or None + (i.e. when using leave-one-out cross-validation), and r2 score + otherwise. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the efficient Leave-One-Out cross-validation + - integer, to specify the number of folds. 
+ - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used, else, + :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + gcv_mode : {'auto', 'svd', 'eigen'}, default='auto' + Flag indicating which strategy to use when performing + Leave-One-Out Cross-Validation. Options are:: + + 'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen' + 'svd' : force use of singular value decomposition of X when X is + dense, eigenvalue decomposition of X^T.X when X is sparse. + 'eigen' : force computation via eigendecomposition of X.X^T + + The 'auto' mode is the default and is intended to pick the cheaper + option of the two depending on the shape of the training data. + + store_cv_values : bool, default=False + Flag indicating if the cross-validation values corresponding to + each alpha should be stored in the ``cv_values_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + alpha_per_target : bool, default=False + Flag indicating whether to optimize the alpha value (picked from the + `alphas` parameter list) for each target separately (for multi-output + settings: multiple prediction targets). When set to `True`, after + fitting, the `alpha_` attribute will contain a value for each target. + When set to `False`, a single alpha is used for all targets. + + .. versionadded:: 0.24 + + Attributes + ---------- + cv_values_ : ndarray of shape (n_samples, n_alphas) or \ + shape (n_samples, n_targets, n_alphas), optional + Cross-validation values for each alpha (only available if + ``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been + called, this attribute will contain the mean squared errors if + `scoring is None` otherwise it will contain standardized per point + prediction values. + + coef_ : ndarray of shape (n_features) or (n_targets, n_features) + Weight vector(s). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + alpha_ : float or ndarray of shape (n_targets,) + Estimated regularization parameter, or, if ``alpha_per_target=True``, + the estimated regularization parameter for each target. + + best_score_ : float or ndarray of shape (n_targets,) + Score of base estimator with best alpha, or, if + ``alpha_per_target=True``, a score for each target. + + .. versionadded:: 0.23 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifier : Classifier based on ridge regression on {-1, 1} labels. + RidgeClassifierCV : Ridge classifier with built-in cross validation. + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.linear_model import RidgeCV + >>> X, y = load_diabetes(return_X_y=True) + >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y) + >>> clf.score(X, y) + 0.5166... + """ + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model with cv. 
+ + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. If using GCV, will be cast to float64 + if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + When sample_weight is provided, the selected hyperparameter may depend + on whether we use leave-one-out cross-validation (cv=None or cv='auto') + or another form of cross-validation, because only leave-one-out + cross-validation takes the sample weights into account when computing + the validation score. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + super().fit(X, y, sample_weight=sample_weight) + return self + + +class RidgeClassifierCV(_RoutingNotSupportedMixin, _RidgeClassifierMixin, _BaseRidgeCV): + """Ridge classifier with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + By default, it performs Leave-One-Out Cross-Validation. Currently, + only the n_features > n_samples case is handled efficiently. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0) + Array of alpha values to try. + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + scoring : str, callable, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the efficient Leave-One-Out cross-validation + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + store_cv_values : bool, default=False + Flag indicating if the cross-validation values corresponding to + each alpha should be stored in the ``cv_values_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + Attributes + ---------- + cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional + Cross-validation values for each alpha (only if ``store_cv_values=True`` and + ``cv=None``). 
After ``fit()`` has been called, this attribute will + contain the mean squared errors if `scoring is None` otherwise it + will contain standardized per point prediction values. + + coef_ : ndarray of shape (1, n_features) or (n_targets, n_features) + Coefficient of the features in the decision function. + + ``coef_`` is of shape (1, n_features) when the given problem is binary. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + alpha_ : float + Estimated regularization parameter. + + best_score_ : float + Score of base estimator with best alpha. + + .. versionadded:: 0.23 + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifier : Ridge classifier. + RidgeCV : Ridge regression with built-in cross validation. + + Notes + ----- + For multi-class classification, n_class classifiers are trained in + a one-versus-all approach. Concretely, this is implemented by taking + advantage of the multi-variate response support in Ridge. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.linear_model import RidgeClassifierCV + >>> X, y = load_breast_cancer(return_X_y=True) + >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y) + >>> clf.score(X, y) + 0.9630... + """ + + _parameter_constraints: dict = { + **_BaseRidgeCV._parameter_constraints, + "class_weight": [dict, StrOptions({"balanced"}), None], + } + for param in ("gcv_mode", "alpha_per_target"): + _parameter_constraints.pop(param) + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + cv=None, + class_weight=None, + store_cv_values=False, + ): + super().__init__( + alphas=alphas, + fit_intercept=fit_intercept, + scoring=scoring, + cv=cv, + store_cv_values=store_cv_values, + ) + self.class_weight = class_weight + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge classifier with cv. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples + and `n_features` is the number of features. When using GCV, + will be cast to float64 if necessary. + + y : ndarray of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + # `RidgeClassifier` does not accept "sag" or "saga" solver and thus support + # csr, csc, and coo sparse matrices. By using solver="eigen" we force to accept + # all sparse format. + X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen") + + # If cv is None, gcv mode will be used and we used the binarized Y + # since y will not be binarized in _RidgeGCV estimator. + # If cv is not None, a GridSearchCV with some RidgeClassifier + # estimators are used where y will be binarized. 
Thus, we pass y + # instead of the binarized Y. + target = Y if self.cv is None else y + super().fit(X, target, sample_weight=sample_weight) + return self + + def _more_tags(self): + return { + "multilabel": True, + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ff254ef00ed47cbfe789952c9ac89ec9d1cd1c8c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py new file mode 100644 index 0000000000000000000000000000000000000000..1826b0c83bb79ed324f326e014d216abfb8a817c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py @@ -0,0 +1,2605 @@ +# Authors: Peter Prettenhofer (main author) +# Mathieu Blondel (partial_fit support) +# +# License: BSD 3 clause +"""Classification, regression and One-Class SVM using Stochastic Gradient +Descent (SGD). +""" + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + BaseEstimator, + OutlierMixin, + RegressorMixin, + _fit_context, + clone, + is_classifier, +) +from ..exceptions import ConvergenceWarning +from ..model_selection import ShuffleSplit, StratifiedShuffleSplit +from ..utils import check_random_state, compute_class_weight, deprecated +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.metaestimators import available_if +from ..utils.multiclass import _check_partial_fit_first_call +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._base import LinearClassifierMixin, SparseCoefMixin, make_dataset +from ._sgd_fast import ( + EpsilonInsensitive, + Hinge, + Huber, + Log, + ModifiedHuber, + SquaredEpsilonInsensitive, + SquaredHinge, + SquaredLoss, + _plain_sgd32, + _plain_sgd64, +) + +LEARNING_RATE_TYPES = { + "constant": 1, + "optimal": 2, + "invscaling": 3, + "adaptive": 4, + "pa1": 5, + "pa2": 6, +} + +PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} + +DEFAULT_EPSILON = 0.1 +# Default value of ``epsilon`` parameter. 
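+
+# Rough reference only (the authoritative description lives in the SGD*
+# docstrings): the integer codes above select a step-size schedule in the
+# Cython training loop, approximately
+#
+#     'constant':   eta = eta0
+#     'optimal':    eta = 1.0 / (alpha * (t + t0))   # t0 from a heuristic due to Bottou
+#     'invscaling': eta = eta0 / pow(t, power_t)
+#     'adaptive':   eta = eta0, divided by 5 whenever the stopping criterion
+#                   is hit n_iter_no_change times in a row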
+ +MAX_INT = np.iinfo(np.int32).max + + +class _ValidationScoreCallback: + """Callback for early stopping based on validation score""" + + def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None): + self.estimator = clone(estimator) + self.estimator.t_ = 1 # to pass check_is_fitted + if classes is not None: + self.estimator.classes_ = classes + self.X_val = X_val + self.y_val = y_val + self.sample_weight_val = sample_weight_val + + def __call__(self, coef, intercept): + est = self.estimator + est.coef_ = coef.reshape(1, -1) + est.intercept_ = np.atleast_1d(intercept) + return est.score(self.X_val, self.y_val, self.sample_weight_val) + + +class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for SGD classification and regression.""" + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left"), None], + "shuffle": ["boolean"], + "verbose": ["verbose"], + "random_state": ["random_state"], + "warm_start": ["boolean"], + "average": [Interval(Integral, 0, None, closed="left"), bool, np.bool_], + } + + def __init__( + self, + loss, + *, + penalty="l2", + alpha=0.0001, + C=1.0, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=0.1, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + self.loss = loss + self.penalty = penalty + self.learning_rate = learning_rate + self.epsilon = epsilon + self.alpha = alpha + self.C = C + self.l1_ratio = l1_ratio + self.fit_intercept = fit_intercept + self.shuffle = shuffle + self.random_state = random_state + self.verbose = verbose + self.eta0 = eta0 + self.power_t = power_t + self.early_stopping = early_stopping + self.validation_fraction = validation_fraction + self.n_iter_no_change = n_iter_no_change + self.warm_start = warm_start + self.average = average + self.max_iter = max_iter + self.tol = tol + + @abstractmethod + def fit(self, X, y): + """Fit model.""" + + def _more_validate_params(self, for_partial_fit=False): + """Validate input params.""" + if self.early_stopping and for_partial_fit: + raise ValueError("early_stopping should be False with partial_fit") + if ( + self.learning_rate in ("constant", "invscaling", "adaptive") + and self.eta0 <= 0.0 + ): + raise ValueError("eta0 must be > 0") + if self.learning_rate == "optimal" and self.alpha == 0: + raise ValueError( + "alpha must be > 0 since " + "learning_rate is 'optimal'. alpha is used " + "to compute the optimal learning rate." 
+ ) + + # raises ValueError if not registered + self._get_penalty_type(self.penalty) + self._get_learning_rate_type(self.learning_rate) + + def _get_loss_function(self, loss): + """Get concrete ``LossFunction`` object for str ``loss``.""" + loss_ = self.loss_functions[loss] + loss_class, args = loss_[0], loss_[1:] + if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"): + args = (self.epsilon,) + return loss_class(*args) + + def _get_learning_rate_type(self, learning_rate): + return LEARNING_RATE_TYPES[learning_rate] + + def _get_penalty_type(self, penalty): + penalty = str(penalty).lower() + return PENALTY_TYPES[penalty] + + def _allocate_parameter_mem( + self, + n_classes, + n_features, + input_dtype, + coef_init=None, + intercept_init=None, + one_class=0, + ): + """Allocate mem for parameters; initialize if provided.""" + if n_classes > 2: + # allocate coef_ for multi-class + if coef_init is not None: + coef_init = np.asarray(coef_init, dtype=input_dtype, order="C") + if coef_init.shape != (n_classes, n_features): + raise ValueError("Provided ``coef_`` does not match dataset. ") + self.coef_ = coef_init + else: + self.coef_ = np.zeros( + (n_classes, n_features), dtype=input_dtype, order="C" + ) + + # allocate intercept_ for multi-class + if intercept_init is not None: + intercept_init = np.asarray( + intercept_init, order="C", dtype=input_dtype + ) + if intercept_init.shape != (n_classes,): + raise ValueError("Provided intercept_init does not match dataset.") + self.intercept_ = intercept_init + else: + self.intercept_ = np.zeros(n_classes, dtype=input_dtype, order="C") + else: + # allocate coef_ + if coef_init is not None: + coef_init = np.asarray(coef_init, dtype=input_dtype, order="C") + coef_init = coef_init.ravel() + if coef_init.shape != (n_features,): + raise ValueError("Provided coef_init does not match dataset.") + self.coef_ = coef_init + else: + self.coef_ = np.zeros(n_features, dtype=input_dtype, order="C") + + # allocate intercept_ + if intercept_init is not None: + intercept_init = np.asarray(intercept_init, dtype=input_dtype) + if intercept_init.shape != (1,) and intercept_init.shape != (): + raise ValueError("Provided intercept_init does not match dataset.") + if one_class: + self.offset_ = intercept_init.reshape( + 1, + ) + else: + self.intercept_ = intercept_init.reshape( + 1, + ) + else: + if one_class: + self.offset_ = np.zeros(1, dtype=input_dtype, order="C") + else: + self.intercept_ = np.zeros(1, dtype=input_dtype, order="C") + + # initialize average parameters + if self.average > 0: + self._standard_coef = self.coef_ + self._average_coef = np.zeros( + self.coef_.shape, dtype=input_dtype, order="C" + ) + if one_class: + self._standard_intercept = 1 - self.offset_ + else: + self._standard_intercept = self.intercept_ + + self._average_intercept = np.zeros( + self._standard_intercept.shape, dtype=input_dtype, order="C" + ) + + def _make_validation_split(self, y, sample_mask): + """Split the dataset between training set and validation set. + + Parameters + ---------- + y : ndarray of shape (n_samples, ) + Target values. + + sample_mask : ndarray of shape (n_samples, ) + A boolean array indicating whether each sample should be included + for validation set. + + Returns + ------- + validation_mask : ndarray of shape (n_samples, ) + Equal to True on the validation set, False on the training set. 
+ """ + n_samples = y.shape[0] + validation_mask = np.zeros(n_samples, dtype=np.bool_) + if not self.early_stopping: + # use the full set for training, with an empty validation set + return validation_mask + + if is_classifier(self): + splitter_type = StratifiedShuffleSplit + else: + splitter_type = ShuffleSplit + cv = splitter_type( + test_size=self.validation_fraction, random_state=self.random_state + ) + idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y)) + + if not np.any(sample_mask[idx_val]): + raise ValueError( + "The sample weights for validation set are all zero, consider using a" + " different random state." + ) + + if idx_train.shape[0] == 0 or idx_val.shape[0] == 0: + raise ValueError( + "Splitting %d samples into a train set and a validation set " + "with validation_fraction=%r led to an empty set (%d and %d " + "samples). Please either change validation_fraction, increase " + "number of samples, or disable early_stopping." + % ( + n_samples, + self.validation_fraction, + idx_train.shape[0], + idx_val.shape[0], + ) + ) + + validation_mask[idx_val] = True + return validation_mask + + def _make_validation_score_cb( + self, validation_mask, X, y, sample_weight, classes=None + ): + if not self.early_stopping: + return None + + return _ValidationScoreCallback( + self, + X[validation_mask], + y[validation_mask], + sample_weight[validation_mask], + classes=classes, + ) + + # TODO(1.6): Remove + # mypy error: Decorated property not supported + @deprecated( # type: ignore + "Attribute `loss_function_` was deprecated in version 1.4 and will be removed " + "in 1.6." + ) + @property + def loss_function_(self): + return self._loss_function_ + + +def _prepare_fit_binary(est, y, i, input_dtye): + """Initialization for fit_binary. + + Returns y, coef, intercept, average_coef, average_intercept. + """ + y_i = np.ones(y.shape, dtype=input_dtye, order="C") + y_i[y != est.classes_[i]] = -1.0 + average_intercept = 0 + average_coef = None + + if len(est.classes_) == 2: + if not est.average: + coef = est.coef_.ravel() + intercept = est.intercept_[0] + else: + coef = est._standard_coef.ravel() + intercept = est._standard_intercept[0] + average_coef = est._average_coef.ravel() + average_intercept = est._average_intercept[0] + else: + if not est.average: + coef = est.coef_[i] + intercept = est.intercept_[i] + else: + coef = est._standard_coef[i] + intercept = est._standard_intercept[i] + average_coef = est._average_coef[i] + average_intercept = est._average_intercept[i] + + return y_i, coef, intercept, average_coef, average_intercept + + +def fit_binary( + est, + i, + X, + y, + alpha, + C, + learning_rate, + max_iter, + pos_weight, + neg_weight, + sample_weight, + validation_mask=None, + random_state=None, +): + """Fit a single binary classifier. + + The i'th class is considered the "positive" class. + + Parameters + ---------- + est : Estimator object + The estimator to fit + + i : int + Index of the positive class + + X : numpy array or sparse matrix of shape [n_samples,n_features] + Training data + + y : numpy array of shape [n_samples, ] + Target values + + alpha : float + The regularization parameter + + C : float + Maximum step size for passive aggressive + + learning_rate : str + The learning rate. Accepted values are 'constant', 'optimal', + 'invscaling', 'pa1' and 'pa2'. 
+ + max_iter : int + The maximum number of iterations (epochs) + + pos_weight : float + The weight of the positive class + + neg_weight : float + The weight of the negative class + + sample_weight : numpy array of shape [n_samples, ] + The weight of each sample + + validation_mask : numpy array of shape [n_samples, ], default=None + Precomputed validation mask in case _fit_binary is called in the + context of a one-vs-rest reduction. + + random_state : int, RandomState instance, default=None + If int, random_state is the seed used by the random number generator; + If RandomState instance, random_state is the random number generator; + If None, the random number generator is the RandomState instance used + by `np.random`. + """ + # if average is not true, average_coef, and average_intercept will be + # unused + y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary( + est, y, i, input_dtye=X.dtype + ) + assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] + + random_state = check_random_state(random_state) + dataset, intercept_decay = make_dataset( + X, y_i, sample_weight, random_state=random_state + ) + + penalty_type = est._get_penalty_type(est.penalty) + learning_rate_type = est._get_learning_rate_type(learning_rate) + + if validation_mask is None: + validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0) + classes = np.array([-1, 1], dtype=y_i.dtype) + validation_score_cb = est._make_validation_score_cb( + validation_mask, X, y_i, sample_weight, classes=classes + ) + + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(MAX_INT) + + tol = est.tol if est.tol is not None else -np.inf + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd( + coef, + intercept, + average_coef, + average_intercept, + est._loss_function_, + penalty_type, + alpha, + C, + est.l1_ratio, + dataset, + validation_mask, + est.early_stopping, + validation_score_cb, + int(est.n_iter_no_change), + max_iter, + tol, + int(est.fit_intercept), + int(est.verbose), + int(est.shuffle), + seed, + pos_weight, + neg_weight, + learning_rate_type, + est.eta0, + est.power_t, + 0, + est.t_, + intercept_decay, + est.average, + ) + + if est.average: + if len(est.classes_) == 2: + est._average_intercept[0] = average_intercept + else: + est._average_intercept[i] = average_intercept + + return coef, intercept, n_iter_ + + +def _get_plain_sgd_function(input_dtype): + return _plain_sgd32 if input_dtype == np.float32 else _plain_sgd64 + + +class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta): + loss_functions = { + "hinge": (Hinge, 1.0), + "squared_hinge": (SquaredHinge, 1.0), + "perceptron": (Hinge, 0.0), + "log_loss": (Log,), + "modified_huber": (ModifiedHuber,), + "squared_error": (SquaredLoss,), + "huber": (Huber, DEFAULT_EPSILON), + "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), + "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), + } + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "loss": [StrOptions(set(loss_functions))], + "early_stopping": ["boolean"], + "validation_fraction": [Interval(Real, 0, 1, closed="neither")], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "class_weight": [StrOptions({"balanced"}), dict, None], + } + + @abstractmethod + def __init__( + self, + loss="hinge", + *, + 
penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + n_jobs=None, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + self.class_weight = class_weight + self.n_jobs = n_jobs + + def _partial_fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + max_iter, + classes, + sample_weight, + coef_init, + intercept_init, + ): + first_call = not hasattr(self, "classes_") + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=first_call, + ) + + n_samples, n_features = X.shape + + _check_partial_fit_first_call(self, classes) + + n_classes = self.classes_.shape[0] + + # Allocate datastructures from input arguments + self._expanded_class_weight = compute_class_weight( + self.class_weight, classes=self.classes_, y=y + ) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if getattr(self, "coef_", None) is None or coef_init is not None: + self._allocate_parameter_mem( + n_classes=n_classes, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=intercept_init, + ) + elif n_features != self.coef_.shape[-1]: + raise ValueError( + "Number of features %d does not match previous data %d." + % (n_features, self.coef_.shape[-1]) + ) + + self._loss_function_ = self._get_loss_function(loss) + if not hasattr(self, "t_"): + self.t_ = 1.0 + + # delegate to concrete training procedure + if n_classes > 2: + self._fit_multiclass( + X, + y, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + elif n_classes == 2: + self._fit_binary( + X, + y, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + else: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % n_classes + ) + + return self + + def _fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + coef_init=None, + intercept_init=None, + sample_weight=None, + ): + if hasattr(self, "classes_"): + # delete the attribute otherwise _partial_fit thinks it's not the first call + delattr(self, "classes_") + + # labels can be encoded as float, int, or string literals + # np.unique sorts in asc order; largest class id is positive class + y = self._validate_data(y=y) + classes = np.unique(y) + + if self.warm_start and hasattr(self, "coef_"): + if coef_init is None: + coef_init = self.coef_ + if intercept_init is None: + intercept_init = self.intercept_ + else: + self.coef_ = None + self.intercept_ = None + + if self.average > 0: + self._standard_coef = self.coef_ + self._standard_intercept = self.intercept_ + self._average_coef = None + self._average_intercept = None + + # Clear iteration count for multiple call to fit. 
+ self.t_ = 1.0 + + self._partial_fit( + X, + y, + alpha, + C, + loss, + learning_rate, + self.max_iter, + classes, + sample_weight, + coef_init, + intercept_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + return self + + def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter): + """Fit a binary classifier on X and y.""" + coef, intercept, n_iter_ = fit_binary( + self, + 1, + X, + y, + alpha, + C, + learning_rate, + max_iter, + self._expanded_class_weight[1], + self._expanded_class_weight[0], + sample_weight, + random_state=self.random_state, + ) + + self.t_ += n_iter_ * X.shape[0] + self.n_iter_ = n_iter_ + + # need to be 2d + if self.average > 0: + if self.average <= self.t_ - 1: + self.coef_ = self._average_coef.reshape(1, -1) + self.intercept_ = self._average_intercept + else: + self.coef_ = self._standard_coef.reshape(1, -1) + self._standard_intercept = np.atleast_1d(intercept) + self.intercept_ = self._standard_intercept + else: + self.coef_ = coef.reshape(1, -1) + # intercept is a float, need to convert it to an array of length 1 + self.intercept_ = np.atleast_1d(intercept) + + def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter): + """Fit a multi-class classifier by combining binary classifiers + + Each binary classifier predicts one class versus all others. This + strategy is called OvA (One versus All) or OvR (One versus Rest). + """ + # Precompute the validation split using the multiclass labels + # to ensure proper balancing of the classes. + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + + # Use joblib to fit OvA in parallel. + # Pick the random seed for each job outside of fit_binary to avoid + # sharing the estimator random state between threads which could lead + # to non-deterministic behavior + random_state = check_random_state(self.random_state) + seeds = random_state.randint(MAX_INT, size=len(self.classes_)) + result = Parallel( + n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem" + )( + delayed(fit_binary)( + self, + i, + X, + y, + alpha, + C, + learning_rate, + max_iter, + self._expanded_class_weight[i], + 1.0, + sample_weight, + validation_mask=validation_mask, + random_state=seed, + ) + for i, seed in enumerate(seeds) + ) + + # take the maximum of n_iter_ over every binary fit + n_iter_ = 0.0 + for i, (_, intercept, n_iter_i) in enumerate(result): + self.intercept_[i] = intercept + n_iter_ = max(n_iter_, n_iter_i) + + self.t_ += n_iter_ * X.shape[0] + self.n_iter_ = n_iter_ + + if self.average > 0: + if self.average <= self.t_ - 1.0: + self.coef_ = self._average_coef + self.intercept_ = self._average_intercept + else: + self.coef_ = self._standard_coef + self._standard_intercept = np.atleast_1d(self.intercept_) + self.intercept_ = self._standard_intercept + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None, sample_weight=None): + """Perform one epoch of stochastic gradient descent on given samples. + + Internally, this method uses ``max_iter = 1``. Therefore, it is not + guaranteed that a minimum of the cost function is reached after calling + it once. Matters such as objective convergence, early stopping, and + learning rate adjustments should be handled by the user. 
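# ----------------------------------------------------------------------
# Illustrative aside (not part of the upstream diff): a minimal sketch of
# the out-of-core usage pattern described in the partial_fit docstring
# above. The dataset and batch count are arbitrary; `classes` must be
# supplied on the first call so that all labels are known up front.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=1000, random_state=0)
classes = np.unique(y)
clf = SGDClassifier(loss="log_loss", random_state=0)
for X_batch, y_batch in zip(np.array_split(X, 10), np.array_split(y, 10)):
    # each call performs a single epoch (max_iter=1 internally)
    clf.partial_fit(X_batch, y_batch, classes=classes)
# ----------------------------------------------------------------------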
+ + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of the training data. + + y : ndarray of shape (n_samples,) + Subset of the target values. + + classes : ndarray of shape (n_classes,), default=None + Classes across all calls to partial_fit. + Can be obtained by via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that y doesn't need to contain all labels in `classes`. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. + If not provided, uniform weights are assumed. + + Returns + ------- + self : object + Returns an instance of self. + """ + if not hasattr(self, "classes_"): + self._more_validate_params(for_partial_fit=True) + + if self.class_weight == "balanced": + raise ValueError( + "class_weight '{0}' is not supported for " + "partial_fit. In order to use 'balanced' weights," + " use compute_class_weight('{0}', " + "classes=classes, y=y). " + "In place of y you can use a large enough sample " + "of the full training set target to properly " + "estimate the class frequency distributions. " + "Pass the resulting weights as the class_weight " + "parameter.".format(self.class_weight) + ) + + return self._partial_fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + classes=classes, + sample_weight=sample_weight, + coef_init=None, + intercept_init=None, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): + """Fit linear model with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_classes, n_features), default=None + The initial coefficients to warm-start the optimization. + + intercept_init : ndarray of shape (n_classes,), default=None + The initial intercept to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. + If not provided, uniform weights are assumed. These weights will + be multiplied with class_weight (passed through the + constructor) if class_weight is specified. + + Returns + ------- + self : object + Returns an instance of self. + """ + self._more_validate_params() + + return self._fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + intercept_init=intercept_init, + sample_weight=sample_weight, + ) + + +class SGDClassifier(BaseSGDClassifier): + """Linear classifiers (SVM, logistic regression, etc.) with SGD training. + + This estimator implements regularized linear models with stochastic + gradient descent (SGD) learning: the gradient of the loss is estimated + each sample at a time and the model is updated along the way with a + decreasing strength schedule (aka learning rate). SGD allows minibatch + (online/out-of-core) learning via the `partial_fit` method. + For best results using the default learning rate schedule, the data should + have zero mean and unit variance. + + This implementation works with data represented as dense or sparse arrays + of floating point values for the features. 
The model it fits can be + controlled with the loss parameter; by default, it fits a linear support + vector machine (SVM). + + The regularizer is a penalty added to the loss function that shrinks model + parameters towards the zero vector using either the squared euclidean norm + L2 or the absolute norm L1 or a combination of both (Elastic Net). If the + parameter update crosses the 0.0 value because of the regularizer, the + update is truncated to 0.0 to allow for learning sparse models and achieve + online feature selection. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : {'hinge', 'log_loss', 'modified_huber', 'squared_hinge',\ + 'perceptron', 'squared_error', 'huber', 'epsilon_insensitive',\ + 'squared_epsilon_insensitive'}, default='hinge' + The loss function to be used. + + - 'hinge' gives a linear SVM. + - 'log_loss' gives logistic regression, a probabilistic classifier. + - 'modified_huber' is another smooth loss that brings tolerance to + outliers as well as probability estimates. + - 'squared_hinge' is like hinge but is quadratically penalized. + - 'perceptron' is the linear loss used by the perceptron algorithm. + - The other losses, 'squared_error', 'huber', 'epsilon_insensitive' and + 'squared_epsilon_insensitive' are designed for regression but can be useful + in classification as well; see + :class:`~sklearn.linear_model.SGDRegressor` for a description. + + More details about the losses formulas can be found in the + :ref:`User Guide `. + + penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' + The penalty (aka regularization term) to be used. Defaults to 'l2' + which is the standard regularizer for linear SVM models. 'l1' and + 'elasticnet' might bring sparsity to the model (feature selection) + not achievable with 'l2'. No penalty is added when set to `None`. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term. The higher the + value, the stronger the regularization. Also used to compute the + learning rate when `learning_rate` is set to 'optimal'. + Values must be in the range `[0.0, inf)`. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. + l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. + Only used if `penalty` is 'elasticnet'. + Values must be in the range `[0.0, 1.0]`. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + Values must be in the range `[1, inf)`. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, training will stop + when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive + epochs. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Values must be in the range `[0.0, inf)`. + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + Values must be in the range `[0, inf)`. + + epsilon : float, default=0.1 + Epsilon in the epsilon-insensitive loss functions; only if `loss` is + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. 
+ For 'huber', determines the threshold at which it becomes less + important to get the prediction exactly right. + For epsilon-insensitive, any differences between the current prediction + and the correct label are ignored if they are less than this threshold. + Values must be in the range `[0.0, inf)`. + + n_jobs : int, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + Used for shuffling the data, when ``shuffle`` is set to ``True``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + Integer values must be in the range `[0, 2**32 - 1]`. + + learning_rate : str, default='optimal' + The learning rate schedule: + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where `t0` is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': `eta = eta0`, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + `early_stopping` is `True`, the current learning rate is divided by 5. + + .. versionadded:: 0.20 + Added 'adaptive' option + + eta0 : float, default=0.0 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.0 as eta0 is not used by + the default schedule 'optimal'. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to `True`, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score returned by the `score` method is not + improving by at least tol for n_iter_no_change consecutive epochs. + + .. versionadded:: 0.20 + Added 'early_stopping' option + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if `early_stopping` is True. + Values must be in the range `(0.0, 1.0)`. + + .. versionadded:: 0.20 + Added 'validation_fraction' option + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before stopping + fitting. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Integer values must be in the range `[1, max_iter)`. + + .. versionadded:: 0.20 + Added 'n_iter_no_change' option + + class_weight : dict, {class_label: weight} or "balanced", default=None + Preset for the class_weight fit parameter. + + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. 
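# ----------------------------------------------------------------------
# Illustrative aside (not part of the upstream diff): a hedged sketch of
# the 'adaptive' schedule combined with early stopping, as documented in
# the parameters above. The data and the parameter values are arbitrary
# and only meant to show how the options fit together.
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=500, random_state=0)
clf = SGDClassifier(
    learning_rate="adaptive",  # eta stays at eta0 until progress stalls
    eta0=0.01,
    early_stopping=True,       # hold out a stratified validation split
    validation_fraction=0.2,
    n_iter_no_change=5,
    tol=1e-3,
    random_state=0,
).fit(X, y)
print(clf.n_iter_)             # epochs actually run before stopping
# ----------------------------------------------------------------------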
+ See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to `True`, computes the averaged SGD weights across all + updates and stores the result in the ``coef_`` attribute. If set to + an int greater than 1, averaging will begin once the total number of + samples seen reaches `average`. So ``average=10`` will begin + averaging after seeing 10 samples. + Integer values must be in the range `[1, n_samples]`. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + n_iter_ : int + The actual number of iterations before reaching the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + loss_function_ : concrete ``LossFunction`` + + .. deprecated:: 1.4 + Attribute `loss_function_` was deprecated in version 1.4 and will be + removed in 1.6. + + classes_ : array of shape (n_classes,) + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.svm.LinearSVC : Linear support vector classification. + LogisticRegression : Logistic regression. + Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to + ``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", + penalty=None)``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import SGDClassifier + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.pipeline import make_pipeline + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> Y = np.array([1, 1, 2, 2]) + >>> # Always scale the input. The most convenient way is to use a pipeline. + >>> clf = make_pipeline(StandardScaler(), + ... 
SGDClassifier(max_iter=1000, tol=1e-3)) + >>> clf.fit(X, Y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('sgdclassifier', SGDClassifier())]) + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + **BaseSGDClassifier._parameter_constraints, + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "power_t": [Interval(Real, None, None, closed="neither")], + "epsilon": [Interval(Real, 0, None, closed="left")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "eta0": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + loss="hinge", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + n_jobs=None, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + n_jobs=n_jobs, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + class_weight=class_weight, + warm_start=warm_start, + average=average, + ) + + def _check_proba(self): + if self.loss not in ("log_loss", "modified_huber"): + raise AttributeError( + "probability estimates are not available for loss=%r" % self.loss + ) + return True + + @available_if(_check_proba) + def predict_proba(self, X): + """Probability estimates. + + This method is only available for log loss and modified Huber loss. + + Multiclass probability estimates are derived from binary (one-vs.-rest) + estimates by simple normalization, as recommended by Zadrozny and + Elkan. + + Binary probability estimates for loss="modified_huber" are given by + (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions + it is necessary to perform proper probability calibration by wrapping + the classifier with + :class:`~sklearn.calibration.CalibratedClassifierCV` instead. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data for prediction. + + Returns + ------- + ndarray of shape (n_samples, n_classes) + Returns the probability of the sample for each class in the model, + where classes are ordered as they are in `self.classes_`. 
+ + References + ---------- + Zadrozny and Elkan, "Transforming classifier scores into multiclass + probability estimates", SIGKDD'02, + https://dl.acm.org/doi/pdf/10.1145/775047.775151 + + The justification for the formula in the loss="modified_huber" + case is in the appendix B in: + http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf + """ + check_is_fitted(self) + + if self.loss == "log_loss": + return self._predict_proba_lr(X) + + elif self.loss == "modified_huber": + binary = len(self.classes_) == 2 + scores = self.decision_function(X) + + if binary: + prob2 = np.ones((scores.shape[0], 2)) + prob = prob2[:, 1] + else: + prob = scores + + np.clip(scores, -1, 1, prob) + prob += 1.0 + prob /= 2.0 + + if binary: + prob2[:, 0] -= prob + prob = prob2 + else: + # the above might assign zero to all classes, which doesn't + # normalize neatly; work around this to produce uniform + # probabilities + prob_sum = prob.sum(axis=1) + all_zero = prob_sum == 0 + if np.any(all_zero): + prob[all_zero, :] = 1 + prob_sum[all_zero] = len(self.classes_) + + # normalize + prob /= prob_sum.reshape((prob.shape[0], -1)) + + return prob + + else: + raise NotImplementedError( + "predict_(log_)proba only supported when" + " loss='log_loss' or loss='modified_huber' " + "(%r given)" + % self.loss + ) + + @available_if(_check_proba) + def predict_log_proba(self, X): + """Log of probability estimates. + + This method is only available for log loss and modified Huber loss. + + When loss="modified_huber", probability estimates may be hard zeros + and ones, so taking the logarithm is not possible. + + See ``predict_proba`` for details. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data for prediction. + + Returns + ------- + T : array-like, shape (n_samples, n_classes) + Returns the log-probability of the sample for each class in the + model, where classes are ordered as they are in + `self.classes_`. 
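# ----------------------------------------------------------------------
# Illustrative aside (not part of the upstream diff): probability
# estimates are only exposed for loss='log_loss' and loss='modified_huber'
# (with the default hinge loss, predict_proba raises AttributeError).
# A minimal sketch on toy data:
import numpy as np
from sklearn.linear_model import SGDClassifier

X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
y = np.array([0, 0, 1, 1])
clf = SGDClassifier(loss="log_loss", random_state=0).fit(X, y)
proba = clf.predict_proba(X)        # shape (n_samples, 2); rows sum to 1
log_proba = clf.predict_log_proba(X)
# For loss='modified_huber' the binary estimates are
# (clip(decision_function(X), -1, 1) + 1) / 2, as described above.
# ----------------------------------------------------------------------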
+ """ + return np.log(self.predict_proba(X)) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + "preserves_dtype": [np.float64, np.float32], + } + + +class BaseSGDRegressor(RegressorMixin, BaseSGD): + loss_functions = { + "squared_error": (SquaredLoss,), + "huber": (Huber, DEFAULT_EPSILON), + "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), + "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), + } + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "loss": [StrOptions(set(loss_functions))], + "early_stopping": ["boolean"], + "validation_fraction": [Interval(Real, 0, 1, closed="neither")], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], + } + + @abstractmethod + def __init__( + self, + loss="squared_error", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + random_state=None, + learning_rate="invscaling", + eta0=0.01, + power_t=0.25, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + + def _partial_fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + max_iter, + sample_weight, + coef_init, + intercept_init, + ): + first_call = getattr(self, "coef_", None) is None + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + copy=False, + order="C", + dtype=[np.float64, np.float32], + accept_large_sparse=False, + reset=first_call, + ) + y = y.astype(X.dtype, copy=False) + + n_samples, n_features = X.shape + + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # Allocate datastructures from input arguments + if first_call: + self._allocate_parameter_mem( + n_classes=1, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=intercept_init, + ) + if self.average > 0 and getattr(self, "_average_coef", None) is None: + self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C") + self._average_intercept = np.zeros(1, dtype=X.dtype, order="C") + + self._fit_regressor( + X, y, alpha, C, loss, learning_rate, sample_weight, max_iter + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, sample_weight=None): + """Perform one epoch of stochastic gradient descent on given samples. + + Internally, this method uses ``max_iter = 1``. Therefore, it is not + guaranteed that a minimum of the cost function is reached after calling + it once. Matters such as objective convergence and early stopping + should be handled by the user. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of training data. + + y : numpy array of shape (n_samples,) + Subset of target values. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. 
+ If not provided, uniform weights are assumed. + + Returns + ------- + self : object + Returns an instance of self. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + return self._partial_fit( + X, + y, + self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + sample_weight=sample_weight, + coef_init=None, + intercept_init=None, + ) + + def _fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + coef_init=None, + intercept_init=None, + sample_weight=None, + ): + if self.warm_start and getattr(self, "coef_", None) is not None: + if coef_init is None: + coef_init = self.coef_ + if intercept_init is None: + intercept_init = self.intercept_ + else: + self.coef_ = None + self.intercept_ = None + + # Clear iteration count for multiple call to fit. + self.t_ = 1.0 + + self._partial_fit( + X, + y, + alpha, + C, + loss, + learning_rate, + self.max_iter, + sample_weight, + coef_init, + intercept_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): + """Fit linear model with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_features,), default=None + The initial coefficients to warm-start the optimization. + + intercept_init : ndarray of shape (1,), default=None + The initial intercept to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + Fitted `SGDRegressor` estimator. + """ + self._more_validate_params() + + return self._fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + intercept_init=intercept_init, + sample_weight=sample_weight, + ) + + def _decision_function(self, X): + """Predict using the linear model + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + + Returns + ------- + ndarray of shape (n_samples,) + Predicted target values per element in X. + """ + check_is_fitted(self) + + X = self._validate_data(X, accept_sparse="csr", reset=False) + + scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + return scores.ravel() + + def predict(self, X): + """Predict using the linear model. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data. + + Returns + ------- + ndarray of shape (n_samples,) + Predicted target values per element in X. 
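# ----------------------------------------------------------------------
# Illustrative aside (not part of the upstream diff): prediction for the
# regressor is the linear decision function X @ coef_ + intercept_, as
# implemented in _decision_function above. A minimal sketch on random
# data (the true coefficients are chosen arbitrarily):
import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = X @ np.array([1.0, 2.0, 0.0, -1.0, 0.5]) + 0.1 * rng.randn(100)
reg = SGDRegressor(max_iter=1000, tol=1e-3, random_state=0).fit(X, y)
pred = reg.predict(X)
assert np.allclose(pred, X @ reg.coef_ + reg.intercept_)
# ----------------------------------------------------------------------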
+ """ + return self._decision_function(X) + + def _fit_regressor( + self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter + ): + loss_function = self._get_loss_function(loss) + penalty_type = self._get_penalty_type(self.penalty) + learning_rate_type = self._get_learning_rate_type(learning_rate) + + if not hasattr(self, "t_"): + self.t_ = 1.0 + + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + validation_score_cb = self._make_validation_score_cb( + validation_mask, X, y, sample_weight + ) + + random_state = check_random_state(self.random_state) + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(0, MAX_INT) + + dataset, intercept_decay = make_dataset( + X, y, sample_weight, random_state=random_state + ) + + tol = self.tol if self.tol is not None else -np.inf + + if self.average: + coef = self._standard_coef + intercept = self._standard_intercept + average_coef = self._average_coef + average_intercept = self._average_intercept + else: + coef = self.coef_ + intercept = self.intercept_ + average_coef = None # Not used + average_intercept = [0] # Not used + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( + coef, + intercept[0], + average_coef, + average_intercept[0], + loss_function, + penalty_type, + alpha, + C, + self.l1_ratio, + dataset, + validation_mask, + self.early_stopping, + validation_score_cb, + int(self.n_iter_no_change), + max_iter, + tol, + int(self.fit_intercept), + int(self.verbose), + int(self.shuffle), + seed, + 1.0, + 1.0, + learning_rate_type, + self.eta0, + self.power_t, + 0, + self.t_, + intercept_decay, + self.average, + ) + + self.t_ += self.n_iter_ * X.shape[0] + + if self.average > 0: + self._average_intercept = np.atleast_1d(average_intercept) + self._standard_intercept = np.atleast_1d(intercept) + + if self.average <= self.t_ - 1.0: + # made enough updates for averaging to be taken into account + self.coef_ = average_coef + self.intercept_ = np.atleast_1d(average_intercept) + else: + self.coef_ = coef + self.intercept_ = np.atleast_1d(intercept) + + else: + self.intercept_ = np.atleast_1d(intercept) + + +class SGDRegressor(BaseSGDRegressor): + """Linear model fitted by minimizing a regularized empirical loss with SGD. + + SGD stands for Stochastic Gradient Descent: the gradient of the loss is + estimated each sample at a time and the model is updated along the way with + a decreasing strength schedule (aka learning rate). + + The regularizer is a penalty added to the loss function that shrinks model + parameters towards the zero vector using either the squared euclidean norm + L2 or the absolute norm L1 or a combination of both (Elastic Net). If the + parameter update crosses the 0.0 value because of the regularizer, the + update is truncated to 0.0 to allow for learning sparse models and achieve + online feature selection. + + This implementation works with data represented as dense numpy arrays of + floating point values for the features. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : str, default='squared_error' + The loss function to be used. The possible values are 'squared_error', + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive' + + The 'squared_error' refers to the ordinary least squares fit. 
+ 'huber' modifies 'squared_error' to focus less on getting outliers + correct by switching from squared to linear loss past a distance of + epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is + linear past that; this is the loss function used in SVR. + 'squared_epsilon_insensitive' is the same but becomes squared loss past + a tolerance of epsilon. + + More details about the losses formulas can be found in the + :ref:`User Guide `. + + penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' + The penalty (aka regularization term) to be used. Defaults to 'l2' + which is the standard regularizer for linear SVM models. 'l1' and + 'elasticnet' might bring sparsity to the model (feature selection) + not achievable with 'l2'. No penalty is added when set to `None`. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term. The higher the + value, the stronger the regularization. Also used to compute the + learning rate when `learning_rate` is set to 'optimal'. + Values must be in the range `[0.0, inf)`. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. + l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. + Only used if `penalty` is 'elasticnet'. + Values must be in the range `[0.0, 1.0]`. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + Values must be in the range `[1, inf)`. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, training will stop + when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive + epochs. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Values must be in the range `[0.0, inf)`. + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + Values must be in the range `[0, inf)`. + + epsilon : float, default=0.1 + Epsilon in the epsilon-insensitive loss functions; only if `loss` is + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. + For 'huber', determines the threshold at which it becomes less + important to get the prediction exactly right. + For epsilon-insensitive, any differences between the current prediction + and the correct label are ignored if they are less than this threshold. + Values must be in the range `[0.0, inf)`. + + random_state : int, RandomState instance, default=None + Used for shuffling the data, when ``shuffle`` is set to ``True``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + learning_rate : str, default='invscaling' + The learning rate schedule: + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where t0 is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': eta = eta0, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + early_stopping is True, the current learning rate is divided by 5. + + .. 
versionadded:: 0.20 + Added 'adaptive' option + + eta0 : float, default=0.01 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.01. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.25 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set aside + a fraction of training data as validation and terminate + training when validation score returned by the `score` method is not + improving by at least `tol` for `n_iter_no_change` consecutive + epochs. + + .. versionadded:: 0.20 + Added 'early_stopping' option + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if `early_stopping` is True. + Values must be in the range `(0.0, 1.0)`. + + .. versionadded:: 0.20 + Added 'validation_fraction' option + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before stopping + fitting. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Integer values must be in the range `[1, max_iter)`. + + .. versionadded:: 0.20 + Added 'n_iter_no_change' option + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights across all + updates and stores the result in the ``coef_`` attribute. If set to + an int greater than 1, averaging will begin once the total number of + samples seen reaches `average`. So ``average=10`` will begin + averaging after seeing 10 samples. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) + The intercept term. + + n_iter_ : int + The actual number of iterations before reaching the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. + Lars : Least Angle Regression model. + Lasso : Linear Model trained with L1 prior as regularizer. + RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. + Ridge : Linear least squares with l2 regularization. + sklearn.svm.SVR : Epsilon-Support Vector Regression. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import SGDRegressor + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> # Always scale the input. The most convenient way is to use a pipeline. + >>> reg = make_pipeline(StandardScaler(), + ... SGDRegressor(max_iter=1000, tol=1e-3)) + >>> reg.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('sgdregressor', SGDRegressor())]) + """ + + _parameter_constraints: dict = { + **BaseSGDRegressor._parameter_constraints, + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "power_t": [Interval(Real, None, None, closed="neither")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "epsilon": [Interval(Real, 0, None, closed="left")], + "eta0": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + loss="squared_error", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + random_state=None, + learning_rate="invscaling", + eta0=0.01, + power_t=0.25, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + "preserves_dtype": [np.float64, np.float32], + } + + +class SGDOneClassSVM(BaseSGD, OutlierMixin): + """Solves linear One-Class SVM using Stochastic Gradient Descent. + + This implementation is meant to be used with a kernel approximation + technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results + similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by + default. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + nu : float, default=0.5 + The nu parameter of the One Class SVM: an upper bound on the + fraction of training errors and a lower bound of the fraction of + support vectors. Should be in the interval (0, 1]. By default 0.5 + will be taken. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. Defaults to True. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + `partial_fit`. Defaults to 1000. + Values must be in the range `[1, inf)`. + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). Defaults to 1e-3. + Values must be in the range `[0.0, inf)`. 
+ + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + Defaults to True. + + verbose : int, default=0 + The verbosity level. + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator to use when shuffling + the data. If int, random_state is the seed used by the random number + generator; If RandomState instance, random_state is the random number + generator; If None, the random number generator is the RandomState + instance used by `np.random`. + + learning_rate : {'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal' + The learning rate schedule to use with `fit`. (If using `partial_fit`, + learning rate must be controlled directly). + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where t0 is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': eta = eta0, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + early_stopping is True, the current learning rate is divided by 5. + + eta0 : float, default=0.0 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.0 as eta0 is not used by + the default schedule 'optimal'. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights and stores the + result in the ``coef_`` attribute. If set to an int greater than 1, + averaging will begin once the total number of samples seen reaches + average. So ``average=10`` will begin averaging after seeing 10 + samples. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) + Weights assigned to the features. + + offset_ : ndarray of shape (1,) + Offset used to define the decision function from the raw scores. + We have the relation: decision_function = score_samples - offset. + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + loss_function_ : concrete ``LossFunction`` + + .. deprecated:: 1.4 + ``loss_function_`` was deprecated in version 1.4 and will be removed in + 1.6. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.svm.OneClassSVM : Unsupervised Outlier Detection. 
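# ----------------------------------------------------------------------
# Illustrative aside (not part of the upstream diff): the class docstring
# above suggests pairing this linear One-Class SVM with a kernel
# approximation such as sklearn.kernel_approximation.Nystroem. A minimal
# sketch; the gamma and nu values are arbitrary.
import numpy as np
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import SGDOneClassSVM
from sklearn.pipeline import make_pipeline

rng = np.random.RandomState(42)
X = 0.3 * rng.randn(100, 2)           # inliers clustered around the origin
pipe = make_pipeline(
    Nystroem(gamma=0.1, random_state=42),
    SGDOneClassSVM(nu=0.05, random_state=42),
).fit(X)
print(pipe.predict([[0.0, 0.0], [4.0, 4.0]]))   # typically [ 1 -1]
# ----------------------------------------------------------------------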
+ + Notes + ----- + This estimator has a linear complexity in the number of training samples + and is thus better suited than the `sklearn.svm.OneClassSVM` + implementation for datasets with a large number of training samples (say + > 10,000). + + Examples + -------- + >>> import numpy as np + >>> from sklearn import linear_model + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> clf = linear_model.SGDOneClassSVM(random_state=42) + >>> clf.fit(X) + SGDOneClassSVM(random_state=42) + + >>> print(clf.predict([[4, 4]])) + [1] + """ + + loss_functions = {"hinge": (Hinge, 1.0)} + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "nu": [Interval(Real, 0.0, 1.0, closed="right")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "eta0": [Interval(Real, 0, None, closed="left")], + "power_t": [Interval(Real, None, None, closed="neither")], + } + + def __init__( + self, + nu=0.5, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + warm_start=False, + average=False, + ): + self.nu = nu + super(SGDOneClassSVM, self).__init__( + loss="hinge", + penalty="l2", + C=1.0, + l1_ratio=0, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=DEFAULT_EPSILON, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=warm_start, + average=average, + ) + + def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter): + """Uses SGD implementation with X and y=np.ones(n_samples).""" + + # The One-Class SVM uses the SGD implementation with + # y=np.ones(n_samples). + n_samples = X.shape[0] + y = np.ones(n_samples, dtype=X.dtype, order="C") + + dataset, offset_decay = make_dataset(X, y, sample_weight) + + penalty_type = self._get_penalty_type(self.penalty) + learning_rate_type = self._get_learning_rate_type(learning_rate) + + # early stopping is set to False for the One-Class SVM. thus + # validation_mask and validation_score_cb will be set to values + # associated to early_stopping=False in _make_validation_split and + # _make_validation_score_cb respectively. + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + validation_score_cb = self._make_validation_score_cb( + validation_mask, X, y, sample_weight + ) + + random_state = check_random_state(self.random_state) + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(0, np.iinfo(np.int32).max) + + tol = self.tol if self.tol is not None else -np.inf + + one_class = 1 + # There are no class weights for the One-Class SVM and they are + # therefore set to 1. 
+ pos_weight = 1 + neg_weight = 1 + + if self.average: + coef = self._standard_coef + intercept = self._standard_intercept + average_coef = self._average_coef + average_intercept = self._average_intercept + else: + coef = self.coef_ + intercept = 1 - self.offset_ + average_coef = None # Not used + average_intercept = [0] # Not used + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( + coef, + intercept[0], + average_coef, + average_intercept[0], + self._loss_function_, + penalty_type, + alpha, + C, + self.l1_ratio, + dataset, + validation_mask, + self.early_stopping, + validation_score_cb, + int(self.n_iter_no_change), + max_iter, + tol, + int(self.fit_intercept), + int(self.verbose), + int(self.shuffle), + seed, + neg_weight, + pos_weight, + learning_rate_type, + self.eta0, + self.power_t, + one_class, + self.t_, + offset_decay, + self.average, + ) + + self.t_ += self.n_iter_ * n_samples + + if self.average > 0: + self._average_intercept = np.atleast_1d(average_intercept) + self._standard_intercept = np.atleast_1d(intercept) + + if self.average <= self.t_ - 1.0: + # made enough updates for averaging to be taken into account + self.coef_ = average_coef + self.offset_ = 1 - np.atleast_1d(average_intercept) + else: + self.coef_ = coef + self.offset_ = 1 - np.atleast_1d(intercept) + + else: + self.offset_ = 1 - np.atleast_1d(intercept) + + def _partial_fit( + self, + X, + alpha, + C, + loss, + learning_rate, + max_iter, + sample_weight, + coef_init, + offset_init, + ): + first_call = getattr(self, "coef_", None) is None + X = self._validate_data( + X, + None, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=first_call, + ) + + n_features = X.shape[1] + + # Allocate datastructures from input arguments + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # We use intercept = 1 - offset where intercept is the intercept of + # the SGD implementation and offset is the offset of the One-Class SVM + # optimization problem. + if getattr(self, "coef_", None) is None or coef_init is not None: + self._allocate_parameter_mem( + n_classes=1, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=offset_init, + one_class=1, + ) + elif n_features != self.coef_.shape[-1]: + raise ValueError( + "Number of features %d does not match previous data %d." + % (n_features, self.coef_.shape[-1]) + ) + + if self.average and getattr(self, "_average_coef", None) is None: + self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C") + self._average_intercept = np.zeros(1, dtype=X.dtype, order="C") + + self._loss_function_ = self._get_loss_function(loss) + if not hasattr(self, "t_"): + self.t_ = 1.0 + + # delegate to concrete training procedure + self._fit_one_class( + X, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, sample_weight=None): + """Fit linear One-Class SVM with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of the training data. + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like, shape (n_samples,), optional + Weights applied to individual samples. + If not provided, uniform weights are assumed. 
+ + Returns + ------- + self : object + Returns a fitted instance of self. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + alpha = self.nu / 2 + return self._partial_fit( + X, + alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + sample_weight=sample_weight, + coef_init=None, + offset_init=None, + ) + + def _fit( + self, + X, + alpha, + C, + loss, + learning_rate, + coef_init=None, + offset_init=None, + sample_weight=None, + ): + if self.warm_start and hasattr(self, "coef_"): + if coef_init is None: + coef_init = self.coef_ + if offset_init is None: + offset_init = self.offset_ + else: + self.coef_ = None + self.offset_ = None + + # Clear iteration count for multiple call to fit. + self.t_ = 1.0 + + self._partial_fit( + X, + alpha, + C, + loss, + learning_rate, + self.max_iter, + sample_weight, + coef_init, + offset_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None): + """Fit linear One-Class SVM with Stochastic Gradient Descent. + + This solves an equivalent optimization problem of the + One-Class SVM primal optimization problem and returns a weight vector + w and an offset rho such that the decision function is given by + - rho. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + y : Ignored + Not used, present for API consistency by convention. + + coef_init : array, shape (n_classes, n_features) + The initial coefficients to warm-start the optimization. + + offset_init : array, shape (n_classes,) + The initial offset to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), optional + Weights applied to individual samples. + If not provided, uniform weights are assumed. These weights will + be multiplied with class_weight (passed through the + constructor) if class_weight is specified. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + self._more_validate_params() + + alpha = self.nu / 2 + self._fit( + X, + alpha=alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + offset_init=offset_init, + sample_weight=sample_weight, + ) + + return self + + def decision_function(self, X): + """Signed distance to the separating hyperplane. + + Signed distance is positive for an inlier and negative for an + outlier. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + dec : array-like, shape (n_samples,) + Decision function values of the samples. + """ + + check_is_fitted(self, "coef_") + + X = self._validate_data(X, accept_sparse="csr", reset=False) + decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_ + + return decisions.ravel() + + def score_samples(self, X): + """Raw scoring function of the samples. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + score_samples : array-like, shape (n_samples,) + Unshiffted scoring function values of the samples. 
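# ----------------------------------------------------------------------
# Illustrative aside (not part of the upstream diff): as documented for
# the offset_ attribute, decision_function = score_samples - offset_, so
# score_samples(X) equals decision_function(X) + offset_. A minimal check
# on toy data:
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
clf = SGDOneClassSVM(random_state=42).fit(X)
assert np.allclose(clf.score_samples(X),
                   clf.decision_function(X) + clf.offset_)
# ----------------------------------------------------------------------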
+ """ + score_samples = self.decision_function(X) + self.offset_ + return score_samples + + def predict(self, X): + """Return labels (1 inlier, -1 outlier) of the samples. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + y : array, shape (n_samples,) + Labels of the samples. + """ + y = (self.decision_function(X) >= 0).astype(np.int32) + y[y == 0] = -1 # for consistency with outlier detectors + return y + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ) + }, + "preserves_dtype": [np.float64, np.float32], + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..760c330bc5f1102cf8f17984ff95a16550415963 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29c70a9a5d41b36fc51e02edee4c575f2f0eb8ac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..468ce98cc7d1a1e6a09c528c1a76dd758a309097 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72e1c2c0b8b971a40b0c26f6b93f5c3d8adbffcb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83056ee34084898d36754f68ac385bdcb4ed46bf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..576ad181e7a52c7d7cac9c1e59c9363068ffc46e Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cef8fa2cc1e2472aa43c5c63f2eb84de2724f1c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30b6e2f57fcba2402b1042017a13e23ce67c8e42 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd4a4bb0b97d9f975ddd461cd89c49ea0a8ffd7b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c018c92c709d35969895113d858f05f16d9a66e9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f858ff40aa1a93eb4b281f6e9fd86fa8a1e7829 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_quantile.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_quantile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dc30e2dbd4a0cb207247de373514ba442a67709 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_quantile.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa19fd46ffde8470c6b5f3ea5b0e068e2911e379 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8697ad77bb99cd0b316feae4a57b72859a0b8bfe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1973c245115296d667edbaf00df55081fb38cfe0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7c9f734dcf5b587c72e6549ca2c437e8b2c0bab2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py @@ -0,0 +1,789 @@ +# Author: Alexandre Gramfort +# Fabian Pedregosa +# Maria Telenczuk +# +# License: BSD 3 clause + +import warnings + +import numpy as np +import pytest +from scipy import linalg, sparse + +from sklearn.datasets import load_iris, make_regression, make_sparse_uncorrelated +from sklearn.linear_model import LinearRegression +from sklearn.linear_model._base import ( + _preprocess_data, + _rescale_data, + make_dataset, +) +from sklearn.preprocessing import add_dummy_feature +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + LIL_CONTAINERS, +) + +rtol = 1e-6 + + +def test_linear_regression(): + # Test LinearRegression on a simple dataset. 
+ # a simple dataset + X = [[1], [2]] + Y = [1, 2] + + reg = LinearRegression() + reg.fit(X, Y) + + assert_array_almost_equal(reg.coef_, [1]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [1, 2]) + + # test it also for degenerate input + X = [[1]] + Y = [0] + + reg = LinearRegression() + reg.fit(X, Y) + assert_array_almost_equal(reg.coef_, [0]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [0]) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_linear_regression_sample_weights( + sparse_container, fit_intercept, global_random_seed +): + rng = np.random.RandomState(global_random_seed) + + # It would not work with under-determined systems + n_samples, n_features = 6, 5 + + X = rng.normal(size=(n_samples, n_features)) + if sparse_container is not None: + X = sparse_container(X) + y = rng.normal(size=n_samples) + + sample_weight = 1.0 + rng.uniform(size=n_samples) + + # LinearRegression with explicit sample_weight + reg = LinearRegression(fit_intercept=fit_intercept) + reg.fit(X, y, sample_weight=sample_weight) + coefs1 = reg.coef_ + inter1 = reg.intercept_ + + assert reg.coef_.shape == (X.shape[1],) # sanity checks + + # Closed form of the weighted least square + # theta = (X^T W X)^(-1) @ X^T W y + W = np.diag(sample_weight) + X_aug = X if not fit_intercept else add_dummy_feature(X) + + Xw = X_aug.T @ W @ X_aug + yw = X_aug.T @ W @ y + coefs2 = linalg.solve(Xw, yw) + + if not fit_intercept: + assert_allclose(coefs1, coefs2) + else: + assert_allclose(coefs1, coefs2[1:]) + assert_allclose(inter1, coefs2[0]) + + +def test_raises_value_error_if_positive_and_sparse(): + error_msg = "Sparse data was passed for X, but dense data is required." + # X must not be sparse if positive == True + X = sparse.eye(10) + y = np.ones(10) + + reg = LinearRegression(positive=True) + + with pytest.raises(TypeError, match=error_msg): + reg.fit(X, y) + + +@pytest.mark.parametrize("n_samples, n_features", [(2, 3), (3, 2)]) +def test_raises_value_error_if_sample_weights_greater_than_1d(n_samples, n_features): + # Sample weights must be either scalar or 1D + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + sample_weights_OK = rng.randn(n_samples) ** 2 + 1 + sample_weights_OK_1 = 1.0 + sample_weights_OK_2 = 2.0 + + reg = LinearRegression() + + # make sure the "OK" sample weights actually work + reg.fit(X, y, sample_weights_OK) + reg.fit(X, y, sample_weights_OK_1) + reg.fit(X, y, sample_weights_OK_2) + + +def test_fit_intercept(): + # Test assertions on betas shape. 
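# Standalone sketch (arbitrary data and seed) of the closed-form weighted
# least-squares solution theta = (X^T W X)^(-1) X^T W y that the sample-weight
# test above compares LinearRegression against; add_dummy_feature prepends the
# intercept column.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import add_dummy_feature

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 3))
y = rng.normal(size=20)
w = 1.0 + rng.uniform(size=20)

reg = LinearRegression().fit(X, y, sample_weight=w)

X_aug = add_dummy_feature(X)     # column of ones for the intercept term
W = np.diag(w)
theta = np.linalg.solve(X_aug.T @ W @ X_aug, X_aug.T @ W @ y)
assert np.allclose(reg.intercept_, theta[0])
assert np.allclose(reg.coef_, theta[1:])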
+ X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]]) + X3 = np.array( + [[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]] + ) + y = np.array([1, 1]) + + lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y) + lr2_with_intercept = LinearRegression().fit(X2, y) + + lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y) + lr3_with_intercept = LinearRegression().fit(X3, y) + + assert lr2_with_intercept.coef_.shape == lr2_without_intercept.coef_.shape + assert lr3_with_intercept.coef_.shape == lr3_without_intercept.coef_.shape + assert lr2_without_intercept.coef_.ndim == lr3_without_intercept.coef_.ndim + + +def test_linear_regression_sparse(global_random_seed): + # Test that linear regression also works with sparse data + rng = np.random.RandomState(global_random_seed) + n = 100 + X = sparse.eye(n, n) + beta = rng.rand(n) + y = X @ beta + + ols = LinearRegression() + ols.fit(X, y.ravel()) + assert_array_almost_equal(beta, ols.coef_ + ols.intercept_) + + assert_array_almost_equal(ols.predict(X) - y.ravel(), 0) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_linear_regression_sparse_equal_dense(fit_intercept, csr_container): + # Test that linear regression agrees between sparse and dense + rng = np.random.RandomState(0) + n_samples = 200 + n_features = 2 + X = rng.randn(n_samples, n_features) + X[X < 0.1] = 0.0 + Xcsr = csr_container(X) + y = rng.rand(n_samples) + params = dict(fit_intercept=fit_intercept) + clf_dense = LinearRegression(**params) + clf_sparse = LinearRegression(**params) + clf_dense.fit(X, y) + clf_sparse.fit(Xcsr, y) + assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_) + assert_allclose(clf_dense.coef_, clf_sparse.coef_) + + +def test_linear_regression_multiple_outcome(): + # Test multiple-outcome linear regressions + rng = np.random.RandomState(0) + X, y = make_regression(random_state=rng) + + Y = np.vstack((y, y)).T + n_features = X.shape[1] + + reg = LinearRegression() + reg.fit((X), Y) + assert reg.coef_.shape == (2, n_features) + Y_pred = reg.predict(X) + reg.fit(X, y) + y_pred = reg.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_linear_regression_sparse_multiple_outcome(global_random_seed, coo_container): + # Test multiple-outcome linear regressions with sparse data + rng = np.random.RandomState(global_random_seed) + X, y = make_sparse_uncorrelated(random_state=rng) + X = coo_container(X) + Y = np.vstack((y, y)).T + n_features = X.shape[1] + + ols = LinearRegression() + ols.fit(X, Y) + assert ols.coef_.shape == (2, n_features) + Y_pred = ols.predict(X) + ols.fit(X, y.ravel()) + y_pred = ols.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +def test_linear_regression_positive(): + # Test nonnegative LinearRegression on a simple dataset. 
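# Small illustrative sketch (arbitrary synthetic data) of the nonnegative
# least-squares behaviour exercised by the positive=True tests that follow:
# with positive=True and fit_intercept=False, LinearRegression solves an NNLS
# problem, so its coefficients should closely match scipy.optimize.nnls on the
# same design matrix.
import numpy as np
from scipy import optimize
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.rand(50, 4)
y = X @ np.array([1.0, 0.0, 2.0, 0.5]) + 0.01 * rng.randn(50)

reg = LinearRegression(positive=True, fit_intercept=False).fit(X, y)
coef_nnls, _ = optimize.nnls(X, y)
assert np.allclose(reg.coef_, coef_nnls)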
+ X = [[1], [2]] + y = [1, 2] + + reg = LinearRegression(positive=True) + reg.fit(X, y) + + assert_array_almost_equal(reg.coef_, [1]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [1, 2]) + + # test it also for degenerate input + X = [[1]] + y = [0] + + reg = LinearRegression(positive=True) + reg.fit(X, y) + assert_allclose(reg.coef_, [0]) + assert_allclose(reg.intercept_, [0]) + assert_allclose(reg.predict(X), [0]) + + +def test_linear_regression_positive_multiple_outcome(global_random_seed): + # Test multiple-outcome nonnegative linear regressions + rng = np.random.RandomState(global_random_seed) + X, y = make_sparse_uncorrelated(random_state=rng) + Y = np.vstack((y, y)).T + n_features = X.shape[1] + + ols = LinearRegression(positive=True) + ols.fit(X, Y) + assert ols.coef_.shape == (2, n_features) + assert np.all(ols.coef_ >= 0.0) + Y_pred = ols.predict(X) + ols.fit(X, y.ravel()) + y_pred = ols.predict(X) + assert_allclose(np.vstack((y_pred, y_pred)).T, Y_pred) + + +def test_linear_regression_positive_vs_nonpositive(global_random_seed): + # Test differences with LinearRegression when positive=False. + rng = np.random.RandomState(global_random_seed) + X, y = make_sparse_uncorrelated(random_state=rng) + + reg = LinearRegression(positive=True) + reg.fit(X, y) + regn = LinearRegression(positive=False) + regn.fit(X, y) + + assert np.mean((reg.coef_ - regn.coef_) ** 2) > 1e-3 + + +def test_linear_regression_positive_vs_nonpositive_when_positive(global_random_seed): + # Test LinearRegression fitted coefficients + # when the problem is positive. + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 4 + X = rng.rand(n_samples, n_features) + y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 1.5 * X[:, 3] + + reg = LinearRegression(positive=True) + reg.fit(X, y) + regn = LinearRegression(positive=False) + regn.fit(X, y) + + assert np.mean((reg.coef_ - regn.coef_) ** 2) < 1e-6 + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("use_sw", [True, False]) +def test_inplace_data_preprocessing(sparse_container, use_sw, global_random_seed): + # Check that the data is not modified inplace by the linear regression + # estimator. + rng = np.random.RandomState(global_random_seed) + original_X_data = rng.randn(10, 12) + original_y_data = rng.randn(10, 2) + orginal_sw_data = rng.rand(10) + + if sparse_container is not None: + X = sparse_container(original_X_data) + else: + X = original_X_data.copy() + y = original_y_data.copy() + # XXX: Note hat y_sparse is not supported (broken?) in the current + # implementation of LinearRegression. + + if use_sw: + sample_weight = orginal_sw_data.copy() + else: + sample_weight = None + + # Do not allow inplace preprocessing of X and y: + reg = LinearRegression() + reg.fit(X, y, sample_weight=sample_weight) + if sparse_container is not None: + assert_allclose(X.toarray(), original_X_data) + else: + assert_allclose(X, original_X_data) + assert_allclose(y, original_y_data) + + if use_sw: + assert_allclose(sample_weight, orginal_sw_data) + + # Allow inplace preprocessing of X and y + reg = LinearRegression(copy_X=False) + reg.fit(X, y, sample_weight=sample_weight) + if sparse_container is not None: + # No optimization relying on the inplace modification of sparse input + # data has been implemented at this time. + assert_allclose(X.toarray(), original_X_data) + else: + # X has been offset (and optionally rescaled by sample weights) + # inplace. 
The 0.42 threshold is arbitrary and has been found to be + # robust to any random seed in the admissible range. + assert np.linalg.norm(X - original_X_data) > 0.42 + + # y should not have been modified inplace by LinearRegression.fit. + assert_allclose(y, original_y_data) + + if use_sw: + # Sample weights have no reason to ever be modified inplace. + assert_allclose(sample_weight, orginal_sw_data) + + +def test_linear_regression_pd_sparse_dataframe_warning(): + pd = pytest.importorskip("pandas") + + # Warning is raised only when some of the columns is sparse + df = pd.DataFrame({"0": np.random.randn(10)}) + for col in range(1, 4): + arr = np.random.randn(10) + arr[:8] = 0 + # all columns but the first column is sparse + if col != 0: + arr = pd.arrays.SparseArray(arr, fill_value=0) + df[str(col)] = arr + + msg = "pandas.DataFrame with sparse columns found." + + reg = LinearRegression() + with pytest.warns(UserWarning, match=msg): + reg.fit(df.iloc[:, 0:2], df.iloc[:, 3]) + + # does not warn when the whole dataframe is sparse + df["0"] = pd.arrays.SparseArray(df["0"], fill_value=0) + assert hasattr(df, "sparse") + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + reg.fit(df.iloc[:, 0:2], df.iloc[:, 3]) + + +def test_preprocess_data(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + expected_X_mean = np.mean(X, axis=0) + expected_y_mean = np.mean(y, axis=0) + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=False) + assert_array_almost_equal(X_mean, np.zeros(n_features)) + assert_array_almost_equal(y_mean, 0) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt, X) + assert_array_almost_equal(yt, y) + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=True) + assert_array_almost_equal(X_mean, expected_X_mean) + assert_array_almost_equal(y_mean, expected_y_mean) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt, X - expected_X_mean) + assert_array_almost_equal(yt, y - expected_y_mean) + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_preprocess_data_multioutput(global_random_seed, sparse_container): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 3 + n_outputs = 2 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples, n_outputs) + expected_y_mean = np.mean(y, axis=0) + + if sparse_container is not None: + X = sparse_container(X) + + _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False) + assert_array_almost_equal(y_mean, np.zeros(n_outputs)) + assert_array_almost_equal(yt, y) + + _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True) + assert_array_almost_equal(y_mean, expected_y_mean) + assert_array_almost_equal(yt, y - y_mean) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_preprocess_data_weighted(sparse_container, global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 4 + # Generate random data with 50% of zero values to make sure + # that the sparse variant of this test is actually sparse. This also + # shifts the mean value for each columns in X further away from + # zero. 
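# Quick numeric reminder (arbitrary values) of the weighted statistics
# computed just below in this test: np.average with weights returns
# sum(w * x) / sum(w), and the weighted variance is the weighted average of
# squared deviations from that mean.
import numpy as np

x = np.array([1.0, 2.0, 4.0])
w = np.array([3.0, 1.0, 1.0])

mean_w = np.average(x, weights=w)
assert np.isclose(mean_w, (w * x).sum() / w.sum())        # (3*1 + 2 + 4) / 5 = 1.8
var_w = np.average((x - mean_w) ** 2, weights=w)
assert np.isclose(var_w, (w * (x - mean_w) ** 2).sum() / w.sum())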
+ X = rng.rand(n_samples, n_features) + X[X < 0.5] = 0.0 + + # Scale the first feature of X to be 10 larger than the other to + # better check the impact of feature scaling. + X[:, 0] *= 10 + + # Constant non-zero feature. + X[:, 2] = 1.0 + + # Constant zero feature (non-materialized in the sparse case) + X[:, 3] = 0.0 + y = rng.rand(n_samples) + + sample_weight = rng.rand(n_samples) + expected_X_mean = np.average(X, axis=0, weights=sample_weight) + expected_y_mean = np.average(y, axis=0, weights=sample_weight) + + X_sample_weight_avg = np.average(X, weights=sample_weight, axis=0) + X_sample_weight_var = np.average( + (X - X_sample_weight_avg) ** 2, weights=sample_weight, axis=0 + ) + constant_mask = X_sample_weight_var < 10 * np.finfo(X.dtype).eps + assert_array_equal(constant_mask, [0, 0, 1, 1]) + expected_X_scale = np.sqrt(X_sample_weight_var) * np.sqrt(sample_weight.sum()) + + # near constant features should not be scaled + expected_X_scale[constant_mask] = 1 + + if sparse_container is not None: + X = sparse_container(X) + + # normalize is False + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data( + X, + y, + fit_intercept=True, + sample_weight=sample_weight, + ) + assert_array_almost_equal(X_mean, expected_X_mean) + assert_array_almost_equal(y_mean, expected_y_mean) + assert_array_almost_equal(X_scale, np.ones(n_features)) + if sparse_container is not None: + assert_array_almost_equal(Xt.toarray(), X.toarray()) + else: + assert_array_almost_equal(Xt, X - expected_X_mean) + assert_array_almost_equal(yt, y - expected_y_mean) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_sparse_preprocess_data_offsets(global_random_seed, lil_container): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = sparse.rand(n_samples, n_features, density=0.5, random_state=rng) + X = lil_container(X) + y = rng.rand(n_samples) + XA = X.toarray() + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=False) + assert_array_almost_equal(X_mean, np.zeros(n_features)) + assert_array_almost_equal(y_mean, 0) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt.toarray(), XA) + assert_array_almost_equal(yt, y) + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=True) + assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) + assert_array_almost_equal(y_mean, np.mean(y, axis=0)) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt.toarray(), XA) + assert_array_almost_equal(yt, y - np.mean(y, axis=0)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_preprocess_data(csr_container): + # Test output format of _preprocess_data, when input is csr + X, y = make_regression() + X[X < 2.5] = 0.0 + csr = csr_container(X) + csr_, y, _, _, _ = _preprocess_data(csr, y, fit_intercept=True) + assert csr_.format == "csr" + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("to_copy", (True, False)) +def test_preprocess_copy_data_no_checks(sparse_container, to_copy): + X, y = make_regression() + X[X < 2.5] = 0.0 + + if sparse_container is not None: + X = sparse_container(X) + + X_, y_, _, _, _ = _preprocess_data( + X, y, fit_intercept=True, copy=to_copy, check_input=False + ) + + if to_copy and sparse_container is not None: + assert not np.may_share_memory(X_.data, X.data) + elif to_copy: + assert not np.may_share_memory(X_, X) + elif sparse_container is not None: + assert 
np.may_share_memory(X_.data, X.data) + else: + assert np.may_share_memory(X_, X) + + +def test_dtype_preprocess_data(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + X_32 = np.asarray(X, dtype=np.float32) + y_32 = np.asarray(y, dtype=np.float32) + X_64 = np.asarray(X, dtype=np.float64) + y_64 = np.asarray(y, dtype=np.float64) + + for fit_intercept in [True, False]: + Xt_32, yt_32, X_mean_32, y_mean_32, X_scale_32 = _preprocess_data( + X_32, + y_32, + fit_intercept=fit_intercept, + ) + + Xt_64, yt_64, X_mean_64, y_mean_64, X_scale_64 = _preprocess_data( + X_64, + y_64, + fit_intercept=fit_intercept, + ) + + Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_scale_3264 = _preprocess_data( + X_32, + y_64, + fit_intercept=fit_intercept, + ) + + Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_scale_6432 = _preprocess_data( + X_64, + y_32, + fit_intercept=fit_intercept, + ) + + assert Xt_32.dtype == np.float32 + assert yt_32.dtype == np.float32 + assert X_mean_32.dtype == np.float32 + assert y_mean_32.dtype == np.float32 + assert X_scale_32.dtype == np.float32 + + assert Xt_64.dtype == np.float64 + assert yt_64.dtype == np.float64 + assert X_mean_64.dtype == np.float64 + assert y_mean_64.dtype == np.float64 + assert X_scale_64.dtype == np.float64 + + assert Xt_3264.dtype == np.float32 + assert yt_3264.dtype == np.float32 + assert X_mean_3264.dtype == np.float32 + assert y_mean_3264.dtype == np.float32 + assert X_scale_3264.dtype == np.float32 + + assert Xt_6432.dtype == np.float64 + assert yt_6432.dtype == np.float64 + assert X_mean_6432.dtype == np.float64 + assert y_mean_6432.dtype == np.float64 + assert X_scale_6432.dtype == np.float64 + + assert X_32.dtype == np.float32 + assert y_32.dtype == np.float32 + assert X_64.dtype == np.float64 + assert y_64.dtype == np.float64 + + assert_array_almost_equal(Xt_32, Xt_64) + assert_array_almost_equal(yt_32, yt_64) + assert_array_almost_equal(X_mean_32, X_mean_64) + assert_array_almost_equal(y_mean_32, y_mean_64) + assert_array_almost_equal(X_scale_32, X_scale_64) + + +@pytest.mark.parametrize("n_targets", [None, 2]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_rescale_data(n_targets, sparse_container, global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + + sample_weight = 1.0 + rng.rand(n_samples) + X = rng.rand(n_samples, n_features) + if n_targets is None: + y = rng.rand(n_samples) + else: + y = rng.rand(n_samples, n_targets) + + expected_sqrt_sw = np.sqrt(sample_weight) + expected_rescaled_X = X * expected_sqrt_sw[:, np.newaxis] + + if n_targets is None: + expected_rescaled_y = y * expected_sqrt_sw + else: + expected_rescaled_y = y * expected_sqrt_sw[:, np.newaxis] + + if sparse_container is not None: + X = sparse_container(X) + if n_targets is None: + y = sparse_container(y.reshape(-1, 1)) + else: + y = sparse_container(y) + + rescaled_X, rescaled_y, sqrt_sw = _rescale_data(X, y, sample_weight) + + assert_allclose(sqrt_sw, expected_sqrt_sw) + + if sparse_container is not None: + rescaled_X = rescaled_X.toarray() + rescaled_y = rescaled_y.toarray() + if n_targets is None: + rescaled_y = rescaled_y.ravel() + + assert_allclose(rescaled_X, expected_rescaled_X) + assert_allclose(rescaled_y, expected_rescaled_y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_fused_types_make_dataset(csr_container): + iris = load_iris() + + 
X_32 = iris.data.astype(np.float32) + y_32 = iris.target.astype(np.float32) + X_csr_32 = csr_container(X_32) + sample_weight_32 = np.arange(y_32.size, dtype=np.float32) + + X_64 = iris.data.astype(np.float64) + y_64 = iris.target.astype(np.float64) + X_csr_64 = csr_container(X_64) + sample_weight_64 = np.arange(y_64.size, dtype=np.float64) + + # array + dataset_32, _ = make_dataset(X_32, y_32, sample_weight_32) + dataset_64, _ = make_dataset(X_64, y_64, sample_weight_64) + xi_32, yi_32, _, _ = dataset_32._next_py() + xi_64, yi_64, _, _ = dataset_64._next_py() + xi_data_32, _, _ = xi_32 + xi_data_64, _, _ = xi_64 + + assert xi_data_32.dtype == np.float32 + assert xi_data_64.dtype == np.float64 + assert_allclose(yi_64, yi_32, rtol=rtol) + + # csr + datasetcsr_32, _ = make_dataset(X_csr_32, y_32, sample_weight_32) + datasetcsr_64, _ = make_dataset(X_csr_64, y_64, sample_weight_64) + xicsr_32, yicsr_32, _, _ = datasetcsr_32._next_py() + xicsr_64, yicsr_64, _, _ = datasetcsr_64._next_py() + xicsr_data_32, _, _ = xicsr_32 + xicsr_data_64, _, _ = xicsr_64 + + assert xicsr_data_32.dtype == np.float32 + assert xicsr_data_64.dtype == np.float64 + + assert_allclose(xicsr_data_64, xicsr_data_32, rtol=rtol) + assert_allclose(yicsr_64, yicsr_32, rtol=rtol) + + assert_array_equal(xi_data_32, xicsr_data_32) + assert_array_equal(xi_data_64, xicsr_data_64) + assert_array_equal(yi_32, yicsr_32) + assert_array_equal(yi_64, yicsr_64) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_linear_regression_sample_weight_consistency( + sparse_container, fit_intercept, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weights_invariance alone and also tests sparse X. + It is very similar to test_enet_sample_weight_consistency. + """ + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict(fit_intercept=fit_intercept) + + reg = LinearRegression(**params).fit(X, y, sample_weight=None) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # 1) sample_weight=np.ones(..) must be equivalent to sample_weight=None + # same check as check_sample_weights_invariance(name, reg, kind="ones"), but we also + # test with sparse input. + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect, cf. 
np.average() + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + reg = reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + reg.fit(X, y, sample_weight=np.pi * sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6 if sparse_container is None else 1e-5) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 4) setting elements of sample_weight to 0 is equivalent to removing these samples + sample_weight_0 = sample_weight.copy() + sample_weight_0[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight_0) + coef_0 = reg.coef_.copy() + if fit_intercept: + intercept_0 = reg.intercept_ + reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5]) + if fit_intercept and sparse_container is None: + # FIXME: https://github.com/scikit-learn/scikit-learn/issues/26164 + # This often fails, e.g. when calling + # SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all" pytest \ + # sklearn/linear_model/tests/test_base.py\ + # ::test_linear_regression_sample_weight_consistency + pass + else: + assert_allclose(reg.coef_, coef_0, rtol=1e-5) + if fit_intercept: + assert_allclose(reg.intercept_, intercept_0) + + # 5) check that multiplying sample_weight by 2 is equivalent to repeating + # corresponding samples twice + if sparse_container is not None: + X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc") + else: + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + + reg1 = LinearRegression(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = LinearRegression(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6) + if fit_intercept: + assert_allclose(reg1.intercept_, reg2.intercept_) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_bayes.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_bayes.py new file mode 100644 index 0000000000000000000000000000000000000000..a700a98dbbc45625a71d67b0acbb469926d2da30 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_bayes.py @@ -0,0 +1,329 @@ +# Author: Alexandre Gramfort +# Fabian Pedregosa +# +# License: BSD 3 clause + +from math import log + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.linear_model import ARDRegression, BayesianRidge, Ridge +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + _convert_container, + assert_almost_equal, + assert_array_almost_equal, + assert_array_less, +) +from sklearn.utils.extmath import fast_logdet + +diabetes = datasets.load_diabetes() + + +def test_bayesian_ridge_scores(): + """Check scores attribute shape""" + X, y = diabetes.data, diabetes.target + + clf = BayesianRidge(compute_score=True) + clf.fit(X, y) + + assert clf.scores_.shape == (clf.n_iter_ + 1,) + + +def test_bayesian_ridge_score_values(): + """Check value of score on toy example. 
+ + Compute log marginal likelihood with equation (36) in Sparse Bayesian + Learning and the Relevance Vector Machine (Tipping, 2001): + + - 0.5 * (log |Id/alpha + X.X^T/lambda| + + y^T.(Id/alpha + X.X^T/lambda).y + n * log(2 * pi)) + + lambda_1 * log(lambda) - lambda_2 * lambda + + alpha_1 * log(alpha) - alpha_2 * alpha + + and check equality with the score computed during training. + """ + + X, y = diabetes.data, diabetes.target + n_samples = X.shape[0] + # check with initial values of alpha and lambda (see code for the values) + eps = np.finfo(np.float64).eps + alpha_ = 1.0 / (np.var(y) + eps) + lambda_ = 1.0 + + # value of the parameters of the Gamma hyperpriors + alpha_1 = 0.1 + alpha_2 = 0.1 + lambda_1 = 0.1 + lambda_2 = 0.1 + + # compute score using formula of docstring + score = lambda_1 * log(lambda_) - lambda_2 * lambda_ + score += alpha_1 * log(alpha_) - alpha_2 * alpha_ + M = 1.0 / alpha_ * np.eye(n_samples) + 1.0 / lambda_ * np.dot(X, X.T) + M_inv_dot_y = np.linalg.solve(M, y) + score += -0.5 * ( + fast_logdet(M) + np.dot(y.T, M_inv_dot_y) + n_samples * log(2 * np.pi) + ) + + # compute score with BayesianRidge + clf = BayesianRidge( + alpha_1=alpha_1, + alpha_2=alpha_2, + lambda_1=lambda_1, + lambda_2=lambda_2, + max_iter=1, + fit_intercept=False, + compute_score=True, + ) + clf.fit(X, y) + + assert_almost_equal(clf.scores_[0], score, decimal=9) + + +def test_bayesian_ridge_parameter(): + # Test correctness of lambda_ and alpha_ parameters (GitHub issue #8224) + X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]]) + y = np.array([1, 2, 3, 2, 0, 4, 5]).T + + # A Ridge regression model using an alpha value equal to the ratio of + # lambda_ and alpha_ from the Bayesian Ridge model must be identical + br_model = BayesianRidge(compute_score=True).fit(X, y) + rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y) + assert_array_almost_equal(rr_model.coef_, br_model.coef_) + assert_almost_equal(rr_model.intercept_, br_model.intercept_) + + +def test_bayesian_sample_weights(): + # Test correctness of the sample_weights method + X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]]) + y = np.array([1, 2, 3, 2, 0, 4, 5]).T + w = np.array([4, 3, 3, 1, 1, 2, 3]).T + + # A Ridge regression model using an alpha value equal to the ratio of + # lambda_ and alpha_ from the Bayesian Ridge model must be identical + br_model = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w) + rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit( + X, y, sample_weight=w + ) + assert_array_almost_equal(rr_model.coef_, br_model.coef_) + assert_almost_equal(rr_model.intercept_, br_model.intercept_) + + +def test_toy_bayesian_ridge_object(): + # Test BayesianRidge on toy + X = np.array([[1], [2], [6], [8], [10]]) + Y = np.array([1, 2, 6, 8, 10]) + clf = BayesianRidge(compute_score=True) + clf.fit(X, Y) + + # Check that the model could approximately learn the identity function + test = [[1], [3], [4]] + assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2) + + +def test_bayesian_initial_params(): + # Test BayesianRidge with initial values (alpha_init, lambda_init) + X = np.vander(np.linspace(0, 4, 5), 4) + y = np.array([0.0, 1.0, 0.0, -1.0, 0.0]) # y = (x^3 - 6x^2 + 8x) / 3 + + # In this case, starting from the default initial values will increase + # the bias of the fitted curve. So, lambda_init should be small. + reg = BayesianRidge(alpha_init=1.0, lambda_init=1e-3) + # Check the R2 score nearly equals to one. 
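# Compact standalone illustration (same toy data as the parameter test above):
# a Ridge model whose alpha equals lambda_ / alpha_ taken from a fitted
# BayesianRidge should recover essentially the same coefficients.
import numpy as np
from sklearn.linear_model import BayesianRidge, Ridge

X_toy = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]], dtype=float)
y_toy = np.array([1, 2, 3, 2, 0, 4, 5], dtype=float)

br = BayesianRidge(compute_score=True).fit(X_toy, y_toy)
rr = Ridge(alpha=br.lambda_ / br.alpha_).fit(X_toy, y_toy)
assert np.allclose(br.coef_, rr.coef_, atol=1e-5)
assert np.isclose(br.intercept_, rr.intercept_, atol=1e-5)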
+ r2 = reg.fit(X, y).score(X, y) + assert_almost_equal(r2, 1.0) + + +def test_prediction_bayesian_ridge_ard_with_constant_input(): + # Test BayesianRidge and ARDRegression predictions for edge case of + # constant target vectors + n_samples = 4 + n_features = 5 + random_state = check_random_state(42) + constant_value = random_state.rand() + X = random_state.random_sample((n_samples, n_features)) + y = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype) + expected = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype) + + for clf in [BayesianRidge(), ARDRegression()]: + y_pred = clf.fit(X, y).predict(X) + assert_array_almost_equal(y_pred, expected) + + +def test_std_bayesian_ridge_ard_with_constant_input(): + # Test BayesianRidge and ARDRegression standard dev. for edge case of + # constant target vector + # The standard dev. should be relatively small (< 0.01 is tested here) + n_samples = 10 + n_features = 5 + random_state = check_random_state(42) + constant_value = random_state.rand() + X = random_state.random_sample((n_samples, n_features)) + y = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype) + expected_upper_boundary = 0.01 + + for clf in [BayesianRidge(), ARDRegression()]: + _, y_std = clf.fit(X, y).predict(X, return_std=True) + assert_array_less(y_std, expected_upper_boundary) + + +def test_update_of_sigma_in_ard(): + # Checks that `sigma_` is updated correctly after the last iteration + # of the ARDRegression algorithm. See issue #10128. + X = np.array([[1, 0], [0, 0]]) + y = np.array([0, 0]) + clf = ARDRegression(max_iter=1) + clf.fit(X, y) + # With the inputs above, ARDRegression prunes both of the two coefficients + # in the first iteration. Hence, the expected shape of `sigma_` is (0, 0). 
+ assert clf.sigma_.shape == (0, 0) + # Ensure that no error is thrown at prediction stage + clf.predict(X, return_std=True) + + +def test_toy_ard_object(): + # Test BayesianRegression ARD classifier + X = np.array([[1], [2], [3]]) + Y = np.array([1, 2, 3]) + clf = ARDRegression(compute_score=True) + clf.fit(X, Y) + + # Check that the model could approximately learn the identity function + test = [[1], [3], [4]] + assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2) + + +@pytest.mark.parametrize("n_samples, n_features", ((10, 100), (100, 10))) +def test_ard_accuracy_on_easy_problem(global_random_seed, n_samples, n_features): + # Check that ARD converges with reasonable accuracy on an easy problem + # (Github issue #14055) + X = np.random.RandomState(global_random_seed).normal(size=(250, 3)) + y = X[:, 1] + + regressor = ARDRegression() + regressor.fit(X, y) + + abs_coef_error = np.abs(1 - regressor.coef_[1]) + assert abs_coef_error < 1e-10 + + +@pytest.mark.parametrize("constructor_name", ["array", "dataframe"]) +def test_return_std(constructor_name): + # Test return_std option for both Bayesian regressors + def f(X): + return np.dot(X, w) + b + + def f_noise(X, noise_mult): + return f(X) + np.random.randn(X.shape[0]) * noise_mult + + d = 5 + n_train = 50 + n_test = 10 + + w = np.array([1.0, 0.0, 1.0, -1.0, 0.0]) + b = 1.0 + + X = np.random.random((n_train, d)) + X = _convert_container(X, constructor_name) + + X_test = np.random.random((n_test, d)) + X_test = _convert_container(X_test, constructor_name) + + for decimal, noise_mult in enumerate([1, 0.1, 0.01]): + y = f_noise(X, noise_mult) + + m1 = BayesianRidge() + m1.fit(X, y) + y_mean1, y_std1 = m1.predict(X_test, return_std=True) + assert_array_almost_equal(y_std1, noise_mult, decimal=decimal) + + m2 = ARDRegression() + m2.fit(X, y) + y_mean2, y_std2 = m2.predict(X_test, return_std=True) + assert_array_almost_equal(y_std2, noise_mult, decimal=decimal) + + +def test_update_sigma(global_random_seed): + # make sure the two update_sigma() helpers are equivalent. The woodbury + # formula is used when n_samples < n_features, and the other one is used + # otherwise. + + rng = np.random.RandomState(global_random_seed) + + # set n_samples == n_features to avoid instability issues when inverting + # the matrices. 
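# Quick numeric check (small random matrices, arbitrary seed) of the Woodbury
# identity that motivates the two _update_sigma helpers compared just below in
# this test: (A + U V)^{-1} = A^{-1} - A^{-1} U (I + V A^{-1} U)^{-1} V A^{-1}.
import numpy as np

rng = np.random.RandomState(0)
n, k = 6, 3
A = np.diag(rng.rand(n) + 1.0)        # well-conditioned diagonal term
U = 0.1 * rng.randn(n, k)             # small low-rank perturbation
V = 0.1 * rng.randn(k, n)

A_inv = np.linalg.inv(A)
lhs = np.linalg.inv(A + U @ V)
rhs = A_inv - A_inv @ U @ np.linalg.inv(np.eye(k) + V @ A_inv @ U) @ V @ A_inv
assert np.allclose(lhs, rhs)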
Using the woodbury formula would be unstable when + # n_samples > n_features + n_samples = n_features = 10 + X = rng.randn(n_samples, n_features) + alpha = 1 + lmbda = np.arange(1, n_features + 1) + keep_lambda = np.array([True] * n_features) + + reg = ARDRegression() + + sigma = reg._update_sigma(X, alpha, lmbda, keep_lambda) + sigma_woodbury = reg._update_sigma_woodbury(X, alpha, lmbda, keep_lambda) + + np.testing.assert_allclose(sigma, sigma_woodbury) + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_dtype_match(dtype, Estimator): + # Test that np.float32 input data is not cast to np.float64 when possible + X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]], dtype=dtype) + y = np.array([1, 2, 3, 2, 0, 4, 5]).T + + model = Estimator() + # check type consistency + model.fit(X, y) + attributes = ["coef_", "sigma_"] + for attribute in attributes: + assert getattr(model, attribute).dtype == X.dtype + + y_mean, y_std = model.predict(X, return_std=True) + assert y_mean.dtype == X.dtype + assert y_std.dtype == X.dtype + + +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_dtype_correctness(Estimator): + X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]]) + y = np.array([1, 2, 3, 2, 0, 4, 5]).T + model = Estimator() + coef_32 = model.fit(X.astype(np.float32), y).coef_ + coef_64 = model.fit(X.astype(np.float64), y).coef_ + np.testing.assert_allclose(coef_32, coef_64, rtol=1e-4) + + +# TODO(1.5) remove +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_bayesian_ridge_ard_n_iter_deprecated(Estimator): + """Check the deprecation warning of `n_iter`.""" + depr_msg = ( + "'n_iter' was renamed to 'max_iter' in version 1.3 and will be removed in 1.5" + ) + X, y = diabetes.data, diabetes.target + model = Estimator(n_iter=5) + + with pytest.warns(FutureWarning, match=depr_msg): + model.fit(X, y) + + +# TODO(1.5) remove +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_bayesian_ridge_ard_max_iter_and_n_iter_both_set(Estimator): + """Check that a ValueError is raised when both `max_iter` and `n_iter` are set.""" + err_msg = ( + "Both `n_iter` and `max_iter` attributes were set. Attribute" + " `n_iter` was deprecated in version 1.3 and will be removed in" + " 1.5. To avoid this error, only set the `max_iter` attribute." 
+ ) + X, y = diabetes.data, diabetes.target + model = Estimator(n_iter=5, max_iter=5) + + with pytest.raises(ValueError, match=err_msg): + model.fit(X, y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py new file mode 100644 index 0000000000000000000000000000000000000000..3856d74464f0b31851095d5298c91b8cf79fd9fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py @@ -0,0 +1,216 @@ +# Authors: Manoj Kumar mks542@nyu.edu +# License: BSD 3 clause + +import numpy as np +import pytest +from scipy import optimize + +from sklearn.datasets import make_regression +from sklearn.linear_model import HuberRegressor, LinearRegression, Ridge, SGDRegressor +from sklearn.linear_model._huber import _huber_loss_and_gradient +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def make_regression_with_outliers(n_samples=50, n_features=20): + rng = np.random.RandomState(0) + # Generate data with outliers by replacing 10% of the samples with noise. + X, y = make_regression( + n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05 + ) + + # Replace 10% of the sample with noise. + num_noise = int(0.1 * n_samples) + random_samples = rng.randint(0, n_samples, num_noise) + X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1])) + return X, y + + +def test_huber_equals_lr_for_high_epsilon(): + # Test that Ridge matches LinearRegression for large epsilon + X, y = make_regression_with_outliers() + lr = LinearRegression() + lr.fit(X, y) + huber = HuberRegressor(epsilon=1e3, alpha=0.0) + huber.fit(X, y) + assert_almost_equal(huber.coef_, lr.coef_, 3) + assert_almost_equal(huber.intercept_, lr.intercept_, 2) + + +def test_huber_max_iter(): + X, y = make_regression_with_outliers() + huber = HuberRegressor(max_iter=1) + huber.fit(X, y) + assert huber.n_iter_ == huber.max_iter + + +def test_huber_gradient(): + # Test that the gradient calculated by _huber_loss_and_gradient is correct + rng = np.random.RandomState(1) + X, y = make_regression_with_outliers() + sample_weight = rng.randint(1, 3, (y.shape[0])) + + def loss_func(x, *args): + return _huber_loss_and_gradient(x, *args)[0] + + def grad_func(x, *args): + return _huber_loss_and_gradient(x, *args)[1] + + # Check using optimize.check_grad that the gradients are equal. + for _ in range(5): + # Check for both fit_intercept and otherwise. 
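# Minimal standalone illustration (toy quadratic, not the Huber loss) of the
# scipy.optimize.check_grad comparison used in the surrounding
# test_huber_gradient: it returns the norm of the difference between the
# analytic gradient and a finite-difference estimate, which should be tiny
# when the gradient implementation is correct.
import numpy as np
from scipy import optimize

def toy_loss(w):
    return 0.5 * np.dot(w, w)

def toy_grad(w):
    return w

err = optimize.check_grad(toy_loss, toy_grad, np.array([1.0, -2.0, 3.0]))
assert err < 1e-5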
+ for n_features in [X.shape[1] + 1, X.shape[1] + 2]: + w = rng.randn(n_features) + w[-1] = np.abs(w[-1]) + grad_same = optimize.check_grad( + loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight + ) + assert_almost_equal(grad_same, 1e-6, 4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_huber_sample_weights(csr_container): + # Test sample_weights implementation in HuberRegressor""" + + X, y = make_regression_with_outliers() + huber = HuberRegressor() + huber.fit(X, y) + huber_coef = huber.coef_ + huber_intercept = huber.intercept_ + + # Rescale coefs before comparing with assert_array_almost_equal to make + # sure that the number of decimal places used is somewhat insensitive to + # the amplitude of the coefficients and therefore to the scale of the + # data and the regularization parameter + scale = max(np.mean(np.abs(huber.coef_)), np.mean(np.abs(huber.intercept_))) + + huber.fit(X, y, sample_weight=np.ones(y.shape[0])) + assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale) + assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale) + + X, y = make_regression_with_outliers(n_samples=5, n_features=20) + X_new = np.vstack((X, np.vstack((X[1], X[1], X[3])))) + y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]])) + huber.fit(X_new, y_new) + huber_coef = huber.coef_ + huber_intercept = huber.intercept_ + sample_weight = np.ones(X.shape[0]) + sample_weight[1] = 3 + sample_weight[3] = 2 + huber.fit(X, y, sample_weight=sample_weight) + + assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale) + assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale) + + # Test sparse implementation with sample weights. + X_csr = csr_container(X) + huber_sparse = HuberRegressor() + huber_sparse.fit(X_csr, y, sample_weight=sample_weight) + assert_array_almost_equal(huber_sparse.coef_ / scale, huber_coef / scale) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_huber_sparse(csr_container): + X, y = make_regression_with_outliers() + huber = HuberRegressor(alpha=0.1) + huber.fit(X, y) + + X_csr = csr_container(X) + huber_sparse = HuberRegressor(alpha=0.1) + huber_sparse.fit(X_csr, y) + assert_array_almost_equal(huber_sparse.coef_, huber.coef_) + assert_array_equal(huber.outliers_, huber_sparse.outliers_) + + +def test_huber_scaling_invariant(): + # Test that outliers filtering is scaling independent. + X, y = make_regression_with_outliers() + huber = HuberRegressor(fit_intercept=False, alpha=0.0) + huber.fit(X, y) + n_outliers_mask_1 = huber.outliers_ + assert not np.all(n_outliers_mask_1) + + huber.fit(X, 2.0 * y) + n_outliers_mask_2 = huber.outliers_ + assert_array_equal(n_outliers_mask_2, n_outliers_mask_1) + + huber.fit(2.0 * X, 2.0 * y) + n_outliers_mask_3 = huber.outliers_ + assert_array_equal(n_outliers_mask_3, n_outliers_mask_1) + + +def test_huber_and_sgd_same_results(): + # Test they should converge to same coefficients for same parameters + + X, y = make_regression_with_outliers(n_samples=10, n_features=2) + + # Fit once to find out the scale parameter. 
Scale down X and y by scale + # so that the scale parameter is optimized to 1.0 + huber = HuberRegressor(fit_intercept=False, alpha=0.0, epsilon=1.35) + huber.fit(X, y) + X_scale = X / huber.scale_ + y_scale = y / huber.scale_ + huber.fit(X_scale, y_scale) + assert_almost_equal(huber.scale_, 1.0, 3) + + sgdreg = SGDRegressor( + alpha=0.0, + loss="huber", + shuffle=True, + random_state=0, + max_iter=10000, + fit_intercept=False, + epsilon=1.35, + tol=None, + ) + sgdreg.fit(X_scale, y_scale) + assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1) + + +def test_huber_warm_start(): + X, y = make_regression_with_outliers() + huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1) + + huber_warm.fit(X, y) + huber_warm_coef = huber_warm.coef_.copy() + huber_warm.fit(X, y) + + # SciPy performs the tol check after doing the coef updates, so + # these would be almost same but not equal. + assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1) + + assert huber_warm.n_iter_ == 0 + + +def test_huber_better_r2_score(): + # Test that huber returns a better r2 score than non-outliers""" + X, y = make_regression_with_outliers() + huber = HuberRegressor(alpha=0.01) + huber.fit(X, y) + linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y + mask = np.abs(linear_loss) < huber.epsilon * huber.scale_ + huber_score = huber.score(X[mask], y[mask]) + huber_outlier_score = huber.score(X[~mask], y[~mask]) + + # The Ridge regressor should be influenced by the outliers and hence + # give a worse score on the non-outliers as compared to the huber + # regressor. + ridge = Ridge(alpha=0.01) + ridge.fit(X, y) + ridge_score = ridge.score(X[mask], y[mask]) + ridge_outlier_score = ridge.score(X[~mask], y[~mask]) + assert huber_score > ridge_score + + # The huber model should also fit poorly on the outliers. 
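# Small synthetic illustration (arbitrary seed) of the robustness property the
# surrounding r2-score comparison relies on: when a few targets are heavily
# corrupted, HuberRegressor should stay much closer to the true slope than
# Ridge.
import numpy as np
from sklearn.linear_model import HuberRegressor, Ridge

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(100, 1))
y = 2.0 * X.ravel() + rng.normal(scale=0.1, size=100)
corrupt = np.argsort(X.ravel())[-10:]         # corrupt the ten largest-x targets
y[corrupt] += 100.0

huber = HuberRegressor(alpha=0.0).fit(X, y)
ridge = Ridge(alpha=0.01).fit(X, y)
assert abs(huber.coef_[0] - 2.0) < abs(ridge.coef_[0] - 2.0)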
+ assert ridge_outlier_score > huber_outlier_score + + +def test_huber_bool(): + # Test that it does not crash with bool data + X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0) + X_bool = X > 0 + HuberRegressor().fit(X_bool, y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py new file mode 100644 index 0000000000000000000000000000000000000000..9974090135ac501da0935ee3048a112f305eebcf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py @@ -0,0 +1,2194 @@ +import itertools +import os +import warnings +from functools import partial + +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from scipy import sparse + +from sklearn import config_context +from sklearn.base import clone +from sklearn.datasets import load_iris, make_classification +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import SGDClassifier +from sklearn.linear_model._logistic import ( + LogisticRegression as LogisticRegressionDefault, +) +from sklearn.linear_model._logistic import ( + LogisticRegressionCV as LogisticRegressionCVDefault, +) +from sklearn.linear_model._logistic import ( + _log_reg_scoring_path, + _logistic_regression_path, +) +from sklearn.metrics import get_scorer, log_loss +from sklearn.model_selection import ( + GridSearchCV, + StratifiedKFold, + cross_val_score, + train_test_split, +) +from sklearn.preprocessing import LabelEncoder, StandardScaler, scale +from sklearn.svm import l1_min_c +from sklearn.utils import _IS_32BIT, compute_class_weight, shuffle +from sklearn.utils._testing import ignore_warnings, skip_if_no_parallel +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +pytestmark = pytest.mark.filterwarnings( + "error::sklearn.exceptions.ConvergenceWarning:sklearn.*" +) +# Fixing random_state helps prevent ConvergenceWarnings +LogisticRegression = partial(LogisticRegressionDefault, random_state=0) +LogisticRegressionCV = partial(LogisticRegressionCVDefault, random_state=0) + + +SOLVERS = ("lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga") +X = [[-1, 0], [0, 1], [1, 1]] +Y1 = [0, 1, 1] +Y2 = [2, 1, 0] +iris = load_iris() + + +def check_predictions(clf, X, y): + """Check that the model is able to fit the classification data""" + n_samples = len(y) + classes = np.unique(y) + n_classes = classes.shape[0] + + predicted = clf.fit(X, y).predict(X) + assert_array_equal(clf.classes_, classes) + + assert predicted.shape == (n_samples,) + assert_array_equal(predicted, y) + + probabilities = clf.predict_proba(X) + assert probabilities.shape == (n_samples, n_classes) + assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) + assert_array_equal(probabilities.argmax(axis=1), y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_2_classes(csr_container): + # Simple sanity check on a 2 classes dataset + # Make sure it predicts the correct result on simple datasets. 
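# Tiny standalone illustration (toy data from this module) of the invariants
# that the check_predictions helper above verifies: predict_proba rows sum to
# one and their argmax agrees with predict().
import numpy as np
from sklearn.linear_model import LogisticRegression

X_toy = np.array([[-1, 0], [0, 1], [1, 1]], dtype=float)
y_toy = np.array([0, 1, 1])

clf = LogisticRegression(random_state=0).fit(X_toy, y_toy)
proba = clf.predict_proba(X_toy)
assert np.allclose(proba.sum(axis=1), 1.0)
assert np.array_equal(clf.classes_[proba.argmax(axis=1)], clf.predict(X_toy))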
+ check_predictions(LogisticRegression(random_state=0), X, Y1) + check_predictions(LogisticRegression(random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(C=100, random_state=0), X, Y1) + check_predictions(LogisticRegression(C=100, random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1) + check_predictions( + LogisticRegression(fit_intercept=False, random_state=0), csr_container(X), Y1 + ) + + +def test_logistic_cv_mock_scorer(): + class MockScorer: + def __init__(self): + self.calls = 0 + self.scores = [0.1, 0.4, 0.8, 0.5] + + def __call__(self, model, X, y, sample_weight=None): + score = self.scores[self.calls % len(self.scores)] + self.calls += 1 + return score + + mock_scorer = MockScorer() + Cs = [1, 2, 3, 4] + cv = 2 + + lr = LogisticRegressionCV(Cs=Cs, scoring=mock_scorer, cv=cv) + X, y = make_classification(random_state=0) + lr.fit(X, y) + + # Cs[2] has the highest score (0.8) from MockScorer + assert lr.C_[0] == Cs[2] + + # scorer called 8 times (cv*len(Cs)) + assert mock_scorer.calls == cv * len(Cs) + + # reset mock_scorer + mock_scorer.calls = 0 + custom_score = lr.score(X, lr.predict(X)) + + assert custom_score == mock_scorer.scores[0] + assert mock_scorer.calls == 1 + + +@skip_if_no_parallel +def test_lr_liblinear_warning(): + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + lr = LogisticRegression(solver="liblinear", n_jobs=2) + warning_message = ( + "'n_jobs' > 1 does not have any effect when" + " 'solver' is set to 'liblinear'. Got 'n_jobs'" + " = 2." + ) + with pytest.warns(UserWarning, match=warning_message): + lr.fit(iris.data, target) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_3_classes(csr_container): + check_predictions(LogisticRegression(C=10), X, Y2) + check_predictions(LogisticRegression(C=10), csr_container(X), Y2) + + +@pytest.mark.parametrize( + "clf", + [ + LogisticRegression(C=len(iris.data), solver="liblinear", multi_class="ovr"), + LogisticRegression(C=len(iris.data), solver="lbfgs", multi_class="multinomial"), + LogisticRegression( + C=len(iris.data), solver="newton-cg", multi_class="multinomial" + ), + LogisticRegression( + C=len(iris.data), solver="sag", tol=1e-2, multi_class="ovr", random_state=42 + ), + LogisticRegression( + C=len(iris.data), + solver="saga", + tol=1e-2, + multi_class="ovr", + random_state=42, + ), + LogisticRegression( + C=len(iris.data), solver="newton-cholesky", multi_class="ovr" + ), + ], +) +def test_predict_iris(clf): + """Test logistic regression with the iris dataset. + + Test that both multinomial and OvR solvers handle multiclass data correctly and + give good accuracy score (>0.95) for the training data. 
+ """ + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + if clf.solver == "lbfgs": + # lbfgs has convergence issues on the iris data with its default max_iter=100 + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + clf.fit(iris.data, target) + else: + clf.fit(iris.data, target) + assert_array_equal(np.unique(target), clf.classes_) + + pred = clf.predict(iris.data) + assert np.mean(pred == target) > 0.95 + + probabilities = clf.predict_proba(iris.data) + assert_allclose(probabilities.sum(axis=1), np.ones(n_samples)) + + pred = iris.target_names[probabilities.argmax(axis=1)] + assert np.mean(pred == target) > 0.95 + + +@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV]) +def test_check_solver_option(LR): + X, y = iris.data, iris.target + + # only 'liblinear' and 'newton-cholesky' solver + for solver in ["liblinear", "newton-cholesky"]: + msg = f"Solver {solver} does not support a multinomial backend." + lr = LR(solver=solver, multi_class="multinomial") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # all solvers except 'liblinear' and 'saga' + for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag"]: + msg = "Solver %s supports only 'l2' or None penalties," % solver + lr = LR(solver=solver, penalty="l1", multi_class="ovr") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag", "saga"]: + msg = "Solver %s supports only dual=False, got dual=True" % solver + lr = LR(solver=solver, dual=True, multi_class="ovr") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # only saga supports elasticnet. We only test for liblinear because the + # error is raised before for the other solvers (solver %s supports only l2 + # penalties) + for solver in ["liblinear"]: + msg = f"Only 'saga' solver supports elasticnet penalty, got solver={solver}." + lr = LR(solver=solver, penalty="elasticnet") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # liblinear does not support penalty='none' + # (LogisticRegressionCV does not supports penalty='none' at all) + if LR is LogisticRegression: + msg = "penalty=None is not supported for the liblinear solver" + lr = LR(penalty=None, solver="liblinear") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + +@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV]) +def test_elasticnet_l1_ratio_err_helpful(LR): + # Check that an informative error message is raised when penalty="elasticnet" + # but l1_ratio is not specified. + model = LR(penalty="elasticnet", solver="saga") + with pytest.raises(ValueError, match=r".*l1_ratio.*"): + model.fit(np.array([[1, 2], [3, 4]]), np.array([0, 1])) + + +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"]) +def test_multinomial_binary(solver): + # Test multinomial LR on a binary problem. 
+ target = (iris.target > 0).astype(np.intp) + target = np.array(["setosa", "not-setosa"])[target] + + clf = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, max_iter=2000 + ) + clf.fit(iris.data, target) + + assert clf.coef_.shape == (1, iris.data.shape[1]) + assert clf.intercept_.shape == (1,) + assert_array_equal(clf.predict(iris.data), target) + + mlr = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, fit_intercept=False + ) + mlr.fit(iris.data, target) + pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)] + assert np.mean(pred == target) > 0.9 + + +def test_multinomial_binary_probabilities(global_random_seed): + # Test multinomial LR gives expected probabilities based on the + # decision function, for a binary problem. + X, y = make_classification(random_state=global_random_seed) + clf = LogisticRegression( + multi_class="multinomial", + solver="saga", + tol=1e-3, + random_state=global_random_seed, + ) + clf.fit(X, y) + + decision = clf.decision_function(X) + proba = clf.predict_proba(X) + + expected_proba_class_1 = np.exp(decision) / (np.exp(decision) + np.exp(-decision)) + expected_proba = np.c_[1 - expected_proba_class_1, expected_proba_class_1] + + assert_almost_equal(proba, expected_proba) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_sparsify(coo_container): + # Test sparsify and densify members. + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + X = scale(iris.data) + clf = LogisticRegression(random_state=0).fit(X, target) + + pred_d_d = clf.decision_function(X) + + clf.sparsify() + assert sparse.issparse(clf.coef_) + pred_s_d = clf.decision_function(X) + + sp_data = coo_container(X) + pred_s_s = clf.decision_function(sp_data) + + clf.densify() + pred_d_s = clf.decision_function(sp_data) + + assert_array_almost_equal(pred_d_d, pred_s_d) + assert_array_almost_equal(pred_d_d, pred_s_s) + assert_array_almost_equal(pred_d_d, pred_d_s) + + +def test_inconsistent_input(): + # Test that an exception is raised on inconsistent input + rng = np.random.RandomState(0) + X_ = rng.random_sample((5, 10)) + y_ = np.ones(X_.shape[0]) + y_[0] = 0 + + clf = LogisticRegression(random_state=0) + + # Wrong dimensions for training data + y_wrong = y_[:-1] + + with pytest.raises(ValueError): + clf.fit(X, y_wrong) + + # Wrong dimensions for test data + with pytest.raises(ValueError): + clf.fit(X_, y_).predict(rng.random_sample((3, 12))) + + +def test_write_parameters(): + # Test that we can write to coef_ and intercept_ + clf = LogisticRegression(random_state=0) + clf.fit(X, Y1) + clf.coef_[:] = 0 + clf.intercept_[:] = 0 + assert_array_almost_equal(clf.decision_function(X), 0) + + +def test_nan(): + # Test proper NaN handling. + # Regression test for Issue #252: fit used to go into an infinite loop. 
+ Xnan = np.array(X, dtype=np.float64) + Xnan[0, 1] = np.nan + logistic = LogisticRegression(random_state=0) + + with pytest.raises(ValueError): + logistic.fit(Xnan, Y1) + + +def test_consistency_path(): + # Test that the path algorithm is consistent + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = np.logspace(0, 4, 10) + + f = ignore_warnings + # can't test with fit_intercept=True since LIBLINEAR + # penalizes the intercept + for solver in ["sag", "saga"]: + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + fit_intercept=False, + tol=1e-5, + solver=solver, + max_iter=1000, + multi_class="ovr", + random_state=0, + ) + for i, C in enumerate(Cs): + lr = LogisticRegression( + C=C, + fit_intercept=False, + tol=1e-5, + solver=solver, + multi_class="ovr", + random_state=0, + max_iter=1000, + ) + lr.fit(X, y) + lr_coef = lr.coef_.ravel() + assert_array_almost_equal( + lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver + ) + + # test for fit_intercept=True + for solver in ("lbfgs", "newton-cg", "newton-cholesky", "liblinear", "sag", "saga"): + Cs = [1e3] + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + tol=1e-6, + solver=solver, + intercept_scaling=10000.0, + random_state=0, + multi_class="ovr", + ) + lr = LogisticRegression( + C=Cs[0], + tol=1e-6, + intercept_scaling=10000.0, + random_state=0, + multi_class="ovr", + solver=solver, + ) + lr.fit(X, y) + lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_]) + assert_array_almost_equal( + lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver + ) + + +def test_logistic_regression_path_convergence_fail(): + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = [1e3] + + # Check that the convergence message points to both a model agnostic + # advice (scaling the data) and to the logistic regression specific + # documentation that includes hints on the solver configuration. 
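+ # As an illustration only (not exercised by this test), the user-side fix
+ # suggested by that message would be roughly:
+ #
+ #     from sklearn.preprocessing import StandardScaler
+ #     X_scaled = StandardScaler().fit_transform(X)
+ #     LogisticRegression(max_iter=1000).fit(X_scaled, y)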
+ with pytest.warns(ConvergenceWarning) as record: + _logistic_regression_path( + X, y, Cs=Cs, tol=0.0, max_iter=1, random_state=0, verbose=0 + ) + + assert len(record) == 1 + warn_msg = record[0].message.args[0] + assert "lbfgs failed to converge" in warn_msg + assert "Increase the number of iterations" in warn_msg + assert "scale the data" in warn_msg + assert "linear_model.html#logistic-regression" in warn_msg + + +def test_liblinear_dual_random_state(): + # random_state is relevant for liblinear solver only if dual=True + X, y = make_classification(n_samples=20, random_state=0) + lr1 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr1.fit(X, y) + lr2 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr2.fit(X, y) + lr3 = LogisticRegression( + random_state=8, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr3.fit(X, y) + + # same result for same random state + assert_array_almost_equal(lr1.coef_, lr2.coef_) + # different results for different random states + msg = "Arrays are not almost equal to 6 decimals" + with pytest.raises(AssertionError, match=msg): + assert_array_almost_equal(lr1.coef_, lr3.coef_) + + +def test_logistic_cv(): + # test for LogisticRegressionCV object + n_samples, n_features = 50, 5 + rng = np.random.RandomState(0) + X_ref = rng.randn(n_samples, n_features) + y = np.sign(X_ref.dot(5 * rng.randn(n_features))) + X_ref -= X_ref.mean() + X_ref /= X_ref.std() + lr_cv = LogisticRegressionCV( + Cs=[1.0], fit_intercept=False, solver="liblinear", multi_class="ovr", cv=3 + ) + lr_cv.fit(X_ref, y) + lr = LogisticRegression( + C=1.0, fit_intercept=False, solver="liblinear", multi_class="ovr" + ) + lr.fit(X_ref, y) + assert_array_almost_equal(lr.coef_, lr_cv.coef_) + + assert_array_equal(lr_cv.coef_.shape, (1, n_features)) + assert_array_equal(lr_cv.classes_, [-1, 1]) + assert len(lr_cv.classes_) == 2 + + coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values())) + assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features)) + assert_array_equal(lr_cv.Cs_.shape, (1,)) + scores = np.asarray(list(lr_cv.scores_.values())) + assert_array_equal(scores.shape, (1, 3, 1)) + + +@pytest.mark.parametrize( + "scoring, multiclass_agg_list", + [ + ("accuracy", [""]), + ("precision", ["_macro", "_weighted"]), + # no need to test for micro averaging because it + # is the same as accuracy for f1, precision, + # and recall (see https://github.com/ + # scikit-learn/scikit-learn/pull/ + # 11578#discussion_r203250062) + ("f1", ["_macro", "_weighted"]), + ("neg_log_loss", [""]), + ("recall", ["_macro", "_weighted"]), + ], +) +def test_logistic_cv_multinomial_score(scoring, multiclass_agg_list): + # test that LogisticRegressionCV uses the right score to compute its + # cross-validation scores when using a multinomial scoring + # see https://github.com/scikit-learn/scikit-learn/issues/8720 + X, y = make_classification( + n_samples=100, random_state=0, n_classes=3, n_informative=6 + ) + train, test = np.arange(80), np.arange(80, 100) + lr = LogisticRegression(C=1.0, multi_class="multinomial") + # we use lbfgs to support multinomial + params = lr.get_params() + # we store the params to set them further in _log_reg_scoring_path + for key in ["C", "n_jobs", "warm_start"]: + del params[key] + lr.fit(X[train], y[train]) + for averaging in multiclass_agg_list: + scorer = get_scorer(scoring + averaging) + assert_array_almost_equal( + 
_log_reg_scoring_path( + X, + y, + train, + test, + Cs=[1.0], + scoring=scorer, + pos_class=None, + max_squared_sum=None, + sample_weight=None, + score_params=None, + **params, + )[2][0], + scorer(lr, X[test], y[test]), + ) + + +def test_multinomial_logistic_regression_string_inputs(): + # Test with string labels for LogisticRegression(CV) + n_samples, n_features, n_classes = 50, 5, 3 + X_ref, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_classes=n_classes, + n_informative=3, + random_state=0, + ) + y_str = LabelEncoder().fit(["bar", "baz", "foo"]).inverse_transform(y) + # For numerical labels, let y values be taken from set (-1, 0, 1) + y = np.array(y) - 1 + # Test for string labels + lr = LogisticRegression(multi_class="multinomial") + lr_cv = LogisticRegressionCV(multi_class="multinomial", Cs=3) + lr_str = LogisticRegression(multi_class="multinomial") + lr_cv_str = LogisticRegressionCV(multi_class="multinomial", Cs=3) + + lr.fit(X_ref, y) + lr_cv.fit(X_ref, y) + lr_str.fit(X_ref, y_str) + lr_cv_str.fit(X_ref, y_str) + + assert_array_almost_equal(lr.coef_, lr_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert sorted(lr_cv_str.classes_) == ["bar", "baz", "foo"] + + # The predictions should be in original labels + assert sorted(np.unique(lr_str.predict(X_ref))) == ["bar", "baz", "foo"] + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz", "foo"] + + # Make sure class weights can be given with string labels + lr_cv_str = LogisticRegression( + class_weight={"bar": 1, "baz": 2, "foo": 0}, multi_class="multinomial" + ).fit(X_ref, y_str) + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz"] + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logistic_cv_sparse(csr_container): + X, y = make_classification(n_samples=50, n_features=5, random_state=0) + X[X < 1.0] = 0.0 + csr = csr_container(X) + + clf = LogisticRegressionCV() + clf.fit(X, y) + clfs = LogisticRegressionCV() + clfs.fit(csr, y) + assert_array_almost_equal(clfs.coef_, clf.coef_) + assert_array_almost_equal(clfs.intercept_, clf.intercept_) + assert clfs.C_ == clf.C_ + + +def test_ovr_multinomial_iris(): + # Test that OvR and multinomial are correct using the iris dataset. + train, target = iris.data, iris.target + n_samples, n_features = train.shape + + # The cv indices from stratified kfold (where stratification is done based + # on the fine-grained iris classes, i.e, before the classes 0 and 1 are + # conflated) is used for both clf and clf1 + n_cv = 2 + cv = StratifiedKFold(n_cv) + precomputed_folds = list(cv.split(train, target)) + + # Train clf on the original dataset where classes 0 and 1 are separated + clf = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + clf.fit(train, target) + + # Conflate classes 0 and 1 and train clf1 on this modified dataset + clf1 = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + target_copy = target.copy() + target_copy[target_copy == 0] = 1 + clf1.fit(train, target_copy) + + # Ensure that what OvR learns for class2 is same regardless of whether + # classes 0 and 1 are separated or not + assert_allclose(clf.scores_[2], clf1.scores_[2]) + assert_allclose(clf.intercept_[2:], clf1.intercept_) + assert_allclose(clf.coef_[2][np.newaxis, :], clf1.coef_) + + # Test the shape of various attributes. 
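+ # Per class, coefs_paths_[c] has shape (n_folds, n_Cs, n_features + 1): the
+ # trailing "+ 1" is the fitted intercept and n_Cs defaults to 10. Stacking
+ # the three classes gives the shapes asserted below.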
+ assert clf.coef_.shape == (3, n_features) + assert_array_equal(clf.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf.Cs_.shape == (10,) + scores = np.asarray(list(clf.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + # Test that for the iris data multinomial gives a better accuracy than OvR + for solver in ["lbfgs", "newton-cg", "sag", "saga"]: + max_iter = 500 if solver in ["sag", "saga"] else 30 + clf_multi = LogisticRegressionCV( + solver=solver, + multi_class="multinomial", + max_iter=max_iter, + random_state=42, + tol=1e-3 if solver in ["sag", "saga"] else 1e-2, + cv=2, + ) + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + train = scale(train) + + clf_multi.fit(train, target) + multi_score = clf_multi.score(train, target) + ovr_score = clf.score(train, target) + assert multi_score > ovr_score + + # Test attributes of LogisticRegressionCV + assert clf.coef_.shape == clf_multi.coef_.shape + assert_array_equal(clf_multi.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf_multi.Cs_.shape == (10,) + scores = np.asarray(list(clf_multi.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + +def test_logistic_regression_solvers(): + """Test solvers converge to the same result.""" + X, y = make_classification(n_features=10, n_informative=5, random_state=0) + + params = dict(fit_intercept=False, random_state=42, multi_class="ovr") + + regressors = { + solver: LogisticRegression(solver=solver, **params).fit(X, y) + for solver in SOLVERS + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_array_almost_equal( + regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=3 + ) + + +def test_logistic_regression_solvers_multiclass(): + """Test solvers converge to the same result for multiclass problems.""" + X, y = make_classification( + n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0 + ) + tol = 1e-7 + params = dict(fit_intercept=False, tol=tol, random_state=42, multi_class="ovr") + + # Override max iteration count for specific solvers to allow for + # proper convergence. 
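+ # (sag and saga are stochastic average-gradient solvers and typically need
+ # far more passes over such a small dataset than the full-batch solvers to
+ # reach tol=1e-7, hence the larger iteration budgets below.)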
+ solver_max_iter = {"sag": 1000, "saga": 10000} + + regressors = { + solver: LogisticRegression( + solver=solver, max_iter=solver_max_iter.get(solver, 100), **params + ).fit(X, y) + for solver in SOLVERS + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_allclose( + regressors[solver_1].coef_, + regressors[solver_2].coef_, + rtol=5e-3 if solver_2 == "saga" else 1e-3, + err_msg=f"{solver_1} vs {solver_2}", + ) + + +@pytest.mark.parametrize("weight", [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]) +@pytest.mark.parametrize("class_weight", ["weight", "balanced"]) +def test_logistic_regressioncv_class_weights(weight, class_weight, global_random_seed): + """Test class_weight for LogisticRegressionCV.""" + n_classes = len(weight) + if class_weight == "weight": + class_weight = weight + + X, y = make_classification( + n_samples=30, + n_features=3, + n_repeated=0, + n_informative=3, + n_redundant=0, + n_classes=n_classes, + random_state=global_random_seed, + ) + params = dict( + Cs=1, + fit_intercept=False, + multi_class="ovr", + class_weight=class_weight, + tol=1e-8, + ) + clf_lbfgs = LogisticRegressionCV(solver="lbfgs", **params) + + # XXX: lbfgs' line search can fail and cause a ConvergenceWarning for some + # 10% of the random seeds, but only on specific platforms (in particular + # when using Atlas BLAS/LAPACK implementation). Doubling the maxls internal + # parameter of the solver does not help. However this lack of proper + # convergence does not seem to prevent the assertion to pass, so we ignore + # the warning for now. + # See: https://github.com/scikit-learn/scikit-learn/pull/27649 + with ignore_warnings(category=ConvergenceWarning): + clf_lbfgs.fit(X, y) + + for solver in set(SOLVERS) - set(["lbfgs"]): + clf = LogisticRegressionCV(solver=solver, **params) + if solver in ("sag", "saga"): + clf.set_params( + tol=1e-18, max_iter=10000, random_state=global_random_seed + 1 + ) + clf.fit(X, y) + + assert_allclose( + clf.coef_, clf_lbfgs.coef_, rtol=1e-3, err_msg=f"{solver} vs lbfgs" + ) + + +def test_logistic_regression_sample_weights(): + X, y = make_classification( + n_samples=20, n_features=5, n_informative=3, n_classes=2, random_state=0 + ) + sample_weight = y + 1 + + for LR in [LogisticRegression, LogisticRegressionCV]: + kw = {"random_state": 42, "fit_intercept": False, "multi_class": "ovr"} + if LR is LogisticRegressionCV: + kw.update({"Cs": 3, "cv": 3}) + + # Test that passing sample_weight as ones is the same as + # not passing them at all (default None) + for solver in ["lbfgs", "liblinear"]: + clf_sw_none = LR(solver=solver, **kw) + clf_sw_ones = LR(solver=solver, **kw) + clf_sw_none.fit(X, y) + clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0])) + assert_allclose(clf_sw_none.coef_, clf_sw_ones.coef_, rtol=1e-4) + + # Test that sample weights work the same with the lbfgs, + # newton-cg, newton-cholesky and 'sag' solvers + clf_sw_lbfgs = LR(**kw, tol=1e-5) + clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight) + for solver in set(SOLVERS) - set(("lbfgs", "saga")): + clf_sw = LR(solver=solver, tol=1e-10 if solver == "sag" else 1e-5, **kw) + # ignore convergence warning due to small dataset with sag + with ignore_warnings(): + clf_sw.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf_sw_lbfgs.coef_, clf_sw.coef_, rtol=1e-4) + + # Test that passing class_weight as [1,2] is the same as + # passing class weight = [1,1] but adjusting sample weights + # to be 2 for all instances of class 2 + for solver in ["lbfgs", "liblinear"]: + clf_cw_12 = 
LR(solver=solver, class_weight={0: 1, 1: 2}, **kw) + clf_cw_12.fit(X, y) + clf_sw_12 = LR(solver=solver, **kw) + clf_sw_12.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf_cw_12.coef_, clf_sw_12.coef_, rtol=1e-4) + + # Test the above for l1 penalty and l2 penalty with dual=True. + # since the patched liblinear code is different. + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l1", + tol=1e-5, + random_state=42, + multi_class="ovr", + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l1", + tol=1e-5, + random_state=42, + multi_class="ovr", + ) + clf_sw.fit(X, y, sample_weight) + assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) + + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l2", + dual=True, + random_state=42, + multi_class="ovr", + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l2", + dual=True, + random_state=42, + multi_class="ovr", + ) + clf_sw.fit(X, y, sample_weight) + assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) + + +def _compute_class_weight_dictionary(y): + # helper for returning a dictionary instead of an array + classes = np.unique(y) + class_weight = compute_class_weight("balanced", classes=classes, y=y) + class_weight_dict = dict(zip(classes, class_weight)) + return class_weight_dict + + +def test_logistic_regression_class_weights(): + # Scale data to avoid convergence warnings with the lbfgs solver + X_iris = scale(iris.data) + # Multinomial case: remove 90% of class 0 + X = X_iris[45:, :] + y = iris.target[45:] + solvers = ("lbfgs", "newton-cg") + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in solvers: + clf1 = LogisticRegression( + solver=solver, multi_class="multinomial", class_weight="balanced" + ) + clf2 = LogisticRegression( + solver=solver, multi_class="multinomial", class_weight=class_weight_dict + ) + clf1.fit(X, y) + clf2.fit(X, y) + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4) + + # Binary case: remove 90% of class 0 and 100% of class 2 + X = X_iris[45:100, :] + y = iris.target[45:100] + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in set(SOLVERS) - set(("sag", "saga")): + clf1 = LogisticRegression( + solver=solver, multi_class="ovr", class_weight="balanced" + ) + clf2 = LogisticRegression( + solver=solver, multi_class="ovr", class_weight=class_weight_dict + ) + clf1.fit(X, y) + clf2.fit(X, y) + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6) + + +def test_logistic_regression_multinomial(): + # Tests for the multinomial option in logistic regression + + # Some basic attributes of Logistic Regression + n_samples, n_features, n_classes = 50, 20, 3 + X, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_informative=10, + n_classes=n_classes, + random_state=0, + ) + + X = StandardScaler(with_mean=False).fit_transform(X) + + # 'lbfgs' is used as a referenced + solver = "lbfgs" + ref_i = LogisticRegression(solver=solver, multi_class="multinomial", tol=1e-6) + ref_w = LogisticRegression( + solver=solver, multi_class="multinomial", fit_intercept=False, tol=1e-6 + ) + ref_i.fit(X, y) + ref_w.fit(X, y) + assert ref_i.coef_.shape == (n_classes, n_features) + assert ref_w.coef_.shape == (n_classes, n_features) + for solver in ["sag", "saga", "newton-cg"]: + clf_i = 
LogisticRegression( + solver=solver, + multi_class="multinomial", + random_state=42, + max_iter=2000, + tol=1e-7, + ) + clf_w = LogisticRegression( + solver=solver, + multi_class="multinomial", + random_state=42, + max_iter=2000, + tol=1e-7, + fit_intercept=False, + ) + clf_i.fit(X, y) + clf_w.fit(X, y) + assert clf_i.coef_.shape == (n_classes, n_features) + assert clf_w.coef_.shape == (n_classes, n_features) + + # Compare solutions between lbfgs and the other solvers + assert_allclose(ref_i.coef_, clf_i.coef_, rtol=1e-3) + assert_allclose(ref_w.coef_, clf_w.coef_, rtol=1e-2) + assert_allclose(ref_i.intercept_, clf_i.intercept_, rtol=1e-3) + + # Test that the path give almost the same results. However since in this + # case we take the average of the coefs after fitting across all the + # folds, it need not be exactly the same. + for solver in ["lbfgs", "newton-cg", "sag", "saga"]: + clf_path = LogisticRegressionCV( + solver=solver, max_iter=2000, tol=1e-6, multi_class="multinomial", Cs=[1.0] + ) + clf_path.fit(X, y) + assert_allclose(clf_path.coef_, ref_i.coef_, rtol=1e-2) + assert_allclose(clf_path.intercept_, ref_i.intercept_, rtol=1e-2) + + +def test_liblinear_decision_function_zero(): + # Test negative prediction when decision_function values are zero. + # Liblinear predicts the positive class when decision_function values + # are zero. This is a test to verify that we do not do the same. + # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600 + # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623 + X, y = make_classification(n_samples=5, n_features=5, random_state=0) + clf = LogisticRegression(fit_intercept=False, solver="liblinear", multi_class="ovr") + clf.fit(X, y) + + # Dummy data such that the decision function becomes zero. + X = np.zeros((5, 5)) + assert_array_equal(clf.predict(X), np.zeros(5)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_liblinear_logregcv_sparse(csr_container): + # Test LogRegCV with solver='liblinear' works for sparse matrices + + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + clf = LogisticRegressionCV(solver="liblinear", multi_class="ovr") + clf.fit(csr_container(X), y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_saga_sparse(csr_container): + # Test LogRegCV with solver='liblinear' works for sparse matrices + + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + clf = LogisticRegressionCV(solver="saga", tol=1e-2) + clf.fit(csr_container(X), y) + + +def test_logreg_intercept_scaling_zero(): + # Test that intercept_scaling is ignored when fit_intercept is False + + clf = LogisticRegression(fit_intercept=False) + clf.fit(X, Y1) + assert clf.intercept_ == 0.0 + + +def test_logreg_l1(): + # Because liblinear penalizes the intercept and saga does not, we do not + # fit the intercept to make it possible to compare the coefficients of + # the two models at convergence. 
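+ # With fit_intercept=False both solvers minimize the same objective,
+ # ||w||_1 + C * sum_i log_loss_i(w), so their coefficients should agree at
+ # convergence; with an intercept they would differ, because liblinear folds
+ # the (scaled) intercept into the penalized weight vector.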
+ rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(size=(n_samples, 3)) + X_constant = np.ones(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + multi_class="ovr", + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logreg_l1_sparse_data(csr_container): + # Because liblinear penalizes the intercept and saga does not, we do not + # fit the intercept to make it possible to compare the coefficients of + # the two models at convergence. + rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(scale=0.1, size=(n_samples, 3)) + X_constant = np.zeros(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + X[X < 1] = 0 + X = csr_container(X) + + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + multi_class="ovr", + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + # Check that solving on the sparse and dense data yield the same results + lr_saga_dense = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga_dense.fit(X.toarray(), y) + assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_) + + +@pytest.mark.parametrize("random_seed", [42]) +@pytest.mark.parametrize("penalty", ["l1", "l2"]) +def test_logistic_regression_cv_refit(random_seed, penalty): + # Test that when refit=True, logistic regression cv with the saga solver + # converges to the same solution as logistic regression with a fixed + # regularization parameter. + # Internally the LogisticRegressionCV model uses a warm start to refit on + # the full data model with the optimal C found by CV. As the penalized + # logistic regression loss is convex, we should still recover exactly + # the same solution as long as the stopping criterion is strict enough (and + # that there are no exactly duplicated features when penalty='l1'). 
+ X, y = make_classification(n_samples=100, n_features=20, random_state=random_seed) + common_params = dict( + solver="saga", + penalty=penalty, + random_state=random_seed, + max_iter=1000, + tol=1e-12, + ) + lr_cv = LogisticRegressionCV(Cs=[1.0], refit=True, **common_params) + lr_cv.fit(X, y) + lr = LogisticRegression(C=1.0, **common_params) + lr.fit(X, y) + assert_array_almost_equal(lr_cv.coef_, lr.coef_) + + +def test_logreg_predict_proba_multinomial(): + X, y = make_classification( + n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10 + ) + + # Predicted probabilities using the true-entropy loss should give a + # smaller loss than those using the ovr method. + clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs") + clf_multi.fit(X, y) + clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs") + clf_ovr.fit(X, y) + clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X)) + assert clf_ovr_loss > clf_multi_loss + + # Predicted probabilities using the soft-max function should give a + # smaller loss than those using the logistic function. + clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X)) + assert clf_wrong_loss > clf_multi_loss + + +@pytest.mark.parametrize("max_iter", np.arange(1, 5)) +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver, message", + [ + ( + "newton-cg", + "newton-cg failed to converge. Increase the number of iterations.", + ), + ( + "liblinear", + "Liblinear failed to converge, increase the number of iterations.", + ), + ("sag", "The max_iter was reached which means the coef_ did not converge"), + ("saga", "The max_iter was reached which means the coef_ did not converge"), + ("lbfgs", "lbfgs failed to converge"), + ("newton-cholesky", "Newton solver did not converge after [0-9]* iterations"), + ], +) +def test_max_iter(max_iter, multi_class, solver, message): + # Test that the maximum number of iteration is reached + X, y_bin = iris.data, iris.target.copy() + y_bin[y_bin == 2] = 0 + + if solver in ("liblinear", "newton-cholesky") and multi_class == "multinomial": + pytest.skip("'multinomial' is not supported by liblinear and newton-cholesky") + if solver == "newton-cholesky" and max_iter > 1: + pytest.skip("solver newton-cholesky might converge very fast") + + lr = LogisticRegression( + max_iter=max_iter, + tol=1e-15, + multi_class=multi_class, + random_state=0, + solver=solver, + ) + with pytest.warns(ConvergenceWarning, match=message): + lr.fit(X, y_bin) + + assert lr.n_iter_[0] == max_iter + + +@pytest.mark.parametrize("solver", SOLVERS) +def test_n_iter(solver): + # Test that self.n_iter_ has the correct format. + X, y = iris.data, iris.target + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + X = scale(X) + + n_classes = np.unique(y).shape[0] + assert n_classes == 3 + + # Also generate a binary classification sub-problem. 
+ y_bin = y.copy() + y_bin[y_bin == 2] = 0 + + n_Cs = 4 + n_cv_fold = 2 + + # Binary classification case + clf = LogisticRegression(tol=1e-2, C=1.0, solver=solver, random_state=42) + clf.fit(X, y_bin) + assert clf.n_iter_.shape == (1,) + + clf_cv = LogisticRegressionCV( + tol=1e-2, solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42 + ) + clf_cv.fit(X, y_bin) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + # OvR case + clf.set_params(multi_class="ovr").fit(X, y) + assert clf.n_iter_.shape == (n_classes,) + + clf_cv.set_params(multi_class="ovr").fit(X, y) + assert clf_cv.n_iter_.shape == (n_classes, n_cv_fold, n_Cs) + + # multinomial case + if solver in ("liblinear", "newton-cholesky"): + # This solver only supports one-vs-rest multiclass classification. + return + + # When using the multinomial objective function, there is a single + # optimization problem to solve for all classes at once: + clf.set_params(multi_class="multinomial").fit(X, y) + assert clf.n_iter_.shape == (1,) + + clf_cv.set_params(multi_class="multinomial").fit(X, y) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + +@pytest.mark.parametrize("solver", sorted(set(SOLVERS) - set(["liblinear"]))) +@pytest.mark.parametrize("warm_start", (True, False)) +@pytest.mark.parametrize("fit_intercept", (True, False)) +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +def test_warm_start(solver, warm_start, fit_intercept, multi_class): + # A 1-iteration second fit on same data should give almost same result + # with warm starting, and quite different result without warm starting. + # Warm starting does not work with liblinear solver. + X, y = iris.data, iris.target + + if solver == "newton-cholesky" and multi_class == "multinomial": + # solver does only support OvR + return + + clf = LogisticRegression( + tol=1e-4, + multi_class=multi_class, + warm_start=warm_start, + solver=solver, + random_state=42, + fit_intercept=fit_intercept, + ) + with ignore_warnings(category=ConvergenceWarning): + clf.fit(X, y) + coef_1 = clf.coef_ + + clf.max_iter = 1 + clf.fit(X, y) + cum_diff = np.sum(np.abs(coef_1 - clf.coef_)) + msg = ( + "Warm starting issue with %s solver in %s mode " + "with fit_intercept=%s and warm_start=%s" + % (solver, multi_class, str(fit_intercept), str(warm_start)) + ) + if warm_start: + assert 2.0 > cum_diff, msg + else: + assert cum_diff > 2.0, msg + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_saga_vs_liblinear(csr_container): + iris = load_iris() + X, y = iris.data, iris.target + X = np.concatenate([X] * 3) + y = np.concatenate([y] * 3) + + X_bin = X[y <= 1] + y_bin = y[y <= 1] * 2 - 1 + + X_sparse, y_sparse = make_classification( + n_samples=50, n_features=20, random_state=0 + ) + X_sparse = csr_container(X_sparse) + + for X, y in ((X_bin, y_bin), (X_sparse, y_sparse)): + for penalty in ["l1", "l2"]: + n_samples = X.shape[0] + # alpha=1e-3 is time consuming + for alpha in np.logspace(-1, 1, 3): + saga = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="saga", + multi_class="ovr", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + liblinear = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="liblinear", + multi_class="ovr", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + saga.fit(X, y) + liblinear.fit(X, y) + # Convergence for alpha=1e-3 is very slow + assert_array_almost_equal(saga.coef_, liblinear.coef_, 3) + + 
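+# Note on the parametrization used in test_saga_vs_liblinear above: the C
+# passed to LogisticRegression is the inverse of the per-sample penalty
+# strength alpha familiar from SGD-style formulations, i.e.
+# C = 1.0 / (alpha * n_samples). The same mapping appears again in
+# test_elastic_net_versus_sgd further down, where SGDClassifier is
+# configured with alpha = 1.0 / C / n_samples.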
+@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver", ["liblinear", "newton-cg", "newton-cholesky", "saga"] +) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dtype_match(solver, multi_class, fit_intercept, csr_container): + # Test that np.float32 input data is not cast to np.float64 when possible + # and that the output is approximately the same no matter the input format. + + if solver in ("liblinear", "newton-cholesky") and multi_class == "multinomial": + pytest.skip(f"Solver={solver} does not support multinomial logistic.") + + out32_type = np.float64 if solver == "liblinear" else np.float32 + + X_32 = np.array(X).astype(np.float32) + y_32 = np.array(Y1).astype(np.float32) + X_64 = np.array(X).astype(np.float64) + y_64 = np.array(Y1).astype(np.float64) + X_sparse_32 = csr_container(X, dtype=np.float32) + X_sparse_64 = csr_container(X, dtype=np.float64) + solver_tol = 5e-4 + + lr_templ = LogisticRegression( + solver=solver, + multi_class=multi_class, + random_state=42, + tol=solver_tol, + fit_intercept=fit_intercept, + ) + + # Check 32-bit type consistency + lr_32 = clone(lr_templ) + lr_32.fit(X_32, y_32) + assert lr_32.coef_.dtype == out32_type + + # Check 32-bit type consistency with sparsity + lr_32_sparse = clone(lr_templ) + lr_32_sparse.fit(X_sparse_32, y_32) + assert lr_32_sparse.coef_.dtype == out32_type + + # Check 64-bit type consistency + lr_64 = clone(lr_templ) + lr_64.fit(X_64, y_64) + assert lr_64.coef_.dtype == np.float64 + + # Check 64-bit type consistency with sparsity + lr_64_sparse = clone(lr_templ) + lr_64_sparse.fit(X_sparse_64, y_64) + assert lr_64_sparse.coef_.dtype == np.float64 + + # solver_tol bounds the norm of the loss gradient + # dw ~= inv(H)*grad ==> |dw| ~= |inv(H)| * solver_tol, where H - hessian + # + # See https://github.com/scikit-learn/scikit-learn/pull/13645 + # + # with Z = np.hstack((np.ones((3,1)), np.array(X))) + # In [8]: np.linalg.norm(np.diag([0,2,2]) + np.linalg.inv((Z.T @ Z)/4)) + # Out[8]: 1.7193336918135917 + + # factor of 2 to get the ball diameter + atol = 2 * 1.72 * solver_tol + if os.name == "nt" and _IS_32BIT: + # FIXME + atol = 1e-2 + + # Check accuracy consistency + assert_allclose(lr_32.coef_, lr_64.coef_.astype(np.float32), atol=atol) + + if solver == "saga" and fit_intercept: + # FIXME: SAGA on sparse data fits the intercept inaccurately with the + # default tol and max_iter parameters. + atol = 1e-1 + + assert_allclose(lr_32.coef_, lr_32_sparse.coef_, atol=atol) + assert_allclose(lr_64.coef_, lr_64_sparse.coef_, atol=atol) + + +def test_warm_start_converge_LR(): + # Test to see that the logistic regression converges on warm start, + # with multi_class='multinomial'. 
Non-regressive test for #10836 + + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = np.array([1] * 100 + [-1] * 100) + lr_no_ws = LogisticRegression( + multi_class="multinomial", solver="sag", warm_start=False, random_state=0 + ) + lr_ws = LogisticRegression( + multi_class="multinomial", solver="sag", warm_start=True, random_state=0 + ) + + lr_no_ws_loss = log_loss(y, lr_no_ws.fit(X, y).predict_proba(X)) + for i in range(5): + lr_ws.fit(X, y) + lr_ws_loss = log_loss(y, lr_ws.predict_proba(X)) + assert_allclose(lr_no_ws_loss, lr_ws_loss, rtol=1e-5) + + +def test_elastic_net_coeffs(): + # make sure elasticnet penalty gives different coefficients from l1 and l2 + # with saga solver (l1_ratio different from 0 or 1) + X, y = make_classification(random_state=0) + + C = 2.0 + l1_ratio = 0.5 + coeffs = list() + for penalty, ratio in (("elasticnet", l1_ratio), ("l1", None), ("l2", None)): + lr = LogisticRegression( + penalty=penalty, + C=C, + solver="saga", + random_state=0, + l1_ratio=ratio, + tol=1e-3, + max_iter=200, + ) + lr.fit(X, y) + coeffs.append(lr.coef_) + + elastic_net_coeffs, l1_coeffs, l2_coeffs = coeffs + # make sure coeffs differ by at least .1 + assert not np.allclose(elastic_net_coeffs, l1_coeffs, rtol=0, atol=0.1) + assert not np.allclose(elastic_net_coeffs, l2_coeffs, rtol=0, atol=0.1) + assert not np.allclose(l2_coeffs, l1_coeffs, rtol=0, atol=0.1) + + +@pytest.mark.parametrize("C", [0.001, 0.1, 1, 10, 100, 1000, 1e6]) +@pytest.mark.parametrize("penalty, l1_ratio", [("l1", 1), ("l2", 0)]) +def test_elastic_net_l1_l2_equivalence(C, penalty, l1_ratio): + # Make sure elasticnet is equivalent to l1 when l1_ratio=1 and to l2 when + # l1_ratio=0. + X, y = make_classification(random_state=0) + + lr_enet = LogisticRegression( + penalty="elasticnet", + C=C, + l1_ratio=l1_ratio, + solver="saga", + random_state=0, + tol=1e-2, + ) + lr_expected = LogisticRegression( + penalty=penalty, C=C, solver="saga", random_state=0, tol=1e-2 + ) + lr_enet.fit(X, y) + lr_expected.fit(X, y) + + assert_array_almost_equal(lr_enet.coef_, lr_expected.coef_) + + +@pytest.mark.parametrize("C", [0.001, 1, 100, 1e6]) +def test_elastic_net_vs_l1_l2(C): + # Make sure that elasticnet with grid search on l1_ratio gives same or + # better results than just l1 or just l2. + + X, y = make_classification(500, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + param_grid = {"l1_ratio": np.linspace(0, 1, 5)} + + enet_clf = LogisticRegression( + penalty="elasticnet", C=C, solver="saga", random_state=0, tol=1e-2 + ) + gs = GridSearchCV(enet_clf, param_grid, refit=True) + + l1_clf = LogisticRegression( + penalty="l1", C=C, solver="saga", random_state=0, tol=1e-2 + ) + l2_clf = LogisticRegression( + penalty="l2", C=C, solver="saga", random_state=0, tol=1e-2 + ) + + for clf in (gs, l1_clf, l2_clf): + clf.fit(X_train, y_train) + + assert gs.score(X_test, y_test) >= l1_clf.score(X_test, y_test) + assert gs.score(X_test, y_test) >= l2_clf.score(X_test, y_test) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_LogisticRegression_elastic_net_objective(C, l1_ratio): + # Check that training with a penalty matching the objective leads + # to a lower objective. + # Here we train a logistic regression with l2 (a) and elasticnet (b) + # penalties, and compute the elasticnet objective. That of a should be + # greater than that of b (both objectives are convex). 
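+ # Concretely, enet_objective below evaluates
+ #     C * log_loss(y, predict_proba(X)) + l1_ratio * ||w||_1
+ #       + 0.5 * (1 - l1_ratio) * ||w||_2^2
+ # for each fitted model, i.e. a data-fit term plus the elastic-net penalty,
+ # and the model trained with the matching elasticnet penalty is expected to
+ # score lower on it than the purely l2-penalized one.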
+ X, y = make_classification( + n_samples=1000, + n_classes=2, + n_features=20, + n_informative=10, + n_redundant=0, + n_repeated=0, + random_state=0, + ) + X = scale(X) + + lr_enet = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + C=C, + l1_ratio=l1_ratio, + fit_intercept=False, + ) + lr_l2 = LogisticRegression( + penalty="l2", solver="saga", random_state=0, C=C, fit_intercept=False + ) + lr_enet.fit(X, y) + lr_l2.fit(X, y) + + def enet_objective(lr): + coef = lr.coef_.ravel() + obj = C * log_loss(y, lr.predict_proba(X)) + obj += l1_ratio * np.sum(np.abs(coef)) + obj += (1.0 - l1_ratio) * 0.5 * np.dot(coef, coef) + return obj + + assert enet_objective(lr_enet) < enet_objective(lr_l2) + + +@pytest.mark.parametrize("multi_class", ("ovr", "multinomial")) +def test_LogisticRegressionCV_GridSearchCV_elastic_net(multi_class): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet + + if multi_class == "ovr": + # This is actually binary classification, ovr multiclass is treated in + # test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr + X, y = make_classification(random_state=0) + else: + X, y = make_classification( + n_samples=100, n_classes=3, n_informative=3, random_state=0 + ) + + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + multi_class=multi_class, + tol=1e-2, + ) + lrcv.fit(X, y) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + multi_class=multi_class, + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X, y) + + assert gs.best_params_["l1_ratio"] == lrcv.l1_ratio_[0] + assert gs.best_params_["C"] == lrcv.C_[0] + + +def test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr(): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet and multiclass is ovr. We can't + # compare best_params like in the previous test because + # LogisticRegressionCV with multi_class='ovr' will have one C and one + # l1_param for each class, while LogisticRegression will share the + # parameters over the *n_classes* classifiers. 
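+ # Instead of comparing hyperparameters, the check below therefore compares
+ # predictions and only requires the two models to agree on at least 80% of
+ # the train and test samples.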
+ + X, y = make_classification( + n_samples=100, n_classes=3, n_informative=3, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + lrcv.fit(X_train, y_train) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X_train, y_train) + + # Check that predictions are 80% the same + assert (lrcv.predict(X_train) == gs.predict(X_train)).mean() >= 0.8 + assert (lrcv.predict(X_test) == gs.predict(X_test)).mean() >= 0.8 + + +@pytest.mark.parametrize("penalty", ("l2", "elasticnet")) +@pytest.mark.parametrize("multi_class", ("ovr", "multinomial", "auto")) +def test_LogisticRegressionCV_no_refit(penalty, multi_class): + # Test LogisticRegressionCV attribute shapes when refit is False + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + if penalty == "elasticnet": + l1_ratios = np.linspace(0, 1, 2) + else: + l1_ratios = None + + lrcv = LogisticRegressionCV( + penalty=penalty, + Cs=Cs, + solver="saga", + l1_ratios=l1_ratios, + random_state=0, + multi_class=multi_class, + tol=1e-2, + refit=False, + ) + lrcv.fit(X, y) + assert lrcv.C_.shape == (n_classes,) + assert lrcv.l1_ratio_.shape == (n_classes,) + assert lrcv.coef_.shape == (n_classes, n_features) + + +def test_LogisticRegressionCV_elasticnet_attribute_shapes(): + # Make sure the shapes of scores_ and coefs_paths_ attributes are correct + # when using elasticnet (added one dimension for l1_ratios) + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + l1_ratios = np.linspace(0, 1, 2) + + n_folds = 2 + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=n_folds, + l1_ratios=l1_ratios, + multi_class="ovr", + random_state=0, + tol=1e-2, + ) + lrcv.fit(X, y) + coefs_paths = np.asarray(list(lrcv.coefs_paths_.values())) + assert coefs_paths.shape == ( + n_classes, + n_folds, + Cs.size, + l1_ratios.size, + n_features + 1, + ) + scores = np.asarray(list(lrcv.scores_.values())) + assert scores.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + assert lrcv.n_iter_.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + +def test_l1_ratio_non_elasticnet(): + msg = ( + r"l1_ratio parameter is only used when penalty is" + r" 'elasticnet'\. 
Got \(penalty=l1\)" + ) + with pytest.warns(UserWarning, match=msg): + LogisticRegression(penalty="l1", solver="saga", l1_ratio=0.5).fit(X, Y1) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_elastic_net_versus_sgd(C, l1_ratio): + # Compare elasticnet penalty in LogisticRegression() and SGD(loss='log') + n_samples = 500 + X, y = make_classification( + n_samples=n_samples, + n_classes=2, + n_features=5, + n_informative=5, + n_redundant=0, + n_repeated=0, + random_state=1, + ) + X = scale(X) + + sgd = SGDClassifier( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=None, + max_iter=2000, + l1_ratio=l1_ratio, + alpha=1.0 / C / n_samples, + loss="log_loss", + ) + log = LogisticRegression( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=1e-5, + max_iter=1000, + l1_ratio=l1_ratio, + C=C, + solver="saga", + ) + + sgd.fit(X, y) + log.fit(X, y) + assert_array_almost_equal(sgd.coef_, log.coef_, decimal=1) + + +def test_logistic_regression_path_coefs_multinomial(): + # Make sure that the returned coefs by logistic_regression_path when + # multi_class='multinomial' don't override each other (used to be a + # bug). + X, y = make_classification( + n_samples=200, + n_classes=3, + n_informative=2, + n_redundant=0, + n_clusters_per_class=1, + random_state=0, + n_features=2, + ) + Cs = [0.00001, 1, 10000] + coefs, _, _ = _logistic_regression_path( + X, + y, + penalty="l1", + Cs=Cs, + solver="saga", + random_state=0, + multi_class="multinomial", + ) + + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[1], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[2], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[1], coefs[2], decimal=1) + + +@pytest.mark.parametrize( + "est", + [ + LogisticRegression(random_state=0, max_iter=500), + LogisticRegressionCV(random_state=0, cv=3, Cs=3, tol=1e-3, max_iter=500), + ], + ids=lambda x: x.__class__.__name__, +) +@pytest.mark.parametrize("solver", SOLVERS) +def test_logistic_regression_multi_class_auto(est, solver): + # check multi_class='auto' => multi_class='ovr' + # iff binary y or liblinear or newton-cholesky + + def fit(X, y, **kw): + return clone(est).set_params(**kw).fit(X, y) + + scaled_data = scale(iris.data) + X = scaled_data[::10] + X2 = scaled_data[1::10] + y_multi = iris.target[::10] + y_bin = y_multi == 0 + est_auto_bin = fit(X, y_bin, multi_class="auto", solver=solver) + est_ovr_bin = fit(X, y_bin, multi_class="ovr", solver=solver) + assert_allclose(est_auto_bin.coef_, est_ovr_bin.coef_) + assert_allclose(est_auto_bin.predict_proba(X2), est_ovr_bin.predict_proba(X2)) + + est_auto_multi = fit(X, y_multi, multi_class="auto", solver=solver) + if solver in ("liblinear", "newton-cholesky"): + est_ovr_multi = fit(X, y_multi, multi_class="ovr", solver=solver) + assert_allclose(est_auto_multi.coef_, est_ovr_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_ovr_multi.predict_proba(X2) + ) + else: + est_multi_multi = fit(X, y_multi, multi_class="multinomial", solver=solver) + assert_allclose(est_auto_multi.coef_, est_multi_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_multi_multi.predict_proba(X2) + ) + + # Make sure multi_class='ovr' is distinct from ='multinomial' + assert not np.allclose( + est_auto_bin.coef_, + fit(X, y_bin, multi_class="multinomial", solver=solver).coef_, + ) + assert not 
np.allclose( + est_auto_bin.coef_, + fit(X, y_multi, multi_class="multinomial", solver=solver).coef_, + ) + + +@pytest.mark.parametrize("solver", sorted(set(SOLVERS) - set(["liblinear"]))) +def test_penalty_none(solver): + # - Make sure warning is raised if penalty=None and C is set to a + # non-default value. + # - Make sure setting penalty=None is equivalent to setting C=np.inf with + # l2 penalty. + X, y = make_classification(n_samples=1000, n_redundant=0, random_state=0) + + msg = "Setting penalty=None will ignore the C" + lr = LogisticRegression(penalty=None, solver=solver, C=4) + with pytest.warns(UserWarning, match=msg): + lr.fit(X, y) + + lr_none = LogisticRegression(penalty=None, solver=solver, random_state=0) + lr_l2_C_inf = LogisticRegression( + penalty="l2", C=np.inf, solver=solver, random_state=0 + ) + pred_none = lr_none.fit(X, y).predict(X) + pred_l2_C_inf = lr_l2_C_inf.fit(X, y).predict(X) + assert_array_equal(pred_none, pred_l2_C_inf) + + +@pytest.mark.parametrize( + "params", + [ + {"penalty": "l1", "dual": False, "tol": 1e-6, "max_iter": 1000}, + {"penalty": "l2", "dual": True, "tol": 1e-12, "max_iter": 1000}, + {"penalty": "l2", "dual": False, "tol": 1e-12, "max_iter": 1000}, + ], +) +def test_logisticregression_liblinear_sample_weight(params): + # check that we support sample_weight with liblinear in all possible cases: + # l1-primal, l2-primal, l2-dual + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.dtype("float"), + ) + y = np.array( + [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int") + ) + + X2 = np.vstack([X, X]) + y2 = np.hstack([y, 3 - y]) + sample_weight = np.ones(shape=len(y) * 2) + sample_weight[len(y) :] = 0 + X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0) + + base_clf = LogisticRegression(solver="liblinear", random_state=42) + base_clf.set_params(**params) + clf_no_weight = clone(base_clf).fit(X, y) + clf_with_weight = clone(base_clf).fit(X2, y2, sample_weight=sample_weight) + + for method in ("predict", "predict_proba", "decision_function"): + X_clf_no_weight = getattr(clf_no_weight, method)(X) + X_clf_with_weight = getattr(clf_with_weight, method)(X) + assert_allclose(X_clf_no_weight, X_clf_with_weight) + + +def test_scores_attribute_layout_elasticnet(): + # Non regression test for issue #14955. + # when penalty is elastic net the scores_ attribute has shape + # (n_classes, n_Cs, n_l1_ratios) + # We here make sure that the second dimension indeed corresponds to Cs and + # the third dimension corresponds to l1_ratios. 
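+ # scores_ is a dict keyed by class label; below, scores_[1] is averaged over
+ # the CV folds and then indexed as [i, j], where i runs over Cs and j over
+ # l1_ratios, and each entry is compared against cross_val_score for a
+ # LogisticRegression fitted with that (C, l1_ratio) pair.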
+ + X, y = make_classification(n_samples=1000, random_state=0) + cv = StratifiedKFold(n_splits=5) + + l1_ratios = [0.1, 0.9] + Cs = [0.1, 1, 10] + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + solver="saga", + l1_ratios=l1_ratios, + Cs=Cs, + cv=cv, + random_state=0, + max_iter=250, + tol=1e-3, + ) + lrcv.fit(X, y) + + avg_scores_lrcv = lrcv.scores_[1].mean(axis=0) # average over folds + + for i, C in enumerate(Cs): + for j, l1_ratio in enumerate(l1_ratios): + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + C=C, + l1_ratio=l1_ratio, + random_state=0, + max_iter=250, + tol=1e-3, + ) + + avg_score_lr = cross_val_score(lr, X, y, cv=cv).mean() + assert avg_scores_lrcv[i, j] == pytest.approx(avg_score_lr) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_multinomial_identifiability_on_iris(fit_intercept): + """Test that the multinomial classification is identifiable. + + A multinomial with c classes can be modeled with + probability_k = exp(X@coef_k) / sum(exp(X@coef_l), l=1..c) for k=1..c. + This is not identifiable, unless one chooses a further constraint. + According to [1], the maximum of the L2 penalized likelihood automatically + satisfies the symmetric constraint: + sum(coef_k, k=1..c) = 0 + + Further details can be found in [2]. + + Reference + --------- + .. [1] :doi:`Zhu, Ji and Trevor J. Hastie. "Classification of gene microarrays by + penalized logistic regression". Biostatistics 5 3 (2004): 427-43. + <10.1093/biostatistics/kxg046>` + + .. [2] :arxiv:`Noah Simon and Jerome Friedman and Trevor Hastie. (2013) + "A Blockwise Descent Algorithm for Group-penalized Multiresponse and + Multinomial Regression". <1311.6529>` + """ + # Test logistic regression with the iris dataset + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + clf = LogisticRegression( + C=len(iris.data), + solver="lbfgs", + multi_class="multinomial", + fit_intercept=fit_intercept, + ) + # Scaling X to ease convergence. + X_scaled = scale(iris.data) + clf.fit(X_scaled, target) + + # axis=0 is sum over classes + assert_allclose(clf.coef_.sum(axis=0), 0, atol=1e-10) + if fit_intercept: + clf.intercept_.sum(axis=0) == pytest.approx(0, abs=1e-15) + + +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial", "auto"]) +@pytest.mark.parametrize("class_weight", [{0: 1.0, 1: 10.0, 2: 1.0}, "balanced"]) +def test_sample_weight_not_modified(multi_class, class_weight): + X, y = load_iris(return_X_y=True) + n_features = len(X) + W = np.ones(n_features) + W[: n_features // 2] = 2 + + expected = W.copy() + + clf = LogisticRegression( + random_state=0, class_weight=class_weight, max_iter=200, multi_class=multi_class + ) + clf.fit(X, y, sample_weight=W) + assert_allclose(expected, W) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_large_sparse_matrix(solver, global_random_seed, csr_container): + # Solvers either accept large sparse matrices, or raise helpful error. + # Non-regression test for pull-request #21093. 
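+ # Concretely (per the branches below): liblinear, sag and saga only accept
+ # 32-bit integer indices and should raise a ValueError mentioning
+ # "Only sparse matrices with 32-bit integer indices", while the remaining
+ # solvers fit the int64-indexed input directly.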
+ + # generate sparse matrix with int64 indices + X = csr_container(sparse.rand(20, 10, random_state=global_random_seed)) + for attr in ["indices", "indptr"]: + setattr(X, attr, getattr(X, attr).astype("int64")) + rng = np.random.RandomState(global_random_seed) + y = rng.randint(2, size=X.shape[0]) + + if solver in ["liblinear", "sag", "saga"]: + msg = "Only sparse matrices with 32-bit integer indices" + with pytest.raises(ValueError, match=msg): + LogisticRegression(solver=solver).fit(X, y) + else: + LogisticRegression(solver=solver).fit(X, y) + + +def test_single_feature_newton_cg(): + # Test that Newton-CG works with a single feature and intercept. + # Non-regression test for issue #23605. + + X = np.array([[0.5, 0.65, 1.1, 1.25, 0.8, 0.54, 0.95, 0.7]]).T + y = np.array([1, 1, 0, 0, 1, 1, 0, 1]) + assert X.shape[1] == 1 + LogisticRegression(solver="newton-cg", fit_intercept=True).fit(X, y) + + +def test_liblinear_not_stuck(): + # Non-regression https://github.com/scikit-learn/scikit-learn/issues/18264 + X = iris.data.copy() + y = iris.target.copy() + X = X[y != 2] + y = y[y != 2] + X_prep = StandardScaler().fit_transform(X) + + C = l1_min_c(X, y, loss="log") * 10 ** (10 / 29) + clf = LogisticRegression( + penalty="l1", + solver="liblinear", + tol=1e-6, + max_iter=100, + intercept_scaling=10000.0, + random_state=0, + C=C, + ) + + # test that the fit does not raise a ConvergenceWarning + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + clf.fit(X_prep, y) + + +@pytest.mark.usefixtures("enable_slep006") +def test_lr_cv_scores_differ_when_sample_weight_is_requested(): + """Test that `sample_weight` is correctly passed to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` by + checking the difference in scores with the case when `sample_weight` + is not requested. 
+ """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + + assert not np.allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert not np.allclose(score_1, score_2) + + +def test_lr_cv_scores_without_enabling_metadata_routing(): + """Test that `sample_weight` is passed correctly to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` even + when `enable_metadata_routing=False` + """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + with config_context(enable_metadata_routing=False): + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + + with config_context(enable_metadata_routing=True): + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert_allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + assert_allclose(score_1, score_2) + + +@pytest.mark.parametrize("solver", SOLVERS) +def test_zero_max_iter(solver): + # Make sure we can inspect the state of LogisticRegression right after + # initialization (before the first weight update). + X, y = load_iris(return_X_y=True) + y = y == 2 + with ignore_warnings(category=ConvergenceWarning): + clf = LogisticRegression(solver=solver, max_iter=0).fit(X, y) + if solver not in ["saga", "sag"]: + # XXX: sag and saga have n_iter_ = [1]... + assert clf.n_iter_ == 0 + + if solver != "lbfgs": + # XXX: lbfgs has already started to update the coefficients... 
+ assert_allclose(clf.coef_, np.zeros_like(clf.coef_)) + assert_allclose( + clf.decision_function(X), + np.full(shape=X.shape[0], fill_value=clf.intercept_), + ) + assert_allclose( + clf.predict_proba(X), + np.full(shape=(X.shape[0], 2), fill_value=0.5), + ) + assert clf.score(X, y) < 0.7 + + +def test_passing_params_without_enabling_metadata_routing(): + """Test that the right error message is raised when metadata params + are passed while not supported when `enable_metadata_routing=False`.""" + X, y = make_classification(n_samples=10, random_state=0) + lr_cv = LogisticRegressionCV() + msg = "is only supported if enable_metadata_routing=True" + + with config_context(enable_metadata_routing=False): + params = {"extra_param": 1.0} + + with pytest.raises(ValueError, match=msg): + lr_cv.fit(X, y, **params) + + with pytest.raises(ValueError, match=msg): + lr_cv.score(X, y, **params) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py new file mode 100644 index 0000000000000000000000000000000000000000..7f4354fc803d24c2396f5105a5a4ce52c0a3e9fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py @@ -0,0 +1,262 @@ +# Author: Vlad Niculae +# License: BSD 3 clause + + +import numpy as np +import pytest + +from sklearn.datasets import make_sparse_coded_signal +from sklearn.linear_model import ( + LinearRegression, + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + orthogonal_mp, + orthogonal_mp_gram, +) +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) + +n_samples, n_features, n_nonzero_coefs, n_targets = 25, 35, 5, 3 +y, X, gamma = make_sparse_coded_signal( + n_samples=n_targets, + n_components=n_features, + n_features=n_samples, + n_nonzero_coefs=n_nonzero_coefs, + random_state=0, +) +y, X, gamma = y.T, X.T, gamma.T +# Make X not of norm 1 for testing +X *= 10 +y *= 10 +G, Xy = np.dot(X.T, X), np.dot(X.T, y) +# this makes X (n_samples, n_features) +# and y (n_samples, 3) + + +def test_correct_shapes(): + assert orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp(X, y, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_correct_shapes_gram(): + assert orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_n_nonzero_coefs(): + assert np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)) <= 5 + assert ( + np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5, precompute=True)) + <= 5 + ) + + +def test_tol(): + tol = 0.5 + gamma = orthogonal_mp(X, y[:, 0], tol=tol) + gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True) + assert np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol + assert np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol + + +def test_with_without_gram(): + assert_array_almost_equal( + orthogonal_mp(X, y, n_nonzero_coefs=5), + orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True), + ) + + +def test_with_without_gram_tol(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=1.0), orthogonal_mp(X, y, tol=1.0, precompute=True) + ) + + +def test_unreachable_accuracy(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0), orthogonal_mp(X, y, n_nonzero_coefs=n_features) + ) + warning_message = ( 
+ "Orthogonal matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0, precompute=True), + orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_features), + ) + + +@pytest.mark.parametrize("positional_params", [(X, y), (G, Xy)]) +@pytest.mark.parametrize( + "keyword_params", + [{"n_nonzero_coefs": n_features + 1}], +) +def test_bad_input(positional_params, keyword_params): + with pytest.raises(ValueError): + orthogonal_mp(*positional_params, **keyword_params) + + +def test_perfect_signal_recovery(): + (idx,) = gamma[:, 0].nonzero() + gamma_rec = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5) + gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5) + assert_array_equal(idx, np.flatnonzero(gamma_rec)) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_orthogonal_mp_gram_readonly(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/5956 + (idx,) = gamma[:, 0].nonzero() + G_readonly = G.copy() + G_readonly.setflags(write=False) + Xy_readonly = Xy.copy() + Xy_readonly.setflags(write=False) + gamma_gram = orthogonal_mp_gram( + G_readonly, Xy_readonly[:, 0], n_nonzero_coefs=5, copy_Gram=False, copy_Xy=False + ) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_estimator(): + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs) + omp.fit(X, y[:, 0]) + assert omp.coef_.shape == (n_features,) + assert omp.intercept_.shape == () + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_.shape == (n_targets,) + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + coef_normalized = omp.coef_[0].copy() + omp.set_params(fit_intercept=True) + omp.fit(X, y[:, 0]) + assert_array_almost_equal(coef_normalized, omp.coef_) + + omp.set_params(fit_intercept=False) + omp.fit(X, y[:, 0]) + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + assert omp.coef_.shape == (n_features,) + assert omp.intercept_ == 0 + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_ == 0 + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + +def test_identical_regressors(): + newX = X.copy() + newX[:, 1] = newX[:, 0] + gamma = np.zeros(n_features) + gamma[0] = gamma[1] = 1.0 + newy = np.dot(newX, gamma) + warning_message = ( + "Orthogonal matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." 
+ ) + with pytest.warns(RuntimeWarning, match=warning_message): + orthogonal_mp(newX, newy, n_nonzero_coefs=2) + + +def test_swapped_regressors(): + gamma = np.zeros(n_features) + # X[:, 21] should be selected first, then X[:, 0] selected second, + # which will take X[:, 21]'s place in case the algorithm does + # column swapping for optimization (which is the case at the moment) + gamma[21] = 1.0 + gamma[0] = 0.5 + new_y = np.dot(X, gamma) + new_Xy = np.dot(X.T, new_y) + gamma_hat = orthogonal_mp(X, new_y, n_nonzero_coefs=2) + gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, n_nonzero_coefs=2) + assert_array_equal(np.flatnonzero(gamma_hat), [0, 21]) + assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21]) + + +def test_no_atoms(): + y_empty = np.zeros_like(y) + Xy_empty = np.dot(X.T, y_empty) + gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, n_nonzero_coefs=1) + gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, n_nonzero_coefs=1) + assert np.all(gamma_empty == 0) + assert np.all(gamma_empty_gram == 0) + + +def test_omp_path(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_return_path_prop_with_gram(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True, precompute=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False, precompute=True) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_cv(): + y_ = y[:, 0] + gamma_ = gamma[:, 0] + ompcv = OrthogonalMatchingPursuitCV(fit_intercept=False, max_iter=10) + ompcv.fit(X, y_) + assert ompcv.n_nonzero_coefs_ == n_nonzero_coefs + assert_array_almost_equal(ompcv.coef_, gamma_) + omp = OrthogonalMatchingPursuit( + fit_intercept=False, n_nonzero_coefs=ompcv.n_nonzero_coefs_ + ) + omp.fit(X, y_) + assert_array_almost_equal(ompcv.coef_, omp.coef_) + + +def test_omp_reaches_least_squares(): + # Use small simple data; it's a sanity check but OMP can stop early + rng = check_random_state(0) + n_samples, n_features = (10, 8) + n_targets = 3 + X = rng.randn(n_samples, n_features) + Y = rng.randn(n_samples, n_targets) + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features) + lstsq = LinearRegression() + omp.fit(X, Y) + lstsq.fit(X, Y) + assert_array_almost_equal(omp.coef_, lstsq.coef_) + + +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +def test_omp_gram_dtype_match(data_type): + # verify matching input data type and output data type + coef = orthogonal_mp_gram( + G.astype(data_type), Xy.astype(data_type), n_nonzero_coefs=5 + ) + assert coef.dtype == data_type + + +def test_omp_gram_numerical_consistency(): + # verify numericaly consistency among np.float32 and np.float64 + coef_32 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float32), n_nonzero_coefs=5 + ) + coef_64 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float64), n_nonzero_coefs=5 + ) + assert_allclose(coef_32, coef_64) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_quantile.py 
b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..53c1e1f071dcb11792163003bb3a3f3a290bb0aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_quantile.py @@ -0,0 +1,306 @@ +# Authors: David Dale +# Christian Lorentzen +# License: BSD 3 clause + +import numpy as np +import pytest +from pytest import approx +from scipy.optimize import minimize + +from sklearn.datasets import make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import HuberRegressor, QuantileRegressor +from sklearn.metrics import mean_pinball_loss +from sklearn.utils._testing import assert_allclose, skip_if_32bit +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + parse_version, + sp_version, +) + + +@pytest.fixture +def X_y_data(): + X, y = make_regression(n_samples=10, n_features=1, random_state=0, noise=1) + return X, y + + +@pytest.fixture +def default_solver(): + return "highs" if sp_version >= parse_version("1.6.0") else "interior-point" + + +@pytest.mark.skipif( + parse_version(sp_version.base_version) >= parse_version("1.11"), + reason="interior-point solver is not available in SciPy 1.11", +) +@pytest.mark.parametrize("solver", ["interior-point", "revised simplex"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_incompatible_solver_for_sparse_input(X_y_data, solver, csc_container): + X, y = X_y_data + X_sparse = csc_container(X) + err_msg = ( + f"Solver {solver} does not support sparse X. Use solver 'highs' for example." + ) + with pytest.raises(ValueError, match=err_msg): + QuantileRegressor(solver=solver).fit(X_sparse, y) + + +@pytest.mark.parametrize("solver", ("highs-ds", "highs-ipm", "highs")) +@pytest.mark.skipif( + sp_version >= parse_version("1.6.0"), + reason="Solvers are available as of scipy 1.6.0", +) +def test_too_new_solver_methods_raise_error(X_y_data, solver): + """Test that highs solver raises for scipy<1.6.0.""" + X, y = X_y_data + with pytest.raises(ValueError, match="scipy>=1.6.0"): + QuantileRegressor(solver=solver).fit(X, y) + + +@pytest.mark.parametrize( + "quantile, alpha, intercept, coef", + [ + # for 50% quantile w/o regularization, any slope in [1, 10] is okay + [0.5, 0, 1, None], + # if positive error costs more, the slope is maximal + [0.51, 0, 1, 10], + # if negative error costs more, the slope is minimal + [0.49, 0, 1, 1], + # for a small lasso penalty, the slope is also minimal + [0.5, 0.01, 1, 1], + # for a large lasso penalty, the model predicts the constant median + [0.5, 100, 2, 0], + ], +) +def test_quantile_toy_example(quantile, alpha, intercept, coef, default_solver): + # test how different parameters affect a small intuitive example + X = [[0], [1], [1]] + y = [1, 2, 11] + model = QuantileRegressor( + quantile=quantile, alpha=alpha, solver=default_solver + ).fit(X, y) + assert_allclose(model.intercept_, intercept, atol=1e-2) + if coef is not None: + assert_allclose(model.coef_[0], coef, atol=1e-2) + if alpha < 100: + assert model.coef_[0] >= 1 + assert model.coef_[0] <= 10 + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_quantile_equals_huber_for_low_epsilon(fit_intercept, default_solver): + X, y = make_regression(n_samples=100, n_features=20, random_state=0, noise=1.0) + alpha = 1e-4 + huber = HuberRegressor( + epsilon=1 + 1e-4, alpha=alpha, fit_intercept=fit_intercept + ).fit(X, y) + quant = 
QuantileRegressor( + alpha=alpha, fit_intercept=fit_intercept, solver=default_solver + ).fit(X, y) + assert_allclose(huber.coef_, quant.coef_, atol=1e-1) + if fit_intercept: + assert huber.intercept_ == approx(quant.intercept_, abs=1e-1) + # check that we still predict fraction + assert np.mean(y < quant.predict(X)) == approx(0.5, abs=1e-1) + + +@pytest.mark.parametrize("q", [0.5, 0.9, 0.05]) +def test_quantile_estimates_calibration(q, default_solver): + # Test that model estimates percentage of points below the prediction + X, y = make_regression(n_samples=1000, n_features=20, random_state=0, noise=1.0) + quant = QuantileRegressor( + quantile=q, + alpha=0, + solver=default_solver, + ).fit(X, y) + assert np.mean(y < quant.predict(X)) == approx(q, abs=1e-2) + + +def test_quantile_sample_weight(default_solver): + # test that with unequal sample weights we still estimate weighted fraction + n = 1000 + X, y = make_regression(n_samples=n, n_features=5, random_state=0, noise=10.0) + weight = np.ones(n) + # when we increase weight of upper observations, + # estimate of quantile should go up + weight[y > y.mean()] = 100 + quant = QuantileRegressor(quantile=0.5, alpha=1e-8, solver=default_solver) + quant.fit(X, y, sample_weight=weight) + fraction_below = np.mean(y < quant.predict(X)) + assert fraction_below > 0.5 + weighted_fraction_below = np.average(y < quant.predict(X), weights=weight) + assert weighted_fraction_below == approx(0.5, abs=3e-2) + + +@pytest.mark.skipif( + sp_version < parse_version("1.6.0"), + reason="The `highs` solver is available from the 1.6.0 scipy version", +) +@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8]) +def test_asymmetric_error(quantile, default_solver): + """Test quantile regression for asymmetric distributed targets.""" + n_samples = 1000 + rng = np.random.RandomState(42) + X = np.concatenate( + ( + np.abs(rng.randn(n_samples)[:, None]), + -rng.randint(2, size=(n_samples, 1)), + ), + axis=1, + ) + intercept = 1.23 + coef = np.array([0.5, -2]) + # Take care that X @ coef + intercept > 0 + assert np.min(X @ coef + intercept) > 0 + # For an exponential distribution with rate lambda, e.g. exp(-lambda * x), + # the quantile at level q is: + # quantile(q) = - log(1 - q) / lambda + # scale = 1/lambda = -quantile(q) / log(1 - q) + y = rng.exponential( + scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples + ) + model = QuantileRegressor( + quantile=quantile, + alpha=0, + solver=default_solver, + ).fit(X, y) + # This test can be made to pass with any solver but in the interest + # of sparing continuous integration resources, the test is performed + # with the fastest solver only. 
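+    # Since the q-quantile of an exponential with scale s is -log(1 - q) * s
+    # and s was set to -(X @ coef + intercept) / log(1 - quantile), the
+    # conditional quantile of y is exactly X @ coef + intercept, so the fit
+    # should recover intercept and coef up to estimation noise.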
+ + assert model.intercept_ == approx(intercept, rel=0.2) + assert_allclose(model.coef_, coef, rtol=0.6) + assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2) + + # Now compare to Nelder-Mead optimization with L1 penalty + alpha = 0.01 + model.set_params(alpha=alpha).fit(X, y) + model_coef = np.r_[model.intercept_, model.coef_] + + def func(coef): + loss = mean_pinball_loss(y, X @ coef[1:] + coef[0], alpha=quantile) + L1 = np.sum(np.abs(coef[1:])) + return loss + alpha * L1 + + res = minimize( + fun=func, + x0=[1, 0, -1], + method="Nelder-Mead", + tol=1e-12, + options={"maxiter": 2000}, + ) + + assert func(model_coef) == approx(func(res.x)) + assert_allclose(model.intercept_, res.x[0]) + assert_allclose(model.coef_, res.x[1:]) + assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2) + + +@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8]) +def test_equivariance(quantile, default_solver): + """Test equivariace of quantile regression. + + See Koenker (2005) Quantile Regression, Chapter 2.2.3. + """ + rng = np.random.RandomState(42) + n_samples, n_features = 100, 5 + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + noise=0, + random_state=rng, + shuffle=False, + ) + # make y asymmetric + y += rng.exponential(scale=100, size=y.shape) + params = dict(alpha=0, solver=default_solver) + model1 = QuantileRegressor(quantile=quantile, **params).fit(X, y) + + # coef(q; a*y, X) = a * coef(q; y, X) + a = 2.5 + model2 = QuantileRegressor(quantile=quantile, **params).fit(X, a * y) + assert model2.intercept_ == approx(a * model1.intercept_, rel=1e-5) + assert_allclose(model2.coef_, a * model1.coef_, rtol=1e-5) + + # coef(1-q; -a*y, X) = -a * coef(q; y, X) + model2 = QuantileRegressor(quantile=1 - quantile, **params).fit(X, -a * y) + assert model2.intercept_ == approx(-a * model1.intercept_, rel=1e-5) + assert_allclose(model2.coef_, -a * model1.coef_, rtol=1e-5) + + # coef(q; y + X @ g, X) = coef(q; y, X) + g + g_intercept, g_coef = rng.randn(), rng.randn(n_features) + model2 = QuantileRegressor(quantile=quantile, **params) + model2.fit(X, y + X @ g_coef + g_intercept) + assert model2.intercept_ == approx(model1.intercept_ + g_intercept) + assert_allclose(model2.coef_, model1.coef_ + g_coef, rtol=1e-6) + + # coef(q; y, X @ A) = A^-1 @ coef(q; y, X) + A = rng.randn(n_features, n_features) + model2 = QuantileRegressor(quantile=quantile, **params) + model2.fit(X @ A, y) + assert model2.intercept_ == approx(model1.intercept_, rel=1e-5) + assert_allclose(model2.coef_, np.linalg.solve(A, model1.coef_), rtol=1e-5) + + +@pytest.mark.skipif( + parse_version(sp_version.base_version) >= parse_version("1.11"), + reason="interior-point solver is not available in SciPy 1.11", +) +@pytest.mark.filterwarnings("ignore:`method='interior-point'` is deprecated") +def test_linprog_failure(): + """Test that linprog fails.""" + X = np.linspace(0, 10, num=10).reshape(-1, 1) + y = np.linspace(0, 10, num=10) + reg = QuantileRegressor( + alpha=0, solver="interior-point", solver_options={"maxiter": 1} + ) + + msg = "Linear programming for QuantileRegressor did not succeed." 
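+    # With solver_options={"maxiter": 1} the linear program cannot converge,
+    # so fit() is expected to emit the ConvergenceWarning matched below.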
+ with pytest.warns(ConvergenceWarning, match=msg): + reg.fit(X, y) + + +@skip_if_32bit +@pytest.mark.skipif( + sp_version <= parse_version("1.6.0"), + reason="Solvers are available as of scipy 1.6.0", +) +@pytest.mark.parametrize( + "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS +) +@pytest.mark.parametrize("solver", ["highs", "highs-ds", "highs-ipm"]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_sparse_input(sparse_container, solver, fit_intercept, default_solver): + """Test that sparse and dense X give same results.""" + X, y = make_regression(n_samples=100, n_features=20, random_state=1, noise=1.0) + X_sparse = sparse_container(X) + alpha = 1e-4 + quant_dense = QuantileRegressor( + alpha=alpha, fit_intercept=fit_intercept, solver=default_solver + ).fit(X, y) + quant_sparse = QuantileRegressor( + alpha=alpha, fit_intercept=fit_intercept, solver=solver + ).fit(X_sparse, y) + assert_allclose(quant_sparse.coef_, quant_dense.coef_, rtol=1e-2) + if fit_intercept: + assert quant_sparse.intercept_ == approx(quant_dense.intercept_) + # check that we still predict fraction + assert 0.45 <= np.mean(y < quant_sparse.predict(X_sparse)) <= 0.57 + + +def test_error_interior_point_future(X_y_data, monkeypatch): + """Check that we will raise a proper error when requesting + `solver='interior-point'` in SciPy >= 1.11. + """ + X, y = X_y_data + import sklearn.linear_model._quantile + + with monkeypatch.context() as m: + m.setattr(sklearn.linear_model._quantile, "sp_version", parse_version("1.11.0")) + err_msg = "Solver interior-point is not anymore available in SciPy >= 1.11.0." + with pytest.raises(ValueError, match=err_msg): + QuantileRegressor(solver="interior-point").fit(X, y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py new file mode 100644 index 0000000000000000000000000000000000000000..b442f6b207e708c7f7b2b989afd0c34ff492eddf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py @@ -0,0 +1,545 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal + +from sklearn.datasets import make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + LinearRegression, + OrthogonalMatchingPursuit, + RANSACRegressor, + Ridge, +) +from sklearn.linear_model._ransac import _dynamic_max_trials +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +# Generate coordinates of line +X = np.arange(-200, 200) +y = 0.2 * X + 20 +data = np.column_stack([X, y]) + +# Add some faulty data +rng = np.random.RandomState(1000) +outliers = np.unique(rng.randint(len(X), size=200)) +data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10 + +X = data[:, 0][:, np.newaxis] +y = data[:, 1] + + +def test_ransac_inliers_outliers(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, y) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def 
test_ransac_is_data_valid(): + def is_data_valid(X, y): + assert X.shape[0] == 2 + assert y.shape[0] == 2 + return False + + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + y = rng.rand(10, 1) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + is_data_valid=is_data_valid, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + +def test_ransac_is_model_valid(): + def is_model_valid(estimator, X, y): + assert X.shape[0] == 2 + assert y.shape[0] == 2 + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + is_model_valid=is_model_valid, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + +def test_ransac_max_trials(): + estimator = LinearRegression() + + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + max_trials=0, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + # there is a 1e-9 chance it will take these many trials. No good reason + # 1e-2 isn't enough, can still happen + # 2 is the what ransac defines as min_samples = X.shape[1] + 1 + max_trials = _dynamic_max_trials(len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9) + ransac_estimator = RANSACRegressor(estimator, min_samples=2) + for i in range(50): + ransac_estimator.set_params(min_samples=2, random_state=i) + ransac_estimator.fit(X, y) + assert ransac_estimator.n_trials_ < max_trials + 1 + + +def test_ransac_stop_n_inliers(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + stop_n_inliers=2, + random_state=0, + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.n_trials_ == 1 + + +def test_ransac_stop_score(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + stop_score=0, + random_state=0, + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.n_trials_ == 1 + + +def test_ransac_score(): + X = np.arange(100)[:, None] + y = np.zeros((100,)) + y[0] = 1 + y[1] = 100 + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=0.5, random_state=0 + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.score(X[2:], y[2:]) == 1 + assert ransac_estimator.score(X[:2], y[:2]) < 1 + + +def test_ransac_predict(): + X = np.arange(100)[:, None] + y = np.zeros((100,)) + y[0] = 1 + y[1] = 100 + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=0.5, random_state=0 + ) + ransac_estimator.fit(X, y) + + assert_array_equal(ransac_estimator.predict(X), np.zeros(100)) + + +def test_ransac_no_valid_data(): + def is_data_valid(X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_trials=5 + ) + + msg = "RANSAC could not find a valid consensus set" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 5 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +def test_ransac_no_valid_model(): + def is_model_valid(estimator, X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, 
is_model_valid=is_model_valid, max_trials=5 + ) + + msg = "RANSAC could not find a valid consensus set" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 0 + assert ransac_estimator.n_skips_invalid_model_ == 5 + + +def test_ransac_exceed_max_skips(): + def is_data_valid(X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_trials=5, max_skips=3 + ) + + msg = "RANSAC skipped more iterations than `max_skips`" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 4 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +def test_ransac_warn_exceed_max_skips(): + global cause_skip + cause_skip = False + + def is_data_valid(X, y): + global cause_skip + if not cause_skip: + cause_skip = True + return True + else: + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_skips=3, max_trials=5 + ) + warning_message = ( + "RANSAC found a valid consensus set but exited " + "early due to skipping more iterations than " + "`max_skips`. See estimator attributes for " + "diagnostics." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 4 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSR_CONTAINERS + CSC_CONTAINERS +) +def test_ransac_sparse(sparse_container): + X_sparse = sparse_container(X) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator.fit(X_sparse, y) + + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_none_estimator(): + estimator = LinearRegression() + + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_none_estimator = RANSACRegressor( + None, min_samples=2, residual_threshold=5, random_state=0 + ) + + ransac_estimator.fit(X, y) + ransac_none_estimator.fit(X, y) + + assert_array_almost_equal( + ransac_estimator.predict(X), ransac_none_estimator.predict(X) + ) + + +def test_ransac_min_n_samples(): + estimator = LinearRegression() + ransac_estimator1 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator2 = RANSACRegressor( + estimator, + min_samples=2.0 / X.shape[0], + residual_threshold=5, + random_state=0, + ) + ransac_estimator5 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator6 = RANSACRegressor(estimator, residual_threshold=5, random_state=0) + ransac_estimator7 = RANSACRegressor( + estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0 + ) + # GH #19390 + ransac_estimator8 = RANSACRegressor( + Ridge(), min_samples=None, residual_threshold=5, random_state=0 + ) + + ransac_estimator1.fit(X, y) + ransac_estimator2.fit(X, y) + ransac_estimator5.fit(X, y) + ransac_estimator6.fit(X, y) + + 
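+    # min_samples=2 (absolute), min_samples=2/n_samples (fraction) and the
+    # default of X.shape[1] + 1 = 2 all resolve to the same subset size, so
+    # the fitted estimators below must give identical predictions.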
assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator2.predict(X) + ) + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator5.predict(X) + ) + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator6.predict(X) + ) + + with pytest.raises(ValueError): + ransac_estimator7.fit(X, y) + + err_msg = "`min_samples` needs to be explicitly set" + with pytest.raises(ValueError, match=err_msg): + ransac_estimator8.fit(X, y) + + +def test_ransac_multi_dimensional_targets(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + + # 3-D target values + yyy = np.column_stack([y, y, y]) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, yyy) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_residual_loss(): + def loss_multi1(y_true, y_pred): + return np.sum(np.abs(y_true - y_pred), axis=1) + + def loss_multi2(y_true, y_pred): + return np.sum((y_true - y_pred) ** 2, axis=1) + + def loss_mono(y_true, y_pred): + return np.abs(y_true - y_pred) + + yyy = np.column_stack([y, y, y]) + + estimator = LinearRegression() + ransac_estimator0 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator1 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss=loss_multi1, + ) + ransac_estimator2 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss=loss_multi2, + ) + + # multi-dimensional + ransac_estimator0.fit(X, yyy) + ransac_estimator1.fit(X, yyy) + ransac_estimator2.fit(X, yyy) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator1.predict(X) + ) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + + # one-dimensional + ransac_estimator0.fit(X, y) + ransac_estimator2.loss = loss_mono + ransac_estimator2.fit(X, y) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + ransac_estimator3 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss="squared_error", + ) + ransac_estimator3.fit(X, y) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + + +def test_ransac_default_residual_threshold(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor(estimator, min_samples=2, random_state=0) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, y) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_dynamic_max_trials(): + # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in + # Hartley, R.~I. 
and Zisserman, A., 2004, + # Multiple View Geometry in Computer Vision, Second Edition, + # Cambridge University Press, ISBN: 0521540518 + + # e = 0%, min_samples = X + assert _dynamic_max_trials(100, 100, 2, 0.99) == 1 + + # e = 5%, min_samples = 2 + assert _dynamic_max_trials(95, 100, 2, 0.99) == 2 + # e = 10%, min_samples = 2 + assert _dynamic_max_trials(90, 100, 2, 0.99) == 3 + # e = 30%, min_samples = 2 + assert _dynamic_max_trials(70, 100, 2, 0.99) == 7 + # e = 50%, min_samples = 2 + assert _dynamic_max_trials(50, 100, 2, 0.99) == 17 + + # e = 5%, min_samples = 8 + assert _dynamic_max_trials(95, 100, 8, 0.99) == 5 + # e = 10%, min_samples = 8 + assert _dynamic_max_trials(90, 100, 8, 0.99) == 9 + # e = 30%, min_samples = 8 + assert _dynamic_max_trials(70, 100, 8, 0.99) == 78 + # e = 50%, min_samples = 8 + assert _dynamic_max_trials(50, 100, 8, 0.99) == 1177 + + # e = 0%, min_samples = 10 + assert _dynamic_max_trials(1, 100, 10, 0) == 0 + assert _dynamic_max_trials(1, 100, 10, 1) == float("inf") + + +def test_ransac_fit_sample_weight(): + ransac_estimator = RANSACRegressor(random_state=0) + n_samples = y.shape[0] + weights = np.ones(n_samples) + ransac_estimator.fit(X, y, weights) + # sanity check + assert ransac_estimator.inlier_mask_.shape[0] == n_samples + + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + # check that mask is correct + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where + # X = X1 repeated n1 times, X2 repeated n2 times and so forth + random_state = check_random_state(0) + X_ = random_state.randint(0, 200, [10, 1]) + y_ = np.ndarray.flatten(0.2 * X_ + 2) + sample_weight = random_state.randint(0, 10, 10) + outlier_X = random_state.randint(0, 1000, [1, 1]) + outlier_weight = random_state.randint(0, 10, 1) + outlier_y = random_state.randint(-1000, 0, 1) + + X_flat = np.append( + np.repeat(X_, sample_weight, axis=0), + np.repeat(outlier_X, outlier_weight, axis=0), + axis=0, + ) + y_flat = np.ndarray.flatten( + np.append( + np.repeat(y_, sample_weight, axis=0), + np.repeat(outlier_y, outlier_weight, axis=0), + axis=0, + ) + ) + ransac_estimator.fit(X_flat, y_flat) + ref_coef_ = ransac_estimator.estimator_.coef_ + + sample_weight = np.append(sample_weight, outlier_weight) + X_ = np.append(X_, outlier_X, axis=0) + y_ = np.append(y_, outlier_y) + ransac_estimator.fit(X_, y_, sample_weight) + + assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_) + + # check that if estimator.fit doesn't support + # sample_weight, raises error + estimator = OrthogonalMatchingPursuit() + ransac_estimator = RANSACRegressor(estimator, min_samples=10) + + err_msg = f"{estimator.__class__.__name__} does not support sample_weight." 
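+    # OrthogonalMatchingPursuit.fit has no sample_weight parameter, so RANSAC
+    # has to raise instead of silently dropping the weights.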
+ with pytest.raises(ValueError, match=err_msg): + ransac_estimator.fit(X, y, weights) + + +def test_ransac_final_model_fit_sample_weight(): + X, y = make_regression(n_samples=1000, random_state=10) + rng = check_random_state(42) + sample_weight = rng.randint(1, 4, size=y.shape[0]) + sample_weight = sample_weight / sample_weight.sum() + ransac = RANSACRegressor(estimator=LinearRegression(), random_state=0) + ransac.fit(X, y, sample_weight=sample_weight) + + final_model = LinearRegression() + mask_samples = ransac.inlier_mask_ + final_model.fit( + X[mask_samples], y[mask_samples], sample_weight=sample_weight[mask_samples] + ) + + assert_allclose(ransac.estimator_.coef_, final_model.coef_, atol=1e-12) + + +def test_perfect_horizontal_line(): + """Check that we can fit a line where all samples are inliers. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19497 + """ + X = np.arange(100)[:, None] + y = np.zeros((100,)) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor(estimator, random_state=0) + ransac_estimator.fit(X, y) + + assert_allclose(ransac_estimator.estimator_.coef_, 0.0) + assert_allclose(ransac_estimator.estimator_.intercept_, 0.0) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ridge.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..19ff441a068127c4bfcf4b2a67afb00cbeff1409 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ridge.py @@ -0,0 +1,2064 @@ +import warnings +from itertools import product + +import numpy as np +import pytest +from scipy import linalg + +from sklearn import datasets +from sklearn.datasets import ( + make_classification, + make_low_rank_matrix, + make_multilabel_classification, + make_regression, +) +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + LinearRegression, + Ridge, + RidgeClassifier, + RidgeClassifierCV, + RidgeCV, + ridge_regression, +) +from sklearn.linear_model._ridge import ( + _check_gcv_mode, + _RidgeGCV, + _solve_cholesky, + _solve_cholesky_kernel, + _solve_lbfgs, + _solve_svd, + _X_CenterStackOp, +) +from sklearn.metrics import get_scorer, make_scorer, mean_squared_error +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + KFold, + LeaveOneOut, + cross_val_predict, +) +from sklearn.preprocessing import minmax_scale +from sklearn.utils import _IS_32BIT, check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +SOLVERS = ["svd", "sparse_cg", "cholesky", "lsqr", "sag", "saga"] +SPARSE_SOLVERS_WITH_INTERCEPT = ("sparse_cg", "sag") +SPARSE_SOLVERS_WITHOUT_INTERCEPT = ("sparse_cg", "cholesky", "lsqr", "sag", "saga") + +diabetes = datasets.load_diabetes() +X_diabetes, y_diabetes = diabetes.data, diabetes.target +ind = np.arange(X_diabetes.shape[0]) +rng = np.random.RandomState(0) +rng.shuffle(ind) +ind = ind[:200] +X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind] + +iris = datasets.load_iris() +X_iris, y_iris = iris.data, iris.target + + +def _accuracy_callable(y_test, y_pred): + return np.mean(y_test == y_pred) + + +def _mean_squared_error_callable(y_test, y_pred): + return ((y_test - y_pred) ** 2).mean() 
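+# A minimal reference sketch, not used by the tests below: with
+# fit_intercept=False, Ridge(alpha) minimizes ||X w - y||_2^2 + alpha * ||w||_2^2,
+# whose normal equations are (X'X + alpha * I) w = X'y. The helper name is
+# hypothetical and relies on the module-level `import numpy as np`.
+def _ridge_normal_equations_sketch(X, y, alpha=1.0):
+    """Closed-form ridge solution without intercept, for cross-checking."""
+    n_features = X.shape[1]
+    return np.linalg.solve(X.T @ X + alpha * np.identity(n_features), X.T @ y)
+# For a well-conditioned X this should agree with, e.g.,
+# Ridge(alpha=1.0, fit_intercept=False, solver="svd").fit(X, y).coef_.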
+ + +@pytest.fixture(params=["long", "wide"]) +def ols_ridge_dataset(global_random_seed, request): + """Dataset with OLS and Ridge solutions, well conditioned X. + + The construction is based on the SVD decomposition of X = U S V'. + + Parameters + ---------- + type : {"long", "wide"} + If "long", then n_samples > n_features. + If "wide", then n_features > n_samples. + + For "wide", we return the minimum norm solution w = X' (XX')^-1 y: + + min ||w||_2 subject to X w = y + + Returns + ------- + X : ndarray + Last column of 1, i.e. intercept. + y : ndarray + coef_ols : ndarray of shape + Minimum norm OLS solutions, i.e. min ||X w - y||_2_2 (with minimum ||w||_2 in + case of ambiguity) + Last coefficient is intercept. + coef_ridge : ndarray of shape (5,) + Ridge solution with alpha=1, i.e. min ||X w - y||_2_2 + ||w||_2^2. + Last coefficient is intercept. + """ + # Make larger dim more than double as big as the smaller one. + # This helps when constructing singular matrices like (X, X). + if request.param == "long": + n_samples, n_features = 12, 4 + else: + n_samples, n_features = 4, 12 + k = min(n_samples, n_features) + rng = np.random.RandomState(global_random_seed) + X = make_low_rank_matrix( + n_samples=n_samples, n_features=n_features, effective_rank=k, random_state=rng + ) + X[:, -1] = 1 # last columns acts as intercept + U, s, Vt = linalg.svd(X) + assert np.all(s > 1e-3) # to be sure + U1, U2 = U[:, :k], U[:, k:] + Vt1, _ = Vt[:k, :], Vt[k:, :] + + if request.param == "long": + # Add a term that vanishes in the product X'y + coef_ols = rng.uniform(low=-10, high=10, size=n_features) + y = X @ coef_ols + y += U2 @ rng.normal(size=n_samples - n_features) ** 2 + else: + y = rng.uniform(low=-10, high=10, size=n_samples) + # w = X'(XX')^-1 y = V s^-1 U' y + coef_ols = Vt1.T @ np.diag(1 / s) @ U1.T @ y + + # Add penalty alpha * ||coef||_2^2 for alpha=1 and solve via normal equations. + # Note that the problem is well conditioned such that we get accurate results. + alpha = 1 + d = alpha * np.identity(n_features) + d[-1, -1] = 0 # intercept gets no penalty + coef_ridge = linalg.solve(X.T @ X + d, X.T @ y) + + # To be sure + R_OLS = y - X @ coef_ols + R_Ridge = y - X @ coef_ridge + assert np.linalg.norm(R_OLS) < np.linalg.norm(R_Ridge) + + return X, y, coef_ols, coef_ridge + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression(solver, fit_intercept, ols_ridge_dataset, global_random_seed): + """Test that Ridge converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + """ + X, y, _, coef = ols_ridge_dataset + alpha = 1.0 # because ols_ridge_dataset uses this. + params = dict( + alpha=alpha, + fit_intercept=True, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + + # Calculate residuals and R2. + res_null = y - np.mean(y) + res_Ridge = y - X @ coef + R2_Ridge = 1 - np.sum(res_Ridge**2) / np.sum(res_null**2) + + model = Ridge(**params) + X = X[:, :-1] # remove intercept + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + model.fit(X, y) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + assert model.score(X, y) == pytest.approx(R2_Ridge) + + # Same with sample_weight. 
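+    # Unit weights must reproduce the unweighted fit exactly.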
+ model = Ridge(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + assert model.score(X, y) == pytest.approx(R2_Ridge) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_hstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that Ridge converges for all solvers to correct solution on hstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2. + For long X, [X, X] is a singular matrix. + """ + X, y, _, coef = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 1.0 # because ols_ridge_dataset uses this. + + model = Ridge( + alpha=alpha / 2, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1) + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + model.fit(X, y) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + # coefficients are not all on the same magnitude, adding a small atol to + # make this test less brittle + assert_allclose(model.coef_, np.r_[coef, coef], atol=1e-8) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_vstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that Ridge converges for all solvers to correct solution on vstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X], [y] + [X], [y] with 2 * alpha. + For wide X, [X', X'] is a singular matrix. + """ + X, y, _, coef = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 1.0 # because ols_ridge_dataset uses this. + + model = Ridge( + alpha=2 * alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + model.fit(X, y) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + # coefficients are not all on the same magnitude, adding a small atol to + # make this test less brittle + assert_allclose(model.coef_, coef, atol=1e-8) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_unpenalized( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that unpenalized Ridge = OLS converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + Note: This checks the minimum norm solution for wide X, i.e. 
+ n_samples < n_features: + min ||w||_2 subject to X w = y + """ + X, y, coef, _ = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 0 # OLS + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + + model = Ridge(**params) + # Note that cholesky might give a warning: "Singular matrix in solving dual + # problem. Using least-squares solution instead." + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + model.fit(X, y) + + # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails + # for the wide/fat case with n_features > n_samples. The current Ridge solvers do + # NOT return the minimum norm solution with fit_intercept=True. + if n_samples > n_features or not fit_intercept: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + else: + # As it is an underdetermined problem, residuals = 0. This shows that we get + # a solution to X w = y .... + assert_allclose(model.predict(X), y) + assert_allclose(X @ coef + intercept, y) + # But it is not the minimum norm solution. (This should be equal.) + assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm( + np.r_[intercept, coef] + ) + + pytest.xfail(reason="Ridge does not provide the minimum norm solution.") + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_unpenalized_hstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that unpenalized Ridge = OLS converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + OLS fit on [X] is the same as fit on [X, X]/2. + For long X, [X, X] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to min ||X w - y||_2 + """ + X, y, coef, _ = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 0 # OLS + + model = Ridge( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + model.fit(X, y) + + if n_samples > n_features or not fit_intercept: + assert model.intercept_ == pytest.approx(intercept) + if solver == "cholesky": + # Cholesky is a bad choice for singular X. + pytest.skip() + assert_allclose(model.coef_, np.r_[coef, coef]) + else: + # FIXME: Same as in test_ridge_regression_unpenalized. + # As it is an underdetermined problem, residuals = 0. This shows that we get + # a solution to X w = y .... + assert_allclose(model.predict(X), y) + # But it is not the minimum norm solution. (This should be equal.) 
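+        # i.e. the returned (intercept_, coef_) has a strictly larger
+        # L2 norm than the reference minimum norm solution.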
+ assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm( + np.r_[intercept, coef, coef] + ) + + pytest.xfail(reason="Ridge does not provide the minimum norm solution.") + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, np.r_[coef, coef]) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_unpenalized_vstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that unpenalized Ridge = OLS converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + OLS fit on [X] is the same as fit on [X], [y] + [X], [y]. + For wide X, [X', X'] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to X w = y + """ + X, y, coef, _ = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 0 # OLS + + model = Ridge( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + model.fit(X, y) + + if n_samples > n_features or not fit_intercept: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + else: + # FIXME: Same as in test_ridge_regression_unpenalized. + # As it is an underdetermined problem, residuals = 0. This shows that we get + # a solution to X w = y .... + assert_allclose(model.predict(X), y) + # But it is not the minimum norm solution. (This should be equal.) + assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm( + np.r_[intercept, coef] + ) + + pytest.xfail(reason="Ridge does not provide the minimum norm solution.") + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("alpha", [1.0, 1e-2]) +def test_ridge_regression_sample_weights( + solver, + fit_intercept, + sparse_container, + alpha, + ols_ridge_dataset, + global_random_seed, +): + """Test that Ridge with sample weights gives correct results. 
+ + We use the following trick: + ||y - Xw||_2 = (z - Aw)' W (z - Aw) + for z=[y, y], A' = [X', X'] (vstacked), and W[:n/2] + W[n/2:] = 1, W=diag(W) + """ + if sparse_container is not None: + if fit_intercept and solver not in SPARSE_SOLVERS_WITH_INTERCEPT: + pytest.skip() + elif not fit_intercept and solver not in SPARSE_SOLVERS_WITHOUT_INTERCEPT: + pytest.skip() + X, y, _, coef = ols_ridge_dataset + n_samples, n_features = X.shape + sw = rng.uniform(low=0, high=1, size=n_samples) + + model = Ridge( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ["sag", "saga"] else 1e-10, + max_iter=100_000, + random_state=global_random_seed, + ) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + y = np.r_[y, y] + sw = np.r_[sw, 1 - sw] * alpha + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + if sparse_container is not None: + X = sparse_container(X) + model.fit(X, y, sample_weight=sw) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + + +def test_primal_dual_relationship(): + y = y_diabetes.reshape(-1, 1) + coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2]) + K = np.dot(X_diabetes, X_diabetes.T) + dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2]) + coef2 = np.dot(X_diabetes.T, dual_coef).T + assert_array_almost_equal(coef, coef2) + + +def test_ridge_regression_convergence_fail(): + rng = np.random.RandomState(0) + y = rng.randn(5) + X = rng.randn(5, 10) + warning_message = r"sparse_cg did not converge after" r" [0-9]+ iterations." + with pytest.warns(ConvergenceWarning, match=warning_message): + ridge_regression( + X, y, alpha=1.0, solver="sparse_cg", tol=0.0, max_iter=None, verbose=1 + ) + + +def test_ridge_shapes_type(): + # Test shape of coef_ and intercept_ + rng = np.random.RandomState(0) + n_samples, n_features = 5, 10 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + Y1 = y[:, np.newaxis] + Y = np.c_[y, 1 + y] + + ridge = Ridge() + + ridge.fit(X, y) + assert ridge.coef_.shape == (n_features,) + assert ridge.intercept_.shape == () + assert isinstance(ridge.coef_, np.ndarray) + assert isinstance(ridge.intercept_, float) + + ridge.fit(X, Y1) + assert ridge.coef_.shape == (1, n_features) + assert ridge.intercept_.shape == (1,) + assert isinstance(ridge.coef_, np.ndarray) + assert isinstance(ridge.intercept_, np.ndarray) + + ridge.fit(X, Y) + assert ridge.coef_.shape == (2, n_features) + assert ridge.intercept_.shape == (2,) + assert isinstance(ridge.coef_, np.ndarray) + assert isinstance(ridge.intercept_, np.ndarray) + + +def test_ridge_intercept(): + # Test intercept with multiple targets GH issue #708 + rng = np.random.RandomState(0) + n_samples, n_features = 5, 10 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + Y = np.c_[y, 1.0 + y] + + ridge = Ridge() + + ridge.fit(X, y) + intercept = ridge.intercept_ + + ridge.fit(X, Y) + assert_almost_equal(ridge.intercept_[0], intercept) + assert_almost_equal(ridge.intercept_[1], intercept + 1.0) + + +def test_ridge_vs_lstsq(): + # On alpha=0., Ridge and OLS yield the same solution. 
+ + rng = np.random.RandomState(0) + # we need more samples than features + n_samples, n_features = 5, 4 + y = rng.randn(n_samples) + X = rng.randn(n_samples, n_features) + + ridge = Ridge(alpha=0.0, fit_intercept=False) + ols = LinearRegression(fit_intercept=False) + + ridge.fit(X, y) + ols.fit(X, y) + assert_almost_equal(ridge.coef_, ols.coef_) + + ridge.fit(X, y) + ols.fit(X, y) + assert_almost_equal(ridge.coef_, ols.coef_) + + +def test_ridge_individual_penalties(): + # Tests the ridge object using individual penalties + + rng = np.random.RandomState(42) + + n_samples, n_features, n_targets = 20, 10, 5 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples, n_targets) + + penalties = np.arange(n_targets) + + coef_cholesky = np.array( + [ + Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_ + for alpha, target in zip(penalties, y.T) + ] + ) + + coefs_indiv_pen = [ + Ridge(alpha=penalties, solver=solver, tol=1e-12).fit(X, y).coef_ + for solver in ["svd", "sparse_cg", "lsqr", "cholesky", "sag", "saga"] + ] + for coef_indiv_pen in coefs_indiv_pen: + assert_array_almost_equal(coef_cholesky, coef_indiv_pen) + + # Test error is raised when number of targets and penalties do not match. + ridge = Ridge(alpha=penalties[:-1]) + err_msg = "Number of targets and number of penalties do not correspond: 4 != 5" + with pytest.raises(ValueError, match=err_msg): + ridge.fit(X, y) + + +@pytest.mark.parametrize("n_col", [(), (1,), (3,)]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_X_CenterStackOp(n_col, csr_container): + rng = np.random.RandomState(0) + X = rng.randn(11, 8) + X_m = rng.randn(8) + sqrt_sw = rng.randn(len(X)) + Y = rng.randn(11, *n_col) + A = rng.randn(9, *n_col) + operator = _X_CenterStackOp(csr_container(X), X_m, sqrt_sw) + reference_operator = np.hstack([X - sqrt_sw[:, None] * X_m, sqrt_sw[:, None]]) + assert_allclose(reference_operator.dot(A), operator.dot(A)) + assert_allclose(reference_operator.T.dot(Y), operator.T.dot(Y)) + + +@pytest.mark.parametrize("shape", [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)]) +@pytest.mark.parametrize("uniform_weights", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_compute_gram(shape, uniform_weights, csr_container): + rng = np.random.RandomState(0) + X = rng.randn(*shape) + if uniform_weights: + sw = np.ones(X.shape[0]) + else: + sw = rng.chisquare(1, shape[0]) + sqrt_sw = np.sqrt(sw) + X_mean = np.average(X, axis=0, weights=sw) + X_centered = (X - X_mean) * sqrt_sw[:, None] + true_gram = X_centered.dot(X_centered.T) + X_sparse = csr_container(X * sqrt_sw[:, None]) + gcv = _RidgeGCV(fit_intercept=True) + computed_gram, computed_mean = gcv._compute_gram(X_sparse, sqrt_sw) + assert_allclose(X_mean, computed_mean) + assert_allclose(true_gram, computed_gram) + + +@pytest.mark.parametrize("shape", [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)]) +@pytest.mark.parametrize("uniform_weights", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_compute_covariance(shape, uniform_weights, csr_container): + rng = np.random.RandomState(0) + X = rng.randn(*shape) + if uniform_weights: + sw = np.ones(X.shape[0]) + else: + sw = rng.chisquare(1, shape[0]) + sqrt_sw = np.sqrt(sw) + X_mean = np.average(X, axis=0, weights=sw) + X_centered = (X - X_mean) * sqrt_sw[:, None] + true_covariance = X_centered.T.dot(X_centered) + X_sparse = csr_container(X * sqrt_sw[:, None]) + gcv = _RidgeGCV(fit_intercept=True) + computed_cov, computed_mean = 
gcv._compute_covariance(X_sparse, sqrt_sw) + assert_allclose(X_mean, computed_mean) + assert_allclose(true_covariance, computed_cov) + + +def _make_sparse_offset_regression( + n_samples=100, + n_features=100, + proportion_nonzero=0.5, + n_informative=10, + n_targets=1, + bias=13.0, + X_offset=30.0, + noise=30.0, + shuffle=True, + coef=False, + positive=False, + random_state=None, +): + X, y, c = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_informative, + n_targets=n_targets, + bias=bias, + noise=noise, + shuffle=shuffle, + coef=True, + random_state=random_state, + ) + if n_features == 1: + c = np.asarray([c]) + X += X_offset + mask = ( + np.random.RandomState(random_state).binomial(1, proportion_nonzero, X.shape) > 0 + ) + removed_X = X.copy() + X[~mask] = 0.0 + removed_X[mask] = 0.0 + y -= removed_X.dot(c) + if positive: + y += X.dot(np.abs(c) + 1 - c) + c = np.abs(c) + 1 + if n_features == 1: + c = c[0] + if coef: + return X, y, c + return X, y + + +@pytest.mark.parametrize( + "solver, sparse_container", + ( + (solver, sparse_container) + for (solver, sparse_container) in product( + ["cholesky", "sag", "sparse_cg", "lsqr", "saga", "ridgecv"], + [None] + CSR_CONTAINERS, + ) + if sparse_container is None or solver in ["sparse_cg", "ridgecv"] + ), +) +@pytest.mark.parametrize( + "n_samples,dtype,proportion_nonzero", + [(20, "float32", 0.1), (40, "float32", 1.0), (20, "float64", 0.2)], +) +@pytest.mark.parametrize("seed", np.arange(3)) +def test_solver_consistency( + solver, proportion_nonzero, n_samples, dtype, sparse_container, seed +): + alpha = 1.0 + noise = 50.0 if proportion_nonzero > 0.9 else 500.0 + X, y = _make_sparse_offset_regression( + bias=10, + n_features=30, + proportion_nonzero=proportion_nonzero, + noise=noise, + random_state=seed, + n_samples=n_samples, + ) + + # Manually scale the data to avoid pathological cases. We use + # minmax_scale to deal with the sparse case without breaking + # the sparsity pattern. 
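+    # (minmax_scale maps each feature to [0, 1]; after the positive offset the
+    # zeroed entries, when present, are the per-feature minima, so they stay
+    # exactly zero and the sparsity pattern is preserved.)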
+ X = minmax_scale(X) + + svd_ridge = Ridge(solver="svd", alpha=alpha).fit(X, y) + X = X.astype(dtype, copy=False) + y = y.astype(dtype, copy=False) + if sparse_container is not None: + X = sparse_container(X) + if solver == "ridgecv": + ridge = RidgeCV(alphas=[alpha]) + else: + ridge = Ridge(solver=solver, tol=1e-10, alpha=alpha) + ridge.fit(X, y) + assert_allclose(ridge.coef_, svd_ridge.coef_, atol=1e-3, rtol=1e-3) + assert_allclose(ridge.intercept_, svd_ridge.intercept_, atol=1e-3, rtol=1e-3) + + +@pytest.mark.parametrize("gcv_mode", ["svd", "eigen"]) +@pytest.mark.parametrize("X_container", [np.asarray] + CSR_CONTAINERS) +@pytest.mark.parametrize("X_shape", [(11, 8), (11, 20)]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize( + "y_shape, noise", + [ + ((11,), 1.0), + ((11, 1), 30.0), + ((11, 3), 150.0), + ], +) +def test_ridge_gcv_vs_ridge_loo_cv( + gcv_mode, X_container, X_shape, y_shape, fit_intercept, noise +): + n_samples, n_features = X_shape + n_targets = y_shape[-1] if len(y_shape) == 2 else 1 + X, y = _make_sparse_offset_regression( + n_samples=n_samples, + n_features=n_features, + n_targets=n_targets, + random_state=0, + shuffle=False, + noise=noise, + n_informative=5, + ) + y = y.reshape(y_shape) + + alphas = [1e-3, 0.1, 1.0, 10.0, 1e3] + loo_ridge = RidgeCV( + cv=n_samples, + fit_intercept=fit_intercept, + alphas=alphas, + scoring="neg_mean_squared_error", + ) + gcv_ridge = RidgeCV( + gcv_mode=gcv_mode, + fit_intercept=fit_intercept, + alphas=alphas, + ) + + loo_ridge.fit(X, y) + + X_gcv = X_container(X) + gcv_ridge.fit(X_gcv, y) + + assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_) + assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-3) + + +def test_ridge_loo_cv_asym_scoring(): + # checking on asymmetric scoring + scoring = "explained_variance" + n_samples, n_features = 10, 5 + n_targets = 1 + X, y = _make_sparse_offset_regression( + n_samples=n_samples, + n_features=n_features, + n_targets=n_targets, + random_state=0, + shuffle=False, + noise=1, + n_informative=5, + ) + + alphas = [1e-3, 0.1, 1.0, 10.0, 1e3] + loo_ridge = RidgeCV( + cv=n_samples, fit_intercept=True, alphas=alphas, scoring=scoring + ) + + gcv_ridge = RidgeCV(fit_intercept=True, alphas=alphas, scoring=scoring) + + loo_ridge.fit(X, y) + gcv_ridge.fit(X, y) + + assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_) + assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-3) + + +@pytest.mark.parametrize("gcv_mode", ["svd", "eigen"]) +@pytest.mark.parametrize("X_container", [np.asarray] + CSR_CONTAINERS) +@pytest.mark.parametrize("n_features", [8, 20]) +@pytest.mark.parametrize( + "y_shape, fit_intercept, noise", + [ + ((11,), True, 1.0), + ((11, 1), True, 20.0), + ((11, 3), True, 150.0), + ((11, 3), False, 30.0), + ], +) +def test_ridge_gcv_sample_weights( + gcv_mode, X_container, fit_intercept, n_features, y_shape, noise +): + alphas = [1e-3, 0.1, 1.0, 10.0, 1e3] + rng = np.random.RandomState(0) + n_targets = y_shape[-1] if len(y_shape) == 2 else 1 + X, y = _make_sparse_offset_regression( + n_samples=11, + n_features=n_features, + n_targets=n_targets, + random_state=0, + shuffle=False, + noise=noise, + ) + y = y.reshape(y_shape) + + sample_weight = 3 * rng.randn(len(X)) + sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) + indices = np.repeat(np.arange(X.shape[0]), sample_weight) + 
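+    # Each original sample i is repeated sample_weight[i] times; with one split
+    # per original sample, GroupKFold over `indices` holds out all copies of a
+    # sample at once, so the k-fold errors below emulate leave-one-out with
+    # integer sample weights and can be compared to the GCV errors.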
sample_weight = sample_weight.astype(float) + X_tiled, y_tiled = X[indices], y[indices] + + cv = GroupKFold(n_splits=X.shape[0]) + splits = cv.split(X_tiled, y_tiled, groups=indices) + kfold = RidgeCV( + alphas=alphas, + cv=splits, + scoring="neg_mean_squared_error", + fit_intercept=fit_intercept, + ) + kfold.fit(X_tiled, y_tiled) + + ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept) + splits = cv.split(X_tiled, y_tiled, groups=indices) + predictions = cross_val_predict(ridge_reg, X_tiled, y_tiled, cv=splits) + kfold_errors = (y_tiled - predictions) ** 2 + kfold_errors = [ + np.sum(kfold_errors[indices == i], axis=0) for i in np.arange(X.shape[0]) + ] + kfold_errors = np.asarray(kfold_errors) + + X_gcv = X_container(X) + gcv_ridge = RidgeCV( + alphas=alphas, + store_cv_values=True, + gcv_mode=gcv_mode, + fit_intercept=fit_intercept, + ) + gcv_ridge.fit(X_gcv, y, sample_weight=sample_weight) + if len(y_shape) == 2: + gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] + else: + gcv_errors = gcv_ridge.cv_values_[:, alphas.index(kfold.alpha_)] + + assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_) + assert_allclose(gcv_errors, kfold_errors, rtol=1e-3) + assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-3) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize( + "mode, mode_n_greater_than_p, mode_p_greater_than_n", + [ + (None, "svd", "eigen"), + ("auto", "svd", "eigen"), + ("eigen", "eigen", "eigen"), + ("svd", "svd", "svd"), + ], +) +def test_check_gcv_mode_choice( + sparse_container, mode, mode_n_greater_than_p, mode_p_greater_than_n +): + X, _ = make_regression(n_samples=5, n_features=2) + if sparse_container is not None: + X = sparse_container(X) + assert _check_gcv_mode(X, mode) == mode_n_greater_than_p + assert _check_gcv_mode(X.T, mode) == mode_p_greater_than_n + + +def _test_ridge_loo(sparse_container): + # test that can work with both dense or sparse matrices + n_samples = X_diabetes.shape[0] + + ret = [] + + if sparse_container is None: + X, fit_intercept = X_diabetes, True + else: + X, fit_intercept = sparse_container(X_diabetes), False + ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept) + + # check best alpha + ridge_gcv.fit(X, y_diabetes) + alpha_ = ridge_gcv.alpha_ + ret.append(alpha_) + + # check that we get same best alpha with custom loss_func + f = ignore_warnings + scoring = make_scorer(mean_squared_error, greater_is_better=False) + ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring) + f(ridge_gcv2.fit)(X, y_diabetes) + assert ridge_gcv2.alpha_ == pytest.approx(alpha_) + + # check that we get same best alpha with custom score_func + def func(x, y): + return -mean_squared_error(x, y) + + scoring = make_scorer(func) + ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring) + f(ridge_gcv3.fit)(X, y_diabetes) + assert ridge_gcv3.alpha_ == pytest.approx(alpha_) + + # check that we get same best alpha with a scorer + scorer = get_scorer("neg_mean_squared_error") + ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer) + ridge_gcv4.fit(X, y_diabetes) + assert ridge_gcv4.alpha_ == pytest.approx(alpha_) + + # check that we get same best alpha with sample weights + if sparse_container is None: + ridge_gcv.fit(X, y_diabetes, sample_weight=np.ones(n_samples)) + assert ridge_gcv.alpha_ == pytest.approx(alpha_) + + # simulate several responses + Y = np.vstack((y_diabetes, y_diabetes)).T + + ridge_gcv.fit(X, Y) + Y_pred 
= ridge_gcv.predict(X) + ridge_gcv.fit(X, y_diabetes) + y_pred = ridge_gcv.predict(X) + + assert_allclose(np.vstack((y_pred, y_pred)).T, Y_pred, rtol=1e-5) + + return ret + + +def _test_ridge_cv(sparse_container): + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + ridge_cv = RidgeCV() + ridge_cv.fit(X, y_diabetes) + ridge_cv.predict(X) + + assert len(ridge_cv.coef_.shape) == 1 + assert type(ridge_cv.intercept_) == np.float64 + + cv = KFold(5) + ridge_cv.set_params(cv=cv) + ridge_cv.fit(X, y_diabetes) + ridge_cv.predict(X) + + assert len(ridge_cv.coef_.shape) == 1 + assert type(ridge_cv.intercept_) == np.float64 + + +@pytest.mark.parametrize( + "ridge, make_dataset", + [ + (RidgeCV(store_cv_values=False), make_regression), + (RidgeClassifierCV(store_cv_values=False), make_classification), + ], +) +def test_ridge_gcv_cv_values_not_stored(ridge, make_dataset): + # Check that `cv_values_` is not stored when store_cv_values is False + X, y = make_dataset(n_samples=6, random_state=42) + ridge.fit(X, y) + assert not hasattr(ridge, "cv_values_") + + +@pytest.mark.parametrize( + "ridge, make_dataset", + [(RidgeCV(), make_regression), (RidgeClassifierCV(), make_classification)], +) +@pytest.mark.parametrize("cv", [None, 3]) +def test_ridge_best_score(ridge, make_dataset, cv): + # check that the best_score_ is store + X, y = make_dataset(n_samples=6, random_state=42) + ridge.set_params(store_cv_values=False, cv=cv) + ridge.fit(X, y) + assert hasattr(ridge, "best_score_") + assert isinstance(ridge.best_score_, float) + + +def test_ridge_cv_individual_penalties(): + # Tests the ridge_cv object optimizing individual penalties for each target + + rng = np.random.RandomState(42) + + # Create random dataset with multiple targets. Each target should have + # a different optimal alpha. + n_samples, n_features, n_targets = 20, 5, 3 + y = rng.randn(n_samples, n_targets) + X = ( + np.dot(y[:, [0]], np.ones((1, n_features))) + + np.dot(y[:, [1]], 0.05 * np.ones((1, n_features))) + + np.dot(y[:, [2]], 0.001 * np.ones((1, n_features))) + + rng.randn(n_samples, n_features) + ) + + alphas = (1, 100, 1000) + + # Find optimal alpha for each target + optimal_alphas = [RidgeCV(alphas=alphas).fit(X, target).alpha_ for target in y.T] + + # Find optimal alphas for all targets simultaneously + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True).fit(X, y) + assert_array_equal(optimal_alphas, ridge_cv.alpha_) + + # The resulting regression weights should incorporate the different + # alpha values. 
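+    # (Ridge accepts an array-valued alpha with one entry per target, so
+    # refitting with the per-target alphas selected by RidgeCV should
+    # reproduce its coefficients.)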
+ assert_array_almost_equal( + Ridge(alpha=ridge_cv.alpha_).fit(X, y).coef_, ridge_cv.coef_ + ) + + # Test shape of alpha_ and cv_values_ + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True, store_cv_values=True).fit( + X, y + ) + assert ridge_cv.alpha_.shape == (n_targets,) + assert ridge_cv.best_score_.shape == (n_targets,) + assert ridge_cv.cv_values_.shape == (n_samples, len(alphas), n_targets) + + # Test edge case of there being only one alpha value + ridge_cv = RidgeCV(alphas=1, alpha_per_target=True, store_cv_values=True).fit(X, y) + assert ridge_cv.alpha_.shape == (n_targets,) + assert ridge_cv.best_score_.shape == (n_targets,) + assert ridge_cv.cv_values_.shape == (n_samples, n_targets, 1) + + # Test edge case of there being only one target + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True, store_cv_values=True).fit( + X, y[:, 0] + ) + assert np.isscalar(ridge_cv.alpha_) + assert np.isscalar(ridge_cv.best_score_) + assert ridge_cv.cv_values_.shape == (n_samples, len(alphas)) + + # Try with a custom scoring function + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True, scoring="r2").fit(X, y) + assert_array_equal(optimal_alphas, ridge_cv.alpha_) + assert_array_almost_equal( + Ridge(alpha=ridge_cv.alpha_).fit(X, y).coef_, ridge_cv.coef_ + ) + + # Using a custom CV object should throw an error in combination with + # alpha_per_target=True + ridge_cv = RidgeCV(alphas=alphas, cv=LeaveOneOut(), alpha_per_target=True) + msg = "cv!=None and alpha_per_target=True are incompatible" + with pytest.raises(ValueError, match=msg): + ridge_cv.fit(X, y) + ridge_cv = RidgeCV(alphas=alphas, cv=6, alpha_per_target=True) + with pytest.raises(ValueError, match=msg): + ridge_cv.fit(X, y) + + +def _test_ridge_diabetes(sparse_container): + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + ridge = Ridge(fit_intercept=False) + ridge.fit(X, y_diabetes) + return np.round(ridge.score(X, y_diabetes), 5) + + +def _test_multi_ridge_diabetes(sparse_container): + # simulate several responses + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + Y = np.vstack((y_diabetes, y_diabetes)).T + n_features = X_diabetes.shape[1] + + ridge = Ridge(fit_intercept=False) + ridge.fit(X, Y) + assert ridge.coef_.shape == (2, n_features) + Y_pred = ridge.predict(X) + ridge.fit(X, y_diabetes) + y_pred = ridge.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +def _test_ridge_classifiers(sparse_container): + n_classes = np.unique(y_iris).shape[0] + n_features = X_iris.shape[1] + X = X_iris if sparse_container is None else sparse_container(X_iris) + + for reg in (RidgeClassifier(), RidgeClassifierCV()): + reg.fit(X, y_iris) + assert reg.coef_.shape == (n_classes, n_features) + y_pred = reg.predict(X) + assert np.mean(y_iris == y_pred) > 0.79 + + cv = KFold(5) + reg = RidgeClassifierCV(cv=cv) + reg.fit(X, y_iris) + y_pred = reg.predict(X) + assert np.mean(y_iris == y_pred) >= 0.8 + + +@pytest.mark.parametrize("scoring", [None, "accuracy", _accuracy_callable]) +@pytest.mark.parametrize("cv", [None, KFold(5)]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_ridge_classifier_with_scoring(sparse_container, scoring, cv): + # non-regression test for #14672 + # check that RidgeClassifierCV works with all sort of scoring and + # cross-validation + X = X_iris if sparse_container is None else sparse_container(X_iris) + scoring_ = make_scorer(scoring) if callable(scoring) else scoring + clf = 
RidgeClassifierCV(scoring=scoring_, cv=cv) + # Smoke test to check that fit/predict does not raise error + clf.fit(X, y_iris).predict(X) + + +@pytest.mark.parametrize("cv", [None, KFold(5)]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_ridge_regression_custom_scoring(sparse_container, cv): + # check that custom scoring is working as expected + # check the tie breaking strategy (keep the first alpha tried) + + def _dummy_score(y_test, y_pred): + return 0.42 + + X = X_iris if sparse_container is None else sparse_container(X_iris) + alphas = np.logspace(-2, 2, num=5) + clf = RidgeClassifierCV(alphas=alphas, scoring=make_scorer(_dummy_score), cv=cv) + clf.fit(X, y_iris) + assert clf.best_score_ == pytest.approx(0.42) + # In case of tie score, the first alphas will be kept + assert clf.alpha_ == pytest.approx(alphas[0]) + + +def _test_tolerance(sparse_container): + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + + ridge = Ridge(tol=1e-5, fit_intercept=False) + ridge.fit(X, y_diabetes) + score = ridge.score(X, y_diabetes) + + ridge2 = Ridge(tol=1e-3, fit_intercept=False) + ridge2.fit(X, y_diabetes) + score2 = ridge2.score(X, y_diabetes) + + assert score >= score2 + + +@pytest.mark.parametrize( + "test_func", + ( + _test_ridge_loo, + _test_ridge_cv, + _test_ridge_diabetes, + _test_multi_ridge_diabetes, + _test_ridge_classifiers, + _test_tolerance, + ), +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dense_sparse(test_func, csr_container): + # test dense matrix + ret_dense = test_func(None) + # test sparse matrix + ret_sparse = test_func(csr_container) + # test that the outputs are the same + if ret_dense is not None and ret_sparse is not None: + assert_array_almost_equal(ret_dense, ret_sparse, decimal=3) + + +def test_class_weights(): + # Test class weights. + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + reg = RidgeClassifier(class_weight=None) + reg.fit(X, y) + assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1])) + + # we give a small weights to class 1 + reg = RidgeClassifier(class_weight={1: 0.001}) + reg.fit(X, y) + + # now the hyperplane should rotate clock-wise and + # the prediction on this point should shift + assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([-1])) + + # check if class_weight = 'balanced' can handle negative labels. 
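+    # ('balanced' assigns class weights proportional to
+    # n_samples / (n_classes * class frequency), so it depends only on label
+    # counts, not on the label values themselves, here -1/+1.)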
+ reg = RidgeClassifier(class_weight="balanced") + reg.fit(X, y) + assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1])) + + # class_weight = 'balanced', and class_weight = None should return + # same values when y has equal number of all labels + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0]]) + y = [1, 1, -1, -1] + reg = RidgeClassifier(class_weight=None) + reg.fit(X, y) + rega = RidgeClassifier(class_weight="balanced") + rega.fit(X, y) + assert len(rega.classes_) == 2 + assert_array_almost_equal(reg.coef_, rega.coef_) + assert_array_almost_equal(reg.intercept_, rega.intercept_) + + +@pytest.mark.parametrize("reg", (RidgeClassifier, RidgeClassifierCV)) +def test_class_weight_vs_sample_weight(reg): + """Check class_weights resemble sample_weights behavior.""" + + # Iris is balanced, so no effect expected for using 'balanced' weights + reg1 = reg() + reg1.fit(iris.data, iris.target) + reg2 = reg(class_weight="balanced") + reg2.fit(iris.data, iris.target) + assert_almost_equal(reg1.coef_, reg2.coef_) + + # Inflate importance of class 1, check against user-defined weights + sample_weight = np.ones(iris.target.shape) + sample_weight[iris.target == 1] *= 100 + class_weight = {0: 1.0, 1: 100.0, 2: 1.0} + reg1 = reg() + reg1.fit(iris.data, iris.target, sample_weight) + reg2 = reg(class_weight=class_weight) + reg2.fit(iris.data, iris.target) + assert_almost_equal(reg1.coef_, reg2.coef_) + + # Check that sample_weight and class_weight are multiplicative + reg1 = reg() + reg1.fit(iris.data, iris.target, sample_weight**2) + reg2 = reg(class_weight=class_weight) + reg2.fit(iris.data, iris.target, sample_weight) + assert_almost_equal(reg1.coef_, reg2.coef_) + + +def test_class_weights_cv(): + # Test class weights for cross validated ridge classifier. 
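+    # The unweighted fit below is only a smoke test; the assertion targets the
+    # fit where class 1 is strongly down-weighted.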
+ X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + reg = RidgeClassifierCV(class_weight=None, alphas=[0.01, 0.1, 1]) + reg.fit(X, y) + + # we give a small weights to class 1 + reg = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[0.01, 0.1, 1, 10]) + reg.fit(X, y) + + assert_array_equal(reg.predict([[-0.2, 2]]), np.array([-1])) + + +@pytest.mark.parametrize( + "scoring", [None, "neg_mean_squared_error", _mean_squared_error_callable] +) +def test_ridgecv_store_cv_values(scoring): + rng = np.random.RandomState(42) + + n_samples = 8 + n_features = 5 + x = rng.randn(n_samples, n_features) + alphas = [1e-1, 1e0, 1e1] + n_alphas = len(alphas) + + scoring_ = make_scorer(scoring) if callable(scoring) else scoring + + r = RidgeCV(alphas=alphas, cv=None, store_cv_values=True, scoring=scoring_) + + # with len(y.shape) == 1 + y = rng.randn(n_samples) + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_alphas) + + # with len(y.shape) == 2 + n_targets = 3 + y = rng.randn(n_samples, n_targets) + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_targets, n_alphas) + + r = RidgeCV(cv=3, store_cv_values=True, scoring=scoring) + with pytest.raises(ValueError, match="cv!=None and store_cv_values"): + r.fit(x, y) + + +@pytest.mark.parametrize("scoring", [None, "accuracy", _accuracy_callable]) +def test_ridge_classifier_cv_store_cv_values(scoring): + x = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = np.array([1, 1, 1, -1, -1]) + + n_samples = x.shape[0] + alphas = [1e-1, 1e0, 1e1] + n_alphas = len(alphas) + + scoring_ = make_scorer(scoring) if callable(scoring) else scoring + + r = RidgeClassifierCV( + alphas=alphas, cv=None, store_cv_values=True, scoring=scoring_ + ) + + # with len(y.shape) == 1 + n_targets = 1 + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_targets, n_alphas) + + # with len(y.shape) == 2 + y = np.array( + [[1, 1, 1, -1, -1], [1, -1, 1, -1, 1], [-1, -1, 1, -1, -1]] + ).transpose() + n_targets = y.shape[1] + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_targets, n_alphas) + + +@pytest.mark.parametrize("Estimator", [RidgeCV, RidgeClassifierCV]) +def test_ridgecv_alphas_conversion(Estimator): + rng = np.random.RandomState(0) + alphas = (0.1, 1.0, 10.0) + + n_samples, n_features = 5, 5 + if Estimator is RidgeCV: + y = rng.randn(n_samples) + else: + y = rng.randint(0, 2, n_samples) + X = rng.randn(n_samples, n_features) + + ridge_est = Estimator(alphas=alphas) + assert ( + ridge_est.alphas is alphas + ), f"`alphas` was mutated in `{Estimator.__name__}.__init__`" + + ridge_est.fit(X, y) + assert_array_equal(ridge_est.alphas, np.asarray(alphas)) + + +def test_ridgecv_sample_weight(): + rng = np.random.RandomState(0) + alphas = (0.1, 1.0, 10.0) + + # There are different algorithms for n_samples > n_features + # and the opposite, so test them both. 
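+    # (Presumably because the underlying ridge computation can follow different
+    # code paths, e.g. primal vs. dual/kernel formulations, depending on whether
+    # n_samples exceeds n_features.)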
+ for n_samples, n_features in ((6, 5), (5, 10)): + y = rng.randn(n_samples) + X = rng.randn(n_samples, n_features) + sample_weight = 1.0 + rng.rand(n_samples) + + cv = KFold(5) + ridgecv = RidgeCV(alphas=alphas, cv=cv) + ridgecv.fit(X, y, sample_weight=sample_weight) + + # Check using GridSearchCV directly + parameters = {"alpha": alphas} + gs = GridSearchCV(Ridge(), parameters, cv=cv) + gs.fit(X, y, sample_weight=sample_weight) + + assert ridgecv.alpha_ == gs.best_estimator_.alpha + assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_) + + +def test_raises_value_error_if_sample_weights_greater_than_1d(): + # Sample weights must be either scalar or 1D + + n_sampless = [2, 3] + n_featuress = [3, 2] + + rng = np.random.RandomState(42) + + for n_samples, n_features in zip(n_sampless, n_featuress): + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + sample_weights_OK = rng.randn(n_samples) ** 2 + 1 + sample_weights_OK_1 = 1.0 + sample_weights_OK_2 = 2.0 + sample_weights_not_OK = sample_weights_OK[:, np.newaxis] + sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :] + + ridge = Ridge(alpha=1) + + # make sure the "OK" sample weights actually work + ridge.fit(X, y, sample_weights_OK) + ridge.fit(X, y, sample_weights_OK_1) + ridge.fit(X, y, sample_weights_OK_2) + + def fit_ridge_not_ok(): + ridge.fit(X, y, sample_weights_not_OK) + + def fit_ridge_not_ok_2(): + ridge.fit(X, y, sample_weights_not_OK_2) + + err_msg = "Sample weights must be 1D array or scalar" + with pytest.raises(ValueError, match=err_msg): + fit_ridge_not_ok() + + err_msg = "Sample weights must be 1D array or scalar" + with pytest.raises(ValueError, match=err_msg): + fit_ridge_not_ok_2() + + +@pytest.mark.parametrize("n_samples,n_features", [[2, 3], [3, 2]]) +@pytest.mark.parametrize( + "sparse_container", + COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS, +) +def test_sparse_design_with_sample_weights(n_samples, n_features, sparse_container): + # Sample weights must work with sparse matrices + rng = np.random.RandomState(42) + + sparse_ridge = Ridge(alpha=1.0, fit_intercept=False) + dense_ridge = Ridge(alpha=1.0, fit_intercept=False) + + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + sample_weights = rng.randn(n_samples) ** 2 + 1 + X_sparse = sparse_container(X) + sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights) + dense_ridge.fit(X, y, sample_weight=sample_weights) + + assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_, decimal=6) + + +def test_ridgecv_int_alphas(): + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + # Integers + ridge = RidgeCV(alphas=(1, 10, 100)) + ridge.fit(X, y) + + +@pytest.mark.parametrize("Estimator", [RidgeCV, RidgeClassifierCV]) +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + ({"alphas": (1, -1, -100)}, ValueError, r"alphas\[1\] == -1, must be > 0.0"), + ( + {"alphas": (-0.1, -1.0, -10.0)}, + ValueError, + r"alphas\[0\] == -0.1, must be > 0.0", + ), + ( + {"alphas": (1, 1.0, "1")}, + TypeError, + r"alphas\[2\] must be an instance of float, not str", + ), + ], +) +def test_ridgecv_alphas_validation(Estimator, params, err_type, err_msg): + """Check the `alphas` validation in RidgeCV and RidgeClassifierCV.""" + + n_samples, n_features = 5, 5 + X = rng.randn(n_samples, n_features) + y = rng.randint(0, 2, n_samples) + + with pytest.raises(err_type, match=err_msg): + Estimator(**params).fit(X, y) + + 
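+# Note: the constraints on `alphas` checked above are enforced at fit time; for
+# instance RidgeCV(alphas=(1, -1, -100)).fit(X, y) fails with
+# "alphas[1] == -1, must be > 0.0", which is what the regex above matches.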
+@pytest.mark.parametrize("Estimator", [RidgeCV, RidgeClassifierCV]) +def test_ridgecv_alphas_scalar(Estimator): + """Check the case when `alphas` is a scalar. + This case was supported in the past when `alphas` where converted + into array in `__init__`. + We add this test to ensure backward compatibility. + """ + + n_samples, n_features = 5, 5 + X = rng.randn(n_samples, n_features) + if Estimator is RidgeCV: + y = rng.randn(n_samples) + else: + y = rng.randint(0, 2, n_samples) + + Estimator(alphas=1).fit(X, y) + + +def test_sparse_cg_max_iter(): + reg = Ridge(solver="sparse_cg", max_iter=1) + reg.fit(X_diabetes, y_diabetes) + assert reg.coef_.shape[0] == X_diabetes.shape[1] + + +@ignore_warnings +def test_n_iter(): + # Test that self.n_iter_ is correct. + n_targets = 2 + X, y = X_diabetes, y_diabetes + y_n = np.tile(y, (n_targets, 1)).T + + for max_iter in range(1, 4): + for solver in ("sag", "saga", "lsqr"): + reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12) + reg.fit(X, y_n) + assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets)) + + for solver in ("sparse_cg", "svd", "cholesky"): + reg = Ridge(solver=solver, max_iter=1, tol=1e-1) + reg.fit(X, y_n) + assert reg.n_iter_ is None + + +@pytest.mark.parametrize("solver", ["lsqr", "sparse_cg", "lbfgs", "auto"]) +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ridge_fit_intercept_sparse( + solver, with_sample_weight, global_random_seed, csr_container +): + """Check that ridge finds the same coefs and intercept on dense and sparse input + in the presence of sample weights. + + For now only sparse_cg and lbfgs can correctly fit an intercept + with sparse X with default tol and max_iter. + 'sag' is tested separately in test_ridge_fit_intercept_sparse_sag because it + requires more iterations and should raise a warning if default max_iter is used. 
+ Other solvers raise an exception, as checked in + test_ridge_fit_intercept_sparse_error + """ + positive = solver == "lbfgs" + X, y = _make_sparse_offset_regression( + n_features=20, random_state=global_random_seed, positive=positive + ) + + sample_weight = None + if with_sample_weight: + rng = np.random.RandomState(global_random_seed) + sample_weight = 1.0 + rng.uniform(size=X.shape[0]) + + # "auto" should switch to "sparse_cg" when X is sparse + # so the reference we use for both ("auto" and "sparse_cg") is + # Ridge(solver="sparse_cg"), fitted using the dense representation (note + # that "sparse_cg" can fit sparse or dense data) + dense_solver = "sparse_cg" if solver == "auto" else solver + dense_ridge = Ridge(solver=dense_solver, tol=1e-12, positive=positive) + sparse_ridge = Ridge(solver=solver, tol=1e-12, positive=positive) + + dense_ridge.fit(X, y, sample_weight=sample_weight) + sparse_ridge.fit(csr_container(X), y, sample_weight=sample_weight) + + assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_) + assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=5e-7) + + +@pytest.mark.parametrize("solver", ["saga", "svd", "cholesky"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ridge_fit_intercept_sparse_error(solver, csr_container): + X, y = _make_sparse_offset_regression(n_features=20, random_state=0) + X_csr = csr_container(X) + sparse_ridge = Ridge(solver=solver) + err_msg = "solver='{}' does not support".format(solver) + with pytest.raises(ValueError, match=err_msg): + sparse_ridge.fit(X_csr, y) + + +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ridge_fit_intercept_sparse_sag( + with_sample_weight, global_random_seed, csr_container +): + X, y = _make_sparse_offset_regression( + n_features=5, n_samples=20, random_state=global_random_seed, X_offset=5.0 + ) + if with_sample_weight: + rng = np.random.RandomState(global_random_seed) + sample_weight = 1.0 + rng.uniform(size=X.shape[0]) + else: + sample_weight = None + X_csr = csr_container(X) + + params = dict( + alpha=1.0, solver="sag", fit_intercept=True, tol=1e-10, max_iter=100000 + ) + dense_ridge = Ridge(**params) + sparse_ridge = Ridge(**params) + dense_ridge.fit(X, y, sample_weight=sample_weight) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + sparse_ridge.fit(X_csr, y, sample_weight=sample_weight) + assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_, rtol=1e-4) + assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=1e-4) + with pytest.warns(UserWarning, match='"sag" solver requires.*'): + Ridge(solver="sag", fit_intercept=True, tol=1e-3, max_iter=None).fit(X_csr, y) + + +@pytest.mark.parametrize("return_intercept", [False, True]) +@pytest.mark.parametrize("sample_weight", [None, np.ones(1000)]) +@pytest.mark.parametrize("container", [np.array] + CSR_CONTAINERS) +@pytest.mark.parametrize( + "solver", ["auto", "sparse_cg", "cholesky", "lsqr", "sag", "saga", "lbfgs"] +) +def test_ridge_regression_check_arguments_validity( + return_intercept, sample_weight, container, solver +): + """check if all combinations of arguments give valid estimations""" + + # test excludes 'svd' solver because it raises exception for sparse inputs + + rng = check_random_state(42) + X = rng.rand(1000, 3) + true_coefs = [1, 2, 0.1] + y = np.dot(X, true_coefs) + true_intercept = 0.0 + if return_intercept: + true_intercept = 10000.0 + y += true_intercept + X_testing = container(X) 
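+    # return_intercept=True is only handled by the 'sag' code path of
+    # ridge_regression (and by 'auto', which may dispatch to it), so all other
+    # solvers are expected to raise a ValueError below.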
+ + alpha, tol = 1e-3, 1e-6 + atol = 1e-3 if _IS_32BIT else 1e-4 + + positive = solver == "lbfgs" + + if solver not in ["sag", "auto"] and return_intercept: + with pytest.raises(ValueError, match="In Ridge, only 'sag' solver"): + ridge_regression( + X_testing, + y, + alpha=alpha, + solver=solver, + sample_weight=sample_weight, + return_intercept=return_intercept, + positive=positive, + tol=tol, + ) + return + + out = ridge_regression( + X_testing, + y, + alpha=alpha, + solver=solver, + sample_weight=sample_weight, + positive=positive, + return_intercept=return_intercept, + tol=tol, + ) + + if return_intercept: + coef, intercept = out + assert_allclose(coef, true_coefs, rtol=0, atol=atol) + assert_allclose(intercept, true_intercept, rtol=0, atol=atol) + else: + assert_allclose(out, true_coefs, rtol=0, atol=atol) + + +@pytest.mark.parametrize( + "solver", ["svd", "sparse_cg", "cholesky", "lsqr", "sag", "saga", "lbfgs"] +) +def test_dtype_match(solver): + rng = np.random.RandomState(0) + alpha = 1.0 + positive = solver == "lbfgs" + + n_samples, n_features = 6, 5 + X_64 = rng.randn(n_samples, n_features) + y_64 = rng.randn(n_samples) + X_32 = X_64.astype(np.float32) + y_32 = y_64.astype(np.float32) + + tol = 2 * np.finfo(np.float32).resolution + # Check type consistency 32bits + ridge_32 = Ridge( + alpha=alpha, solver=solver, max_iter=500, tol=tol, positive=positive + ) + ridge_32.fit(X_32, y_32) + coef_32 = ridge_32.coef_ + + # Check type consistency 64 bits + ridge_64 = Ridge( + alpha=alpha, solver=solver, max_iter=500, tol=tol, positive=positive + ) + ridge_64.fit(X_64, y_64) + coef_64 = ridge_64.coef_ + + # Do the actual checks at once for easier debug + assert coef_32.dtype == X_32.dtype + assert coef_64.dtype == X_64.dtype + assert ridge_32.predict(X_32).dtype == X_32.dtype + assert ridge_64.predict(X_64).dtype == X_64.dtype + assert_allclose(ridge_32.coef_, ridge_64.coef_, rtol=1e-4, atol=5e-4) + + +def test_dtype_match_cholesky(): + # Test different alphas in cholesky solver to ensure full coverage. + # This test is separated from test_dtype_match for clarity. 
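+    # Here alpha is an array with a distinct penalty per target, which
+    # exercises the code path of the cholesky solver where each target is
+    # solved with its own regularization, unlike the scalar-alpha test above.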
+    rng = np.random.RandomState(0)
+    alpha = np.array([1.0, 0.5])
+
+    n_samples, n_features, n_target = 6, 7, 2
+    X_64 = rng.randn(n_samples, n_features)
+    y_64 = rng.randn(n_samples, n_target)
+    X_32 = X_64.astype(np.float32)
+    y_32 = y_64.astype(np.float32)
+
+    # Check type consistency 32bits
+    ridge_32 = Ridge(alpha=alpha, solver="cholesky")
+    ridge_32.fit(X_32, y_32)
+    coef_32 = ridge_32.coef_
+
+    # Check type consistency 64 bits
+    ridge_64 = Ridge(alpha=alpha, solver="cholesky")
+    ridge_64.fit(X_64, y_64)
+    coef_64 = ridge_64.coef_
+
+    # Do all the checks at once, as it is easier to debug
+    assert coef_32.dtype == X_32.dtype
+    assert coef_64.dtype == X_64.dtype
+    assert ridge_32.predict(X_32).dtype == X_32.dtype
+    assert ridge_64.predict(X_64).dtype == X_64.dtype
+    assert_almost_equal(ridge_32.coef_, ridge_64.coef_, decimal=5)
+
+
+@pytest.mark.parametrize(
+    "solver", ["svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"]
+)
+@pytest.mark.parametrize("seed", range(1))
+def test_ridge_regression_dtype_stability(solver, seed):
+    random_state = np.random.RandomState(seed)
+    n_samples, n_features = 6, 5
+    X = random_state.randn(n_samples, n_features)
+    coef = random_state.randn(n_features)
+    y = np.dot(X, coef) + 0.01 * random_state.randn(n_samples)
+    alpha = 1.0
+    positive = solver == "lbfgs"
+    results = dict()
+    # XXX: Sparse CG seems to be far less numerically stable than the
+    # others, maybe we should not enable float32 for this one.
+    atol = 1e-3 if solver == "sparse_cg" else 1e-5
+    for current_dtype in (np.float32, np.float64):
+        results[current_dtype] = ridge_regression(
+            X.astype(current_dtype),
+            y.astype(current_dtype),
+            alpha=alpha,
+            solver=solver,
+            random_state=random_state,
+            sample_weight=None,
+            positive=positive,
+            max_iter=500,
+            tol=1e-10,
+            return_n_iter=False,
+            return_intercept=False,
+        )
+
+    assert results[np.float32].dtype == np.float32
+    assert results[np.float64].dtype == np.float64
+    assert_allclose(results[np.float32], results[np.float64], atol=atol)
+
+
+def test_ridge_sag_with_X_fortran():
+    # check that Fortran arrays are converted when using the SAG solver
+    X, y = make_regression(random_state=42)
+    # make sure X and y are not C-ordered arrays
+    X = np.asfortranarray(X)
+    X = X[::2, :]
+    y = y[::2]
+    Ridge(solver="sag").fit(X, y)
+
+
+@pytest.mark.parametrize(
+    "Classifier, params",
+    [
+        (RidgeClassifier, {}),
+        (RidgeClassifierCV, {"cv": None}),
+        (RidgeClassifierCV, {"cv": 3}),
+    ],
+)
+def test_ridgeclassifier_multilabel(Classifier, params):
+    """Check that multilabel classification is supported and gives meaningful
+    results."""
+    X, y = make_multilabel_classification(n_classes=1, random_state=0)
+    y = y.reshape(-1, 1)
+    Y = np.concatenate([y, y], axis=1)
+    clf = Classifier(**params).fit(X, Y)
+    Y_pred = clf.predict(X)
+
+    assert Y_pred.shape == Y.shape
+    assert_array_equal(Y_pred[:, 0], Y_pred[:, 1])
+
+
+@pytest.mark.parametrize("solver", ["auto", "lbfgs"])
+@pytest.mark.parametrize("fit_intercept", [True, False])
+@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0])
+def test_ridge_positive_regression_test(solver, fit_intercept, alpha):
+    """Test that positive Ridge finds true positive coefficients."""
+    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
+    coef = np.array([1, -10])
+    if fit_intercept:
+        intercept = 20
+        y = X.dot(coef) + intercept
+    else:
+        y = X.dot(coef)
+
+    model = Ridge(
+        alpha=alpha, positive=True, solver=solver, fit_intercept=fit_intercept
+    )
+    model.fit(X, y)
+    assert
np.all(model.coef_ >= 0) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0]) +def test_ridge_ground_truth_positive_test(fit_intercept, alpha): + """Test that Ridge w/wo positive converges to the same solution. + + Ridge with positive=True and positive=False must give the same + when the ground truth coefs are all positive. + """ + rng = np.random.RandomState(42) + X = rng.randn(300, 100) + coef = rng.uniform(0.1, 1.0, size=X.shape[1]) + if fit_intercept: + intercept = 1 + y = X @ coef + intercept + else: + y = X @ coef + y += rng.normal(size=X.shape[0]) * 0.01 + + results = [] + for positive in [True, False]: + model = Ridge( + alpha=alpha, positive=positive, fit_intercept=fit_intercept, tol=1e-10 + ) + results.append(model.fit(X, y).coef_) + assert_allclose(*results, atol=1e-6, rtol=0) + + +@pytest.mark.parametrize( + "solver", ["svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga"] +) +def test_ridge_positive_error_test(solver): + """Test input validation for positive argument in Ridge.""" + alpha = 0.1 + X = np.array([[1, 2], [3, 4]]) + coef = np.array([1, -1]) + y = X @ coef + + model = Ridge(alpha=alpha, positive=True, solver=solver, fit_intercept=False) + with pytest.raises(ValueError, match="does not support positive"): + model.fit(X, y) + + with pytest.raises(ValueError, match="only 'lbfgs' solver can be used"): + _, _ = ridge_regression( + X, y, alpha, positive=True, solver=solver, return_intercept=False + ) + + +@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0]) +def test_positive_ridge_loss(alpha): + """Check ridge loss consistency when positive argument is enabled.""" + X, y = make_regression(n_samples=300, n_features=300, random_state=42) + alpha = 0.10 + n_checks = 100 + + def ridge_loss(model, random_state=None, noise_scale=1e-8): + intercept = model.intercept_ + if random_state is not None: + rng = np.random.RandomState(random_state) + coef = model.coef_ + rng.uniform(0, noise_scale, size=model.coef_.shape) + else: + coef = model.coef_ + + return 0.5 * np.sum((y - X @ coef - intercept) ** 2) + 0.5 * alpha * np.sum( + coef**2 + ) + + model = Ridge(alpha=alpha).fit(X, y) + model_positive = Ridge(alpha=alpha, positive=True).fit(X, y) + + # Check 1: + # Loss for solution found by Ridge(positive=False) + # is lower than that for solution found by Ridge(positive=True) + loss = ridge_loss(model) + loss_positive = ridge_loss(model_positive) + assert loss <= loss_positive + + # Check 2: + # Loss for solution found by Ridge(positive=True) + # is lower than that for small random positive perturbation + # of the positive solution. 
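+    # Adding non-negative noise keeps the coefficients feasible (>= 0), and no
+    # feasible point can have a lower objective than the constrained minimum,
+    # so this loop is a numerical optimality check for the positive solution.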
+ for random_state in range(n_checks): + loss_perturbed = ridge_loss(model_positive, random_state=random_state) + assert loss_positive <= loss_perturbed + + +@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0]) +def test_lbfgs_solver_consistency(alpha): + """Test that LBGFS gets almost the same coef of svd when positive=False.""" + X, y = make_regression(n_samples=300, n_features=300, random_state=42) + y = np.expand_dims(y, 1) + alpha = np.asarray([alpha]) + config = { + "positive": False, + "tol": 1e-16, + "max_iter": 500000, + } + + coef_lbfgs = _solve_lbfgs(X, y, alpha, **config) + coef_cholesky = _solve_svd(X, y, alpha) + assert_allclose(coef_lbfgs, coef_cholesky, atol=1e-4, rtol=0) + + +def test_lbfgs_solver_error(): + """Test that LBFGS solver raises ConvergenceWarning.""" + X = np.array([[1, -1], [1, 1]]) + y = np.array([-1e10, 1e10]) + + model = Ridge( + alpha=0.01, + solver="lbfgs", + fit_intercept=False, + tol=1e-12, + positive=True, + max_iter=1, + ) + with pytest.warns(ConvergenceWarning, match="lbfgs solver did not converge"): + model.fit(X, y) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("data", ["tall", "wide"]) +@pytest.mark.parametrize("solver", SOLVERS + ["lbfgs"]) +def test_ridge_sample_weight_consistency( + fit_intercept, sparse_container, data, solver, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weights_invariance alone. + """ + # filter out solver that do not support sparse input + if sparse_container is not None: + if solver == "svd" or (solver in ("cholesky", "saga") and fit_intercept): + pytest.skip("unsupported configuration") + + # XXX: this test is quite sensitive to the seed used to generate the data: + # ideally we would like the test to pass for any global_random_seed but this is not + # the case at the moment. + rng = np.random.RandomState(42) + n_samples = 12 + if data == "tall": + n_features = n_samples // 2 + else: + n_features = n_samples * 2 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict( + fit_intercept=fit_intercept, + alpha=1.0, + solver=solver, + positive=(solver == "lbfgs"), + random_state=global_random_seed, # for sag/saga + tol=1e-12, + ) + + # 1) sample_weight=np.ones(..) should be equivalent to sample_weight=None + # same check as check_sample_weights_invariance(name, reg, kind="ones"), but we also + # test with sparse input. 
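+    # With unit weights the weighted objective
+    #     sum_i w_i * (y_i - x_i @ coef)**2 + alpha * ||coef||**2
+    # is literally the unweighted one, so coefficients (and intercept) must
+    # agree up to solver tolerance.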
+ reg = Ridge(**params).fit(X, y, sample_weight=None) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) setting elements of sample_weight to 0 is equivalent to removing these samples + # same check as check_sample_weights_invariance(name, reg, kind="zeros"), but we + # also test with sparse input + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + sample_weight[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + reg.fit(X[:-5, :], y[:-5], sample_weight=sample_weight[:-5]) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect + # Note: For models with penalty, scaling the penalty term might work. + reg2 = Ridge(**params).set_params(alpha=np.pi * params["alpha"]) + reg2.fit(X, y, sample_weight=np.pi * sample_weight) + if solver in ("sag", "saga") and not fit_intercept: + pytest.xfail(f"Solver {solver} does fail test for scaling of sample_weight.") + assert_allclose(reg2.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg2.intercept_, intercept) + + # 4) check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + if sparse_container is not None: + X = X.toarray() + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + if sparse_container is not None: + X = sparse_container(X) + X2 = sparse_container(X2) + reg1 = Ridge(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = Ridge(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_) + if fit_intercept: + assert_allclose(reg1.intercept_, reg2.intercept_) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py new file mode 100644 index 0000000000000000000000000000000000000000..96f8a79726833ccdc36585b25da4464e5486e809 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py @@ -0,0 +1,1026 @@ +# Authors: Danny Sullivan +# Tom Dupre la Tour +# +# License: BSD 3 clause + +import math +import re + +import numpy as np +import pytest +from scipy.special import logsumexp + +from sklearn._loss.loss import HalfMultinomialLoss +from sklearn.base import clone +from sklearn.datasets import load_iris, make_blobs, make_classification +from sklearn.linear_model import LogisticRegression, Ridge +from sklearn.linear_model._base import make_dataset +from sklearn.linear_model._linear_loss import LinearModelLoss +from sklearn.linear_model._sag import get_auto_step_size +from sklearn.linear_model._sag_fast import _multinomial_grad_loss_all_samples +from sklearn.preprocessing import LabelBinarizer, LabelEncoder +from sklearn.utils import check_random_state, compute_class_weight +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, +) +from 
sklearn.utils.extmath import row_norms +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() + + +# this is used for sag classification +def log_dloss(p, y): + z = p * y + # approximately equal and saves the computation of the log + if z > 18.0: + return math.exp(-z) * -y + if z < -18.0: + return -y + return -y / (math.exp(z) + 1.0) + + +def log_loss(p, y): + return np.mean(np.log(1.0 + np.exp(-y * p))) + + +# this is used for sag regression +def squared_dloss(p, y): + return p - y + + +def squared_loss(p, y): + return np.mean(0.5 * (p - y) * (p - y)) + + +# function for measuring the log loss +def get_pobj(w, alpha, myX, myy, loss): + w = w.ravel() + pred = np.dot(myX, w) + p = loss(pred, myy) + p += alpha * w.dot(w) / 2.0 + return p + + +def sag( + X, + y, + step_size, + alpha, + n_iter=1, + dloss=None, + sparse=False, + sample_weight=None, + fit_intercept=True, + saga=False, +): + n_samples, n_features = X.shape[0], X.shape[1] + + weights = np.zeros(X.shape[1]) + sum_gradient = np.zeros(X.shape[1]) + gradient_memory = np.zeros((n_samples, n_features)) + + intercept = 0.0 + intercept_sum_gradient = 0.0 + intercept_gradient_memory = np.zeros(n_samples) + + rng = np.random.RandomState(77) + decay = 1.0 + seen = set() + + # sparse data has a fixed decay of .01 + if sparse: + decay = 0.01 + + for epoch in range(n_iter): + for k in range(n_samples): + idx = int(rng.rand() * n_samples) + # idx = k + entry = X[idx] + seen.add(idx) + p = np.dot(entry, weights) + intercept + gradient = dloss(p, y[idx]) + if sample_weight is not None: + gradient *= sample_weight[idx] + update = entry * gradient + alpha * weights + gradient_correction = update - gradient_memory[idx] + sum_gradient += gradient_correction + gradient_memory[idx] = update + if saga: + weights -= gradient_correction * step_size * (1 - 1.0 / len(seen)) + + if fit_intercept: + gradient_correction = gradient - intercept_gradient_memory[idx] + intercept_gradient_memory[idx] = gradient + intercept_sum_gradient += gradient_correction + gradient_correction *= step_size * (1.0 - 1.0 / len(seen)) + if saga: + intercept -= ( + step_size * intercept_sum_gradient / len(seen) * decay + ) + gradient_correction + else: + intercept -= step_size * intercept_sum_gradient / len(seen) * decay + + weights -= step_size * sum_gradient / len(seen) + + return weights, intercept + + +def sag_sparse( + X, + y, + step_size, + alpha, + n_iter=1, + dloss=None, + sample_weight=None, + sparse=False, + fit_intercept=True, + saga=False, + random_state=0, +): + if step_size * alpha == 1.0: + raise ZeroDivisionError( + "Sparse sag does not handle the case step_size * alpha == 1" + ) + n_samples, n_features = X.shape[0], X.shape[1] + + weights = np.zeros(n_features) + sum_gradient = np.zeros(n_features) + last_updated = np.zeros(n_features, dtype=int) + gradient_memory = np.zeros(n_samples) + rng = check_random_state(random_state) + intercept = 0.0 + intercept_sum_gradient = 0.0 + wscale = 1.0 + decay = 1.0 + seen = set() + + c_sum = np.zeros(n_iter * n_samples) + + # sparse data has a fixed decay of .01 + if sparse: + decay = 0.01 + + counter = 0 + for epoch in range(n_iter): + for k in range(n_samples): + # idx = k + idx = int(rng.rand() * n_samples) + entry = X[idx] + seen.add(idx) + + if counter >= 1: + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter - 1] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter - 1] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + last_updated[j] = counter + + p = (wscale * 
np.dot(entry, weights)) + intercept + gradient = dloss(p, y[idx]) + + if sample_weight is not None: + gradient *= sample_weight[idx] + + update = entry * gradient + gradient_correction = update - (gradient_memory[idx] * entry) + sum_gradient += gradient_correction + if saga: + for j in range(n_features): + weights[j] -= ( + gradient_correction[j] + * step_size + * (1 - 1.0 / len(seen)) + / wscale + ) + + if fit_intercept: + gradient_correction = gradient - gradient_memory[idx] + intercept_sum_gradient += gradient_correction + gradient_correction *= step_size * (1.0 - 1.0 / len(seen)) + if saga: + intercept -= ( + step_size * intercept_sum_gradient / len(seen) * decay + ) + gradient_correction + else: + intercept -= step_size * intercept_sum_gradient / len(seen) * decay + + gradient_memory[idx] = gradient + + wscale *= 1.0 - alpha * step_size + if counter == 0: + c_sum[0] = step_size / (wscale * len(seen)) + else: + c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen)) + + if counter >= 1 and wscale < 1e-9: + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + last_updated[j] = counter + 1 + c_sum[counter] = 0 + weights *= wscale + wscale = 1.0 + + counter += 1 + + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter - 1] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter - 1] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + weights *= wscale + return weights, intercept + + +def get_step_size(X, alpha, fit_intercept, classification=True): + if classification: + return 4.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + 4.0 * alpha) + else: + return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha) + + +def test_classifier_matching(): + n_samples = 20 + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + y[y == 0] = -1 + alpha = 1.1 + fit_intercept = True + step_size = get_step_size(X, alpha, fit_intercept) + for solver in ["sag", "saga"]: + if solver == "sag": + n_iter = 80 + else: + # SAGA variance w.r.t. 
stream order is higher + n_iter = 300 + clf = LogisticRegression( + solver=solver, + fit_intercept=fit_intercept, + tol=1e-11, + C=1.0 / alpha / n_samples, + max_iter=n_iter, + random_state=10, + multi_class="ovr", + ) + clf.fit(X, y) + + weights, intercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + saga=solver == "saga", + ) + weights2, intercept2 = sag( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + saga=solver == "saga", + ) + weights = np.atleast_2d(weights) + intercept = np.atleast_1d(intercept) + weights2 = np.atleast_2d(weights2) + intercept2 = np.atleast_1d(intercept2) + + assert_array_almost_equal(weights, clf.coef_, decimal=9) + assert_array_almost_equal(intercept, clf.intercept_, decimal=9) + assert_array_almost_equal(weights2, clf.coef_, decimal=9) + assert_array_almost_equal(intercept2, clf.intercept_, decimal=9) + + +def test_regressor_matching(): + n_samples = 10 + n_features = 5 + + rng = np.random.RandomState(10) + X = rng.normal(size=(n_samples, n_features)) + true_w = rng.normal(size=n_features) + y = X.dot(true_w) + + alpha = 1.0 + n_iter = 100 + fit_intercept = True + + step_size = get_step_size(X, alpha, fit_intercept, classification=False) + clf = Ridge( + fit_intercept=fit_intercept, + tol=0.00000000001, + solver="sag", + alpha=alpha * n_samples, + max_iter=n_iter, + ) + clf.fit(X, y) + + weights1, intercept1 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + ) + weights2, intercept2 = sag( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + ) + + assert_allclose(weights1, clf.coef_) + assert_allclose(intercept1, clf.intercept_) + assert_allclose(weights2, clf.coef_) + assert_allclose(intercept2, clf.intercept_) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_pobj_matches_logistic_regression(csr_container): + """tests if the sag pobj matches log reg""" + n_samples = 100 + alpha = 1.0 + max_iter = 20 + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + + clf1 = LogisticRegression( + solver="sag", + fit_intercept=False, + tol=0.0000001, + C=1.0 / alpha / n_samples, + max_iter=max_iter, + random_state=10, + multi_class="ovr", + ) + clf2 = clone(clf1) + clf3 = LogisticRegression( + fit_intercept=False, + tol=0.0000001, + C=1.0 / alpha / n_samples, + max_iter=max_iter, + random_state=10, + multi_class="ovr", + ) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + clf3.fit(X, y) + + pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss) + pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss) + pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss) + + assert_array_almost_equal(pobj1, pobj2, decimal=4) + assert_array_almost_equal(pobj2, pobj3, decimal=4) + assert_array_almost_equal(pobj3, pobj1, decimal=4) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_pobj_matches_ridge_regression(csr_container): + """tests if the sag pobj matches ridge reg""" + n_samples = 100 + n_features = 10 + alpha = 1.0 + n_iter = 100 + fit_intercept = False + rng = np.random.RandomState(10) + X = rng.normal(size=(n_samples, n_features)) + true_w = rng.normal(size=n_features) + y = X.dot(true_w) + + clf1 = Ridge( + fit_intercept=fit_intercept, + 
tol=0.00000000001, + solver="sag", + alpha=alpha, + max_iter=n_iter, + random_state=42, + ) + clf2 = clone(clf1) + clf3 = Ridge( + fit_intercept=fit_intercept, + tol=0.00001, + solver="lsqr", + alpha=alpha, + max_iter=n_iter, + random_state=42, + ) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + clf3.fit(X, y) + + pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss) + pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss) + pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss) + + assert_array_almost_equal(pobj1, pobj2, decimal=4) + assert_array_almost_equal(pobj1, pobj3, decimal=4) + assert_array_almost_equal(pobj3, pobj2, decimal=4) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_regressor_computed_correctly(csr_container): + """tests if the sag regressor is computed correctly""" + alpha = 0.1 + n_features = 10 + n_samples = 40 + max_iter = 100 + tol = 0.000001 + fit_intercept = True + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + y = np.dot(X, w) + 2.0 + step_size = get_step_size(X, alpha, fit_intercept, classification=False) + + clf1 = Ridge( + fit_intercept=fit_intercept, + tol=tol, + solver="sag", + alpha=alpha * n_samples, + max_iter=max_iter, + random_state=rng, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + spweights1, spintercept1 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=max_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + random_state=rng, + ) + + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=max_iter, + dloss=squared_dloss, + sparse=True, + fit_intercept=fit_intercept, + random_state=rng, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights1.ravel(), decimal=3) + assert_almost_equal(clf1.intercept_, spintercept1, decimal=1) + + # TODO: uncomment when sparse Ridge with intercept will be fixed (#4710) + # assert_array_almost_equal(clf2.coef_.ravel(), + # spweights2.ravel(), + # decimal=3) + # assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)''' + + +def test_get_auto_step_size(): + X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64) + alpha = 1.2 + fit_intercept = False + # sum the squares of the second sample because that's the largest + max_squared_sum = 4 + 9 + 16 + max_squared_sum_ = row_norms(X, squared=True).max() + n_samples = X.shape[0] + assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4) + + for saga in [True, False]: + for fit_intercept in (True, False): + if saga: + L_sqr = max_squared_sum + alpha + int(fit_intercept) + L_log = (max_squared_sum + 4.0 * alpha + int(fit_intercept)) / 4.0 + mun_sqr = min(2 * n_samples * alpha, L_sqr) + mun_log = min(2 * n_samples * alpha, L_log) + step_size_sqr = 1 / (2 * L_sqr + mun_sqr) + step_size_log = 1 / (2 * L_log + mun_log) + else: + step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept)) + step_size_log = 4.0 / ( + max_squared_sum + 4.0 * alpha + int(fit_intercept) + ) + + step_size_sqr_ = get_auto_step_size( + max_squared_sum_, + alpha, + "squared", + fit_intercept, + n_samples=n_samples, + is_saga=saga, + ) + step_size_log_ = get_auto_step_size( + max_squared_sum_, + alpha, + "log", + fit_intercept, + n_samples=n_samples, + is_saga=saga, + ) + + assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4) + assert_almost_equal(step_size_log, step_size_log_, decimal=4) + + msg = "Unknown loss function 
for SAG solver, got wrong instead of" + with pytest.raises(ValueError, match=msg): + get_auto_step_size(max_squared_sum_, alpha, "wrong", fit_intercept) + + +@pytest.mark.parametrize("seed", range(3)) # locally tested with 1000 seeds +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_regressor(seed, csr_container): + """tests if the sag regressor performs well""" + xmin, xmax = -5, 5 + n_samples = 300 + tol = 0.001 + max_iter = 100 + alpha = 0.1 + rng = np.random.RandomState(seed) + X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) + + # simple linear function without noise + y = 0.5 * X.ravel() + + clf1 = Ridge( + tol=tol, + solver="sag", + max_iter=max_iter, + alpha=alpha * n_samples, + random_state=rng, + ) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + score1 = clf1.score(X, y) + score2 = clf2.score(X, y) + assert score1 > 0.98 + assert score2 > 0.98 + + # simple linear function with noise + y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() + + clf1 = Ridge(tol=tol, solver="sag", max_iter=max_iter, alpha=alpha * n_samples) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + score1 = clf1.score(X, y) + score2 = clf2.score(X, y) + assert score1 > 0.45 + assert score2 > 0.45 + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_classifier_computed_correctly(csr_container): + """tests if the binary classifier is computed correctly""" + alpha = 0.1 + n_samples = 50 + n_iter = 50 + tol = 0.00001 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + y_tmp = np.ones(n_samples) + y_tmp[y != classes[1]] = -1 + y = y_tmp + + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=n_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + spweights, spintercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sparse=True, + fit_intercept=fit_intercept, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2) + assert_almost_equal(clf1.intercept_, spintercept, decimal=1) + + assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2) + assert_almost_equal(clf2.intercept_, spintercept2, decimal=1) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_multiclass_computed_correctly(csr_container): + """tests if the multiclass classifier is computed correctly""" + alpha = 0.1 + n_samples = 20 + tol = 0.00001 + max_iter = 40 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + coef1 = [] + intercept1 = [] + coef2 = [] + intercept2 = [] + for 
cl in classes: + y_encoded = np.ones(n_samples) + y_encoded[y != cl] = -1 + + spweights1, spintercept1 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + dloss=log_dloss, + n_iter=max_iter, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + dloss=log_dloss, + n_iter=max_iter, + sparse=True, + fit_intercept=fit_intercept, + ) + coef1.append(spweights1) + intercept1.append(spintercept1) + + coef2.append(spweights2) + intercept2.append(spintercept2) + + coef1 = np.vstack(coef1) + intercept1 = np.array(intercept1) + coef2 = np.vstack(coef2) + intercept2 = np.array(intercept2) + + for i, cl in enumerate(classes): + assert_array_almost_equal(clf1.coef_[i].ravel(), coef1[i].ravel(), decimal=2) + assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1) + + assert_array_almost_equal(clf2.coef_[i].ravel(), coef2[i].ravel(), decimal=2) + assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_classifier_results(csr_container): + """tests if classifier results match target""" + alpha = 0.1 + n_features = 20 + n_samples = 10 + tol = 0.01 + max_iter = 200 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + y = np.dot(X, w) + y = np.sign(y) + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + pred1 = clf1.predict(X) + pred2 = clf2.predict(X) + assert_almost_equal(pred1, y, decimal=12) + assert_almost_equal(pred2, y, decimal=12) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_binary_classifier_class_weight(csr_container): + """tests binary classifier with classweights for each class""" + alpha = 0.1 + n_samples = 50 + n_iter = 20 + tol = 0.00001 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + y_tmp = np.ones(n_samples) + y_tmp[y != classes[1]] = -1 + y = y_tmp + + class_weight = {1: 0.45, -1: 0.55} + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=n_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + class_weight=class_weight, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + le = LabelEncoder() + class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y) + sample_weight = class_weight_[le.fit_transform(y)] + spweights, spintercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sample_weight=sample_weight, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sparse=True, + sample_weight=sample_weight, + fit_intercept=fit_intercept, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2) + assert_almost_equal(clf1.intercept_, spintercept, decimal=1) + + assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2) + assert_almost_equal(clf2.intercept_, spintercept2, decimal=1) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", 
CSR_CONTAINERS) +def test_multiclass_classifier_class_weight(csr_container): + """tests multiclass with classweights for each class""" + alpha = 0.1 + n_samples = 20 + tol = 0.00001 + max_iter = 50 + class_weight = {0: 0.45, 1: 0.55, 2: 0.75} + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + class_weight=class_weight, + ) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + le = LabelEncoder() + class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y) + sample_weight = class_weight_[le.fit_transform(y)] + + coef1 = [] + intercept1 = [] + coef2 = [] + intercept2 = [] + for cl in classes: + y_encoded = np.ones(n_samples) + y_encoded[y != cl] = -1 + + spweights1, spintercept1 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + n_iter=max_iter, + dloss=log_dloss, + sample_weight=sample_weight, + ) + spweights2, spintercept2 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + n_iter=max_iter, + dloss=log_dloss, + sample_weight=sample_weight, + sparse=True, + ) + coef1.append(spweights1) + intercept1.append(spintercept1) + coef2.append(spweights2) + intercept2.append(spintercept2) + + coef1 = np.vstack(coef1) + intercept1 = np.array(intercept1) + coef2 = np.vstack(coef2) + intercept2 = np.array(intercept2) + + for i, cl in enumerate(classes): + assert_array_almost_equal(clf1.coef_[i].ravel(), coef1[i].ravel(), decimal=2) + assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1) + + assert_array_almost_equal(clf2.coef_[i].ravel(), coef2[i].ravel(), decimal=2) + assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1) + + +def test_classifier_single_class(): + """tests if ValueError is thrown with only one class""" + X = [[1, 2], [3, 4]] + y = [1, 1] + + msg = "This solver needs samples of at least 2 classes in the data" + with pytest.raises(ValueError, match=msg): + LogisticRegression(solver="sag").fit(X, y) + + +def test_step_size_alpha_error(): + X = [[0, 0], [0, 0]] + y = [1, -1] + fit_intercept = False + alpha = 1.0 + msg = re.escape( + "Current sag implementation does not handle the case" + " step_size * alpha_scaled == 1" + ) + + clf1 = LogisticRegression(solver="sag", C=1.0 / alpha, fit_intercept=fit_intercept) + with pytest.raises(ZeroDivisionError, match=msg): + clf1.fit(X, y) + + clf2 = Ridge(fit_intercept=fit_intercept, solver="sag", alpha=alpha) + with pytest.raises(ZeroDivisionError, match=msg): + clf2.fit(X, y) + + +def test_multinomial_loss(): + # test if the multinomial loss and gradient computations are consistent + X, y = iris.data, iris.target.astype(np.float64) + n_samples, n_features = X.shape + n_classes = len(np.unique(y)) + + rng = check_random_state(42) + weights = rng.randn(n_features, n_classes) + intercept = rng.randn(n_classes) + sample_weights = np.abs(rng.randn(n_samples)) + + # compute loss and gradient like in multinomial SAG + dataset, _ = make_dataset(X, y, sample_weights, random_state=42) + loss_1, grad_1 = _multinomial_grad_loss_all_samples( + dataset, weights, intercept, n_samples, n_features, n_classes + ) + # compute loss and gradient like in multinomial LogisticRegression + loss = LinearModelLoss( + base_loss=HalfMultinomialLoss(n_classes=n_classes), + 
fit_intercept=True, + ) + weights_intercept = np.vstack((weights, intercept)).T + loss_2, grad_2 = loss.loss_gradient( + weights_intercept, X, y, l2_reg_strength=0.0, sample_weight=sample_weights + ) + grad_2 = grad_2[:, :-1].T + # convert to same convention, i.e. LinearModelLoss uses average(loss, weight=sw) + loss_2 *= np.sum(sample_weights) + grad_2 *= np.sum(sample_weights) + + # comparison + assert_array_almost_equal(grad_1, grad_2) + assert_almost_equal(loss_1, loss_2) + + +def test_multinomial_loss_ground_truth(): + # n_samples, n_features, n_classes = 4, 2, 3 + n_classes = 3 + X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]]) + y = np.array([0, 1, 2, 0], dtype=np.float64) + lbin = LabelBinarizer() + Y_bin = lbin.fit_transform(y) + + weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]]) + intercept = np.array([1.0, 0, -0.2]) + sample_weights = np.array([0.8, 1, 1, 0.8]) + + prediction = np.dot(X, weights) + intercept + logsumexp_prediction = logsumexp(prediction, axis=1) + p = prediction - logsumexp_prediction[:, np.newaxis] + loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum() + diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin) + grad_1 = np.dot(X.T, diff) + + loss = LinearModelLoss( + base_loss=HalfMultinomialLoss(n_classes=n_classes), + fit_intercept=True, + ) + weights_intercept = np.vstack((weights, intercept)).T + loss_2, grad_2 = loss.loss_gradient( + weights_intercept, X, y, l2_reg_strength=0.0, sample_weight=sample_weights + ) + grad_2 = grad_2[:, :-1].T + # convert to same convention, i.e. LinearModelLoss uses average(loss, weight=sw) + loss_2 *= np.sum(sample_weights) + grad_2 *= np.sum(sample_weights) + + assert_almost_equal(loss_1, loss_2) + assert_array_almost_equal(grad_1, grad_2) + + # ground truth + loss_gt = 11.680360354325961 + grad_gt = np.array( + [[-0.557487, -1.619151, +2.176638], [-0.903942, +5.258745, -4.354803]] + ) + assert_almost_equal(loss_1, loss_gt) + assert_array_almost_equal(grad_1, grad_gt) + + +@pytest.mark.parametrize("solver", ["sag", "saga"]) +def test_sag_classifier_raises_error(solver): + # Following #13316, the error handling behavior changed in cython sag. This + # is simply a non-regression test to make sure numerical errors are + # properly raised. 
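The ground-truth loss above (loss_gt ~ 11.680360) can be re-derived with plain NumPy/SciPy. A minimal standalone sketch, with the arrays copied from test_multinomial_loss_ground_truth:

    import numpy as np
    from scipy.special import logsumexp

    X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
    Y_bin = np.eye(3)[[0, 1, 2, 0]]                  # one-hot encoding of y = [0, 1, 2, 0]
    weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
    intercept = np.array([1.0, 0.0, -0.2])
    sw = np.array([0.8, 1.0, 1.0, 0.8])

    z = X @ weights + intercept                      # raw scores, shape (4, 3)
    log_p = z - logsumexp(z, axis=1, keepdims=True)  # log of the softmax probabilities
    loss = -(sw[:, None] * log_p * Y_bin).sum()      # weighted negative log-likelihood
    grad = X.T @ (sw[:, None] * (np.exp(log_p) - Y_bin))
    print(round(loss, 6))                            # ~11.68036, matching loss_gt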
+ + # Train a classifier on a simple problem + rng = np.random.RandomState(42) + X, y = make_classification(random_state=rng) + clf = LogisticRegression(solver=solver, random_state=rng, warm_start=True) + clf.fit(X, y) + + # Trigger a numerical error by: + # - corrupting the fitted coefficients of the classifier + # - fit it again starting from its current state thanks to warm_start + clf.coef_[:] = np.nan + + with pytest.raises(ValueError, match="Floating-point under-/overflow"): + clf.fit(X, y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sparse_coordinate_descent.py b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sparse_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..1aab9babeeb40fcc3eac3f443c1ba7a9e1bdf9d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sparse_coordinate_descent.py @@ -0,0 +1,384 @@ +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose + +from sklearn.datasets import make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ElasticNet, ElasticNetCV, Lasso, LassoCV +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + create_memmap_backed_data, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, LIL_CONTAINERS + + +def test_sparse_coef(): + # Check that the sparse_coef property works + clf = ElasticNet() + clf.coef_ = [1, 2, 3] + + assert sp.issparse(clf.sparse_coef_) + assert clf.sparse_coef_.toarray().tolist()[0] == clf.coef_ + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_lasso_zero(csc_container): + # Check that the sparse lasso can handle zero data without crashing + X = csc_container((3, 1)) + y = [0, 0, 0] + T = np.array([[1], [2], [3]]) + clf = Lasso().fit(X, y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0]) + assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_enet_toy_list_input(with_sample_weight, csc_container): + # Test ElasticNet for various values of alpha and l1_ratio with list X + + X = np.array([[-1], [0], [1]]) + X = csc_container(X) + Y = [-1, 0, 1] # just a straight line + T = np.array([[2], [3], [4]]) # test sample + if with_sample_weight: + sw = np.array([2.0, 2, 2]) + else: + sw = None + + # this should be the same as unregularized least squares + clf = ElasticNet(alpha=0, l1_ratio=1.0) + # catch warning about alpha=0. + # this is discouraged but should work. 
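As the comment above notes, alpha=0 removes the penalty entirely, so the fit reduces to ordinary least squares; scikit-learn warns in that case and points to LinearRegression instead. A minimal sketch on the same toy data, for illustration only:

    import numpy as np
    from sklearn.linear_model import LinearRegression

    X = np.array([[-1], [0], [1]])
    Y = [-1, 0, 1]
    ols = LinearRegression().fit(X, Y)
    print(ols.coef_, ols.predict([[2], [3], [4]]))   # [1.] and [2. 3. 4.]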
+ ignore_warnings(clf.fit)(X, Y, sample_weight=sw) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.3) + clf.fit(X, Y, sample_weight=sw) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.5) + clf.fit(X, Y, sample_weight=sw) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.45454], 3) + assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) + assert_almost_equal(clf.dual_gap_, 0) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_enet_toy_explicit_sparse_input(lil_container): + # Test ElasticNet for various values of alpha and l1_ratio with sparse X + f = ignore_warnings + # training samples + X = lil_container((3, 1)) + X[0, 0] = -1 + # X[1, 0] = 0 + X[2, 0] = 1 + Y = [-1, 0, 1] # just a straight line (the identity function) + + # test samples + T = lil_container((3, 1)) + T[0, 0] = 2 + T[1, 0] = 3 + T[2, 0] = 4 + + # this should be the same as lasso + clf = ElasticNet(alpha=0, l1_ratio=1.0) + f(clf.fit)(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.3) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.45454], 3) + assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) + assert_almost_equal(clf.dual_gap_, 0) + + +def make_sparse_data( + sparse_container, + n_samples=100, + n_features=100, + n_informative=10, + seed=42, + positive=False, + n_targets=1, +): + random_state = np.random.RandomState(seed) + + # build an ill-posed linear regression problem with many noisy features and + # comparatively few samples + + # generate a ground truth model + w = random_state.randn(n_features, n_targets) + w[n_informative:] = 0.0 # only the top features are impacting the model + if positive: + w = np.abs(w) + + X = random_state.randn(n_samples, n_features) + rnd = random_state.uniform(size=(n_samples, n_features)) + X[rnd > 0.5] = 0.0 # 50% of zeros in input signal + + # generate training ground truth labels + y = np.dot(X, w) + X = sparse_container(X) + if n_targets == 1: + y = np.ravel(y) + return X, y + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +@pytest.mark.parametrize( + "alpha, fit_intercept, positive", + [(0.1, False, False), (0.1, True, False), (1e-3, False, True), (1e-3, True, True)], +) +def test_sparse_enet_not_as_toy_dataset(csc_container, alpha, fit_intercept, positive): + n_samples, n_features, max_iter = 100, 100, 1000 + n_informative = 10 + + X, y = make_sparse_data( + csc_container, n_samples, n_features, n_informative, positive=positive + ) + + X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2] + y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2] + + s_clf = ElasticNet( + alpha=alpha, + l1_ratio=0.8, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=1e-7, + positive=positive, + warm_start=True, + ) + s_clf.fit(X_train, y_train) + 
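The toy coefficients asserted above (0.50819 for alpha=0.5, l1_ratio=0.3 and 0.45454 for l1_ratio=0.5) follow from the one-dimensional coordinate update of scikit-learn's documented elastic-net objective 1/(2n) * ||y - Xw||^2 + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2. A minimal sketch of that closed form:

    import numpy as np

    x = np.array([-1.0, 0.0, 1.0])
    y = np.array([-1.0, 0.0, 1.0])
    n = len(x)

    def enet_1d(alpha, l1_ratio):
        # soft-thresholded solution of the single-coefficient elastic-net problem
        rho = x @ y / n
        l1 = alpha * l1_ratio
        l2 = alpha * (1.0 - l1_ratio)
        return np.sign(rho) * max(abs(rho) - l1, 0.0) / (x @ x / n + l2)

    print(enet_1d(0.5, 0.3), enet_1d(0.5, 0.5))      # ~0.5082 and ~0.4545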
+ assert_almost_equal(s_clf.dual_gap_, 0, 4) + assert s_clf.score(X_test, y_test) > 0.85 + + # check the convergence is the same as the dense version + d_clf = ElasticNet( + alpha=alpha, + l1_ratio=0.8, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=1e-7, + positive=positive, + warm_start=True, + ) + d_clf.fit(X_train.toarray(), y_train) + + assert_almost_equal(d_clf.dual_gap_, 0, 4) + assert d_clf.score(X_test, y_test) > 0.85 + + assert_almost_equal(s_clf.coef_, d_clf.coef_, 5) + assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5) + + # check that the coefs are sparse + assert np.sum(s_clf.coef_ != 0.0) < 2 * n_informative + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_lasso_not_as_toy_dataset(csc_container): + n_samples = 100 + max_iter = 1000 + n_informative = 10 + X, y = make_sparse_data( + csc_container, n_samples=n_samples, n_informative=n_informative + ) + + X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2] + y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2] + + s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) + s_clf.fit(X_train, y_train) + assert_almost_equal(s_clf.dual_gap_, 0, 4) + assert s_clf.score(X_test, y_test) > 0.85 + + # check the convergence is the same as the dense version + d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) + d_clf.fit(X_train.toarray(), y_train) + assert_almost_equal(d_clf.dual_gap_, 0, 4) + assert d_clf.score(X_test, y_test) > 0.85 + + # check that the coefs are sparse + assert np.sum(s_clf.coef_ != 0.0) == n_informative + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_enet_multitarget(csc_container): + n_targets = 3 + X, y = make_sparse_data(csc_container, n_targets=n_targets) + + estimator = ElasticNet(alpha=0.01, precompute=False) + # XXX: There is a bug when precompute is not False! 
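The sparsity assertions above rely on the L1 part of the penalty driving most coefficients exactly to zero when only a few features are informative. A minimal dense sketch of that effect (illustrative settings, not taken from these tests):

    import numpy as np
    from sklearn.linear_model import Lasso

    rng = np.random.RandomState(42)
    X = rng.randn(100, 100)
    w_true = np.zeros(100)
    w_true[:10] = rng.randn(10)                 # only 10 informative features
    y = X @ w_true

    coef = Lasso(alpha=0.1, fit_intercept=False).fit(X, y).coef_
    print(np.sum(coef != 0.0))                  # a small count, far below 100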
+ estimator.fit(X, y) + coef, intercept, dual_gap = ( + estimator.coef_, + estimator.intercept_, + estimator.dual_gap_, + ) + + for k in range(n_targets): + estimator.fit(X, y[:, k]) + assert_array_almost_equal(coef[k, :], estimator.coef_) + assert_array_almost_equal(intercept[k], estimator.intercept_) + assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_path_parameters(csc_container): + X, y = make_sparse_data(csc_container) + max_iter = 50 + n_alphas = 10 + clf = ElasticNetCV( + n_alphas=n_alphas, + eps=1e-3, + max_iter=max_iter, + l1_ratio=0.5, + fit_intercept=False, + ) + ignore_warnings(clf.fit)(X, y) # new params + assert_almost_equal(0.5, clf.l1_ratio) + assert n_alphas == clf.n_alphas + assert n_alphas == len(clf.alphas_) + sparse_mse_path = clf.mse_path_ + ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data + assert_almost_equal(clf.mse_path_, sparse_mse_path) + + +@pytest.mark.parametrize("Model", [Lasso, ElasticNet, LassoCV, ElasticNetCV]) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("n_samples, n_features", [(24, 6), (6, 24)]) +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_dense_equality( + Model, fit_intercept, n_samples, n_features, with_sample_weight, csc_container +): + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + effective_rank=n_features // 2, + n_informative=n_features // 2, + bias=4 * fit_intercept, + noise=1, + random_state=42, + ) + if with_sample_weight: + sw = np.abs(np.random.RandomState(42).normal(scale=10, size=y.shape)) + else: + sw = None + Xs = csc_container(X) + params = {"fit_intercept": fit_intercept} + reg_dense = Model(**params).fit(X, y, sample_weight=sw) + reg_sparse = Model(**params).fit(Xs, y, sample_weight=sw) + if fit_intercept: + assert reg_sparse.intercept_ == pytest.approx(reg_dense.intercept_) + # balance property + assert np.average(reg_sparse.predict(X), weights=sw) == pytest.approx( + np.average(y, weights=sw) + ) + assert_allclose(reg_sparse.coef_, reg_dense.coef_) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_same_output_sparse_dense_lasso_and_enet_cv(csc_container): + X, y = make_sparse_data(csc_container, n_samples=40, n_features=10) + clfs = ElasticNetCV(max_iter=100) + clfs.fit(X, y) + clfd = ElasticNetCV(max_iter=100) + clfd.fit(X.toarray(), y) + assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) + assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) + assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) + assert_array_almost_equal(clfs.alphas_, clfd.alphas_) + + clfs = LassoCV(max_iter=100, cv=4) + clfs.fit(X, y) + clfd = LassoCV(max_iter=100, cv=4) + clfd.fit(X.toarray(), y) + assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) + assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) + assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) + assert_array_almost_equal(clfs.alphas_, clfd.alphas_) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_same_multiple_output_sparse_dense(coo_container): + l = ElasticNet() + X = [ + [0, 1, 2, 3, 4], + [0, 2, 5, 8, 11], + [9, 10, 11, 12, 13], + [10, 11, 12, 13, 14], + ] + y = [ + [1, 2, 3, 4, 5], + [1, 3, 6, 9, 12], + [10, 11, 12, 13, 14], + [11, 12, 13, 14, 15], + ] + l.fit(X, y) + sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1) + predict_dense = l.predict(sample) + + l_sp = 
ElasticNet() + X_sp = coo_container(X) + l_sp.fit(X_sp, y) + sample_sparse = coo_container(sample) + predict_sparse = l_sp.predict(sample_sparse) + + assert_array_almost_equal(predict_sparse, predict_dense) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_enet_coordinate_descent(csc_container): + """Test that a warning is issued if model does not converge""" + clf = Lasso(max_iter=2) + n_samples = 5 + n_features = 2 + X = csc_container((n_samples, n_features)) * 1e50 + y = np.ones(n_samples) + warning_message = ( + "Objective did not converge. You might want " + "to increase the number of iterations." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, y) + + +@pytest.mark.parametrize("copy_X", (True, False)) +def test_sparse_read_only_buffer(copy_X): + """Test that sparse coordinate descent works for read-only buffers""" + rng = np.random.RandomState(0) + + clf = ElasticNet(alpha=0.1, copy_X=copy_X, random_state=rng) + X = sp.random(100, 20, format="csc", random_state=rng) + + # Make X.data read-only + X.data = create_memmap_backed_data(X.data) + + y = rng.rand(100) + clf.fit(X, y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py new file mode 100644 index 0000000000000000000000000000000000000000..e330cd3960aebeb3078822432aa11a3826d30387 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py @@ -0,0 +1,456 @@ +from functools import partial + +import numpy as np + +from sklearn.base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + RegressorMixin, + TransformerMixin, + clone, +) +from sklearn.metrics._scorer import _Scorer, mean_squared_error +from sklearn.model_selection import BaseCrossValidator +from sklearn.model_selection._split import GroupsConsumerMixin +from sklearn.utils._metadata_requests import ( + SIMPLE_METHODS, +) +from sklearn.utils.metadata_routing import ( + MetadataRouter, + process_routing, +) +from sklearn.utils.multiclass import _check_partial_fit_first_call + + +def record_metadata(obj, method, record_default=True, **kwargs): + """Utility function to store passed metadata to a method. + + If record_default is False, kwargs whose values are "default" are skipped. + This is so that checks on keyword arguments whose default was not changed + are skipped. + + """ + if not hasattr(obj, "_records"): + obj._records = {} + if not record_default: + kwargs = { + key: val + for key, val in kwargs.items() + if not isinstance(val, str) or (val != "default") + } + obj._records[method] = kwargs + + +def check_recorded_metadata(obj, method, split_params=tuple(), **kwargs): + """Check whether the expected metadata is passed to the object's method. + + Parameters + ---------- + obj : estimator object + sub-estimator to check routed params for + method : str + sub-estimator's method where metadata is routed to + split_params : tuple, default=empty + specifies any parameters which are to be checked as being a subset + of the original values. 
+ """ + records = getattr(obj, "_records", dict()).get(method, dict()) + assert set(kwargs.keys()) == set(records.keys()) + for key, value in kwargs.items(): + recorded_value = records[key] + # The following condition is used to check for any specified parameters + # being a subset of the original values + if key in split_params and recorded_value is not None: + assert np.isin(recorded_value, value).all() + else: + assert recorded_value is value + + +record_metadata_not_default = partial(record_metadata, record_default=False) + + +def assert_request_is_empty(metadata_request, exclude=None): + """Check if a metadata request dict is empty. + + One can exclude a method or a list of methods from the check using the + ``exclude`` parameter. If metadata_request is a MetadataRouter, then + ``exclude`` can be of the form ``{"object" : [method, ...]}``. + """ + if isinstance(metadata_request, MetadataRouter): + for name, route_mapping in metadata_request: + if exclude is not None and name in exclude: + _exclude = exclude[name] + else: + _exclude = None + assert_request_is_empty(route_mapping.router, exclude=_exclude) + return + + exclude = [] if exclude is None else exclude + for method in SIMPLE_METHODS: + if method in exclude: + continue + mmr = getattr(metadata_request, method) + props = [ + prop + for prop, alias in mmr.requests.items() + if isinstance(alias, str) or alias is not None + ] + assert not props + + +def assert_request_equal(request, dictionary): + for method, requests in dictionary.items(): + mmr = getattr(request, method) + assert mmr.requests == requests + + empty_methods = [method for method in SIMPLE_METHODS if method not in dictionary] + for method in empty_methods: + assert not len(getattr(request, method).requests) + + +class _Registry(list): + # This list is used to get a reference to the sub-estimators, which are not + # necessarily stored on the metaestimator. We need to override __deepcopy__ + # because the sub-estimators are probably cloned, which would result in a + # new copy of the list, but we need copy and deep copy both to return the + # same instance. + def __deepcopy__(self, memo): + return self + + def __copy__(self): + return self + + +class ConsumingRegressor(RegressorMixin, BaseEstimator): + """A regressor consuming metadata. + + Parameters + ---------- + registry : list, default=None + If a list, the estimator will append itself to the list in order to have + a reference to the estimator later on. Since that reference is not + required in all tests, registration can be skipped by leaving this value + as None. 
+ """ + + def __init__(self, registry=None): + self.registry = registry + + def partial_fit(self, X, y, sample_weight="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "partial_fit", sample_weight=sample_weight, metadata=metadata + ) + return self + + def fit(self, X, y, sample_weight="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "fit", sample_weight=sample_weight, metadata=metadata + ) + return self + + def predict(self, X, sample_weight="default", metadata="default"): + pass # pragma: no cover + + # when needed, uncomment the implementation + # record_metadata_not_default( + # self, "predict", sample_weight=sample_weight, metadata=metadata + # ) + # return np.zeros(shape=(len(X),)) + + +class NonConsumingClassifier(ClassifierMixin, BaseEstimator): + """A classifier which accepts no metadata on any method.""" + + def __init__(self, alpha=0.0): + self.alpha = alpha + + def fit(self, X, y): + self.classes_ = np.unique(y) + return self + + def partial_fit(self, X, y, classes=None): + return self + + def decision_function(self, X): + return self.predict(X) + + def predict(self, X): + return np.ones(len(X)) + + +class NonConsumingRegressor(RegressorMixin, BaseEstimator): + """A classifier which accepts no metadata on any method.""" + + def fit(self, X, y): + return self + + def partial_fit(self, X, y): + return self + + def predict(self, X): + return np.ones(len(X)) # pragma: no cover + + +class ConsumingClassifier(ClassifierMixin, BaseEstimator): + """A classifier consuming metadata. + + Parameters + ---------- + registry : list, default=None + If a list, the estimator will append itself to the list in order to have + a reference to the estimator later on. Since that reference is not + required in all tests, registration can be skipped by leaving this value + as None. + + alpha : float, default=0 + This parameter is only used to test the ``*SearchCV`` objects, and + doesn't do anything. 
+ """ + + def __init__(self, registry=None, alpha=0.0): + self.alpha = alpha + self.registry = registry + + def partial_fit( + self, X, y, classes=None, sample_weight="default", metadata="default" + ): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "partial_fit", sample_weight=sample_weight, metadata=metadata + ) + _check_partial_fit_first_call(self, classes) + return self + + def fit(self, X, y, sample_weight="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "fit", sample_weight=sample_weight, metadata=metadata + ) + self.classes_ = np.unique(y) + return self + + def predict(self, X, sample_weight="default", metadata="default"): + record_metadata_not_default( + self, "predict", sample_weight=sample_weight, metadata=metadata + ) + return np.zeros(shape=(len(X),)) + + def predict_proba(self, X, sample_weight="default", metadata="default"): + pass # pragma: no cover + + # uncomment when needed + # record_metadata_not_default( + # self, "predict_proba", sample_weight=sample_weight, metadata=metadata + # ) + # return np.asarray([[0.0, 1.0]] * len(X)) + + def predict_log_proba(self, X, sample_weight="default", metadata="default"): + pass # pragma: no cover + + # uncomment when needed + # record_metadata_not_default( + # self, "predict_log_proba", sample_weight=sample_weight, metadata=metadata + # ) + # return np.zeros(shape=(len(X), 2)) + + def decision_function(self, X, sample_weight="default", metadata="default"): + record_metadata_not_default( + self, "predict_proba", sample_weight=sample_weight, metadata=metadata + ) + return np.zeros(shape=(len(X),)) + + +class ConsumingTransformer(TransformerMixin, BaseEstimator): + """A transformer which accepts metadata on fit and transform. + + Parameters + ---------- + registry : list, default=None + If a list, the estimator will append itself to the list in order to have + a reference to the estimator later on. Since that reference is not + required in all tests, registration can be skipped by leaving this value + as None. + """ + + def __init__(self, registry=None): + self.registry = registry + + def fit(self, X, y=None, sample_weight=None, metadata=None): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "fit", sample_weight=sample_weight, metadata=metadata + ) + return self + + def transform(self, X, sample_weight=None, metadata=None): + record_metadata( + self, "transform", sample_weight=sample_weight, metadata=metadata + ) + return X + + def fit_transform(self, X, y, sample_weight=None, metadata=None): + # implementing ``fit_transform`` is necessary since + # ``TransformerMixin.fit_transform`` doesn't route any metadata to + # ``transform``, while here we want ``transform`` to receive + # ``sample_weight`` and ``metadata``. 
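A usage sketch of the recording helpers defined earlier in this module (hypothetical data): the consumer stores whatever keyword arguments actually reach fit, and check_recorded_metadata then asserts that exactly those were seen.

    import numpy as np

    X = np.ones((4, 2))
    y = np.array([0.0, 1.0, 0.0, 1.0])
    sw = np.array([1.0, 2.0, 1.0, 2.0])

    reg = ConsumingRegressor()                       # defined above in this module
    reg.fit(X, y, sample_weight=sw, metadata="mark")
    check_recorded_metadata(reg, "fit", sample_weight=sw, metadata="mark")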
+ record_metadata( + self, "fit_transform", sample_weight=sample_weight, metadata=metadata + ) + return self.fit(X, y, sample_weight=sample_weight, metadata=metadata).transform( + X, sample_weight=sample_weight, metadata=metadata + ) + + def inverse_transform(self, X, sample_weight=None, metadata=None): + record_metadata( + self, "inverse_transform", sample_weight=sample_weight, metadata=metadata + ) + return X + + +class ConsumingScorer(_Scorer): + def __init__(self, registry=None): + super().__init__( + score_func=mean_squared_error, sign=1, kwargs={}, response_method="predict" + ) + self.registry = registry + + def _score(self, method_caller, clf, X, y, **kwargs): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default(self, "score", **kwargs) + + sample_weight = kwargs.get("sample_weight", None) + return super()._score(method_caller, clf, X, y, sample_weight=sample_weight) + + +class ConsumingSplitter(BaseCrossValidator, GroupsConsumerMixin): + def __init__(self, registry=None): + self.registry = registry + + def split(self, X, y=None, groups="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default(self, "split", groups=groups, metadata=metadata) + + split_index = len(X) // 2 + train_indices = list(range(0, split_index)) + test_indices = list(range(split_index, len(X))) + yield test_indices, train_indices + yield train_indices, test_indices + + def get_n_splits(self, X=None, y=None, groups=None, metadata=None): + return 2 + + def _iter_test_indices(self, X=None, y=None, groups=None): + split_index = len(X) // 2 + train_indices = list(range(0, split_index)) + test_indices = list(range(split_index, len(X))) + yield test_indices + yield train_indices + + +class MetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): + """A meta-regressor which is only a router.""" + + def __init__(self, estimator): + self.estimator = estimator + + def fit(self, X, y, **fit_params): + params = process_routing(self, "fit", **fit_params) + self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) + + def get_metadata_routing(self): + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, method_mapping="one-to-one" + ) + return router + + +class WeightedMetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): + """A meta-regressor which is also a consumer.""" + + def __init__(self, estimator, registry=None): + self.estimator = estimator + self.registry = registry + + def fit(self, X, y, sample_weight=None, **fit_params): + if self.registry is not None: + self.registry.append(self) + + record_metadata(self, "fit", sample_weight=sample_weight) + params = process_routing(self, "fit", sample_weight=sample_weight, **fit_params) + self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) + return self + + def predict(self, X, **predict_params): + params = process_routing(self, "predict", **predict_params) + return self.estimator_.predict(X, **params.estimator.predict) + + def get_metadata_routing(self): + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add(estimator=self.estimator, method_mapping="one-to-one") + ) + return router + + +class WeightedMetaClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): + """A meta-estimator which also consumes sample_weight itself in ``fit``.""" + + def __init__(self, estimator, registry=None): + self.estimator = estimator + self.registry = 
registry + + def fit(self, X, y, sample_weight=None, **kwargs): + if self.registry is not None: + self.registry.append(self) + + record_metadata(self, "fit", sample_weight=sample_weight) + params = process_routing(self, "fit", sample_weight=sample_weight, **kwargs) + self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) + return self + + def get_metadata_routing(self): + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add(estimator=self.estimator, method_mapping="fit") + ) + return router + + +class MetaTransformer(MetaEstimatorMixin, TransformerMixin, BaseEstimator): + """A simple meta-transformer.""" + + def __init__(self, transformer): + self.transformer = transformer + + def fit(self, X, y=None, **fit_params): + params = process_routing(self, "fit", **fit_params) + self.transformer_ = clone(self.transformer).fit(X, y, **params.transformer.fit) + return self + + def transform(self, X, y=None, **transform_params): + params = process_routing(self, "transform", **transform_params) + return self.transformer_.transform(X, **params.transformer.transform) + + def get_metadata_routing(self): + return MetadataRouter(owner=self.__class__.__name__).add( + transformer=self.transformer, method_mapping="one-to-one" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/random_seed.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/random_seed.py new file mode 100644 index 0000000000000000000000000000000000000000..0fffd57a1016d2a93abbf74579cba45c756686f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/random_seed.py @@ -0,0 +1,84 @@ +"""global_random_seed fixture + +The goal of this fixture is to prevent tests that use it to be sensitive +to a specific seed value while still being deterministic by default. + +See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED +variable for insrtuctions on how to use this fixture. + +https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed +""" +from os import environ +from random import Random + +import pytest + + +# Passes the main worker's random seeds to workers +class XDistHooks: + def pytest_configure_node(self, node) -> None: + random_seeds = node.config.getoption("random_seeds") + node.workerinput["random_seeds"] = random_seeds + + +def pytest_configure(config): + if config.pluginmanager.hasplugin("xdist"): + config.pluginmanager.register(XDistHooks()) + + RANDOM_SEED_RANGE = list(range(100)) # All seeds in [0, 99] should be valid. + random_seed_var = environ.get("SKLEARN_TESTS_GLOBAL_RANDOM_SEED") + if hasattr(config, "workerinput") and "random_seeds" in config.workerinput: + # Set worker random seed from seed generated from main process + random_seeds = config.workerinput["random_seeds"] + elif random_seed_var is None: + # This is the way. + random_seeds = [42] + elif random_seed_var == "any": + # Pick-up one seed at random in the range of admissible random seeds. 
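A hypothetical test consuming the global_random_seed fixture defined below; by the fixture's contract it must pass deterministically for every admissible seed in [0, 99], and running with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all" parametrizes it over the full range:

    import numpy as np

    def test_mean_is_roughly_centered(global_random_seed):
        rng = np.random.RandomState(global_random_seed)
        x = rng.normal(size=1000)
        # loose statistical bound, comfortably satisfied across the admissible seeds
        assert abs(x.mean()) < 0.2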
+ random_seeds = [Random().choice(RANDOM_SEED_RANGE)] + elif random_seed_var == "all": + random_seeds = RANDOM_SEED_RANGE + else: + if "-" in random_seed_var: + start, stop = random_seed_var.split("-") + random_seeds = list(range(int(start), int(stop) + 1)) + else: + random_seeds = [int(random_seed_var)] + + if min(random_seeds) < 0 or max(random_seeds) > 99: + raise ValueError( + "The value(s) of the environment variable " + "SKLEARN_TESTS_GLOBAL_RANDOM_SEED must be in the range [0, 99] " + f"(or 'any' or 'all'), got: {random_seed_var}" + ) + config.option.random_seeds = random_seeds + + class GlobalRandomSeedPlugin: + @pytest.fixture(params=random_seeds) + def global_random_seed(self, request): + """Fixture to ask for a random yet controllable random seed. + + All tests that use this fixture accept the contract that they should + deterministically pass for any seed value from 0 to 99 included. + + See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED + variable for insrtuctions on how to use this fixture. + + https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed + """ + yield request.param + + config.pluginmanager.register(GlobalRandomSeedPlugin()) + + +def pytest_report_header(config): + random_seed_var = environ.get("SKLEARN_TESTS_GLOBAL_RANDOM_SEED") + if random_seed_var == "any": + return [ + "To reproduce this test run, set the following environment variable:", + f' SKLEARN_TESTS_GLOBAL_RANDOM_SEED="{config.option.random_seeds[0]}"', + ( + "See: https://scikit-learn.org/dev/computing/parallelism.html" + "#sklearn-tests-global-random-seed" + ), + ] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_build.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_build.py new file mode 100644 index 0000000000000000000000000000000000000000..72cab1dfcb174b9c6189ae1a7f49452d96aceca6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_build.py @@ -0,0 +1,32 @@ +import os +import textwrap + +import pytest + +from sklearn import __version__ +from sklearn.utils._openmp_helpers import _openmp_parallelism_enabled + + +def test_openmp_parallelism_enabled(): + # Check that sklearn is built with OpenMP-based parallelism enabled. + # This test can be skipped by setting the environment variable + # ``SKLEARN_SKIP_OPENMP_TEST``. + if os.getenv("SKLEARN_SKIP_OPENMP_TEST"): + pytest.skip("test explicitly skipped (SKLEARN_SKIP_OPENMP_TEST)") + + base_url = "dev" if __version__.endswith(".dev0") else "stable" + err_msg = textwrap.dedent(""" + This test fails because scikit-learn has been built without OpenMP. + This is not recommended since some estimators will run in sequential + mode instead of leveraging thread-based parallelism. + + You can find instructions to build scikit-learn with OpenMP at this + address: + + https://scikit-learn.org/{}/developers/advanced_installation.html + + You can skip this test by setting the environment variable + SKLEARN_SKIP_OPENMP_TEST to any value. 
+ """).format(base_url) + + assert _openmp_parallelism_enabled(), err_msg diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_calibration.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_calibration.py new file mode 100644 index 0000000000000000000000000000000000000000..e74ff76b48355c8d9967bc6bfdbcdf112d707034 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_calibration.py @@ -0,0 +1,1090 @@ +# Authors: Alexandre Gramfort +# License: BSD 3 clause + +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn.base import BaseEstimator, clone +from sklearn.calibration import ( + CalibratedClassifierCV, + CalibrationDisplay, + _CalibratedClassifier, + _sigmoid_calibration, + _SigmoidCalibration, + calibration_curve, +) +from sklearn.datasets import load_iris, make_blobs, make_classification +from sklearn.dummy import DummyClassifier +from sklearn.ensemble import ( + RandomForestClassifier, + VotingClassifier, +) +from sklearn.exceptions import NotFittedError +from sklearn.feature_extraction import DictVectorizer +from sklearn.impute import SimpleImputer +from sklearn.isotonic import IsotonicRegression +from sklearn.linear_model import LogisticRegression, SGDClassifier +from sklearn.metrics import brier_score_loss +from sklearn.model_selection import ( + KFold, + LeaveOneOut, + check_cv, + cross_val_predict, + cross_val_score, + train_test_split, +) +from sklearn.naive_bayes import MultinomialNB +from sklearn.pipeline import Pipeline, make_pipeline +from sklearn.preprocessing import LabelEncoder, StandardScaler +from sklearn.svm import LinearSVC +from sklearn.tree import DecisionTreeClassifier +from sklearn.utils._mocking import CheckingClassifier +from sklearn.utils._testing import ( + _convert_container, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.extmath import softmax +from sklearn.utils.fixes import CSR_CONTAINERS + +N_SAMPLES = 200 + + +@pytest.fixture(scope="module") +def data(): + X, y = make_classification(n_samples=N_SAMPLES, n_features=6, random_state=42) + return X, y + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) +@pytest.mark.parametrize("ensemble", [True, False]) +def test_calibration(data, method, csr_container, ensemble): + # Test calibration objects with isotonic and sigmoid + n_samples = N_SAMPLES // 2 + X, y = data + sample_weight = np.random.RandomState(seed=42).uniform(size=y.size) + + X -= X.min() # MultinomialNB only allows positive X + + # split train and test + X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples] + X_test, y_test = X[n_samples:], y[n_samples:] + + # Naive-Bayes + clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train) + prob_pos_clf = clf.predict_proba(X_test)[:, 1] + + cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble) + with pytest.raises(ValueError): + cal_clf.fit(X, y) + + # Naive Bayes with calibration + for this_X_train, this_X_test in [ + (X_train, X_test), + (csr_container(X_train), csr_container(X_test)), + ]: + cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble) + # Note that this fit overwrites the fit on the entire training + # set + cal_clf.fit(this_X_train, y_train, sample_weight=sw_train) + prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1] + + # Check that brier score has improved after calibration + assert 
brier_score_loss(y_test, prob_pos_clf) > brier_score_loss( + y_test, prob_pos_cal_clf + ) + + # Check invariance against relabeling [0, 1] -> [1, 2] + cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train) + prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] + assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled) + + # Check invariance against relabeling [0, 1] -> [-1, 1] + cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train) + prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] + assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled) + + # Check invariance against relabeling [0, 1] -> [1, 0] + cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train) + prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] + if method == "sigmoid": + assert_array_almost_equal(prob_pos_cal_clf, 1 - prob_pos_cal_clf_relabeled) + else: + # Isotonic calibration is not invariant against relabeling + # but should improve in both cases + assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss( + (y_test + 1) % 2, prob_pos_cal_clf_relabeled + ) + + +def test_calibration_default_estimator(data): + # Check estimator default is LinearSVC + X, y = data + calib_clf = CalibratedClassifierCV(cv=2) + calib_clf.fit(X, y) + + base_est = calib_clf.calibrated_classifiers_[0].estimator + assert isinstance(base_est, LinearSVC) + + +@pytest.mark.parametrize("ensemble", [True, False]) +def test_calibration_cv_splitter(data, ensemble): + # Check when `cv` is a CV splitter + X, y = data + + splits = 5 + kfold = KFold(n_splits=splits) + calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble) + assert isinstance(calib_clf.cv, KFold) + assert calib_clf.cv.n_splits == splits + + calib_clf.fit(X, y) + expected_n_clf = splits if ensemble else 1 + assert len(calib_clf.calibrated_classifiers_) == expected_n_clf + + +@pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) +@pytest.mark.parametrize("ensemble", [True, False]) +def test_sample_weight(data, method, ensemble): + n_samples = N_SAMPLES // 2 + X, y = data + + sample_weight = np.random.RandomState(seed=42).uniform(size=len(y)) + X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples] + X_test = X[n_samples:] + + estimator = LinearSVC(dual="auto", random_state=42) + calibrated_clf = CalibratedClassifierCV(estimator, method=method, ensemble=ensemble) + calibrated_clf.fit(X_train, y_train, sample_weight=sw_train) + probs_with_sw = calibrated_clf.predict_proba(X_test) + + # As the weights are used for the calibration, they should still yield + # different predictions + calibrated_clf.fit(X_train, y_train) + probs_without_sw = calibrated_clf.predict_proba(X_test) + + diff = np.linalg.norm(probs_with_sw - probs_without_sw) + assert diff > 0.1 + + +@pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) +@pytest.mark.parametrize("ensemble", [True, False]) +def test_parallel_execution(data, method, ensemble): + """Test parallel calibration""" + X, y = data + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) + + estimator = make_pipeline(StandardScaler(), LinearSVC(dual="auto", random_state=42)) + + cal_clf_parallel = CalibratedClassifierCV( + estimator, method=method, n_jobs=2, ensemble=ensemble + ) + cal_clf_parallel.fit(X_train, y_train) + probs_parallel = cal_clf_parallel.predict_proba(X_test) + + cal_clf_sequential = CalibratedClassifierCV( + estimator, method=method, n_jobs=1, 
ensemble=ensemble + ) + cal_clf_sequential.fit(X_train, y_train) + probs_sequential = cal_clf_sequential.predict_proba(X_test) + + assert_allclose(probs_parallel, probs_sequential) + + +@pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) +@pytest.mark.parametrize("ensemble", [True, False]) +# increase the number of RNG seeds to assess the statistical stability of this +# test: +@pytest.mark.parametrize("seed", range(2)) +def test_calibration_multiclass(method, ensemble, seed): + def multiclass_brier(y_true, proba_pred, n_classes): + Y_onehot = np.eye(n_classes)[y_true] + return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0] + + # Test calibration for multiclass with classifier that implements + # only decision function. + clf = LinearSVC(dual="auto", random_state=7) + X, y = make_blobs( + n_samples=500, n_features=100, random_state=seed, centers=10, cluster_std=15.0 + ) + + # Use an unbalanced dataset by collapsing 8 clusters into one class + # to make the naive calibration based on a softmax more unlikely + # to work. + y[y > 2] = 2 + n_classes = np.unique(y).shape[0] + X_train, y_train = X[::2], y[::2] + X_test, y_test = X[1::2], y[1::2] + + clf.fit(X_train, y_train) + + cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble) + cal_clf.fit(X_train, y_train) + probas = cal_clf.predict_proba(X_test) + # Check probabilities sum to 1 + assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test))) + + # Check that the dataset is not too trivial, otherwise it's hard + # to get interesting calibration data during the internal + # cross-validation loop. + assert 0.65 < clf.score(X_test, y_test) < 0.95 + + # Check that the accuracy of the calibrated model is never degraded + # too much compared to the original classifier. + assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test) + + # Check that Brier loss of calibrated classifier is smaller than + # loss obtained by naively turning OvR decision function to + # probabilities via a softmax + uncalibrated_brier = multiclass_brier( + y_test, softmax(clf.decision_function(X_test)), n_classes=n_classes + ) + calibrated_brier = multiclass_brier(y_test, probas, n_classes=n_classes) + + assert calibrated_brier < 1.1 * uncalibrated_brier + + # Test that calibration of a multiclass classifier decreases log-loss + # for RandomForestClassifier + clf = RandomForestClassifier(n_estimators=30, random_state=42) + clf.fit(X_train, y_train) + clf_probs = clf.predict_proba(X_test) + uncalibrated_brier = multiclass_brier(y_test, clf_probs, n_classes=n_classes) + + cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble) + cal_clf.fit(X_train, y_train) + cal_clf_probs = cal_clf.predict_proba(X_test) + calibrated_brier = multiclass_brier(y_test, cal_clf_probs, n_classes=n_classes) + assert calibrated_brier < 1.1 * uncalibrated_brier + + +def test_calibration_zero_probability(): + # Test an edge case where _CalibratedClassifier avoids numerical errors + # in the multiclass normalization step if all the calibrators output + # are zero all at once for a given sample and instead fallback to uniform + # probabilities. + class ZeroCalibrator: + # This function is called from _CalibratedClassifier.predict_proba. 
+ def predict(self, X): + return np.zeros(X.shape[0]) + + X, y = make_blobs( + n_samples=50, n_features=10, random_state=7, centers=10, cluster_std=15.0 + ) + clf = DummyClassifier().fit(X, y) + calibrator = ZeroCalibrator() + cal_clf = _CalibratedClassifier( + estimator=clf, calibrators=[calibrator], classes=clf.classes_ + ) + + probas = cal_clf.predict_proba(X) + + # Check that all probabilities are uniformly 1. / clf.n_classes_ + assert_allclose(probas, 1.0 / clf.n_classes_) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_calibration_prefit(csr_container): + """Test calibration for prefitted classifiers""" + n_samples = 50 + X, y = make_classification(n_samples=3 * n_samples, n_features=6, random_state=42) + sample_weight = np.random.RandomState(seed=42).uniform(size=y.size) + + X -= X.min() # MultinomialNB only allows positive X + + # split train and test + X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples] + X_calib, y_calib, sw_calib = ( + X[n_samples : 2 * n_samples], + y[n_samples : 2 * n_samples], + sample_weight[n_samples : 2 * n_samples], + ) + X_test, y_test = X[2 * n_samples :], y[2 * n_samples :] + + # Naive-Bayes + clf = MultinomialNB() + # Check error if clf not prefit + unfit_clf = CalibratedClassifierCV(clf, cv="prefit") + with pytest.raises(NotFittedError): + unfit_clf.fit(X_calib, y_calib) + + clf.fit(X_train, y_train, sw_train) + prob_pos_clf = clf.predict_proba(X_test)[:, 1] + + # Naive Bayes with calibration + for this_X_calib, this_X_test in [ + (X_calib, X_test), + (csr_container(X_calib), csr_container(X_test)), + ]: + for method in ["isotonic", "sigmoid"]: + cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit") + + for sw in [sw_calib, None]: + cal_clf.fit(this_X_calib, y_calib, sample_weight=sw) + y_prob = cal_clf.predict_proba(this_X_test) + y_pred = cal_clf.predict(this_X_test) + prob_pos_cal_clf = y_prob[:, 1] + assert_array_equal(y_pred, np.array([0, 1])[np.argmax(y_prob, axis=1)]) + + assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss( + y_test, prob_pos_cal_clf + ) + + +@pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) +def test_calibration_ensemble_false(data, method): + # Test that `ensemble=False` is the same as using predictions from + # `cross_val_predict` to train calibrator. 
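+ # The block below reproduces that behaviour by hand: fit a single calibrator on the
+ # out-of-fold decision values, then apply it to a classifier refit on the full data.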
+ X, y = data + clf = LinearSVC(dual="auto", random_state=7) + + cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False) + cal_clf.fit(X, y) + cal_probas = cal_clf.predict_proba(X) + + # Get probas manually + unbiased_preds = cross_val_predict(clf, X, y, cv=3, method="decision_function") + if method == "isotonic": + calibrator = IsotonicRegression(out_of_bounds="clip") + else: + calibrator = _SigmoidCalibration() + calibrator.fit(unbiased_preds, y) + # Use `clf` fit on all data + clf.fit(X, y) + clf_df = clf.decision_function(X) + manual_probas = calibrator.predict(clf_df) + assert_allclose(cal_probas[:, 1], manual_probas) + + +def test_sigmoid_calibration(): + """Test calibration values with Platt sigmoid model""" + exF = np.array([5, -4, 1.0]) + exY = np.array([1, -1, -1]) + # computed from my python port of the C++ code in LibSVM + AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512]) + assert_array_almost_equal(AB_lin_libsvm, _sigmoid_calibration(exF, exY), 3) + lin_prob = 1.0 / (1.0 + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1])) + sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF) + assert_array_almost_equal(lin_prob, sk_prob, 6) + + # check that _SigmoidCalibration().fit only accepts 1d array or 2d column + # arrays + with pytest.raises(ValueError): + _SigmoidCalibration().fit(np.vstack((exF, exF)), exY) + + +def test_calibration_curve(): + """Check calibration_curve function""" + y_true = np.array([0, 0, 0, 1, 1, 1]) + y_pred = np.array([0.0, 0.1, 0.2, 0.8, 0.9, 1.0]) + prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2) + assert len(prob_true) == len(prob_pred) + assert len(prob_true) == 2 + assert_almost_equal(prob_true, [0, 1]) + assert_almost_equal(prob_pred, [0.1, 0.9]) + + # Probabilities outside [0, 1] should not be accepted at all. + with pytest.raises(ValueError): + calibration_curve([1], [-0.1]) + + # test that quantiles work as expected + y_true2 = np.array([0, 0, 0, 0, 1, 1]) + y_pred2 = np.array([0.0, 0.1, 0.2, 0.5, 0.9, 1.0]) + prob_true_quantile, prob_pred_quantile = calibration_curve( + y_true2, y_pred2, n_bins=2, strategy="quantile" + ) + + assert len(prob_true_quantile) == len(prob_pred_quantile) + assert len(prob_true_quantile) == 2 + assert_almost_equal(prob_true_quantile, [0, 2 / 3]) + assert_almost_equal(prob_pred_quantile, [0.1, 0.8]) + + # Check that error is raised when invalid strategy is selected + with pytest.raises(ValueError): + calibration_curve(y_true2, y_pred2, strategy="percentile") + + +@pytest.mark.parametrize("ensemble", [True, False]) +def test_calibration_nan_imputer(ensemble): + """Test that calibration can accept nan""" + X, y = make_classification( + n_samples=10, n_features=2, n_informative=2, n_redundant=0, random_state=42 + ) + X[0, 0] = np.nan + clf = Pipeline( + [("imputer", SimpleImputer()), ("rf", RandomForestClassifier(n_estimators=1))] + ) + clf_c = CalibratedClassifierCV(clf, cv=2, method="isotonic", ensemble=ensemble) + clf_c.fit(X, y) + clf_c.predict(X) + + +@pytest.mark.parametrize("ensemble", [True, False]) +def test_calibration_prob_sum(ensemble): + # Test that sum of probabilities is 1. 
A non-regression test for + # issue #7796 + num_classes = 2 + X, y = make_classification(n_samples=10, n_features=5, n_classes=num_classes) + clf = LinearSVC(dual="auto", C=1.0, random_state=7) + clf_prob = CalibratedClassifierCV( + clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble + ) + clf_prob.fit(X, y) + + probs = clf_prob.predict_proba(X) + assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0])) + + +@pytest.mark.parametrize("ensemble", [True, False]) +def test_calibration_less_classes(ensemble): + # Test to check calibration works fine when train set in a test-train + # split does not contain all classes + # Since this test uses LOO, at each iteration train set will not contain a + # class label + X = np.random.randn(10, 5) + y = np.arange(10) + clf = LinearSVC(dual="auto", C=1.0, random_state=7) + cal_clf = CalibratedClassifierCV( + clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble + ) + cal_clf.fit(X, y) + + for i, calibrated_classifier in enumerate(cal_clf.calibrated_classifiers_): + proba = calibrated_classifier.predict_proba(X) + if ensemble: + # Check that the unobserved class has proba=0 + assert_array_equal(proba[:, i], np.zeros(len(y))) + # Check for all other classes proba>0 + assert np.all(proba[:, :i] > 0) + assert np.all(proba[:, i + 1 :] > 0) + else: + # Check `proba` are all 1/n_classes + assert np.allclose(proba, 1 / proba.shape[0]) + + +@pytest.mark.parametrize( + "X", + [ + np.random.RandomState(42).randn(15, 5, 2), + np.random.RandomState(42).randn(15, 5, 2, 6), + ], +) +def test_calibration_accepts_ndarray(X): + """Test that calibration accepts n-dimensional arrays as input""" + y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0] + + class MockTensorClassifier(BaseEstimator): + """A toy estimator that accepts tensor inputs""" + + _estimator_type = "classifier" + + def fit(self, X, y): + self.classes_ = np.unique(y) + return self + + def decision_function(self, X): + # toy decision function that just needs to have the right shape: + return X.reshape(X.shape[0], -1).sum(axis=1) + + calibrated_clf = CalibratedClassifierCV(MockTensorClassifier()) + # we should be able to fit this classifier with no error + calibrated_clf.fit(X, y) + + +@pytest.fixture +def dict_data(): + dict_data = [ + {"state": "NY", "age": "adult"}, + {"state": "TX", "age": "adult"}, + {"state": "VT", "age": "child"}, + ] + text_labels = [1, 0, 1] + return dict_data, text_labels + + +@pytest.fixture +def dict_data_pipeline(dict_data): + X, y = dict_data + pipeline_prefit = Pipeline( + [("vectorizer", DictVectorizer()), ("clf", RandomForestClassifier())] + ) + return pipeline_prefit.fit(X, y) + + +def test_calibration_dict_pipeline(dict_data, dict_data_pipeline): + """Test that calibration works in prefit pipeline with transformer + + `X` is not array-like, sparse matrix or dataframe at the start. + See https://github.com/scikit-learn/scikit-learn/issues/8710 + + Also test it can predict without running into validation errors. + See https://github.com/scikit-learn/scikit-learn/issues/19637 + """ + X, y = dict_data + clf = dict_data_pipeline + calib_clf = CalibratedClassifierCV(clf, cv="prefit") + calib_clf.fit(X, y) + # Check attributes are obtained from fitted estimator + assert_array_equal(calib_clf.classes_, clf.classes_) + + # Neither the pipeline nor the calibration meta-estimator + # expose the n_features_in_ check on this kind of data. 
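+ # (the training data is a list of dicts, so there is no fixed feature count to record)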
+ assert not hasattr(clf, "n_features_in_") + assert not hasattr(calib_clf, "n_features_in_") + + # Ensure that no error is thrown with predict and predict_proba + calib_clf.predict(X) + calib_clf.predict_proba(X) + + +@pytest.mark.parametrize( + "clf, cv", + [ + pytest.param(LinearSVC(dual="auto", C=1), 2), + pytest.param(LinearSVC(dual="auto", C=1), "prefit"), + ], +) +def test_calibration_attributes(clf, cv): + # Check that `n_features_in_` and `classes_` attributes created properly + X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7) + if cv == "prefit": + clf = clf.fit(X, y) + calib_clf = CalibratedClassifierCV(clf, cv=cv) + calib_clf.fit(X, y) + + if cv == "prefit": + assert_array_equal(calib_clf.classes_, clf.classes_) + assert calib_clf.n_features_in_ == clf.n_features_in_ + else: + classes = LabelEncoder().fit(y).classes_ + assert_array_equal(calib_clf.classes_, classes) + assert calib_clf.n_features_in_ == X.shape[1] + + +def test_calibration_inconsistent_prefit_n_features_in(): + # Check that `n_features_in_` from prefit base estimator + # is consistent with training set + X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7) + clf = LinearSVC(dual="auto", C=1).fit(X, y) + calib_clf = CalibratedClassifierCV(clf, cv="prefit") + + msg = "X has 3 features, but LinearSVC is expecting 5 features as input." + with pytest.raises(ValueError, match=msg): + calib_clf.fit(X[:, :3], y) + + +def test_calibration_votingclassifier(): + # Check that `CalibratedClassifier` works with `VotingClassifier`. + # The method `predict_proba` from `VotingClassifier` is dynamically + # defined via a property that only works when voting="soft". + X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7) + vote = VotingClassifier( + estimators=[("lr" + str(i), LogisticRegression()) for i in range(3)], + voting="soft", + ) + vote.fit(X, y) + + calib_clf = CalibratedClassifierCV(estimator=vote, cv="prefit") + # smoke test: should not raise an error + calib_clf.fit(X, y) + + +@pytest.fixture(scope="module") +def iris_data(): + return load_iris(return_X_y=True) + + +@pytest.fixture(scope="module") +def iris_data_binary(iris_data): + X, y = iris_data + return X[y < 2], y[y < 2] + + +@pytest.mark.parametrize("n_bins", [5, 10]) +@pytest.mark.parametrize("strategy", ["uniform", "quantile"]) +def test_calibration_display_compute(pyplot, iris_data_binary, n_bins, strategy): + # Ensure `CalibrationDisplay.from_predictions` and `calibration_curve` + # compute the same results. Also checks attributes of the + # CalibrationDisplay object. 
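+ # The display is built via `from_estimator` and compared against `calibration_curve`
+ # computed directly from the estimator's predicted probabilities.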
+ X, y = iris_data_binary + + lr = LogisticRegression().fit(X, y) + + viz = CalibrationDisplay.from_estimator( + lr, X, y, n_bins=n_bins, strategy=strategy, alpha=0.8 + ) + + y_prob = lr.predict_proba(X)[:, 1] + prob_true, prob_pred = calibration_curve( + y, y_prob, n_bins=n_bins, strategy=strategy + ) + + assert_allclose(viz.prob_true, prob_true) + assert_allclose(viz.prob_pred, prob_pred) + assert_allclose(viz.y_prob, y_prob) + + assert viz.estimator_name == "LogisticRegression" + + # cannot fail thanks to pyplot fixture + import matplotlib as mpl # noqa + + assert isinstance(viz.line_, mpl.lines.Line2D) + assert viz.line_.get_alpha() == 0.8 + assert isinstance(viz.ax_, mpl.axes.Axes) + assert isinstance(viz.figure_, mpl.figure.Figure) + + assert viz.ax_.get_xlabel() == "Mean predicted probability (Positive class: 1)" + assert viz.ax_.get_ylabel() == "Fraction of positives (Positive class: 1)" + + expected_legend_labels = ["LogisticRegression", "Perfectly calibrated"] + legend_labels = viz.ax_.get_legend().get_texts() + assert len(legend_labels) == len(expected_legend_labels) + for labels in legend_labels: + assert labels.get_text() in expected_legend_labels + + +def test_plot_calibration_curve_pipeline(pyplot, iris_data_binary): + # Ensure pipelines are supported by CalibrationDisplay.from_estimator + X, y = iris_data_binary + clf = make_pipeline(StandardScaler(), LogisticRegression()) + clf.fit(X, y) + viz = CalibrationDisplay.from_estimator(clf, X, y) + + expected_legend_labels = [viz.estimator_name, "Perfectly calibrated"] + legend_labels = viz.ax_.get_legend().get_texts() + assert len(legend_labels) == len(expected_legend_labels) + for labels in legend_labels: + assert labels.get_text() in expected_legend_labels + + +@pytest.mark.parametrize( + "name, expected_label", [(None, "_line1"), ("my_est", "my_est")] +) +def test_calibration_display_default_labels(pyplot, name, expected_label): + prob_true = np.array([0, 1, 1, 0]) + prob_pred = np.array([0.2, 0.8, 0.8, 0.4]) + y_prob = np.array([]) + + viz = CalibrationDisplay(prob_true, prob_pred, y_prob, estimator_name=name) + viz.plot() + + expected_legend_labels = [] if name is None else [name] + expected_legend_labels.append("Perfectly calibrated") + legend_labels = viz.ax_.get_legend().get_texts() + assert len(legend_labels) == len(expected_legend_labels) + for labels in legend_labels: + assert labels.get_text() in expected_legend_labels + + +def test_calibration_display_label_class_plot(pyplot): + # Checks that when instantiating `CalibrationDisplay` class then calling + # `plot`, `self.estimator_name` is the one given in `plot` + prob_true = np.array([0, 1, 1, 0]) + prob_pred = np.array([0.2, 0.8, 0.8, 0.4]) + y_prob = np.array([]) + + name = "name one" + viz = CalibrationDisplay(prob_true, prob_pred, y_prob, estimator_name=name) + assert viz.estimator_name == name + name = "name two" + viz.plot(name=name) + + expected_legend_labels = [name, "Perfectly calibrated"] + legend_labels = viz.ax_.get_legend().get_texts() + assert len(legend_labels) == len(expected_legend_labels) + for labels in legend_labels: + assert labels.get_text() in expected_legend_labels + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_calibration_display_name_multiple_calls( + constructor_name, pyplot, iris_data_binary +): + # Check that the `name` used when calling + # `CalibrationDisplay.from_predictions` or + # `CalibrationDisplay.from_estimator` is used when multiple + # `CalibrationDisplay.viz.plot()` calls 
are made. + X, y = iris_data_binary + clf_name = "my hand-crafted name" + clf = LogisticRegression().fit(X, y) + y_prob = clf.predict_proba(X)[:, 1] + + constructor = getattr(CalibrationDisplay, constructor_name) + params = (clf, X, y) if constructor_name == "from_estimator" else (y, y_prob) + + viz = constructor(*params, name=clf_name) + assert viz.estimator_name == clf_name + pyplot.close("all") + viz.plot() + + expected_legend_labels = [clf_name, "Perfectly calibrated"] + legend_labels = viz.ax_.get_legend().get_texts() + assert len(legend_labels) == len(expected_legend_labels) + for labels in legend_labels: + assert labels.get_text() in expected_legend_labels + + pyplot.close("all") + clf_name = "another_name" + viz.plot(name=clf_name) + assert len(legend_labels) == len(expected_legend_labels) + for labels in legend_labels: + assert labels.get_text() in expected_legend_labels + + +def test_calibration_display_ref_line(pyplot, iris_data_binary): + # Check that `ref_line` only appears once + X, y = iris_data_binary + lr = LogisticRegression().fit(X, y) + dt = DecisionTreeClassifier().fit(X, y) + + viz = CalibrationDisplay.from_estimator(lr, X, y) + viz2 = CalibrationDisplay.from_estimator(dt, X, y, ax=viz.ax_) + + labels = viz2.ax_.get_legend_handles_labels()[1] + assert labels.count("Perfectly calibrated") == 1 + + +@pytest.mark.parametrize("dtype_y_str", [str, object]) +def test_calibration_curve_pos_label_error_str(dtype_y_str): + """Check error message when a `pos_label` is not specified with `str` targets.""" + rng = np.random.RandomState(42) + y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=dtype_y_str) + y2 = rng.randint(0, 2, size=y1.size) + + err_msg = ( + "y_true takes value in {'eggs', 'spam'} and pos_label is not " + "specified: either make y_true take value in {0, 1} or {-1, 1} or " + "pass pos_label explicitly" + ) + with pytest.raises(ValueError, match=err_msg): + calibration_curve(y1, y2) + + +@pytest.mark.parametrize("dtype_y_str", [str, object]) +def test_calibration_curve_pos_label(dtype_y_str): + """Check the behaviour when passing explicitly `pos_label`.""" + y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1]) + classes = np.array(["spam", "egg"], dtype=dtype_y_str) + y_true_str = classes[y_true] + y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.0]) + + # default case + prob_true, _ = calibration_curve(y_true, y_pred, n_bins=4) + assert_allclose(prob_true, [0, 0.5, 1, 1]) + # if `y_true` contains `str`, then `pos_label` is required + prob_true, _ = calibration_curve(y_true_str, y_pred, n_bins=4, pos_label="egg") + assert_allclose(prob_true, [0, 0.5, 1, 1]) + + prob_true, _ = calibration_curve(y_true, 1 - y_pred, n_bins=4, pos_label=0) + assert_allclose(prob_true, [0, 0, 0.5, 1]) + prob_true, _ = calibration_curve(y_true_str, 1 - y_pred, n_bins=4, pos_label="spam") + assert_allclose(prob_true, [0, 0, 0.5, 1]) + + +@pytest.mark.parametrize("pos_label, expected_pos_label", [(None, 1), (0, 0), (1, 1)]) +def test_calibration_display_pos_label( + pyplot, iris_data_binary, pos_label, expected_pos_label +): + """Check the behaviour of `pos_label` in the `CalibrationDisplay`.""" + X, y = iris_data_binary + + lr = LogisticRegression().fit(X, y) + viz = CalibrationDisplay.from_estimator(lr, X, y, pos_label=pos_label) + + y_prob = lr.predict_proba(X)[:, expected_pos_label] + prob_true, prob_pred = calibration_curve(y, y_prob, pos_label=pos_label) + + assert_allclose(viz.prob_true, prob_true) + assert_allclose(viz.prob_pred, prob_pred) + assert_allclose(viz.y_prob, 
y_prob) + + assert ( + viz.ax_.get_xlabel() + == f"Mean predicted probability (Positive class: {expected_pos_label})" + ) + assert ( + viz.ax_.get_ylabel() + == f"Fraction of positives (Positive class: {expected_pos_label})" + ) + + expected_legend_labels = [lr.__class__.__name__, "Perfectly calibrated"] + legend_labels = viz.ax_.get_legend().get_texts() + assert len(legend_labels) == len(expected_legend_labels) + for labels in legend_labels: + assert labels.get_text() in expected_legend_labels + + +@pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) +@pytest.mark.parametrize("ensemble", [True, False]) +def test_calibrated_classifier_cv_double_sample_weights_equivalence(method, ensemble): + """Check that passing repeating twice the dataset `X` is equivalent to + passing a `sample_weight` with a factor 2.""" + X, y = load_iris(return_X_y=True) + # Scale the data to avoid any convergence issue + X = StandardScaler().fit_transform(X) + # Only use 2 classes + X, y = X[:100], y[:100] + sample_weight = np.ones_like(y) * 2 + + # Interlace the data such that a 2-fold cross-validation will be equivalent + # to using the original dataset with a sample weights of 2 + X_twice = np.zeros((X.shape[0] * 2, X.shape[1]), dtype=X.dtype) + X_twice[::2, :] = X + X_twice[1::2, :] = X + y_twice = np.zeros(y.shape[0] * 2, dtype=y.dtype) + y_twice[::2] = y + y_twice[1::2] = y + + estimator = LogisticRegression() + calibrated_clf_without_weights = CalibratedClassifierCV( + estimator, + method=method, + ensemble=ensemble, + cv=2, + ) + calibrated_clf_with_weights = clone(calibrated_clf_without_weights) + + calibrated_clf_with_weights.fit(X, y, sample_weight=sample_weight) + calibrated_clf_without_weights.fit(X_twice, y_twice) + + # Check that the underlying fitted estimators have the same coefficients + for est_with_weights, est_without_weights in zip( + calibrated_clf_with_weights.calibrated_classifiers_, + calibrated_clf_without_weights.calibrated_classifiers_, + ): + assert_allclose( + est_with_weights.estimator.coef_, + est_without_weights.estimator.coef_, + ) + + # Check that the predictions are the same + y_pred_with_weights = calibrated_clf_with_weights.predict_proba(X) + y_pred_without_weights = calibrated_clf_without_weights.predict_proba(X) + + assert_allclose(y_pred_with_weights, y_pred_without_weights) + + +@pytest.mark.parametrize("fit_params_type", ["list", "array"]) +def test_calibration_with_fit_params(fit_params_type, data): + """Tests that fit_params are passed to the underlying base estimator. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12384 + """ + X, y = data + fit_params = { + "a": _convert_container(y, fit_params_type), + "b": _convert_container(y, fit_params_type), + } + + clf = CheckingClassifier(expected_fit_params=["a", "b"]) + pc_clf = CalibratedClassifierCV(clf) + + pc_clf.fit(X, y, **fit_params) + + +@pytest.mark.parametrize( + "sample_weight", + [ + [1.0] * N_SAMPLES, + np.ones(N_SAMPLES), + ], +) +def test_calibration_with_sample_weight_estimator(sample_weight, data): + """Tests that sample_weight is passed to the underlying base + estimator. + """ + X, y = data + clf = CheckingClassifier(expected_sample_weight=True) + pc_clf = CalibratedClassifierCV(clf) + + pc_clf.fit(X, y, sample_weight=sample_weight) + + +def test_calibration_without_sample_weight_estimator(data): + """Check that even if the estimator doesn't support + sample_weight, fitting with sample_weight still works. 
+ + There should be a warning, since the sample_weight is not passed + on to the estimator. + """ + X, y = data + sample_weight = np.ones_like(y) + + class ClfWithoutSampleWeight(CheckingClassifier): + def fit(self, X, y, **fit_params): + assert "sample_weight" not in fit_params + return super().fit(X, y, **fit_params) + + clf = ClfWithoutSampleWeight() + pc_clf = CalibratedClassifierCV(clf) + + with pytest.warns(UserWarning): + pc_clf.fit(X, y, sample_weight=sample_weight) + + +@pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) +@pytest.mark.parametrize("ensemble", [True, False]) +def test_calibrated_classifier_cv_zeros_sample_weights_equivalence(method, ensemble): + """Check that passing removing some sample from the dataset `X` is + equivalent to passing a `sample_weight` with a factor 0.""" + X, y = load_iris(return_X_y=True) + # Scale the data to avoid any convergence issue + X = StandardScaler().fit_transform(X) + # Only use 2 classes and select samples such that 2-fold cross-validation + # split will lead to an equivalence with a `sample_weight` of 0 + X = np.vstack((X[:40], X[50:90])) + y = np.hstack((y[:40], y[50:90])) + sample_weight = np.zeros_like(y) + sample_weight[::2] = 1 + + estimator = LogisticRegression() + calibrated_clf_without_weights = CalibratedClassifierCV( + estimator, + method=method, + ensemble=ensemble, + cv=2, + ) + calibrated_clf_with_weights = clone(calibrated_clf_without_weights) + + calibrated_clf_with_weights.fit(X, y, sample_weight=sample_weight) + calibrated_clf_without_weights.fit(X[::2], y[::2]) + + # Check that the underlying fitted estimators have the same coefficients + for est_with_weights, est_without_weights in zip( + calibrated_clf_with_weights.calibrated_classifiers_, + calibrated_clf_without_weights.calibrated_classifiers_, + ): + assert_allclose( + est_with_weights.estimator.coef_, + est_without_weights.estimator.coef_, + ) + + # Check that the predictions are the same + y_pred_with_weights = calibrated_clf_with_weights.predict_proba(X) + y_pred_without_weights = calibrated_clf_without_weights.predict_proba(X) + + assert_allclose(y_pred_with_weights, y_pred_without_weights) + + +def test_calibration_with_non_sample_aligned_fit_param(data): + """Check that CalibratedClassifierCV does not enforce sample alignment + for fit parameters.""" + + class TestClassifier(LogisticRegression): + def fit(self, X, y, sample_weight=None, fit_param=None): + assert fit_param is not None + return super().fit(X, y, sample_weight=sample_weight) + + CalibratedClassifierCV(estimator=TestClassifier()).fit( + *data, fit_param=np.ones(len(data[1]) + 1) + ) + + +def test_calibrated_classifier_cv_works_with_large_confidence_scores( + global_random_seed, +): + """Test that :class:`CalibratedClassifierCV` works with large confidence + scores when using the `sigmoid` method, particularly with the + :class:`SGDClassifier`. + + Non-regression test for issue #26766. + """ + prob = 0.67 + n = 1000 + random_noise = np.random.default_rng(global_random_seed).normal(size=n) + + y = np.array([1] * int(n * prob) + [0] * (n - int(n * prob))) + X = 1e5 * y.reshape((-1, 1)) + random_noise + + # Check that the decision function of SGDClassifier produces predicted + # values that are quite large, for the data under consideration. 
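+ # Large magnitudes matter because Platt scaling evaluates 1 / (1 + exp(A * f + B));
+ # without the internal rescaling of the predictions, exp() could overflow at this scale.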
+ cv = check_cv(cv=None, y=y, classifier=True) + indices = cv.split(X, y) + for train, test in indices: + X_train, y_train = X[train], y[train] + X_test = X[test] + sgd_clf = SGDClassifier(loss="squared_hinge", random_state=global_random_seed) + sgd_clf.fit(X_train, y_train) + predictions = sgd_clf.decision_function(X_test) + assert (predictions > 1e4).any() + + # Compare the CalibratedClassifierCV using the sigmoid method with the + # CalibratedClassifierCV using the isotonic method. The isotonic method + # is used for comparison because it is numerically stable. + clf_sigmoid = CalibratedClassifierCV( + SGDClassifier(loss="squared_hinge", random_state=global_random_seed), + method="sigmoid", + ) + score_sigmoid = cross_val_score(clf_sigmoid, X, y, scoring="roc_auc") + + # The isotonic method is used for comparison because it is numerically + # stable. + clf_isotonic = CalibratedClassifierCV( + SGDClassifier(loss="squared_hinge", random_state=global_random_seed), + method="isotonic", + ) + score_isotonic = cross_val_score(clf_isotonic, X, y, scoring="roc_auc") + + # The AUC score should be the same because it is invariant under + # strictly monotonic conditions + assert_allclose(score_sigmoid, score_isotonic) + + +def test_sigmoid_calibration_max_abs_prediction_threshold(global_random_seed): + random_state = np.random.RandomState(seed=global_random_seed) + n = 100 + y = random_state.randint(0, 2, size=n) + + # Check that for small enough predictions ranging from -2 to 2, the + # threshold value has no impact on the outcome + predictions_small = random_state.uniform(low=-2, high=2, size=100) + + # Using a threshold lower than the maximum absolute value of the + # predictions enables internal re-scaling by max(abs(predictions_small)). + threshold_1 = 0.1 + a1, b1 = _sigmoid_calibration( + predictions=predictions_small, + y=y, + max_abs_prediction_threshold=threshold_1, + ) + + # Using a larger threshold disables rescaling. + threshold_2 = 10 + a2, b2 = _sigmoid_calibration( + predictions=predictions_small, + y=y, + max_abs_prediction_threshold=threshold_2, + ) + + # Using default threshold of 30 also disables the scaling. + a3, b3 = _sigmoid_calibration( + predictions=predictions_small, + y=y, + ) + + # Depends on the tolerance of the underlying quasy-newton solver which is + # not too strict by default. + atol = 1e-6 + assert_allclose(a1, a2, atol=atol) + assert_allclose(a2, a3, atol=atol) + assert_allclose(b1, b2, atol=atol) + assert_allclose(b2, b3, atol=atol) + + +def test_float32_predict_proba(data): + """Check that CalibratedClassifierCV works with float32 predict proba. + + Non-regression test for gh-28245. 
+ """ + + class DummyClassifer32(DummyClassifier): + def predict_proba(self, X): + return super().predict_proba(X).astype(np.float32) + + model = DummyClassifer32() + calibrator = CalibratedClassifierCV(model) + # Does not raise an error + calibrator.fit(*data) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_discriminant_analysis.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_discriminant_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..b60053e04b25b80730779a2ad69ddba4de12a5a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_discriminant_analysis.py @@ -0,0 +1,677 @@ +import numpy as np +import pytest +from scipy import linalg + +from sklearn.cluster import KMeans +from sklearn.covariance import LedoitWolf, ShrunkCovariance, ledoit_wolf +from sklearn.datasets import make_blobs +from sklearn.discriminant_analysis import ( + LinearDiscriminantAnalysis, + QuadraticDiscriminantAnalysis, + _cov, +) +from sklearn.preprocessing import StandardScaler +from sklearn.utils import _IS_WASM, check_random_state +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +# Data is just 6 separable points in the plane +X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype="f") +y = np.array([1, 1, 1, 2, 2, 2]) +y3 = np.array([1, 1, 2, 2, 3, 3]) + +# Degenerate data with only one feature (still should be separable) +X1 = np.array( + [[-2], [-1], [-1], [1], [1], [2]], + dtype="f", +) + +# Data is just 9 separable points in the plane +X6 = np.array( + [[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2], [1, 3], [1, 2], [2, 1], [2, 2]] +) +y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2]) +y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1]) + +# Degenerate data with 1 feature (still should be separable) +X7 = np.array([[-3], [-2], [-1], [-1], [0], [1], [1], [2], [3]]) + +# Data that has zero variance in one dimension and needs regularization +X2 = np.array( + [[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0], [2, 0], [3, 0]] +) + +# One element class +y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2]) + +# Data with less samples in a class than n_features +X5 = np.c_[np.arange(8), np.zeros((8, 3))] +y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1]) + +solver_shrinkage = [ + ("svd", None), + ("lsqr", None), + ("eigen", None), + ("lsqr", "auto"), + ("lsqr", 0), + ("lsqr", 0.43), + ("eigen", "auto"), + ("eigen", 0), + ("eigen", 0.43), +] + + +def test_lda_predict(): + # Test LDA classification. + # This checks that LDA implements fit and predict and returns correct + # values for simple toy data. 
+ for test_case in solver_shrinkage: + solver, shrinkage = test_case + clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) + y_pred = clf.fit(X, y).predict(X) + assert_array_equal(y_pred, y, "solver %s" % solver) + + # Assert that it works with 1D data + y_pred1 = clf.fit(X1, y).predict(X1) + assert_array_equal(y_pred1, y, "solver %s" % solver) + + # Test probability estimates + y_proba_pred1 = clf.predict_proba(X1) + assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y, "solver %s" % solver) + y_log_proba_pred1 = clf.predict_log_proba(X1) + assert_allclose( + np.exp(y_log_proba_pred1), + y_proba_pred1, + rtol=1e-6, + atol=1e-6, + err_msg="solver %s" % solver, + ) + + # Primarily test for commit 2f34950 -- "reuse" of priors + y_pred3 = clf.fit(X, y3).predict(X) + # LDA shouldn't be able to separate those + assert np.any(y_pred3 != y3), "solver %s" % solver + + clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto") + with pytest.raises(NotImplementedError): + clf.fit(X, y) + + clf = LinearDiscriminantAnalysis( + solver="lsqr", shrinkage=0.1, covariance_estimator=ShrunkCovariance() + ) + with pytest.raises( + ValueError, + match=( + "covariance_estimator and shrinkage " + "parameters are not None. " + "Only one of the two can be set." + ), + ): + clf.fit(X, y) + + # test bad solver with covariance_estimator + clf = LinearDiscriminantAnalysis(solver="svd", covariance_estimator=LedoitWolf()) + with pytest.raises( + ValueError, match="covariance estimator is not supported with svd" + ): + clf.fit(X, y) + + # test bad covariance estimator + clf = LinearDiscriminantAnalysis( + solver="lsqr", covariance_estimator=KMeans(n_clusters=2, n_init="auto") + ) + with pytest.raises(ValueError): + clf.fit(X, y) + + +@pytest.mark.parametrize("n_classes", [2, 3]) +@pytest.mark.parametrize("solver", ["svd", "lsqr", "eigen"]) +def test_lda_predict_proba(solver, n_classes): + def generate_dataset(n_samples, centers, covariances, random_state=None): + """Generate a multivariate normal data given some centers and + covariances""" + rng = check_random_state(random_state) + X = np.vstack( + [ + rng.multivariate_normal(mean, cov, size=n_samples // len(centers)) + for mean, cov in zip(centers, covariances) + ] + ) + y = np.hstack( + [[clazz] * (n_samples // len(centers)) for clazz in range(len(centers))] + ) + return X, y + + blob_centers = np.array([[0, 0], [-10, 40], [-30, 30]])[:n_classes] + blob_stds = np.array([[[10, 10], [10, 100]]] * len(blob_centers)) + X, y = generate_dataset( + n_samples=90000, centers=blob_centers, covariances=blob_stds, random_state=42 + ) + lda = LinearDiscriminantAnalysis( + solver=solver, store_covariance=True, shrinkage=None + ).fit(X, y) + # check that the empirical means and covariances are close enough to the + # one used to generate the data + assert_allclose(lda.means_, blob_centers, atol=1e-1) + assert_allclose(lda.covariance_, blob_stds[0], atol=1) + + # implement the method to compute the probability given in The Elements + # of Statistical Learning (cf. p.127, Sect. 
4.4.5 "Logistic Regression + # or LDA?") + precision = linalg.inv(blob_stds[0]) + alpha_k = [] + alpha_k_0 = [] + for clazz in range(len(blob_centers) - 1): + alpha_k.append( + np.dot(precision, (blob_centers[clazz] - blob_centers[-1])[:, np.newaxis]) + ) + alpha_k_0.append( + np.dot( + -0.5 * (blob_centers[clazz] + blob_centers[-1])[np.newaxis, :], + alpha_k[-1], + ) + ) + + sample = np.array([[-22, 22]]) + + def discriminant_func(sample, coef, intercept, clazz): + return np.exp(intercept[clazz] + np.dot(sample, coef[clazz])).item() + + prob = np.array( + [ + float( + discriminant_func(sample, alpha_k, alpha_k_0, clazz) + / ( + 1 + + sum( + [ + discriminant_func(sample, alpha_k, alpha_k_0, clazz) + for clazz in range(n_classes - 1) + ] + ) + ) + ) + for clazz in range(n_classes - 1) + ] + ) + + prob_ref = 1 - np.sum(prob) + + # check the consistency of the computed probability + # all probabilities should sum to one + prob_ref_2 = float( + 1 + / ( + 1 + + sum( + [ + discriminant_func(sample, alpha_k, alpha_k_0, clazz) + for clazz in range(n_classes - 1) + ] + ) + ) + ) + + assert prob_ref == pytest.approx(prob_ref_2) + # check that the probability of LDA are close to the theoretical + # probabilities + assert_allclose( + lda.predict_proba(sample), np.hstack([prob, prob_ref])[np.newaxis], atol=1e-2 + ) + + +def test_lda_priors(): + # Test priors (negative priors) + priors = np.array([0.5, -0.5]) + clf = LinearDiscriminantAnalysis(priors=priors) + msg = "priors must be non-negative" + + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + # Test that priors passed as a list are correctly handled (run to see if + # failure) + clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5]) + clf.fit(X, y) + + # Test that priors always sum to 1 + priors = np.array([0.5, 0.6]) + prior_norm = np.array([0.45, 0.55]) + clf = LinearDiscriminantAnalysis(priors=priors) + + with pytest.warns(UserWarning): + clf.fit(X, y) + + assert_array_almost_equal(clf.priors_, prior_norm, 2) + + +def test_lda_coefs(): + # Test if the coefficients of the solvers are approximately the same. + n_features = 2 + n_classes = 2 + n_samples = 1000 + X, y = make_blobs( + n_samples=n_samples, n_features=n_features, centers=n_classes, random_state=11 + ) + + clf_lda_svd = LinearDiscriminantAnalysis(solver="svd") + clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr") + clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen") + + clf_lda_svd.fit(X, y) + clf_lda_lsqr.fit(X, y) + clf_lda_eigen.fit(X, y) + + assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1) + assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1) + assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1) + + +def test_lda_transform(): + # Test LDA transform. 
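+ # Only the 'svd' and 'eigen' solvers support transform; the 'lsqr' solver is expected
+ # to raise NotImplementedError, which is asserted at the end of this test.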
+ clf = LinearDiscriminantAnalysis(solver="svd", n_components=1) + X_transformed = clf.fit(X, y).transform(X) + assert X_transformed.shape[1] == 1 + clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1) + X_transformed = clf.fit(X, y).transform(X) + assert X_transformed.shape[1] == 1 + + clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1) + clf.fit(X, y) + msg = "transform not implemented for 'lsqr'" + + with pytest.raises(NotImplementedError, match=msg): + clf.transform(X) + + +def test_lda_explained_variance_ratio(): + # Test if the sum of the normalized eigen vectors values equals 1, + # Also tests whether the explained_variance_ratio_ formed by the + # eigen solver is the same as the explained_variance_ratio_ formed + # by the svd solver + + state = np.random.RandomState(0) + X = state.normal(loc=0, scale=100, size=(40, 20)) + y = state.randint(0, 3, size=(40,)) + + clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen") + clf_lda_eigen.fit(X, y) + assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3) + assert clf_lda_eigen.explained_variance_ratio_.shape == ( + 2, + ), "Unexpected length for explained_variance_ratio_" + + clf_lda_svd = LinearDiscriminantAnalysis(solver="svd") + clf_lda_svd.fit(X, y) + assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3) + assert clf_lda_svd.explained_variance_ratio_.shape == ( + 2, + ), "Unexpected length for explained_variance_ratio_" + + assert_array_almost_equal( + clf_lda_svd.explained_variance_ratio_, clf_lda_eigen.explained_variance_ratio_ + ) + + +def test_lda_orthogonality(): + # arrange four classes with their means in a kite-shaped pattern + # the longer distance should be transformed to the first component, and + # the shorter distance to the second component. + means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]]) + + # We construct perfectly symmetric distributions, so the LDA can estimate + # precise means. + scatter = np.array( + [ + [0.1, 0, 0], + [-0.1, 0, 0], + [0, 0.1, 0], + [0, -0.1, 0], + [0, 0, 0.1], + [0, 0, -0.1], + ] + ) + + X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3)) + y = np.repeat(np.arange(means.shape[0]), scatter.shape[0]) + + # Fit LDA and transform the means + clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y) + means_transformed = clf.transform(means) + + d1 = means_transformed[3] - means_transformed[0] + d2 = means_transformed[2] - means_transformed[1] + d1 /= np.sqrt(np.sum(d1**2)) + d2 /= np.sqrt(np.sum(d2**2)) + + # the transformed within-class covariance should be the identity matrix + assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2)) + + # the means of classes 0 and 3 should lie on the first component + assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0) + + # the means of classes 1 and 2 should lie on the second component + assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0) + + +def test_lda_scaling(): + # Test if classification works correctly with differently scaled features. + n = 100 + rng = np.random.RandomState(1234) + # use uniform distribution of features to make sure there is absolutely no + # overlap between classes. 
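+ # The two classes are separated by +/-10 along the first feature, and the features are
+ # then rescaled by widely different factors ([1, 100, 10000]) to stress each solver.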
+ x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0] + x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0] + x = np.vstack((x1, x2)) * [1, 100, 10000] + y = [-1] * n + [1] * n + + for solver in ("svd", "lsqr", "eigen"): + clf = LinearDiscriminantAnalysis(solver=solver) + # should be able to separate the data perfectly + assert clf.fit(x, y).score(x, y) == 1.0, "using covariance: %s" % solver + + +def test_lda_store_covariance(): + # Test for solver 'lsqr' and 'eigen' + # 'store_covariance' has no effect on 'lsqr' and 'eigen' solvers + for solver in ("lsqr", "eigen"): + clf = LinearDiscriminantAnalysis(solver=solver).fit(X6, y6) + assert hasattr(clf, "covariance_") + + # Test the actual attribute: + clf = LinearDiscriminantAnalysis(solver=solver, store_covariance=True).fit( + X6, y6 + ) + assert hasattr(clf, "covariance_") + + assert_array_almost_equal( + clf.covariance_, np.array([[0.422222, 0.088889], [0.088889, 0.533333]]) + ) + + # Test for SVD solver, the default is to not set the covariances_ attribute + clf = LinearDiscriminantAnalysis(solver="svd").fit(X6, y6) + assert not hasattr(clf, "covariance_") + + # Test the actual attribute: + clf = LinearDiscriminantAnalysis(solver=solver, store_covariance=True).fit(X6, y6) + assert hasattr(clf, "covariance_") + + assert_array_almost_equal( + clf.covariance_, np.array([[0.422222, 0.088889], [0.088889, 0.533333]]) + ) + + +@pytest.mark.parametrize("seed", range(10)) +def test_lda_shrinkage(seed): + # Test that shrunk covariance estimator and shrinkage parameter behave the + # same + rng = np.random.RandomState(seed) + X = rng.rand(100, 10) + y = rng.randint(3, size=(100)) + c1 = LinearDiscriminantAnalysis(store_covariance=True, shrinkage=0.5, solver="lsqr") + c2 = LinearDiscriminantAnalysis( + store_covariance=True, + covariance_estimator=ShrunkCovariance(shrinkage=0.5), + solver="lsqr", + ) + c1.fit(X, y) + c2.fit(X, y) + assert_allclose(c1.means_, c2.means_) + assert_allclose(c1.covariance_, c2.covariance_) + + +def test_lda_ledoitwolf(): + # When shrinkage="auto" current implementation uses ledoitwolf estimation + # of covariance after standardizing the data. 
This checks that it is indeed + # the case + class StandardizedLedoitWolf: + def fit(self, X): + sc = StandardScaler() # standardize features + X_sc = sc.fit_transform(X) + s = ledoit_wolf(X_sc)[0] + # rescale + s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] + self.covariance_ = s + + rng = np.random.RandomState(0) + X = rng.rand(100, 10) + y = rng.randint(3, size=(100,)) + c1 = LinearDiscriminantAnalysis( + store_covariance=True, shrinkage="auto", solver="lsqr" + ) + c2 = LinearDiscriminantAnalysis( + store_covariance=True, + covariance_estimator=StandardizedLedoitWolf(), + solver="lsqr", + ) + c1.fit(X, y) + c2.fit(X, y) + assert_allclose(c1.means_, c2.means_) + assert_allclose(c1.covariance_, c2.covariance_) + + +@pytest.mark.parametrize("n_features", [3, 5]) +@pytest.mark.parametrize("n_classes", [5, 3]) +def test_lda_dimension_warning(n_classes, n_features): + rng = check_random_state(0) + n_samples = 10 + X = rng.randn(n_samples, n_features) + # we create n_classes labels by repeating and truncating a + # range(n_classes) until n_samples + y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples] + max_components = min(n_features, n_classes - 1) + + for n_components in [max_components - 1, None, max_components]: + # if n_components <= min(n_classes - 1, n_features), no warning + lda = LinearDiscriminantAnalysis(n_components=n_components) + lda.fit(X, y) + + for n_components in [max_components + 1, max(n_features, n_classes - 1) + 1]: + # if n_components > min(n_classes - 1, n_features), raise error. + # We test one unit higher than max_components, and then something + # larger than both n_features and n_classes - 1 to ensure the test + # works for any value of n_component + lda = LinearDiscriminantAnalysis(n_components=n_components) + msg = "n_components cannot be larger than " + with pytest.raises(ValueError, match=msg): + lda.fit(X, y) + + +@pytest.mark.parametrize( + "data_type, expected_type", + [ + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ], +) +def test_lda_dtype_match(data_type, expected_type): + for solver, shrinkage in solver_shrinkage: + clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) + clf.fit(X.astype(data_type), y.astype(data_type)) + assert clf.coef_.dtype == expected_type + + +def test_lda_numeric_consistency_float32_float64(): + for solver, shrinkage in solver_shrinkage: + clf_32 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) + clf_32.fit(X.astype(np.float32), y.astype(np.float32)) + clf_64 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) + clf_64.fit(X.astype(np.float64), y.astype(np.float64)) + + # Check value consistency between types + rtol = 1e-6 + assert_allclose(clf_32.coef_, clf_64.coef_, rtol=rtol) + + +def test_qda(): + # QDA classification. + # This checks that QDA implements fit and predict and returns + # correct values for a simple toy dataset. 
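+ # X6/y6 are the nine separable 2D points defined at module level; X7 is the
+ # matching one-feature version used for the 1D checks.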
+ clf = QuadraticDiscriminantAnalysis() + y_pred = clf.fit(X6, y6).predict(X6) + assert_array_equal(y_pred, y6) + + # Assure that it works with 1D data + y_pred1 = clf.fit(X7, y6).predict(X7) + assert_array_equal(y_pred1, y6) + + # Test probas estimates + y_proba_pred1 = clf.predict_proba(X7) + assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6) + y_log_proba_pred1 = clf.predict_log_proba(X7) + assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8) + + y_pred3 = clf.fit(X6, y7).predict(X6) + # QDA shouldn't be able to separate those + assert np.any(y_pred3 != y7) + + # Classes should have at least 2 elements + with pytest.raises(ValueError): + clf.fit(X6, y4) + + +def test_qda_priors(): + clf = QuadraticDiscriminantAnalysis() + y_pred = clf.fit(X6, y6).predict(X6) + n_pos = np.sum(y_pred == 2) + + neg = 1e-10 + clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg])) + y_pred = clf.fit(X6, y6).predict(X6) + n_pos2 = np.sum(y_pred == 2) + + assert n_pos2 > n_pos + + +@pytest.mark.parametrize("priors_type", ["list", "tuple", "array"]) +def test_qda_prior_type(priors_type): + """Check that priors accept array-like.""" + priors = [0.5, 0.5] + clf = QuadraticDiscriminantAnalysis( + priors=_convert_container([0.5, 0.5], priors_type) + ).fit(X6, y6) + assert isinstance(clf.priors_, np.ndarray) + assert_array_equal(clf.priors_, priors) + + +def test_qda_prior_copy(): + """Check that altering `priors` without `fit` doesn't change `priors_`""" + priors = np.array([0.5, 0.5]) + qda = QuadraticDiscriminantAnalysis(priors=priors).fit(X, y) + + # we expect the following + assert_array_equal(qda.priors_, qda.priors) + + # altering `priors` without `fit` should not change `priors_` + priors[0] = 0.2 + assert qda.priors_[0] != qda.priors[0] + + +def test_qda_store_covariance(): + # The default is to not set the covariances_ attribute + clf = QuadraticDiscriminantAnalysis().fit(X6, y6) + assert not hasattr(clf, "covariance_") + + # Test the actual attribute: + clf = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X6, y6) + assert hasattr(clf, "covariance_") + + assert_array_almost_equal(clf.covariance_[0], np.array([[0.7, 0.45], [0.45, 0.7]])) + + assert_array_almost_equal( + clf.covariance_[1], + np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]]), + ) + + +@pytest.mark.xfail( + _IS_WASM, + reason=( + "no floating point exceptions, see" + " https://github.com/numpy/numpy/pull/21895#issuecomment-1311525881" + ), +) +def test_qda_regularization(): + # The default is reg_param=0. and will cause issues when there is a + # constant variable. + + # Fitting on data with constant variable triggers an UserWarning. + collinear_msg = "Variables are collinear" + clf = QuadraticDiscriminantAnalysis() + with pytest.warns(UserWarning, match=collinear_msg): + y_pred = clf.fit(X2, y6) + + # XXX: RuntimeWarning is also raised at predict time because of divisions + # by zero when the model is fit with a constant feature and without + # regularization: should this be considered a bug? Either by the fit-time + # message more informative, raising and exception instead of a warning in + # this case or somehow changing predict to avoid division by zero. + with pytest.warns(RuntimeWarning, match="divide by zero"): + y_pred = clf.predict(X2) + assert np.any(y_pred != y6) + + # Adding a little regularization fixes the division by zero at predict + # time. But UserWarning will persist at fit time. 
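+ # reg_param regularizes each per-class covariance estimate (blending it with a scaled
+ # identity), which is why a small value suffices to avoid the division by zero at predict time.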
+ clf = QuadraticDiscriminantAnalysis(reg_param=0.01) + with pytest.warns(UserWarning, match=collinear_msg): + clf.fit(X2, y6) + y_pred = clf.predict(X2) + assert_array_equal(y_pred, y6) + + # UserWarning should also be there for the n_samples_in_a_class < + # n_features case. + clf = QuadraticDiscriminantAnalysis(reg_param=0.1) + with pytest.warns(UserWarning, match=collinear_msg): + clf.fit(X5, y5) + y_pred5 = clf.predict(X5) + assert_array_equal(y_pred5, y5) + + +def test_covariance(): + x, y = make_blobs(n_samples=100, n_features=5, centers=1, random_state=42) + + # make features correlated + x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1])) + + c_e = _cov(x, "empirical") + assert_almost_equal(c_e, c_e.T) + + c_s = _cov(x, "auto") + assert_almost_equal(c_s, c_s.T) + + +@pytest.mark.parametrize("solver", ["svd", "lsqr", "eigen"]) +def test_raises_value_error_on_same_number_of_classes_and_samples(solver): + """ + Tests that if the number of samples equals the number + of classes, a ValueError is raised. + """ + X = np.array([[0.5, 0.6], [0.6, 0.5]]) + y = np.array(["a", "b"]) + clf = LinearDiscriminantAnalysis(solver=solver) + with pytest.raises(ValueError, match="The number of samples must be more"): + clf.fit(X, y) + + +def test_get_feature_names_out(): + """Check get_feature_names_out uses class name as prefix.""" + + est = LinearDiscriminantAnalysis().fit(X, y) + names_out = est.get_feature_names_out() + + class_name_lower = "LinearDiscriminantAnalysis".lower() + expected_names_out = np.array( + [ + f"{class_name_lower}{i}" + for i in range(est.explained_variance_ratio_.shape[0]) + ], + dtype=object, + ) + assert_array_equal(names_out, expected_names_out) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_init.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_init.py new file mode 100644 index 0000000000000000000000000000000000000000..331b9b7429cbbfd91247b76079f2dcf72a2d3884 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_init.py @@ -0,0 +1,20 @@ +# Basic unittests to test functioning of module's top-level + + +__author__ = "Yaroslav Halchenko" +__license__ = "BSD" + + +try: + from sklearn import * # noqa + + _top_import_error = None +except Exception as e: + _top_import_error = e + + +def test_import_skl(): + # Test either above import has failed for some reason + # "import *" is discouraged outside of the module level, hence we + # rely on setting up the variable above + assert _top_import_error is None diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_kernel_ridge.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_kernel_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..431d326a82269a72037a6b8eb9d58ccff6db9c36 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_kernel_ridge.py @@ -0,0 +1,80 @@ +import numpy as np +import pytest + +from sklearn.datasets import make_regression +from sklearn.kernel_ridge import KernelRidge +from sklearn.linear_model import Ridge +from sklearn.metrics.pairwise import pairwise_kernels +from sklearn.utils._testing import assert_array_almost_equal, ignore_warnings +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +X, y = make_regression(n_features=10, random_state=0) +Y = np.array([y, y]).T + + +def test_kernel_ridge(): + pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X) + pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X) + 
assert_array_almost_equal(pred, pred2) + + +@pytest.mark.parametrize("sparse_container", [*CSR_CONTAINERS, *CSC_CONTAINERS]) +def test_kernel_ridge_sparse(sparse_container): + X_sparse = sparse_container(X) + pred = ( + Ridge(alpha=1, fit_intercept=False, solver="cholesky") + .fit(X_sparse, y) + .predict(X_sparse) + ) + pred2 = KernelRidge(kernel="linear", alpha=1).fit(X_sparse, y).predict(X_sparse) + assert_array_almost_equal(pred, pred2) + + +def test_kernel_ridge_singular_kernel(): + # alpha=0 causes a LinAlgError in computing the dual coefficients, + # which causes a fallback to a lstsq solver. This is tested here. + pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X) + kr = KernelRidge(kernel="linear", alpha=0) + ignore_warnings(kr.fit)(X, y) + pred2 = kr.predict(X) + assert_array_almost_equal(pred, pred2) + + +def test_kernel_ridge_precomputed(): + for kernel in ["linear", "rbf", "poly", "cosine"]: + K = pairwise_kernels(X, X, metric=kernel) + pred = KernelRidge(kernel=kernel).fit(X, y).predict(X) + pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K) + assert_array_almost_equal(pred, pred2) + + +def test_kernel_ridge_precomputed_kernel_unchanged(): + K = np.dot(X, X.T) + K2 = K.copy() + KernelRidge(kernel="precomputed").fit(K, y) + assert_array_almost_equal(K, K2) + + +def test_kernel_ridge_sample_weights(): + K = np.dot(X, X.T) # precomputed kernel + sw = np.random.RandomState(0).rand(X.shape[0]) + + pred = Ridge(alpha=1, fit_intercept=False).fit(X, y, sample_weight=sw).predict(X) + pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y, sample_weight=sw).predict(X) + pred3 = ( + KernelRidge(kernel="precomputed", alpha=1) + .fit(K, y, sample_weight=sw) + .predict(K) + ) + assert_array_almost_equal(pred, pred2) + assert_array_almost_equal(pred, pred3) + + +def test_kernel_ridge_multi_output(): + pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X) + pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X) + assert_array_almost_equal(pred, pred2) + + pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X) + pred3 = np.array([pred3, pred3]).T + assert_array_almost_equal(pred2, pred3) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_metaestimators.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_metaestimators.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c6820faefc26cae28cfd39b780e202878b5c31 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_metaestimators.py @@ -0,0 +1,306 @@ +"""Common tests for metaestimators""" +import functools +from inspect import signature + +import numpy as np +import pytest + +from sklearn.base import BaseEstimator, is_regressor +from sklearn.datasets import make_classification +from sklearn.ensemble import BaggingClassifier +from sklearn.exceptions import NotFittedError +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.feature_selection import RFE, RFECV +from sklearn.linear_model import LogisticRegression, Ridge +from sklearn.model_selection import GridSearchCV, RandomizedSearchCV +from sklearn.pipeline import Pipeline, make_pipeline +from sklearn.preprocessing import MaxAbsScaler, StandardScaler +from sklearn.semi_supervised import SelfTrainingClassifier +from sklearn.utils import all_estimators +from sklearn.utils._testing import set_random_state +from sklearn.utils.estimator_checks import ( + _enforce_estimator_tags_X, + _enforce_estimator_tags_y, +) +from sklearn.utils.validation 
import check_is_fitted + + +class DelegatorData: + def __init__( + self, + name, + construct, + skip_methods=(), + fit_args=make_classification(random_state=0), + ): + self.name = name + self.construct = construct + self.fit_args = fit_args + self.skip_methods = skip_methods + + +DELEGATING_METAESTIMATORS = [ + DelegatorData("Pipeline", lambda est: Pipeline([("est", est)])), + DelegatorData( + "GridSearchCV", + lambda est: GridSearchCV(est, param_grid={"param": [5]}, cv=2), + skip_methods=["score"], + ), + DelegatorData( + "RandomizedSearchCV", + lambda est: RandomizedSearchCV( + est, param_distributions={"param": [5]}, cv=2, n_iter=1 + ), + skip_methods=["score"], + ), + DelegatorData("RFE", RFE, skip_methods=["transform", "inverse_transform"]), + DelegatorData("RFECV", RFECV, skip_methods=["transform", "inverse_transform"]), + DelegatorData( + "BaggingClassifier", + BaggingClassifier, + skip_methods=[ + "transform", + "inverse_transform", + "score", + "predict_proba", + "predict_log_proba", + "predict", + ], + ), + DelegatorData( + "SelfTrainingClassifier", + lambda est: SelfTrainingClassifier(est), + skip_methods=["transform", "inverse_transform", "predict_proba"], + ), +] + + +def test_metaestimator_delegation(): + # Ensures specified metaestimators have methods iff subestimator does + def hides(method): + @property + def wrapper(obj): + if obj.hidden_method == method.__name__: + raise AttributeError("%r is hidden" % obj.hidden_method) + return functools.partial(method, obj) + + return wrapper + + class SubEstimator(BaseEstimator): + def __init__(self, param=1, hidden_method=None): + self.param = param + self.hidden_method = hidden_method + + def fit(self, X, y=None, *args, **kwargs): + self.coef_ = np.arange(X.shape[1]) + self.classes_ = [] + return True + + def _check_fit(self): + check_is_fitted(self) + + @hides + def inverse_transform(self, X, *args, **kwargs): + self._check_fit() + return X + + @hides + def transform(self, X, *args, **kwargs): + self._check_fit() + return X + + @hides + def predict(self, X, *args, **kwargs): + self._check_fit() + return np.ones(X.shape[0]) + + @hides + def predict_proba(self, X, *args, **kwargs): + self._check_fit() + return np.ones(X.shape[0]) + + @hides + def predict_log_proba(self, X, *args, **kwargs): + self._check_fit() + return np.ones(X.shape[0]) + + @hides + def decision_function(self, X, *args, **kwargs): + self._check_fit() + return np.ones(X.shape[0]) + + @hides + def score(self, X, y, *args, **kwargs): + self._check_fit() + return 1.0 + + methods = [ + k + for k in SubEstimator.__dict__.keys() + if not k.startswith("_") and not k.startswith("fit") + ] + methods.sort() + + for delegator_data in DELEGATING_METAESTIMATORS: + delegate = SubEstimator() + delegator = delegator_data.construct(delegate) + for method in methods: + if method in delegator_data.skip_methods: + continue + assert hasattr(delegate, method) + assert hasattr( + delegator, method + ), "%s does not have method %r when its delegate does" % ( + delegator_data.name, + method, + ) + # delegation before fit raises a NotFittedError + if method == "score": + with pytest.raises(NotFittedError): + getattr(delegator, method)( + delegator_data.fit_args[0], delegator_data.fit_args[1] + ) + else: + with pytest.raises(NotFittedError): + getattr(delegator, method)(delegator_data.fit_args[0]) + + delegator.fit(*delegator_data.fit_args) + for method in methods: + if method in delegator_data.skip_methods: + continue + # smoke test delegation + if method == "score": + getattr(delegator, 
method)( + delegator_data.fit_args[0], delegator_data.fit_args[1] + ) + else: + getattr(delegator, method)(delegator_data.fit_args[0]) + + for method in methods: + if method in delegator_data.skip_methods: + continue + delegate = SubEstimator(hidden_method=method) + delegator = delegator_data.construct(delegate) + assert not hasattr(delegate, method) + assert not hasattr( + delegator, method + ), "%s has method %r when its delegate does not" % ( + delegator_data.name, + method, + ) + + +def _generate_meta_estimator_instances_with_pipeline(): + """Generate instances of meta-estimators fed with a pipeline + + Are considered meta-estimators all estimators accepting one of "estimator", + "base_estimator" or "estimators". + """ + for _, Estimator in sorted(all_estimators()): + sig = set(signature(Estimator).parameters) + + if "estimator" in sig or "base_estimator" in sig or "regressor" in sig: + if is_regressor(Estimator): + estimator = make_pipeline(TfidfVectorizer(), Ridge()) + param_grid = {"ridge__alpha": [0.1, 1.0]} + else: + estimator = make_pipeline(TfidfVectorizer(), LogisticRegression()) + param_grid = {"logisticregression__C": [0.1, 1.0]} + + if "param_grid" in sig or "param_distributions" in sig: + # SearchCV estimators + extra_params = {"n_iter": 2} if "n_iter" in sig else {} + yield Estimator(estimator, param_grid, **extra_params) + else: + yield Estimator(estimator) + + elif "transformer_list" in sig: + # FeatureUnion + transformer_list = [ + ("trans1", make_pipeline(TfidfVectorizer(), MaxAbsScaler())), + ( + "trans2", + make_pipeline(TfidfVectorizer(), StandardScaler(with_mean=False)), + ), + ] + yield Estimator(transformer_list) + + elif "estimators" in sig: + # stacking, voting + if is_regressor(Estimator): + estimator = [ + ("est1", make_pipeline(TfidfVectorizer(), Ridge(alpha=0.1))), + ("est2", make_pipeline(TfidfVectorizer(), Ridge(alpha=1))), + ] + else: + estimator = [ + ( + "est1", + make_pipeline(TfidfVectorizer(), LogisticRegression(C=0.1)), + ), + ("est2", make_pipeline(TfidfVectorizer(), LogisticRegression(C=1))), + ] + yield Estimator(estimator) + + else: + continue + + +# TODO: remove data validation for the following estimators +# They should be able to work on any data and delegate data validation to +# their inner estimator(s). +DATA_VALIDATION_META_ESTIMATORS_TO_IGNORE = [ + "AdaBoostClassifier", + "AdaBoostRegressor", + "BaggingClassifier", + "BaggingRegressor", + "ClassifierChain", # data validation is necessary + "IterativeImputer", + "OneVsOneClassifier", # input validation can't be avoided + "RANSACRegressor", + "RFE", + "RFECV", + "RegressorChain", # data validation is necessary + "SelfTrainingClassifier", + "SequentialFeatureSelector", # not applicable (2D data mandatory) +] + +DATA_VALIDATION_META_ESTIMATORS = [ + est + for est in _generate_meta_estimator_instances_with_pipeline() + if est.__class__.__name__ not in DATA_VALIDATION_META_ESTIMATORS_TO_IGNORE +] + + +def _get_meta_estimator_id(estimator): + return estimator.__class__.__name__ + + +@pytest.mark.parametrize( + "estimator", DATA_VALIDATION_META_ESTIMATORS, ids=_get_meta_estimator_id +) +def test_meta_estimators_delegate_data_validation(estimator): + # Check that meta-estimators delegate data validation to the inner + # estimator(s). 
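As a minimal standalone sketch of the property exercised by this test (demo names such as texts, labels and search are illustrative; only public scikit-learn API is used): a meta-estimator wrapping a text pipeline should accept a plain list of raw strings, because input validation is deferred to the inner TfidfVectorizer, and no n_features_in_ is recorded since the data is not tabular.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline

texts = ["aa bb", "bb cc", "cc aa", "aa cc"] * 5   # raw strings, not a 2D array
labels = [0, 1, 0, 1] * 5

search = GridSearchCV(
    make_pipeline(TfidfVectorizer(), LogisticRegression()),
    param_grid={"logisticregression__C": [0.1, 1.0]},
    cv=2,
)
search.fit(texts, labels)                  # no data-validation error is raised
print(hasattr(search, "n_features_in_"))   # False: validation was delegated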
+ rng = np.random.RandomState(0) + set_random_state(estimator) + + n_samples = 30 + X = rng.choice(np.array(["aa", "bb", "cc"], dtype=object), size=n_samples) + + if is_regressor(estimator): + y = rng.normal(size=n_samples) + else: + y = rng.randint(3, size=n_samples) + + # We convert to lists to make sure it works on array-like + X = _enforce_estimator_tags_X(estimator, X).tolist() + y = _enforce_estimator_tags_y(estimator, y).tolist() + + # Calling fit should not raise any data validation exception since X is a + # valid input datastructure for the first step of the pipeline passed as + # base estimator to the meta estimator. + estimator.fit(X, y) + + # n_features_in_ should not be defined since data is not tabular data. + assert not hasattr(estimator, "n_features_in_") diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_metaestimators_metadata_routing.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_metaestimators_metadata_routing.py new file mode 100644 index 0000000000000000000000000000000000000000..c3771a1c9ddba71ecfe0faa2897c0c1b3e7fb134 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_metaestimators_metadata_routing.py @@ -0,0 +1,654 @@ +import copy +import re + +import numpy as np +import pytest + +from sklearn import config_context +from sklearn.base import is_classifier +from sklearn.calibration import CalibratedClassifierCV +from sklearn.compose import TransformedTargetRegressor +from sklearn.covariance import GraphicalLassoCV +from sklearn.ensemble import ( + AdaBoostClassifier, + AdaBoostRegressor, + BaggingClassifier, + BaggingRegressor, + StackingClassifier, + StackingRegressor, + VotingClassifier, + VotingRegressor, +) +from sklearn.exceptions import UnsetMetadataPassedError +from sklearn.experimental import ( + enable_halving_search_cv, # noqa + enable_iterative_imputer, # noqa +) +from sklearn.feature_selection import ( + RFE, + RFECV, + SelectFromModel, + SequentialFeatureSelector, +) +from sklearn.impute import IterativeImputer +from sklearn.linear_model import ( + ElasticNetCV, + LarsCV, + LassoCV, + LassoLarsCV, + LogisticRegressionCV, + MultiTaskElasticNetCV, + MultiTaskLassoCV, + OrthogonalMatchingPursuitCV, + RANSACRegressor, + RidgeClassifierCV, + RidgeCV, +) +from sklearn.model_selection import ( + GridSearchCV, + HalvingGridSearchCV, + HalvingRandomSearchCV, + RandomizedSearchCV, +) +from sklearn.multiclass import ( + OneVsOneClassifier, + OneVsRestClassifier, + OutputCodeClassifier, +) +from sklearn.multioutput import ( + ClassifierChain, + MultiOutputClassifier, + MultiOutputRegressor, + RegressorChain, +) +from sklearn.pipeline import FeatureUnion +from sklearn.semi_supervised import SelfTrainingClassifier +from sklearn.tests.metadata_routing_common import ( + ConsumingClassifier, + ConsumingRegressor, + ConsumingScorer, + ConsumingSplitter, + NonConsumingClassifier, + NonConsumingRegressor, + _Registry, + assert_request_is_empty, + check_recorded_metadata, +) +from sklearn.utils.metadata_routing import MetadataRouter + +rng = np.random.RandomState(42) +N, M = 100, 4 +X = rng.rand(N, M) +y = rng.randint(0, 3, size=N) +classes = np.unique(y) +y_multi = rng.randint(0, 3, size=(N, 3)) +classes_multi = [np.unique(y_multi[:, i]) for i in range(y_multi.shape[1])] +metadata = rng.randint(0, 10, size=N) +sample_weight = rng.rand(N) +groups = np.array([0, 1] * (len(y) // 2)) + + +@pytest.fixture(autouse=True) +def enable_slep006(): + """Enable SLEP006 for all tests.""" + with 
config_context(enable_metadata_routing=True): + yield + + +METAESTIMATORS: list = [ + { + "metaestimator": MultiOutputRegressor, + "estimator_name": "estimator", + "estimator": "regressor", + "X": X, + "y": y_multi, + "estimator_routing_methods": ["fit", "partial_fit"], + }, + { + "metaestimator": MultiOutputClassifier, + "estimator_name": "estimator", + "estimator": "classifier", + "X": X, + "y": y_multi, + "estimator_routing_methods": ["fit", "partial_fit"], + "method_args": {"partial_fit": {"classes": classes_multi}}, + }, + { + "metaestimator": CalibratedClassifierCV, + "estimator_name": "estimator", + "estimator": "classifier", + "X": X, + "y": y, + "estimator_routing_methods": ["fit"], + "preserves_metadata": False, + }, + { + "metaestimator": ClassifierChain, + "estimator_name": "base_estimator", + "estimator": "classifier", + "X": X, + "y": y_multi, + "estimator_routing_methods": ["fit"], + }, + { + "metaestimator": RegressorChain, + "estimator_name": "base_estimator", + "estimator": "regressor", + "X": X, + "y": y_multi, + "estimator_routing_methods": ["fit"], + }, + { + "metaestimator": LogisticRegressionCV, + "X": X, + "y": y, + "scorer_name": "scoring", + "scorer_routing_methods": ["fit", "score"], + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": GridSearchCV, + "estimator_name": "estimator", + "estimator": "classifier", + "init_args": {"param_grid": {"alpha": [0.1, 0.2]}}, + "X": X, + "y": y, + "estimator_routing_methods": ["fit"], + "preserves_metadata": "subset", + "scorer_name": "scoring", + "scorer_routing_methods": ["fit", "score"], + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": RandomizedSearchCV, + "estimator_name": "estimator", + "estimator": "classifier", + "init_args": {"param_distributions": {"alpha": [0.1, 0.2]}}, + "X": X, + "y": y, + "estimator_routing_methods": ["fit"], + "preserves_metadata": "subset", + "scorer_name": "scoring", + "scorer_routing_methods": ["fit", "score"], + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": HalvingGridSearchCV, + "estimator_name": "estimator", + "estimator": "classifier", + "init_args": {"param_grid": {"alpha": [0.1, 0.2]}}, + "X": X, + "y": y, + "estimator_routing_methods": ["fit"], + "preserves_metadata": "subset", + "scorer_name": "scoring", + "scorer_routing_methods": ["fit", "score"], + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": HalvingRandomSearchCV, + "estimator_name": "estimator", + "estimator": "classifier", + "init_args": {"param_distributions": {"alpha": [0.1, 0.2]}}, + "X": X, + "y": y, + "estimator_routing_methods": ["fit"], + "preserves_metadata": "subset", + "scorer_name": "scoring", + "scorer_routing_methods": ["fit", "score"], + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": OneVsRestClassifier, + "estimator_name": "estimator", + "estimator": "classifier", + "X": X, + "y": y, + "estimator_routing_methods": ["fit", "partial_fit"], + "method_args": {"partial_fit": {"classes": classes}}, + }, + { + "metaestimator": OneVsOneClassifier, + "estimator_name": "estimator", + "estimator": "classifier", + "X": X, + "y": y, + "estimator_routing_methods": ["fit", "partial_fit"], + "preserves_metadata": "subset", + "method_args": {"partial_fit": {"classes": classes}}, + }, + { + "metaestimator": OutputCodeClassifier, + "estimator_name": "estimator", + "estimator": "classifier", + "init_args": {"random_state": 42}, + "X": X, + "y": y, + "estimator_routing_methods": 
["fit"], + }, + { + "metaestimator": SelectFromModel, + "estimator_name": "estimator", + "estimator": "classifier", + "X": X, + "y": y, + "estimator_routing_methods": ["fit", "partial_fit"], + "method_args": {"partial_fit": {"classes": classes}}, + }, + { + "metaestimator": OrthogonalMatchingPursuitCV, + "X": X, + "y": y, + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": ElasticNetCV, + "X": X, + "y": y, + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": LassoCV, + "X": X, + "y": y, + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": MultiTaskElasticNetCV, + "X": X, + "y": y_multi, + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": MultiTaskLassoCV, + "X": X, + "y": y_multi, + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": LarsCV, + "X": X, + "y": y, + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, + { + "metaestimator": LassoLarsCV, + "X": X, + "y": y, + "cv_name": "cv", + "cv_routing_methods": ["fit"], + }, +] +"""List containing all metaestimators to be tested and their settings + +The keys are as follows: + +- metaestimator: The metaestmator to be tested +- estimator_name: The name of the argument for the sub-estimator +- estimator: The sub-estimator type, either "regressor" or "classifier" +- init_args: The arguments to be passed to the metaestimator's constructor +- X: X-data to fit and predict +- y: y-data to fit +- estimator_routing_methods: list of all methods to check for routing metadata + to the sub-estimator +- preserves_metadata: + - True (default): the metaestimator passes the metadata to the + sub-estimator without modification. We check that the values recorded by + the sub-estimator are identical to what we've passed to the + metaestimator. + - False: no check is performed regarding values, we only check that a + metadata with the expected names/keys are passed. + - "subset": we check that the recorded metadata by the sub-estimator is a + subset of what is passed to the metaestimator. +- scorer_name: The name of the argument for the scorer +- scorer_routing_methods: list of all methods to check for routing metadata + to the scorer +- cv_name: The name of the argument for the CV splitter +- cv_routing_methods: list of all methods to check for routing metadata + to the splitter +- method_args: a dict of dicts, defining extra arguments needed to be passed to + methods, such as passing `classes` to `partial_fit`. +""" + +# IDs used by pytest to get meaningful verbose messages when running the tests +METAESTIMATOR_IDS = [str(row["metaestimator"].__name__) for row in METAESTIMATORS] + +UNSUPPORTED_ESTIMATORS = [ + AdaBoostClassifier(), + AdaBoostRegressor(), + BaggingClassifier(), + BaggingRegressor(), + FeatureUnion([]), + GraphicalLassoCV(), + IterativeImputer(), + RANSACRegressor(), + RFE(ConsumingClassifier()), + RFECV(ConsumingClassifier()), + RidgeCV(), + RidgeClassifierCV(), + SelfTrainingClassifier(ConsumingClassifier()), + SequentialFeatureSelector(ConsumingClassifier()), + StackingClassifier(ConsumingClassifier()), + StackingRegressor(ConsumingRegressor()), + TransformedTargetRegressor(), + VotingClassifier(ConsumingClassifier()), + VotingRegressor(ConsumingRegressor()), +] + + +def get_init_args(metaestimator_info, sub_estimator_consumes): + """Get the init args for a metaestimator + + This is a helper function to get the init args for a metaestimator from + the METAESTIMATORS list. 
It returns an empty dict if no init args are + required. + + Parameters + ---------- + metaestimator_info : dict + The metaestimator info from METAESTIMATORS + + sub_estimator_consumes : bool + Whether the sub-estimator consumes metadata or not. + + Returns + ------- + kwargs : dict + The init args for the metaestimator. + + (estimator, estimator_registry) : (estimator, registry) + The sub-estimator and the corresponding registry. + + (scorer, scorer_registry) : (scorer, registry) + The scorer and the corresponding registry. + + (cv, cv_registry) : (CV splitter, registry) + The CV splitter and the corresponding registry. + """ + kwargs = metaestimator_info.get("init_args", {}) + estimator, estimator_registry = None, None + scorer, scorer_registry = None, None + cv, cv_registry = None, None + if "estimator" in metaestimator_info: + estimator_name = metaestimator_info["estimator_name"] + estimator_registry = _Registry() + sub_estimator_type = metaestimator_info["estimator"] + if sub_estimator_consumes: + if sub_estimator_type == "regressor": + estimator = ConsumingRegressor(estimator_registry) + else: + estimator = ConsumingClassifier(estimator_registry) + else: + if sub_estimator_type == "regressor": + estimator = NonConsumingRegressor() + else: + estimator = NonConsumingClassifier() + kwargs[estimator_name] = estimator + if "scorer_name" in metaestimator_info: + scorer_name = metaestimator_info["scorer_name"] + scorer_registry = _Registry() + scorer = ConsumingScorer(registry=scorer_registry) + kwargs[scorer_name] = scorer + if "cv_name" in metaestimator_info: + cv_name = metaestimator_info["cv_name"] + cv_registry = _Registry() + cv = ConsumingSplitter(registry=cv_registry) + kwargs[cv_name] = cv + + return ( + kwargs, + (estimator, estimator_registry), + (scorer, scorer_registry), + (cv, cv_registry), + ) + + +@pytest.mark.parametrize("estimator", UNSUPPORTED_ESTIMATORS) +def test_unsupported_estimators_get_metadata_routing(estimator): + """Test that get_metadata_routing is not implemented on meta-estimators for + which we haven't implemented routing yet.""" + with pytest.raises(NotImplementedError): + estimator.get_metadata_routing() + + +@pytest.mark.parametrize("estimator", UNSUPPORTED_ESTIMATORS) +def test_unsupported_estimators_fit_with_metadata(estimator): + """Test that fit raises NotImplementedError when metadata routing is + enabled and a metadata is passed on meta-estimators for which we haven't + implemented routing yet.""" + with pytest.raises(NotImplementedError): + try: + estimator.fit([[1]], [1], sample_weight=[1]) + except TypeError: + # not all meta-estimators in the list support sample_weight, + # and for those we skip this test. + raise NotImplementedError + + +def test_registry_copy(): + # test that _Registry is not copied into a new instance. + a = _Registry() + b = _Registry() + assert a is not b + assert a is copy.copy(a) + assert a is copy.deepcopy(a) + + +@pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) +def test_default_request(metaestimator): + # Check that by default request is empty and the right type + cls = metaestimator["metaestimator"] + kwargs, *_ = get_init_args(metaestimator, sub_estimator_consumes=True) + instance = cls(**kwargs) + if "cv_name" in metaestimator: + # Our GroupCV splitters request groups by default, which we should + # ignore in this test. 
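For context, a rough standalone sketch of the default-request behaviour asserted here (illustrative only; the test itself relies on the private assert_request_is_empty helper): with metadata routing enabled, a freshly constructed meta-estimator exposes a MetadataRouter that requests no metadata until requests are explicitly set.

from sklearn import config_context
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.utils.metadata_routing import MetadataRouter

with config_context(enable_metadata_routing=True):
    search = GridSearchCV(LogisticRegression(), param_grid={"C": [1.0]})
    routing = search.get_metadata_routing()
    print(isinstance(routing, MetadataRouter))  # True
    # Nothing is requested yet: passing e.g. sample_weight to search.fit()
    # without LogisticRegression().set_fit_request(sample_weight=True) would
    # raise UnsetMetadataPassedError, which is what
    # test_error_on_missing_requests_for_sub_estimator below checks.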
+ exclude = {"splitter": ["split"]} + else: + exclude = None + assert_request_is_empty(instance.get_metadata_routing(), exclude=exclude) + assert isinstance(instance.get_metadata_routing(), MetadataRouter) + + +@pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) +def test_error_on_missing_requests_for_sub_estimator(metaestimator): + # Test that a UnsetMetadataPassedError is raised when the sub-estimator's + # requests are not set + if "estimator" not in metaestimator: + # This test only makes sense for metaestimators which have a + # sub-estimator, e.g. MyMetaEstimator(estimator=MySubEstimator()) + return + + cls = metaestimator["metaestimator"] + X = metaestimator["X"] + y = metaestimator["y"] + routing_methods = metaestimator["estimator_routing_methods"] + + for method_name in routing_methods: + for key in ["sample_weight", "metadata"]: + kwargs, (estimator, _), (scorer, _), *_ = get_init_args( + metaestimator, sub_estimator_consumes=True + ) + if scorer: + scorer.set_score_request(**{key: True}) + val = {"sample_weight": sample_weight, "metadata": metadata}[key] + method_kwargs = {key: val} + msg = ( + f"[{key}] are passed but are not explicitly set as requested or not" + f" for {estimator.__class__.__name__}.{method_name}" + ) + + instance = cls(**kwargs) + with pytest.raises(UnsetMetadataPassedError, match=re.escape(msg)): + method = getattr(instance, method_name) + method(X, y, **method_kwargs) + + +@pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) +def test_setting_request_on_sub_estimator_removes_error(metaestimator): + # When the metadata is explicitly requested on the sub-estimator, there + # should be no errors. + if "estimator" not in metaestimator: + # This test only makes sense for metaestimators which have a + # sub-estimator, e.g. MyMetaEstimator(estimator=MySubEstimator()) + return + + def set_request(estimator, method_name): + # e.g. 
call set_fit_request on estimator + set_request_for_method = getattr(estimator, f"set_{method_name}_request") + set_request_for_method(sample_weight=True, metadata=True) + if is_classifier(estimator) and method_name == "partial_fit": + set_request_for_method(classes=True) + + cls = metaestimator["metaestimator"] + X = metaestimator["X"] + y = metaestimator["y"] + routing_methods = metaestimator["estimator_routing_methods"] + preserves_metadata = metaestimator.get("preserves_metadata", True) + + for method_name in routing_methods: + for key in ["sample_weight", "metadata"]: + val = {"sample_weight": sample_weight, "metadata": metadata}[key] + method_kwargs = {key: val} + + kwargs, (estimator, registry), (scorer, _), (cv, _) = get_init_args( + metaestimator, sub_estimator_consumes=True + ) + if scorer: + set_request(scorer, "score") + if cv: + cv.set_split_request(groups=True, metadata=True) + set_request(estimator, method_name) + instance = cls(**kwargs) + method = getattr(instance, method_name) + extra_method_args = metaestimator.get("method_args", {}).get( + method_name, {} + ) + method(X, y, **method_kwargs, **extra_method_args) + # sanity check that registry is not empty, or else the test passes + # trivially + assert registry + if preserves_metadata is True: + for estimator in registry: + check_recorded_metadata(estimator, method_name, **method_kwargs) + elif preserves_metadata == "subset": + for estimator in registry: + check_recorded_metadata( + estimator, + method_name, + split_params=method_kwargs.keys(), + **method_kwargs, + ) + + +@pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) +def test_non_consuming_estimator_works(metaestimator): + # Test that when a non-consuming estimator is given, the meta-estimator + # works w/o setting any requests. + # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28239 + if "estimator" not in metaestimator: + # This test only makes sense for metaestimators which have a + # sub-estimator, e.g. MyMetaEstimator(estimator=MySubEstimator()) + return + + def set_request(estimator, method_name): + # e.g. call set_fit_request on estimator + if is_classifier(estimator) and method_name == "partial_fit": + estimator.set_partial_fit_request(classes=True) + + cls = metaestimator["metaestimator"] + X = metaestimator["X"] + y = metaestimator["y"] + routing_methods = metaestimator["estimator_routing_methods"] + + for method_name in routing_methods: + kwargs, (estimator, _), (_, _), (_, _) = get_init_args( + metaestimator, sub_estimator_consumes=False + ) + instance = cls(**kwargs) + set_request(estimator, method_name) + method = getattr(instance, method_name) + extra_method_args = metaestimator.get("method_args", {}).get(method_name, {}) + # This following line should pass w/o raising a routing error. + method(X, y, **extra_method_args) + + +@pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) +def test_metadata_is_routed_correctly_to_scorer(metaestimator): + """Test that any requested metadata is correctly routed to the underlying + scorers in CV estimators. 
+ """ + if "scorer_name" not in metaestimator: + # This test only makes sense for CV estimators + return + + cls = metaestimator["metaestimator"] + routing_methods = metaestimator["scorer_routing_methods"] + + for method_name in routing_methods: + kwargs, (estimator, _), (scorer, registry), (cv, _) = get_init_args( + metaestimator, sub_estimator_consumes=True + ) + if estimator: + estimator.set_fit_request(sample_weight=True, metadata=True) + scorer.set_score_request(sample_weight=True) + if cv: + cv.set_split_request(groups=True, metadata=True) + instance = cls(**kwargs) + method = getattr(instance, method_name) + method_kwargs = {"sample_weight": sample_weight} + if "fit" not in method_name: + instance.fit(X, y) + method(X, y, **method_kwargs) + + assert registry + for _scorer in registry: + check_recorded_metadata( + obj=_scorer, + method="score", + split_params=("sample_weight",), + **method_kwargs, + ) + + +@pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) +def test_metadata_is_routed_correctly_to_splitter(metaestimator): + """Test that any requested metadata is correctly routed to the underlying + splitters in CV estimators. + """ + if "cv_routing_methods" not in metaestimator: + # This test is only for metaestimators accepting a CV splitter + return + + cls = metaestimator["metaestimator"] + routing_methods = metaestimator["cv_routing_methods"] + X_ = metaestimator["X"] + y_ = metaestimator["y"] + + for method_name in routing_methods: + kwargs, (estimator, _), (scorer, _), (cv, registry) = get_init_args( + metaestimator, sub_estimator_consumes=True + ) + if estimator: + estimator.set_fit_request(sample_weight=False, metadata=False) + if scorer: + scorer.set_score_request(sample_weight=False, metadata=False) + cv.set_split_request(groups=True, metadata=True) + instance = cls(**kwargs) + method_kwargs = {"groups": groups, "metadata": metadata} + method = getattr(instance, method_name) + method(X_, y_, **method_kwargs) + assert registry + for _splitter in registry: + check_recorded_metadata(obj=_splitter, method="split", **method_kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_multioutput.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_multioutput.py new file mode 100644 index 0000000000000000000000000000000000000000..c42938229d5a6831effa2187370ed0d502bff7fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_multioutput.py @@ -0,0 +1,836 @@ +import re + +import numpy as np +import pytest +from joblib import cpu_count + +from sklearn import datasets +from sklearn.base import ClassifierMixin, clone +from sklearn.datasets import ( + load_linnerud, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import ( + GradientBoostingRegressor, + RandomForestClassifier, + StackingRegressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.impute import SimpleImputer +from sklearn.linear_model import ( + Lasso, + LinearRegression, + LogisticRegression, + OrthogonalMatchingPursuit, + PassiveAggressiveClassifier, + Ridge, + SGDClassifier, + SGDRegressor, +) +from sklearn.metrics import jaccard_score, mean_squared_error +from sklearn.model_selection import GridSearchCV, train_test_split +from sklearn.multiclass import OneVsRestClassifier +from sklearn.multioutput import ( + ClassifierChain, + MultiOutputClassifier, + MultiOutputRegressor, + RegressorChain, +) +from 
sklearn.pipeline import make_pipeline +from sklearn.svm import LinearSVC +from sklearn.tree import DecisionTreeClassifier +from sklearn.utils import shuffle +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + BSR_CONTAINERS, + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + + +def test_multi_target_regression(): + X, y = datasets.make_regression(n_targets=3, random_state=0) + X_train, y_train = X[:50], y[:50] + X_test, y_test = X[50:], y[50:] + + references = np.zeros_like(y_test) + for n in range(3): + rgr = GradientBoostingRegressor(random_state=0) + rgr.fit(X_train, y_train[:, n]) + references[:, n] = rgr.predict(X_test) + + rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) + rgr.fit(X_train, y_train) + y_pred = rgr.predict(X_test) + + assert_almost_equal(references, y_pred) + + +def test_multi_target_regression_partial_fit(): + X, y = datasets.make_regression(n_targets=3, random_state=0) + X_train, y_train = X[:50], y[:50] + X_test, y_test = X[50:], y[50:] + + references = np.zeros_like(y_test) + half_index = 25 + for n in range(3): + sgr = SGDRegressor(random_state=0, max_iter=5) + sgr.partial_fit(X_train[:half_index], y_train[:half_index, n]) + sgr.partial_fit(X_train[half_index:], y_train[half_index:, n]) + references[:, n] = sgr.predict(X_test) + + sgr = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5)) + + sgr.partial_fit(X_train[:half_index], y_train[:half_index]) + sgr.partial_fit(X_train[half_index:], y_train[half_index:]) + + y_pred = sgr.predict(X_test) + assert_almost_equal(references, y_pred) + assert not hasattr(MultiOutputRegressor(Lasso), "partial_fit") + + +def test_multi_target_regression_one_target(): + # Test multi target regression raises + X, y = datasets.make_regression(n_targets=1, random_state=0) + rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) + msg = "at least two dimensions" + with pytest.raises(ValueError, match=msg): + rgr.fit(X, y) + + +@pytest.mark.parametrize( + "sparse_container", + CSR_CONTAINERS + + CSC_CONTAINERS + + COO_CONTAINERS + + LIL_CONTAINERS + + DOK_CONTAINERS + + BSR_CONTAINERS, +) +def test_multi_target_sparse_regression(sparse_container): + X, y = datasets.make_regression(n_targets=3, random_state=0) + X_train, y_train = X[:50], y[:50] + X_test = X[50:] + + rgr = MultiOutputRegressor(Lasso(random_state=0)) + rgr_sparse = MultiOutputRegressor(Lasso(random_state=0)) + + rgr.fit(X_train, y_train) + rgr_sparse.fit(sparse_container(X_train), y_train) + + assert_almost_equal( + rgr.predict(X_test), rgr_sparse.predict(sparse_container(X_test)) + ) + + +def test_multi_target_sample_weights_api(): + X = [[1, 2, 3], [4, 5, 6]] + y = [[3.141, 2.718], [2.718, 3.141]] + w = [0.8, 0.6] + + rgr = MultiOutputRegressor(OrthogonalMatchingPursuit()) + msg = "does not support sample weights" + with pytest.raises(ValueError, match=msg): + rgr.fit(X, y, w) + + # no exception should be raised if the base estimator supports weights + rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) + rgr.fit(X, y, w) + + +def test_multi_target_sample_weight_partial_fit(): + # weighted regressor + X = [[1, 2, 3], [4, 5, 6]] + y = [[3.141, 2.718], [2.718, 3.141]] + w = [2.0, 1.0] + rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5)) + rgr_w.partial_fit(X, y, w) + + # weighted with different weights + w = [2.0, 2.0] + rgr = 
MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5)) + rgr.partial_fit(X, y, w) + + assert rgr.predict(X)[0][0] != rgr_w.predict(X)[0][0] + + +def test_multi_target_sample_weights(): + # weighted regressor + Xw = [[1, 2, 3], [4, 5, 6]] + yw = [[3.141, 2.718], [2.718, 3.141]] + w = [2.0, 1.0] + rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) + rgr_w.fit(Xw, yw, w) + + # unweighted, but with repeated samples + X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]] + y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]] + rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) + rgr.fit(X, y) + + X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] + assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test)) + + +# Import the data +iris = datasets.load_iris() +# create a multiple targets by randomized shuffling and concatenating y. +X = iris.data +y1 = iris.target +y2 = shuffle(y1, random_state=1) +y3 = shuffle(y1, random_state=2) +y = np.column_stack((y1, y2, y3)) +n_samples, n_features = X.shape +n_outputs = y.shape[1] +n_classes = len(np.unique(y1)) +classes = list(map(np.unique, (y1, y2, y3))) + + +def test_multi_output_classification_partial_fit_parallelism(): + sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) + mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=4) + mor.partial_fit(X, y, classes) + est1 = mor.estimators_[0] + mor.partial_fit(X, y) + est2 = mor.estimators_[0] + if cpu_count() > 1: + # parallelism requires this to be the case for a sane implementation + assert est1 is not est2 + + +# check multioutput has predict_proba +def test_hasattr_multi_output_predict_proba(): + # default SGDClassifier has loss='hinge' + # which does not expose a predict_proba method + sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5) + multi_target_linear = MultiOutputClassifier(sgd_linear_clf) + multi_target_linear.fit(X, y) + assert not hasattr(multi_target_linear, "predict_proba") + + # case where predict_proba attribute exists + sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) + multi_target_linear = MultiOutputClassifier(sgd_linear_clf) + multi_target_linear.fit(X, y) + assert hasattr(multi_target_linear, "predict_proba") + + +# check predict_proba passes +def test_multi_output_predict_proba(): + sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5) + param = {"loss": ("hinge", "log_loss", "modified_huber")} + + # inner function for custom scoring + def custom_scorer(estimator, X, y): + if hasattr(estimator, "predict_proba"): + return 1.0 + else: + return 0.0 + + grid_clf = GridSearchCV( + sgd_linear_clf, + param_grid=param, + scoring=custom_scorer, + cv=3, + error_score="raise", + ) + multi_target_linear = MultiOutputClassifier(grid_clf) + multi_target_linear.fit(X, y) + + multi_target_linear.predict_proba(X) + + # SGDClassifier defaults to loss='hinge' which is not a probabilistic + # loss function; therefore it does not expose a predict_proba method + sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5) + multi_target_linear = MultiOutputClassifier(sgd_linear_clf) + multi_target_linear.fit(X, y) + + inner2_msg = "probability estimates are not available for loss='hinge'" + inner1_msg = "'SGDClassifier' has no attribute 'predict_proba'" + outer_msg = "'MultiOutputClassifier' has no attribute 'predict_proba'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + multi_target_linear.predict_proba(X) + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert 
inner1_msg in str(exec_info.value.__cause__) + + assert isinstance(exec_info.value.__cause__.__cause__, AttributeError) + assert inner2_msg in str(exec_info.value.__cause__.__cause__) + + +def test_multi_output_classification_partial_fit(): + # test if multi_target initializes correctly with base estimator and fit + # assert predictions work as expected for predict + + sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) + multi_target_linear = MultiOutputClassifier(sgd_linear_clf) + + # train the multi_target_linear and also get the predictions. + half_index = X.shape[0] // 2 + multi_target_linear.partial_fit(X[:half_index], y[:half_index], classes=classes) + + first_predictions = multi_target_linear.predict(X) + assert (n_samples, n_outputs) == first_predictions.shape + + multi_target_linear.partial_fit(X[half_index:], y[half_index:]) + second_predictions = multi_target_linear.predict(X) + assert (n_samples, n_outputs) == second_predictions.shape + + # train the linear classification with each column and assert that + # predictions are equal after first partial_fit and second partial_fit + for i in range(3): + # create a clone with the same state + sgd_linear_clf = clone(sgd_linear_clf) + sgd_linear_clf.partial_fit( + X[:half_index], y[:half_index, i], classes=classes[i] + ) + assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i]) + sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i]) + assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i]) + + +def test_multi_output_classification_partial_fit_no_first_classes_exception(): + sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) + multi_target_linear = MultiOutputClassifier(sgd_linear_clf) + msg = "classes must be passed on the first call to partial_fit." + with pytest.raises(ValueError, match=msg): + multi_target_linear.partial_fit(X, y) + + +def test_multi_output_classification(): + # test if multi_target initializes correctly with base estimator and fit + # assert predictions work as expected for predict, predict_proba and score + + forest = RandomForestClassifier(n_estimators=10, random_state=1) + multi_target_forest = MultiOutputClassifier(forest) + + # train the multi_target_forest and also get the predictions.
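As a self-contained illustration of what the assertions in this test rely on (demo names are illustrative; public API only): MultiOutputClassifier fits one clone of the base estimator per output column, so each column's predictions match those of a manually fitted clone.

import numpy as np
from sklearn.base import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.utils import shuffle

X_demo, y_first = load_iris(return_X_y=True)
y_demo = np.column_stack([y_first, shuffle(y_first, random_state=1)])

base = RandomForestClassifier(n_estimators=10, random_state=1)
multi = MultiOutputClassifier(base).fit(X_demo, y_demo)
single = clone(base).fit(X_demo, y_demo[:, 0])

# Column 0 of the meta-estimator equals the clone fitted on that column alone.
print(np.array_equal(multi.predict(X_demo)[:, 0], single.predict(X_demo)))  # True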
+ multi_target_forest.fit(X, y) + + predictions = multi_target_forest.predict(X) + assert (n_samples, n_outputs) == predictions.shape + + predict_proba = multi_target_forest.predict_proba(X) + + assert len(predict_proba) == n_outputs + for class_probabilities in predict_proba: + assert (n_samples, n_classes) == class_probabilities.shape + + assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1), predictions) + + # train the forest with each column and assert that predictions are equal + for i in range(3): + forest_ = clone(forest) # create a clone with the same state + forest_.fit(X, y[:, i]) + assert list(forest_.predict(X)) == list(predictions[:, i]) + assert_array_equal(list(forest_.predict_proba(X)), list(predict_proba[i])) + + +def test_multiclass_multioutput_estimator(): + # test to check meta of meta estimators + svc = LinearSVC(dual="auto", random_state=0) + multi_class_svc = OneVsRestClassifier(svc) + multi_target_svc = MultiOutputClassifier(multi_class_svc) + + multi_target_svc.fit(X, y) + + predictions = multi_target_svc.predict(X) + assert (n_samples, n_outputs) == predictions.shape + + # train the forest with each column and assert that predictions are equal + for i in range(3): + multi_class_svc_ = clone(multi_class_svc) # create a clone + multi_class_svc_.fit(X, y[:, i]) + assert list(multi_class_svc_.predict(X)) == list(predictions[:, i]) + + +def test_multiclass_multioutput_estimator_predict_proba(): + seed = 542 + + # make test deterministic + rng = np.random.RandomState(seed) + + # random features + X = rng.normal(size=(5, 5)) + + # random labels + y1 = np.array(["b", "a", "a", "b", "a"]).reshape(5, 1) # 2 classes + y2 = np.array(["d", "e", "f", "e", "d"]).reshape(5, 1) # 3 classes + + Y = np.concatenate([y1, y2], axis=1) + + clf = MultiOutputClassifier( + LogisticRegression(solver="liblinear", random_state=seed) + ) + + clf.fit(X, Y) + + y_result = clf.predict_proba(X) + y_actual = [ + np.array( + [ + [0.23481764, 0.76518236], + [0.67196072, 0.32803928], + [0.54681448, 0.45318552], + [0.34883923, 0.65116077], + [0.73687069, 0.26312931], + ] + ), + np.array( + [ + [0.5171785, 0.23878628, 0.24403522], + [0.22141451, 0.64102704, 0.13755846], + [0.16751315, 0.18256843, 0.64991843], + [0.27357372, 0.55201592, 0.17441036], + [0.65745193, 0.26062899, 0.08191907], + ] + ), + ] + + for i in range(len(y_actual)): + assert_almost_equal(y_result[i], y_actual[i]) + + +def test_multi_output_classification_sample_weights(): + # weighted classifier + Xw = [[1, 2, 3], [4, 5, 6]] + yw = [[3, 2], [2, 3]] + w = np.asarray([2.0, 1.0]) + forest = RandomForestClassifier(n_estimators=10, random_state=1) + clf_w = MultiOutputClassifier(forest) + clf_w.fit(Xw, yw, w) + + # unweighted, but with repeated samples + X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]] + y = [[3, 2], [3, 2], [2, 3]] + forest = RandomForestClassifier(n_estimators=10, random_state=1) + clf = MultiOutputClassifier(forest) + clf.fit(X, y) + + X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] + assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test)) + + +def test_multi_output_classification_partial_fit_sample_weights(): + # weighted classifier + Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] + yw = [[3, 2], [2, 3], [3, 2]] + w = np.asarray([2.0, 1.0, 1.0]) + sgd_linear_clf = SGDClassifier(random_state=1, max_iter=20) + clf_w = MultiOutputClassifier(sgd_linear_clf) + clf_w.fit(Xw, yw, w) + + # unweighted, but with repeated samples + X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] + y = [[3, 2], [3, 2], [2, 3], [3, 2]] + 
sgd_linear_clf = SGDClassifier(random_state=1, max_iter=20) + clf = MultiOutputClassifier(sgd_linear_clf) + clf.fit(X, y) + X_test = [[1.5, 2.5, 3.5]] + assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test)) + + +def test_multi_output_exceptions(): + # NotFittedError when fit is not done but score, predict and + # predict_proba are called + moc = MultiOutputClassifier(LinearSVC(dual="auto", random_state=0)) + with pytest.raises(NotFittedError): + moc.score(X, y) + + # ValueError when number of outputs is different + # for fit and score + y_new = np.column_stack((y1, y2)) + moc.fit(X, y) + with pytest.raises(ValueError): + moc.score(X, y_new) + + # ValueError when y is continuous + msg = "Unknown label type" + with pytest.raises(ValueError, match=msg): + moc.fit(X, X[:, 1]) + + +@pytest.mark.parametrize("response_method", ["predict_proba", "predict"]) +def test_multi_output_not_fitted_error(response_method): + """Check that we raise the proper error when the estimator is not fitted""" + moc = MultiOutputClassifier(LogisticRegression()) + with pytest.raises(NotFittedError): + getattr(moc, response_method)(X) + + +def test_multi_output_delegate_predict_proba(): + """Check the behavior for the delegation of predict_proba to the underlying + estimator""" + + # A base estimator with `predict_proba` should expose the method even before fit + moc = MultiOutputClassifier(LogisticRegression()) + assert hasattr(moc, "predict_proba") + moc.fit(X, y) + assert hasattr(moc, "predict_proba") + + # A base estimator without `predict_proba` should raise an AttributeError + moc = MultiOutputClassifier(LinearSVC(dual="auto")) + assert not hasattr(moc, "predict_proba") + + outer_msg = "'MultiOutputClassifier' has no attribute 'predict_proba'" + inner_msg = "'LinearSVC' object has no attribute 'predict_proba'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + moc.predict_proba(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg == str(exec_info.value.__cause__) + + moc.fit(X, y) + assert not hasattr(moc, "predict_proba") + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + moc.predict_proba(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg == str(exec_info.value.__cause__) + + +def generate_multilabel_dataset_with_correlations(): + # Generate a multilabel data set from a multiclass dataset by representing + # the integer label of each original class with its binary encoding.
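A quick worked example of that encoding (illustrative prints only): format(label, "#06b") gives a zero-padded binary string whose last four characters become the four binary labels.

# e.g. original class 11 -> binary labels [1, 0, 1, 1]
print(format(11, "#06b"))                        # '0b1011'
print([int(b) for b in format(11, "#06b")[2:]])  # [1, 0, 1, 1]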
+ X, y = make_classification( + n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0 + ) + + Y_multi = np.array([[int(yyy) for yyy in format(yy, "#06b")[2:]] for yy in y]) + return X, Y_multi + + +def test_classifier_chain_fit_and_predict_with_linear_svc(): + # Fit classifier chain and verify predict performance using LinearSVC + X, Y = generate_multilabel_dataset_with_correlations() + classifier_chain = ClassifierChain(LinearSVC(dual="auto")) + classifier_chain.fit(X, Y) + + Y_pred = classifier_chain.predict(X) + assert Y_pred.shape == Y.shape + + Y_decision = classifier_chain.decision_function(X) + + Y_binary = Y_decision >= 0 + assert_array_equal(Y_binary, Y_pred) + assert not hasattr(classifier_chain, "predict_proba") + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_classifier_chain_fit_and_predict_with_sparse_data(csr_container): + # Fit classifier chain with sparse data + X, Y = generate_multilabel_dataset_with_correlations() + X_sparse = csr_container(X) + + classifier_chain = ClassifierChain(LogisticRegression()) + classifier_chain.fit(X_sparse, Y) + Y_pred_sparse = classifier_chain.predict(X_sparse) + + classifier_chain = ClassifierChain(LogisticRegression()) + classifier_chain.fit(X, Y) + Y_pred_dense = classifier_chain.predict(X) + + assert_array_equal(Y_pred_sparse, Y_pred_dense) + + +def test_classifier_chain_vs_independent_models(): + # Verify that an ensemble of classifier chains (each of length + # N) can achieve a higher Jaccard similarity score than N independent + # models + X, Y = generate_multilabel_dataset_with_correlations() + X_train = X[:600, :] + X_test = X[600:, :] + Y_train = Y[:600, :] + Y_test = Y[600:, :] + + ovr = OneVsRestClassifier(LogisticRegression()) + ovr.fit(X_train, Y_train) + Y_pred_ovr = ovr.predict(X_test) + + chain = ClassifierChain(LogisticRegression()) + chain.fit(X_train, Y_train) + Y_pred_chain = chain.predict(X_test) + + assert jaccard_score(Y_test, Y_pred_chain, average="samples") > jaccard_score( + Y_test, Y_pred_ovr, average="samples" + ) + + +@pytest.mark.parametrize("response_method", ["predict_proba", "predict_log_proba"]) +def test_base_chain_fit_and_predict(response_method): + # Fit base chain and verify predict performance + X, Y = generate_multilabel_dataset_with_correlations() + chains = [RegressorChain(Ridge()), ClassifierChain(LogisticRegression())] + for chain in chains: + chain.fit(X, Y) + Y_pred = chain.predict(X) + assert Y_pred.shape == Y.shape + assert [c.coef_.size for c in chain.estimators_] == list( + range(X.shape[1], X.shape[1] + Y.shape[1]) + ) + + Y_prob = getattr(chains[1], response_method)(X) + if response_method == "predict_log_proba": + Y_prob = np.exp(Y_prob) + Y_binary = Y_prob >= 0.5 + assert_array_equal(Y_binary, Y_pred) + + assert isinstance(chains[1], ClassifierMixin) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_base_chain_fit_and_predict_with_sparse_data_and_cv(csr_container): + # Fit base chain with sparse data cross_val_predict + X, Y = generate_multilabel_dataset_with_correlations() + X_sparse = csr_container(X) + base_chains = [ + ClassifierChain(LogisticRegression(), cv=3), + RegressorChain(Ridge(), cv=3), + ] + for chain in base_chains: + chain.fit(X_sparse, Y) + Y_pred = chain.predict(X_sparse) + assert Y_pred.shape == Y.shape + + +def test_base_chain_random_order(): + # Fit base chain with random order + X, Y = generate_multilabel_dataset_with_correlations() + for chain in [ClassifierChain(LogisticRegression()), 
RegressorChain(Ridge())]: + chain_random = clone(chain).set_params(order="random", random_state=42) + chain_random.fit(X, Y) + chain_fixed = clone(chain).set_params(order=chain_random.order_) + chain_fixed.fit(X, Y) + assert_array_equal(chain_fixed.order_, chain_random.order_) + assert list(chain_random.order) != list(range(4)) + assert len(chain_random.order_) == 4 + assert len(set(chain_random.order_)) == 4 + # Randomly ordered chain should behave identically to a fixed order + # chain with the same order. + for est1, est2 in zip(chain_random.estimators_, chain_fixed.estimators_): + assert_array_almost_equal(est1.coef_, est2.coef_) + + +def test_base_chain_crossval_fit_and_predict(): + # Fit chain with cross_val_predict and verify predict + # performance + X, Y = generate_multilabel_dataset_with_correlations() + + for chain in [ClassifierChain(LogisticRegression()), RegressorChain(Ridge())]: + chain.fit(X, Y) + chain_cv = clone(chain).set_params(cv=3) + chain_cv.fit(X, Y) + Y_pred_cv = chain_cv.predict(X) + Y_pred = chain.predict(X) + + assert Y_pred_cv.shape == Y_pred.shape + assert not np.all(Y_pred == Y_pred_cv) + if isinstance(chain, ClassifierChain): + assert jaccard_score(Y, Y_pred_cv, average="samples") > 0.4 + else: + assert mean_squared_error(Y, Y_pred_cv) < 0.25 + + +@pytest.mark.parametrize( + "estimator", + [ + RandomForestClassifier(n_estimators=2), + MultiOutputClassifier(RandomForestClassifier(n_estimators=2)), + ClassifierChain(RandomForestClassifier(n_estimators=2)), + ], +) +def test_multi_output_classes_(estimator): + # Tests classes_ attribute of multioutput classifiers + # RandomForestClassifier supports multioutput out-of-the-box + estimator.fit(X, y) + assert isinstance(estimator.classes_, list) + assert len(estimator.classes_) == n_outputs + for estimator_classes, expected_classes in zip(classes, estimator.classes_): + assert_array_equal(estimator_classes, expected_classes) + + +class DummyRegressorWithFitParams(DummyRegressor): + def fit(self, X, y, sample_weight=None, **fit_params): + self._fit_params = fit_params + return super().fit(X, y, sample_weight) + + +class DummyClassifierWithFitParams(DummyClassifier): + def fit(self, X, y, sample_weight=None, **fit_params): + self._fit_params = fit_params + return super().fit(X, y, sample_weight) + + +@pytest.mark.filterwarnings("ignore:`n_features_in_` is deprecated") +@pytest.mark.parametrize( + "estimator, dataset", + [ + ( + MultiOutputClassifier(DummyClassifierWithFitParams(strategy="prior")), + datasets.make_multilabel_classification(), + ), + ( + MultiOutputRegressor(DummyRegressorWithFitParams()), + datasets.make_regression(n_targets=3, random_state=0), + ), + ], +) +def test_multioutput_estimator_with_fit_params(estimator, dataset): + X, y = dataset + some_param = np.zeros_like(X) + estimator.fit(X, y, some_param=some_param) + for dummy_estimator in estimator.estimators_: + assert "some_param" in dummy_estimator._fit_params + + +def test_regressor_chain_w_fit_params(): + # Make sure fit_params are properly propagated to the sub-estimators + rng = np.random.RandomState(0) + X, y = datasets.make_regression(n_targets=3, random_state=0) + weight = rng.rand(y.shape[0]) + + class MySGD(SGDRegressor): + def fit(self, X, y, **fit_params): + self.sample_weight_ = fit_params["sample_weight"] + super().fit(X, y, **fit_params) + + model = RegressorChain(MySGD()) + + # Fitting with params + fit_param = {"sample_weight": weight} + model.fit(X, y, **fit_param) + + for est in model.estimators_: + assert est.sample_weight_ is 
weight + + +@pytest.mark.parametrize( + "MultiOutputEstimator, Estimator", + [(MultiOutputClassifier, LogisticRegression), (MultiOutputRegressor, Ridge)], +) +# FIXME: we should move this test in `estimator_checks` once we are able +# to construct meta-estimator instances +def test_support_missing_values(MultiOutputEstimator, Estimator): + # smoke test to check that pipeline MultioutputEstimators are letting + # the validation of missing values to + # the underlying pipeline, regressor or classifier + rng = np.random.RandomState(42) + X, y = rng.randn(50, 2), rng.binomial(1, 0.5, (50, 3)) + mask = rng.choice([1, 0], X.shape, p=[0.01, 0.99]).astype(bool) + X[mask] = np.nan + + pipe = make_pipeline(SimpleImputer(), Estimator()) + MultiOutputEstimator(pipe).fit(X, y).score(X, y) + + +@pytest.mark.parametrize("order_type", [list, np.array, tuple]) +def test_classifier_chain_tuple_order(order_type): + X = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] + y = [[3, 2], [2, 3], [3, 2]] + order = order_type([1, 0]) + + chain = ClassifierChain(RandomForestClassifier(), order=order) + + chain.fit(X, y) + X_test = [[1.5, 2.5, 3.5]] + y_test = [[3, 2]] + assert_array_almost_equal(chain.predict(X_test), y_test) + + +def test_classifier_chain_tuple_invalid_order(): + X = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] + y = [[3, 2], [2, 3], [3, 2]] + order = tuple([1, 2]) + + chain = ClassifierChain(RandomForestClassifier(), order=order) + + with pytest.raises(ValueError, match="invalid order"): + chain.fit(X, y) + + +def test_classifier_chain_verbose(capsys): + X, y = make_multilabel_classification( + n_samples=100, n_features=5, n_classes=3, n_labels=3, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + pattern = ( + r"\[Chain\].*\(1 of 3\) Processing order 0, total=.*\n" + r"\[Chain\].*\(2 of 3\) Processing order 1, total=.*\n" + r"\[Chain\].*\(3 of 3\) Processing order 2, total=.*\n$" + ) + + classifier = ClassifierChain( + DecisionTreeClassifier(), + order=[0, 1, 2], + random_state=0, + verbose=True, + ) + classifier.fit(X_train, y_train) + assert re.match(pattern, capsys.readouterr()[0]) + + +def test_regressor_chain_verbose(capsys): + X, y = make_regression(n_samples=125, n_targets=3, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + pattern = ( + r"\[Chain\].*\(1 of 3\) Processing order 1, total=.*\n" + r"\[Chain\].*\(2 of 3\) Processing order 0, total=.*\n" + r"\[Chain\].*\(3 of 3\) Processing order 2, total=.*\n$" + ) + regressor = RegressorChain( + LinearRegression(), + order=[1, 0, 2], + random_state=0, + verbose=True, + ) + regressor.fit(X_train, y_train) + assert re.match(pattern, capsys.readouterr()[0]) + + +def test_multioutputregressor_ducktypes_fitted_estimator(): + """Test that MultiOutputRegressor checks the fitted estimator for + predict. Non-regression test for #16549.""" + X, y = load_linnerud(return_X_y=True) + stacker = StackingRegressor( + estimators=[("sgd", SGDRegressor(random_state=1))], + final_estimator=Ridge(), + cv=2, + ) + + reg = MultiOutputRegressor(estimator=stacker).fit(X, y) + + # Does not raise + reg.predict(X) + + +@pytest.mark.parametrize( + "Cls, method", [(ClassifierChain, "fit"), (MultiOutputClassifier, "partial_fit")] +) +def test_fit_params_no_routing(Cls, method): + """Check that we raise an error when passing metadata not requested by the + underlying classifier. 
+ """ + X, y = make_classification(n_samples=50) + clf = Cls(PassiveAggressiveClassifier()) + + with pytest.raises(ValueError, match="is only supported if"): + getattr(clf, method)(X, y, test=1) + + +def test_multioutput_regressor_has_partial_fit(): + # Test that an unfitted MultiOutputRegressor handles available_if for + # partial_fit correctly + est = MultiOutputRegressor(LinearRegression()) + msg = "This 'MultiOutputRegressor' has no attribute 'partial_fit'" + with pytest.raises(AttributeError, match=msg): + getattr(est, "partial_fit") diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_naive_bayes.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_naive_bayes.py new file mode 100644 index 0000000000000000000000000000000000000000..ae709cd49591c8bada9a0b0c0058937ab3a82976 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_naive_bayes.py @@ -0,0 +1,973 @@ +import re +import warnings + +import numpy as np +import pytest +from scipy.special import logsumexp + +from sklearn.datasets import load_digits, load_iris +from sklearn.model_selection import cross_val_score, train_test_split +from sklearn.naive_bayes import ( + BernoulliNB, + CategoricalNB, + ComplementNB, + GaussianNB, + MultinomialNB, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +DISCRETE_NAIVE_BAYES_CLASSES = [BernoulliNB, CategoricalNB, ComplementNB, MultinomialNB] +ALL_NAIVE_BAYES_CLASSES = DISCRETE_NAIVE_BAYES_CLASSES + [GaussianNB] + +msg = "The default value for `force_alpha` will change" +pytestmark = pytest.mark.filterwarnings(f"ignore:{msg}:FutureWarning") + +# Data is just 6 separable points in the plane +X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) +y = np.array([1, 1, 1, 2, 2, 2]) + + +def get_random_normal_x_binary_y(global_random_seed): + # A bit more random tests + rng = np.random.RandomState(global_random_seed) + X1 = rng.normal(size=(10, 3)) + y1 = (rng.normal(size=10) > 0).astype(int) + return X1, y1 + + +def get_random_integer_x_three_classes_y(global_random_seed): + # Data is 6 random integer points in a 100 dimensional space classified to + # three classes. + rng = np.random.RandomState(global_random_seed) + X2 = rng.randint(5, size=(6, 100)) + y2 = np.array([1, 1, 2, 2, 3, 3]) + return X2, y2 + + +def test_gnb(): + # Gaussian Naive Bayes classification. + # This checks that GaussianNB implements fit and predict and returns + # correct values for a simple toy dataset. + + clf = GaussianNB() + y_pred = clf.fit(X, y).predict(X) + assert_array_equal(y_pred, y) + + y_pred_proba = clf.predict_proba(X) + y_pred_log_proba = clf.predict_log_proba(X) + assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) + + # Test whether label mismatch between target y and classes raises + # an Error + # FIXME Remove this test once the more general partial_fit tests are merged + with pytest.raises( + ValueError, match="The target label.* in y do not exist in the initial classes" + ): + GaussianNB().partial_fit(X, y, classes=[0, 1]) + + +def test_gnb_prior(global_random_seed): + # Test whether class priors are properly set. 
+ clf = GaussianNB().fit(X, y) + assert_array_almost_equal(np.array([3, 3]) / 6.0, clf.class_prior_, 8) + X1, y1 = get_random_normal_x_binary_y(global_random_seed) + clf = GaussianNB().fit(X1, y1) + # Check that the class priors sum to 1 + assert_array_almost_equal(clf.class_prior_.sum(), 1) + + +def test_gnb_sample_weight(global_random_seed): + """Test whether sample weights are properly used in GNB.""" + # Sample weights all being 1 should not change results + sw = np.ones(6) + clf = GaussianNB().fit(X, y) + clf_sw = GaussianNB().fit(X, y, sw) + + assert_array_almost_equal(clf.theta_, clf_sw.theta_) + assert_array_almost_equal(clf.var_, clf_sw.var_) + + # Fitting twice with half sample-weights should result + # in same result as fitting once with full weights + rng = np.random.RandomState(global_random_seed) + + sw = rng.rand(y.shape[0]) + clf1 = GaussianNB().fit(X, y, sample_weight=sw) + clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2) + clf2.partial_fit(X, y, sample_weight=sw / 2) + + assert_array_almost_equal(clf1.theta_, clf2.theta_) + assert_array_almost_equal(clf1.var_, clf2.var_) + + # Check that duplicate entries and correspondingly increased sample + # weights yield the same result + ind = rng.randint(0, X.shape[0], 20) + sample_weight = np.bincount(ind, minlength=X.shape[0]) + + clf_dupl = GaussianNB().fit(X[ind], y[ind]) + clf_sw = GaussianNB().fit(X, y, sample_weight) + + assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_) + assert_array_almost_equal(clf_dupl.var_, clf_sw.var_) + + # non-regression test for gh-24140 where a division by zero was + # occurring when a single class was present + sample_weight = (y == 1).astype(np.float64) + clf = GaussianNB().fit(X, y, sample_weight=sample_weight) + + +def test_gnb_neg_priors(): + """Test whether an error is raised in case of negative priors""" + clf = GaussianNB(priors=np.array([-1.0, 2.0])) + + msg = "Priors must be non-negative" + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_gnb_priors(): + """Test whether the class prior override is properly used""" + clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y) + assert_array_almost_equal( + clf.predict_proba([[-0.1, -0.1]]), + np.array([[0.825303662161683, 0.174696337838317]]), + 8, + ) + assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7])) + + +def test_gnb_priors_sum_isclose(): + # test whether the class prior sum is properly tested""" + X = np.array( + [ + [-1, -1], + [-2, -1], + [-3, -2], + [-4, -5], + [-5, -4], + [1, 1], + [2, 1], + [3, 2], + [4, 4], + [5, 5], + ] + ) + priors = np.array([0.08, 0.14, 0.03, 0.16, 0.11, 0.16, 0.07, 0.14, 0.11, 0.0]) + Y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + clf = GaussianNB(priors=priors) + # smoke test for issue #9633 + clf.fit(X, Y) + + +def test_gnb_wrong_nb_priors(): + """Test whether an error is raised if the number of prior is different + from the number of class""" + clf = GaussianNB(priors=np.array([0.25, 0.25, 0.25, 0.25])) + + msg = "Number of priors must match number of classes" + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_gnb_prior_greater_one(): + """Test if an error is raised if the sum of prior greater than one""" + clf = GaussianNB(priors=np.array([2.0, 1.0])) + + msg = "The sum of the priors should be 1" + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_gnb_prior_large_bias(): + """Test if good prediction when class prior favor largely one class""" + clf = GaussianNB(priors=np.array([0.01, 
0.99])) + clf.fit(X, y) + assert clf.predict([[-0.1, -0.1]]) == np.array([2]) + + +def test_gnb_check_update_with_no_data(): + """Test when the partial fit is called without any data""" + # Create an empty array + prev_points = 100 + mean = 0.0 + var = 1.0 + x_empty = np.empty((0, X.shape[1])) + tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean, var, x_empty) + assert tmean == mean + assert tvar == var + + +def test_gnb_partial_fit(): + clf = GaussianNB().fit(X, y) + clf_pf = GaussianNB().partial_fit(X, y, np.unique(y)) + assert_array_almost_equal(clf.theta_, clf_pf.theta_) + assert_array_almost_equal(clf.var_, clf_pf.var_) + assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_) + + clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y)) + clf_pf2.partial_fit(X[1::2], y[1::2]) + assert_array_almost_equal(clf.theta_, clf_pf2.theta_) + assert_array_almost_equal(clf.var_, clf_pf2.var_) + assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_) + + +def test_gnb_naive_bayes_scale_invariance(): + # Scaling the data should not change the prediction results + iris = load_iris() + X, y = iris.data, iris.target + labels = [GaussianNB().fit(f * X, y).predict(f * X) for f in [1e-10, 1, 1e10]] + assert_array_equal(labels[0], labels[1]) + assert_array_equal(labels[1], labels[2]) + + +@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) +def test_discretenb_prior(DiscreteNaiveBayes, global_random_seed): + # Test whether class priors are properly set. + X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) + clf = DiscreteNaiveBayes().fit(X2, y2) + assert_array_almost_equal( + np.log(np.array([2, 2, 2]) / 6.0), clf.class_log_prior_, 8 + ) + + +@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) +def test_discretenb_partial_fit(DiscreteNaiveBayes): + clf1 = DiscreteNaiveBayes() + clf1.fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1]) + + clf2 = DiscreteNaiveBayes() + clf2.partial_fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1], classes=[0, 1]) + assert_array_equal(clf1.class_count_, clf2.class_count_) + if DiscreteNaiveBayes is CategoricalNB: + for i in range(len(clf1.category_count_)): + assert_array_equal(clf1.category_count_[i], clf2.category_count_[i]) + else: + assert_array_equal(clf1.feature_count_, clf2.feature_count_) + + clf3 = DiscreteNaiveBayes() + # all categories have to appear in the first partial fit + clf3.partial_fit([[0, 1]], [0], classes=[0, 1]) + clf3.partial_fit([[1, 0]], [1]) + clf3.partial_fit([[1, 1]], [1]) + assert_array_equal(clf1.class_count_, clf3.class_count_) + if DiscreteNaiveBayes is CategoricalNB: + # the categories for each feature of CategoricalNB are mapped to an + # index chronologically with each call of partial fit and therefore + # the category_count matrices cannot be compared for equality + for i in range(len(clf1.category_count_)): + assert_array_equal( + clf1.category_count_[i].shape, clf3.category_count_[i].shape + ) + assert_array_equal( + np.sum(clf1.category_count_[i], axis=1), + np.sum(clf3.category_count_[i], axis=1), + ) + + # assert category 0 occurs 1x in the first class and 0x in the 2nd + # class + assert_array_equal(clf1.category_count_[0][0], np.array([1, 0])) + # assert category 1 occurs 0x in the first class and 2x in the 2nd + # class + assert_array_equal(clf1.category_count_[0][1], np.array([0, 2])) + + # assert category 0 occurs 0x in the first class and 1x in the 2nd + # class + assert_array_equal(clf1.category_count_[1][0], np.array([0, 1])) + # 
assert category 1 occurs 1x in the first class and 1x in the 2nd + # class + assert_array_equal(clf1.category_count_[1][1], np.array([1, 1])) + else: + assert_array_equal(clf1.feature_count_, clf3.feature_count_) + + +@pytest.mark.parametrize("NaiveBayes", ALL_NAIVE_BAYES_CLASSES) +def test_NB_partial_fit_no_first_classes(NaiveBayes, global_random_seed): + # classes is required for first call to partial fit + X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) + + with pytest.raises( + ValueError, match="classes must be passed on the first call to partial_fit." + ): + NaiveBayes().partial_fit(X2, y2) + + # check consistency of consecutive classes values + clf = NaiveBayes() + clf.partial_fit(X2, y2, classes=np.unique(y2)) + with pytest.raises( + ValueError, match="is not the same as on last call to partial_fit" + ): + clf.partial_fit(X2, y2, classes=np.arange(42)) + + +def test_discretenb_predict_proba(): + # Test discrete NB classes' probability scores + + # The 100s below distinguish Bernoulli from multinomial. + # FIXME: write a test to show this. + X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]] + X_multinomial = [[0, 1], [1, 3], [4, 0]] + + # test binary case (1-d output) + y = [0, 0, 2] # 2 is regression test for binary case, 02e673 + for DiscreteNaiveBayes, X in zip( + [BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial] + ): + clf = DiscreteNaiveBayes().fit(X, y) + assert clf.predict(X[-1:]) == 2 + assert clf.predict_proba([X[0]]).shape == (1, 2) + assert_array_almost_equal( + clf.predict_proba(X[:2]).sum(axis=1), np.array([1.0, 1.0]), 6 + ) + + # test multiclass case (2-d output, must sum to one) + y = [0, 1, 2] + for DiscreteNaiveBayes, X in zip( + [BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial] + ): + clf = DiscreteNaiveBayes().fit(X, y) + assert clf.predict_proba(X[0:1]).shape == (1, 3) + assert clf.predict_proba(X[:2]).shape == (2, 3) + assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1) + assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1) + assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1) + + +@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) +def test_discretenb_uniform_prior(DiscreteNaiveBayes): + # Test whether discrete NB classes fit a uniform prior + # when fit_prior=False and class_prior=None + + clf = DiscreteNaiveBayes() + clf.set_params(fit_prior=False) + clf.fit([[0], [0], [1]], [0, 0, 1]) + prior = np.exp(clf.class_log_prior_) + assert_array_almost_equal(prior, np.array([0.5, 0.5])) + + +@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) +def test_discretenb_provide_prior(DiscreteNaiveBayes): + # Test whether discrete NB classes use provided prior + + clf = DiscreteNaiveBayes(class_prior=[0.5, 0.5]) + clf.fit([[0], [0], [1]], [0, 0, 1]) + prior = np.exp(clf.class_log_prior_) + assert_array_almost_equal(prior, np.array([0.5, 0.5])) + + # Inconsistent number of classes with prior + msg = "Number of priors must match number of classes" + with pytest.raises(ValueError, match=msg): + clf.fit([[0], [1], [2]], [0, 1, 2]) + + msg = "is not the same as on last call to partial_fit" + with pytest.raises(ValueError, match=msg): + clf.partial_fit([[0], [1]], [0, 1], classes=[0, 1, 1]) + + +@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) +def test_discretenb_provide_prior_with_partial_fit(DiscreteNaiveBayes): + # Test whether discrete NB classes use provided prior + # when using partial_fit + + iris = load_iris() + iris_data1, iris_data2, 
iris_target1, iris_target2 = train_test_split( + iris.data, iris.target, test_size=0.4, random_state=415 + ) + + for prior in [None, [0.3, 0.3, 0.4]]: + clf_full = DiscreteNaiveBayes(class_prior=prior) + clf_full.fit(iris.data, iris.target) + clf_partial = DiscreteNaiveBayes(class_prior=prior) + clf_partial.partial_fit(iris_data1, iris_target1, classes=[0, 1, 2]) + clf_partial.partial_fit(iris_data2, iris_target2) + assert_array_almost_equal( + clf_full.class_log_prior_, clf_partial.class_log_prior_ + ) + + +@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) +def test_discretenb_sample_weight_multiclass(DiscreteNaiveBayes): + # check shape consistency for number of samples at fit time + X = [ + [0, 0, 1], + [0, 1, 1], + [0, 1, 1], + [1, 0, 0], + ] + y = [0, 0, 1, 2] + sample_weight = np.array([1, 1, 2, 2], dtype=np.float64) + sample_weight /= sample_weight.sum() + clf = DiscreteNaiveBayes().fit(X, y, sample_weight=sample_weight) + assert_array_equal(clf.predict(X), [0, 1, 1, 2]) + + # Check sample weight using the partial_fit method + clf = DiscreteNaiveBayes() + clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2], sample_weight=sample_weight[:2]) + clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3]) + clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:]) + assert_array_equal(clf.predict(X), [0, 1, 1, 2]) + + +@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) +@pytest.mark.parametrize("use_partial_fit", [False, True]) +@pytest.mark.parametrize("train_on_single_class_y", [False, True]) +def test_discretenb_degenerate_one_class_case( + DiscreteNaiveBayes, + use_partial_fit, + train_on_single_class_y, +): + # Most array attributes of a discrete naive Bayes classifier should have a + # first-axis length equal to the number of classes. Exceptions include: + # ComplementNB.feature_all_, CategoricalNB.n_categories_. + # Confirm that this is the case for binary problems and the degenerate + # case of a single class in the training set, when fitting with `fit` or + # `partial_fit`. + # Non-regression test for handling degenerate one-class case: + # https://github.com/scikit-learn/scikit-learn/issues/18974 + + X = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + y = [1, 1, 2] + if train_on_single_class_y: + X = X[:-1] + y = y[:-1] + classes = sorted(list(set(y))) + num_classes = len(classes) + + clf = DiscreteNaiveBayes() + if use_partial_fit: + clf.partial_fit(X, y, classes=classes) + else: + clf.fit(X, y) + assert clf.predict(X[:1]) == y[0] + + # Check that attributes have expected first-axis lengths + attribute_names = [ + "classes_", + "class_count_", + "class_log_prior_", + "feature_count_", + "feature_log_prob_", + ] + for attribute_name in attribute_names: + attribute = getattr(clf, attribute_name, None) + if attribute is None: + # CategoricalNB has no feature_count_ attribute + continue + if isinstance(attribute, np.ndarray): + assert attribute.shape[0] == num_classes + else: + # CategoricalNB.feature_log_prob_ is a list of arrays + for element in attribute: + assert element.shape[0] == num_classes + + +@pytest.mark.parametrize("kind", ("dense", "sparse")) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_mnnb(kind, global_random_seed, csr_container): + # Test Multinomial Naive Bayes classification. + # This checks that MultinomialNB implements fit and predict and returns + # correct values for a simple toy dataset. 
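+ # The same checks are run on a dense array and on each CSR container, and the + # chunked partial_fit results below are compared against a single call to fit.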
+ X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) + + if kind == "dense": + X = X2 + elif kind == "sparse": + X = csr_container(X2) + + # Check the ability to predict the learning set. + clf = MultinomialNB() + + msg = "Negative values in data passed to" + with pytest.raises(ValueError, match=msg): + clf.fit(-X, y2) + y_pred = clf.fit(X, y2).predict(X) + + assert_array_equal(y_pred, y2) + + # Verify that np.log(clf.predict_proba(X)) gives the same results as + # clf.predict_log_proba(X) + y_pred_proba = clf.predict_proba(X) + y_pred_log_proba = clf.predict_log_proba(X) + assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) + + # Check that incremental fitting yields the same results + clf2 = MultinomialNB() + clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2)) + clf2.partial_fit(X[2:5], y2[2:5]) + clf2.partial_fit(X[5:], y2[5:]) + + y_pred2 = clf2.predict(X) + assert_array_equal(y_pred2, y2) + + y_pred_proba2 = clf2.predict_proba(X) + y_pred_log_proba2 = clf2.predict_log_proba(X) + assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8) + assert_array_almost_equal(y_pred_proba2, y_pred_proba) + assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba) + + # Partial fit on the whole data at once should be the same as fit too + clf3 = MultinomialNB() + clf3.partial_fit(X, y2, classes=np.unique(y2)) + + y_pred3 = clf3.predict(X) + assert_array_equal(y_pred3, y2) + y_pred_proba3 = clf3.predict_proba(X) + y_pred_log_proba3 = clf3.predict_log_proba(X) + assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8) + assert_array_almost_equal(y_pred_proba3, y_pred_proba) + assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba) + + +def test_mnb_prior_unobserved_targets(): + # test smoothing of prior for yet unobserved targets + + # Create toy training data + X = np.array([[0, 1], [1, 0]]) + y = np.array([0, 1]) + + clf = MultinomialNB() + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + + clf.partial_fit(X, y, classes=[0, 1, 2]) + + assert clf.predict([[0, 1]]) == 0 + assert clf.predict([[1, 0]]) == 1 + assert clf.predict([[1, 1]]) == 0 + + # add a training example with previously unobserved class + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + + clf.partial_fit([[1, 1]], [2]) + + assert clf.predict([[0, 1]]) == 0 + assert clf.predict([[1, 0]]) == 1 + assert clf.predict([[1, 1]]) == 2 + + +def test_bnb(): + # Tests that BernoulliNB when alpha=1.0 gives the same values as + # those given for the toy example in Manning, Raghavan, and + # Schuetze's "Introduction to Information Retrieval" book: + # https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html + + # Training data points are: + # Chinese Beijing Chinese (class: China) + # Chinese Chinese Shanghai (class: China) + # Chinese Macao (class: China) + # Tokyo Japan Chinese (class: Japan) + + # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo + X = np.array( + [[1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1]] + ) + + # Classes are China (0), Japan (1) + Y = np.array([0, 0, 0, 1]) + + # Fit BernoulliNB w/ alpha = 1.0 + clf = BernoulliNB(alpha=1.0) + clf.fit(X, Y) + + # Check the class prior is correct + class_prior = np.array([0.75, 0.25]) + assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior) + + # Check the feature probabilities are correct + feature_prob = np.array( + [ + [0.4, 0.8, 0.2, 0.4, 0.4, 0.2], + [1 / 3.0, 2 /
3.0, 2 / 3.0, 1 / 3.0, 1 / 3.0, 2 / 3.0], + ] + ) + assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob) + + # Testing data point is: + # Chinese Chinese Chinese Tokyo Japan + X_test = np.array([[0, 1, 1, 0, 0, 1]]) + + # Check the predictive probabilities are correct + unnorm_predict_proba = np.array([[0.005183999999999999, 0.02194787379972565]]) + predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba) + assert_array_almost_equal(clf.predict_proba(X_test), predict_proba) + + +def test_bnb_feature_log_prob(): + # Test for issue #4268. + # Tests that the feature log prob value computed by BernoulliNB when + # alpha=1.0 is equal to the expression given in Manning, Raghavan, + # and Schuetze's "Introduction to Information Retrieval" book: + # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html + + X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]]) + Y = np.array([0, 0, 1, 2, 2]) + + # Fit Bernoulli NB w/ alpha = 1.0 + clf = BernoulliNB(alpha=1.0) + clf.fit(X, Y) + + # Manually form the (log) numerator and denominator that + # constitute P(feature presence | class) + num = np.log(clf.feature_count_ + 1.0) + denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T + + # Check manual estimate matches + assert_array_almost_equal(clf.feature_log_prob_, (num - denom)) + + +def test_cnb(): + # Tests ComplementNB when alpha=1.0 for the toy example in Manning, + # Raghavan, and Schuetze's "Introduction to Information Retrieval" book: + # https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html + + # Training data points are: + # Chinese Beijing Chinese (class: China) + # Chinese Chinese Shanghai (class: China) + # Chinese Macao (class: China) + # Tokyo Japan Chinese (class: Japan) + + # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo. + X = np.array( + [[1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1]] + ) + + # Classes are China (0), Japan (1). + Y = np.array([0, 0, 0, 1]) + + # Check that weights are correct. See steps 4-6 in Table 4 of + # Rennie et al. (2003). + theta = np.array( + [ + [ + (0 + 1) / (3 + 6), + (1 + 1) / (3 + 6), + (1 + 1) / (3 + 6), + (0 + 1) / (3 + 6), + (0 + 1) / (3 + 6), + (1 + 1) / (3 + 6), + ], + [ + (1 + 1) / (6 + 6), + (3 + 1) / (6 + 6), + (0 + 1) / (6 + 6), + (1 + 1) / (6 + 6), + (1 + 1) / (6 + 6), + (0 + 1) / (6 + 6), + ], + ] + ) + + weights = np.zeros(theta.shape) + normed_weights = np.zeros(theta.shape) + for i in range(2): + weights[i] = -np.log(theta[i]) + normed_weights[i] = weights[i] / weights[i].sum() + + # Verify inputs are nonnegative. + clf = ComplementNB(alpha=1.0) + + msg = re.escape("Negative values in data passed to ComplementNB (input X)") + with pytest.raises(ValueError, match=msg): + clf.fit(-X, Y) + + clf.fit(X, Y) + + # Check that counts/weights are correct. + feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]]) + assert_array_equal(clf.feature_count_, feature_count) + class_count = np.array([3, 1]) + assert_array_equal(clf.class_count_, class_count) + feature_all = np.array([1, 4, 1, 1, 1, 1]) + assert_array_equal(clf.feature_all_, feature_all) + assert_array_almost_equal(clf.feature_log_prob_, weights) + + clf = ComplementNB(alpha=1.0, norm=True) + clf.fit(X, Y) + assert_array_almost_equal(clf.feature_log_prob_, normed_weights) + + +def test_categoricalnb(global_random_seed): + # Check the ability to predict the training set. 
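+ # CategoricalNB treats each column as integer-encoded categories, so the random + # integers in [0, 5) produced by the fixture are a valid training set.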
+ clf = CategoricalNB() + X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) + + y_pred = clf.fit(X2, y2).predict(X2) + assert_array_equal(y_pred, y2) + + X3 = np.array([[1, 4], [2, 5]]) + y3 = np.array([1, 2]) + clf = CategoricalNB(alpha=1, fit_prior=False) + + clf.fit(X3, y3) + assert_array_equal(clf.n_categories_, np.array([3, 6])) + + # Check error is raised for X with negative entries + X = np.array([[0, -1]]) + y = np.array([1]) + error_msg = re.escape("Negative values in data passed to CategoricalNB (input X)") + with pytest.raises(ValueError, match=error_msg): + clf.predict(X) + with pytest.raises(ValueError, match=error_msg): + clf.fit(X, y) + + # Test alpha + X3_test = np.array([[2, 5]]) + # alpha=1 increases the count of all categories by one so the final + # probability for each category is not 50/50 but 1/3 to 2/3 + bayes_numerator = np.array([[1 / 3 * 1 / 3, 2 / 3 * 2 / 3]]) + bayes_denominator = bayes_numerator.sum() + assert_array_almost_equal( + clf.predict_proba(X3_test), bayes_numerator / bayes_denominator + ) + + # Assert category_count has counted all features + assert len(clf.category_count_) == X3.shape[1] + + # Check sample_weight + X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]]) + y = np.array([1, 1, 2, 2]) + clf = CategoricalNB(alpha=1, fit_prior=False) + clf.fit(X, y) + assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([1])) + assert_array_equal(clf.n_categories_, np.array([2, 2])) + + for factor in [1.0, 0.3, 5, 0.0001]: + X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]]) + y = np.array([1, 1, 2, 2]) + sample_weight = np.array([1, 1, 10, 0.1]) * factor + clf = CategoricalNB(alpha=1, fit_prior=False) + clf.fit(X, y, sample_weight=sample_weight) + assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([2])) + assert_array_equal(clf.n_categories_, np.array([2, 2])) + + +@pytest.mark.parametrize( + "min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_", + [ + # check min_categories with int > observed categories + ( + 3, + np.array([[2, 0, 0], [1, 1, 0]]), + np.array([[1, 1, 0], [1, 1, 0]]), + np.array([[0, 2]]), + np.array([3, 3]), + ), + # check with list input + ( + [3, 4], + np.array([[2, 0, 0], [1, 1, 0]]), + np.array([[1, 1, 0, 0], [1, 1, 0, 0]]), + np.array([[0, 3]]), + np.array([3, 4]), + ), + # check min_categories with min less than actual + ( + [ + 1, + np.array([[2, 0], [1, 1]]), + np.array([[1, 1], [1, 1]]), + np.array([[0, 1]]), + np.array([2, 2]), + ] + ), + ], +) +def test_categoricalnb_with_min_categories( + min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_ +): + X_n_categories = np.array([[0, 0], [0, 1], [0, 0], [1, 1]]) + y_n_categories = np.array([1, 1, 2, 2]) + expected_prediction = np.array([1]) + + clf = CategoricalNB(alpha=1, fit_prior=False, min_categories=min_categories) + clf.fit(X_n_categories, y_n_categories) + X1_count, X2_count = clf.category_count_ + assert_array_equal(X1_count, exp_X1_count) + assert_array_equal(X2_count, exp_X2_count) + predictions = clf.predict(new_X) + assert_array_equal(predictions, expected_prediction) + assert_array_equal(clf.n_categories_, exp_n_categories_) + + +@pytest.mark.parametrize( + "min_categories, error_msg", + [ + ([[3, 2], [2, 4]], "'min_categories' should have shape"), + ], +) +def test_categoricalnb_min_categories_errors(min_categories, error_msg): + X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]]) + y = np.array([1, 1, 2, 2]) + + clf = CategoricalNB(alpha=1, fit_prior=False, min_categories=min_categories) + with 
pytest.raises(ValueError, match=error_msg): + clf.fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_alpha(csr_container): + # Setting alpha=0 should not output nan results when p(x_i|y_j)=0 is a case + X = np.array([[1, 0], [1, 1]]) + y = np.array([0, 1]) + nb = BernoulliNB(alpha=0.0, force_alpha=False) + msg = "alpha too small will result in numeric errors, setting alpha = 1.0e-10" + with pytest.warns(UserWarning, match=msg): + nb.partial_fit(X, y, classes=[0, 1]) + with pytest.warns(UserWarning, match=msg): + nb.fit(X, y) + prob = np.array([[1, 0], [0, 1]]) + assert_array_almost_equal(nb.predict_proba(X), prob) + + nb = MultinomialNB(alpha=0.0, force_alpha=False) + with pytest.warns(UserWarning, match=msg): + nb.partial_fit(X, y, classes=[0, 1]) + with pytest.warns(UserWarning, match=msg): + nb.fit(X, y) + prob = np.array([[2.0 / 3, 1.0 / 3], [0, 1]]) + assert_array_almost_equal(nb.predict_proba(X), prob) + + nb = CategoricalNB(alpha=0.0, force_alpha=False) + with pytest.warns(UserWarning, match=msg): + nb.fit(X, y) + prob = np.array([[1.0, 0.0], [0.0, 1.0]]) + assert_array_almost_equal(nb.predict_proba(X), prob) + + # Test sparse X + X = csr_container(X) + nb = BernoulliNB(alpha=0.0, force_alpha=False) + with pytest.warns(UserWarning, match=msg): + nb.fit(X, y) + prob = np.array([[1, 0], [0, 1]]) + assert_array_almost_equal(nb.predict_proba(X), prob) + + nb = MultinomialNB(alpha=0.0, force_alpha=False) + with pytest.warns(UserWarning, match=msg): + nb.fit(X, y) + prob = np.array([[2.0 / 3, 1.0 / 3], [0, 1]]) + assert_array_almost_equal(nb.predict_proba(X), prob) + + +def test_alpha_vector(): + X = np.array([[1, 0], [1, 1]]) + y = np.array([0, 1]) + + # Setting alpha=np.array with same length + # as number of features should be fine + alpha = np.array([1, 2]) + nb = MultinomialNB(alpha=alpha, force_alpha=False) + nb.partial_fit(X, y, classes=[0, 1]) + + # Test feature probabilities uses pseudo-counts (alpha) + feature_prob = np.array([[1 / 2, 1 / 2], [2 / 5, 3 / 5]]) + assert_array_almost_equal(nb.feature_log_prob_, np.log(feature_prob)) + + # Test predictions + prob = np.array([[5 / 9, 4 / 9], [25 / 49, 24 / 49]]) + assert_array_almost_equal(nb.predict_proba(X), prob) + + # Test alpha non-negative + alpha = np.array([1.0, -0.1]) + m_nb = MultinomialNB(alpha=alpha, force_alpha=False) + expected_msg = "All values in alpha must be greater than 0." 
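+ # A negative entry in alpha is rejected even with force_alpha=False; only values + # that are merely too small are clipped to _ALPHA_MIN (checked further below).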
+ with pytest.raises(ValueError, match=expected_msg): + m_nb.fit(X, y) + + # Test that too small pseudo-counts are replaced + ALPHA_MIN = 1e-10 + alpha = np.array([ALPHA_MIN / 2, 0.5]) + m_nb = MultinomialNB(alpha=alpha, force_alpha=False) + m_nb.partial_fit(X, y, classes=[0, 1]) + assert_array_almost_equal(m_nb._check_alpha(), [ALPHA_MIN, 0.5], decimal=12) + + # Test correct dimensions + alpha = np.array([1.0, 2.0, 3.0]) + m_nb = MultinomialNB(alpha=alpha, force_alpha=False) + expected_msg = "When alpha is an array, it should contains `n_features`" + with pytest.raises(ValueError, match=expected_msg): + m_nb.fit(X, y) + + +def test_check_accuracy_on_digits(): + # Non regression test to make sure that any further refactoring / optim + # of the NB models do not harm the performance on a slightly non-linearly + # separable dataset + X, y = load_digits(return_X_y=True) + binary_3v8 = np.logical_or(y == 3, y == 8) + X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8] + + # Multinomial NB + scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10) + assert scores.mean() > 0.86 + + scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10) + assert scores.mean() > 0.94 + + # Bernoulli NB + scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10) + assert scores.mean() > 0.83 + + scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10) + assert scores.mean() > 0.92 + + # Gaussian NB + scores = cross_val_score(GaussianNB(), X, y, cv=10) + assert scores.mean() > 0.77 + + scores = cross_val_score(GaussianNB(var_smoothing=0.1), X, y, cv=10) + assert scores.mean() > 0.89 + + scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10) + assert scores.mean() > 0.86 + + +def test_check_alpha(): + """The provided value for alpha must only be + used if alpha < _ALPHA_MIN and force_alpha is True. 
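+ Otherwise, values below _ALPHA_MIN are replaced by _ALPHA_MIN and a + UserWarning is emitted.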
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/10772 + """ + _ALPHA_MIN = 1e-10 + b = BernoulliNB(alpha=0, force_alpha=True) + assert b._check_alpha() == 0 + + alphas = np.array([0.0, 1.0]) + + b = BernoulliNB(alpha=alphas, force_alpha=True) + # We manually set `n_features_in_` not to have `_check_alpha` err + b.n_features_in_ = alphas.shape[0] + assert_array_equal(b._check_alpha(), alphas) + + msg = ( + "alpha too small will result in numeric errors, setting alpha = %.1e" + % _ALPHA_MIN + ) + b = BernoulliNB(alpha=0, force_alpha=False) + with pytest.warns(UserWarning, match=msg): + assert b._check_alpha() == _ALPHA_MIN + + b = BernoulliNB(alpha=0, force_alpha=False) + with pytest.warns(UserWarning, match=msg): + assert b._check_alpha() == _ALPHA_MIN + + b = BernoulliNB(alpha=alphas, force_alpha=False) + # We manually set `n_features_in_` not to have `_check_alpha` err + b.n_features_in_ = alphas.shape[0] + with pytest.warns(UserWarning, match=msg): + assert_array_equal(b._check_alpha(), np.array([_ALPHA_MIN, 1.0])) + + +@pytest.mark.parametrize("Estimator", ALL_NAIVE_BAYES_CLASSES) +def test_predict_joint_proba(Estimator, global_random_seed): + X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) + est = Estimator().fit(X2, y2) + jll = est.predict_joint_log_proba(X2) + log_prob_x = logsumexp(jll, axis=1) + log_prob_x_y = jll - np.atleast_2d(log_prob_x).T + assert_allclose(est.predict_log_proba(X2), log_prob_x_y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_random_projection.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_random_projection.py new file mode 100644 index 0000000000000000000000000000000000000000..b279ab75ec8d93b07aca0c41f725324a85fe1c7b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_random_projection.py @@ -0,0 +1,584 @@ +import functools +import warnings +from typing import Any, List + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn.exceptions import DataDimensionalityWarning, NotFittedError +from sklearn.metrics import euclidean_distances +from sklearn.random_projection import ( + GaussianRandomProjection, + SparseRandomProjection, + _gaussian_random_matrix, + _sparse_random_matrix, + johnson_lindenstrauss_min_dim, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import COO_CONTAINERS + +all_sparse_random_matrix: List[Any] = [_sparse_random_matrix] +all_dense_random_matrix: List[Any] = [_gaussian_random_matrix] +all_random_matrix = all_sparse_random_matrix + all_dense_random_matrix + +all_SparseRandomProjection: List[Any] = [SparseRandomProjection] +all_DenseRandomProjection: List[Any] = [GaussianRandomProjection] +all_RandomProjection = all_SparseRandomProjection + all_DenseRandomProjection + + +def make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=None, + sparse_format="csr", +): + """Make some random data with uniformly located non zero entries with + Gaussian distributed values; `sparse_format` can be `"csr"` (default) or + `None` (in which case a dense array is returned). 
+ """ + rng = np.random.RandomState(random_state) + data_coo = coo_container( + ( + rng.randn(n_nonzeros), + ( + rng.randint(n_samples, size=n_nonzeros), + rng.randint(n_features, size=n_nonzeros), + ), + ), + shape=(n_samples, n_features), + ) + if sparse_format is not None: + return data_coo.asformat(sparse_format) + else: + return data_coo.toarray() + + +def densify(matrix): + if not sp.issparse(matrix): + return matrix + else: + return matrix.toarray() + + +n_samples, n_features = (10, 1000) +n_nonzeros = int(n_samples * n_features / 100.0) + + +############################################################################### +# test on JL lemma +############################################################################### + + +@pytest.mark.parametrize( + "n_samples, eps", + [ + ([100, 110], [0.9, 1.1]), + ([90, 100], [0.1, 0.0]), + ([50, -40], [0.1, 0.2]), + ], +) +def test_invalid_jl_domain(n_samples, eps): + with pytest.raises(ValueError): + johnson_lindenstrauss_min_dim(n_samples, eps=eps) + + +def test_input_size_jl_min_dim(): + with pytest.raises(ValueError): + johnson_lindenstrauss_min_dim(3 * [100], eps=2 * [0.9]) + + johnson_lindenstrauss_min_dim( + np.random.randint(1, 10, size=(10, 10)), eps=np.full((10, 10), 0.5) + ) + + +############################################################################### +# tests random matrix generation +############################################################################### +def check_input_size_random_matrix(random_matrix): + inputs = [(0, 0), (-1, 1), (1, -1), (1, 0), (-1, 0)] + for n_components, n_features in inputs: + with pytest.raises(ValueError): + random_matrix(n_components, n_features) + + +def check_size_generated(random_matrix): + inputs = [(1, 5), (5, 1), (5, 5), (1, 1)] + for n_components, n_features in inputs: + assert random_matrix(n_components, n_features).shape == ( + n_components, + n_features, + ) + + +def check_zero_mean_and_unit_norm(random_matrix): + # All random matrices should produce a transformation matrix + # with zero mean and unit norm for each column + + A = densify(random_matrix(10000, 1, random_state=0)) + + assert_array_almost_equal(0, np.mean(A), 3) + assert_array_almost_equal(1.0, np.linalg.norm(A), 1) + + +def check_input_with_sparse_random_matrix(random_matrix): + n_components, n_features = 5, 10 + + for density in [-1.0, 0.0, 1.1]: + with pytest.raises(ValueError): + random_matrix(n_components, n_features, density=density) + + +@pytest.mark.parametrize("random_matrix", all_random_matrix) +def test_basic_property_of_random_matrix(random_matrix): + # Check basic properties of random matrix generation + check_input_size_random_matrix(random_matrix) + check_size_generated(random_matrix) + check_zero_mean_and_unit_norm(random_matrix) + + +@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix) +def test_basic_property_of_sparse_random_matrix(random_matrix): + check_input_with_sparse_random_matrix(random_matrix) + + random_matrix_dense = functools.partial(random_matrix, density=1.0) + + check_zero_mean_and_unit_norm(random_matrix_dense) + + +def test_gaussian_random_matrix(): + # Check some statistical properties of the Gaussian random matrix + # Check that the random matrix follows the proper distribution. + # Let's say that each element a_ij of A is taken from + # a_ij ~ N(0.0, 1 / n_components).
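+ # so the sample mean should be close to 0 and the sample variance close to + # 1 / n_components, which the assertions below check on a 100 x 1000 draw.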
+ # + n_components = 100 + n_features = 1000 + A = _gaussian_random_matrix(n_components, n_features, random_state=0) + + assert_array_almost_equal(0.0, np.mean(A), 2) + assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1) + + +def test_sparse_random_matrix(): + # Check some statistical properties of the sparse random matrix + n_components = 100 + n_features = 500 + + for density in [0.3, 1.0]: + s = 1 / density + + A = _sparse_random_matrix( + n_components, n_features, density=density, random_state=0 + ) + A = densify(A) + + # Check possible values + values = np.unique(A) + assert np.sqrt(s) / np.sqrt(n_components) in values + assert -np.sqrt(s) / np.sqrt(n_components) in values + + if density == 1.0: + assert np.size(values) == 2 + else: + assert 0.0 in values + assert np.size(values) == 3 + + # Check that the random matrix follows the proper distribution. + # Let's say that each element a_ij of A is taken from + # + # - -sqrt(s) / sqrt(n_components) with probability 1 / 2s + # - 0 with probability 1 - 1 / s + # - +sqrt(s) / sqrt(n_components) with probability 1 / 2s + # + assert_almost_equal(np.mean(A == 0.0), 1 - 1 / s, decimal=2) + assert_almost_equal( + np.mean(A == np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2 + ) + assert_almost_equal( + np.mean(A == -np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2 + ) + + assert_almost_equal(np.var(A == 0.0, ddof=1), (1 - 1 / s) * 1 / s, decimal=2) + assert_almost_equal( + np.var(A == np.sqrt(s) / np.sqrt(n_components), ddof=1), + (1 - 1 / (2 * s)) * 1 / (2 * s), + decimal=2, + ) + assert_almost_equal( + np.var(A == -np.sqrt(s) / np.sqrt(n_components), ddof=1), + (1 - 1 / (2 * s)) * 1 / (2 * s), + decimal=2, + ) + + +############################################################################### +# tests on random projection transformer +############################################################################### + + +def test_random_projection_transformer_invalid_input(): + n_components = "auto" + fit_data = [[0, 1, 2]] + for RandomProjection in all_RandomProjection: + with pytest.raises(ValueError): + RandomProjection(n_components=n_components).fit(fit_data) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_try_to_transform_before_fit(coo_container, global_random_seed): + data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=global_random_seed, + sparse_format=None, + ) + for RandomProjection in all_RandomProjection: + with pytest.raises(NotFittedError): + RandomProjection(n_components="auto").transform(data) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_too_many_samples_to_find_a_safe_embedding(coo_container, global_random_seed): + data = make_sparse_random_data( + coo_container, + n_samples=1000, + n_features=100, + n_nonzeros=1000, + random_state=global_random_seed, + sparse_format=None, + ) + + for RandomProjection in all_RandomProjection: + rp = RandomProjection(n_components="auto", eps=0.1) + expected_msg = ( + "eps=0.100000 and n_samples=1000 lead to a target dimension" + " of 5920 which is larger than the original space with" + " n_features=100" + ) + with pytest.raises(ValueError, match=expected_msg): + rp.fit(data) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_random_projection_embedding_quality(coo_container): + data = make_sparse_random_data( + coo_container, + n_samples=8, + n_features=5000, + n_nonzeros=15000, + random_state=0, + sparse_format=None, + ) + eps = 0.2 + +
original_distances = euclidean_distances(data, squared=True) + original_distances = original_distances.ravel() + non_identical = original_distances != 0.0 + + # remove 0 distances to avoid division by 0 + original_distances = original_distances[non_identical] + + for RandomProjection in all_RandomProjection: + rp = RandomProjection(n_components="auto", eps=eps, random_state=0) + projected = rp.fit_transform(data) + + projected_distances = euclidean_distances(projected, squared=True) + projected_distances = projected_distances.ravel() + + # remove 0 distances to avoid division by 0 + projected_distances = projected_distances[non_identical] + + distances_ratio = projected_distances / original_distances + + # check that the automatically tuned values for the density respect the + # contract for eps: pairwise distances are preserved according to the + # Johnson-Lindenstrauss lemma + assert distances_ratio.max() < 1 + eps + assert 1 - eps < distances_ratio.min() + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_SparseRandomProj_output_representation(coo_container): + dense_data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=0, + sparse_format=None, + ) + sparse_data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=0, + sparse_format="csr", + ) + for SparseRandomProj in all_SparseRandomProjection: + # when using sparse input, the projected data can be forced to be a + # dense numpy array + rp = SparseRandomProj(n_components=10, dense_output=True, random_state=0) + rp.fit(dense_data) + assert isinstance(rp.transform(dense_data), np.ndarray) + assert isinstance(rp.transform(sparse_data), np.ndarray) + + # the output can be left as a sparse matrix instead + rp = SparseRandomProj(n_components=10, dense_output=False, random_state=0) + rp = rp.fit(dense_data) + # output for dense input will stay dense: + assert isinstance(rp.transform(dense_data), np.ndarray) + + # output for sparse input will be sparse: + assert sp.issparse(rp.transform(sparse_data)) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_correct_RandomProjection_dimensions_embedding( + coo_container, global_random_seed +): + data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=global_random_seed, + sparse_format=None, + ) + for RandomProjection in all_RandomProjection: + rp = RandomProjection(n_components="auto", random_state=0, eps=0.5).fit(data) + + # the number of components is adjusted from the shape of the training + # set + assert rp.n_components == "auto" + assert rp.n_components_ == 110 + + if RandomProjection in all_SparseRandomProjection: + assert rp.density == "auto" + assert_almost_equal(rp.density_, 0.03, 2) + + assert rp.components_.shape == (110, n_features) + + projected_1 = rp.transform(data) + assert projected_1.shape == (n_samples, 110) + + # once the RP is 'fitted' the projection is always the same + projected_2 = rp.transform(data) + assert_array_equal(projected_1, projected_2) + + # fit transform with same random seed will lead to the same results + rp2 = RandomProjection(random_state=0, eps=0.5) + projected_3 = rp2.fit_transform(data) + assert_array_equal(projected_1, projected_3) + + # Try to transform with an input X of a size different from the one seen at fit.
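+ # The slice below keeps only 4 of the 1000 features seen during fit, so + # transform must raise a ValueError on the feature-count mismatch.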
+ with pytest.raises(ValueError): + rp.transform(data[:, 1:5]) + + # it is also possible to fix the number of components and the density + # level + if RandomProjection in all_SparseRandomProjection: + rp = RandomProjection(n_components=100, density=0.001, random_state=0) + projected = rp.fit_transform(data) + assert projected.shape == (n_samples, 100) + assert rp.components_.shape == (100, n_features) + assert rp.components_.nnz < 115 # close to 1% density + assert 85 < rp.components_.nnz # close to 1% density + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_warning_n_components_greater_than_n_features( + coo_container, global_random_seed +): + n_features = 20 + n_samples = 5 + n_nonzeros = int(n_features / 4) + data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=global_random_seed, + sparse_format=None, + ) + + for RandomProjection in all_RandomProjection: + with pytest.warns(DataDimensionalityWarning): + RandomProjection(n_components=n_features + 1).fit(data) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_works_with_sparse_data(coo_container, global_random_seed): + n_features = 20 + n_samples = 5 + n_nonzeros = int(n_features / 4) + dense_data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=global_random_seed, + sparse_format=None, + ) + sparse_data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=global_random_seed, + sparse_format="csr", + ) + + for RandomProjection in all_RandomProjection: + rp_dense = RandomProjection(n_components=3, random_state=1).fit(dense_data) + rp_sparse = RandomProjection(n_components=3, random_state=1).fit(sparse_data) + assert_array_almost_equal( + densify(rp_dense.components_), densify(rp_sparse.components_) + ) + + +def test_johnson_lindenstrauss_min_dim(): + """Test Johnson-Lindenstrauss for small eps. + + Regression test for #17111: before #19374, 32-bit systems would fail. 
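+ + The bound used by johnson_lindenstrauss_min_dim is + 4 * log(n_samples) / (eps**2 / 2 - eps**3 / 3), which for n_samples=100 and + eps=1e-5 is roughly 3.68e11, matching the value asserted below.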
+ """ + assert johnson_lindenstrauss_min_dim(100, eps=1e-5) == 368416070986 + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +@pytest.mark.parametrize("random_projection_cls", all_RandomProjection) +def test_random_projection_feature_names_out( + coo_container, random_projection_cls, global_random_seed +): + data = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros, + random_state=global_random_seed, + sparse_format=None, + ) + random_projection = random_projection_cls(n_components=2) + random_projection.fit(data) + names_out = random_projection.get_feature_names_out() + class_name_lower = random_projection_cls.__name__.lower() + expected_names_out = np.array( + [f"{class_name_lower}{i}" for i in range(random_projection.n_components_)], + dtype=object, + ) + + assert_array_equal(names_out, expected_names_out) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +@pytest.mark.parametrize("n_samples", (2, 9, 10, 11, 1000)) +@pytest.mark.parametrize("n_features", (2, 9, 10, 11, 1000)) +@pytest.mark.parametrize("random_projection_cls", all_RandomProjection) +@pytest.mark.parametrize("compute_inverse_components", [True, False]) +def test_inverse_transform( + coo_container, + n_samples, + n_features, + random_projection_cls, + compute_inverse_components, + global_random_seed, +): + n_components = 10 + + random_projection = random_projection_cls( + n_components=n_components, + compute_inverse_components=compute_inverse_components, + random_state=global_random_seed, + ) + + X_dense = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros=n_samples * n_features // 100 + 1, + random_state=global_random_seed, + sparse_format=None, + ) + X_csr = make_sparse_random_data( + coo_container, + n_samples, + n_features, + n_nonzeros=n_samples * n_features // 100 + 1, + random_state=global_random_seed, + sparse_format="csr", + ) + + for X in [X_dense, X_csr]: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=( + "The number of components is higher than the number of features" + ), + category=DataDimensionalityWarning, + ) + projected = random_projection.fit_transform(X) + + if compute_inverse_components: + assert hasattr(random_projection, "inverse_components_") + inv_components = random_projection.inverse_components_ + assert inv_components.shape == (n_features, n_components) + + projected_back = random_projection.inverse_transform(projected) + assert projected_back.shape == X.shape + + projected_again = random_projection.transform(projected_back) + if hasattr(projected, "toarray"): + projected = projected.toarray() + assert_allclose(projected, projected_again, rtol=1e-7, atol=1e-10) + + +@pytest.mark.parametrize("random_projection_cls", all_RandomProjection) +@pytest.mark.parametrize( + "input_dtype, expected_dtype", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_random_projection_dtype_match( + random_projection_cls, input_dtype, expected_dtype +): + # Verify output matrix dtype + rng = np.random.RandomState(42) + X = rng.rand(25, 3000) + rp = random_projection_cls(random_state=0) + transformed = rp.fit_transform(X.astype(input_dtype)) + + assert rp.components_.dtype == expected_dtype + assert transformed.dtype == expected_dtype + + +@pytest.mark.parametrize("random_projection_cls", all_RandomProjection) +def test_random_projection_numerical_consistency(random_projection_cls): + # Verify numerical consistency 
among np.float32 and np.float64 + atol = 1e-5 + rng = np.random.RandomState(42) + X = rng.rand(25, 3000) + rp_32 = random_projection_cls(random_state=0) + rp_64 = random_projection_cls(random_state=0) + + projection_32 = rp_32.fit_transform(X.astype(np.float32)) + projection_64 = rp_64.fit_transform(X.astype(np.float64)) + + assert_allclose(projection_64, projection_32, atol=atol) + + assert_allclose_dense_sparse(rp_32.components_, rp_64.components_)