diff --git a/.gitattributes b/.gitattributes index 4fa34db002e390665d8b0547a722dfbc445b77b9..c27f50943976219b4cbef8fe3196b3f36e2d0502 100644 --- a/.gitattributes +++ b/.gitattributes @@ -83,3 +83,4 @@ llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs dif llmeval-env/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8fcf8c68444e5e97b45520110b7c9c5ac38a62c7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__init__.py @@ -0,0 +1,44 @@ +""" +The :mod:`sklearn.covariance` module includes methods and algorithms to +robustly estimate the covariance of features given a set of points. The +precision matrix defined as the inverse of the covariance is also estimated. +Covariance estimation is closely related to the theory of Gaussian Graphical +Models. +""" + +from ._elliptic_envelope import EllipticEnvelope +from ._empirical_covariance import ( + EmpiricalCovariance, + empirical_covariance, + log_likelihood, +) +from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso +from ._robust_covariance import MinCovDet, fast_mcd +from ._shrunk_covariance import ( + OAS, + LedoitWolf, + ShrunkCovariance, + ledoit_wolf, + ledoit_wolf_shrinkage, + oas, + shrunk_covariance, +) + +__all__ = [ + "EllipticEnvelope", + "EmpiricalCovariance", + "GraphicalLasso", + "GraphicalLassoCV", + "LedoitWolf", + "MinCovDet", + "OAS", + "ShrunkCovariance", + "empirical_covariance", + "fast_mcd", + "graphical_lasso", + "ledoit_wolf", + "ledoit_wolf_shrinkage", + "log_likelihood", + "oas", + "shrunk_covariance", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py new file mode 100644 index 0000000000000000000000000000000000000000..ed99a38c0ee56d7fb2222204612dce09529b670d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py @@ -0,0 +1,267 @@ +# Author: Virgile Fritsch +# +# License: BSD 3 clause + +from numbers import Real + +import numpy as np + +from ..base import OutlierMixin, _fit_context +from ..metrics import accuracy_score +from ..utils._param_validation import Interval +from ..utils.validation import check_is_fitted +from ._robust_covariance import MinCovDet + + +class EllipticEnvelope(OutlierMixin, MinCovDet): + """An object for detecting outliers in a Gaussian distributed dataset. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, the support of robust location and covariance estimates + is computed, and a covariance estimate is recomputed from it, + without centering the data. 
+ Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, the robust location and covariance are directly computed + with the FastMCD algorithm without additional treatment. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. If None, the minimum value of support_fraction will + be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`. + Range is (0, 1). + + contamination : float, default=0.1 + The amount of contamination of the data set, i.e. the proportion + of outliers in the data set. Range is (0, 0.5]. + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling + the data. Pass an int for reproducible results across multiple function + calls. See :term:`Glossary `. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated robust location. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated robust covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute the + robust estimates of location and shape. + + offset_ : float + Offset used to define the decision function from the raw scores. + We have the relation: ``decision_function = score_samples - offset_``. + The offset depends on the contamination parameter and is defined in + such a way we obtain the expected number of outliers (samples with + decision function < 0) in training. + + .. versionadded:: 0.20 + + raw_location_ : ndarray of shape (n_features,) + The raw robust estimated location before correction and re-weighting. + + raw_covariance_ : ndarray of shape (n_features, n_features) + The raw robust estimated covariance before correction and re-weighting. + + raw_support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the raw robust estimates of location and shape, before correction + and re-weighting. + + dist_ : ndarray of shape (n_samples,) + Mahalanobis distances of the training set (on which :meth:`fit` is + called) observations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + Outlier detection from covariance estimation may break or not + perform well in high-dimensional settings. In particular, one will + always take care to work with ``n_samples > n_features ** 2``. + + References + ---------- + .. [1] Rousseeuw, P.J., Van Driessen, K. 
"A fast algorithm for the + minimum covariance determinant estimator" Technometrics 41(3), 212 + (1999) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import EllipticEnvelope + >>> true_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0], + ... cov=true_cov, + ... size=500) + >>> cov = EllipticEnvelope(random_state=0).fit(X) + >>> # predict returns 1 for an inlier and -1 for an outlier + >>> cov.predict([[0, 0], + ... [3, 3]]) + array([ 1, -1]) + >>> cov.covariance_ + array([[0.7411..., 0.2535...], + [0.2535..., 0.3053...]]) + >>> cov.location_ + array([0.0813... , 0.0427...]) + """ + + _parameter_constraints: dict = { + **MinCovDet._parameter_constraints, + "contamination": [Interval(Real, 0, 0.5, closed="right")], + } + + def __init__( + self, + *, + store_precision=True, + assume_centered=False, + support_fraction=None, + contamination=0.1, + random_state=None, + ): + super().__init__( + store_precision=store_precision, + assume_centered=assume_centered, + support_fraction=support_fraction, + random_state=random_state, + ) + self.contamination = contamination + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the EllipticEnvelope model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + super().fit(X) + self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination) + return self + + def decision_function(self, X): + """Compute the decision function of the given observations. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + decision : ndarray of shape (n_samples,) + Decision function of the samples. + It is equal to the shifted Mahalanobis distances. + The threshold for being an outlier is 0, which ensures a + compatibility with other outlier detection algorithms. + """ + check_is_fitted(self) + negative_mahal_dist = self.score_samples(X) + return negative_mahal_dist - self.offset_ + + def score_samples(self, X): + """Compute the negative Mahalanobis distances. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + negative_mahal_distances : array-like of shape (n_samples,) + Opposite of the Mahalanobis distances. + """ + check_is_fitted(self) + return -self.mahalanobis(X) + + def predict(self, X): + """ + Predict labels (1 inlier, -1 outlier) of X according to fitted model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + Returns -1 for anomalies/outliers and +1 for inliers. + """ + values = self.decision_function(X) + is_inlier = np.full(values.shape[0], -1, dtype=int) + is_inlier[values >= 0] = 1 + + return is_inlier + + def score(self, X, y, sample_weight=None): + """Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for X. 
+ + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of self.predict(X) w.r.t. y. + """ + return accuracy_score(y, self.predict(X), sample_weight=sample_weight) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py new file mode 100644 index 0000000000000000000000000000000000000000..fb40ffda162a4c0f31fc82c9124daff9bb4ecbb2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py @@ -0,0 +1,1110 @@ +"""GraphicalLasso: sparse inverse covariance estimation with an l1-penalized +estimator. +""" + +# Author: Gael Varoquaux +# License: BSD 3 clause +# Copyright: INRIA +import operator +import sys +import time +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import _fit_context +from ..exceptions import ConvergenceWarning + +# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast' +from ..linear_model import _cd_fast as cd_fast # type: ignore +from ..linear_model import lars_path_gram +from ..model_selection import check_cv, cross_val_score +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _is_arraylike_not_scalar, + check_random_state, + check_scalar, +) +from . import EmpiricalCovariance, empirical_covariance, log_likelihood + + +# Helper functions to compute the objective and dual objective functions +# of the l1-penalized estimator +def _objective(mle, precision_, alpha): + """Evaluation of the graphical-lasso objective function + + the objective function is made of a shifted scaled version of the + normalized log-likelihood (i.e. its empirical mean over the samples) and a + penalisation term to promote sparsity + """ + p = precision_.shape[0] + cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi) + cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) + return cost + + +def _dual_gap(emp_cov, precision_, alpha): + """Expression of the dual gap convergence criterion + + The specific definition is given in Duchi "Projected Subgradient Methods + for Learning Sparse Gaussians". + """ + gap = np.sum(emp_cov * precision_) + gap -= precision_.shape[0] + gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) + return gap + + +# The g-lasso algorithm +def _graphical_lasso( + emp_cov, + alpha, + *, + cov_init=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, +): + _, n_features = emp_cov.shape + if alpha == 0: + # Early return without regularization + precision_ = linalg.inv(emp_cov) + cost = -2.0 * log_likelihood(emp_cov, precision_) + cost += n_features * np.log(2 * np.pi) + d_gap = np.sum(emp_cov * precision_) - n_features + return emp_cov, precision_, (cost, d_gap), 0 + + if cov_init is None: + covariance_ = emp_cov.copy() + else: + covariance_ = cov_init.copy() + # As a trivial regularization (Tikhonov like), we scale down the + # off-diagonal coefficients of our starting point: This is needed, as + # in the cross-validation the cov_init can easily be + # ill-conditioned, and the CV loop blows. 
Beside, this takes + # conservative stand-point on the initial conditions, and it tends to + # make the convergence go faster. + covariance_ *= 0.95 + diagonal = emp_cov.flat[:: n_features + 1] + covariance_.flat[:: n_features + 1] = diagonal + precision_ = linalg.pinvh(covariance_) + + indices = np.arange(n_features) + i = 0 # initialize the counter to be robust to `max_iter=0` + costs = list() + # The different l1 regression solver have different numerical errors + if mode == "cd": + errors = dict(over="raise", invalid="ignore") + else: + errors = dict(invalid="raise") + try: + # be robust to the max_iter=0 edge case, see: + # https://github.com/scikit-learn/scikit-learn/issues/4134 + d_gap = np.inf + # set a sub_covariance buffer + sub_covariance = np.copy(covariance_[1:, 1:], order="C") + for i in range(max_iter): + for idx in range(n_features): + # To keep the contiguous matrix `sub_covariance` equal to + # covariance_[indices != idx].T[indices != idx] + # we only need to update 1 column and 1 line when idx changes + if idx > 0: + di = idx - 1 + sub_covariance[di] = covariance_[di][indices != idx] + sub_covariance[:, di] = covariance_[:, di][indices != idx] + else: + sub_covariance[:] = covariance_[1:, 1:] + row = emp_cov[idx, indices != idx] + with np.errstate(**errors): + if mode == "cd": + # Use coordinate descent + coefs = -( + precision_[indices != idx, idx] + / (precision_[idx, idx] + 1000 * eps) + ) + coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram( + coefs, + alpha, + 0, + sub_covariance, + row, + row, + max_iter, + enet_tol, + check_random_state(None), + False, + ) + else: # mode == "lars" + _, _, coefs = lars_path_gram( + Xy=row, + Gram=sub_covariance, + n_samples=row.size, + alpha_min=alpha / (n_features - 1), + copy_Gram=True, + eps=eps, + method="lars", + return_path=False, + ) + # Update the precision matrix + precision_[idx, idx] = 1.0 / ( + covariance_[idx, idx] + - np.dot(covariance_[indices != idx, idx], coefs) + ) + precision_[indices != idx, idx] = -precision_[idx, idx] * coefs + precision_[idx, indices != idx] = -precision_[idx, idx] * coefs + coefs = np.dot(sub_covariance, coefs) + covariance_[idx, indices != idx] = coefs + covariance_[indices != idx, idx] = coefs + if not np.isfinite(precision_.sum()): + raise FloatingPointError( + "The system is too ill-conditioned for this solver" + ) + d_gap = _dual_gap(emp_cov, precision_, alpha) + cost = _objective(emp_cov, precision_, alpha) + if verbose: + print( + "[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e" + % (i, cost, d_gap) + ) + costs.append((cost, d_gap)) + if np.abs(d_gap) < tol: + break + if not np.isfinite(cost) and i > 0: + raise FloatingPointError( + "Non SPD result: the system is too ill-conditioned for this solver" + ) + else: + warnings.warn( + "graphical_lasso: did not converge after %i iteration: dual gap: %.3e" + % (max_iter, d_gap), + ConvergenceWarning, + ) + except FloatingPointError as e: + e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",) + raise e + + return covariance_, precision_, costs, i + 1 + + +def alpha_max(emp_cov): + """Find the maximum alpha for which there are some non-zeros off-diagonal. + + Parameters + ---------- + emp_cov : ndarray of shape (n_features, n_features) + The sample covariance matrix. + + Notes + ----- + This results from the bound for the all the Lasso that are solved + in GraphicalLasso: each time, the row of cov corresponds to Xy. As the + bound for alpha is given by `max(abs(Xy))`, the result follows. 
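As a sketch of where this bound comes from: the overall problem behind these per-column Lasso subproblems is, up to an additive constant,

    \hat{\Theta} = \arg\min_{\Theta \succ 0} \; \operatorname{tr}(S\Theta) - \log\det\Theta + \alpha \sum_{i \neq j} |\Theta_{ij}|,

with S the empirical covariance (this is what the `_objective` helper above evaluates, while `_dual_gap` tracks tr(S\Theta) - p + \alpha \sum_{i \neq j} |\Theta_{ij}| as the stopping criterion). Each block-coordinate update solves one Lasso in which a row of S plays the role of Xy, and a Lasso solution is identically zero once alpha >= max(abs(Xy)); taking the maximum over the off-diagonal entries of S gives the value returned here.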
+ """ + A = np.copy(emp_cov) + A.flat[:: A.shape[0] + 1] = 0 + return np.max(np.abs(A)) + + +@validate_params( + { + "emp_cov": ["array-like"], + "cov_init": ["array-like", None], + "return_costs": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def graphical_lasso( + emp_cov, + alpha, + *, + cov_init=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + return_costs=False, + eps=np.finfo(np.float64).eps, + return_n_iter=False, +): + """L1-penalized covariance estimator. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + graph_lasso has been renamed to graphical_lasso + + Parameters + ---------- + emp_cov : array-like of shape (n_features, n_features) + Empirical covariance from which to compute the covariance estimate. + + alpha : float + The regularization parameter: the higher alpha, the more + regularization, the sparser the inverse covariance. + Range is (0, inf]. + + cov_init : array of shape (n_features, n_features), default=None + The initial guess for the covariance. If None, then the empirical + covariance is used. + + .. deprecated:: 1.3 + `cov_init` is deprecated in 1.3 and will be removed in 1.5. + It currently has no effect. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + The maximum number of iterations. + + verbose : bool, default=False + If verbose is True, the objective function and dual gap are + printed at each iteration. + + return_costs : bool, default=False + If return_costs is True, the objective function and dual gap + at each iteration are returned. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + The estimated covariance matrix. + + precision : ndarray of shape (n_features, n_features) + The estimated (sparse) precision matrix. + + costs : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + n_iter : int + Number of iterations. Returned only if `return_n_iter` is set to True. + + See Also + -------- + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with + cross-validated choice of the l1 penalty. + + Notes + ----- + The algorithm employed to solve this problem is the GLasso algorithm, + from the Friedman 2008 Biostatistics paper. It is the same algorithm + as in the R `glasso` package. + + One possible difference with the `glasso` R package is that the + diagonal coefficients are not penalized. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_spd_matrix + >>> from sklearn.covariance import empirical_covariance, graphical_lasso + >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42) + >>> rng = np.random.RandomState(42) + >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3) + >>> emp_cov = empirical_covariance(X, assume_centered=True) + >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05) + >>> emp_cov + array([[ 1.68..., 0.21..., -0.20...], + [ 0.21..., 0.22..., -0.08...], + [-0.20..., -0.08..., 0.23...]]) + """ + + if cov_init is not None: + warnings.warn( + ( + "The cov_init parameter is deprecated in 1.3 and will be removed in " + "1.5. It does not have any effect." + ), + FutureWarning, + ) + + model = GraphicalLasso( + alpha=alpha, + mode=mode, + covariance="precomputed", + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + verbose=verbose, + eps=eps, + assume_centered=True, + ).fit(emp_cov) + + output = [model.covariance_, model.precision_] + if return_costs: + output.append(model.costs_) + if return_n_iter: + output.append(model.n_iter_) + return tuple(output) + + +class BaseGraphicalLasso(EmpiricalCovariance): + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "tol": [Interval(Real, 0, None, closed="right")], + "enet_tol": [Interval(Real, 0, None, closed="right")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "mode": [StrOptions({"cd", "lars"})], + "verbose": ["verbose"], + "eps": [Interval(Real, 0, None, closed="both")], + } + _parameter_constraints.pop("store_precision") + + def __init__( + self, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + mode="cd", + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__(assume_centered=assume_centered) + self.tol = tol + self.enet_tol = enet_tol + self.max_iter = max_iter + self.mode = mode + self.verbose = verbose + self.eps = eps + + +class GraphicalLasso(BaseGraphicalLasso): + """Sparse inverse covariance estimation with an l1-penalized estimator. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + GraphLasso has been renamed to GraphicalLasso + + Parameters + ---------- + alpha : float, default=0.01 + The regularization parameter: the higher alpha, the more + regularization, the sparser the inverse covariance. + Range is (0, inf]. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + covariance : "precomputed", default=None + If covariance is "precomputed", the input data in `fit` is assumed + to be the covariance matrix. If `None`, the empirical covariance + is estimated from the data `X`. + + .. versionadded:: 1.3 + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + The maximum number of iterations. + + verbose : bool, default=False + If verbose is True, the objective function and dual gap are + plotted at each iteration. 
+ + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + + n_iter_ : int + Number of iterations run. + + costs_ : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + .. versionadded:: 1.3 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + graphical_lasso : L1-penalized covariance estimator. + GraphicalLassoCV : Sparse inverse covariance with + cross-validated choice of the l1 penalty. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import GraphicalLasso + >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.2, 0.0, 0.3, 0.1], + ... [0.0, 0.0, 0.1, 0.7]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0], + ... cov=true_cov, + ... size=200) + >>> cov = GraphicalLasso().fit(X) + >>> np.around(cov.covariance_, decimals=3) + array([[0.816, 0.049, 0.218, 0.019], + [0.049, 0.364, 0.017, 0.034], + [0.218, 0.017, 0.322, 0.093], + [0.019, 0.034, 0.093, 0.69 ]]) + >>> np.around(cov.location_, decimals=3) + array([0.073, 0.04 , 0.038, 0.143]) + """ + + _parameter_constraints: dict = { + **BaseGraphicalLasso._parameter_constraints, + "alpha": [Interval(Real, 0, None, closed="both")], + "covariance": [StrOptions({"precomputed"}), None], + } + + def __init__( + self, + alpha=0.01, + *, + mode="cd", + covariance=None, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__( + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + mode=mode, + verbose=verbose, + eps=eps, + assume_centered=assume_centered, + ) + self.alpha = alpha + self.covariance = covariance + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the GraphicalLasso model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
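As an illustrative sketch (toy data, not from the upstream sources) of the covariance="precomputed" mode described in the parameters above, where fit receives an already-computed covariance matrix rather than raw samples:

import numpy as np
from sklearn.covariance import GraphicalLasso, empirical_covariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=np.zeros(3), cov=np.eye(3), size=100)  # toy data
emp_cov = empirical_covariance(X)

# fit() now treats its input as a covariance matrix, not as samples.
model = GraphicalLasso(alpha=0.05, covariance="precomputed").fit(emp_cov)
print(model.precision_.shape)  # (3, 3)
print(model.location_)         # zeros: no raw data was seen, so no mean is estimated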
+ """ + # Covariance does not make sense for a single feature + X = self._validate_data(X, ensure_min_features=2, ensure_min_samples=2) + + if self.covariance == "precomputed": + emp_cov = X.copy() + self.location_ = np.zeros(X.shape[1]) + else: + emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + + self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( + emp_cov, + alpha=self.alpha, + cov_init=None, + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=self.max_iter, + verbose=self.verbose, + eps=self.eps, + ) + return self + + +# Cross-validation with GraphicalLasso +def graphical_lasso_path( + X, + alphas, + cov_init=None, + X_test=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, +): + """l1-penalized covariance estimator along a path of decreasing alphas + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + alphas : array-like of shape (n_alphas,) + The list of regularization parameters, decreasing order. + + cov_init : array of shape (n_features, n_features), default=None + The initial guess for the covariance. + + X_test : array of shape (n_test_samples, n_features), default=None + Optional test matrix to measure generalisation error. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. The tolerance must be a positive + number. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. The tolerance must be a positive number. + + max_iter : int, default=100 + The maximum number of iterations. This parameter should be a strictly + positive integer. + + verbose : int or bool, default=False + The higher the verbosity flag, the more information is printed + during the fitting. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + Returns + ------- + covariances_ : list of shape (n_alphas,) of ndarray of shape \ + (n_features, n_features) + The estimated covariance matrices. + + precisions_ : list of shape (n_alphas,) of ndarray of shape \ + (n_features, n_features) + The estimated (sparse) precision matrices. + + scores_ : list of shape (n_alphas,), dtype=float + The generalisation error (log-likelihood) on the test data. + Returned only if test data is passed. 
+ """ + inner_verbose = max(0, verbose - 1) + emp_cov = empirical_covariance(X) + if cov_init is None: + covariance_ = emp_cov.copy() + else: + covariance_ = cov_init + covariances_ = list() + precisions_ = list() + scores_ = list() + if X_test is not None: + test_emp_cov = empirical_covariance(X_test) + + for alpha in alphas: + try: + # Capture the errors, and move on + covariance_, precision_, _, _ = _graphical_lasso( + emp_cov, + alpha=alpha, + cov_init=covariance_, + mode=mode, + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + verbose=inner_verbose, + eps=eps, + ) + covariances_.append(covariance_) + precisions_.append(precision_) + if X_test is not None: + this_score = log_likelihood(test_emp_cov, precision_) + except FloatingPointError: + this_score = -np.inf + covariances_.append(np.nan) + precisions_.append(np.nan) + if X_test is not None: + if not np.isfinite(this_score): + this_score = -np.inf + scores_.append(this_score) + if verbose == 1: + sys.stderr.write(".") + elif verbose > 1: + if X_test is not None: + print( + "[graphical_lasso_path] alpha: %.2e, score: %.2e" + % (alpha, this_score) + ) + else: + print("[graphical_lasso_path] alpha: %.2e" % alpha) + if X_test is not None: + return covariances_, precisions_, scores_ + return covariances_, precisions_ + + +class GraphicalLassoCV(_RoutingNotSupportedMixin, BaseGraphicalLasso): + """Sparse inverse covariance w/ cross-validated choice of the l1 penalty. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + GraphLassoCV has been renamed to GraphicalLassoCV + + Parameters + ---------- + alphas : int or array-like of shape (n_alphas,), dtype=float, default=4 + If an integer is given, it fixes the number of points on the + grids of alpha to be used. If a list is given, it gives the + grid to be used. See the notes in the class docstring for + more details. Range is [1, inf) for an integer. + Range is (0, inf] for an array-like of floats. + + n_refinements : int, default=4 + The number of times the grid is refined. Not used if explicit + values of alphas are passed. Range is [1, inf). + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.20 + ``cv`` default value if None changed from 3-fold to 5-fold. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + Maximum number of iterations. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where number of features is greater + than number of samples. Elsewhere prefer cd which is more numerically + stable. 
+ + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + verbose : bool, default=False + If verbose is True, the objective function and duality gap are + printed at each iteration. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated precision matrix (inverse covariance). + + costs_ : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + .. versionadded:: 1.3 + + alpha_ : float + Penalization parameter selected. + + cv_results_ : dict of ndarrays + A dict with keys: + + alphas : ndarray of shape (n_alphas,) + All penalization parameters explored. + + split(k)_test_score : ndarray of shape (n_alphas,) + Log-likelihood score on left-out data across (k)th fold. + + .. versionadded:: 1.0 + + mean_test_score : ndarray of shape (n_alphas,) + Mean of scores over the folds. + + .. versionadded:: 1.0 + + std_test_score : ndarray of shape (n_alphas,) + Standard deviation of scores over the folds. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + graphical_lasso : L1-penalized covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + + Notes + ----- + The search for the optimal penalization parameter (`alpha`) is done on an + iteratively refined grid: first the cross-validated scores on a grid are + computed, then a new refined grid is centered around the maximum, and so + on. + + One of the challenges which is faced here is that the solvers can + fail to converge to a well-conditioned estimate. The corresponding + values of `alpha` then come out as missing values, but the optimum may + be close to these missing values. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import GraphicalLassoCV + >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.2, 0.0, 0.3, 0.1], + ... [0.0, 0.0, 0.1, 0.7]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0], + ... cov=true_cov, + ... 
size=200) + >>> cov = GraphicalLassoCV().fit(X) + >>> np.around(cov.covariance_, decimals=3) + array([[0.816, 0.051, 0.22 , 0.017], + [0.051, 0.364, 0.018, 0.036], + [0.22 , 0.018, 0.322, 0.094], + [0.017, 0.036, 0.094, 0.69 ]]) + >>> np.around(cov.location_, decimals=3) + array([0.073, 0.04 , 0.038, 0.143]) + """ + + _parameter_constraints: dict = { + **BaseGraphicalLasso._parameter_constraints, + "alphas": [Interval(Integral, 0, None, closed="left"), "array-like"], + "n_refinements": [Interval(Integral, 1, None, closed="left")], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + } + + def __init__( + self, + *, + alphas=4, + n_refinements=4, + cv=None, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + mode="cd", + n_jobs=None, + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__( + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + mode=mode, + verbose=verbose, + eps=eps, + assume_centered=assume_centered, + ) + self.alphas = alphas + self.n_refinements = n_refinements + self.cv = cv + self.n_jobs = n_jobs + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the GraphicalLasso covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Covariance does not make sense for a single feature + X = self._validate_data(X, ensure_min_features=2) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) + + cv = check_cv(self.cv, y, classifier=False) + + # List of (alpha, scores, covs) + path = list() + n_alphas = self.alphas + inner_verbose = max(0, self.verbose - 1) + + if _is_arraylike_not_scalar(n_alphas): + for alpha in self.alphas: + check_scalar( + alpha, + "alpha", + Real, + min_val=0, + max_val=np.inf, + include_boundaries="right", + ) + alphas = self.alphas + n_refinements = 1 + else: + n_refinements = self.n_refinements + alpha_1 = alpha_max(emp_cov) + alpha_0 = 1e-2 * alpha_1 + alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1] + + t0 = time.time() + for i in range(n_refinements): + with warnings.catch_warnings(): + # No need to see the convergence warnings on this grid: + # they will always be points that will not converge + # during the cross-validation + warnings.simplefilter("ignore", ConvergenceWarning) + # Compute the cross-validated loss on the current grid + + # NOTE: Warm-restarting graphical_lasso_path has been tried, + # and this did not allow to gain anything + # (same execution time with or without). 
+ this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(graphical_lasso_path)( + X[train], + alphas=alphas, + X_test=X[test], + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=int(0.1 * self.max_iter), + verbose=inner_verbose, + eps=self.eps, + ) + for train, test in cv.split(X, y) + ) + + # Little danse to transform the list in what we need + covs, _, scores = zip(*this_path) + covs = zip(*covs) + scores = zip(*scores) + path.extend(zip(alphas, scores, covs)) + path = sorted(path, key=operator.itemgetter(0), reverse=True) + + # Find the maximum (avoid using built in 'max' function to + # have a fully-reproducible selection of the smallest alpha + # in case of equality) + best_score = -np.inf + last_finite_idx = 0 + for index, (alpha, scores, _) in enumerate(path): + this_score = np.mean(scores) + if this_score >= 0.1 / np.finfo(np.float64).eps: + this_score = np.nan + if np.isfinite(this_score): + last_finite_idx = index + if this_score >= best_score: + best_score = this_score + best_index = index + + # Refine the grid + if best_index == 0: + # We do not need to go back: we have chosen + # the highest value of alpha for which there are + # non-zero coefficients + alpha_1 = path[0][0] + alpha_0 = path[1][0] + elif best_index == last_finite_idx and not best_index == len(path) - 1: + # We have non-converged models on the upper bound of the + # grid, we need to refine the grid there + alpha_1 = path[best_index][0] + alpha_0 = path[best_index + 1][0] + elif best_index == len(path) - 1: + alpha_1 = path[best_index][0] + alpha_0 = 0.01 * path[best_index][0] + else: + alpha_1 = path[best_index - 1][0] + alpha_0 = path[best_index + 1][0] + + if not _is_arraylike_not_scalar(n_alphas): + alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2) + alphas = alphas[1:-1] + + if self.verbose and n_refinements > 1: + print( + "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is" + % (i + 1, n_refinements, time.time() - t0) + ) + + path = list(zip(*path)) + grid_scores = list(path[1]) + alphas = list(path[0]) + # Finally, compute the score with alpha = 0 + alphas.append(0) + grid_scores.append( + cross_val_score( + EmpiricalCovariance(), + X, + cv=cv, + n_jobs=self.n_jobs, + verbose=inner_verbose, + ) + ) + grid_scores = np.array(grid_scores) + + self.cv_results_ = {"alphas": np.array(alphas)} + + for i in range(grid_scores.shape[1]): + self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i] + + self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1) + self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1) + + best_alpha = alphas[best_index] + self.alpha_ = best_alpha + + # Finally fit the model with the selected alpha + self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( + emp_cov, + alpha=best_alpha, + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=self.max_iter, + verbose=inner_verbose, + eps=self.eps, + ) + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..c90e855ca67681984a6bc4186ca1cb2e7b9fff59 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py @@ -0,0 +1,868 @@ +""" +Robust location and covariance estimators. + +Here are implemented estimators that are resistant to outliers. 
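As an illustrative sketch (toy, deliberately contaminated data) of what "resistant to outliers" means in practice, comparing the classical estimate with the robust MCD-based one defined below:

import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[1.0, 0.3], [0.3, 1.0]], size=300)
X[:15] += 10.0  # contaminate 5% of the observations with gross outliers

print(EmpiricalCovariance().fit(X).location_)      # pulled toward the outliers
print(MinCovDet(random_state=0).fit(X).location_)  # stays near the true mean [0, 0]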
+ +""" +# Author: Virgile Fritsch +# +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.stats import chi2 + +from ..base import _fit_context +from ..utils import check_array, check_random_state +from ..utils._param_validation import Interval +from ..utils.extmath import fast_logdet +from ._empirical_covariance import EmpiricalCovariance, empirical_covariance + + +# Minimum Covariance Determinant +# Implementing of an algorithm by Rousseeuw & Van Driessen described in +# (A Fast Algorithm for the Minimum Covariance Determinant Estimator, +# 1999, American Statistical Association and the American Society +# for Quality, TECHNOMETRICS) +# XXX Is this really a public function? It's not listed in the docs or +# exported by sklearn.covariance. Deprecate? +def c_step( + X, + n_support, + remaining_iterations=30, + initial_estimates=None, + verbose=False, + cov_computation_method=empirical_covariance, + random_state=None, +): + """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data set in which we look for the n_support observations whose + scatter matrix has minimum determinant. + + n_support : int + Number of observations to compute the robust estimates of location + and covariance from. This parameter must be greater than + `n_samples / 2`. + + remaining_iterations : int, default=30 + Number of iterations to perform. + According to [Rouseeuw1999]_, two iterations are sufficient to get + close to the minimum, and we never need more than 30 to reach + convergence. + + initial_estimates : tuple of shape (2,), default=None + Initial estimates of location and shape from which to run the c_step + procedure: + - initial_estimates[0]: an initial location estimate + - initial_estimates[1]: an initial covariance estimate + + verbose : bool, default=False + Verbose mode. + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + location : ndarray of shape (n_features,) + Robust location estimates. + + covariance : ndarray of shape (n_features, n_features) + Robust covariance estimates. + + support : ndarray of shape (n_samples,) + A mask for the `n_support` observations whose scatter matrix has + minimum determinant. + + References + ---------- + .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + """ + X = np.asarray(X) + random_state = check_random_state(random_state) + return _c_step( + X, + n_support, + remaining_iterations=remaining_iterations, + initial_estimates=initial_estimates, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + + +def _c_step( + X, + n_support, + random_state, + remaining_iterations=30, + initial_estimates=None, + verbose=False, + cov_computation_method=empirical_covariance, +): + n_samples, n_features = X.shape + dist = np.inf + + # Initialisation + support = np.zeros(n_samples, dtype=bool) + if initial_estimates is None: + # compute initial robust estimates from a random subset + support[random_state.permutation(n_samples)[:n_support]] = True + else: + # get initial robust estimates from the function parameters + location = initial_estimates[0] + covariance = initial_estimates[1] + # run a special iteration for that case (to get an initial support) + precision = linalg.pinvh(covariance) + X_centered = X - location + dist = (np.dot(X_centered, precision) * X_centered).sum(1) + # compute new estimates + support[np.argsort(dist)[:n_support]] = True + + X_support = X[support] + location = X_support.mean(0) + covariance = cov_computation_method(X_support) + + # Iterative procedure for Minimum Covariance Determinant computation + det = fast_logdet(covariance) + # If the data already has singular covariance, calculate the precision, + # as the loop below will not be entered. + if np.isinf(det): + precision = linalg.pinvh(covariance) + + previous_det = np.inf + while det < previous_det and remaining_iterations > 0 and not np.isinf(det): + # save old estimates values + previous_location = location + previous_covariance = covariance + previous_det = det + previous_support = support + # compute a new support from the full data set mahalanobis distances + precision = linalg.pinvh(covariance) + X_centered = X - location + dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) + # compute new estimates + support = np.zeros(n_samples, dtype=bool) + support[np.argsort(dist)[:n_support]] = True + X_support = X[support] + location = X_support.mean(axis=0) + covariance = cov_computation_method(X_support) + det = fast_logdet(covariance) + # update remaining iterations for early stopping + remaining_iterations -= 1 + + previous_dist = dist + dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1) + # Check if best fit already found (det => 0, logdet => -inf) + if np.isinf(det): + results = location, covariance, det, support, dist + # Check convergence + if np.allclose(det, previous_det): + # c_step procedure converged + if verbose: + print( + "Optimal couple (location, covariance) found before" + " ending iterations (%d left)" % (remaining_iterations) + ) + results = location, covariance, det, support, dist + elif det > previous_det: + # determinant has increased (should not happen) + warnings.warn( + "Determinant has increased; this should not happen: " + "log(det) > log(previous_det) (%.15f > %.15f). " + "You may want to try with a higher value of " + "support_fraction (current value: %.3f)." 
+ % (det, previous_det, n_support / n_samples), + RuntimeWarning, + ) + results = ( + previous_location, + previous_covariance, + previous_det, + previous_support, + previous_dist, + ) + + # Check early stopping + if remaining_iterations == 0: + if verbose: + print("Maximum number of iterations reached") + results = location, covariance, det, support, dist + + return results + + +def select_candidates( + X, + n_support, + n_trials, + select=1, + n_iter=30, + verbose=False, + cov_computation_method=empirical_covariance, + random_state=None, +): + """Finds the best pure subset of observations to compute MCD from it. + + The purpose of this function is to find the best sets of n_support + observations with respect to a minimization of their covariance + matrix determinant. Equivalently, it removes n_samples-n_support + observations to construct what we call a pure data set (i.e. not + containing outliers). The list of the observations of the pure + data set is referred to as the `support`. + + Starting from a random support, the pure data set is found by the + c_step procedure introduced by Rousseeuw and Van Driessen in + [RV]_. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data (sub)set in which we look for the n_support purest observations. + + n_support : int + The number of samples the pure data set must contain. + This parameter must be in the range `[(n + p + 1)/2] < n_support < n`. + + n_trials : int or tuple of shape (2,) + Number of different initial sets of observations from which to + run the algorithm. This parameter should be a strictly positive + integer. + Instead of giving a number of trials to perform, one can provide a + list of initial estimates that will be used to iteratively run + c_step procedures. In this case: + - n_trials[0]: array-like, shape (n_trials, n_features) + is the list of `n_trials` initial location estimates + - n_trials[1]: array-like, shape (n_trials, n_features, n_features) + is the list of `n_trials` initial covariances estimates + + select : int, default=1 + Number of best candidates results to return. This parameter must be + a strictly positive integer. + + n_iter : int, default=30 + Maximum number of iterations for the c_step procedure. + (2 is enough to be close to the final solution. "Never" exceeds 20). + This parameter must be a strictly positive integer. + + verbose : bool, default=False + Control the output verbosity. + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return an array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + See Also + --------- + c_step + + Returns + ------- + best_locations : ndarray of shape (select, n_features) + The `select` location estimates computed from the `select` best + supports found in the data set (`X`). + + best_covariances : ndarray of shape (select, n_features, n_features) + The `select` covariance estimates computed from the `select` + best supports found in the data set (`X`). + + best_supports : ndarray of shape (select, n_samples) + The `select` best supports found in the data set (`X`). + + References + ---------- + .. 
[RV] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + """ + random_state = check_random_state(random_state) + + if isinstance(n_trials, Integral): + run_from_estimates = False + elif isinstance(n_trials, tuple): + run_from_estimates = True + estimates_list = n_trials + n_trials = estimates_list[0].shape[0] + else: + raise TypeError( + "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)" + % (n_trials, type(n_trials)) + ) + + # compute `n_trials` location and shape estimates candidates in the subset + all_estimates = [] + if not run_from_estimates: + # perform `n_trials` computations from random initial supports + for j in range(n_trials): + all_estimates.append( + _c_step( + X, + n_support, + remaining_iterations=n_iter, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + ) + else: + # perform computations from every given initial estimates + for j in range(n_trials): + initial_estimates = (estimates_list[0][j], estimates_list[1][j]) + all_estimates.append( + _c_step( + X, + n_support, + remaining_iterations=n_iter, + initial_estimates=initial_estimates, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + ) + all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip( + *all_estimates + ) + # find the `n_best` best results among the `n_trials` ones + index_best = np.argsort(all_dets_sub)[:select] + best_locations = np.asarray(all_locs_sub)[index_best] + best_covariances = np.asarray(all_covs_sub)[index_best] + best_supports = np.asarray(all_supports_sub)[index_best] + best_ds = np.asarray(all_ds_sub)[index_best] + + return best_locations, best_covariances, best_supports, best_ds + + +def fast_mcd( + X, + support_fraction=None, + cov_computation_method=empirical_covariance, + random_state=None, +): + """Estimate the Minimum Covariance Determinant matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. Default is `None`, which implies that the minimum + value of `support_fraction` will be used within the algorithm: + `(n_samples + n_features + 1) / 2 * n_samples`. This parameter must be + in the range (0, 1). + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return an array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + location : ndarray of shape (n_features,) + Robust location of the data. + + covariance : ndarray of shape (n_features, n_features) + Robust covariance of the features. + + support : ndarray of shape (n_samples,), dtype=bool + A mask of the observations that have been used to compute + the robust location and covariance estimates of the data set. 
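As an illustrative sketch (toy data) of calling fast_mcd directly; as described in the Notes below, only the raw estimates are returned, without the correction and re-weighting steps applied by MinCovDet:

import numpy as np
from sklearn.covariance import fast_mcd

rng = np.random.RandomState(42)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=200)

location, covariance, support, dist = fast_mcd(X, random_state=0)
print(location.shape, covariance.shape)  # (2,), (2, 2)
print(support.sum())                     # number of observations in the raw support
print(dist.shape)                        # (200,): Mahalanobis distances of each sample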
+ + Notes + ----- + The FastMCD algorithm has been introduced by Rousseuw and Van Driessen + in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, + 1999, American Statistical Association and the American Society + for Quality, TECHNOMETRICS". + The principle is to compute robust estimates and random subsets before + pooling them into a larger subsets, and finally into the full data set. + Depending on the size of the initial sample, we have one, two or three + such computation levels. + + Note that only raw estimates are returned. If one is interested in + the correction and reweighting steps described in [RouseeuwVan]_, + see the MinCovDet object. + + References + ---------- + + .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + + .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, + Asymptotics For The Minimum Covariance Determinant Estimator, + The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 + """ + random_state = check_random_state(random_state) + + X = check_array(X, ensure_min_samples=2, estimator="fast_mcd") + n_samples, n_features = X.shape + + # minimum breakdown value + if support_fraction is None: + n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) + else: + n_support = int(support_fraction * n_samples) + + # 1-dimensional case quick computation + # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust + # Regression and Outlier Detection, John Wiley & Sons, chapter 4) + if n_features == 1: + if n_support < n_samples: + # find the sample shortest halves + X_sorted = np.sort(np.ravel(X)) + diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)] + halves_start = np.where(diff == np.min(diff))[0] + # take the middle points' mean to get the robust location estimate + location = ( + 0.5 + * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() + ) + support = np.zeros(n_samples, dtype=bool) + X_centered = X - location + support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True + covariance = np.asarray([[np.var(X[support])]]) + location = np.array([location]) + # get precision matrix in an optimized way + precision = linalg.pinvh(covariance) + dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) + else: + support = np.ones(n_samples, dtype=bool) + covariance = np.asarray([[np.var(X)]]) + location = np.asarray([np.mean(X)]) + X_centered = X - location + # get precision matrix in an optimized way + precision = linalg.pinvh(covariance) + dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) + # Starting FastMCD algorithm for p-dimensional case + if (n_samples > 500) and (n_features > 1): + # 1. Find candidate supports on subsets + # a. split the set in subsets of size ~ 300 + n_subsets = n_samples // 300 + n_samples_subsets = n_samples // n_subsets + samples_shuffle = random_state.permutation(n_samples) + h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) + # b. perform a total of 500 trials + n_trials_tot = 500 + # c. select 10 best (location, covariance) for each subset + n_best_sub = 10 + n_trials = max(10, n_trials_tot // n_subsets) + n_best_tot = n_subsets * n_best_sub + all_best_locations = np.zeros((n_best_tot, n_features)) + try: + all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) + except MemoryError: + # The above is too big. 
Let's try with something much small + # (and less optimal) + n_best_tot = 10 + all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) + n_best_sub = 2 + for i in range(n_subsets): + low_bound = i * n_samples_subsets + high_bound = low_bound + n_samples_subsets + current_subset = X[samples_shuffle[low_bound:high_bound]] + best_locations_sub, best_covariances_sub, _, _ = select_candidates( + current_subset, + h_subset, + n_trials, + select=n_best_sub, + n_iter=2, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) + all_best_locations[subset_slice] = best_locations_sub + all_best_covariances[subset_slice] = best_covariances_sub + # 2. Pool the candidate supports into a merged set + # (possibly the full dataset) + n_samples_merged = min(1500, n_samples) + h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) + if n_samples > 1500: + n_best_merged = 10 + else: + n_best_merged = 1 + # find the best couples (location, covariance) on the merged set + selection = random_state.permutation(n_samples)[:n_samples_merged] + locations_merged, covariances_merged, supports_merged, d = select_candidates( + X[selection], + h_merged, + n_trials=(all_best_locations, all_best_covariances), + select=n_best_merged, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + # 3. Finally get the overall best (locations, covariance) couple + if n_samples < 1500: + # directly get the best couple (location, covariance) + location = locations_merged[0] + covariance = covariances_merged[0] + support = np.zeros(n_samples, dtype=bool) + dist = np.zeros(n_samples) + support[selection] = supports_merged[0] + dist[selection] = d[0] + else: + # select the best couple on the full dataset + locations_full, covariances_full, supports_full, d = select_candidates( + X, + n_support, + n_trials=(locations_merged, covariances_merged), + select=1, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + location = locations_full[0] + covariance = covariances_full[0] + support = supports_full[0] + dist = d[0] + elif n_features > 1: + # 1. Find the 10 best couples (location, covariance) + # considering two iterations + n_trials = 30 + n_best = 10 + locations_best, covariances_best, _, _ = select_candidates( + X, + n_support, + n_trials=n_trials, + select=n_best, + n_iter=2, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + # 2. Select the best couple on the full dataset amongst the 10 + locations_full, covariances_full, supports_full, d = select_candidates( + X, + n_support, + n_trials=(locations_best, covariances_best), + select=1, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + location = locations_full[0] + covariance = covariances_full[0] + support = supports_full[0] + dist = d[0] + + return location, covariance, support, dist + + +class MinCovDet(EmpiricalCovariance): + """Minimum Covariance Determinant (MCD): robust estimator of covariance. + + The Minimum Covariance Determinant covariance estimator is to be applied + on Gaussian-distributed data, but could still be relevant on data + drawn from a unimodal, symmetric distribution. It is not meant to be used + with multi-modal data (the algorithm used to fit a MinCovDet object is + likely to fail in such a case). + One should consider projection pursuit methods to deal with multi-modal + datasets. 
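+
+    As a brief, illustrative sketch (not part of the upstream docstring;
+    the contaminated toy data below is assumed), the MCD fit is far less
+    sensitive to a few gross outliers than the maximum likelihood estimate
+    of :class:`EmpiricalCovariance`:
+
+    >>> import numpy as np
+    >>> from sklearn.covariance import EmpiricalCovariance, MinCovDet
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.multivariate_normal(mean=[0, 0], cov=[[.8, .3], [.3, .4]],
+    ...                             size=500)
+    >>> X[:25] += 10.0  # plant 5% gross outliers
+    >>> mle = EmpiricalCovariance().fit(X)
+    >>> mcd = MinCovDet(random_state=0).fit(X)
+    >>> # the robust location stays near the true mean (0, 0), while the
+    >>> # maximum likelihood location is pulled towards the outliers
+    >>> bool(np.abs(mcd.location_).max() < np.abs(mle.location_).max())
+    True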
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, the support of the robust location and the covariance + estimates is computed, and a covariance estimate is recomputed from + it, without centering the data. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, the robust location and covariance are directly computed + with the FastMCD algorithm without additional treatment. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. Default is None, which implies that the minimum + value of support_fraction will be used within the algorithm: + `(n_samples + n_features + 1) / 2 * n_samples`. The parameter must be + in the range (0, 1]. + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + raw_location_ : ndarray of shape (n_features,) + The raw robust estimated location before correction and re-weighting. + + raw_covariance_ : ndarray of shape (n_features, n_features) + The raw robust estimated covariance before correction and re-weighting. + + raw_support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the raw robust estimates of location and shape, before correction + and re-weighting. + + location_ : ndarray of shape (n_features,) + Estimated robust location. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated robust covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the robust estimates of location and shape. + + dist_ : ndarray of shape (n_samples,) + Mahalanobis distances of the training set (on which :meth:`fit` is + called) observations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + References + ---------- + + .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression. + J. Am Stat Ass, 79:871, 1984. + .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun, + Asymptotics For The Minimum Covariance Determinant Estimator, + The Annals of Statistics, 1993, Vol. 21, No. 
3, 1385-1400 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import MinCovDet + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=500) + >>> cov = MinCovDet(random_state=0).fit(X) + >>> cov.covariance_ + array([[0.7411..., 0.2535...], + [0.2535..., 0.3053...]]) + >>> cov.location_ + array([0.0813... , 0.0427...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "support_fraction": [Interval(Real, 0, 1, closed="right"), None], + "random_state": ["random_state"], + } + _nonrobust_covariance = staticmethod(empirical_covariance) + + def __init__( + self, + *, + store_precision=True, + assume_centered=False, + support_fraction=None, + random_state=None, + ): + self.store_precision = store_precision + self.assume_centered = assume_centered + self.support_fraction = support_fraction + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit a Minimum Covariance Determinant with the FastMCD algorithm. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet") + random_state = check_random_state(self.random_state) + n_samples, n_features = X.shape + # check that the empirical covariance is full rank + if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: + warnings.warn( + "The covariance matrix associated to your dataset is not full rank" + ) + # compute and store raw estimates + raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( + X, + support_fraction=self.support_fraction, + cov_computation_method=self._nonrobust_covariance, + random_state=random_state, + ) + if self.assume_centered: + raw_location = np.zeros(n_features) + raw_covariance = self._nonrobust_covariance( + X[raw_support], assume_centered=True + ) + # get precision matrix in an optimized way + precision = linalg.pinvh(raw_covariance) + raw_dist = np.sum(np.dot(X, precision) * X, 1) + self.raw_location_ = raw_location + self.raw_covariance_ = raw_covariance + self.raw_support_ = raw_support + self.location_ = raw_location + self.support_ = raw_support + self.dist_ = raw_dist + # obtain consistency at normal models + self.correct_covariance(X) + # re-weight estimator + self.reweight_covariance(X) + + return self + + def correct_covariance(self, data): + """Apply a correction to raw Minimum Covariance Determinant estimates. + + Correction using the empirical correction factor suggested + by Rousseeuw and Van Driessen in [RVD]_. + + Parameters + ---------- + data : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + The data set must be the one which was used to compute + the raw estimates. + + Returns + ------- + covariance_corrected : ndarray of shape (n_features, n_features) + Corrected robust covariance estimate. + + References + ---------- + + .. 
[RVD] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + """ + + # Check that the covariance of the support data is not equal to 0. + # Otherwise self.dist_ = 0 and thus correction = 0. + n_samples = len(self.dist_) + n_support = np.sum(self.support_) + if n_support < n_samples and np.allclose(self.raw_covariance_, 0): + raise ValueError( + "The covariance matrix of the support data " + "is equal to 0, try to increase support_fraction" + ) + correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) + covariance_corrected = self.raw_covariance_ * correction + self.dist_ /= correction + return covariance_corrected + + def reweight_covariance(self, data): + """Re-weight raw Minimum Covariance Determinant estimates. + + Re-weight observations using Rousseeuw's method (equivalent to + deleting outlying observations from the data set before + computing location and covariance estimates) described + in [RVDriessen]_. + + Parameters + ---------- + data : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + The data set must be the one which was used to compute + the raw estimates. + + Returns + ------- + location_reweighted : ndarray of shape (n_features,) + Re-weighted robust location estimate. + + covariance_reweighted : ndarray of shape (n_features, n_features) + Re-weighted robust covariance estimate. + + support_reweighted : ndarray of shape (n_samples,), dtype=bool + A mask of the observations that have been used to compute + the re-weighted robust location and covariance estimates. + + References + ---------- + + .. [RVDriessen] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + """ + n_samples, n_features = data.shape + mask = self.dist_ < chi2(n_features).isf(0.025) + if self.assume_centered: + location_reweighted = np.zeros(n_features) + else: + location_reweighted = data[mask].mean(0) + covariance_reweighted = self._nonrobust_covariance( + data[mask], assume_centered=self.assume_centered + ) + support_reweighted = np.zeros(n_samples, dtype=bool) + support_reweighted[mask] = True + self._set_covariance(covariance_reweighted) + self.location_ = location_reweighted + self.support_ = support_reweighted + X_centered = data - self.location_ + self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1) + return location_reweighted, covariance_reweighted, support_reweighted diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..2c8248d0f65025b3cd5f1e4e2c969c4b4fa9bf91 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py @@ -0,0 +1,816 @@ +""" +Covariance estimators using shrinkage. + +Shrinkage corresponds to regularising `cov` using a convex combination: +shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate. + +""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Virgile Fritsch +# +# License: BSD 3 clause + +# avoid division truncation +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import _fit_context +from ..utils import check_array +from ..utils._param_validation import Interval, validate_params +from . 
import EmpiricalCovariance, empirical_covariance + + +def _ledoit_wolf(X, *, assume_centered, block_size): + """Estimate the shrunk Ledoit-Wolf covariance matrix.""" + # for only one feature, the result is the same whatever the shrinkage + if len(X.shape) == 2 and X.shape[1] == 1: + if not assume_centered: + X = X - X.mean() + return np.atleast_2d((X**2).mean()), 0.0 + n_features = X.shape[1] + + # get Ledoit-Wolf shrinkage + shrinkage = ledoit_wolf_shrinkage( + X, assume_centered=assume_centered, block_size=block_size + ) + emp_cov = empirical_covariance(X, assume_centered=assume_centered) + mu = np.sum(np.trace(emp_cov)) / n_features + shrunk_cov = (1.0 - shrinkage) * emp_cov + shrunk_cov.flat[:: n_features + 1] += shrinkage * mu + + return shrunk_cov, shrinkage + + +def _oas(X, *, assume_centered=False): + """Estimate covariance with the Oracle Approximating Shrinkage algorithm. + + The formulation is based on [1]_. + [1] "Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. + https://arxiv.org/pdf/0907.4698.pdf + """ + if len(X.shape) == 2 and X.shape[1] == 1: + # for only one feature, the result is the same whatever the shrinkage + if not assume_centered: + X = X - X.mean() + return np.atleast_2d((X**2).mean()), 0.0 + + n_samples, n_features = X.shape + + emp_cov = empirical_covariance(X, assume_centered=assume_centered) + + # The shrinkage is defined as: + # shrinkage = min( + # trace(S @ S.T) + trace(S)**2) / ((n + 1) (trace(S @ S.T) - trace(S)**2 / p), 1 + # ) + # where n and p are n_samples and n_features, respectively (cf. Eq. 23 in [1]). + # The factor 2 / p is omitted since it does not impact the value of the estimator + # for large p. + + # Instead of computing trace(S)**2, we can compute the average of the squared + # elements of S that is equal to trace(S)**2 / p**2. + # See the definition of the Frobenius norm: + # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm + alpha = np.mean(emp_cov**2) + mu = np.trace(emp_cov) / n_features + mu_squared = mu**2 + + # The factor 1 / p**2 will cancel out since it is in both the numerator and + # denominator + num = alpha + mu_squared + den = (n_samples + 1) * (alpha - mu_squared / n_features) + shrinkage = 1.0 if den == 0 else min(num / den, 1.0) + + # The shrunk covariance is defined as: + # (1 - shrinkage) * S + shrinkage * F (cf. Eq. 4 in [1]) + # where S is the empirical covariance and F is the shrinkage target defined as + # F = trace(S) / n_features * np.identity(n_features) (cf. Eq. 3 in [1]) + shrunk_cov = (1.0 - shrinkage) * emp_cov + shrunk_cov.flat[:: n_features + 1] += shrinkage * mu + + return shrunk_cov, shrinkage + + +############################################################################### +# Public API +# ShrunkCovariance estimator + + +@validate_params( + { + "emp_cov": ["array-like"], + "shrinkage": [Interval(Real, 0, 1, closed="both")], + }, + prefer_skip_nested_validation=True, +) +def shrunk_covariance(emp_cov, shrinkage=0.1): + """Calculate covariance matrices shrunk on the diagonal. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + emp_cov : array-like of shape (..., n_features, n_features) + Covariance matrices to be shrunk, at least 2D ndarray. + + shrinkage : float, default=0.1 + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. 
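+
+    As a quick, illustrative check (not part of the upstream docstring;
+    the toy covariance below is assumed), the result equals the convex
+    combination spelled out in the Notes below when computed by hand:
+
+    >>> import numpy as np
+    >>> from sklearn.covariance import shrunk_covariance
+    >>> cov = np.array([[.8, .3], [.3, .4]])
+    >>> shrinkage = 0.1
+    >>> mu = np.trace(cov) / cov.shape[-1]
+    >>> by_hand = (1 - shrinkage) * cov + shrinkage * mu * np.eye(2)
+    >>> np.allclose(shrunk_covariance(cov, shrinkage), by_hand)
+    True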
+ + Returns + ------- + shrunk_cov : ndarray of shape (..., n_features, n_features) + Shrunk covariance matrices. + + Notes + ----- + The regularized (shrunk) covariance is given by:: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where `mu = trace(cov) / n_features`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_gaussian_quantiles + >>> from sklearn.covariance import empirical_covariance, shrunk_covariance + >>> real_cov = np.array([[.8, .3], [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) + >>> shrunk_covariance(empirical_covariance(X)) + array([[0.73..., 0.25...], + [0.25..., 0.41...]]) + """ + emp_cov = check_array(emp_cov, allow_nd=True) + n_features = emp_cov.shape[-1] + + shrunk_cov = (1.0 - shrinkage) * emp_cov + mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features + mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim))) + shrunk_cov += shrinkage * mu * np.eye(n_features) + + return shrunk_cov + + +class ShrunkCovariance(EmpiricalCovariance): + """Covariance estimator with shrinkage. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data will be centered before computation. + + shrinkage : float, default=0.1 + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + + Notes + ----- + The regularized covariance is given by: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import ShrunkCovariance + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... 
size=500) + >>> cov = ShrunkCovariance().fit(X) + >>> cov.covariance_ + array([[0.7387..., 0.2536...], + [0.2536..., 0.4110...]]) + >>> cov.location_ + array([0.0622..., 0.0193...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "shrinkage": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1): + super().__init__( + store_precision=store_precision, assume_centered=assume_centered + ) + self.shrinkage = shrinkage + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the shrunk covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X) + # Not calling the parent object to fit, to avoid a potential + # matrix inversion when setting the precision + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance = empirical_covariance(X, assume_centered=self.assume_centered) + covariance = shrunk_covariance(covariance, self.shrinkage) + self._set_covariance(covariance) + + return self + + +# Ledoit-Wolf estimator + + +@validate_params( + { + "X": ["array-like"], + "assume_centered": ["boolean"], + "block_size": [Interval(Integral, 1, None, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000): + """Estimate the shrunk Ledoit-Wolf covariance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split. + + Returns + ------- + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. + + Notes + ----- + The regularized (shrunk) covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import ledoit_wolf_shrinkage + >>> real_cov = np.array([[.4, .2], [.2, .8]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) + >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X) + >>> shrinkage_coefficient + 0.23... + """ + X = check_array(X) + # for only one feature, the result is the same whatever the shrinkage + if len(X.shape) == 2 and X.shape[1] == 1: + return 0.0 + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + + if X.shape[0] == 1: + warnings.warn( + "Only one sample available. 
You may want to reshape your data array" + ) + n_samples, n_features = X.shape + + # optionally center data + if not assume_centered: + X = X - X.mean(0) + + # A non-blocked version of the computation is present in the tests + # in tests/test_covariance.py + + # number of blocks to split the covariance matrix into + n_splits = int(n_features / block_size) + X2 = X**2 + emp_cov_trace = np.sum(X2, axis=0) / n_samples + mu = np.sum(emp_cov_trace) / n_features + beta_ = 0.0 # sum of the coefficients of + delta_ = 0.0 # sum of the *squared* coefficients of + # starting block computation + for i in range(n_splits): + for j in range(n_splits): + rows = slice(block_size * i, block_size * (i + 1)) + cols = slice(block_size * j, block_size * (j + 1)) + beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols])) + delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2) + rows = slice(block_size * i, block_size * (i + 1)) + beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :])) + delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2) + for j in range(n_splits): + cols = slice(block_size * j, block_size * (j + 1)) + beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols])) + delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2) + delta_ += np.sum( + np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2 + ) + delta_ /= n_samples**2 + beta_ += np.sum( + np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :]) + ) + # use delta_ to compute beta + beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_) + # delta is the sum of the squared coefficients of ( - mu*Id) / p + delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2 + delta /= n_features + # get final beta as the min between beta and delta + # We do this to prevent shrinking more than "1", which would invert + # the value of covariances + beta = min(beta, delta) + # finally get shrinkage + shrinkage = 0 if beta == 0 else beta / delta + return shrinkage + + +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def ledoit_wolf(X, *, assume_centered=False, block_size=1000): + """Estimate the shrunk Ledoit-Wolf covariance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split. + This is purely a memory optimization and does not affect results. + + Returns + ------- + shrunk_cov : ndarray of shape (n_features, n_features) + Shrunk covariance. + + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. 
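+
+    As an illustrative consistency check (not part of the upstream
+    docstring; the toy data below is assumed), the returned pair agrees
+    with the other helpers in this module: the coefficient matches
+    :func:`ledoit_wolf_shrinkage` and the covariance matches the convex
+    combination applied to the empirical covariance:
+
+    >>> import numpy as np
+    >>> from sklearn.covariance import (empirical_covariance, ledoit_wolf,
+    ...                                 ledoit_wolf_shrinkage,
+    ...                                 shrunk_covariance)
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.multivariate_normal(mean=[0, 0], cov=[[.4, .2], [.2, .8]],
+    ...                             size=50)
+    >>> shrunk_cov, shrinkage = ledoit_wolf(X)
+    >>> np.allclose(shrinkage, ledoit_wolf_shrinkage(X))
+    True
+    >>> np.allclose(shrunk_cov,
+    ...             shrunk_covariance(empirical_covariance(X), shrinkage))
+    True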
+ + Notes + ----- + The regularized (shrunk) covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import empirical_covariance, ledoit_wolf + >>> real_cov = np.array([[.4, .2], [.2, .8]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) + >>> covariance, shrinkage = ledoit_wolf(X) + >>> covariance + array([[0.44..., 0.16...], + [0.16..., 0.80...]]) + >>> shrinkage + 0.23... + """ + estimator = LedoitWolf( + assume_centered=assume_centered, + block_size=block_size, + store_precision=False, + ).fit(X) + + return estimator.covariance_, estimator.shrinkage_ + + +class LedoitWolf(EmpiricalCovariance): + """LedoitWolf Estimator. + + Ledoit-Wolf is a particular form of shrinkage, where the shrinkage + coefficient is computed using O. Ledoit and M. Wolf's formula as + described in "A Well-Conditioned Estimator for Large-Dimensional + Covariance Matrices", Ledoit and Wolf, Journal of Multivariate + Analysis, Volume 88, Issue 2, February 2004, pages 365-411. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split + during its Ledoit-Wolf estimation. This is purely a memory + optimization and does not affect results. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + shrinkage_ : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + and shrinkage is given by the Ledoit and Wolf formula (see References) + + References + ---------- + "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices", + Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2, + February 2004, pages 365-411. 
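+
+    As an illustrative check of the ``block_size`` note above (a sketch,
+    not part of the upstream docstring; the toy data below is assumed),
+    two very different block sizes give the same estimate up to floating
+    point, since blocking is only a memory optimization:
+
+    >>> import numpy as np
+    >>> from sklearn.covariance import LedoitWolf
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.multivariate_normal(mean=[0, 0], cov=[[.4, .2], [.2, .8]],
+    ...                             size=50)
+    >>> np.allclose(LedoitWolf(block_size=1000).fit(X).covariance_,
+    ...             LedoitWolf(block_size=1).fit(X).covariance_)
+    True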
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import LedoitWolf + >>> real_cov = np.array([[.4, .2], + ... [.2, .8]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=50) + >>> cov = LedoitWolf().fit(X) + >>> cov.covariance_ + array([[0.4406..., 0.1616...], + [0.1616..., 0.8022...]]) + >>> cov.location_ + array([ 0.0595... , -0.0075...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "block_size": [Interval(Integral, 1, None, closed="left")], + } + + def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000): + super().__init__( + store_precision=store_precision, assume_centered=assume_centered + ) + self.block_size = block_size + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the Ledoit-Wolf shrunk covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Not calling the parent object to fit, to avoid computing the + # covariance matrix (and potentially the precision) + X = self._validate_data(X) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance, shrinkage = _ledoit_wolf( + X - self.location_, assume_centered=True, block_size=self.block_size + ) + self.shrinkage_ = shrinkage + self._set_covariance(covariance) + + return self + + +# OAS estimator +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def oas(X, *, assume_centered=False): + """Estimate covariance with the Oracle Approximating Shrinkage as proposed in [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + Returns + ------- + shrunk_cov : array-like of shape (n_features, n_features) + Shrunk covariance. + + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features), + + where mu = trace(cov) / n_features and shrinkage is given by the OAS formula + (see [1]_). + + The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In + the original article, formula (23) states that 2/p (p being the number of + features) is multiplied by Trace(cov*cov) in both the numerator and + denominator, but this operation is omitted because for a large p, the value + of 2/p is so small that it doesn't affect the value of the estimator. + + References + ---------- + .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. 
+ <0907.4698>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import oas + >>> rng = np.random.RandomState(0) + >>> real_cov = [[.8, .3], [.3, .4]] + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) + >>> shrunk_cov, shrinkage = oas(X) + >>> shrunk_cov + array([[0.7533..., 0.2763...], + [0.2763..., 0.3964...]]) + >>> shrinkage + 0.0195... + """ + estimator = OAS( + assume_centered=assume_centered, + ).fit(X) + return estimator.covariance_, estimator.shrinkage_ + + +class OAS(EmpiricalCovariance): + """Oracle Approximating Shrinkage Estimator as proposed in [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data will be centered before computation. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + shrinkage_ : float + coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features), + + where mu = trace(cov) / n_features and shrinkage is given by the OAS formula + (see [1]_). + + The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In + the original article, formula (23) states that 2/p (p being the number of + features) is multiplied by Trace(cov*cov) in both the numerator and + denominator, but this operation is omitted because for a large p, the value + of 2/p is so small that it doesn't affect the value of the estimator. + + References + ---------- + .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. + <0907.4698>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import OAS + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... 
size=500) + >>> oas = OAS().fit(X) + >>> oas.covariance_ + array([[0.7533..., 0.2763...], + [0.2763..., 0.3964...]]) + >>> oas.precision_ + array([[ 1.7833..., -1.2431... ], + [-1.2431..., 3.3889...]]) + >>> oas.shrinkage_ + 0.0195... + """ + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the Oracle Approximating Shrinkage covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X) + # Not calling the parent object to fit, to avoid computing the + # covariance matrix (and potentially the precision) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + + covariance, shrinkage = _oas(X - self.location_, assume_centered=True) + self.shrinkage_ = shrinkage + self._set_covariance(covariance) + + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a06fa38aa194ffe2bbb233435b3b5ff07224814c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d7493ae9c059f3780cabd3a1d04feb525b2c973 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..395a53b55d8f4f2771ef4ba3cc896669d2d5735b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb3222d4523604aa04f1d9dd1924e3bdf290377b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44d0f902da136043fd3b01d52d01edc8ae262674 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd83ce991d1d04ece224daee6d3899f3056871fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfd0f626537b2bc62cd1f18db42339765014a380 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..783478ee0422ab7009db04b85bb24a3a31375293 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da461964ee40513cadc2dacba9f5246b6f1d873a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3519ecc73e2783fa79012d8894bb035322dafd44 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f39cd3a4615ae9a78c893154a7f6ce21463cca80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90e594e0a79c3965e014d96ac4cd8a08c4f6861e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfe2c699306c3df1eb6aa20e1b4826bee486647c Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cf0dbb155522f17f074ab6da7ab47c422c45dc4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..796a369e6039dd76f4af86e3d41c4f54e8606c45 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e38fea37231894e37fb9e3b9c67ab2ed833a25d8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9211ccf8184777090584a75f834ee830ded299c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py new file mode 100644 index 0000000000000000000000000000000000000000..3f55078c68ed5cf596550eba7e3442ca92a3a28a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py @@ -0,0 +1,857 @@ +""" +Various bayesian regression +""" + +# Authors: V. Michel, F. Pedregosa, A. Gramfort +# License: BSD 3 clause + +import warnings +from math import log +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.linalg import pinvh + +from ..base import RegressorMixin, _fit_context +from ..utils import _safe_indexing +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import fast_logdet +from ..utils.validation import _check_sample_weight +from ._base import LinearModel, _preprocess_data, _rescale_data + + +# TODO(1.5) Remove +def _deprecate_n_iter(n_iter, max_iter): + """Deprecates n_iter in favour of max_iter. Checks if the n_iter has been + used instead of max_iter and generates a deprecation warning if True. + + Parameters + ---------- + n_iter : int, + Value of n_iter attribute passed by the estimator. + + max_iter : int, default=None + Value of max_iter attribute passed by the estimator. + If `None`, it corresponds to `max_iter=300`. + + Returns + ------- + max_iter : int, + Value of max_iter which shall further be used by the estimator. + + Notes + ----- + This function should be completely removed in 1.5. 
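+
+    A minimal sketch of the intended behaviour (illustrative only, not
+    part of the upstream docstring); passing a real `n_iter` additionally
+    emits a ``FutureWarning``, and setting both attributes raises a
+    ``ValueError``:
+
+    >>> _deprecate_n_iter("deprecated", None)  # neither attribute set
+    300
+    >>> _deprecate_n_iter("deprecated", 100)   # only `max_iter` set
+    100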
+ """ + if n_iter != "deprecated": + if max_iter is not None: + raise ValueError( + "Both `n_iter` and `max_iter` attributes were set. Attribute" + " `n_iter` was deprecated in version 1.3 and will be removed in" + " 1.5. To avoid this error, only set the `max_iter` attribute." + ) + warnings.warn( + ( + "'n_iter' was renamed to 'max_iter' in version 1.3 and " + "will be removed in 1.5" + ), + FutureWarning, + ) + max_iter = n_iter + elif max_iter is None: + max_iter = 300 + return max_iter + + +############################################################################### +# BayesianRidge regression + + +class BayesianRidge(RegressorMixin, LinearModel): + """Bayesian ridge regression. + + Fit a Bayesian ridge model. See the Notes section for details on this + implementation and the optimization of the regularization parameters + lambda (precision of the weights) and alpha (precision of the noise). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + max_iter : int, default=None + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion. If `None`, it + corresponds to `max_iter=300`. + + .. versionchanged:: 1.3 + + tol : float, default=1e-3 + Stop the algorithm if w has converged. + + alpha_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the alpha parameter. + + alpha_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the alpha parameter. + + lambda_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the lambda parameter. + + lambda_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the lambda parameter. + + alpha_init : float, default=None + Initial value for alpha (precision of the noise). + If not set, alpha_init is 1/Var(y). + + .. versionadded:: 0.22 + + lambda_init : float, default=None + Initial value for lambda (precision of the weights). + If not set, lambda_init is 1. + + .. versionadded:: 0.22 + + compute_score : bool, default=False + If True, compute the log marginal likelihood at each iteration of the + optimization. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. + The intercept is not treated as a probabilistic parameter + and thus has no associated variance. If set + to False, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + verbose : bool, default=False + Verbose mode when fitting the model. + + n_iter : int + Maximum number of iterations. Should be greater than or equal to 1. + + .. deprecated:: 1.3 + `n_iter` is deprecated in 1.3 and will be removed in 1.5. Use + `max_iter` instead. + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + Coefficients of the regression model (mean of distribution) + + intercept_ : float + Independent term in decision function. Set to 0.0 if + `fit_intercept = False`. + + alpha_ : float + Estimated precision of the noise. + + lambda_ : float + Estimated precision of the weights. 
+ + sigma_ : array-like of shape (n_features, n_features) + Estimated variance-covariance matrix of the weights + + scores_ : array-like of shape (n_iter_+1,) + If computed_score is True, value of the log marginal likelihood (to be + maximized) at each iteration of the optimization. The array starts + with the value of the log marginal likelihood obtained for the initial + values of alpha and lambda and ends with the value obtained for the + estimated alpha and lambda. + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + X_offset_ : ndarray of shape (n_features,) + If `fit_intercept=True`, offset subtracted for centering data to a + zero mean. Set to np.zeros(n_features) otherwise. + + X_scale_ : ndarray of shape (n_features,) + Set to np.ones(n_features). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + ARDRegression : Bayesian ARD regression. + + Notes + ----- + There exist several strategies to perform Bayesian ridge regression. This + implementation is based on the algorithm described in Appendix A of + (Tipping, 2001) where updates of the regularization parameters are done as + suggested in (MacKay, 1992). Note that according to A New + View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these + update rules do not guarantee that the marginal likelihood is increasing + between two consecutive iterations of the optimization. + + References + ---------- + D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems, + Vol. 4, No. 3, 1992. + + M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine, + Journal of Machine Learning Research, Vol. 1, 2001. 
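+
+    As an illustrative sketch of the `compute_score` option described
+    above (not part of the upstream docstring; the toy regression data
+    below is assumed), the log marginal likelihood is recorded once per
+    iteration plus once for the final estimates:
+
+    >>> import numpy as np
+    >>> from sklearn.linear_model import BayesianRidge
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.randn(50, 3)
+    >>> y = np.dot(X, [1.0, 2.0, 0.0]) + 0.1 * rng.randn(50)
+    >>> reg = BayesianRidge(compute_score=True).fit(X, y)
+    >>> reg.scores_.shape == (reg.n_iter_ + 1,)
+    True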
+ + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.BayesianRidge() + >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) + BayesianRidge() + >>> clf.predict([[1, 1]]) + array([1.]) + """ + + _parameter_constraints: dict = { + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="neither")], + "alpha_1": [Interval(Real, 0, None, closed="left")], + "alpha_2": [Interval(Real, 0, None, closed="left")], + "lambda_1": [Interval(Real, 0, None, closed="left")], + "lambda_2": [Interval(Real, 0, None, closed="left")], + "alpha_init": [None, Interval(Real, 0, None, closed="left")], + "lambda_init": [None, Interval(Real, 0, None, closed="left")], + "compute_score": ["boolean"], + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "verbose": ["verbose"], + "n_iter": [ + Interval(Integral, 1, None, closed="left"), + Hidden(StrOptions({"deprecated"})), + ], + } + + def __init__( + self, + *, + max_iter=None, # TODO(1.5): Set to 300 + tol=1.0e-3, + alpha_1=1.0e-6, + alpha_2=1.0e-6, + lambda_1=1.0e-6, + lambda_2=1.0e-6, + alpha_init=None, + lambda_init=None, + compute_score=False, + fit_intercept=True, + copy_X=True, + verbose=False, + n_iter="deprecated", # TODO(1.5): Remove + ): + self.max_iter = max_iter + self.tol = tol + self.alpha_1 = alpha_1 + self.alpha_2 = alpha_2 + self.lambda_1 = lambda_1 + self.lambda_2 = lambda_2 + self.alpha_init = alpha_init + self.lambda_init = lambda_init + self.compute_score = compute_score + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.verbose = verbose + self.n_iter = n_iter + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + y : ndarray of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : ndarray of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.20 + parameter *sample_weight* support to BayesianRidge. + + Returns + ------- + self : object + Returns the instance itself. + """ + max_iter = _deprecate_n_iter(self.n_iter, self.max_iter) + + X, y = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True) + dtype = X.dtype + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype) + + X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + if sample_weight is not None: + # Sample weight can be implemented via a simple rescaling. 
+ X, y, _ = _rescale_data(X, y, sample_weight) + + self.X_offset_ = X_offset_ + self.X_scale_ = X_scale_ + n_samples, n_features = X.shape + + # Initialization of the values of the parameters + eps = np.finfo(np.float64).eps + # Add `eps` in the denominator to omit division by zero if `np.var(y)` + # is zero + alpha_ = self.alpha_init + lambda_ = self.lambda_init + if alpha_ is None: + alpha_ = 1.0 / (np.var(y) + eps) + if lambda_ is None: + lambda_ = 1.0 + + # Avoid unintended type promotion to float64 with numpy 2 + alpha_ = np.asarray(alpha_, dtype=dtype) + lambda_ = np.asarray(lambda_, dtype=dtype) + + verbose = self.verbose + lambda_1 = self.lambda_1 + lambda_2 = self.lambda_2 + alpha_1 = self.alpha_1 + alpha_2 = self.alpha_2 + + self.scores_ = list() + coef_old_ = None + + XT_y = np.dot(X.T, y) + U, S, Vh = linalg.svd(X, full_matrices=False) + eigen_vals_ = S**2 + + # Convergence loop of the bayesian ridge regression + for iter_ in range(max_iter): + # update posterior mean coef_ based on alpha_ and lambda_ and + # compute corresponding rmse + coef_, rmse_ = self._update_coef_( + X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ + ) + if self.compute_score: + # compute the log marginal likelihood + s = self._log_marginal_likelihood( + n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_ + ) + self.scores_.append(s) + + # Update alpha and lambda according to (MacKay, 1992) + gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_)) + lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2) + alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2) + + # Check for convergence + if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: + if verbose: + print("Convergence after ", str(iter_), " iterations") + break + coef_old_ = np.copy(coef_) + + self.n_iter_ = iter_ + 1 + + # return regularization parameters and corresponding posterior mean, + # log marginal likelihood and posterior covariance + self.alpha_ = alpha_ + self.lambda_ = lambda_ + self.coef_, rmse_ = self._update_coef_( + X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ + ) + if self.compute_score: + # compute the log marginal likelihood + s = self._log_marginal_likelihood( + n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_ + ) + self.scores_.append(s) + self.scores_ = np.array(self.scores_) + + # posterior covariance is given by 1/alpha_ * scaled_sigma_ + scaled_sigma_ = np.dot( + Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis] + ) + self.sigma_ = (1.0 / alpha_) * scaled_sigma_ + + self._set_intercept(X_offset_, y_offset_, X_scale_) + + return self + + def predict(self, X, return_std=False): + """Predict using the linear model. + + In addition to the mean of the predictive distribution, also its + standard deviation can be returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + return_std : bool, default=False + Whether to return the standard deviation of posterior prediction. + + Returns + ------- + y_mean : array-like of shape (n_samples,) + Mean of predictive distribution of query points. + + y_std : array-like of shape (n_samples,) + Standard deviation of predictive distribution of query points. 
+ """ + y_mean = self._decision_function(X) + if not return_std: + return y_mean + else: + sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1) + y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_)) + return y_mean, y_std + + def _update_coef_( + self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ + ): + """Update posterior mean and compute corresponding rmse. + + Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where + scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features) + + np.dot(X.T, X))^-1 + """ + + if n_samples > n_features: + coef_ = np.linalg.multi_dot( + [Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y] + ) + else: + coef_ = np.linalg.multi_dot( + [X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y] + ) + + rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) + + return coef_, rmse_ + + def _log_marginal_likelihood( + self, n_samples, n_features, eigen_vals, alpha_, lambda_, coef, rmse + ): + """Log marginal likelihood.""" + alpha_1 = self.alpha_1 + alpha_2 = self.alpha_2 + lambda_1 = self.lambda_1 + lambda_2 = self.lambda_2 + + # compute the log of the determinant of the posterior covariance. + # posterior covariance is given by + # sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1 + if n_samples > n_features: + logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals)) + else: + logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype) + logdet_sigma[:n_samples] += alpha_ * eigen_vals + logdet_sigma = -np.sum(np.log(logdet_sigma)) + + score = lambda_1 * log(lambda_) - lambda_2 * lambda_ + score += alpha_1 * log(alpha_) - alpha_2 * alpha_ + score += 0.5 * ( + n_features * log(lambda_) + + n_samples * log(alpha_) + - alpha_ * rmse + - lambda_ * np.sum(coef**2) + + logdet_sigma + - n_samples * log(2 * np.pi) + ) + + return score + + +############################################################################### +# ARD (Automatic Relevance Determination) regression + + +class ARDRegression(RegressorMixin, LinearModel): + """Bayesian ARD regression. + + Fit the weights of a regression model, using an ARD prior. The weights of + the regression model are assumed to be in Gaussian distributions. + Also estimate the parameters lambda (precisions of the distributions of the + weights) and alpha (precision of the distribution of the noise). + The estimation is done by an iterative procedures (Evidence Maximization) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + max_iter : int, default=None + Maximum number of iterations. If `None`, it corresponds to `max_iter=300`. + + .. versionchanged:: 1.3 + + tol : float, default=1e-3 + Stop the algorithm if w has converged. + + alpha_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the alpha parameter. + + alpha_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the alpha parameter. + + lambda_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the lambda parameter. + + lambda_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the lambda parameter. + + compute_score : bool, default=False + If True, compute the objective function at each step of the model. 
+ + threshold_lambda : float, default=10 000 + Threshold for removing (pruning) weights with high precision from + the computation. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + verbose : bool, default=False + Verbose mode when fitting the model. + + n_iter : int + Maximum number of iterations. + + .. deprecated:: 1.3 + `n_iter` is deprecated in 1.3 and will be removed in 1.5. Use + `max_iter` instead. + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + Coefficients of the regression model (mean of distribution) + + alpha_ : float + estimated precision of the noise. + + lambda_ : array-like of shape (n_features,) + estimated precisions of the weights. + + sigma_ : array-like of shape (n_features, n_features) + estimated variance-covariance matrix of the weights + + scores_ : float + if computed, value of the objective function (to be maximized) + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + .. versionadded:: 1.3 + + intercept_ : float + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + X_offset_ : float + If `fit_intercept=True`, offset subtracted for centering data to a + zero mean. Set to np.zeros(n_features) otherwise. + + X_scale_ : float + Set to np.ones(n_features). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BayesianRidge : Bayesian ridge regression. + + Notes + ----- + For an example, see :ref:`examples/linear_model/plot_ard.py + `. + + References + ---------- + D. J. C. MacKay, Bayesian nonlinear modeling for the prediction + competition, ASHRAE Transactions, 1994. + + R. Salakhutdinov, Lecture notes on Statistical Machine Learning, + http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15 + Their beta is our ``self.alpha_`` + Their alpha is our ``self.lambda_`` + ARD is a little different than the slide: only dimensions/features for + which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are + discarded. 
+ + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.ARDRegression() + >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) + ARDRegression() + >>> clf.predict([[1, 1]]) + array([1.]) + """ + + _parameter_constraints: dict = { + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left")], + "alpha_1": [Interval(Real, 0, None, closed="left")], + "alpha_2": [Interval(Real, 0, None, closed="left")], + "lambda_1": [Interval(Real, 0, None, closed="left")], + "lambda_2": [Interval(Real, 0, None, closed="left")], + "compute_score": ["boolean"], + "threshold_lambda": [Interval(Real, 0, None, closed="left")], + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "verbose": ["verbose"], + "n_iter": [ + Interval(Integral, 1, None, closed="left"), + Hidden(StrOptions({"deprecated"})), + ], + } + + def __init__( + self, + *, + max_iter=None, # TODO(1.5): Set to 300 + tol=1.0e-3, + alpha_1=1.0e-6, + alpha_2=1.0e-6, + lambda_1=1.0e-6, + lambda_2=1.0e-6, + compute_score=False, + threshold_lambda=1.0e4, + fit_intercept=True, + copy_X=True, + verbose=False, + n_iter="deprecated", # TODO(1.5): Remove + ): + self.max_iter = max_iter + self.tol = tol + self.fit_intercept = fit_intercept + self.alpha_1 = alpha_1 + self.alpha_2 = alpha_2 + self.lambda_1 = lambda_1 + self.lambda_2 = lambda_2 + self.compute_score = compute_score + self.threshold_lambda = threshold_lambda + self.copy_X = copy_X + self.verbose = verbose + self.n_iter = n_iter + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model according to the given training data and parameters. + + Iterative procedure to maximize the evidence + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + y : array-like of shape (n_samples,) + Target values (integers). Will be cast to X's dtype if necessary. + + Returns + ------- + self : object + Fitted estimator. + """ + max_iter = _deprecate_n_iter(self.n_iter, self.max_iter) + + X, y = self._validate_data( + X, y, dtype=[np.float64, np.float32], y_numeric=True, ensure_min_samples=2 + ) + dtype = X.dtype + + n_samples, n_features = X.shape + coef_ = np.zeros(n_features, dtype=dtype) + + X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=self.copy_X + ) + + self.X_offset_ = X_offset_ + self.X_scale_ = X_scale_ + + # Launch the convergence loop + keep_lambda = np.ones(n_features, dtype=bool) + + lambda_1 = self.lambda_1 + lambda_2 = self.lambda_2 + alpha_1 = self.alpha_1 + alpha_2 = self.alpha_2 + verbose = self.verbose + + # Initialization of the values of the parameters + eps = np.finfo(np.float64).eps + # Add `eps` in the denominator to omit division by zero if `np.var(y)` + # is zero. + # Explicitly set dtype to avoid unintended type promotion with numpy 2. 
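+        # alpha_ is the precision of the noise and starts at 1 / Var(y);
+        # lambda_ holds one precision per weight and starts at 1.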
+ alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype) + lambda_ = np.ones(n_features, dtype=dtype) + + self.scores_ = list() + coef_old_ = None + + def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_): + coef_[keep_lambda] = alpha_ * np.linalg.multi_dot( + [sigma_, X[:, keep_lambda].T, y] + ) + return coef_ + + update_sigma = ( + self._update_sigma + if n_samples >= n_features + else self._update_sigma_woodbury + ) + # Iterative procedure of ARDRegression + for iter_ in range(max_iter): + sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) + coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) + + # Update alpha and lambda + rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) + gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_) + lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / ( + (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2 + ) + alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / ( + rmse_ + 2.0 * alpha_2 + ) + + # Prune the weights with a precision over a threshold + keep_lambda = lambda_ < self.threshold_lambda + coef_[~keep_lambda] = 0 + + # Compute the objective function + if self.compute_score: + s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum() + s += alpha_1 * log(alpha_) - alpha_2 * alpha_ + s += 0.5 * ( + fast_logdet(sigma_) + + n_samples * log(alpha_) + + np.sum(np.log(lambda_)) + ) + s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum()) + self.scores_.append(s) + + # Check for convergence + if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: + if verbose: + print("Converged after %s iterations" % iter_) + break + coef_old_ = np.copy(coef_) + + if not keep_lambda.any(): + break + + self.n_iter_ = iter_ + 1 + + if keep_lambda.any(): + # update sigma and mu using updated params from the last iteration + sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) + coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) + else: + sigma_ = np.array([]).reshape(0, 0) + + self.coef_ = coef_ + self.alpha_ = alpha_ + self.sigma_ = sigma_ + self.lambda_ = lambda_ + self._set_intercept(X_offset_, y_offset_, X_scale_) + return self + + def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda): + # See slides as referenced in the docstring note + # this function is used when n_samples < n_features and will invert + # a matrix of shape (n_samples, n_samples) making use of the + # woodbury formula: + # https://en.wikipedia.org/wiki/Woodbury_matrix_identity + n_samples = X.shape[0] + X_keep = X[:, keep_lambda] + inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1) + sigma_ = pinvh( + np.eye(n_samples, dtype=X.dtype) / alpha_ + + np.dot(X_keep * inv_lambda, X_keep.T) + ) + sigma_ = np.dot(sigma_, X_keep * inv_lambda) + sigma_ = -np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_) + sigma_[np.diag_indices(sigma_.shape[1])] += 1.0 / lambda_[keep_lambda] + return sigma_ + + def _update_sigma(self, X, alpha_, lambda_, keep_lambda): + # See slides as referenced in the docstring note + # this function is used when n_samples >= n_features and will + # invert a matrix of shape (n_features, n_features) + X_keep = X[:, keep_lambda] + gram = np.dot(X_keep.T, X_keep) + eye = np.eye(gram.shape[0], dtype=X.dtype) + sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram + sigma_ = pinvh(sigma_inv) + return sigma_ + + def predict(self, X, return_std=False): + """Predict using the linear model. + + In addition to the mean of the predictive distribution, also its + standard deviation can be returned. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + return_std : bool, default=False + Whether to return the standard deviation of posterior prediction. + + Returns + ------- + y_mean : array-like of shape (n_samples,) + Mean of predictive distribution of query points. + + y_std : array-like of shape (n_samples,) + Standard deviation of predictive distribution of query points. + """ + y_mean = self._decision_function(X) + if return_std is False: + return y_mean + else: + col_index = self.lambda_ < self.threshold_lambda + X = _safe_indexing(X, indices=col_index, axis=1) + sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1) + y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_)) + return y_mean, y_std diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..d1293bb62f262739bd784713c0e0059d486c7e24 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py @@ -0,0 +1,3182 @@ +# Author: Alexandre Gramfort +# Fabian Pedregosa +# Olivier Grisel +# Gael Varoquaux +# +# License: BSD 3 clause + +import numbers +import sys +import warnings +from abc import ABC, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy import sparse + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..model_selection import check_cv +from ..utils import Bunch, check_array, check_scalar +from ..utils._metadata_requests import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + get_routing_for_object, +) +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import safe_sparse_dot +from ..utils.metadata_routing import ( + _routing_enabled, + process_routing, +) +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_sample_weight, + check_consistent_length, + check_is_fitted, + check_random_state, + column_or_1d, + has_fit_parameter, +) + +# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast' +from . import _cd_fast as cd_fast # type: ignore +from ._base import LinearModel, _pre_fit, _preprocess_data + + +def _set_order(X, y, order="C"): + """Change the order of X and y if necessary. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + order : {None, 'C', 'F'} + If 'C', dense arrays are returned as C-ordered, sparse matrices in csr + format. If 'F', dense arrays are return as F-ordered, sparse matrices + in csc format. + + Returns + ------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data with guaranteed order. + + y : ndarray of shape (n_samples,) + Target values with guaranteed order. + """ + if order not in [None, "C", "F"]: + raise ValueError( + "Unknown value for order. 
Got {} instead of None, 'C' or 'F'.".format(order) + ) + sparse_X = sparse.issparse(X) + sparse_y = sparse.issparse(y) + if order is not None: + sparse_format = "csc" if order == "F" else "csr" + if sparse_X: + X = X.asformat(sparse_format, copy=False) + else: + X = np.asarray(X, order=order) + if sparse_y: + y = y.asformat(sparse_format) + else: + y = np.asarray(y, order=order) + return X, y + + +############################################################################### +# Paths functions + + +def _alpha_grid( + X, + y, + Xy=None, + l1_ratio=1.0, + fit_intercept=True, + eps=1e-3, + n_alphas=100, + copy_X=True, +): + """Compute the grid of alpha values for elastic net parameter search + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication + + y : ndarray of shape (n_samples,) or (n_samples, n_outputs) + Target values + + Xy : array-like of shape (n_features,) or (n_features, n_outputs),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. + + l1_ratio : float, default=1.0 + The elastic net mixing parameter, with ``0 < l1_ratio <= 1``. + For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not + supported) ``For l1_ratio = 1`` it is an L1 penalty. For + ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3`` + + n_alphas : int, default=100 + Number of alphas along the regularization path + + fit_intercept : bool, default=True + Whether to fit an intercept or not + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + """ + if l1_ratio == 0: + raise ValueError( + "Automatic alpha grid generation is not supported for" + " l1_ratio=0. Please supply a grid by providing " + "your estimator with the appropriate `alphas=` " + "argument." + ) + n_samples = len(y) + + sparse_center = False + if Xy is None: + X_sparse = sparse.issparse(X) + sparse_center = X_sparse and fit_intercept + X = check_array( + X, accept_sparse="csc", copy=(copy_X and fit_intercept and not X_sparse) + ) + if not X_sparse: + # X can be touched inplace thanks to the above line + X, y, _, _, _ = _preprocess_data( + X, y, fit_intercept=fit_intercept, copy=False + ) + Xy = safe_sparse_dot(X.T, y, dense_output=True) + + if sparse_center: + # Workaround to find alpha_max for sparse matrices. + # since we should not destroy the sparsity of such matrices. 
+ _, _, X_offset, _, X_scale = _preprocess_data( + X, y, fit_intercept=fit_intercept + ) + mean_dot = X_offset * np.sum(y) + + if Xy.ndim == 1: + Xy = Xy[:, np.newaxis] + + if sparse_center: + if fit_intercept: + Xy -= mean_dot[:, np.newaxis] + + alpha_max = np.sqrt(np.sum(Xy**2, axis=1)).max() / (n_samples * l1_ratio) + + if alpha_max <= np.finfo(float).resolution: + alphas = np.empty(n_alphas) + alphas.fill(np.finfo(float).resolution) + return alphas + + return np.geomspace(alpha_max, alpha_max * eps, num=n_alphas) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "eps": [Interval(Real, 0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "precompute": [StrOptions({"auto"}), "boolean", "array-like"], + "Xy": ["array-like", None], + "copy_X": ["boolean"], + "coef_init": ["array-like", None], + "verbose": ["verbose"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lasso_path( + X, + y, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + precompute="auto", + Xy=None, + copy_X=True, + coef_init=None, + verbose=False, + return_n_iter=False, + positive=False, + **params, +): + """Compute Lasso path with coordinate descent. + + The Lasso optimization function varies for mono and multi-outputs. + + For mono-output tasks it is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + For multi-output tasks it is:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication. If ``y`` is mono-output then ``X`` + can be sparse. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_targets) + Target values. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If ``None`` alphas are set automatically. + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + Xy : array-like of shape (n_features,) or (n_features, n_targets),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + coef_init : array-like of shape (n_features, ), default=None + The initial values of the coefficients. + + verbose : bool or int, default=False + Amount of verbosity. + + return_n_iter : bool, default=False + Whether to return the number of iterations or not. + + positive : bool, default=False + If set to True, forces coefficients to be positive. + (Only allowed when ``y.ndim == 1``). + + **params : kwargs + Keyword arguments passed to the coordinate descent solver. + + Returns + ------- + alphas : ndarray of shape (n_alphas,) + The alphas along the path where models are computed. 
+ + coefs : ndarray of shape (n_features, n_alphas) or \ + (n_targets, n_features, n_alphas) + Coefficients along the path. + + dual_gaps : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + n_iters : list of int + The number of iterations taken by the coordinate descent optimizer to + reach the specified tolerance for each alpha. + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso path using LARS + algorithm. + Lasso : The Lasso is a linear model that estimates sparse coefficients. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoCV : Lasso linear model with iterative fitting along a regularization + path. + LassoLarsCV : Cross-validated Lasso using the LARS algorithm. + sklearn.decomposition.sparse_encode : Estimator that can be used to + transform signals into sparse linear combination of atoms from a fixed. + + Notes + ----- + For an example, see + :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py + `. + + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + Note that in certain cases, the Lars solver may be significantly + faster to implement this functionality. In particular, linear + interpolation can be used to retrieve model coefficients between the + values output by lars_path + + Examples + -------- + + Comparing lasso_path and lars_path with interpolation: + + >>> import numpy as np + >>> from sklearn.linear_model import lasso_path + >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T + >>> y = np.array([1, 2, 3.1]) + >>> # Use lasso_path to compute a coefficient path + >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5]) + >>> print(coef_path) + [[0. 0. 0.46874778] + [0.2159048 0.4425765 0.23689075]] + + >>> # Now use lars_path and 1D linear interpolation to compute the + >>> # same path + >>> from sklearn.linear_model import lars_path + >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso') + >>> from scipy import interpolate + >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], + ... coef_path_lars[:, ::-1]) + >>> print(coef_path_continuous([5., 1., .5])) + [[0. 0. 0.46915237] + [0.2159048 0.4425765 0.23668876]] + """ + return enet_path( + X, + y, + l1_ratio=1.0, + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + precompute=precompute, + Xy=Xy, + copy_X=copy_X, + coef_init=coef_init, + verbose=verbose, + positive=positive, + return_n_iter=return_n_iter, + **params, + ) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "l1_ratio": [Interval(Real, 0.0, 1.0, closed="both")], + "eps": [Interval(Real, 0.0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "precompute": [StrOptions({"auto"}), "boolean", "array-like"], + "Xy": ["array-like", None], + "copy_X": ["boolean"], + "coef_init": ["array-like", None], + "verbose": ["verbose"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + "check_input": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def enet_path( + X, + y, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + precompute="auto", + Xy=None, + copy_X=True, + coef_init=None, + verbose=False, + return_n_iter=False, + positive=False, + check_input=True, + **params, +): + """Compute elastic net path with coordinate descent. 
+ + The elastic net optimization function varies for mono and multi-outputs. + + For mono-output tasks it is:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + For multi-output tasks it is:: + + (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication. If ``y`` is mono-output then ``X`` + can be sparse. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_targets) + Target values. + + l1_ratio : float, default=0.5 + Number between 0 and 1 passed to elastic net (scaling between + l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If None alphas are set automatically. + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + Xy : array-like of shape (n_features,) or (n_features, n_targets),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + coef_init : array-like of shape (n_features, ), default=None + The initial values of the coefficients. + + verbose : bool or int, default=False + Amount of verbosity. + + return_n_iter : bool, default=False + Whether to return the number of iterations or not. + + positive : bool, default=False + If set to True, forces coefficients to be positive. + (Only allowed when ``y.ndim == 1``). + + check_input : bool, default=True + If set to False, the input validation checks are skipped (including the + Gram matrix when provided). It is assumed that they are handled + by the caller. + + **params : kwargs + Keyword arguments passed to the coordinate descent solver. + + Returns + ------- + alphas : ndarray of shape (n_alphas,) + The alphas along the path where models are computed. + + coefs : ndarray of shape (n_features, n_alphas) or \ + (n_targets, n_features, n_alphas) + Coefficients along the path. + + dual_gaps : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + n_iters : list of int + The number of iterations taken by the coordinate descent optimizer to + reach the specified tolerance for each alpha. + (Is returned when ``return_n_iter`` is set to True). + + See Also + -------- + MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 mixed-norm \ + as regularizer. + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in cross-validation. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + ElasticNetCV : Elastic Net model with iterative fitting along a regularization path. 
+ + Notes + ----- + For an example, see + :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py + `. + """ + X_offset_param = params.pop("X_offset", None) + X_scale_param = params.pop("X_scale", None) + sample_weight = params.pop("sample_weight", None) + tol = params.pop("tol", 1e-4) + max_iter = params.pop("max_iter", 1000) + random_state = params.pop("random_state", None) + selection = params.pop("selection", "cyclic") + + if len(params) > 0: + raise ValueError("Unexpected parameters in params", params.keys()) + + # We expect X and y to be already Fortran ordered when bypassing + # checks + if check_input: + X = check_array( + X, + accept_sparse="csc", + dtype=[np.float64, np.float32], + order="F", + copy=copy_X, + ) + y = check_array( + y, + accept_sparse="csc", + dtype=X.dtype.type, + order="F", + copy=False, + ensure_2d=False, + ) + if Xy is not None: + # Xy should be a 1d contiguous array or a 2D C ordered array + Xy = check_array( + Xy, dtype=X.dtype.type, order="C", copy=False, ensure_2d=False + ) + + n_samples, n_features = X.shape + + multi_output = False + if y.ndim != 1: + multi_output = True + n_targets = y.shape[1] + + if multi_output and positive: + raise ValueError("positive=True is not allowed for multi-output (y.ndim != 1)") + + # MultiTaskElasticNet does not support sparse matrices + if not multi_output and sparse.issparse(X): + if X_offset_param is not None: + # As sparse matrices are not actually centered we need this to be passed to + # the CD solver. + X_sparse_scaling = X_offset_param / X_scale_param + X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype) + else: + X_sparse_scaling = np.zeros(n_features, dtype=X.dtype) + + # X should have been passed through _pre_fit already if function is called + # from ElasticNet.fit + if check_input: + X, y, _, _, _, precompute, Xy = _pre_fit( + X, + y, + Xy, + precompute, + fit_intercept=False, + copy=False, + check_input=check_input, + ) + if alphas is None: + # No need to normalize of fit_intercept: it has been done + # above + alphas = _alpha_grid( + X, + y, + Xy=Xy, + l1_ratio=l1_ratio, + fit_intercept=False, + eps=eps, + n_alphas=n_alphas, + copy_X=False, + ) + elif len(alphas) > 1: + alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered + + n_alphas = len(alphas) + dual_gaps = np.empty(n_alphas) + n_iters = [] + + rng = check_random_state(random_state) + if selection not in ["random", "cyclic"]: + raise ValueError("selection should be either random or cyclic.") + random = selection == "random" + + if not multi_output: + coefs = np.empty((n_features, n_alphas), dtype=X.dtype) + else: + coefs = np.empty((n_targets, n_features, n_alphas), dtype=X.dtype) + + if coef_init is None: + coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order="F") + else: + coef_ = np.asfortranarray(coef_init, dtype=X.dtype) + + for i, alpha in enumerate(alphas): + # account for n_samples scaling in objectives between here and cd_fast + l1_reg = alpha * l1_ratio * n_samples + l2_reg = alpha * (1.0 - l1_ratio) * n_samples + if not multi_output and sparse.issparse(X): + model = cd_fast.sparse_enet_coordinate_descent( + w=coef_, + alpha=l1_reg, + beta=l2_reg, + X_data=X.data, + X_indices=X.indices, + X_indptr=X.indptr, + y=y, + sample_weight=sample_weight, + X_mean=X_sparse_scaling, + max_iter=max_iter, + tol=tol, + rng=rng, + random=random, + positive=positive, + ) + elif multi_output: + model = cd_fast.enet_coordinate_descent_multi_task( + coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random + ) + elif 
isinstance(precompute, np.ndarray): + # We expect precompute to be already Fortran ordered when bypassing + # checks + if check_input: + precompute = check_array(precompute, dtype=X.dtype.type, order="C") + model = cd_fast.enet_coordinate_descent_gram( + coef_, + l1_reg, + l2_reg, + precompute, + Xy, + y, + max_iter, + tol, + rng, + random, + positive, + ) + elif precompute is False: + model = cd_fast.enet_coordinate_descent( + coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive + ) + else: + raise ValueError( + "Precompute should be one of True, False, 'auto' or array-like. Got %r" + % precompute + ) + coef_, dual_gap_, eps_, n_iter_ = model + coefs[..., i] = coef_ + # we correct the scale of the returned dual gap, as the objective + # in cd_fast is n_samples * the objective in this docstring. + dual_gaps[i] = dual_gap_ / n_samples + n_iters.append(n_iter_) + + if verbose: + if verbose > 2: + print(model) + elif verbose > 1: + print("Path: %03i out of %03i" % (i, n_alphas)) + else: + sys.stderr.write(".") + + if return_n_iter: + return alphas, coefs, dual_gaps, n_iters + return alphas, coefs, dual_gaps + + +############################################################################### +# ElasticNet model + + +class ElasticNet(MultiOutputMixin, RegressorMixin, LinearModel): + """Linear regression with combined L1 and L2 priors as regularizer. + + Minimizes the objective function:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + If you are interested in controlling the L1 and L2 penalty + separately, keep in mind that this is equivalent to:: + + a * ||w||_1 + 0.5 * b * ||w||_2^2 + + where:: + + alpha = a + b and l1_ratio = a / (a + b) + + The parameter l1_ratio corresponds to alpha in the glmnet R package while + alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio + = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable, + unless you supply your own sequence of alpha. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the penalty terms. Defaults to 1.0. + See the notes for the exact mathematical meaning of this + parameter. ``alpha = 0`` is equivalent to an ordinary least square, + solved by the :class:`LinearRegression` object. For numerical + reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised. + Given this, you should use the :class:`LinearRegression` object. + + l1_ratio : float, default=0.5 + The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For + ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it + is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a + combination of L1 and L2. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If ``False``, the + data is assumed to be already centered. + + precompute : bool or array-like of shape (n_features, n_features),\ + default=False + Whether to use a precomputed Gram matrix to speed up + calculations. The Gram matrix can also be passed as argument. + For sparse input this option is always ``False`` to preserve sparsity. + + max_iter : int, default=1000 + The maximum number of iterations. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. 
+ + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``, see Notes below. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : list of int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : float or ndarray of shape (n_targets,) + Given param alpha, the dual gaps at the end of the optimization, + same shape as each observation of y. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + SGDRegressor : Implements elastic net regression with incremental training. + SGDClassifier : Implements logistic regression with elastic net penalty + (``SGDClassifier(loss="log_loss", penalty="elasticnet")``). + + Notes + ----- + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + The precise stopping criteria based on `tol` are the following: First, check that + that maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|` + is smaller than `tol` times the maximum absolute coefficient, :math:`\\max_j |w_j|`. + If so, then additionally check whether the dual gap is smaller than `tol` times + :math:`||y||_2^2 / n_{\text{samples}}`. + + Examples + -------- + >>> from sklearn.linear_model import ElasticNet + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=2, random_state=0) + >>> regr = ElasticNet(random_state=0) + >>> regr.fit(X, y) + ElasticNet(random_state=0) + >>> print(regr.coef_) + [18.83816048 64.55968825] + >>> print(regr.intercept_) + 1.451... + >>> print(regr.predict([[0, 0]])) + [1.451...] 
+ """ + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "fit_intercept": ["boolean"], + "precompute": ["boolean", "array-like"], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "copy_X": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "warm_start": ["boolean"], + "positive": ["boolean"], + "random_state": ["random_state"], + "selection": [StrOptions({"cyclic", "random"})], + } + + path = staticmethod(enet_path) + + def __init__( + self, + alpha=1.0, + *, + l1_ratio=0.5, + fit_intercept=True, + precompute=False, + max_iter=1000, + copy_X=True, + tol=1e-4, + warm_start=False, + positive=False, + random_state=None, + selection="cyclic", + ): + self.alpha = alpha + self.l1_ratio = l1_ratio + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.positive = positive + self.random_state = random_state + self.selection = selection + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, check_input=True): + """Fit model with coordinate descent. + + Parameters + ---------- + X : {ndarray, sparse matrix} of (n_samples, n_features) + Data. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + sample_weight : float or array-like of shape (n_samples,), default=None + Sample weights. Internally, the `sample_weight` vector will be + rescaled to sum to `n_samples`. + + .. versionadded:: 0.23 + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you do. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + Coordinate descent is an algorithm that considers each column of + data at a time hence it will automatically convert the X input + as a Fortran-contiguous numpy array if necessary. + + To avoid memory re-allocation it is advised to allocate the + initial data in memory directly using that format. + """ + if self.alpha == 0: + warnings.warn( + ( + "With alpha=0, this algorithm does not converge " + "well. You are advised to use the LinearRegression " + "estimator" + ), + stacklevel=2, + ) + + # Remember if X is copied + X_copied = False + # We expect X and y to be float64 or float32 Fortran ordered arrays + # when bypassing checks + if check_input: + X_copied = self.copy_X and self.fit_intercept + X, y = self._validate_data( + X, + y, + accept_sparse="csc", + order="F", + dtype=[np.float64, np.float32], + copy=X_copied, + multi_output=True, + y_numeric=True, + ) + y = check_array( + y, order="F", copy=False, dtype=X.dtype.type, ensure_2d=False + ) + + n_samples, n_features = X.shape + alpha = self.alpha + + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + if check_input: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + # TLDR: Rescale sw to sum up to n_samples. + # Long: The objective function of Enet + # + # 1/2 * np.average(squared error, weights=sw) + # + alpha * penalty (1) + # + # is invariant under rescaling of sw. 
+ # But enet_path coordinate descent minimizes + # + # 1/2 * sum(squared error) + alpha' * penalty (2) + # + # and therefore sets + # + # alpha' = n_samples * alpha (3) + # + # inside its function body, which results in objective (2) being + # equivalent to (1) in case of no sw. + # With sw, however, enet_path should set + # + # alpha' = sum(sw) * alpha (4) + # + # Therefore, we use the freedom of Eq. (1) to rescale sw before + # calling enet_path, i.e. + # + # sw *= n_samples / sum(sw) + # + # such that sum(sw) = n_samples. This way, (3) and (4) are the same. + sample_weight = sample_weight * (n_samples / np.sum(sample_weight)) + # Note: Alternatively, we could also have rescaled alpha instead + # of sample_weight: + # + # alpha *= np.sum(sample_weight) / n_samples + + # Ensure copying happens only once, don't do it again if done above. + # X and y will be rescaled if sample_weight is not None, order='F' + # ensures that the returned X and y are still F-contiguous. + should_copy = self.copy_X and not X_copied + X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit( + X, + y, + None, + self.precompute, + fit_intercept=self.fit_intercept, + copy=should_copy, + check_input=check_input, + sample_weight=sample_weight, + ) + # coordinate descent needs F-ordered arrays and _pre_fit might have + # called _rescale_data + if check_input or sample_weight is not None: + X, y = _set_order(X, y, order="F") + if y.ndim == 1: + y = y[:, np.newaxis] + if Xy is not None and Xy.ndim == 1: + Xy = Xy[:, np.newaxis] + + n_targets = y.shape[1] + + if not self.warm_start or not hasattr(self, "coef_"): + coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order="F") + else: + coef_ = self.coef_ + if coef_.ndim == 1: + coef_ = coef_[np.newaxis, :] + + dual_gaps_ = np.zeros(n_targets, dtype=X.dtype) + self.n_iter_ = [] + + for k in range(n_targets): + if Xy is not None: + this_Xy = Xy[:, k] + else: + this_Xy = None + _, this_coef, this_dual_gap, this_iter = self.path( + X, + y[:, k], + l1_ratio=self.l1_ratio, + eps=None, + n_alphas=None, + alphas=[alpha], + precompute=precompute, + Xy=this_Xy, + copy_X=True, + coef_init=coef_[k], + verbose=False, + return_n_iter=True, + positive=self.positive, + check_input=False, + # from here on **params + tol=self.tol, + X_offset=X_offset, + X_scale=X_scale, + max_iter=self.max_iter, + random_state=self.random_state, + selection=self.selection, + sample_weight=sample_weight, + ) + coef_[k] = this_coef[:, 0] + dual_gaps_[k] = this_dual_gap[0] + self.n_iter_.append(this_iter[0]) + + if n_targets == 1: + self.n_iter_ = self.n_iter_[0] + self.coef_ = coef_[0] + self.dual_gap_ = dual_gaps_[0] + else: + self.coef_ = coef_ + self.dual_gap_ = dual_gaps_ + + self._set_intercept(X_offset, y_offset, X_scale) + + # check for finiteness of coefficients + if not all(np.isfinite(w).all() for w in [self.coef_, self.intercept_]): + raise ValueError( + "Coordinate descent iterations resulted in non-finite parameter" + " values. The input data may contain large values and need to" + " be preprocessed." + ) + + # return self for chaining fit and predict calls + return self + + @property + def sparse_coef_(self): + """Sparse representation of the fitted `coef_`.""" + return sparse.csr_matrix(self.coef_) + + def _decision_function(self, X): + """Decision function of the linear model. + + Parameters + ---------- + X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) + + Returns + ------- + T : ndarray of shape (n_samples,) + The predicted decision function. 
+ """ + check_is_fitted(self) + if sparse.issparse(X): + return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + else: + return super()._decision_function(X) + + +############################################################################### +# Lasso model + + +class Lasso(ElasticNet): + """Linear Model trained with L1 prior as regularizer (aka the Lasso). + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Technically the Lasso model is optimizing the same objective function as + the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Lasso` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to False, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : bool or array-like of shape (n_features, n_features),\ + default=False + Whether to use a precomputed Gram matrix to speed up + calculations. The Gram matrix can also be passed as argument. + For sparse input this option is always ``False`` to preserve sparsity. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``, see Notes below. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + dual_gap_ : float or ndarray of shape (n_targets,) + Given param alpha, the dual gaps at the end of the optimization, + same shape as each observation of y. + + sparse_coef_ : sparse matrix of shape (n_features, 1) or \ + (n_targets, n_features) + Readonly property derived from ``coef_``. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. 
+ + n_iter_ : int or list of int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Regularization path using LARS. + lasso_path : Regularization path using Lasso. + LassoLars : Lasso Path along the regularization parameter using LARS algorithm. + LassoCV : Lasso alpha parameter by cross-validation. + LassoLarsCV : Lasso least angle parameter algorithm by cross-validation. + sklearn.decomposition.sparse_encode : Sparse coding array estimator. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to `1 / (2C)` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. + + The precise stopping criteria based on `tol` are the following: First, check that + that maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|` + is smaller than `tol` times the maximum absolute coefficient, :math:`\\max_j |w_j|`. + If so, then additionally check whether the dual gap is smaller than `tol` times + :math:`||y||_2^2 / n_{\\text{samples}}`. + + The target can be a 2-dimensional array, resulting in the optimization of the + following objective:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_F + alpha * ||W||_11 + + where :math:`||W||_{1,1}` is the sum of the magnitude of the matrix coefficients. + It should not be confused with :class:`~sklearn.linear_model.MultiTaskLasso` which + instead penalizes the :math:`L_{2,1}` norm of the coefficients, yielding row-wise + sparsity in the coefficients. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.Lasso(alpha=0.1) + >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) + Lasso(alpha=0.1) + >>> print(clf.coef_) + [0.85 0. ] + >>> print(clf.intercept_) + 0.15... + """ + + _parameter_constraints: dict = { + **ElasticNet._parameter_constraints, + } + _parameter_constraints.pop("l1_ratio") + + path = staticmethod(enet_path) + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + precompute=False, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + positive=False, + random_state=None, + selection="cyclic", + ): + super().__init__( + alpha=alpha, + l1_ratio=1.0, + fit_intercept=fit_intercept, + precompute=precompute, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + positive=positive, + random_state=random_state, + selection=selection, + ) + + +############################################################################### +# Functions for CV with paths functions + + +def _path_residuals( + X, + y, + sample_weight, + train, + test, + fit_intercept, + path, + path_params, + alphas=None, + l1_ratio=1, + X_order=None, + dtype=None, +): + """Returns the MSE for the models computed by 'path'. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : None or array-like of shape (n_samples,) + Sample weights. + + train : list of indices + The indices of the train set. + + test : list of indices + The indices of the test set. + + path : callable + Function returning a list of models on the path. See + enet_path for an example of signature. + + path_params : dictionary + Parameters passed to the path function. + + alphas : array-like, default=None + Array of float that is used for cross-validation. If not + provided, computed using 'path'. + + l1_ratio : float, default=1 + float between 0 and 1 passed to ElasticNet (scaling between + l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an + L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 + < l1_ratio < 1``, the penalty is a combination of L1 and L2. + + X_order : {'F', 'C'}, default=None + The order of the arrays expected by the path function to + avoid memory copies. + + dtype : a numpy dtype, default=None + The dtype of the arrays expected by the path function to + avoid memory copies. + """ + X_train = X[train] + y_train = y[train] + X_test = X[test] + y_test = y[test] + if sample_weight is None: + sw_train, sw_test = None, None + else: + sw_train = sample_weight[train] + sw_test = sample_weight[test] + n_samples = X_train.shape[0] + # TLDR: Rescale sw_train to sum up to n_samples on the training set. + # See TLDR and long comment inside ElasticNet.fit. + sw_train *= n_samples / np.sum(sw_train) + # Note: Alternatively, we could also have rescaled alpha instead + # of sample_weight: + # + # alpha *= np.sum(sample_weight) / n_samples + + if not sparse.issparse(X): + for array, array_input in ( + (X_train, X), + (y_train, y), + (X_test, X), + (y_test, y), + ): + if array.base is not array_input and not array.flags["WRITEABLE"]: + # fancy indexing should create a writable copy but it doesn't + # for read-only memmaps (cf. numpy#14132). + array.setflags(write=True) + + if y.ndim == 1: + precompute = path_params["precompute"] + else: + # No Gram variant of multi-task exists right now. + # Fall back to default enet_multitask + precompute = False + + X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit( + X_train, + y_train, + None, + precompute, + fit_intercept=fit_intercept, + copy=False, + sample_weight=sw_train, + ) + + path_params = path_params.copy() + path_params["Xy"] = Xy + path_params["X_offset"] = X_offset + path_params["X_scale"] = X_scale + path_params["precompute"] = precompute + path_params["copy_X"] = False + path_params["alphas"] = alphas + # needed for sparse cd solver + path_params["sample_weight"] = sw_train + + if "l1_ratio" in path_params: + path_params["l1_ratio"] = l1_ratio + + # Do the ordering and type casting here, as if it is done in the path, + # X is copied and a reference is kept here + X_train = check_array(X_train, accept_sparse="csc", dtype=dtype, order=X_order) + alphas, coefs, _ = path(X_train, y_train, **path_params) + del X_train, y_train + + if y.ndim == 1: + # Doing this so that it becomes coherent with multioutput. 
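+        # A leading target axis of size one is added so that the residual
+        # computation below treats single- and multi-output identically.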
+ coefs = coefs[np.newaxis, :, :] + y_offset = np.atleast_1d(y_offset) + y_test = y_test[:, np.newaxis] + + intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs) + X_test_coefs = safe_sparse_dot(X_test, coefs) + residues = X_test_coefs - y_test[:, :, np.newaxis] + residues += intercepts + if sample_weight is None: + this_mse = (residues**2).mean(axis=0) + else: + this_mse = np.average(residues**2, weights=sw_test, axis=0) + + return this_mse.mean(axis=0) + + +class LinearModelCV(MultiOutputMixin, LinearModel, ABC): + """Base class for iterative model fitting along a regularization path.""" + + _parameter_constraints: dict = { + "eps": [Interval(Real, 0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "fit_intercept": ["boolean"], + "precompute": [StrOptions({"auto"}), "array-like", "boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "copy_X": ["boolean"], + "cv": ["cv_object"], + "verbose": ["verbose"], + "n_jobs": [Integral, None], + "positive": ["boolean"], + "random_state": ["random_state"], + "selection": [StrOptions({"cyclic", "random"})], + } + + @abstractmethod + def __init__( + self, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.tol = tol + self.copy_X = copy_X + self.cv = cv + self.verbose = verbose + self.n_jobs = n_jobs + self.positive = positive + self.random_state = random_state + self.selection = selection + + @abstractmethod + def _get_estimator(self): + """Model to be fitted after the best alpha has been determined.""" + + @abstractmethod + def _is_multitask(self): + """Bool indicating if class is meant for multidimensional target.""" + + @staticmethod + @abstractmethod + def path(X, y, **kwargs): + """Compute path with coordinate descent.""" + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, **params): + """Fit linear model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data + to avoid unnecessary memory duplication. If y is mono-output, + X can be sparse. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or array-like of shape (n_samples,), \ + default=None + Sample weights used for fitting and evaluation of the weighted + mean squared error of each cv-fold. Note that the cross validated + MSE that is finally used to find the best model is the unweighted + mean over the (weighted) MSEs of each test fold. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. 
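+
+        Notes
+        -----
+        A rough sketch of routing metadata to the CV splitter through ``**params``
+        (illustrative only; ``X``, ``y`` and ``groups`` are assumed inputs, and
+        depending on the splitter, ``groups`` may need to be requested explicitly,
+        e.g. via ``set_split_request(groups=True)``)::
+
+            import sklearn
+            from sklearn.model_selection import GroupKFold
+
+            sklearn.set_config(enable_metadata_routing=True)
+            reg = LassoCV(cv=GroupKFold(n_splits=5))
+            reg.fit(X, y, groups=groups)  # `groups` is routed to GroupKFold.split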
+ """ + _raise_for_params(params, self, "fit") + + # This makes sure that there is no duplication in memory. + # Dealing right with copy_X is important in the following: + # Multiple functions touch X and subsamples of X and can induce a + # lot of duplication of memory + copy_X = self.copy_X and self.fit_intercept + + check_y_params = dict( + copy=False, dtype=[np.float64, np.float32], ensure_2d=False + ) + if isinstance(X, np.ndarray) or sparse.issparse(X): + # Keep a reference to X + reference_to_old_X = X + # Let us not impose fortran ordering so far: it is + # not useful for the cross-validation loop and will be done + # by the model fitting itself + + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. We also want to allow y to be 64 or 32 but check_X_y only + # allows to convert for 64. + check_X_params = dict( + accept_sparse="csc", dtype=[np.float64, np.float32], copy=False + ) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + if sparse.issparse(X): + if hasattr(reference_to_old_X, "data") and not np.may_share_memory( + reference_to_old_X.data, X.data + ): + # X is a sparse matrix and has been copied + copy_X = False + elif not np.may_share_memory(reference_to_old_X, X): + # X has been copied + copy_X = False + del reference_to_old_X + else: + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. We also want to allow y to be 64 or 32 but check_X_y only + # allows to convert for 64. + check_X_params = dict( + accept_sparse="csc", + dtype=[np.float64, np.float32], + order="F", + copy=copy_X, + ) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + copy_X = False + + check_consistent_length(X, y) + + if not self._is_multitask(): + if y.ndim > 1 and y.shape[1] > 1: + raise ValueError( + "For multi-task outputs, use MultiTask%s" % self.__class__.__name__ + ) + y = column_or_1d(y, warn=True) + else: + if sparse.issparse(X): + raise TypeError("X should be dense but a sparse matrix waspassed") + elif y.ndim == 1: + raise ValueError( + "For mono-task outputs, use %sCV" % self.__class__.__name__[9:] + ) + + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + model = self._get_estimator() + + # All LinearModelCV parameters except 'cv' are acceptable + path_params = self.get_params() + + # Pop `intercept` that is not parameter of the path function + path_params.pop("fit_intercept", None) + + if "l1_ratio" in path_params: + l1_ratios = np.atleast_1d(path_params["l1_ratio"]) + # For the first path, we need to set l1_ratio + path_params["l1_ratio"] = l1_ratios[0] + else: + l1_ratios = [ + 1, + ] + path_params.pop("cv", None) + path_params.pop("n_jobs", None) + + alphas = self.alphas + n_l1_ratio = len(l1_ratios) + + check_scalar_alpha = partial( + check_scalar, + target_type=Real, + min_val=0.0, + include_boundaries="left", + ) + + if alphas is None: + alphas = [ + _alpha_grid( + X, + y, + l1_ratio=l1_ratio, + fit_intercept=self.fit_intercept, + eps=self.eps, + n_alphas=self.n_alphas, + copy_X=self.copy_X, + ) + for l1_ratio in l1_ratios + ] + else: + # Making sure alphas entries are scalars. + for index, alpha in enumerate(alphas): + check_scalar_alpha(alpha, f"alphas[{index}]") + # Making sure alphas is properly ordered. 
+ alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1)) + + # We want n_alphas to be the number of alphas used for each l1_ratio. + n_alphas = len(alphas[0]) + path_params.update({"n_alphas": n_alphas}) + + path_params["copy_X"] = copy_X + # We are not computing in parallel, we can modify X + # inplace in the folds + if effective_n_jobs(self.n_jobs) > 1: + path_params["copy_X"] = False + + # init cross-validation generator + cv = check_cv(self.cv) + + if _routing_enabled(): + splitter_supports_sample_weight = get_routing_for_object(cv).consumes( + method="split", params=["sample_weight"] + ) + if ( + sample_weight is not None + and not splitter_supports_sample_weight + and not has_fit_parameter(self, "sample_weight") + ): + raise ValueError( + "The CV splitter and underlying estimator do not support" + " sample weights." + ) + + if splitter_supports_sample_weight: + params["sample_weight"] = sample_weight + + routed_params = process_routing(self, "fit", **params) + + if sample_weight is not None and not has_fit_parameter( + self, "sample_weight" + ): + # MultiTaskElasticNetCV does not (yet) support sample_weight + sample_weight = None + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split=Bunch()) + + # Compute path for all folds and compute MSE to get the best alpha + folds = list(cv.split(X, y, **routed_params.splitter.split)) + best_mse = np.inf + + # We do a double for loop folded in one, in order to be able to + # iterate in parallel on l1_ratio and folds + jobs = ( + delayed(_path_residuals)( + X, + y, + sample_weight, + train, + test, + self.fit_intercept, + self.path, + path_params, + alphas=this_alphas, + l1_ratio=this_l1_ratio, + X_order="F", + dtype=X.dtype.type, + ) + for this_l1_ratio, this_alphas in zip(l1_ratios, alphas) + for train, test in folds + ) + mse_paths = Parallel( + n_jobs=self.n_jobs, + verbose=self.verbose, + prefer="threads", + )(jobs) + mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1)) + # The mean is computed over folds. + mean_mse = np.mean(mse_paths, axis=1) + self.mse_path_ = np.squeeze(np.moveaxis(mse_paths, 2, 1)) + for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse): + i_best_alpha = np.argmin(mse_alphas) + this_best_mse = mse_alphas[i_best_alpha] + if this_best_mse < best_mse: + best_alpha = l1_alphas[i_best_alpha] + best_l1_ratio = l1_ratio + best_mse = this_best_mse + + self.l1_ratio_ = best_l1_ratio + self.alpha_ = best_alpha + if self.alphas is None: + self.alphas_ = np.asarray(alphas) + if n_l1_ratio == 1: + self.alphas_ = self.alphas_[0] + # Remove duplicate alphas in case alphas is provided. + else: + self.alphas_ = np.asarray(alphas[0]) + + # Refit the model with the parameters selected + common_params = { + name: value + for name, value in self.get_params().items() + if name in model.get_params() + } + model.set_params(**common_params) + model.alpha = best_alpha + model.l1_ratio = best_l1_ratio + model.copy_X = copy_X + precompute = getattr(self, "precompute", None) + if isinstance(precompute, str) and precompute == "auto": + model.precompute = False + + if sample_weight is None: + # MultiTaskElasticNetCV does not (yet) support sample_weight, even + # not sample_weight=None. 
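+            # Hence call `fit` without the `sample_weight` keyword at all.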
+ model.fit(X, y) + else: + model.fit(X, y, sample_weight=sample_weight) + if not hasattr(self, "l1_ratio"): + del self.l1_ratio_ + self.coef_ = model.coef_ + self.intercept_ = model.intercept_ + self.dual_gap_ = model.dual_gap_ + self.n_iter_ = model.n_iter_ + return self + + def _more_tags(self): + # Note: check_sample_weights_invariance(kind='ones') should work, but + # currently we can only mark a whole test as xfail. + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + splitter=check_cv(self.cv), + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + ) + return router + + +class LassoCV(RegressorMixin, LinearModelCV): + """Lasso linear model with iterative fitting along a regularization path. + + See glossary entry for :term:`cross-validation estimator`. + + The best model is selected by cross-validation. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If ``None`` alphas are set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : bool or int, default=False + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. 
See :term:`Glossary ` + for more details. + + positive : bool, default=False + If positive, restrict regression coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + alpha_ : float + The amount of penalization chosen by cross validation. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + mse_path_ : ndarray of shape (n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) + The grid of alphas used for fitting. + + dual_gap_ : float or ndarray of shape (n_targets,) + The dual gap at the end of the optimization for the optimal alpha + (``alpha_``). + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso path using LARS + algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : The Lasso is a linear model that estimates sparse coefficients. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoCV : Lasso linear model with iterative fitting along a regularization + path. + LassoLarsCV : Cross-validated Lasso using the LARS algorithm. + + Notes + ----- + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` argument of the `fit` + method should be directly passed as a Fortran-contiguous numpy array. + + For an example, see + :ref:`examples/linear_model/plot_lasso_model_selection.py + `. + + :class:`LassoCV` leads to different results than a hyperparameter + search using :class:`~sklearn.model_selection.GridSearchCV` with a + :class:`Lasso` model. In :class:`LassoCV`, a model for a given + penalty `alpha` is warm started using the coefficients of the + closest model (trained at the previous iteration) on the + regularization path. It tends to speed up the hyperparameter + search. + + Examples + -------- + >>> from sklearn.linear_model import LassoCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4, random_state=0) + >>> reg = LassoCV(cv=5, random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9993... 
+ >>> reg.predict(X[:1,]) + array([-78.4951...]) + """ + + path = staticmethod(lasso_path) + + def __init__( + self, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + super().__init__( + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + fit_intercept=fit_intercept, + precompute=precompute, + max_iter=max_iter, + tol=tol, + copy_X=copy_X, + cv=cv, + verbose=verbose, + n_jobs=n_jobs, + positive=positive, + random_state=random_state, + selection=selection, + ) + + def _get_estimator(self): + return Lasso() + + def _is_multitask(self): + return False + + def _more_tags(self): + return {"multioutput": False} + + +class ElasticNetCV(RegressorMixin, LinearModelCV): + """Elastic Net model with iterative fitting along a regularization path. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + l1_ratio : float or list of float, default=0.5 + Float between 0 and 1 passed to ElasticNet (scaling between + l1 and l2 penalties). For ``l1_ratio = 0`` + the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 + This parameter can be a list, in which case the different + values are tested by cross-validation and the one giving the best + prediction score is used. Note that a good choice of list of + values for l1_ratio is often to put more values close to 1 + (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, + .9, .95, .99, 1]``. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path, used for each l1_ratio. + + alphas : array-like, default=None + List of alphas where to compute the models. + If None alphas are set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + verbose : bool or int, default=0 + Amount of verbosity. 
+ + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + alpha_ : float + The amount of penalization chosen by cross validation. + + l1_ratio_ : float + The compromise between l1 and l2 penalization chosen by + cross validation. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + intercept_ : float or ndarray of shape (n_targets, n_features) + Independent term in the decision function. + + mse_path_ : ndarray of shape (n_l1_ratio, n_alpha, n_folds) + Mean square error for the test set on each fold, varying l1_ratio and + alpha. + + alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas) + The grid of alphas used for fitting, for each l1_ratio. + + dual_gap_ : float + The dual gaps at the end of the optimization for the optimal alpha. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + enet_path : Compute elastic net path with coordinate descent. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + + Notes + ----- + In `fit`, once the best parameters `l1_ratio` and `alpha` are found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` argument of the `fit` + method should be directly passed as a Fortran-contiguous numpy array. + + The parameter `l1_ratio` corresponds to alpha in the glmnet R package + while alpha corresponds to the lambda parameter in glmnet. + More specifically, the optimization objective is:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + If you are interested in controlling the L1 and L2 penalty + separately, keep in mind that this is equivalent to:: + + a * L1 + b * L2 + + for:: + + alpha = a + b and l1_ratio = a / (a + b). + + For an example, see + :ref:`examples/linear_model/plot_lasso_model_selection.py + `. 
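+
+    For instance, a penalty ``a * L1 + b * L2`` with ``a = 1.0`` and ``b = 0.5``
+    (arbitrary illustrative values) corresponds to::
+
+        a, b = 1.0, 0.5
+        alpha = a + b           # 1.5
+        l1_ratio = a / (a + b)  # 0.666...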
+ + Examples + -------- + >>> from sklearn.linear_model import ElasticNetCV + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=2, random_state=0) + >>> regr = ElasticNetCV(cv=5, random_state=0) + >>> regr.fit(X, y) + ElasticNetCV(cv=5, random_state=0) + >>> print(regr.alpha_) + 0.199... + >>> print(regr.intercept_) + 0.398... + >>> print(regr.predict([[0, 0]])) + [0.398...] + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + "l1_ratio": [Interval(Real, 0, 1, closed="both"), "array-like"], + } + + path = staticmethod(enet_path) + + def __init__( + self, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + cv=None, + copy_X=True, + verbose=0, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.tol = tol + self.cv = cv + self.copy_X = copy_X + self.verbose = verbose + self.n_jobs = n_jobs + self.positive = positive + self.random_state = random_state + self.selection = selection + + def _get_estimator(self): + return ElasticNet() + + def _is_multitask(self): + return False + + def _more_tags(self): + return {"multioutput": False} + + +############################################################################### +# Multi Task ElasticNet and Lasso models (with joint feature selection) + + +class MultiTaskElasticNet(Lasso): + """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer. + + The optimization objective for MultiTaskElasticNet is:: + + (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = sum_i sqrt(sum_j W_ij ^ 2) + + i.e. the sum of norms of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1/L2 term. Defaults to 1.0. + + l1_ratio : float, default=0.5 + The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. + For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it + is an L2 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
+ + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). If a 1D y is + passed in at fit (non multi-task usage), ``coef_`` is then a 1D array. + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : float + The dual gaps at the end of the optimization. + + eps_ : float + The tolerance scaled scaled by the variance of the target `y`. + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in + cross-validation. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + MultiTaskLasso : Multi-task Lasso model trained with L1/L2 + mixed-norm as regularizer. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X and y arguments of the fit + method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1) + >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) + MultiTaskElasticNet(alpha=0.1) + >>> print(clf.coef_) + [[0.45663524 0.45612256] + [0.45663524 0.45612256]] + >>> print(clf.intercept_) + [0.0872422 0.0872422] + """ + + _parameter_constraints: dict = { + **ElasticNet._parameter_constraints, + } + for param in ("precompute", "positive"): + _parameter_constraints.pop(param) + + def __init__( + self, + alpha=1.0, + *, + l1_ratio=0.5, + fit_intercept=True, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.random_state = random_state + self.selection = selection + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit MultiTaskElasticNet model with coordinate descent. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data. + y : ndarray of shape (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + Coordinate descent is an algorithm that considers each column of + data at a time hence it will automatically convert the X input + as a Fortran-contiguous numpy array if necessary. + + To avoid memory re-allocation it is advised to allocate the + initial data in memory directly using that format. 
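+
+        For instance (illustrative; assumes ``X`` and ``y`` are existing NumPy
+        arrays)::
+
+            import numpy as np
+
+            X = np.asfortranarray(X, dtype=np.float64)
+            y = np.asfortranarray(y, dtype=np.float64)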
+ """ + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be csr. + check_X_params = dict( + dtype=[np.float64, np.float32], + order="F", + copy=self.copy_X and self.fit_intercept, + ) + check_y_params = dict(ensure_2d=False, order="F") + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + check_consistent_length(X, y) + y = y.astype(X.dtype) + + if hasattr(self, "l1_ratio"): + model_str = "ElasticNet" + else: + model_str = "Lasso" + if y.ndim == 1: + raise ValueError("For mono-task outputs, use %s" % model_str) + + n_samples, n_features = X.shape + n_targets = y.shape[1] + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=False + ) + + if not self.warm_start or not hasattr(self, "coef_"): + self.coef_ = np.zeros( + (n_targets, n_features), dtype=X.dtype.type, order="F" + ) + + l1_reg = self.alpha * self.l1_ratio * n_samples + l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples + + self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory + + random = self.selection == "random" + + ( + self.coef_, + self.dual_gap_, + self.eps_, + self.n_iter_, + ) = cd_fast.enet_coordinate_descent_multi_task( + self.coef_, + l1_reg, + l2_reg, + X, + y, + self.max_iter, + self.tol, + check_random_state(self.random_state), + random, + ) + + # account for different objective scaling here and in cd_fast + self.dual_gap_ /= n_samples + + self._set_intercept(X_offset, y_offset, X_scale) + + # return self for chaining fit and predict calls + return self + + def _more_tags(self): + return {"multioutput_only": True} + + +class MultiTaskLasso(MultiTaskElasticNet): + """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1/L2 term. Defaults to 1.0. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. 
+ + Attributes + ---------- + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + eps_ : float + The tolerance scaled scaled by the variance of the target `y`. + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Lasso: Linear Model trained with L1 prior as regularizer (aka the Lasso). + MultiTaskLassoCV: Multi-task L1 regularized linear model with built-in + cross-validation. + MultiTaskElasticNetCV: Multi-task L1/L2 ElasticNet with built-in cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X and y arguments of the fit + method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskLasso(alpha=0.1) + >>> clf.fit([[0, 1], [1, 2], [2, 4]], [[0, 0], [1, 1], [2, 3]]) + MultiTaskLasso(alpha=0.1) + >>> print(clf.coef_) + [[0. 0.60809415] + [0. 0.94592424]] + >>> print(clf.intercept_) + [-0.41888636 -0.87382323] + """ + + _parameter_constraints: dict = { + **MultiTaskElasticNet._parameter_constraints, + } + _parameter_constraints.pop("l1_ratio") + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + random_state=None, + selection="cyclic", + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.l1_ratio = 1.0 + self.random_state = random_state + self.selection = selection + + +class MultiTaskElasticNetCV(RegressorMixin, LinearModelCV): + """Multi-task L1/L2 ElasticNet with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + The optimization objective for MultiTaskElasticNet is:: + + (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.15 + + Parameters + ---------- + l1_ratio : float or list of float, default=0.5 + The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. + For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it + is an L2 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. + This parameter can be a list, in which case the different + values are tested by cross-validation and the one giving the best + prediction score is used. Note that a good choice of list of + values for l1_ratio is often to put more values close to 1 + (i.e. Lasso) and less close to 0 (i.e. 
Ridge), as in ``[.1, .5, .7, + .9, .95, .99, 1]``. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If not provided, set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + verbose : bool or int, default=0 + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. Note that this is + used only if multiple values for l1_ratio are given. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + alpha_ : float + The amount of penalization chosen by cross validation. + + mse_path_ : ndarray of shape (n_alphas, n_folds) or \ + (n_l1_ratio, n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas) + The grid of alphas used for fitting, for each l1_ratio. + + l1_ratio_ : float + Best l1_ratio obtained by cross-validation. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + dual_gap_ : float + The dual gap at the end of the optimization for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNet : Multi-task L1/L2 ElasticNet with built-in cross-validation. + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + MultiTaskLassoCV : Multi-task Lasso model trained with L1 norm + as regularizer and built-in cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + In `fit`, once the best parameters `l1_ratio` and `alpha` are found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` and `y` arguments of the + `fit` method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskElasticNetCV(cv=3) + >>> clf.fit([[0,0], [1, 1], [2, 2]], + ... [[0, 0], [1, 1], [2, 2]]) + MultiTaskElasticNetCV(cv=3) + >>> print(clf.coef_) + [[0.52875032 0.46958558] + [0.52875032 0.46958558]] + >>> print(clf.intercept_) + [0.00166409 0.00166409] + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + "l1_ratio": [Interval(Real, 0, 1, closed="both"), "array-like"], + } + _parameter_constraints.pop("precompute") + _parameter_constraints.pop("positive") + + path = staticmethod(enet_path) + + def __init__( + self, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + max_iter=1000, + tol=1e-4, + cv=None, + copy_X=True, + verbose=0, + n_jobs=None, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.tol = tol + self.cv = cv + self.copy_X = copy_X + self.verbose = verbose + self.n_jobs = n_jobs + self.random_state = random_state + self.selection = selection + + def _get_estimator(self): + return MultiTaskElasticNet() + + def _is_multitask(self): + return True + + def _more_tags(self): + return {"multioutput_only": True} + + # This is necessary as LinearModelCV now supports sample_weight while + # MultiTaskElasticNet does not (yet). + def fit(self, X, y, **params): + """Fit MultiTaskElasticNet model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + y : ndarray of shape (n_samples, n_targets) + Training target variable. Will be cast to X's dtype if necessary. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns MultiTaskElasticNet instance. + """ + return super().fit(X, y, **params) + + +class MultiTaskLassoCV(RegressorMixin, LinearModelCV): + """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer. + + See glossary entry for :term:`cross-validation estimator`. 
+ + The optimization objective for MultiTaskLasso is:: + + (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.15 + + Parameters + ---------- + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If not provided, set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : bool or int, default=False + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. Note that this is + used only if multiple values for l1_ratio are given. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + alpha_ : float + The amount of penalization chosen by cross validation. + + mse_path_ : ndarray of shape (n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) + The grid of alphas used for fitting. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + dual_gap_ : float + The dual gap at the end of the optimization for the optimal alpha. 
+ + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 + mixed-norm as regularizer. + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in + cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` and `y` arguments of the + `fit` method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn.linear_model import MultiTaskLassoCV + >>> from sklearn.datasets import make_regression + >>> from sklearn.metrics import r2_score + >>> X, y = make_regression(n_targets=2, noise=4, random_state=0) + >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y) + >>> r2_score(y, reg.predict(X)) + 0.9994... + >>> reg.alpha_ + 0.5713... + >>> reg.predict(X[:1,]) + array([[153.7971..., 94.9015...]]) + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + } + _parameter_constraints.pop("precompute") + _parameter_constraints.pop("positive") + + path = staticmethod(lasso_path) + + def __init__( + self, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + random_state=None, + selection="cyclic", + ): + super().__init__( + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + copy_X=copy_X, + cv=cv, + verbose=verbose, + n_jobs=n_jobs, + random_state=random_state, + selection=selection, + ) + + def _get_estimator(self): + return MultiTaskLasso() + + def _is_multitask(self): + return True + + def _more_tags(self): + return {"multioutput_only": True} + + # This is necessary as LinearModelCV now supports sample_weight while + # MultiTaskElasticNet does not (yet). + def fit(self, X, y, **params): + """Fit MultiTaskLasso model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data. + y : ndarray of shape (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. 
+ """ + return super().fit(X, y, **params) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b82bbd77bcf9a16040ac2cebb3f655811bbff84 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py @@ -0,0 +1,15 @@ +# License: BSD 3 clause + +from .glm import ( + GammaRegressor, + PoissonRegressor, + TweedieRegressor, + _GeneralizedLinearRegressor, +) + +__all__ = [ + "_GeneralizedLinearRegressor", + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0ec599d2d517a9e79b3bed6c26f38d5a931fdc5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cd54a4e93e4f8d044d3904001b62abce5a4f764 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae723fd7e550590a4e3da819c683b80b5e36d008 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9b431fd2377dba50a6fabd703ae7c0334033e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py @@ -0,0 +1,525 @@ +""" +Newton solver for Generalized Linear Models +""" + +# Author: Christian Lorentzen +# License: BSD 3 clause + +import warnings +from abc import ABC, abstractmethod + +import numpy as np +import scipy.linalg +import scipy.optimize + +from ..._loss.loss import HalfSquaredError +from ...exceptions import ConvergenceWarning +from ...utils.optimize import _check_optimize_result +from .._linear_loss import LinearModelLoss + + +class NewtonSolver(ABC): + """Newton solver for GLMs. + + This class implements Newton/2nd-order optimization routines for GLMs. Each Newton + iteration aims at finding the Newton step which is done by the inner solver. 
With + Hessian H, gradient g and coefficients coef, one step solves: + + H @ coef_newton = -g + + For our GLM / LinearModelLoss, we have gradient g and Hessian H: + + g = X.T @ loss.gradient + l2_reg_strength * coef + H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity + + Backtracking line search updates coef = coef_old + t * coef_newton for some t in + (0, 1]. + + This is a base class, actual implementations (child classes) may deviate from the + above pattern and use structure specific tricks. + + Usage pattern: + - initialize solver: sol = NewtonSolver(...) + - solve the problem: sol.solve(X, y, sample_weight) + + References + ---------- + - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization" + 2nd edition + https://doi.org/10.1007/978-0-387-40065-5 + + - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization." + Cambridge University Press, 2004. + https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Initial coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + + linear_loss : LinearModelLoss + The loss to be minimized. + + l2_reg_strength : float, default=0.0 + L2 regularization strength. + + tol : float, default=1e-4 + The optimization problem is solved when each of the following condition is + fulfilled: + 1. maximum |gradient| <= tol + 2. Newton decrement d: 1/2 * d^2 <= tol + + max_iter : int, default=100 + Maximum number of Newton steps allowed. + + n_threads : int, default=1 + Number of OpenMP threads to use for the computation of the Hessian and gradient + of the loss function. + + Attributes + ---------- + coef_old : ndarray of shape coef.shape + Coefficient of previous iteration. + + coef_newton : ndarray of shape coef.shape + Newton step. + + gradient : ndarray of shape coef.shape + Gradient of the loss w.r.t. the coefficients. + + gradient_old : ndarray of shape coef.shape + Gradient of previous iteration. + + loss_value : float + Value of objective function = loss + penalty. + + loss_value_old : float + Value of objective function of previous itertion. + + raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes) + + converged : bool + Indicator for convergence of the solver. + + iteration : int + Number of Newton steps, i.e. calls to inner_solve + + use_fallback_lbfgs_solve : bool + If set to True, the solver will resort to call LBFGS to finish the optimisation + procedure in case of convergence issues. + + gradient_times_newton : float + gradient @ coef_newton, set in inner_solve and used by line_search. If the + Newton step is a descent direction, this is negative. 
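+
+    Notes
+    -----
+    Ignoring the structure-specific tricks used by the concrete subclasses, a
+    single dense Newton update with ``H``, ``g`` and step size ``t`` as above
+    amounts to (illustrative sketch only)::
+
+        import numpy as np
+
+        coef_newton = np.linalg.solve(H, -g)  # solves H @ coef_newton = -g
+        coef = coef_old + t * coef_newton     # t from the backtracking line search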
+ """ + + def __init__( + self, + *, + coef, + linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True), + l2_reg_strength=0.0, + tol=1e-4, + max_iter=100, + n_threads=1, + verbose=0, + ): + self.coef = coef + self.linear_loss = linear_loss + self.l2_reg_strength = l2_reg_strength + self.tol = tol + self.max_iter = max_iter + self.n_threads = n_threads + self.verbose = verbose + + def setup(self, X, y, sample_weight): + """Precomputations + + If None, initializes: + - self.coef + Sets: + - self.raw_prediction + - self.loss_value + """ + _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X) + self.loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=self.raw_prediction, + ) + + @abstractmethod + def update_gradient_hessian(self, X, y, sample_weight): + """Update gradient and Hessian.""" + + @abstractmethod + def inner_solve(self, X, y, sample_weight): + """Compute Newton step. + + Sets: + - self.coef_newton + - self.gradient_times_newton + """ + + def fallback_lbfgs_solve(self, X, y, sample_weight): + """Fallback solver in case of emergency. + + If a solver detects convergence problems, it may fall back to this methods in + the hope to exit with success instead of raising an error. + + Sets: + - self.coef + - self.converged + """ + opt_res = scipy.optimize.minimize( + self.linear_loss.loss_gradient, + self.coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + "ftol": 64 * np.finfo(np.float64).eps, + }, + args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res) + self.coef = opt_res.x + self.converged = opt_res.status == 0 + + def line_search(self, X, y, sample_weight): + """Backtracking line search. + + Sets: + - self.coef_old + - self.coef + - self.loss_value_old + - self.loss_value + - self.gradient_old + - self.gradient + - self.raw_prediction + """ + # line search parameters + beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11 + eps = 16 * np.finfo(self.loss_value.dtype).eps + t = 1 # step size + + # gradient_times_newton = self.gradient @ self.coef_newton + # was computed in inner_solve. + armijo_term = sigma * self.gradient_times_newton + _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw( + self.coef_newton, X + ) + + self.coef_old = self.coef + self.loss_value_old = self.loss_value + self.gradient_old = self.gradient + + # np.sum(np.abs(self.gradient_old)) + sum_abs_grad_old = -1 + + is_verbose = self.verbose >= 2 + if is_verbose: + print(" Backtracking Line Search") + print(f" eps=10 * finfo.eps={eps}") + + for i in range(21): # until and including t = beta**20 ~ 1e-6 + self.coef = self.coef_old + t * self.coef_newton + raw = self.raw_prediction + t * raw_prediction_newton + self.loss_value, self.gradient = self.linear_loss.loss_gradient( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=raw, + ) + # Note: If coef_newton is too large, loss_gradient may produce inf values, + # potentially accompanied by a RuntimeWarning. + # This case will be captured by the Armijo condition. + + # 1. Check Armijo / sufficient decrease condition. + # The smaller (more negative) the better. 
+ loss_improvement = self.loss_value - self.loss_value_old + check = loss_improvement <= t * armijo_term + if is_verbose: + print( + f" line search iteration={i+1}, step size={t}\n" + f" check loss improvement <= armijo term: {loss_improvement} " + f"<= {t * armijo_term} {check}" + ) + if check: + break + # 2. Deal with relative loss differences around machine precision. + tiny_loss = np.abs(self.loss_value_old * eps) + check = np.abs(loss_improvement) <= tiny_loss + if is_verbose: + print( + " check loss |improvement| <= eps * |loss_old|:" + f" {np.abs(loss_improvement)} <= {tiny_loss} {check}" + ) + if check: + if sum_abs_grad_old < 0: + sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1) + # 2.1 Check sum of absolute gradients as alternative condition. + sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1) + check = sum_abs_grad < sum_abs_grad_old + if is_verbose: + print( + " check sum(|gradient|) < sum(|gradient_old|): " + f"{sum_abs_grad} < {sum_abs_grad_old} {check}" + ) + if check: + break + + t *= beta + else: + warnings.warn( + ( + f"Line search of Newton solver {self.__class__.__name__} at" + f" iteration #{self.iteration} did no converge after 21 line search" + " refinement iterations. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print(" Line search did not converge and resorts to lbfgs instead.") + self.use_fallback_lbfgs_solve = True + return + + self.raw_prediction = raw + + def check_convergence(self, X, y, sample_weight): + """Check for convergence. + + Sets self.converged. + """ + if self.verbose: + print(" Check Convergence") + # Note: Checking maximum relative change of coefficient <= tol is a bad + # convergence criterion because even a large step could have brought us close + # to the true minimum. + # coef_step = self.coef - self.coef_old + # check = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old))) + + # 1. Criterion: maximum |gradient| <= tol + # The gradient was already updated in line_search() + check = np.max(np.abs(self.gradient)) + if self.verbose: + print(f" 1. max |gradient| {check} <= {self.tol}") + if check > self.tol: + return + + # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol + # d = sqrt(grad @ hessian^-1 @ grad) + # = sqrt(coef_newton @ hessian @ coef_newton) + # See Boyd, Vanderberghe (2009) "Convex Optimization" Chapter 9.5.1. + d2 = self.coef_newton @ self.hessian @ self.coef_newton + if self.verbose: + print(f" 2. Newton decrement {0.5 * d2} <= {self.tol}") + if 0.5 * d2 > self.tol: + return + + if self.verbose: + loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + ) + print(f" Solver did converge at loss = {loss_value}.") + self.converged = True + + def finalize(self, X, y, sample_weight): + """Finalize the solvers results. + + Some solvers may need this, others not. + """ + pass + + def solve(self, X, y, sample_weight): + """Solve the optimization problem. + + This is the main routine. + + Order of calls: + self.setup() + while iteration: + self.update_gradient_hessian() + self.inner_solve() + self.line_search() + self.check_convergence() + self.finalize() + + Returns + ------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Solution of the optimization problem. 
+ """ + # setup usually: + # - initializes self.coef if needed + # - initializes and calculates self.raw_predictions, self.loss_value + self.setup(X=X, y=y, sample_weight=sample_weight) + + self.iteration = 1 + self.converged = False + self.use_fallback_lbfgs_solve = False + + while self.iteration <= self.max_iter and not self.converged: + if self.verbose: + print(f"Newton iter={self.iteration}") + + self.use_fallback_lbfgs_solve = False # Fallback solver. + + # 1. Update Hessian and gradient + self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight) + + # TODO: + # if iteration == 1: + # We might stop early, e.g. we already are close to the optimum, + # usually detected by zero gradients at this stage. + + # 2. Inner solver + # Calculate Newton step/direction + # This usually sets self.coef_newton and self.gradient_times_newton. + self.inner_solve(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 3. Backtracking line search + # This usually sets self.coef_old, self.coef, self.loss_value_old + # self.loss_value, self.gradient_old, self.gradient, + # self.raw_prediction. + self.line_search(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 4. Check convergence + # Sets self.converged. + self.check_convergence(X=X, y=y, sample_weight=sample_weight) + + # 5. Next iteration + self.iteration += 1 + + if not self.converged: + if self.use_fallback_lbfgs_solve: + # Note: The fallback solver circumvents check_convergence and relies on + # the convergence checks of lbfgs instead. Enough warnings have been + # raised on the way. + self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight) + else: + warnings.warn( + ( + f"Newton solver did not converge after {self.iteration - 1} " + "iterations." + ), + ConvergenceWarning, + ) + + self.iteration -= 1 + self.finalize(X=X, y=y, sample_weight=sample_weight) + return self.coef + + +class NewtonCholeskySolver(NewtonSolver): + """Cholesky based Newton solver. + + Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear + solver. + """ + + def setup(self, X, y, sample_weight): + super().setup(X=X, y=y, sample_weight=sample_weight) + n_dof = X.shape[1] + if self.linear_loss.fit_intercept: + n_dof += 1 + self.gradient = np.empty_like(self.coef) + self.hessian = np.empty_like(self.coef, shape=(n_dof, n_dof)) + + def update_gradient_hessian(self, X, y, sample_weight): + _, _, self.hessian_warning = self.linear_loss.gradient_hessian( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + gradient_out=self.gradient, + hessian_out=self.hessian, + raw_prediction=self.raw_prediction, # this was updated in line_search + ) + + def inner_solve(self, X, y, sample_weight): + if self.hessian_warning: + warnings.warn( + ( + f"The inner solver of {self.__class__.__name__} detected a " + "pointwise hessian with many negative values at iteration " + f"#{self.iteration}. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print( + " The inner solver detected a pointwise Hessian with many " + "negative values and resorts to lbfgs instead." 
+ ) + self.use_fallback_lbfgs_solve = True + return + + try: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + self.coef_newton = scipy.linalg.solve( + self.hessian, -self.gradient, check_finite=False, assume_a="sym" + ) + self.gradient_times_newton = self.gradient @ self.coef_newton + if self.gradient_times_newton > 0: + if self.verbose: + print( + " The inner solver found a Newton step that is not a " + "descent direction and resorts to LBFGS steps instead." + ) + self.use_fallback_lbfgs_solve = True + return + except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e: + warnings.warn( + f"The inner solver of {self.__class__.__name__} stumbled upon a " + "singular or very ill-conditioned Hessian matrix at iteration " + f"#{self.iteration}. It will now resort to lbfgs instead.\n" + "Further options are to use another solver or to avoid such situation " + "in the first place. Possible remedies are removing collinear features" + " of X or increasing the penalization strengths.\n" + "The original Linear Algebra message was:\n" + + str(e), + scipy.linalg.LinAlgWarning, + ) + # Possible causes: + # 1. hess_pointwise is negative. But this is already taken care in + # LinearModelLoss.gradient_hessian. + # 2. X is singular or ill-conditioned + # This might be the most probable cause. + # + # There are many possible ways to deal with this situation. Most of them + # add, explicitly or implicitly, a matrix to the hessian to make it + # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed. + # Instead, we resort to lbfgs. + if self.verbose: + print( + " The inner solver stumbled upon an singular or ill-conditioned " + "Hessian matrix and resorts to LBFGS instead." + ) + self.use_fallback_lbfgs_solve = True + return diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py new file mode 100644 index 0000000000000000000000000000000000000000..4cac889a4da518e3116c3243be5d3701c34d1b68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py @@ -0,0 +1,904 @@ +""" +Generalized Linear Models with Exponential Dispersion Family +""" + +# Author: Christian Lorentzen +# some parts and tricks stolen from other sklearn files. +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np +import scipy.optimize + +from ..._loss.loss import ( + HalfGammaLoss, + HalfPoissonLoss, + HalfSquaredError, + HalfTweedieLoss, + HalfTweedieLossIdentity, +) +from ...base import BaseEstimator, RegressorMixin, _fit_context +from ...utils import check_array +from ...utils._openmp_helpers import _openmp_effective_n_threads +from ...utils._param_validation import Hidden, Interval, StrOptions +from ...utils.optimize import _check_optimize_result +from ...utils.validation import _check_sample_weight, check_is_fitted +from .._linear_loss import LinearModelLoss +from ._newton_solver import NewtonCholeskySolver, NewtonSolver + + +class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator): + """Regression via a penalized Generalized Linear Model (GLM). + + GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and + predicting the mean of the target y as y_pred=h(X*w) with coefficients w. 
+ Therefore, the fit minimizes the following objective function with L2 priors as + regularizer:: + + 1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w)) + 1/2 * alpha * ||w||_2^2 + + with inverse link function h, s=sample_weight and per observation (unit) deviance + deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative + log-likelihood up to a constant (in w) term. + The parameter ``alpha`` corresponds to the lambda parameter in glmnet. + + Instead of implementing the EDM family and a link function separately, we directly + use the loss functions `from sklearn._loss` which have the link functions included + in them for performance reasons. We pick the loss functions that implement + (1/2 times) EDM deviances. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the penalty term and thus determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (X @ coef + intercept). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_``. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + _base_loss : BaseLoss, default=HalfSquaredError() + This is set during fit via `self._get_loss()`. + A `_base_loss` contains a specific loss function as well as the link + function. The loss to be minimized specifies the distributional assumption of + the GLM, i.e. the distribution from the EDM. 
Here are some examples: + + ======================= ======== ========================== + _base_loss Link Target Domain + ======================= ======== ========================== + HalfSquaredError identity y any real number + HalfPoissonLoss log 0 <= y + HalfGammaLoss log 0 < y + HalfTweedieLoss log dependent on tweedie power + HalfTweedieLossIdentity identity dependent on tweedie power + ======================= ======== ========================== + + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link, + we have `y_pred = exp(X @ coeff + intercept)`. + """ + + # We allow for NewtonSolver classes for the "solver" parameter but do not + # make them public in the docstrings. This facilitates testing and + # benchmarking. + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0.0, None, closed="left")], + "fit_intercept": ["boolean"], + "solver": [ + StrOptions({"lbfgs", "newton-cholesky"}), + Hidden(type), + ], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "warm_start": ["boolean"], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.solver = solver + self.max_iter = max_iter + self.tol = tol + self.warm_start = warm_start + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit a Generalized Linear Model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Fitted model. + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csc", "csr"], + dtype=[np.float64, np.float32], + y_numeric=True, + multi_output=False, + ) + + # required by losses + if self.solver == "lbfgs": + # lbfgs will force coef and therefore raw_prediction to be float64. The + # base_loss needs y, X @ coef and sample_weight all of same dtype + # (and contiguous). + loss_dtype = np.float64 + else: + loss_dtype = min(max(y.dtype, X.dtype), np.float64) + y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. + sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype) + + n_samples, n_features = X.shape + self._base_loss = self._get_loss() + + linear_loss = LinearModelLoss( + base_loss=self._base_loss, + fit_intercept=self.fit_intercept, + ) + + if not linear_loss.base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {self._base_loss.__class__.__name__!r}." + ) + + # TODO: if alpha=0 check that X is not rank deficient + + # NOTE: Rescaling of sample_weight: + # We want to minimize + # obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance) + # + 1/2 * alpha * L2, + # with + # deviance = 2 * loss. + # The objective is invariant to multiplying sample_weight by a constant. We + # could choose this constant such that sum(sample_weight) = 1 in order to end + # up with + # obj = sum(sample_weight * loss) + 1/2 * alpha * L2. 
+ # But LinearModelLoss.loss() already computes + # average(loss, weights=sample_weight) + # Thus, without rescaling, we have + # obj = LinearModelLoss.loss(...) + + if self.warm_start and hasattr(self, "coef_"): + if self.fit_intercept: + # LinearModelLoss needs intercept at the end of coefficient array. + coef = np.concatenate((self.coef_, np.array([self.intercept_]))) + else: + coef = self.coef_ + coef = coef.astype(loss_dtype, copy=False) + else: + coef = linear_loss.init_zero_coef(X, dtype=loss_dtype) + if self.fit_intercept: + coef[-1] = linear_loss.base_loss.link.link( + np.average(y, weights=sample_weight) + ) + + l2_reg_strength = self.alpha + n_threads = _openmp_effective_n_threads() + + # Algorithms for optimization: + # Note again that our losses implement 1/2 * deviance. + if self.solver == "lbfgs": + func = linear_loss.loss_gradient + + opt_res = scipy.optimize.minimize( + func, + coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + # The constant 64 was found empirically to pass the test suite. + # The point is that ftol is very small, but a bit larger than + # machine precision for float64, which is the dtype used by lbfgs. + "ftol": 64 * np.finfo(float).eps, + }, + args=(X, y, sample_weight, l2_reg_strength, n_threads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res) + coef = opt_res.x + elif self.solver == "newton-cholesky": + sol = NewtonCholeskySolver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + verbose=self.verbose, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + elif issubclass(self.solver, NewtonSolver): + sol = self.solver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + else: + raise ValueError(f"Invalid solver={self.solver}.") + + if self.fit_intercept: + self.intercept_ = coef[-1] + self.coef_ = coef[:-1] + else: + # set intercept to zero as the other linear models do + self.intercept_ = 0.0 + self.coef_ = coef + + return self + + def _linear_predictor(self, X): + """Compute the linear_predictor = `X @ coef_ + intercept_`. + + Note that we often use the term raw_prediction instead of linear predictor. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values of linear predictor. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "coo"], + dtype=[np.float64, np.float32], + ensure_2d=True, + allow_nd=False, + reset=False, + ) + return X @ self.coef_ + self.intercept_ + + def predict(self, X): + """Predict using GLM with feature matrix X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values. + """ + # check_array is done in _linear_predictor + raw_prediction = self._linear_predictor(X) + y_pred = self._base_loss.link.inverse(raw_prediction) + return y_pred + + def score(self, X, y, sample_weight=None): + """Compute D^2, the percentage of deviance explained. + + D^2 is a generalization of the coefficient of determination R^2. 
+ R^2 uses squared error and D^2 uses the deviance of this GLM, see the + :ref:`User Guide `. + + D^2 is defined as + :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, + :math:`D_{null}` is the null deviance, i.e. the deviance of a model + with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. + The mean :math:`\\bar{y}` is averaged by sample_weight. + Best possible score is 1.0 and it can be negative (because the model + can be arbitrarily worse). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) + True values of target. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + D^2 of self.predict(X) w.r.t. y. + """ + # TODO: Adapt link to User Guide in the docstring, once + # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. + # + # Note, default score defined in RegressorMixin is R^2 score. + # TODO: make D^2 a score function in module metrics (and thereby get + # input validation and so on) + raw_prediction = self._linear_predictor(X) # validates X + # required by losses + y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. + sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) + + base_loss = self._base_loss + + if not base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {base_loss.__name__}." + ) + + constant = np.average( + base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None), + weights=sample_weight, + ) + + # Missing factor of 2 in deviance cancels out. + deviance = base_loss( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=1, + ) + y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) + deviance_null = base_loss( + y_true=y, + raw_prediction=np.tile(y_mean, y.shape[0]), + sample_weight=sample_weight, + n_threads=1, + ) + return 1 - (deviance + constant) / (deviance_null + constant) + + def _more_tags(self): + try: + # Create instance of BaseLoss if fit wasn't called yet. This is necessary as + # TweedieRegressor might set the used loss during fit different from + # self._base_loss. + base_loss = self._get_loss() + return {"requires_positive_y": not base_loss.in_y_true_range(-1.0)} + except (ValueError, AttributeError, TypeError): + # This happens when the link or power parameter of TweedieRegressor is + # invalid. We fallback on the default tags in that case. + return {} + + def _get_loss(self): + """This is only necessary because of the link and power arguments of the + TweedieRegressor. + + Note that we do not need to pass sample_weight to the loss class as this is + only needed to set loss.constant_hessian on which GLMs do not rely. + """ + return HalfSquaredError() + + +class PoissonRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Poisson distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). 
+ Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Actual number of iterations used in the solver. + + See Also + -------- + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.PoissonRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [12, 17, 22, 21] + >>> clf.fit(X, y) + PoissonRegressor() + >>> clf.score(X, y) + 0.990... + >>> clf.coef_ + array([0.121..., 0.158...]) + >>> clf.intercept_ + 2.088... + >>> clf.predict([[1, 1], [3, 4]]) + array([10.676..., 21.875...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfPoissonLoss() + + +class GammaRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Gamma distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. 
``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor `X @ coef_ + intercept_`. + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for `coef_` and `intercept_`. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_iter_ : int + Actual number of iterations used in the solver. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.GammaRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [19, 26, 33, 30] + >>> clf.fit(X, y) + GammaRegressor() + >>> clf.score(X, y) + 0.773... + >>> clf.coef_ + array([0.072..., 0.066...]) + >>> clf.intercept_ + 2.896... + >>> clf.predict([[1, 0], [2, 8]]) + array([19.483..., 35.795...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfGammaLoss() + + +class TweedieRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Tweedie distribution. 
+ + This estimator can be used to model different GLMs depending on the + ``power`` parameter, which determines the underlying distribution. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + power : float, default=0 + The power determines the underlying target distribution according + to the following table: + + +-------+------------------------+ + | Power | Distribution | + +=======+========================+ + | 0 | Normal | + +-------+------------------------+ + | 1 | Poisson | + +-------+------------------------+ + | (1,2) | Compound Poisson Gamma | + +-------+------------------------+ + | 2 | Gamma | + +-------+------------------------+ + | 3 | Inverse Gaussian | + +-------+------------------------+ + + For ``0 < power < 1``, no distribution exists. + + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). + + link : {'auto', 'identity', 'log'}, default='auto' + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets + the link depending on the chosen `power` parameter as follows: + + - 'identity' for ``power <= 0``, e.g. for the Normal distribution + - 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian + distributions + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + GammaRegressor : Generalized Linear Model with a Gamma distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.TweedieRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [2, 3.5, 5, 5.5] + >>> clf.fit(X, y) + TweedieRegressor() + >>> clf.score(X, y) + 0.839... + >>> clf.coef_ + array([0.599..., 0.299...]) + >>> clf.intercept_ + 1.600... + >>> clf.predict([[1, 1], [3, 4]]) + array([2.500..., 4.599...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints, + "power": [Interval(Real, None, None, closed="neither")], + "link": [StrOptions({"auto", "identity", "log"})], + } + + def __init__( + self, + *, + power=0.0, + alpha=1.0, + fit_intercept=True, + link="auto", + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + self.link = link + self.power = power + + def _get_loss(self): + if self.link == "auto": + if self.power <= 0: + # identity link + return HalfTweedieLossIdentity(power=self.power) + else: + # log link + return HalfTweedieLoss(power=self.power) + + if self.link == "log": + return HalfTweedieLoss(power=self.power) + + if self.link == "identity": + return HalfTweedieLossIdentity(power=self.power) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..588cf7e93eef01b82eaf24c87c36df22ea21dade --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py @@ -0,0 +1 @@ +# License: BSD 3 clause diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d253cb3dd2367b82b6f25d71bfc6d3ec01185afa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b267a6a046cf4f141fa76b4b61a912a420709264 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py new file mode 100644 index 0000000000000000000000000000000000000000..5256a5f37027294bf0e3545d5a42bd77715e4177 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py @@ -0,0 +1,1112 @@ +# Authors: Christian Lorentzen +# +# License: BSD 3 clause + +import itertools +import warnings +from functools import partial + +import numpy as np +import pytest +import scipy +from numpy.testing import assert_allclose +from scipy import linalg +from scipy.optimize import minimize, root + +from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss +from sklearn._loss.link import IdentityLink, LogLink +from sklearn.base import clone +from sklearn.datasets import make_low_rank_matrix, make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + GammaRegressor, + PoissonRegressor, + Ridge, + TweedieRegressor, +) +from sklearn.linear_model._glm import _GeneralizedLinearRegressor +from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver +from sklearn.linear_model._linear_loss import LinearModelLoss +from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance +from sklearn.model_selection import train_test_split + +SOLVERS = ["lbfgs", "newton-cholesky"] + + +class BinomialRegressor(_GeneralizedLinearRegressor): + def _get_loss(self): + return HalfBinomialLoss() + + +def _special_minimize(fun, grad, x, tol_NM, tol): + # Find good starting point by Nelder-Mead + res_NM = minimize( + fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM} + ) + # Now refine via root finding on the gradient of the function, which is + # more precise than minimizing the function itself. + res = root( + grad, + res_NM.x, + method="lm", + options={"ftol": tol, "xtol": tol, "gtol": tol}, + ) + return res.x + + +@pytest.fixture(scope="module") +def regression_data(): + X, y = make_regression( + n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2 + ) + return X, y + + +@pytest.fixture( + params=itertools.product( + ["long", "wide"], + [ + BinomialRegressor(), + PoissonRegressor(), + GammaRegressor(), + # TweedieRegressor(power=3.0), # too difficult + # TweedieRegressor(power=0, link="log"), # too difficult + TweedieRegressor(power=1.5), + ], + ), + ids=lambda param: f"{param[0]}-{param[1]}", +) +def glm_dataset(global_random_seed, request): + """Dataset with GLM solutions, well conditioned X. + + This is inspired by ols_ridge_dataset in test_ridge.py. + + The construction is based on the SVD decomposition of X = U S V'. + + Parameters + ---------- + type : {"long", "wide"} + If "long", then n_samples > n_features. + If "wide", then n_features > n_samples. + model : a GLM model + + For "wide", we return the minimum norm solution: + + min ||w||_2 subject to w = argmin deviance(X, y, w) + + Note that the deviance is always minimized if y = inverse_link(X w) is possible to + achieve, which it is in the wide data case. Therefore, we can construct the + solution with minimum norm like (wide) OLS: + + min ||w||_2 subject to link(y) = raw_prediction = X w + + Returns + ------- + model : GLM model + X : ndarray + Last column of 1, i.e. intercept. + y : ndarray + coef_unpenalized : ndarray + Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in + case of ambiguity) + Last coefficient is intercept. + coef_penalized : ndarray + GLM solution with alpha=l2_reg_strength=1, i.e. + min 1/n * sum(loss) + ||w[:-1]||_2^2. + Last coefficient is intercept. + l2_reg_strength : float + Always equal 1. 
+ """ + data_type, model = request.param + # Make larger dim more than double as big as the smaller one. + # This helps when constructing singular matrices like (X, X). + if data_type == "long": + n_samples, n_features = 12, 4 + else: + n_samples, n_features = 4, 12 + k = min(n_samples, n_features) + rng = np.random.RandomState(global_random_seed) + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features, + effective_rank=k, + tail_strength=0.1, + random_state=rng, + ) + X[:, -1] = 1 # last columns acts as intercept + U, s, Vt = linalg.svd(X, full_matrices=False) + assert np.all(s > 1e-3) # to be sure + assert np.max(s) / np.min(s) < 100 # condition number of X + + if data_type == "long": + coef_unpenalized = rng.uniform(low=1, high=3, size=n_features) + coef_unpenalized *= rng.choice([-1, 1], size=n_features) + raw_prediction = X @ coef_unpenalized + else: + raw_prediction = rng.uniform(low=-3, high=3, size=n_samples) + # minimum norm solution min ||w||_2 such that raw_prediction = X w: + # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction + coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True) + sw = np.full(shape=n_samples, fill_value=1 / n_samples) + y = linear_loss.base_loss.link.inverse(raw_prediction) + + # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with + # optimizer. Note that the problem is well conditioned such that we get accurate + # results. + l2_reg_strength = 1 + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_with_intercept = _special_minimize( + fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14 + ) + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False) + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_without_intercept = _special_minimize( + fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14 + ) + + # To be sure + assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm( + coef_unpenalized + ) + + return ( + model, + X, + y, + coef_unpenalized, + coef_penalized_with_intercept, + coef_penalized_without_intercept, + l2_reg_strength, + ) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_regression(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + model.fit(X, y) + + rtol = 5e-5 if solver == "lbfgs" else 1e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + # Same with sample_weight. 
+ model = ( + clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) + ) + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on hstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2. + For long X, [X, X] is still a long but singular matrix. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha / 2, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1) + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + with warnings.catch_warnings(): + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.simplefilter("ignore", ConvergenceWarning) + model.fit(X, y) + + rtol = 2e-4 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on vstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X], [y] + [X], [y] with 1 * alpha. + It is the same alpha as the average loss stays the same. + For wide X, [X', X'] is a singular matrix. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + model.fit(X, y) + + rtol = 3e-5 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + Note: This checks the minimum norm solution for wide X, i.e. 
+ n_samples < n_features: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails + # for the wide/fat case with n_features > n_samples. Most current GLM solvers do + # NOT return the minimum norm solution with fit_intercept=True. + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-7 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 5e-5 + if solver == "newton-cholesky": + rtol = 5e-4 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if norm_model < (1 + 1e-12) * norm_solution: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + elif solver == "lbfgs" and fit_intercept: + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + assert norm_model > (1 + 1e-12) * norm_solution + + # See https://github.com/scikit-learn/scikit-learn/issues/23670. + # Note: Even adding a tiny penalty does not give the minimal norm solution. + # XXX: We could have naively expected LBFGS to find the minimal norm + # solution by adding a very small penalty. Even that fails for a reason we + # do not properly understand at this point. + else: + # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm + # solution on this problem. + # XXX: Do we have any theoretical guarantees why this should be the case? + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X, X]/2. 
+ For long X, [X, X] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + intercept = coef[-1] + coef = coef[:-1] + if n_samples > n_features: + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + else: + # To know the minimum norm solution, we keep one intercept column and do + # not divide by 2. Later on, we must take special care. + X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] + else: + intercept = 0 + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + + with warnings.catch_warnings(): + if solver.startswith("newton"): + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if fit_intercept and n_samples < n_features: + # Here we take special care. + model_intercept = 2 * model.intercept_ + model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + model_intercept = model.intercept_ + model_coef = model.coef_ + + if n_samples > n_features: + assert model_intercept == pytest.approx(intercept) + rtol = 1e-4 + assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky": + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + norm_solution = np.linalg.norm( + 0.5 * np.r_[intercept, intercept, coef, coef] + ) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + assert norm_model > (1 + 1e-12) * norm_solution + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + assert model_intercept == pytest.approx(intercept, rel=5e-6) + assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X], [y] + [X], [y]. 
+ For wide X, [X', X'] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-6 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if not (norm_model > (1 + 1e-12) * norm_solution): + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=1e-4) + elif solver == "lbfgs" and fit_intercept: + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + assert norm_model > (1 + 1e-12) * norm_solution + else: + rtol = 1e-5 if solver == "newton-cholesky" else 1e-4 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +def test_sample_weights_validation(): + """Test the raised errors in the validation of sample_weight.""" + # scalar value but not positive + X = [[1]] + y = [1] + weights = 0 + glm = _GeneralizedLinearRegressor() + + # Positive weights are accepted + glm.fit(X, y, sample_weight=1) + + # 2d array + weights = [[0]] + with pytest.raises(ValueError, match="must be 1D array or scalar"): + glm.fit(X, y, weights) + + # 1d but wrong length + weights = [1, 0] + msg = r"sample_weight.shape == \(2,\), expected \(1,\)!" 
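+    # `match` is interpreted as a regular expression, hence the escaped
+    # parentheses in `msg`.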
+ with pytest.raises(ValueError, match=msg): + glm.fit(X, y, weights) + + +@pytest.mark.parametrize( + "glm", + [ + TweedieRegressor(power=3), + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=1.5), + ], +) +def test_glm_wrong_y_range(glm): + y = np.array([-1, 2]) + X = np.array([[1], [1]]) + msg = r"Some value\(s\) of y are out of the valid range of the loss" + with pytest.raises(ValueError, match=msg): + glm.fit(X, y) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_identity_regression(fit_intercept): + """Test GLM regression with identity link on a simple dataset.""" + coef = [1.0, 2.0] + X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T + y = np.dot(X, coef) + glm = _GeneralizedLinearRegressor( + alpha=0, + fit_intercept=fit_intercept, + tol=1e-12, + ) + if fit_intercept: + glm.fit(X[:, 1:], y) + assert_allclose(glm.coef_, coef[1:], rtol=1e-10) + assert_allclose(glm.intercept_, coef[0], rtol=1e-10) + else: + glm.fit(X, y) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("alpha", [0.0, 1.0]) +@pytest.mark.parametrize( + "GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor] +) +def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator): + """Test that the impact of sample_weight is consistent""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + glm_params = dict(alpha=alpha, fit_intercept=fit_intercept) + + glm = GLMEstimator(**glm_params).fit(X, y) + coef = glm.coef_.copy() + + # sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # sample_weight are normalized to 1 so, scaling them has no effect + sample_weight = 2 * np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # setting one element of sample_weight to 0 is equivalent to removing + # the corresponding sample + sample_weight = np.ones(y.shape) + sample_weight[-1] = 0 + glm.fit(X, y, sample_weight=sample_weight) + coef1 = glm.coef_.copy() + glm.fit(X[:-1], y[:-1]) + assert_allclose(glm.coef_, coef1, rtol=1e-12) + + # check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = np.ones(len(y)) + sample_weight_1[: n_samples // 2] = 2 + + glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1) + + glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None) + assert_allclose(glm1.coef_, glm2.coef_) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize( + "estimator", + [ + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=3.0), + TweedieRegressor(power=0, link="log"), + TweedieRegressor(power=1.5), + TweedieRegressor(power=4.5), + ], +) +def test_glm_log_regression(solver, fit_intercept, estimator): + """Test GLM regression with log link on a simple dataset.""" + coef = [0.2, -0.1] + X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T + y = np.exp(np.dot(X, coef)) + glm = clone(estimator).set_params( + alpha=0, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-8, + ) + if fit_intercept: + res = 
glm.fit(X[:, :-1], y)
+        assert_allclose(res.coef_, coef[:-1], rtol=1e-6)
+        assert_allclose(res.intercept_, coef[-1], rtol=1e-6)
+    else:
+        res = glm.fit(X, y)
+        assert_allclose(res.coef_, coef, rtol=2e-6)
+
+
+@pytest.mark.parametrize("solver", SOLVERS)
+@pytest.mark.parametrize("fit_intercept", [True, False])
+def test_warm_start(solver, fit_intercept, global_random_seed):
+    n_samples, n_features = 100, 10
+    X, y = make_regression(
+        n_samples=n_samples,
+        n_features=n_features,
+        n_informative=n_features - 2,
+        bias=fit_intercept * 1.0,
+        noise=1.0,
+        random_state=global_random_seed,
+    )
+    y = np.abs(y)  # Poisson requires non-negative targets.
+    alpha = 1
+    params = {
+        "solver": solver,
+        "fit_intercept": fit_intercept,
+        "tol": 1e-10,
+    }
+
+    glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params)
+    glm1.fit(X, y)
+
+    glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
+    # We intentionally set max_iter=1 so that the solver raises a
+    # ConvergenceWarning.
+    with pytest.warns(ConvergenceWarning):
+        glm2.fit(X, y)
+
+    linear_loss = LinearModelLoss(
+        base_loss=glm1._get_loss(),
+        fit_intercept=fit_intercept,
+    )
+    sw = np.full_like(y, fill_value=1 / n_samples)
+
+    objective_glm1 = linear_loss.loss(
+        coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_,
+        X=X,
+        y=y,
+        sample_weight=sw,
+        l2_reg_strength=alpha,
+    )
+    objective_glm2 = linear_loss.loss(
+        coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_,
+        X=X,
+        y=y,
+        sample_weight=sw,
+        l2_reg_strength=alpha,
+    )
+    assert objective_glm1 < objective_glm2
+
+    glm2.set_params(max_iter=1000)
+    glm2.fit(X, y)
+    # The two fits are not exactly identical because the lbfgs solver builds its
+    # approximate Hessian from previous iterations, and that history is not the
+    # same after a warm start.
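+    # Hence the looser tolerance for lbfgs below; newton-cholesky recomputes an
+    # exact Hessian at every iteration and matches much more tightly.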
+ rtol = 2e-4 if solver == "lbfgs" else 1e-9 + assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol) + assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5) + + +@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sample_weight", [None, True]) +def test_normal_ridge_comparison( + n_samples, n_features, fit_intercept, sample_weight, request +): + """Compare with Ridge regression for Normal distributions.""" + test_size = 10 + X, y = make_regression( + n_samples=n_samples + test_size, + n_features=n_features, + n_informative=n_features - 2, + noise=0.5, + random_state=42, + ) + + if n_samples > n_features: + ridge_params = {"solver": "svd"} + else: + ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7} + + ( + X_train, + X_test, + y_train, + y_test, + ) = train_test_split(X, y, test_size=test_size, random_state=0) + + alpha = 1.0 + if sample_weight is None: + sw_train = None + alpha_ridge = alpha * n_samples + else: + sw_train = np.random.RandomState(0).rand(len(y_train)) + alpha_ridge = alpha * sw_train.sum() + + # GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2 + ridge = Ridge( + alpha=alpha_ridge, + random_state=42, + fit_intercept=fit_intercept, + **ridge_params, + ) + ridge.fit(X_train, y_train, sample_weight=sw_train) + + glm = _GeneralizedLinearRegressor( + alpha=alpha, + fit_intercept=fit_intercept, + max_iter=300, + tol=1e-5, + ) + glm.fit(X_train, y_train, sample_weight=sw_train) + assert glm.coef_.shape == (X.shape[1],) + assert_allclose(glm.coef_, ridge.coef_, atol=5e-5) + assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5) + assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4) + assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4) + + +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"]) +def test_poisson_glmnet(solver): + """Compare Poisson regression with L2 regularization and LogLink to glmnet""" + # library("glmnet") + # options(digits=10) + # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2)) + # x <- data.matrix(df[,c("a", "b")]) + # y <- df$y + # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson", + # standardize=F, thresh=1e-10, nlambda=10000) + # coef(fit, s=1) + # (Intercept) -0.12889386979 + # a 0.29019207995 + # b 0.03741173122 + X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T + y = np.array([0, 1, 1, 2]) + glm = PoissonRegressor( + alpha=1, + fit_intercept=True, + tol=1e-7, + max_iter=300, + solver=solver, + ) + glm.fit(X, y) + assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5) + assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5) + + +def test_convergence_warning(regression_data): + X, y = regression_data + + est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20) + with pytest.warns(ConvergenceWarning): + est.fit(X, y) + + +@pytest.mark.parametrize( + "name, link_class", [("identity", IdentityLink), ("log", LogLink)] +) +def test_tweedie_link_argument(name, link_class): + """Test GLM link argument set as string.""" + y = np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(power=1, link=name).fit(X, y) + assert isinstance(glm._base_loss.link, link_class) + + +@pytest.mark.parametrize( + "power, expected_link_class", + [ + (0, IdentityLink), # normal + (1, LogLink), # poisson + (2, LogLink), # gamma + (3, LogLink), # inverse-gaussian + ], +) +def test_tweedie_link_auto(power, 
expected_link_class): + """Test that link='auto' delivers the expected link function""" + y = np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(link="auto", power=power).fit(X, y) + assert isinstance(glm._base_loss.link, expected_link_class) + + +@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3]) +@pytest.mark.parametrize("link", ["log", "identity"]) +def test_tweedie_score(regression_data, power, link): + """Test that GLM score equals d2_tweedie_score for Tweedie losses.""" + X, y = regression_data + # make y positive + y = np.abs(y) + 1.0 + glm = TweedieRegressor(power=power, link=link).fit(X, y) + assert glm.score(X, y) == pytest.approx( + d2_tweedie_score(y, glm.predict(X), power=power) + ) + + +@pytest.mark.parametrize( + "estimator, value", + [ + (PoissonRegressor(), True), + (GammaRegressor(), True), + (TweedieRegressor(power=1.5), True), + (TweedieRegressor(power=0), False), + ], +) +def test_tags(estimator, value): + assert estimator._get_tags()["requires_positive_y"] is value + + +def test_linalg_warning_with_newton_solver(global_random_seed): + newton_solver = "newton-cholesky" + rng = np.random.RandomState(global_random_seed) + # Use at least 20 samples to reduce the likelihood of getting a degenerate + # dataset for any global_random_seed. + X_orig = rng.normal(size=(20, 3)) + y = rng.poisson( + np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0] + ).astype(np.float64) + + # Collinear variation of the same input features. + X_collinear = np.hstack([X_orig] * 10) + + # Let's consider the deviance of a constant baseline on this problem. + baseline_pred = np.full_like(y, y.mean()) + constant_model_deviance = mean_poisson_deviance(y, baseline_pred) + assert constant_model_deviance > 1.0 + + # No warning raised on well-conditioned design, even without regularization. + tol = 1e-10 + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y) + original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig)) + + # On this dataset, we should have enough data points to not make it + # possible to get a near zero deviance (for the any of the admissible + # random seeds). This will make it easier to interpret meaning of rtol in + # the subsequent assertions: + assert original_newton_deviance > 0.2 + + # We check that the model could successfully fit information in X_orig to + # improve upon the constant baseline by a large margin (when evaluated on + # the traing set). + assert constant_model_deviance - original_newton_deviance > 0.1 + + # LBFGS is robust to a collinear design because its approximation of the + # Hessian is Symmeric Positive Definite by construction. Let's record its + # solution + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y) + collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + + # The LBFGS solution on the collinear is expected to reach a comparable + # solution to the Newton solution on the original data. + rtol = 1e-6 + assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol) + + # Fitting a Newton solver on the collinear version of the training data + # without regularization should raise an informative warning and fallback + # to the LBFGS solver. 
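+    # The warning text below matches the LinAlgWarning emitted when the inner
+    # Cholesky factorization hits a singular or very ill-conditioned Hessian.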
+ msg = ( + "The inner solver of .*Newton.*Solver stumbled upon a singular or very " + "ill-conditioned Hessian matrix" + ) + with pytest.warns(scipy.linalg.LinAlgWarning, match=msg): + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit( + X_collinear, y + ) + # As a result we should still automatically converge to a good solution. + collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + assert collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + # Increasing the regularization slightly should make the problem go away: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y) + + # The slightly penalized model on the collinear data should be close enough + # to the unpenalized model on the original data. + penalized_collinear_newton_deviance = mean_poisson_deviance( + y, reg.predict(X_collinear) + ) + assert penalized_collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + +@pytest.mark.parametrize("verbose", [0, 1, 2]) +def test_newton_solver_verbosity(capsys, verbose): + """Test the std output of verbose newton solvers.""" + y = np.array([1, 2], dtype=float) + X = np.array([[1.0, 0], [0, 1]], dtype=float) + linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.solve(X, y, None) # returns array([0., 0.69314758]) + captured = capsys.readouterr() + + if verbose == 0: + assert captured.out == "" + else: + msg = [ + "Newton iter=1", + "Check Convergence", + "1. max |gradient|", + "2. Newton decrement", + "Solver did converge at loss = ", + ] + for m in msg: + assert m in captured.out + + if verbose >= 2: + msg = ["Backtracking Line Search", "line search iteration="] + for m in msg: + assert m in captured.out + + # Set the Newton solver to a state with a completely wrong Newton step. + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1.0, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "Line search did not converge and resorts to lbfgs instead." in captured.out + ) + + # Set the Newton solver to a state with bad Newton step such that the loss + # improvement in line search is tiny. 
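+    # The starting coef below is already essentially the optimum found above,
+    # i.e. array([0., 0.69314758]), so the forced step can barely improve the loss.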
+ sol = NewtonCholeskySolver( + coef=np.array([1e-12, 0.69314758]), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1e-6, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 2: + msg = [ + "line search iteration=", + "check loss improvement <= armijo term:", + "check loss |improvement| <= eps * |loss_old|:", + "check sum(|gradient|) < sum(|gradient_old|):", + ] + for m in msg: + assert m in captured.out + + # Test for a case with negative hessian. We badly initialize coef for a Tweedie + # loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link. + linear_loss = LinearModelLoss( + base_loss=HalfTweedieLoss(power=3), fit_intercept=False + ) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X) + 1, + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.solve(X, y, None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "The inner solver detected a pointwise Hessian with many negative values" + " and resorts to lbfgs instead." + in captured.out + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py new file mode 100644 index 0000000000000000000000000000000000000000..f5766ac0d6154a854bcbe4fa0519930fdfb267bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py @@ -0,0 +1,2306 @@ +""" +Least Angle Regression algorithm. See the documentation on the +Generalized Linear Model for a complete discussion. 
+""" +# Author: Fabian Pedregosa +# Alexandre Gramfort +# Gael Varoquaux +# +# License: BSD 3 clause + +import sys +import warnings +from math import log +from numbers import Integral, Real + +import numpy as np +from scipy import interpolate, linalg +from scipy.linalg.lapack import get_lapack_funcs + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..model_selection import check_cv + +# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs' +from ..utils import ( # type: ignore + Bunch, + arrayfuncs, + as_float_array, + check_random_state, +) +from ..utils._metadata_requests import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.parallel import Parallel, delayed +from ._base import LinearModel, LinearRegression, _preprocess_data + +SOLVE_TRIANGULAR_ARGS = {"check_finite": False} + + +@validate_params( + { + "X": [np.ndarray, None], + "y": [np.ndarray, None], + "Xy": [np.ndarray, None], + "Gram": [StrOptions({"auto"}), "boolean", np.ndarray, None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha_min": [Interval(Real, 0, None, closed="left")], + "method": [StrOptions({"lar", "lasso"})], + "copy_X": ["boolean"], + "eps": [Interval(Real, 0, None, closed="neither"), None], + "copy_Gram": ["boolean"], + "verbose": ["verbose"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lars_path( + X, + y, + Xy=None, + *, + Gram=None, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """Compute Least Angle Regression or Lasso path using the LARS algorithm [1]. + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lar', the objective function is only known in + the form of an implicit equation (see discussion in [1]). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : None or ndarray of shape (n_samples, n_features) + Input data. Note that if X is `None` then the Gram matrix must be + specified, i.e., cannot be `None` or `False`. + + y : None or ndarray of shape (n_samples,) + Input targets. + + Xy : array-like of shape (n_features,) or (n_features, n_targets), \ + default=None + `Xy = X.T @ y` that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Gram : None, 'auto', bool, ndarray of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix `X.T @ X`, if `'auto'`, the Gram + matrix is precomputed from the given X, if there are more samples + than features. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter `alpha` in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select `'lar'` for Least Angle + Regression, `'lasso'` for the Lasso. + + copy_X : bool, default=True + If `False`, `X` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. 
Increase this for very ill-conditioned + systems. Unlike the `tol` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If `False`, `Gram` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If `True`, returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (`alphas_[alphas_ > 0.].min()` when fit_path=True) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent `lasso_path` function. + + Returns + ------- + alphas : ndarray of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. + `n_alphas` is either `max_iter`, `n_features`, or the + number of nodes in the path with `alpha >= alpha_min`, whichever + is smaller. + + active : ndarray of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : ndarray of shape (n_features, n_alphas + 1) + Coefficients along the path. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is set + to True. + + See Also + -------- + lars_path_gram : Compute LARS path in the sufficient stats mode. + lasso_path : Compute Lasso path with coordinate descent. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLarsCV : Cross-validated Lasso, using the LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. [3] `Wikipedia entry on the Lasso + `_ + """ + if X is None and Gram is not None: + raise ValueError( + "X cannot be None if Gram is not None" + "Use lars_path_gram to avoid passing X and y." 
+ ) + return _lars_path_solver( + X=X, + y=y, + Xy=Xy, + Gram=Gram, + n_samples=None, + max_iter=max_iter, + alpha_min=alpha_min, + method=method, + copy_X=copy_X, + eps=eps, + copy_Gram=copy_Gram, + verbose=verbose, + return_path=return_path, + return_n_iter=return_n_iter, + positive=positive, + ) + + +@validate_params( + { + "Xy": [np.ndarray], + "Gram": [np.ndarray], + "n_samples": [Interval(Integral, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha_min": [Interval(Real, 0, None, closed="left")], + "method": [StrOptions({"lar", "lasso"})], + "copy_X": ["boolean"], + "eps": [Interval(Real, 0, None, closed="neither"), None], + "copy_Gram": ["boolean"], + "verbose": ["verbose"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lars_path_gram( + Xy, + Gram, + *, + n_samples, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """The lars_path in the sufficient stats mode [1]. + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lars', the objective function is only known in + the form of an implicit equation (see discussion in [1]) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + Xy : ndarray of shape (n_features,) or (n_features, n_targets) + `Xy = X.T @ y`. + + Gram : ndarray of shape (n_features, n_features) + `Gram = X.T @ X`. + + n_samples : int + Equivalent size of sample. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter alpha parameter in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select `'lar'` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + copy_X : bool, default=True + If `False`, `X` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the `tol` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If `False`, `Gram` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If `return_path==True` returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (`alphas_[alphas_ > 0.].min()` when `fit_path=True`) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent lasso_path function. + + Returns + ------- + alphas : ndarray of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. 
+ `n_alphas` is either `max_iter`, `n_features` or the + number of nodes in the path with `alpha >= alpha_min`, whichever + is smaller. + + active : ndarray of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : ndarray of shape (n_features, n_alphas + 1) + Coefficients along the path. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is set + to True. + + See Also + -------- + lars_path_gram : Compute LARS path. + lasso_path : Compute Lasso path with coordinate descent. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLarsCV : Cross-validated Lasso, using the LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. [3] `Wikipedia entry on the Lasso + `_ + """ + return _lars_path_solver( + X=None, + y=None, + Xy=Xy, + Gram=Gram, + n_samples=n_samples, + max_iter=max_iter, + alpha_min=alpha_min, + method=method, + copy_X=copy_X, + eps=eps, + copy_Gram=copy_Gram, + verbose=verbose, + return_path=return_path, + return_n_iter=return_n_iter, + positive=positive, + ) + + +def _lars_path_solver( + X, + y, + Xy=None, + Gram=None, + n_samples=None, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """Compute Least Angle Regression or Lasso path using LARS algorithm [1] + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lars', the objective function is only known in + the form of an implicit equation (see discussion in [1]) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : None or ndarray of shape (n_samples, n_features) + Input data. Note that if X is None then Gram must be specified, + i.e., cannot be None or False. + + y : None or ndarray of shape (n_samples,) + Input targets. + + Xy : array-like of shape (n_features,) or (n_features, n_targets), \ + default=None + `Xy = np.dot(X.T, y)` that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Gram : None, 'auto' or array-like of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram + matrix is precomputed from the given X, if there are more samples + than features. + + n_samples : int or float, default=None + Equivalent size of sample. If `None`, it will be `n_samples`. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter alpha parameter in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select ``'lar'`` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + copy_X : bool, default=True + If ``False``, ``X`` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. 
Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If ``False``, ``Gram`` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If ``return_path==True`` returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent lasso_path function. + + Returns + ------- + alphas : array-like of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. + + active : array-like of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : array-like of shape (n_features, n_alphas + 1) + Coefficients along the path + + n_iter : int + Number of iterations run. Returned only if return_n_iter is set + to True. + + See Also + -------- + lasso_path + LassoLars + Lars + LassoLarsCV + LarsCV + sklearn.decomposition.sparse_encode + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. [3] `Wikipedia entry on the Lasso + `_ + + """ + if method == "lar" and positive: + raise ValueError("Positive constraint not supported for 'lar' coding method.") + + n_samples = n_samples if n_samples is not None else y.size + + if Xy is None: + Cov = np.dot(X.T, y) + else: + Cov = Xy.copy() + + if Gram is None or Gram is False: + Gram = None + if X is None: + raise ValueError("X and Gram cannot both be unspecified.") + elif isinstance(Gram, str) and Gram == "auto" or Gram is True: + if Gram is True or X.shape[0] > X.shape[1]: + Gram = np.dot(X.T, X) + else: + Gram = None + elif copy_Gram: + Gram = Gram.copy() + + if Gram is None: + n_features = X.shape[1] + else: + n_features = Cov.shape[0] + if Gram.shape != (n_features, n_features): + raise ValueError("The shapes of the inputs Gram and Xy do not match.") + + if copy_X and X is not None and Gram is None: + # force copy. 
setting the array to be fortran-ordered + # speeds up the calculation of the (partial) Gram matrix + # and allows to easily swap columns + X = X.copy("F") + + max_features = min(max_iter, n_features) + + dtypes = set(a.dtype for a in (X, y, Xy, Gram) if a is not None) + if len(dtypes) == 1: + # use the precision level of input data if it is consistent + return_dtype = next(iter(dtypes)) + else: + # fallback to double precision otherwise + return_dtype = np.float64 + + if return_path: + coefs = np.zeros((max_features + 1, n_features), dtype=return_dtype) + alphas = np.zeros(max_features + 1, dtype=return_dtype) + else: + coef, prev_coef = ( + np.zeros(n_features, dtype=return_dtype), + np.zeros(n_features, dtype=return_dtype), + ) + alpha, prev_alpha = ( + np.array([0.0], dtype=return_dtype), + np.array([0.0], dtype=return_dtype), + ) + # above better ideas? + + n_iter, n_active = 0, 0 + active, indices = list(), np.arange(n_features) + # holds the sign of covariance + sign_active = np.empty(max_features, dtype=np.int8) + drop = False + + # will hold the cholesky factorization. Only lower part is + # referenced. + if Gram is None: + L = np.empty((max_features, max_features), dtype=X.dtype) + swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (X,)) + else: + L = np.empty((max_features, max_features), dtype=Gram.dtype) + swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (Cov,)) + (solve_cholesky,) = get_lapack_funcs(("potrs",), (L,)) + + if verbose: + if verbose > 1: + print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC") + else: + sys.stdout.write(".") + sys.stdout.flush() + + tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning + cov_precision = np.finfo(Cov.dtype).precision + equality_tolerance = np.finfo(np.float32).eps + + if Gram is not None: + Gram_copy = Gram.copy() + Cov_copy = Cov.copy() + + while True: + if Cov.size: + if positive: + C_idx = np.argmax(Cov) + else: + C_idx = np.argmax(np.abs(Cov)) + + C_ = Cov[C_idx] + + if positive: + C = C_ + else: + C = np.fabs(C_) + else: + C = 0.0 + + if return_path: + alpha = alphas[n_iter, np.newaxis] + coef = coefs[n_iter] + prev_alpha = alphas[n_iter - 1, np.newaxis] + prev_coef = coefs[n_iter - 1] + + alpha[0] = C / n_samples + if alpha[0] <= alpha_min + equality_tolerance: # early stopping + if abs(alpha[0] - alpha_min) > equality_tolerance: + # interpolation factor 0 <= ss < 1 + if n_iter > 0: + # In the first iteration, all alphas are zero, the formula + # below would make ss a NaN + ss = (prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0]) + coef[:] = prev_coef + ss * (coef - prev_coef) + alpha[0] = alpha_min + if return_path: + coefs[n_iter] = coef + break + + if n_iter >= max_iter or n_active >= n_features: + break + if not drop: + ########################################################## + # Append x_j to the Cholesky factorization of (Xa * Xa') # + # # + # ( L 0 ) # + # L -> ( ) , where L * w = Xa' x_j # + # ( w z ) and z = ||x_j|| # + # # + ########################################################## + + if positive: + sign_active[n_active] = np.ones_like(C_) + else: + sign_active[n_active] = np.sign(C_) + m, n = n_active, C_idx + n_active + + Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) + indices[n], indices[m] = indices[m], indices[n] + Cov_not_shortened = Cov + Cov = Cov[1:] # remove Cov[0] + + if Gram is None: + X.T[n], X.T[m] = swap(X.T[n], X.T[m]) + c = nrm2(X.T[n_active]) ** 2 + L[n_active, :n_active] = np.dot(X.T[n_active], X.T[:n_active].T) + else: + # swap does only work inplace if matrix is 
fortran + # contiguous ... + Gram[m], Gram[n] = swap(Gram[m], Gram[n]) + Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n]) + c = Gram[n_active, n_active] + L[n_active, :n_active] = Gram[n_active, :n_active] + + # Update the cholesky decomposition for the Gram matrix + if n_active: + linalg.solve_triangular( + L[:n_active, :n_active], + L[n_active, :n_active], + trans=0, + lower=1, + overwrite_b=True, + **SOLVE_TRIANGULAR_ARGS, + ) + + v = np.dot(L[n_active, :n_active], L[n_active, :n_active]) + diag = max(np.sqrt(np.abs(c - v)), eps) + L[n_active, n_active] = diag + + if diag < 1e-7: + # The system is becoming too ill-conditioned. + # We have degenerate vectors in our active set. + # We'll 'drop for good' the last regressor added. + warnings.warn( + "Regressors in active set degenerate. " + "Dropping a regressor, after %i iterations, " + "i.e. alpha=%.3e, " + "with an active set of %i regressors, and " + "the smallest cholesky pivot element being %.3e." + " Reduce max_iter or increase eps parameters." + % (n_iter, alpha.item(), n_active, diag), + ConvergenceWarning, + ) + + # XXX: need to figure a 'drop for good' way + Cov = Cov_not_shortened + Cov[0] = 0 + Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) + continue + + active.append(indices[n_active]) + n_active += 1 + + if verbose > 1: + print( + "%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], "", n_active, C) + ) + + if method == "lasso" and n_iter > 0 and prev_alpha[0] < alpha[0]: + # alpha is increasing. This is because the updates of Cov are + # bringing in too much numerical error that is greater than + # than the remaining correlation with the + # regressors. Time to bail out + warnings.warn( + "Early stopping the lars path, as the residues " + "are small and the current value of alpha is no " + "longer well controlled. %i iterations, alpha=%.3e, " + "previous alpha=%.3e, with an active set of %i " + "regressors." % (n_iter, alpha.item(), prev_alpha.item(), n_active), + ConvergenceWarning, + ) + break + + # least squares solution + least_squares, _ = solve_cholesky( + L[:n_active, :n_active], sign_active[:n_active], lower=True + ) + + if least_squares.size == 1 and least_squares == 0: + # This happens because sign_active[:n_active] = 0 + least_squares[...] = 1 + AA = 1.0 + else: + # is this really needed ? + AA = 1.0 / np.sqrt(np.sum(least_squares * sign_active[:n_active])) + + if not np.isfinite(AA): + # L is too ill-conditioned + i = 0 + L_ = L[:n_active, :n_active].copy() + while not np.isfinite(AA): + L_.flat[:: n_active + 1] += (2**i) * eps + least_squares, _ = solve_cholesky( + L_, sign_active[:n_active], lower=True + ) + tmp = max(np.sum(least_squares * sign_active[:n_active]), eps) + AA = 1.0 / np.sqrt(tmp) + i += 1 + least_squares *= AA + + if Gram is None: + # equiangular direction of variables in the active set + eq_dir = np.dot(X.T[:n_active].T, least_squares) + # correlation between each unactive variables and + # eqiangular vector + corr_eq_dir = np.dot(X.T[n_active:], eq_dir) + else: + # if huge number of features, this takes 50% of time, I + # think could be avoided if we just update it using an + # orthogonal (QR) decomposition of X + corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares) + + # Explicit rounding can be necessary to avoid `np.argmax(Cov)` yielding + # unstable results because of rounding errors. 
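+        # cov_precision is the number of significant decimal digits of Cov's
+        # dtype (15 for float64), as given by np.finfo above.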
+ np.around(corr_eq_dir, decimals=cov_precision, out=corr_eq_dir) + + g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32)) + if positive: + gamma_ = min(g1, C / AA) + else: + g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32)) + gamma_ = min(g1, g2, C / AA) + + # TODO: better names for these variables: z + drop = False + z = -coef[active] / (least_squares + tiny32) + z_pos = arrayfuncs.min_pos(z) + if z_pos < gamma_: + # some coefficients have changed sign + idx = np.where(z == z_pos)[0][::-1] + + # update the sign, important for LAR + sign_active[idx] = -sign_active[idx] + + if method == "lasso": + gamma_ = z_pos + drop = True + + n_iter += 1 + + if return_path: + if n_iter >= coefs.shape[0]: + del coef, alpha, prev_alpha, prev_coef + # resize the coefs and alphas array + add_features = 2 * max(1, (max_features - n_active)) + coefs = np.resize(coefs, (n_iter + add_features, n_features)) + coefs[-add_features:] = 0 + alphas = np.resize(alphas, n_iter + add_features) + alphas[-add_features:] = 0 + coef = coefs[n_iter] + prev_coef = coefs[n_iter - 1] + else: + # mimic the effect of incrementing n_iter on the array references + prev_coef = coef + prev_alpha[0] = alpha[0] + coef = np.zeros_like(coef) + + coef[active] = prev_coef[active] + gamma_ * least_squares + + # update correlations + Cov -= gamma_ * corr_eq_dir + + # See if any coefficient has changed sign + if drop and method == "lasso": + # handle the case when idx is not length of 1 + for ii in idx: + arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) + + n_active -= 1 + # handle the case when idx is not length of 1 + drop_idx = [active.pop(ii) for ii in idx] + + if Gram is None: + # propagate dropped variable + for ii in idx: + for i in range(ii, n_active): + X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1]) + # yeah this is stupid + indices[i], indices[i + 1] = indices[i + 1], indices[i] + + # TODO: this could be updated + residual = y - np.dot(X[:, :n_active], coef[active]) + temp = np.dot(X.T[n_active], residual) + + Cov = np.r_[temp, Cov] + else: + for ii in idx: + for i in range(ii, n_active): + indices[i], indices[i + 1] = indices[i + 1], indices[i] + Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1]) + Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1]) + + # Cov_n = Cov_j + x_j * X + increment(betas) TODO: + # will this still work with multiple drops ? + + # recompute covariance. Probably could be done better + # wrong as Xy is not swapped with the rest of variables + + # TODO: this could be updated + temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef) + Cov = np.r_[temp, Cov] + + sign_active = np.delete(sign_active, idx) + sign_active = np.append(sign_active, 0.0) # just to maintain size + if verbose > 1: + print( + "%s\t\t%s\t\t%s\t\t%s\t\t%s" + % (n_iter, "", drop_idx, n_active, abs(temp)) + ) + + if return_path: + # resize coefs in case of early stop + alphas = alphas[: n_iter + 1] + coefs = coefs[: n_iter + 1] + + if return_n_iter: + return alphas, active, coefs.T, n_iter + else: + return alphas, active, coefs.T + else: + if return_n_iter: + return alpha, active, coef, n_iter + else: + return alpha, active, coef + + +############################################################################### +# Estimator classes + + +class Lars(MultiOutputMixin, RegressorMixin, LinearModel): + """Least Angle Regression model a.k.a. LAR. + + Read more in the :ref:`User Guide `. 
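+
+    At each step, the feature most correlated with the current residual enters
+    the active set and the coefficients advance in the direction equiangular to
+    all active features.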
+ + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + n_nonzero_coefs : int, default=500 + Target number of non-zero coefficients. Use ``np.inf`` for no limit. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + fit_path : bool, default=True + If True the full path is stored in the ``coef_path_`` attribute. + If you compute the solution for a large problem or many targets, + setting ``fit_path`` to ``False`` will lead to a speedup, especially + with a small alpha. + + jitter : float, default=None + Upper bound on a uniform noise parameter to be added to the + `y` values, to satisfy the model's assumption of + one-at-a-time computations. Might help with stability. + + .. versionadded:: 0.23 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for jittering. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. Ignored if `jitter` is None. + + .. versionadded:: 0.23 + + Attributes + ---------- + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If this is a list of array-like, the length of the outer + list is `n_targets`. + + active_ : list of shape (n_alphas,) or list of such lists + Indices of active variables at the end of the path. + If this is a list of list, the length of the outer list is `n_targets`. + + coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \ + of such arrays + The varying values of the coefficients along the path. It is not + present if the ``fit_path`` parameter is ``False``. If this is a list + of array-like, the length of the outer list is `n_targets`. + + coef_ : array-like of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formulation formula). + + intercept_ : float or array-like of shape (n_targets,) + Independent term in decision function. + + n_iter_ : array-like or int + The number of iterations taken by lars_path to find the + grid of alphas for each target. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path: Compute Least Angle Regression or Lasso + path using LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. 
+ + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.Lars(n_nonzero_coefs=1) + >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) + Lars(n_nonzero_coefs=1) + >>> print(reg.coef_) + [ 0. -1.11...] + """ + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "verbose": ["verbose"], + "precompute": ["boolean", StrOptions({"auto"}), np.ndarray, Hidden(None)], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")], + "eps": [Interval(Real, 0, None, closed="left")], + "copy_X": ["boolean"], + "fit_path": ["boolean"], + "jitter": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + } + + method = "lar" + positive = False + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + precompute="auto", + n_nonzero_coefs=500, + eps=np.finfo(float).eps, + copy_X=True, + fit_path=True, + jitter=None, + random_state=None, + ): + self.fit_intercept = fit_intercept + self.verbose = verbose + self.precompute = precompute + self.n_nonzero_coefs = n_nonzero_coefs + self.eps = eps + self.copy_X = copy_X + self.fit_path = fit_path + self.jitter = jitter + self.random_state = random_state + + @staticmethod + def _get_gram(precompute, X, y): + if (not hasattr(precompute, "__array__")) and ( + (precompute is True) + or (precompute == "auto" and X.shape[0] > X.shape[1]) + or (precompute == "auto" and y.shape[1] > 1) + ): + precompute = np.dot(X.T, X) + + return precompute + + def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None): + """Auxiliary method to fit the model using X, y as training data""" + n_features = X.shape[1] + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=self.copy_X + ) + + if y.ndim == 1: + y = y[:, np.newaxis] + + n_targets = y.shape[1] + + Gram = self._get_gram(self.precompute, X, y) + + self.alphas_ = [] + self.n_iter_ = [] + self.coef_ = np.empty((n_targets, n_features), dtype=X.dtype) + + if fit_path: + self.active_ = [] + self.coef_path_ = [] + for k in range(n_targets): + this_Xy = None if Xy is None else Xy[:, k] + alphas, active, coef_path, n_iter_ = lars_path( + X, + y[:, k], + Gram=Gram, + Xy=this_Xy, + copy_X=self.copy_X, + copy_Gram=True, + alpha_min=alpha, + method=self.method, + verbose=max(0, self.verbose - 1), + max_iter=max_iter, + eps=self.eps, + return_path=True, + return_n_iter=True, + positive=self.positive, + ) + self.alphas_.append(alphas) + self.active_.append(active) + self.n_iter_.append(n_iter_) + self.coef_path_.append(coef_path) + self.coef_[k] = coef_path[:, -1] + + if n_targets == 1: + self.alphas_, self.active_, self.coef_path_, self.coef_ = [ + a[0] + for a in (self.alphas_, self.active_, self.coef_path_, self.coef_) + ] + self.n_iter_ = self.n_iter_[0] + else: + for k in range(n_targets): + this_Xy = None if Xy is None else Xy[:, k] + alphas, _, self.coef_[k], n_iter_ = lars_path( + X, + y[:, k], + Gram=Gram, + Xy=this_Xy, + copy_X=self.copy_X, + copy_Gram=True, + alpha_min=alpha, + method=self.method, + verbose=max(0, self.verbose - 1), + max_iter=max_iter, + eps=self.eps, + return_path=False, + return_n_iter=True, + positive=self.positive, + ) + self.alphas_.append(alphas) + self.n_iter_.append(n_iter_) + if n_targets == 1: + self.alphas_ = self.alphas_[0] + self.n_iter_ = self.n_iter_[0] + + self._set_intercept(X_offset, y_offset, X_scale) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, Xy=None): + """Fit the model using X, y as training data. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Xy : array-like of shape (n_features,) or (n_features, n_targets), \ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Returns + ------- + self : object + Returns an instance of self. + """ + X, y = self._validate_data(X, y, y_numeric=True, multi_output=True) + + alpha = getattr(self, "alpha", 0.0) + if hasattr(self, "n_nonzero_coefs"): + alpha = 0.0 # n_nonzero_coefs parametrization takes priority + max_iter = self.n_nonzero_coefs + else: + max_iter = self.max_iter + + if self.jitter is not None: + rng = check_random_state(self.random_state) + + noise = rng.uniform(high=self.jitter, size=len(y)) + y = y + noise + + self._fit( + X, + y, + max_iter=max_iter, + alpha=alpha, + fit_path=self.fit_path, + Xy=Xy, + ) + + return self + + +class LassoLars(Lars): + """Lasso model fit with Least Angle Regression a.k.a. Lars. + + It is a Linear Model trained with an L1 prior as regularizer. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the penalty term. Defaults to 1.0. + ``alpha = 0`` is equivalent to an ordinary least square, solved + by :class:`LinearRegression`. For numerical reasons, using + ``alpha = 0`` with the LassoLars object is not advised and you + should prefer the LinearRegression object. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like, default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + fit_path : bool, default=True + If ``True`` the full path is stored in the ``coef_path_`` attribute. + If you compute the solution for a large problem or many targets, + setting ``fit_path`` to ``False`` will lead to a speedup, especially + with a small alpha. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients will not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. 
+ + jitter : float, default=None + Upper bound on a uniform noise parameter to be added to the + `y` values, to satisfy the model's assumption of + one-at-a-time computations. Might help with stability. + + .. versionadded:: 0.23 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for jittering. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. Ignored if `jitter` is None. + + .. versionadded:: 0.23 + + Attributes + ---------- + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If this is a list of array-like, the length of the outer + list is `n_targets`. + + active_ : list of length n_alphas or list of such lists + Indices of active variables at the end of the path. + If this is a list of list, the length of the outer list is `n_targets`. + + coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \ + of such arrays + If a list is passed it's expected to be one of n_targets such arrays. + The varying values of the coefficients along the path. It is not + present if the ``fit_path`` parameter is ``False``. If this is a list + of array-like, the length of the outer list is `n_targets`. + + coef_ : array-like of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formulation formula). + + intercept_ : float or array-like of shape (n_targets,) + Independent term in decision function. + + n_iter_ : array-like or int + The number of iterations taken by lars_path to find the + grid of alphas for each target. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.LassoLars(alpha=0.01) + >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1]) + LassoLars(alpha=0.01) + >>> print(reg.coef_) + [ 0. -0.955...] 
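+
+    When ``fit_path=True`` (the default) the full regularization path is also
+    stored; a small, shape-only check (illustrative, the exact coefficient
+    values depend on the data):
+
+    >>> reg.coef_path_.shape[0]  # one row per input feature
+    2
+    >>> reg.alphas_.shape[0] == reg.coef_path_.shape[1]
+    True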
+ """ + + _parameter_constraints: dict = { + **Lars._parameter_constraints, + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "positive": ["boolean"], + } + _parameter_constraints.pop("n_nonzero_coefs") + + method = "lasso" + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + verbose=False, + precompute="auto", + max_iter=500, + eps=np.finfo(float).eps, + copy_X=True, + fit_path=True, + positive=False, + jitter=None, + random_state=None, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.verbose = verbose + self.positive = positive + self.precompute = precompute + self.copy_X = copy_X + self.eps = eps + self.fit_path = fit_path + self.jitter = jitter + self.random_state = random_state + + +############################################################################### +# Cross-validated estimator classes + + +def _check_copy_and_writeable(array, copy=False): + if copy or not array.flags.writeable: + return array.copy() + return array + + +def _lars_path_residues( + X_train, + y_train, + X_test, + y_test, + Gram=None, + copy=True, + method="lar", + verbose=False, + fit_intercept=True, + max_iter=500, + eps=np.finfo(float).eps, + positive=False, +): + """Compute the residues on left-out data for a full LARS path + + Parameters + ----------- + X_train : array-like of shape (n_samples, n_features) + The data to fit the LARS on + + y_train : array-like of shape (n_samples,) + The target variable to fit LARS on + + X_test : array-like of shape (n_samples, n_features) + The data to compute the residues on + + y_test : array-like of shape (n_samples,) + The target variable to compute the residues on + + Gram : None, 'auto' or array-like of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram + matrix is precomputed from the given X, if there are more samples + than features + + copy : bool, default=True + Whether X_train, X_test, y_train and y_test should be copied; + if False, they may be overwritten. + + method : {'lar' , 'lasso'}, default='lar' + Specifies the returned model. Select ``'lar'`` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + verbose : bool or int, default=False + Sets the amount of verbosity + + fit_intercept : bool, default=True + whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + See reservations for using this option in combination with method + 'lasso' for expected small values of alpha in the doc of LassoLarsCV + and LassoLarsIC. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + Returns + -------- + alphas : array-like of shape (n_alphas,) + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever + is smaller. + + active : list + Indices of active variables at the end of the path. 
+ + coefs : array-like of shape (n_features, n_alphas) + Coefficients along the path + + residues : array-like of shape (n_alphas, n_samples) + Residues of the prediction on the test data + """ + X_train = _check_copy_and_writeable(X_train, copy) + y_train = _check_copy_and_writeable(y_train, copy) + X_test = _check_copy_and_writeable(X_test, copy) + y_test = _check_copy_and_writeable(y_test, copy) + + if fit_intercept: + X_mean = X_train.mean(axis=0) + X_train -= X_mean + X_test -= X_mean + y_mean = y_train.mean(axis=0) + y_train = as_float_array(y_train, copy=False) + y_train -= y_mean + y_test = as_float_array(y_test, copy=False) + y_test -= y_mean + + alphas, active, coefs = lars_path( + X_train, + y_train, + Gram=Gram, + copy_X=False, + copy_Gram=False, + method=method, + verbose=max(0, verbose - 1), + max_iter=max_iter, + eps=eps, + positive=positive, + ) + residues = np.dot(X_test, coefs) - y_test[:, np.newaxis] + return alphas, active, coefs, residues.T + + +class LarsCV(Lars): + """Cross-validated Least Angle Regression model. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + precompute : bool, 'auto' or array-like , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram matrix + cannot be passed as argument since we will use only subsets of X. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + max_n_alphas : int, default=1000 + The maximum number of points on the path used to compute the + residuals in the cross-validation. + + n_jobs : int or None, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + Attributes + ---------- + active_ : list of length n_alphas or list of such lists + Indices of active variables at the end of the path. + If this is a list of lists, the outer list length is `n_targets`. 
+ + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function + + coef_path_ : array-like of shape (n_features, n_alphas) + the varying values of the coefficients along the path + + alpha_ : float + the estimated regularization parameter alpha + + alphas_ : array-like of shape (n_alphas,) + the different values of alpha along the path + + cv_alphas_ : array-like of shape (n_cv_alphas,) + all the values of alpha along the path for the different folds + + mse_path_ : array-like of shape (n_folds, n_cv_alphas) + the mean square error on left-out for each fold along the path + (alpha values given by ``cv_alphas``) + + n_iter_ : array-like or int + the number of iterations run by Lars with the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import LarsCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0) + >>> reg = LarsCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9996... + >>> reg.alpha_ + 0.2961... + >>> reg.predict(X[:1,]) + array([154.3996...]) + """ + + _parameter_constraints: dict = { + **Lars._parameter_constraints, + "max_iter": [Interval(Integral, 0, None, closed="left")], + "cv": ["cv_object"], + "max_n_alphas": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + } + + for parameter in ["n_nonzero_coefs", "jitter", "fit_path", "random_state"]: + _parameter_constraints.pop(parameter) + + method = "lar" + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + max_iter=500, + precompute="auto", + cv=None, + max_n_alphas=1000, + n_jobs=None, + eps=np.finfo(float).eps, + copy_X=True, + ): + self.max_iter = max_iter + self.cv = cv + self.max_n_alphas = max_n_alphas + self.n_jobs = n_jobs + super().__init__( + fit_intercept=fit_intercept, + verbose=verbose, + precompute=precompute, + n_nonzero_coefs=500, + eps=eps, + copy_X=copy_X, + fit_path=True, + ) + + def _more_tags(self): + return {"multioutput": False} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, **params): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. 
versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of self. + """ + _raise_for_params(params, self, "fit") + + X, y = self._validate_data(X, y, y_numeric=True) + X = as_float_array(X, copy=self.copy_X) + y = as_float_array(y, copy=self.copy_X) + + # init cross-validation generator + cv = check_cv(self.cv, classifier=False) + + if _routing_enabled(): + routed_params = process_routing(self, "fit", **params) + else: + routed_params = Bunch(splitter=Bunch(split={})) + + # As we use cross-validation, the Gram matrix is not precomputed here + Gram = self.precompute + if hasattr(Gram, "__array__"): + warnings.warn( + 'Parameter "precompute" cannot be an array in ' + '%s. Automatically switch to "auto" instead.' + % self.__class__.__name__ + ) + Gram = "auto" + + cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(_lars_path_residues)( + X[train], + y[train], + X[test], + y[test], + Gram=Gram, + copy=False, + method=self.method, + verbose=max(0, self.verbose - 1), + fit_intercept=self.fit_intercept, + max_iter=self.max_iter, + eps=self.eps, + positive=self.positive, + ) + for train, test in cv.split(X, y, **routed_params.splitter.split) + ) + all_alphas = np.concatenate(list(zip(*cv_paths))[0]) + # Unique also sorts + all_alphas = np.unique(all_alphas) + # Take at most max_n_alphas values + stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas)))) + all_alphas = all_alphas[::stride] + + mse_path = np.empty((len(all_alphas), len(cv_paths))) + for index, (alphas, _, _, residues) in enumerate(cv_paths): + alphas = alphas[::-1] + residues = residues[::-1] + if alphas[0] != 0: + alphas = np.r_[0, alphas] + residues = np.r_[residues[0, np.newaxis], residues] + if alphas[-1] != all_alphas[-1]: + alphas = np.r_[alphas, all_alphas[-1]] + residues = np.r_[residues, residues[-1, np.newaxis]] + this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas) + this_residues **= 2 + mse_path[:, index] = np.mean(this_residues, axis=-1) + + mask = np.all(np.isfinite(mse_path), axis=-1) + all_alphas = all_alphas[mask] + mse_path = mse_path[mask] + # Select the alpha that minimizes left-out error + i_best_alpha = np.argmin(mse_path.mean(axis=-1)) + best_alpha = all_alphas[i_best_alpha] + + # Store our parameters + self.alpha_ = best_alpha + self.cv_alphas_ = all_alphas + self.mse_path_ = mse_path + + # Now compute the full model using best_alpha + # it will call a lasso internally when self if LassoLarsCV + # as self.method == 'lasso' + self._fit( + X, + y, + max_iter=self.max_iter, + alpha=best_alpha, + Xy=None, + fit_path=True, + ) + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + splitter=check_cv(self.cv), + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + return router + + +class LassoLarsCV(LarsCV): + """Cross-validated Lasso, using the LARS algorithm. + + See glossary entry for :term:`cross-validation estimator`. 
+ + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + precompute : bool or 'auto' , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram matrix + cannot be passed as argument since we will use only subsets of X. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + max_n_alphas : int, default=1000 + The maximum number of points on the path used to compute the + residuals in the cross-validation. + + n_jobs : int or None, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients do not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. + As a consequence using LassoLarsCV only makes sense for problems where + a sparse solution is expected and/or reached. + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function. 
+ + coef_path_ : array-like of shape (n_features, n_alphas) + the varying values of the coefficients along the path + + alpha_ : float + the estimated regularization parameter alpha + + alphas_ : array-like of shape (n_alphas,) + the different values of alpha along the path + + cv_alphas_ : array-like of shape (n_cv_alphas,) + all the values of alpha along the path for the different folds + + mse_path_ : array-like of shape (n_folds, n_cv_alphas) + the mean square error on left-out for each fold along the path + (alpha values given by ``cv_alphas``) + + n_iter_ : array-like or int + the number of iterations run by Lars with the optimal alpha. + + active_ : list of int + Indices of active variables at the end of the path. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + The object solves the same problem as the + :class:`~sklearn.linear_model.LassoCV` object. However, unlike the + :class:`~sklearn.linear_model.LassoCV`, it find the relevant alphas values + by itself. In general, because of this property, it will be more stable. + However, it is more fragile to heavily multicollinear datasets. + + It is more efficient than the :class:`~sklearn.linear_model.LassoCV` if + only a small number of features are selected compared to the total number, + for instance if there are very few samples compared to the number of + features. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import LassoLarsCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4.0, random_state=0) + >>> reg = LassoLarsCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9993... + >>> reg.alpha_ + 0.3972... + >>> reg.predict(X[:1,]) + array([-78.4831...]) + """ + + _parameter_constraints = { + **LarsCV._parameter_constraints, + "positive": ["boolean"], + } + + method = "lasso" + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + max_iter=500, + precompute="auto", + cv=None, + max_n_alphas=1000, + n_jobs=None, + eps=np.finfo(float).eps, + copy_X=True, + positive=False, + ): + self.fit_intercept = fit_intercept + self.verbose = verbose + self.max_iter = max_iter + self.precompute = precompute + self.cv = cv + self.max_n_alphas = max_n_alphas + self.n_jobs = n_jobs + self.eps = eps + self.copy_X = copy_X + self.positive = positive + # XXX : we don't use super().__init__ + # to avoid setting n_nonzero_coefs + + +class LassoLarsIC(LassoLars): + """Lasso model fit with Lars using BIC or AIC for model selection. 
+ + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + AIC is the Akaike information criterion [2]_ and BIC is the Bayes + Information criterion [3]_. Such criteria are useful to select the value + of the regularization parameter by making a trade-off between the + goodness of fit and the complexity of the model. A good model should + explain well the data while being simple. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {'aic', 'bic'}, default='aic' + The type of criterion to use. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like, default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=500 + Maximum number of iterations to perform. Can be used for + early stopping. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients do not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. + As a consequence using LassoLarsIC only makes sense for problems where + a sparse solution is expected and/or reached. + + noise_variance : float, default=None + The estimated noise variance of the data. If `None`, an unbiased + estimate is computed by an OLS model. However, it is only possible + in the case where `n_samples > n_features + fit_intercept`. + + .. versionadded:: 1.1 + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function. + + alpha_ : float + the alpha parameter chosen by the information criterion + + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If a list, it will be of length `n_targets`. + + n_iter_ : int + number of iterations run by lars_path to find the grid of + alphas. + + criterion_ : array-like of shape (n_alphas,) + The value of the information criteria ('aic', 'bic') across all + alphas. The alpha which has the smallest information criterion is + chosen, as specified in [1]_. + + noise_variance_ : float + The estimated noise variance from the data used to compute the + criterion. + + .. 
versionadded:: 1.1 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + The number of degrees of freedom is computed as in [1]_. + + To have more details regarding the mathematical formulation of the + AIC and BIC criteria, please refer to :ref:`User Guide `. + + References + ---------- + .. [1] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani. + "On the degrees of freedom of the lasso." + The Annals of Statistics 35.5 (2007): 2173-2192. + <0712.0881>` + + .. [2] `Wikipedia entry on the Akaike information criterion + `_ + + .. [3] `Wikipedia entry on the Bayesian information criterion + `_ + + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.LassoLarsIC(criterion='bic') + >>> X = [[-2, 2], [-1, 1], [0, 0], [1, 1], [2, 2]] + >>> y = [-2.2222, -1.1111, 0, -1.1111, -2.2222] + >>> reg.fit(X, y) + LassoLarsIC(criterion='bic') + >>> print(reg.coef_) + [ 0. -1.11...] + """ + + _parameter_constraints: dict = { + **LassoLars._parameter_constraints, + "criterion": [StrOptions({"aic", "bic"})], + "noise_variance": [Interval(Real, 0, None, closed="left"), None], + } + + for parameter in ["jitter", "fit_path", "alpha", "random_state"]: + _parameter_constraints.pop(parameter) + + def __init__( + self, + criterion="aic", + *, + fit_intercept=True, + verbose=False, + precompute="auto", + max_iter=500, + eps=np.finfo(float).eps, + copy_X=True, + positive=False, + noise_variance=None, + ): + self.criterion = criterion + self.fit_intercept = fit_intercept + self.positive = positive + self.max_iter = max_iter + self.verbose = verbose + self.copy_X = copy_X + self.precompute = precompute + self.eps = eps + self.fit_path = True + self.noise_variance = noise_variance + + def _more_tags(self): + return {"multioutput": False} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, copy_X=None): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + copy_X : bool, default=None + If provided, this parameter will override the choice + of copy_X made at instance creation. + If ``True``, X will be copied; else, it may be overwritten. + + Returns + ------- + self : object + Returns an instance of self. 
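+
+        Notes
+        -----
+        A minimal usage sketch (with hypothetical arrays ``X`` and ``y``): the
+        criterion-selected regularization strength and the per-alpha criterion
+        values are exposed as attributes after fitting::
+
+            reg = LassoLarsIC(criterion="bic").fit(X, y)
+            reg.alpha_, reg.criterion_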
+ """ + if copy_X is None: + copy_X = self.copy_X + X, y = self._validate_data(X, y, y_numeric=True) + + X, y, Xmean, ymean, Xstd = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=copy_X + ) + + Gram = self.precompute + + alphas_, _, coef_path_, self.n_iter_ = lars_path( + X, + y, + Gram=Gram, + copy_X=copy_X, + copy_Gram=True, + alpha_min=0.0, + method="lasso", + verbose=self.verbose, + max_iter=self.max_iter, + eps=self.eps, + return_n_iter=True, + positive=self.positive, + ) + + n_samples = X.shape[0] + + if self.criterion == "aic": + criterion_factor = 2 + elif self.criterion == "bic": + criterion_factor = log(n_samples) + else: + raise ValueError( + f"criterion should be either bic or aic, got {self.criterion!r}" + ) + + residuals = y[:, np.newaxis] - np.dot(X, coef_path_) + residuals_sum_squares = np.sum(residuals**2, axis=0) + degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int) + for k, coef in enumerate(coef_path_.T): + mask = np.abs(coef) > np.finfo(coef.dtype).eps + if not np.any(mask): + continue + # get the number of degrees of freedom equal to: + # Xc = X[:, mask] + # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs + degrees_of_freedom[k] = np.sum(mask) + + self.alphas_ = alphas_ + + if self.noise_variance is None: + self.noise_variance_ = self._estimate_noise_variance( + X, y, positive=self.positive + ) + else: + self.noise_variance_ = self.noise_variance + + self.criterion_ = ( + n_samples * np.log(2 * np.pi * self.noise_variance_) + + residuals_sum_squares / self.noise_variance_ + + criterion_factor * degrees_of_freedom + ) + n_best = np.argmin(self.criterion_) + + self.alpha_ = alphas_[n_best] + self.coef_ = coef_path_[:, n_best] + self._set_intercept(Xmean, ymean, Xstd) + return self + + def _estimate_noise_variance(self, X, y, positive): + """Compute an estimate of the variance with an OLS model. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data to be fitted by the OLS model. We expect the data to be + centered. + + y : ndarray of shape (n_samples,) + Associated target. + + positive : bool, default=False + Restrict coefficients to be >= 0. This should be inline with + the `positive` parameter from `LassoLarsIC`. + + Returns + ------- + noise_variance : float + An estimator of the noise variance of an OLS model. + """ + if X.shape[0] <= X.shape[1] + self.fit_intercept: + raise ValueError( + f"You are using {self.__class__.__name__} in the case where the number " + "of samples is smaller than the number of features. In this setting, " + "getting a good estimate for the variance of the noise is not " + "possible. Provide an estimate of the noise variance in the " + "constructor." 
+ ) + # X and y are already centered and we don't need to fit with an intercept + ols_model = LinearRegression(positive=positive, fit_intercept=False) + y_pred = ols_model.fit(X, y).predict(X) + return np.sum((y - y_pred) ** 2) / ( + X.shape[0] - X.shape[1] - self.fit_intercept + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..4255706e284f176c0e0103f9871ce32b9bb2b132 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py @@ -0,0 +1,671 @@ +""" +Loss functions for linear models with raw_prediction = X @ coef +""" +import numpy as np +from scipy import sparse + +from ..utils.extmath import squared_norm + + +class LinearModelLoss: + """General class for loss functions with raw_prediction = X @ coef + intercept. + + Note that raw_prediction is also known as linear predictor. + + The loss is the average of per sample losses and includes a term for L2 + regularization:: + + loss = 1 / s_sum * sum_i s_i loss(y_i, X_i @ coef + intercept) + + 1/2 * l2_reg_strength * ||coef||_2^2 + + with sample weights s_i=1 if sample_weight=None and s_sum=sum_i s_i. + + Gradient and hessian, for simplicity without intercept, are:: + + gradient = 1 / s_sum * X.T @ loss.gradient + l2_reg_strength * coef + hessian = 1 / s_sum * X.T @ diag(loss.hessian) @ X + + l2_reg_strength * identity + + Conventions: + if fit_intercept: + n_dof = n_features + 1 + else: + n_dof = n_features + + if base_loss.is_multiclass: + coef.shape = (n_classes, n_dof) or ravelled (n_classes * n_dof,) + else: + coef.shape = (n_dof,) + + The intercept term is at the end of the coef array: + if base_loss.is_multiclass: + if coef.shape (n_classes, n_dof): + intercept = coef[:, -1] + if coef.shape (n_classes * n_dof,) + intercept = coef[n_features::n_dof] = coef[(n_dof-1)::n_dof] + intercept.shape = (n_classes,) + else: + intercept = coef[-1] + + Note: If coef has shape (n_classes * n_dof,), the 2d-array can be reconstructed as + + coef.reshape((n_classes, -1), order="F") + + The option order="F" makes coef[:, i] contiguous. This, in turn, makes the + coefficients without intercept, coef[:, :-1], contiguous and speeds up + matrix-vector computations. + + Note: If the average loss per sample is wanted instead of the sum of the loss per + sample, one can simply use a rescaled sample_weight such that + sum(sample_weight) = 1. + + Parameters + ---------- + base_loss : instance of class BaseLoss from sklearn._loss. + fit_intercept : bool + """ + + def __init__(self, base_loss, fit_intercept): + self.base_loss = base_loss + self.fit_intercept = fit_intercept + + def init_zero_coef(self, X, dtype=None): + """Allocate coef of correct shape with zeros. + + Parameters: + ----------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + dtype : data-type, default=None + Overrides the data type of coef. With dtype=None, coef will have the same + dtype as X. + + Returns + ------- + coef : ndarray of shape (n_dof,) or (n_classes, n_dof) + Coefficients of a linear model. 
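+
+            For example (an illustrative sketch, not a doctest): with
+            ``n_features=4`` and ``fit_intercept=True``, ``n_dof`` is 5, so the
+            returned array has shape ``(5,)`` for a single-output loss and
+            shape ``(n_classes, 5)``, F-ordered, for a multiclass loss.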
+ """ + n_features = X.shape[1] + n_classes = self.base_loss.n_classes + if self.fit_intercept: + n_dof = n_features + 1 + else: + n_dof = n_features + if self.base_loss.is_multiclass: + coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order="F") + else: + coef = np.zeros_like(X, shape=n_dof, dtype=dtype) + return coef + + def weight_intercept(self, coef): + """Helper function to get coefficients and intercept. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + + Returns + ------- + weights : ndarray of shape (n_features,) or (n_classes, n_features) + Coefficients without intercept term. + intercept : float or ndarray of shape (n_classes,) + Intercept terms. + """ + if not self.base_loss.is_multiclass: + if self.fit_intercept: + intercept = coef[-1] + weights = coef[:-1] + else: + intercept = 0.0 + weights = coef + else: + # reshape to (n_classes, n_dof) + if coef.ndim == 1: + weights = coef.reshape((self.base_loss.n_classes, -1), order="F") + else: + weights = coef + if self.fit_intercept: + intercept = weights[:, -1] + weights = weights[:, :-1] + else: + intercept = 0.0 + + return weights, intercept + + def weight_intercept_raw(self, coef, X): + """Helper function to get coefficients, intercept and raw_prediction. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + Returns + ------- + weights : ndarray of shape (n_features,) or (n_classes, n_features) + Coefficients without intercept term. + intercept : float or ndarray of shape (n_classes,) + Intercept terms. + raw_prediction : ndarray of shape (n_samples,) or \ + (n_samples, n_classes) + """ + weights, intercept = self.weight_intercept(coef) + + if not self.base_loss.is_multiclass: + raw_prediction = X @ weights + intercept + else: + # weights has shape (n_classes, n_dof) + raw_prediction = X @ weights.T + intercept # ndarray, likely C-contiguous + + return weights, intercept, raw_prediction + + def l2_penalty(self, weights, l2_reg_strength): + """Compute L2 penalty term l2_reg_strength/2 *||w||_2^2.""" + norm2_w = weights @ weights if weights.ndim == 1 else squared_norm(weights) + return 0.5 * l2_reg_strength * norm2_w + + def loss( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Compute the loss as weighted average over point-wise losses. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. 
+ l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + loss : float + Weighted average of losses per sample, plus penalty. + """ + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + loss = self.base_loss.loss( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=None, + n_threads=n_threads, + ) + loss = np.average(loss, weights=sample_weight) + + return loss + self.l2_penalty(weights, l2_reg_strength) + + def loss_gradient( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Computes the sum of loss and gradient w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + loss : float + Weighted average of losses per sample, plus penalty. + + gradient : ndarray of shape coef.shape + The gradient of the loss. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + loss, grad_pointwise = self.base_loss.loss_gradient( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + loss = loss.sum() / sw_sum + loss += self.l2_penalty(weights, l2_reg_strength) + + grad_pointwise /= sw_sum + + if not self.base_loss.is_multiclass: + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + else: + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + # grad_pointwise.shape = (n_samples, n_classes) + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + if coef.ndim == 1: + grad = grad.ravel(order="F") + + return loss, grad + + def gradient( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Computes the gradient w.r.t. coef. 
+ + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + grad_pointwise = self.base_loss.gradient( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + grad_pointwise /= sw_sum + + if not self.base_loss.is_multiclass: + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + return grad + else: + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + # gradient.shape = (n_samples, n_classes) + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + if coef.ndim == 1: + return grad.ravel(order="F") + else: + return grad + + def gradient_hessian( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + gradient_out=None, + hessian_out=None, + raw_prediction=None, + ): + """Computes gradient and hessian w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + gradient_out : None or ndarray of shape coef.shape + A location into which the gradient is stored. If None, a new array + might be created. + hessian_out : None or ndarray + A location into which the hessian is stored. If None, a new array + might be created. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. 
If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + + hessian : ndarray + Hessian matrix. + + hessian_warning : bool + True if pointwise hessian has more than half of its elements non-positive. + """ + n_samples, n_features = X.shape + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + grad_pointwise /= sw_sum + hess_pointwise /= sw_sum + + # For non-canonical link functions and far away from the optimum, the pointwise + # hessian can be negative. We take care that 75% of the hessian entries are + # positive. + hessian_warning = np.mean(hess_pointwise <= 0) > 0.25 + hess_pointwise = np.abs(hess_pointwise) + + if not self.base_loss.is_multiclass: + # gradient + if gradient_out is None: + grad = np.empty_like(coef, dtype=weights.dtype) + else: + grad = gradient_out + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + + # hessian + if hessian_out is None: + hess = np.empty(shape=(n_dof, n_dof), dtype=weights.dtype) + else: + hess = hessian_out + + if hessian_warning: + # Exit early without computing the hessian. + return grad, hess, hessian_warning + + # TODO: This "sandwich product", X' diag(W) X, is the main computational + # bottleneck for solvers. A dedicated Cython routine might improve it + # exploiting the symmetry (as opposed to, e.g., BLAS gemm). + if sparse.issparse(X): + hess[:n_features, :n_features] = ( + X.T + @ sparse.dia_matrix( + (hess_pointwise, 0), shape=(n_samples, n_samples) + ) + @ X + ).toarray() + else: + # np.einsum may use less memory but the following, using BLAS matrix + # multiplication (gemm), is by far faster. + WX = hess_pointwise[:, None] * X + hess[:n_features, :n_features] = np.dot(X.T, WX) + + if l2_reg_strength > 0: + # The L2 penalty enters the Hessian on the diagonal only. To add those + # terms, we use a flattened view on the array. + hess.reshape(-1)[ + : (n_features * n_dof) : (n_dof + 1) + ] += l2_reg_strength + + if self.fit_intercept: + # With intercept included as added column to X, the hessian becomes + # hess = (X, 1)' @ diag(h) @ (X, 1) + # = (X' @ diag(h) @ X, X' @ h) + # ( h @ X, sum(h)) + # The left upper part has already been filled, it remains to compute + # the last row and the last column. + Xh = X.T @ hess_pointwise + hess[:-1, -1] = Xh + hess[-1, :-1] = Xh + hess[-1, -1] = hess_pointwise.sum() + else: + # Here we may safely assume HalfMultinomialLoss aka categorical + # cross-entropy. + raise NotImplementedError + + return grad, hess, hessian_warning + + def gradient_hessian_product( + self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1 + ): + """Computes gradient and hessp (hessian product function) w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). 
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + + hessp : callable + Function that takes in a vector input of shape of gradient and + and returns matrix-vector product with hessian. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + + if not self.base_loss.is_multiclass: + grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + hess_pointwise /= sw_sum + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + + # Precompute as much as possible: hX, hX_sum and hessian_sum + hessian_sum = hess_pointwise.sum() + if sparse.issparse(X): + hX = ( + sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples)) + @ X + ) + else: + hX = hess_pointwise[:, np.newaxis] * X + + if self.fit_intercept: + # Calculate the double derivative with respect to intercept. + # Note: In case hX is sparse, hX.sum is a matrix object. + hX_sum = np.squeeze(np.asarray(hX.sum(axis=0))) + # prevent squeezing to zero-dim array if n_features == 1 + hX_sum = np.atleast_1d(hX_sum) + + # With intercept included and l2_reg_strength = 0, hessp returns + # res = (X, 1)' @ diag(h) @ (X, 1) @ s + # = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1]) + # res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1] + # res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1] + def hessp(s): + ret = np.empty_like(s) + if sparse.issparse(X): + ret[:n_features] = X.T @ (hX @ s[:n_features]) + else: + ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]]) + ret[:n_features] += l2_reg_strength * s[:n_features] + + if self.fit_intercept: + ret[:n_features] += s[-1] * hX_sum + ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1] + return ret + + else: + # Here we may safely assume HalfMultinomialLoss aka categorical + # cross-entropy. + # HalfMultinomialLoss computes only the diagonal part of the hessian, i.e. + # diagonal in the classes. Here, we want the matrix-vector product of the + # full hessian. Therefore, we call gradient_proba. + grad_pointwise, proba = self.base_loss.gradient_proba( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + + # Full hessian-vector product, i.e. not only the diagonal part of the + # hessian. 
Derivation with some index battle for input vector s: + # - sample index i + # - feature indices j, m + # - class indices k, l + # - 1_{k=l} is one if k=l else 0 + # - p_i_k is the (predicted) probability that sample i belongs to class k + # for all i: sum_k p_i_k = 1 + # - s_l_m is input vector for class l and feature m + # - X' = X transposed + # + # Note: Hessian with dropping most indices is just: + # X' @ p_k (1(k=l) - p_l) @ X + # + # result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m + # = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l) + # * X_{im} s_l_m + # = sum_{i, m} (X')_{ji} * p_i_k + # * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m) + # + # See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411 # noqa + def hessp(s): + s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof) + if self.fit_intercept: + s_intercept = s[:, -1] + s = s[:, :-1] # shape = (n_classes, n_features) + else: + s_intercept = 0 + tmp = X @ s.T + s_intercept # X_{im} * s_k_m + tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l .. + tmp *= proba # * p_i_k + if sample_weight is not None: + tmp *= sample_weight[:, np.newaxis] + # hess_prod = empty_like(grad), but we ravel grad below and this + # function is run after that. + hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s + if self.fit_intercept: + hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum + if coef.ndim == 1: + return hess_prod.ravel(order="F") + else: + return hess_prod + + if coef.ndim == 1: + return grad.ravel(order="F"), hessp + + return grad, hessp diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py new file mode 100644 index 0000000000000000000000000000000000000000..188204ce815ad05737dbd3302a761c76f14e1225 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py @@ -0,0 +1,2190 @@ +""" +Logistic Regression +""" + +# Author: Gael Varoquaux +# Fabian Pedregosa +# Alexandre Gramfort +# Manoj Kumar +# Lars Buitinck +# Simon Wu +# Arthur Mensch 2 and a solver that supports it, returns "multinomial". + For all other cases, in particular binary classification, return "ovr". + """ + if multi_class == "auto": + if solver in ("liblinear", "newton-cholesky"): + multi_class = "ovr" + elif n_classes > 2: + multi_class = "multinomial" + else: + multi_class = "ovr" + if multi_class == "multinomial" and solver in ("liblinear", "newton-cholesky"): + raise ValueError("Solver %s does not support a multinomial backend." % solver) + return multi_class + + +def _logistic_regression_path( + X, + y, + pos_class=None, + Cs=10, + fit_intercept=True, + max_iter=100, + tol=1e-4, + verbose=0, + solver="lbfgs", + coef=None, + class_weight=None, + dual=False, + penalty="l2", + intercept_scaling=1.0, + multi_class="auto", + random_state=None, + check_input=True, + max_squared_sum=None, + sample_weight=None, + l1_ratio=None, + n_threads=1, +): + """Compute a Logistic Regression model for a list of regularization + parameters. + + This is an implementation that uses the result of the previous model + to speed up computations along the set of solutions, making it faster + than sequentially calling LogisticRegression for the different parameters. + Note that there will be no speedup with liblinear solver, since it does + not handle warm-starting. 
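+
+    A rough sketch of the call pattern (hypothetical arrays ``X`` and ``y``;
+    this is a private helper that is normally reached through
+    :class:`LogisticRegression` or :class:`LogisticRegressionCV`)::
+
+        coefs, Cs, n_iter = _logistic_regression_path(
+            X, y, Cs=10, solver="lbfgs", max_iter=100
+        )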
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Input data, target values. + + pos_class : int, default=None + The class with respect to which we perform a one-vs-all fit. + If None, then it is assumed that the given problem is binary. + + Cs : int or array-like of shape (n_cs,), default=10 + List of values for the regularization parameter or integer specifying + the number of regularization parameters that should be used. In this + case, the parameters will be chosen in a logarithmic scale between + 1e-4 and 1e4. + + fit_intercept : bool, default=True + Whether to fit an intercept for the model. In this case the shape of + the returned array is (n_cs, n_features + 1). + + max_iter : int, default=100 + Maximum number of iterations for the solver. + + tol : float, default=1e-4 + Stopping criterion. For the newton-cg and lbfgs solvers, the iteration + will stop when ``max{|g_i | i = 1, ..., n} <= tol`` + where ``g_i`` is the i-th component of the gradient. + + verbose : int, default=0 + For the liblinear and lbfgs solvers set verbose to any positive + number for verbosity. + + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \ + default='lbfgs' + Numerical solver to use. + + coef : array-like of shape (n_features,), default=None + Initialization value for coefficients of logistic regression. + Useless for liblinear solver. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + dual : bool, default=False + Dual or primal formulation. Dual formulation is only implemented for + l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + penalty : {'l1', 'l2', 'elasticnet'}, default='l2' + Used to specify the norm used in the penalization. The 'newton-cg', + 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is + only supported by the 'saga' solver. + + intercept_scaling : float, default=1. + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equal to + intercept_scaling is appended to the instance vector. + The intercept becomes ``intercept_scaling * synthetic_feature_weight``. + + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + multi_class : {'ovr', 'multinomial', 'auto'}, default='auto' + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', + and otherwise selects 'multinomial'. 
+ + .. versionadded:: 0.18 + Stochastic Average Gradient descent solver for 'multinomial' case. + .. versionchanged:: 0.22 + Default changed from 'ovr' to 'auto' in 0.22. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the + data. See :term:`Glossary ` for details. + + check_input : bool, default=True + If False, the input arrays X and y will not be checked. + + max_squared_sum : float, default=None + Maximum squared sum of X over samples. Used only in SAG solver. + If None, it will be computed, going through all the samples. + The value should be precomputed to speed up cross validation. + + sample_weight : array-like of shape(n_samples,), default=None + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + l1_ratio : float, default=None + The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only + used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent + to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent + to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a + combination of L1 and L2. + + n_threads : int, default=1 + Number of OpenMP threads to use. + + Returns + ------- + coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1) + List of coefficients for the Logistic Regression model. If + fit_intercept is set to True then the second dimension will be + n_features + 1, where the last item represents the intercept. For + ``multiclass='multinomial'``, the shape is (n_classes, n_cs, + n_features) or (n_classes, n_cs, n_features + 1). + + Cs : ndarray + Grid of Cs used for cross-validation. + + n_iter : array of shape (n_cs,) + Actual number of iteration for each Cs. + + Notes + ----- + You might get slightly different results with the solver liblinear than + with the others since this uses LIBLINEAR which penalizes the intercept. + + .. versionchanged:: 0.19 + The "copy" parameter was removed. + """ + if isinstance(Cs, numbers.Integral): + Cs = np.logspace(-4, 4, Cs) + + solver = _check_solver(solver, penalty, dual) + + # Preprocessing. + if check_input: + X = check_array( + X, + accept_sparse="csr", + dtype=np.float64, + accept_large_sparse=solver not in ["liblinear", "sag", "saga"], + ) + y = check_array(y, ensure_2d=False, dtype=None) + check_consistent_length(X, y) + n_samples, n_features = X.shape + + classes = np.unique(y) + random_state = check_random_state(random_state) + + multi_class = _check_multi_class(multi_class, solver, len(classes)) + if pos_class is None and multi_class != "multinomial": + if classes.size > 2: + raise ValueError("To fit OvR, use the pos_class argument") + # np.unique(y) gives labels in sorted order. + pos_class = classes[1] + + if sample_weight is not None or class_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True) + + # If class_weights is a dict (provided by the user), the weights + # are assigned to the original labels. If it is "balanced", then + # the class_weights are assigned after masking the labels with a OvR. + le = LabelEncoder() + if isinstance(class_weight, dict) or ( + multi_class == "multinomial" and class_weight is not None + ): + class_weight_ = compute_class_weight(class_weight, classes=classes, y=y) + sample_weight *= class_weight_[le.fit_transform(y)] + + # For doing a ovr, we need to mask the labels first. For the + # multinomial case this is not necessary. 
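# Illustrative aside (not part of the sklearn sources in this diff): two small
# checks of the preprocessing above. An integer `Cs` becomes a log-spaced grid
# between 1e-4 and 1e4, and a dict-style class weight is folded into
# `sample_weight` per sample. The values below are made up.
import numpy as np

assert np.allclose(np.logspace(-4, 4, 5), [1e-4, 1e-2, 1.0, 1e2, 1e4])

class_weight_ = np.array([0.5, 2.0])   # hypothetical weights for classes 0, 1
y_encoded = np.array([0, 1, 1, 0])     # le.fit_transform(y) for a toy y
sample_weight = np.ones(4) * class_weight_[y_encoded]
assert np.allclose(sample_weight, [0.5, 2.0, 2.0, 0.5])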
+ if multi_class == "ovr": + w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype) + mask = y == pos_class + y_bin = np.ones(y.shape, dtype=X.dtype) + if solver in ["lbfgs", "newton-cg", "newton-cholesky"]: + # HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead + # of in [-1, 1]. + mask_classes = np.array([0, 1]) + y_bin[~mask] = 0.0 + else: + mask_classes = np.array([-1, 1]) + y_bin[~mask] = -1.0 + + # for compute_class_weight + if class_weight == "balanced": + class_weight_ = compute_class_weight( + class_weight, classes=mask_classes, y=y_bin + ) + sample_weight *= class_weight_[le.fit_transform(y_bin)] + + else: + if solver in ["sag", "saga", "lbfgs", "newton-cg"]: + # SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder, + # not LabelBinarizer, i.e. y as a 1d-array of integers. + # LabelEncoder also saves memory compared to LabelBinarizer, especially + # when n_classes is large. + le = LabelEncoder() + Y_multi = le.fit_transform(y).astype(X.dtype, copy=False) + else: + # For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded. + lbin = LabelBinarizer() + Y_multi = lbin.fit_transform(y) + if Y_multi.shape[1] == 1: + Y_multi = np.hstack([1 - Y_multi, Y_multi]) + + w0 = np.zeros( + (classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype + ) + + # IMPORTANT NOTE: + # All solvers relying on LinearModelLoss need to scale the penalty with n_samples + # or the sum of sample weights because the implemented logistic regression + # objective here is (unfortunately) + # C * sum(pointwise_loss) + penalty + # instead of (as LinearModelLoss does) + # mean(pointwise_loss) + 1/C * penalty + if solver in ["lbfgs", "newton-cg", "newton-cholesky"]: + # This needs to be calculated after sample_weight is multiplied by + # class_weight. It is even tested that passing class_weight is equivalent to + # passing sample_weights according to class_weight. + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + + if coef is not None: + # it must work both giving the bias term and not + if multi_class == "ovr": + if coef.size not in (n_features, w0.size): + raise ValueError( + "Initialization coef is of shape %d, expected shape %d or %d" + % (coef.size, n_features, w0.size) + ) + w0[: coef.size] = coef + else: + # For binary problems coef.shape[0] should be 1, otherwise it + # should be classes.size. + n_classes = classes.size + if n_classes == 2: + n_classes = 1 + + if coef.shape[0] != n_classes or coef.shape[1] not in ( + n_features, + n_features + 1, + ): + raise ValueError( + "Initialization coef is of shape (%d, %d), expected " + "shape (%d, %d) or (%d, %d)" + % ( + coef.shape[0], + coef.shape[1], + classes.size, + n_features, + classes.size, + n_features + 1, + ) + ) + + if n_classes == 1: + w0[0, : coef.shape[1]] = -coef + w0[1, : coef.shape[1]] = coef + else: + w0[:, : coef.shape[1]] = coef + + if multi_class == "multinomial": + if solver in ["lbfgs", "newton-cg"]: + # scipy.optimize.minimize and newton-cg accept only ravelled parameters, + # i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and + # reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F"). + # As w0 is F-contiguous, ravel(order="F") also avoids a copy. 
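# Illustrative aside (not part of the sklearn sources in this diff): for an
# F-contiguous coefficient matrix, ravel(order="F") returns a view (no copy)
# and reshape(..., order="F") restores the (n_classes, n_dof) layout exactly,
# which is what the comment above relies on. Shapes below are made up.
import numpy as np

w = np.asfortranarray(np.arange(6.0).reshape(2, 3))   # (n_classes, n_dof)
flat = w.ravel(order="F")
assert np.shares_memory(flat, w)                       # no copy was made
assert np.array_equal(flat.reshape((2, -1), order="F"), w)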
+ w0 = w0.ravel(order="F") + loss = LinearModelLoss( + base_loss=HalfMultinomialLoss(n_classes=classes.size), + fit_intercept=fit_intercept, + ) + target = Y_multi + if solver == "lbfgs": + func = loss.loss_gradient + elif solver == "newton-cg": + func = loss.loss + grad = loss.gradient + hess = loss.gradient_hessian_product # hess = [gradient, hessp] + warm_start_sag = {"coef": w0.T} + else: + target = y_bin + if solver == "lbfgs": + loss = LinearModelLoss( + base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept + ) + func = loss.loss_gradient + elif solver == "newton-cg": + loss = LinearModelLoss( + base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept + ) + func = loss.loss + grad = loss.gradient + hess = loss.gradient_hessian_product # hess = [gradient, hessp] + elif solver == "newton-cholesky": + loss = LinearModelLoss( + base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept + ) + warm_start_sag = {"coef": np.expand_dims(w0, axis=1)} + + coefs = list() + n_iter = np.zeros(len(Cs), dtype=np.int32) + for i, C in enumerate(Cs): + if solver == "lbfgs": + l2_reg_strength = 1.0 / (C * sw_sum) + iprint = [-1, 50, 1, 100, 101][ + np.searchsorted(np.array([0, 1, 2, 3]), verbose) + ] + opt_res = optimize.minimize( + func, + w0, + method="L-BFGS-B", + jac=True, + args=(X, target, sample_weight, l2_reg_strength, n_threads), + options={ + "maxiter": max_iter, + "maxls": 50, # default is 20 + "iprint": iprint, + "gtol": tol, + "ftol": 64 * np.finfo(float).eps, + }, + ) + n_iter_i = _check_optimize_result( + solver, + opt_res, + max_iter, + extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG, + ) + w0, loss = opt_res.x, opt_res.fun + elif solver == "newton-cg": + l2_reg_strength = 1.0 / (C * sw_sum) + args = (X, target, sample_weight, l2_reg_strength, n_threads) + w0, n_iter_i = _newton_cg( + hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol + ) + elif solver == "newton-cholesky": + l2_reg_strength = 1.0 / (C * sw_sum) + sol = NewtonCholeskySolver( + coef=w0, + linear_loss=loss, + l2_reg_strength=l2_reg_strength, + tol=tol, + max_iter=max_iter, + n_threads=n_threads, + verbose=verbose, + ) + w0 = sol.solve(X=X, y=target, sample_weight=sample_weight) + n_iter_i = sol.iteration + elif solver == "liblinear": + ( + coef_, + intercept_, + n_iter_i, + ) = _fit_liblinear( + X, + target, + C, + fit_intercept, + intercept_scaling, + None, + penalty, + dual, + verbose, + max_iter, + tol, + random_state, + sample_weight=sample_weight, + ) + if fit_intercept: + w0 = np.concatenate([coef_.ravel(), intercept_]) + else: + w0 = coef_.ravel() + # n_iter_i is an array for each class. However, `target` is always encoded + # in {-1, 1}, so we only take the first element of n_iter_i. 
+ n_iter_i = n_iter_i.item() + + elif solver in ["sag", "saga"]: + if multi_class == "multinomial": + target = target.astype(X.dtype, copy=False) + loss = "multinomial" + else: + loss = "log" + # alpha is for L2-norm, beta is for L1-norm + if penalty == "l1": + alpha = 0.0 + beta = 1.0 / C + elif penalty == "l2": + alpha = 1.0 / C + beta = 0.0 + else: # Elastic-Net penalty + alpha = (1.0 / C) * (1 - l1_ratio) + beta = (1.0 / C) * l1_ratio + + w0, n_iter_i, warm_start_sag = sag_solver( + X, + target, + sample_weight, + loss, + alpha, + beta, + max_iter, + tol, + verbose, + random_state, + False, + max_squared_sum, + warm_start_sag, + is_saga=(solver == "saga"), + ) + + else: + raise ValueError( + "solver must be one of {'liblinear', 'lbfgs', " + "'newton-cg', 'sag'}, got '%s' instead" % solver + ) + + if multi_class == "multinomial": + n_classes = max(2, classes.size) + if solver in ["lbfgs", "newton-cg"]: + multi_w0 = np.reshape(w0, (n_classes, -1), order="F") + else: + multi_w0 = w0 + if n_classes == 2: + multi_w0 = multi_w0[1][np.newaxis, :] + coefs.append(multi_w0.copy()) + else: + coefs.append(w0.copy()) + + n_iter[i] = n_iter_i + + return np.array(coefs), np.array(Cs), n_iter + + +# helper function for LogisticCV +def _log_reg_scoring_path( + X, + y, + train, + test, + *, + pos_class, + Cs, + scoring, + fit_intercept, + max_iter, + tol, + class_weight, + verbose, + solver, + penalty, + dual, + intercept_scaling, + multi_class, + random_state, + max_squared_sum, + sample_weight, + l1_ratio, + score_params, +): + """Computes scores across logistic_regression_path + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target labels. + + train : list of indices + The indices of the train set. + + test : list of indices + The indices of the test set. + + pos_class : int + The class with respect to which we perform a one-vs-all fit. + If None, then it is assumed that the given problem is binary. + + Cs : int or list of floats + Each of the values in Cs describes the inverse of + regularization strength. If Cs is as an int, then a grid of Cs + values are chosen in a logarithmic scale between 1e-4 and 1e4. + + scoring : callable + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. For a list of scoring functions + that can be used, look at :mod:`sklearn.metrics`. + + fit_intercept : bool + If False, then the bias term is set to zero. Else the last + term of each coef_ gives us the intercept. + + max_iter : int + Maximum number of iterations for the solver. + + tol : float + Tolerance for stopping criteria. + + class_weight : dict or 'balanced' + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + verbose : int + For the liblinear and lbfgs solvers set verbose to any positive + number for verbosity. + + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'} + Decides which solver to use. 
+ + penalty : {'l1', 'l2', 'elasticnet'} + Used to specify the norm used in the penalization. The 'newton-cg', + 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is + only supported by the 'saga' solver. + + dual : bool + Dual or primal formulation. Dual formulation is only implemented for + l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + intercept_scaling : float + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equals to + intercept_scaling is appended to the instance vector. + The intercept becomes intercept_scaling * synthetic feature weight + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + multi_class : {'auto', 'ovr', 'multinomial'} + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + + random_state : int, RandomState instance + Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the + data. See :term:`Glossary ` for details. + + max_squared_sum : float + Maximum squared sum of X over samples. Used only in SAG solver. + If None, it will be computed, going through all the samples. + The value should be precomputed to speed up cross validation. + + sample_weight : array-like of shape(n_samples,) + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + l1_ratio : float + The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only + used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent + to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent + to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a + combination of L1 and L2. + + score_params : dict + Parameters to pass to the `score` method of the underlying scorer. + + Returns + ------- + coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1) + List of coefficients for the Logistic Regression model. If + fit_intercept is set to True then the second dimension will be + n_features + 1, where the last item represents the intercept. + + Cs : ndarray + Grid of Cs used for cross-validation. + + scores : ndarray of shape (n_cs,) + Scores obtained for each Cs. + + n_iter : ndarray of shape(n_cs,) + Actual number of iteration for each Cs. 
+ """ + X_train = X[train] + X_test = X[test] + y_train = y[train] + y_test = y[test] + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + sample_weight = sample_weight[train] + + coefs, Cs, n_iter = _logistic_regression_path( + X_train, + y_train, + Cs=Cs, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + class_weight=class_weight, + pos_class=pos_class, + multi_class=multi_class, + tol=tol, + verbose=verbose, + dual=dual, + penalty=penalty, + intercept_scaling=intercept_scaling, + random_state=random_state, + check_input=False, + max_squared_sum=max_squared_sum, + sample_weight=sample_weight, + ) + + log_reg = LogisticRegression(solver=solver, multi_class=multi_class) + + # The score method of Logistic Regression has a classes_ attribute. + if multi_class == "ovr": + log_reg.classes_ = np.array([-1, 1]) + elif multi_class == "multinomial": + log_reg.classes_ = np.unique(y_train) + else: + raise ValueError( + "multi_class should be either multinomial or ovr, got %d" % multi_class + ) + + if pos_class is not None: + mask = y_test == pos_class + y_test = np.ones(y_test.shape, dtype=np.float64) + y_test[~mask] = -1.0 + + scores = list() + + scoring = get_scorer(scoring) + for w in coefs: + if multi_class == "ovr": + w = w[np.newaxis, :] + if fit_intercept: + log_reg.coef_ = w[:, :-1] + log_reg.intercept_ = w[:, -1] + else: + log_reg.coef_ = w + log_reg.intercept_ = 0.0 + + if scoring is None: + scores.append(log_reg.score(X_test, y_test)) + else: + score_params = score_params or {} + score_params = _check_method_params(X=X, params=score_params, indices=test) + scores.append(scoring(log_reg, X_test, y_test, **score_params)) + + return coefs, Cs, np.array(scores), n_iter + + +class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator): + """ + Logistic Regression (aka logit, MaxEnt) classifier. + + In the multiclass case, the training algorithm uses the one-vs-rest (OvR) + scheme if the 'multi_class' option is set to 'ovr', and uses the + cross-entropy loss if the 'multi_class' option is set to 'multinomial'. + (Currently the 'multinomial' option is supported only by the 'lbfgs', + 'sag', 'saga' and 'newton-cg' solvers.) + + This class implements regularized logistic regression using the + 'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note + that regularization is applied by default**. It can handle both dense + and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit + floats for optimal performance; any other input format will be converted + (and copied). + + The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization + with primal formulation, or no regularization. The 'liblinear' solver + supports both L1 and L2 regularization, with a dual formulation only for + the L2 penalty. The Elastic-Net regularization is only supported by the + 'saga' solver. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + penalty : {'l1', 'l2', 'elasticnet', None}, default='l2' + Specify the norm of the penalty: + + - `None`: no penalty is added; + - `'l2'`: add a L2 penalty term and it is the default choice; + - `'l1'`: add a L1 penalty term; + - `'elasticnet'`: both L1 and L2 penalty terms are added. + + .. warning:: + Some penalties may not work with some solvers. See the parameter + `solver` below, to know the compatibility between the penalty and + solver. + + .. 
versionadded:: 0.19 + l1 penalty with SAGA solver (allowing 'multinomial' + L1) + + dual : bool, default=False + Dual (constrained) or primal (regularized, see also + :ref:`this equation `) formulation. Dual formulation + is only implemented for l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + tol : float, default=1e-4 + Tolerance for stopping criteria. + + C : float, default=1.0 + Inverse of regularization strength; must be a positive float. + Like in support vector machines, smaller values specify stronger + regularization. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the decision function. + + intercept_scaling : float, default=1 + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equal to + intercept_scaling is appended to the instance vector. + The intercept becomes ``intercept_scaling * synthetic_feature_weight``. + + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + .. versionadded:: 0.17 + *class_weight='balanced'* + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the + data. See :term:`Glossary ` for details. + + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \ + default='lbfgs' + + Algorithm to use in the optimization problem. Default is 'lbfgs'. + To choose a solver, you might want to consider the following aspects: + + - For small datasets, 'liblinear' is a good choice, whereas 'sag' + and 'saga' are faster for large ones; + - For multiclass problems, only 'newton-cg', 'sag', 'saga' and + 'lbfgs' handle multinomial loss; + - 'liblinear' is limited to one-versus-rest schemes. + - 'newton-cholesky' is a good choice for `n_samples` >> `n_features`, + especially with one-hot encoded categorical features with rare + categories. Note that it is limited to binary classification and the + one-versus-rest reduction for multiclass classification. Be aware that + the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. warning:: + The choice of the algorithm depends on the penalty chosen. + Supported penalties by solver: + + - 'lbfgs' - ['l2', None] + - 'liblinear' - ['l1', 'l2'] + - 'newton-cg' - ['l2', None] + - 'newton-cholesky' - ['l2', None] + - 'sag' - ['l2', None] + - 'saga' - ['elasticnet', 'l1', 'l2', None] + + .. note:: + 'sag' and 'saga' fast convergence is only guaranteed on features + with approximately the same scale. You can preprocess the data with + a scaler from :mod:`sklearn.preprocessing`. + + .. 
seealso:: + Refer to the User Guide for more information regarding + :class:`LogisticRegression` and more specifically the + :ref:`Table ` + summarizing solver/penalty supports. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + .. versionchanged:: 0.22 + The default solver changed from 'liblinear' to 'lbfgs' in 0.22. + .. versionadded:: 1.2 + newton-cholesky solver. + + max_iter : int, default=100 + Maximum number of iterations taken for the solvers to converge. + + multi_class : {'auto', 'ovr', 'multinomial'}, default='auto' + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', + and otherwise selects 'multinomial'. + + .. versionadded:: 0.18 + Stochastic Average Gradient descent solver for 'multinomial' case. + .. versionchanged:: 0.22 + Default changed from 'ovr' to 'auto' in 0.22. + + verbose : int, default=0 + For the liblinear and lbfgs solvers set verbose to any positive + number for verbosity. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + Useless for liblinear solver. See :term:`the Glossary `. + + .. versionadded:: 0.17 + *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers. + + n_jobs : int, default=None + Number of CPU cores used when parallelizing over classes if + multi_class='ovr'". This parameter is ignored when the ``solver`` is + set to 'liblinear' regardless of whether 'multi_class' is specified or + not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` + context. ``-1`` means using all processors. + See :term:`Glossary ` for more details. + + l1_ratio : float, default=None + The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only + used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent + to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent + to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a + combination of L1 and L2. + + Attributes + ---------- + + classes_ : ndarray of shape (n_classes, ) + A list of class labels known to the classifier. + + coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) + Coefficient of the features in the decision function. + + `coef_` is of shape (1, n_features) when the given problem is binary. + In particular, when `multi_class='multinomial'`, `coef_` corresponds + to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False). + + intercept_ : ndarray of shape (1,) or (n_classes,) + Intercept (a.k.a. bias) added to the decision function. + + If `fit_intercept` is set to False, the intercept is set to zero. + `intercept_` is of shape (1,) when the given problem is binary. + In particular, when `multi_class='multinomial'`, `intercept_` + corresponds to outcome 1 (True) and `-intercept_` corresponds to + outcome 0 (False). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + n_iter_ : ndarray of shape (n_classes,) or (1, ) + Actual number of iterations for all classes. If binary or multinomial, + it returns only 1 element. For liblinear solver, only the maximum + number of iteration across all classes is given. + + .. versionchanged:: 0.20 + + In SciPy <= 1.0.0 the number of lbfgs iterations may exceed + ``max_iter``. ``n_iter_`` will now report at most ``max_iter``. + + See Also + -------- + SGDClassifier : Incrementally trained logistic regression (when given + the parameter ``loss="log_loss"``). + LogisticRegressionCV : Logistic regression with built-in cross validation. + + Notes + ----- + The underlying C implementation uses a random number generator to + select features when fitting the model. It is thus not uncommon, + to have slightly different results for the same input data. If + that happens, try with a smaller tol parameter. + + Predict output may not match that of standalone liblinear in certain + cases. See :ref:`differences from liblinear ` + in the narrative documentation. + + References + ---------- + + L-BFGS-B -- Software for Large-scale Bound-constrained Optimization + Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales. + http://users.iems.northwestern.edu/~nocedal/lbfgsb.html + + LIBLINEAR -- A Library for Large Linear Classification + https://www.csie.ntu.edu.tw/~cjlin/liblinear/ + + SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach + Minimizing Finite Sums with the Stochastic Average Gradient + https://hal.inria.fr/hal-00860051/document + + SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014). + :arxiv:`"SAGA: A Fast Incremental Gradient Method With Support + for Non-Strongly Convex Composite Objectives" <1407.0202>` + + Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent + methods for logistic regression and maximum entropy models. + Machine Learning 85(1-2):41-75. + https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = load_iris(return_X_y=True) + >>> clf = LogisticRegression(random_state=0).fit(X, y) + >>> clf.predict(X[:2, :]) + array([0, 0]) + >>> clf.predict_proba(X[:2, :]) + array([[9.8...e-01, 1.8...e-02, 1.4...e-08], + [9.7...e-01, 2.8...e-02, ...e-08]]) + >>> clf.score(X, y) + 0.97... 
+ """ + + _parameter_constraints: dict = { + "penalty": [StrOptions({"l1", "l2", "elasticnet"}), None], + "dual": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "C": [Interval(Real, 0, None, closed="right")], + "fit_intercept": ["boolean"], + "intercept_scaling": [Interval(Real, 0, None, closed="neither")], + "class_weight": [dict, StrOptions({"balanced"}), None], + "random_state": ["random_state"], + "solver": [ + StrOptions( + {"lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga"} + ) + ], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "multi_class": [StrOptions({"auto", "ovr", "multinomial"})], + "verbose": ["verbose"], + "warm_start": ["boolean"], + "n_jobs": [None, Integral], + "l1_ratio": [Interval(Real, 0, 1, closed="both"), None], + } + + def __init__( + self, + penalty="l2", + *, + dual=False, + tol=1e-4, + C=1.0, + fit_intercept=True, + intercept_scaling=1, + class_weight=None, + random_state=None, + solver="lbfgs", + max_iter=100, + multi_class="auto", + verbose=0, + warm_start=False, + n_jobs=None, + l1_ratio=None, + ): + self.penalty = penalty + self.dual = dual + self.tol = tol + self.C = C + self.fit_intercept = fit_intercept + self.intercept_scaling = intercept_scaling + self.class_weight = class_weight + self.random_state = random_state + self.solver = solver + self.max_iter = max_iter + self.multi_class = multi_class + self.verbose = verbose + self.warm_start = warm_start + self.n_jobs = n_jobs + self.l1_ratio = l1_ratio + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """ + Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like of shape (n_samples,) default=None + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + .. versionadded:: 0.17 + *sample_weight* support to LogisticRegression. + + Returns + ------- + self + Fitted estimator. + + Notes + ----- + The SAGA solver supports both float64 and float32 bit arrays. + """ + solver = _check_solver(self.solver, self.penalty, self.dual) + + if self.penalty != "elasticnet" and self.l1_ratio is not None: + warnings.warn( + "l1_ratio parameter is only used when penalty is " + "'elasticnet'. 
Got " + "(penalty={})".format(self.penalty) + ) + + if self.penalty == "elasticnet" and self.l1_ratio is None: + raise ValueError("l1_ratio must be specified when penalty is elasticnet.") + + if self.penalty is None: + if self.C != 1.0: # default values + warnings.warn( + "Setting penalty=None will ignore the C and l1_ratio parameters" + ) + # Note that check for l1_ratio is done right above + C_ = np.inf + penalty = "l2" + else: + C_ = self.C + penalty = self.penalty + + if solver == "lbfgs": + _dtype = np.float64 + else: + _dtype = [np.float64, np.float32] + + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + dtype=_dtype, + order="C", + accept_large_sparse=solver not in ["liblinear", "sag", "saga"], + ) + check_classification_targets(y) + self.classes_ = np.unique(y) + + multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_)) + + if solver == "liblinear": + if effective_n_jobs(self.n_jobs) != 1: + warnings.warn( + "'n_jobs' > 1 does not have any effect when" + " 'solver' is set to 'liblinear'. Got 'n_jobs'" + " = {}.".format(effective_n_jobs(self.n_jobs)) + ) + self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear( + X, + y, + self.C, + self.fit_intercept, + self.intercept_scaling, + self.class_weight, + self.penalty, + self.dual, + self.verbose, + self.max_iter, + self.tol, + self.random_state, + sample_weight=sample_weight, + ) + return self + + if solver in ["sag", "saga"]: + max_squared_sum = row_norms(X, squared=True).max() + else: + max_squared_sum = None + + n_classes = len(self.classes_) + classes_ = self.classes_ + if n_classes < 2: + raise ValueError( + "This solver needs samples of at least 2 classes" + " in the data, but the data contains only one" + " class: %r" + % classes_[0] + ) + + if len(self.classes_) == 2: + n_classes = 1 + classes_ = classes_[1:] + + if self.warm_start: + warm_start_coef = getattr(self, "coef_", None) + else: + warm_start_coef = None + if warm_start_coef is not None and self.fit_intercept: + warm_start_coef = np.append( + warm_start_coef, self.intercept_[:, np.newaxis], axis=1 + ) + + # Hack so that we iterate only once for the multinomial case. + if multi_class == "multinomial": + classes_ = [None] + warm_start_coef = [warm_start_coef] + if warm_start_coef is None: + warm_start_coef = [None] * n_classes + + path_func = delayed(_logistic_regression_path) + + # The SAG solver releases the GIL so it's more efficient to use + # threads for this solver. + if solver in ["sag", "saga"]: + prefer = "threads" + else: + prefer = "processes" + + # TODO: Refactor this to avoid joblib parallelism entirely when doing binary + # and multinomial multiclass classification and use joblib only for the + # one-vs-rest multiclass case. 
+ if ( + solver in ["lbfgs", "newton-cg", "newton-cholesky"] + and len(classes_) == 1 + and effective_n_jobs(self.n_jobs) == 1 + ): + # In the future, we would like n_threads = _openmp_effective_n_threads() + # For the time being, we just do + n_threads = 1 + else: + n_threads = 1 + + fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)( + path_func( + X, + y, + pos_class=class_, + Cs=[C_], + l1_ratio=self.l1_ratio, + fit_intercept=self.fit_intercept, + tol=self.tol, + verbose=self.verbose, + solver=solver, + multi_class=multi_class, + max_iter=self.max_iter, + class_weight=self.class_weight, + check_input=False, + random_state=self.random_state, + coef=warm_start_coef_, + penalty=penalty, + max_squared_sum=max_squared_sum, + sample_weight=sample_weight, + n_threads=n_threads, + ) + for class_, warm_start_coef_ in zip(classes_, warm_start_coef) + ) + + fold_coefs_, _, n_iter_ = zip(*fold_coefs_) + self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0] + + n_features = X.shape[1] + if multi_class == "multinomial": + self.coef_ = fold_coefs_[0][0] + else: + self.coef_ = np.asarray(fold_coefs_) + self.coef_ = self.coef_.reshape( + n_classes, n_features + int(self.fit_intercept) + ) + + if self.fit_intercept: + self.intercept_ = self.coef_[:, -1] + self.coef_ = self.coef_[:, :-1] + else: + self.intercept_ = np.zeros(n_classes) + + return self + + def predict_proba(self, X): + """ + Probability estimates. + + The returned estimates for all classes are ordered by the + label of classes. + + For a multi_class problem, if multi_class is set to be "multinomial" + the softmax function is used to find the predicted probability of + each class. + Else use a one-vs-rest approach, i.e. calculate the probability + of each class assuming it to be positive using the logistic function. + and normalize these values across all the classes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Vector to be scored, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + T : array-like of shape (n_samples, n_classes) + Returns the probability of the sample for each class in the model, + where classes are ordered as they are in ``self.classes_``. + """ + check_is_fitted(self) + + ovr = self.multi_class in ["ovr", "warn"] or ( + self.multi_class == "auto" + and ( + self.classes_.size <= 2 + or self.solver in ("liblinear", "newton-cholesky") + ) + ) + if ovr: + return super()._predict_proba_lr(X) + else: + decision = self.decision_function(X) + if decision.ndim == 1: + # Workaround for multi_class="multinomial" and binary outcomes + # which requires softmax prediction with only a 1D decision. + decision_2d = np.c_[-decision, decision] + else: + decision_2d = decision + return softmax(decision_2d, copy=False) + + def predict_log_proba(self, X): + """ + Predict logarithm of probability estimates. + + The returned estimates for all classes are ordered by the + label of classes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Vector to be scored, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + T : array-like of shape (n_samples, n_classes) + Returns the log-probability of the sample for each class in the + model, where classes are ordered as they are in ``self.classes_``. 
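# Illustrative aside (not part of the sklearn sources in this diff): in
# predict_proba above, a binary problem fitted with multi_class="multinomial"
# yields a 1d decision d that is expanded to [-d, d] before the softmax, so
# the class-1 probability equals expit(2 * d). The decision values are made up.
import numpy as np

d = np.array([-1.5, 0.0, 2.0])
decision_2d = np.c_[-d, d]
proba = np.exp(decision_2d)
proba /= proba.sum(axis=1, keepdims=True)   # softmax over the two columns
assert np.allclose(proba[:, 1], 1.0 / (1.0 + np.exp(-2.0 * d)))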
+ """ + return np.log(self.predict_proba(X)) + + +class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstimator): + """Logistic Regression CV (aka logit, MaxEnt) classifier. + + See glossary entry for :term:`cross-validation estimator`. + + This class implements logistic regression using liblinear, newton-cg, sag + of lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2 + regularization with primal formulation. The liblinear solver supports both + L1 and L2 regularization, with a dual formulation only for the L2 penalty. + Elastic-Net penalty is only supported by the saga solver. + + For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter + is selected by the cross-validator + :class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed + using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs' + solvers can warm-start the coefficients (see :term:`Glossary`). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + Cs : int or list of floats, default=10 + Each of the values in Cs describes the inverse of regularization + strength. If Cs is as an int, then a grid of Cs values are chosen + in a logarithmic scale between 1e-4 and 1e4. + Like in support vector machines, smaller values specify stronger + regularization. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the decision function. + + cv : int or cross-validation generator, default=None + The default cross-validation generator used is Stratified K-Folds. + If an integer is provided, then it is the number of folds used. + See the module :mod:`sklearn.model_selection` module for the + list of possible cross-validation objects. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + dual : bool, default=False + Dual (constrained) or primal (regularized, see also + :ref:`this equation `) formulation. Dual formulation + is only implemented for l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + penalty : {'l1', 'l2', 'elasticnet'}, default='l2' + Specify the norm of the penalty: + + - `'l2'`: add a L2 penalty term (used by default); + - `'l1'`: add a L1 penalty term; + - `'elasticnet'`: both L1 and L2 penalty terms are added. + + .. warning:: + Some penalties may not work with some solvers. See the parameter + `solver` below, to know the compatibility between the penalty and + solver. + + scoring : str or callable, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. For a list of scoring functions + that can be used, look at :mod:`sklearn.metrics`. The + default scoring option used is 'accuracy'. + + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \ + default='lbfgs' + + Algorithm to use in the optimization problem. Default is 'lbfgs'. + To choose a solver, you might want to consider the following aspects: + + - For small datasets, 'liblinear' is a good choice, whereas 'sag' + and 'saga' are faster for large ones; + - For multiclass problems, only 'newton-cg', 'sag', 'saga' and + 'lbfgs' handle multinomial loss; + - 'liblinear' might be slower in :class:`LogisticRegressionCV` + because it does not handle warm-starting. 'liblinear' is + limited to one-versus-rest schemes. 
+ - 'newton-cholesky' is a good choice for `n_samples` >> `n_features`, + especially with one-hot encoded categorical features with rare + categories. Note that it is limited to binary classification and the + one-versus-rest reduction for multiclass classification. Be aware that + the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. warning:: + The choice of the algorithm depends on the penalty chosen. + Supported penalties by solver: + + - 'lbfgs' - ['l2'] + - 'liblinear' - ['l1', 'l2'] + - 'newton-cg' - ['l2'] + - 'newton-cholesky' - ['l2'] + - 'sag' - ['l2'] + - 'saga' - ['elasticnet', 'l1', 'l2'] + + .. note:: + 'sag' and 'saga' fast convergence is only guaranteed on features + with approximately the same scale. You can preprocess the data with + a scaler from :mod:`sklearn.preprocessing`. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + .. versionadded:: 1.2 + newton-cholesky solver. + + tol : float, default=1e-4 + Tolerance for stopping criteria. + + max_iter : int, default=100 + Maximum number of iterations of the optimization algorithm. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + .. versionadded:: 0.17 + class_weight == 'balanced' + + n_jobs : int, default=None + Number of CPU cores used during the cross-validation loop. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any + positive number for verbosity. + + refit : bool, default=True + If set to True, the scores are averaged across all folds, and the + coefs and the C that corresponds to the best score is taken, and a + final refit is done using these parameters. + Otherwise the coefs, intercepts and C that correspond to the + best scores across folds are averaged. + + intercept_scaling : float, default=1 + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equal to + intercept_scaling is appended to the instance vector. + The intercept becomes ``intercept_scaling * synthetic_feature_weight``. + + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + multi_class : {'auto, 'ovr', 'multinomial'}, default='auto' + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', + and otherwise selects 'multinomial'. + + .. 
versionadded:: 0.18 + Stochastic Average Gradient descent solver for 'multinomial' case. + .. versionchanged:: 0.22 + Default changed from 'ovr' to 'auto' in 0.22. + + random_state : int, RandomState instance, default=None + Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data. + Note that this only applies to the solver and not the cross-validation + generator. See :term:`Glossary ` for details. + + l1_ratios : list of float, default=None + The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. + Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to + using ``penalty='l2'``, while 1 is equivalent to using + ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination + of L1 and L2. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes, ) + A list of class labels known to the classifier. + + coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) + Coefficient of the features in the decision function. + + `coef_` is of shape (1, n_features) when the given problem + is binary. + + intercept_ : ndarray of shape (1,) or (n_classes,) + Intercept (a.k.a. bias) added to the decision function. + + If `fit_intercept` is set to False, the intercept is set to zero. + `intercept_` is of shape(1,) when the problem is binary. + + Cs_ : ndarray of shape (n_cs) + Array of C i.e. inverse of regularization parameter values used + for cross-validation. + + l1_ratios_ : ndarray of shape (n_l1_ratios) + Array of l1_ratios used for cross-validation. If no l1_ratio is used + (i.e. penalty is not 'elasticnet'), this is set to ``[None]`` + + coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \ + (n_folds, n_cs, n_features + 1) + dict with classes as the keys, and the path of coefficients obtained + during cross-validating across each fold and then across each Cs + after doing an OvR for the corresponding class as values. + If the 'multi_class' option is set to 'multinomial', then + the coefs_paths are the coefficients corresponding to each class. + Each dict value has shape ``(n_folds, n_cs, n_features)`` or + ``(n_folds, n_cs, n_features + 1)`` depending on whether the + intercept is fit or not. If ``penalty='elasticnet'``, the shape is + ``(n_folds, n_cs, n_l1_ratios_, n_features)`` or + ``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``. + + scores_ : dict + dict with classes as the keys, and the values as the + grid of scores obtained during cross-validating each fold, after doing + an OvR for the corresponding class. If the 'multi_class' option + given is 'multinomial' then the same scores are repeated across + all classes, since this is the multinomial class. Each dict value + has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if + ``penalty='elasticnet'``. + + C_ : ndarray of shape (n_classes,) or (n_classes - 1,) + Array of C that maps to the best scores across every class. If refit is + set to False, then for each class, the best C is the average of the + C's that correspond to the best scores for each fold. + `C_` is of shape(n_classes,) when the problem is binary. + + l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,) + Array of l1_ratio that maps to the best scores across every class. If + refit is set to False, then for each class, the best l1_ratio is the + average of the l1_ratio's that correspond to the best scores for each + fold. `l1_ratio_` is of shape(n_classes,) when the problem is binary. 
+ + n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs) + Actual number of iterations for all classes, folds and Cs. + In the binary or multinomial cases, the first dimension is equal to 1. + If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds, + n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + LogisticRegression : Logistic regression without tuning the + hyperparameter `C`. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegressionCV + >>> X, y = load_iris(return_X_y=True) + >>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y) + >>> clf.predict(X[:2, :]) + array([0, 0]) + >>> clf.predict_proba(X[:2, :]).shape + (2, 3) + >>> clf.score(X, y) + 0.98... + """ + + _parameter_constraints: dict = {**LogisticRegression._parameter_constraints} + + for param in ["C", "warm_start", "l1_ratio"]: + _parameter_constraints.pop(param) + + _parameter_constraints.update( + { + "Cs": [Interval(Integral, 1, None, closed="left"), "array-like"], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "l1_ratios": ["array-like", None], + "refit": ["boolean"], + "penalty": [StrOptions({"l1", "l2", "elasticnet"})], + } + ) + + def __init__( + self, + *, + Cs=10, + fit_intercept=True, + cv=None, + dual=False, + penalty="l2", + scoring=None, + solver="lbfgs", + tol=1e-4, + max_iter=100, + class_weight=None, + n_jobs=None, + verbose=0, + refit=True, + intercept_scaling=1.0, + multi_class="auto", + random_state=None, + l1_ratios=None, + ): + self.Cs = Cs + self.fit_intercept = fit_intercept + self.cv = cv + self.dual = dual + self.penalty = penalty + self.scoring = scoring + self.tol = tol + self.max_iter = max_iter + self.class_weight = class_weight + self.n_jobs = n_jobs + self.verbose = verbose + self.solver = solver + self.refit = refit + self.intercept_scaling = intercept_scaling + self.multi_class = multi_class + self.random_state = random_state + self.l1_ratios = l1_ratios + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, **params): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like of shape (n_samples,) default=None + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + **params : dict + Parameters to pass to the underlying splitter and scorer. + + .. versionadded:: 1.4 + + Returns + ------- + self : object + Fitted LogisticRegressionCV estimator. 
+ """ + _raise_for_params(params, self, "fit") + + solver = _check_solver(self.solver, self.penalty, self.dual) + + if self.penalty == "elasticnet": + if ( + self.l1_ratios is None + or len(self.l1_ratios) == 0 + or any( + ( + not isinstance(l1_ratio, numbers.Number) + or l1_ratio < 0 + or l1_ratio > 1 + ) + for l1_ratio in self.l1_ratios + ) + ): + raise ValueError( + "l1_ratios must be a list of numbers between " + "0 and 1; got (l1_ratios=%r)" + % self.l1_ratios + ) + l1_ratios_ = self.l1_ratios + else: + if self.l1_ratios is not None: + warnings.warn( + "l1_ratios parameter is only used when penalty " + "is 'elasticnet'. Got (penalty={})".format(self.penalty) + ) + + l1_ratios_ = [None] + + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + dtype=np.float64, + order="C", + accept_large_sparse=solver not in ["liblinear", "sag", "saga"], + ) + check_classification_targets(y) + + class_weight = self.class_weight + + # Encode for string labels + label_encoder = LabelEncoder().fit(y) + y = label_encoder.transform(y) + if isinstance(class_weight, dict): + class_weight = { + label_encoder.transform([cls])[0]: v for cls, v in class_weight.items() + } + + # The original class labels + classes = self.classes_ = label_encoder.classes_ + encoded_labels = label_encoder.transform(label_encoder.classes_) + + multi_class = _check_multi_class(self.multi_class, solver, len(classes)) + + if solver in ["sag", "saga"]: + max_squared_sum = row_norms(X, squared=True).max() + else: + max_squared_sum = None + + if _routing_enabled(): + routed_params = process_routing( + self, + "fit", + sample_weight=sample_weight, + **params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={}) + routed_params.scorer = Bunch(score=params) + if sample_weight is not None: + routed_params.scorer.score["sample_weight"] = sample_weight + + # init cross-validation generator + cv = check_cv(self.cv, y, classifier=True) + folds = list(cv.split(X, y, **routed_params.splitter.split)) + + # Use the label encoded classes + n_classes = len(encoded_labels) + + if n_classes < 2: + raise ValueError( + "This solver needs samples of at least 2 classes" + " in the data, but the data contains only one" + " class: %r" + % classes[0] + ) + + if n_classes == 2: + # OvR in case of binary problems is as good as fitting + # the higher label + n_classes = 1 + encoded_labels = encoded_labels[1:] + classes = classes[1:] + + # We need this hack to iterate only once over labels, in the case of + # multi_class = multinomial, without changing the value of the labels. + if multi_class == "multinomial": + iter_encoded_labels = iter_classes = [None] + else: + iter_encoded_labels = encoded_labels + iter_classes = classes + + # compute the class weights for the entire dataset y + if class_weight == "balanced": + class_weight = compute_class_weight( + class_weight, classes=np.arange(len(self.classes_)), y=y + ) + class_weight = dict(enumerate(class_weight)) + + path_func = delayed(_log_reg_scoring_path) + + # The SAG solver releases the GIL so it's more efficient to use + # threads for this solver. 
+ if self.solver in ["sag", "saga"]: + prefer = "threads" + else: + prefer = "processes" + + fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)( + path_func( + X, + y, + train, + test, + pos_class=label, + Cs=self.Cs, + fit_intercept=self.fit_intercept, + penalty=self.penalty, + dual=self.dual, + solver=solver, + tol=self.tol, + max_iter=self.max_iter, + verbose=self.verbose, + class_weight=class_weight, + scoring=self.scoring, + multi_class=multi_class, + intercept_scaling=self.intercept_scaling, + random_state=self.random_state, + max_squared_sum=max_squared_sum, + sample_weight=sample_weight, + l1_ratio=l1_ratio, + score_params=routed_params.scorer.score, + ) + for label in iter_encoded_labels + for train, test in folds + for l1_ratio in l1_ratios_ + ) + + # _log_reg_scoring_path will output different shapes depending on the + # multi_class param, so we need to reshape the outputs accordingly. + # Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the + # rows are equal, so we just take the first one. + # After reshaping, + # - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios) + # - coefs_paths is of shape + # (n_classes, n_folds, n_Cs . n_l1_ratios, n_features) + # - n_iter is of shape + # (n_classes, n_folds, n_Cs . n_l1_ratios) or + # (1, n_folds, n_Cs . n_l1_ratios) + coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_) + self.Cs_ = Cs[0] + if multi_class == "multinomial": + coefs_paths = np.reshape( + coefs_paths, + (len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1), + ) + # equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3), + # (1, 2, 0, 3)) + coefs_paths = np.swapaxes(coefs_paths, 0, 1) + coefs_paths = np.swapaxes(coefs_paths, 0, 2) + self.n_iter_ = np.reshape( + n_iter_, (1, len(folds), len(self.Cs_) * len(l1_ratios_)) + ) + # repeat same scores across all classes + scores = np.tile(scores, (n_classes, 1, 1)) + else: + coefs_paths = np.reshape( + coefs_paths, + (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_), -1), + ) + self.n_iter_ = np.reshape( + n_iter_, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_)) + ) + scores = np.reshape(scores, (n_classes, len(folds), -1)) + self.scores_ = dict(zip(classes, scores)) + self.coefs_paths_ = dict(zip(classes, coefs_paths)) + + self.C_ = list() + self.l1_ratio_ = list() + self.coef_ = np.empty((n_classes, X.shape[1])) + self.intercept_ = np.zeros(n_classes) + for index, (cls, encoded_label) in enumerate( + zip(iter_classes, iter_encoded_labels) + ): + if multi_class == "ovr": + scores = self.scores_[cls] + coefs_paths = self.coefs_paths_[cls] + else: + # For multinomial, all scores are the same across classes + scores = scores[0] + # coefs_paths will keep its original shape because + # logistic_regression_path expects it this way + + if self.refit: + # best_index is between 0 and (n_Cs . 
n_l1_ratios - 1) + # for example, with n_cs=2 and n_l1_ratios=3 + # the layout of scores is + # [c1, c2, c1, c2, c1, c2] + # l1_1 , l1_2 , l1_3 + best_index = scores.sum(axis=0).argmax() + + best_index_C = best_index % len(self.Cs_) + C_ = self.Cs_[best_index_C] + self.C_.append(C_) + + best_index_l1 = best_index // len(self.Cs_) + l1_ratio_ = l1_ratios_[best_index_l1] + self.l1_ratio_.append(l1_ratio_) + + if multi_class == "multinomial": + coef_init = np.mean(coefs_paths[:, :, best_index, :], axis=1) + else: + coef_init = np.mean(coefs_paths[:, best_index, :], axis=0) + + # Note that y is label encoded and hence pos_class must be + # the encoded label / None (for 'multinomial') + w, _, _ = _logistic_regression_path( + X, + y, + pos_class=encoded_label, + Cs=[C_], + solver=solver, + fit_intercept=self.fit_intercept, + coef=coef_init, + max_iter=self.max_iter, + tol=self.tol, + penalty=self.penalty, + class_weight=class_weight, + multi_class=multi_class, + verbose=max(0, self.verbose - 1), + random_state=self.random_state, + check_input=False, + max_squared_sum=max_squared_sum, + sample_weight=sample_weight, + l1_ratio=l1_ratio_, + ) + w = w[0] + + else: + # Take the best scores across every fold and the average of + # all coefficients corresponding to the best scores. + best_indices = np.argmax(scores, axis=1) + if multi_class == "ovr": + w = np.mean( + [coefs_paths[i, best_indices[i], :] for i in range(len(folds))], + axis=0, + ) + else: + w = np.mean( + [ + coefs_paths[:, i, best_indices[i], :] + for i in range(len(folds)) + ], + axis=0, + ) + + best_indices_C = best_indices % len(self.Cs_) + self.C_.append(np.mean(self.Cs_[best_indices_C])) + + if self.penalty == "elasticnet": + best_indices_l1 = best_indices // len(self.Cs_) + self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1])) + else: + self.l1_ratio_.append(None) + + if multi_class == "multinomial": + self.C_ = np.tile(self.C_, n_classes) + self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes) + self.coef_ = w[:, : X.shape[1]] + if self.fit_intercept: + self.intercept_ = w[:, -1] + else: + self.coef_[index] = w[: X.shape[1]] + if self.fit_intercept: + self.intercept_[index] = w[-1] + + self.C_ = np.asarray(self.C_) + self.l1_ratio_ = np.asarray(self.l1_ratio_) + self.l1_ratios_ = np.asarray(l1_ratios_) + # if elasticnet was used, add the l1_ratios dimension to some + # attributes + if self.l1_ratios is not None: + # with n_cs=2 and n_l1_ratios=3 + # the layout of scores is + # [c1, c2, c1, c2, c1, c2] + # l1_1 , l1_2 , l1_3 + # To get a 2d array with the following layout + # l1_1, l1_2, l1_3 + # c1 [[ . , . , . ], + # c2 [ . , . , . ]] + # We need to first reshape and then transpose. + # The same goes for the other arrays + for cls, coefs_path in self.coefs_paths_.items(): + self.coefs_paths_[cls] = coefs_path.reshape( + (len(folds), self.l1_ratios_.size, self.Cs_.size, -1) + ) + self.coefs_paths_[cls] = np.transpose( + self.coefs_paths_[cls], (0, 2, 1, 3) + ) + for cls, score in self.scores_.items(): + self.scores_[cls] = score.reshape( + (len(folds), self.l1_ratios_.size, self.Cs_.size) + ) + self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1)) + + self.n_iter_ = self.n_iter_.reshape( + (-1, len(folds), self.l1_ratios_.size, self.Cs_.size) + ) + self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2)) + + return self + + def score(self, X, y, sample_weight=None, **score_params): + """Score using the `scoring` option on the given test data and labels. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) + True labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + **score_params : dict + Parameters to pass to the `score` method of the underlying scorer. + + .. versionadded:: 1.4 + + Returns + ------- + score : float + Score of self.predict(X) w.r.t. y. + """ + _raise_for_params(score_params, self, "score") + + scoring = self._get_scorer() + if _routing_enabled(): + routed_params = process_routing( + self, + "score", + sample_weight=sample_weight, + **score_params, + ) + else: + routed_params = Bunch() + routed_params.scorer = Bunch(score={}) + if sample_weight is not None: + routed_params.scorer.score["sample_weight"] = sample_weight + + return scoring( + self, + X, + y, + **routed_params.scorer.score, + ) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + splitter=self.cv, + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + .add( + scorer=self._get_scorer(), + method_mapping=MethodMapping() + .add(callee="score", caller="score") + .add(callee="score", caller="fit"), + ) + ) + return router + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } + + def _get_scorer(self): + """Get the scorer based on the scoring method specified. + The default scoring method is `accuracy`. + """ + scoring = self.scoring or "accuracy" + return get_scorer(scoring) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py new file mode 100644 index 0000000000000000000000000000000000000000..b2c25607f91c07b327315825d64fbbee41c359d0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py @@ -0,0 +1,623 @@ +# Author: Johannes Schönberger +# +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + BaseEstimator, + MetaEstimatorMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, + clone, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_consistent_length, check_random_state +from ..utils._param_validation import ( + HasMethods, + Interval, + Options, + RealNotInt, + StrOptions, +) +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.random import sample_without_replacement +from ..utils.validation import _check_sample_weight, check_is_fitted, has_fit_parameter +from ._base import LinearRegression + +_EPSILON = np.spacing(1) + + +def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability): + """Determine number trials such that at least one outlier-free subset is + sampled for the given inlier/outlier ratio. + + Parameters + ---------- + n_inliers : int + Number of inliers in the data. + + n_samples : int + Total number of samples in the data. 
+ + min_samples : int + Minimum number of samples chosen randomly from original data. + + probability : float + Probability (confidence) that one outlier-free sample is generated. + + Returns + ------- + trials : int + Number of trials. + + """ + inlier_ratio = n_inliers / float(n_samples) + nom = max(_EPSILON, 1 - probability) + denom = max(_EPSILON, 1 - inlier_ratio**min_samples) + if nom == 1: + return 0 + if denom == 1: + return float("inf") + return abs(float(np.ceil(np.log(nom) / np.log(denom)))) + + +class RANSACRegressor( + _RoutingNotSupportedMixin, + MetaEstimatorMixin, + RegressorMixin, + MultiOutputMixin, + BaseEstimator, +): + """RANSAC (RANdom SAmple Consensus) algorithm. + + RANSAC is an iterative algorithm for the robust estimation of parameters + from a subset of inliers from the complete data set. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object, default=None + Base estimator object which implements the following methods: + + * `fit(X, y)`: Fit model to given training data and target values. + * `score(X, y)`: Returns the mean accuracy on the given test data, + which is used for the stop criterion defined by `stop_score`. + Additionally, the score is used to decide which of two equally + large consensus sets is chosen as the better one. + * `predict(X)`: Returns predicted values using the linear model, + which is used to compute residual error using loss function. + + If `estimator` is None, then + :class:`~sklearn.linear_model.LinearRegression` is used for + target values of dtype float. + + Note that the current implementation only supports regression + estimators. + + min_samples : int (>= 1) or float ([0, 1]), default=None + Minimum number of samples chosen randomly from original data. Treated + as an absolute number of samples for `min_samples >= 1`, treated as a + relative number `ceil(min_samples * X.shape[0])` for + `min_samples < 1`. This is typically chosen as the minimal number of + samples necessary to estimate the given `estimator`. By default a + :class:`~sklearn.linear_model.LinearRegression` estimator is assumed and + `min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly + dependent upon the model, so if a `estimator` other than + :class:`~sklearn.linear_model.LinearRegression` is used, the user must + provide a value. + + residual_threshold : float, default=None + Maximum residual for a data sample to be classified as an inlier. + By default the threshold is chosen as the MAD (median absolute + deviation) of the target values `y`. Points whose residuals are + strictly equal to the threshold are considered as inliers. + + is_data_valid : callable, default=None + This function is called with the randomly selected data before the + model is fitted to it: `is_data_valid(X, y)`. If its return value is + False the current randomly chosen sub-sample is skipped. + + is_model_valid : callable, default=None + This function is called with the estimated model and the randomly + selected data: `is_model_valid(model, X, y)`. If its return value is + False the current randomly chosen sub-sample is skipped. + Rejecting samples with this function is computationally costlier than + with `is_data_valid`. `is_model_valid` should therefore only be used if + the estimated model is needed for making the rejection decision. + + max_trials : int, default=100 + Maximum number of iterations for random sample selection. 
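# --- Illustrative only (not library code) ---
# A standalone re-statement of the trial bound computed by
# _dynamic_max_trials above: N >= log(1 - p) / log(1 - e**m), where p is the
# desired confidence, e the current inlier ratio and m = min_samples.
import numpy as np

def approx_max_trials(n_inliers, n_samples, min_samples, probability=0.99):
    eps = np.spacing(1)
    inlier_ratio = n_inliers / float(n_samples)
    nom = max(eps, 1.0 - probability)                  # 1 - p
    denom = max(eps, 1.0 - inlier_ratio**min_samples)  # 1 - e**m
    if nom == 1:
        return 0
    if denom == 1:
        return float("inf")
    return int(np.ceil(np.log(nom) / np.log(denom)))

# e.g. 70% inliers, 2-point samples, 99% confidence -> about 7 trials
print(approx_max_trials(n_inliers=70, n_samples=100, min_samples=2))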
+ + max_skips : int, default=np.inf + Maximum number of iterations that can be skipped due to finding zero + inliers or invalid data defined by ``is_data_valid`` or invalid models + defined by ``is_model_valid``. + + .. versionadded:: 0.19 + + stop_n_inliers : int, default=np.inf + Stop iteration if at least this number of inliers are found. + + stop_score : float, default=np.inf + Stop iteration if score is greater equal than this threshold. + + stop_probability : float in range [0, 1], default=0.99 + RANSAC iteration stops if at least one outlier-free set of the training + data is sampled in RANSAC. This requires to generate at least N + samples (iterations):: + + N >= log(1 - probability) / log(1 - e**m) + + where the probability (confidence) is typically set to high value such + as 0.99 (the default) and e is the current fraction of inliers w.r.t. + the total number of samples. + + loss : str, callable, default='absolute_error' + String inputs, 'absolute_error' and 'squared_error' are supported which + find the absolute error and squared error per sample respectively. + + If ``loss`` is a callable, then it should be a function that takes + two arrays as inputs, the true and predicted value and returns a 1-D + array with the i-th value of the array corresponding to the loss + on ``X[i]``. + + If the loss on a sample is greater than the ``residual_threshold``, + then this sample is classified as an outlier. + + .. versionadded:: 0.18 + + random_state : int, RandomState instance, default=None + The generator used to initialize the centers. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : object + Best fitted model (copy of the `estimator` object). + + n_trials_ : int + Number of random selection trials until one of the stop criteria is + met. It is always ``<= max_trials``. + + inlier_mask_ : bool array of shape [n_samples] + Boolean mask of inliers classified as ``True``. + + n_skips_no_inliers_ : int + Number of iterations skipped due to finding zero inliers. + + .. versionadded:: 0.19 + + n_skips_invalid_data_ : int + Number of iterations skipped due to invalid data defined by + ``is_data_valid``. + + .. versionadded:: 0.19 + + n_skips_invalid_model_ : int + Number of iterations skipped due to an invalid model defined by + ``is_model_valid``. + + .. versionadded:: 0.19 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. + SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/RANSAC + .. [2] https://www.sri.com/wp-content/uploads/2021/12/ransac-publication.pdf + .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf + + Examples + -------- + >>> from sklearn.linear_model import RANSACRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression( + ... n_samples=200, n_features=2, noise=4.0, random_state=0) + >>> reg = RANSACRegressor(random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9885... 
+ >>> reg.predict(X[:1,]) + array([-31.9417...]) + """ # noqa: E501 + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "score", "predict"]), None], + "min_samples": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + None, + ], + "residual_threshold": [Interval(Real, 0, None, closed="left"), None], + "is_data_valid": [callable, None], + "is_model_valid": [callable, None], + "max_trials": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "max_skips": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "stop_n_inliers": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "stop_score": [Interval(Real, None, None, closed="both")], + "stop_probability": [Interval(Real, 0, 1, closed="both")], + "loss": [StrOptions({"absolute_error", "squared_error"}), callable], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator=None, + *, + min_samples=None, + residual_threshold=None, + is_data_valid=None, + is_model_valid=None, + max_trials=100, + max_skips=np.inf, + stop_n_inliers=np.inf, + stop_score=np.inf, + stop_probability=0.99, + loss="absolute_error", + random_state=None, + ): + self.estimator = estimator + self.min_samples = min_samples + self.residual_threshold = residual_threshold + self.is_data_valid = is_data_valid + self.is_model_valid = is_model_valid + self.max_trials = max_trials + self.max_skips = max_skips + self.stop_n_inliers = stop_n_inliers + self.stop_score = stop_score + self.stop_probability = stop_probability + self.random_state = random_state + self.loss = loss + + @_fit_context( + # RansacRegressor.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Fit estimator using RANSAC algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample + raises error if sample_weight is passed and estimator + fit method does not support it. + + .. versionadded:: 0.18 + + Returns + ------- + self : object + Fitted `RANSACRegressor` estimator. + + Raises + ------ + ValueError + If no valid consensus set could be found. This occurs if + `is_data_valid` and `is_model_valid` return False for all + `max_trials` randomly chosen sub-samples. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + # Need to validate separately here. We can't pass multi_output=True + # because that would allow y to be csr. Delay expensive finiteness + # check to the estimator's own input validation. + check_X_params = dict(accept_sparse="csr", force_all_finite=False) + check_y_params = dict(ensure_2d=False) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + check_consistent_length(X, y) + + if self.estimator is not None: + estimator = clone(self.estimator) + else: + estimator = LinearRegression() + + if self.min_samples is None: + if not isinstance(estimator, LinearRegression): + raise ValueError( + "`min_samples` needs to be explicitly set when estimator " + "is not a LinearRegression." 
+ ) + min_samples = X.shape[1] + 1 + elif 0 < self.min_samples < 1: + min_samples = np.ceil(self.min_samples * X.shape[0]) + elif self.min_samples >= 1: + min_samples = self.min_samples + if min_samples > X.shape[0]: + raise ValueError( + "`min_samples` may not be larger than number " + "of samples: n_samples = %d." % (X.shape[0]) + ) + + if self.residual_threshold is None: + # MAD (median absolute deviation) + residual_threshold = np.median(np.abs(y - np.median(y))) + else: + residual_threshold = self.residual_threshold + + if self.loss == "absolute_error": + if y.ndim == 1: + loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred) + else: + loss_function = lambda y_true, y_pred: np.sum( + np.abs(y_true - y_pred), axis=1 + ) + elif self.loss == "squared_error": + if y.ndim == 1: + loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2 + else: + loss_function = lambda y_true, y_pred: np.sum( + (y_true - y_pred) ** 2, axis=1 + ) + + elif callable(self.loss): + loss_function = self.loss + + random_state = check_random_state(self.random_state) + + try: # Not all estimator accept a random_state + estimator.set_params(random_state=random_state) + except ValueError: + pass + + estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight") + estimator_name = type(estimator).__name__ + if sample_weight is not None and not estimator_fit_has_sample_weight: + raise ValueError( + "%s does not support sample_weight. Samples" + " weights are only used for the calibration" + " itself." % estimator_name + ) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + n_inliers_best = 1 + score_best = -np.inf + inlier_mask_best = None + X_inlier_best = None + y_inlier_best = None + inlier_best_idxs_subset = None + self.n_skips_no_inliers_ = 0 + self.n_skips_invalid_data_ = 0 + self.n_skips_invalid_model_ = 0 + + # number of data samples + n_samples = X.shape[0] + sample_idxs = np.arange(n_samples) + + self.n_trials_ = 0 + max_trials = self.max_trials + while self.n_trials_ < max_trials: + self.n_trials_ += 1 + + if ( + self.n_skips_no_inliers_ + + self.n_skips_invalid_data_ + + self.n_skips_invalid_model_ + ) > self.max_skips: + break + + # choose random sample set + subset_idxs = sample_without_replacement( + n_samples, min_samples, random_state=random_state + ) + X_subset = X[subset_idxs] + y_subset = y[subset_idxs] + + # check if random sample set is valid + if self.is_data_valid is not None and not self.is_data_valid( + X_subset, y_subset + ): + self.n_skips_invalid_data_ += 1 + continue + + # fit model for current random sample set + if sample_weight is None: + estimator.fit(X_subset, y_subset) + else: + estimator.fit( + X_subset, y_subset, sample_weight=sample_weight[subset_idxs] + ) + + # check if estimated model is valid + if self.is_model_valid is not None and not self.is_model_valid( + estimator, X_subset, y_subset + ): + self.n_skips_invalid_model_ += 1 + continue + + # residuals of all data for current random sample model + y_pred = estimator.predict(X) + residuals_subset = loss_function(y, y_pred) + + # classify data into inliers and outliers + inlier_mask_subset = residuals_subset <= residual_threshold + n_inliers_subset = np.sum(inlier_mask_subset) + + # less inliers -> skip current random sample + if n_inliers_subset < n_inliers_best: + self.n_skips_no_inliers_ += 1 + continue + + # extract inlier data set + inlier_idxs_subset = sample_idxs[inlier_mask_subset] + X_inlier_subset = X[inlier_idxs_subset] + y_inlier_subset = 
y[inlier_idxs_subset] + + # score of inlier data set + score_subset = estimator.score(X_inlier_subset, y_inlier_subset) + + # same number of inliers but worse score -> skip current random + # sample + if n_inliers_subset == n_inliers_best and score_subset < score_best: + continue + + # save current random sample as best sample + n_inliers_best = n_inliers_subset + score_best = score_subset + inlier_mask_best = inlier_mask_subset + X_inlier_best = X_inlier_subset + y_inlier_best = y_inlier_subset + inlier_best_idxs_subset = inlier_idxs_subset + + max_trials = min( + max_trials, + _dynamic_max_trials( + n_inliers_best, n_samples, min_samples, self.stop_probability + ), + ) + + # break if sufficient number of inliers or score is reached + if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score: + break + + # if none of the iterations met the required criteria + if inlier_mask_best is None: + if ( + self.n_skips_no_inliers_ + + self.n_skips_invalid_data_ + + self.n_skips_invalid_model_ + ) > self.max_skips: + raise ValueError( + "RANSAC skipped more iterations than `max_skips` without" + " finding a valid consensus set. Iterations were skipped" + " because each randomly chosen sub-sample failed the" + " passing criteria. See estimator attributes for" + " diagnostics (n_skips*)." + ) + else: + raise ValueError( + "RANSAC could not find a valid consensus set. All" + " `max_trials` iterations were skipped because each" + " randomly chosen sub-sample failed the passing criteria." + " See estimator attributes for diagnostics (n_skips*)." + ) + else: + if ( + self.n_skips_no_inliers_ + + self.n_skips_invalid_data_ + + self.n_skips_invalid_model_ + ) > self.max_skips: + warnings.warn( + ( + "RANSAC found a valid consensus set but exited" + " early due to skipping more iterations than" + " `max_skips`. See estimator attributes for" + " diagnostics (n_skips*)." + ), + ConvergenceWarning, + ) + + # estimate final model using all inliers + if sample_weight is None: + estimator.fit(X_inlier_best, y_inlier_best) + else: + estimator.fit( + X_inlier_best, + y_inlier_best, + sample_weight=sample_weight[inlier_best_idxs_subset], + ) + + self.estimator_ = estimator + self.inlier_mask_ = inlier_mask_best + return self + + def predict(self, X): + """Predict using the estimated model. + + This is a wrapper for `estimator_.predict(X)`. + + Parameters + ---------- + X : {array-like or sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + y : array, shape = [n_samples] or [n_samples, n_targets] + Returns predicted values. + """ + check_is_fitted(self) + X = self._validate_data( + X, + force_all_finite=False, + accept_sparse=True, + reset=False, + ) + return self.estimator_.predict(X) + + def score(self, X, y): + """Return the score of the prediction. + + This is a wrapper for `estimator_.score(X, y)`. + + Parameters + ---------- + X : (array-like or sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Returns + ------- + z : float + Score of the prediction. 
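# --- Illustrative usage sketch (not part of this vendored module) ---
# A minimal run of the fit loop implemented above: fit on data with gross
# outliers, then inspect the inlier mask, the refit estimator and the trial
# counters. The dataset is synthetic; only NumPy and scikit-learn are assumed.
import numpy as np
from sklearn.linear_model import RANSACRegressor

rng_demo = np.random.RandomState(0)
X_demo = rng_demo.uniform(-5, 5, size=(200, 1))
y_demo = 3.0 * X_demo.ravel() + rng_demo.normal(scale=0.5, size=200)
y_demo[:20] += 30.0                      # corrupt 10% of targets with large outliers

ransac_demo = RANSACRegressor(
    min_samples=2, residual_threshold=2.0, stop_probability=0.99, random_state=0
).fit(X_demo, y_demo)
print(ransac_demo.estimator_.coef_)      # close to [3.0] despite the outliers
print(ransac_demo.inlier_mask_.sum(), "inliers out of", len(y_demo))
print(ransac_demo.n_trials_, ransac_demo.n_skips_no_inliers_)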
+ """ + check_is_fitted(self) + X = self._validate_data( + X, + force_all_finite=False, + accept_sparse=True, + reset=False, + ) + return self.estimator_.score(X, y) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + } + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..84646f5aaf130a4252a6a3c300e4cf69f8779c55 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py @@ -0,0 +1,2612 @@ +""" +Ridge regression +""" + +# Author: Mathieu Blondel +# Reuben Fletcher-Costin +# Fabian Pedregosa +# Michael Eickenberg +# License: BSD 3 clause + + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from scipy import linalg, optimize, sparse +from scipy.sparse import linalg as sp_linalg + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context, is_classifier +from ..exceptions import ConvergenceWarning +from ..metrics import check_scoring, get_scorer_names +from ..model_selection import GridSearchCV +from ..preprocessing import LabelBinarizer +from ..utils import ( + check_array, + check_consistent_length, + check_scalar, + column_or_1d, + compute_sample_weight, +) +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import row_norms, safe_sparse_dot +from ..utils.fixes import _sparse_linalg_cg +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._base import LinearClassifierMixin, LinearModel, _preprocess_data, _rescale_data +from ._sag import sag_solver + + +def _get_rescaled_operator(X, X_offset, sample_weight_sqrt): + """Create LinearOperator for matrix products with implicit centering. + + Matrix product `LinearOperator @ coef` returns `(X - X_offset) @ coef`. 
+ """ + + def matvec(b): + return X.dot(b) - sample_weight_sqrt * b.dot(X_offset) + + def rmatvec(b): + return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt) + + X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec) + return X1 + + +def _solve_sparse_cg( + X, + y, + alpha, + max_iter=None, + tol=1e-4, + verbose=0, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + n_samples, n_features = X.shape + + if X_offset is None or X_scale is None: + X1 = sp_linalg.aslinearoperator(X) + else: + X_offset_scale = X_offset / X_scale + X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt) + + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + + if n_features > n_samples: + + def create_mv(curr_alpha): + def _mv(x): + return X1.matvec(X1.rmatvec(x)) + curr_alpha * x + + return _mv + + else: + + def create_mv(curr_alpha): + def _mv(x): + return X1.rmatvec(X1.matvec(x)) + curr_alpha * x + + return _mv + + for i in range(y.shape[1]): + y_column = y[:, i] + + mv = create_mv(alpha[i]) + if n_features > n_samples: + # kernel ridge + # w = X.T * inv(X X^t + alpha*Id) y + C = sp_linalg.LinearOperator( + (n_samples, n_samples), matvec=mv, dtype=X.dtype + ) + coef, info = _sparse_linalg_cg(C, y_column, rtol=tol) + coefs[i] = X1.rmatvec(coef) + else: + # linear ridge + # w = inv(X^t X + alpha*Id) * X.T y + y_column = X1.rmatvec(y_column) + C = sp_linalg.LinearOperator( + (n_features, n_features), matvec=mv, dtype=X.dtype + ) + coefs[i], info = _sparse_linalg_cg(C, y_column, maxiter=max_iter, rtol=tol) + + if info < 0: + raise ValueError("Failed with error code %d" % info) + + if max_iter is None and info > 0 and verbose: + warnings.warn( + "sparse_cg did not converge after %d iterations." % info, + ConvergenceWarning, + ) + + return coefs + + +def _solve_lsqr( + X, + y, + *, + alpha, + fit_intercept=True, + max_iter=None, + tol=1e-4, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + """Solve Ridge regression via LSQR. + + We expect that y is always mean centered. + If X is dense, we expect it to be mean centered such that we can solve + ||y - Xw||_2^2 + alpha * ||w||_2^2 + + If X is sparse, we expect X_offset to be given such that we can solve + ||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2 + + With sample weights S=diag(sample_weight), this becomes + ||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2 + and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In + this case, X_offset is the sample_weight weighted mean of X before scaling by + sqrt(S). The objective then reads + ||y - (X - sqrt(S) X_offset) w)||_2^2 + alpha * ||w||_2^2 + """ + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + if sparse.issparse(X) and fit_intercept: + X_offset_scale = X_offset / X_scale + X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt) + else: + # No need to touch anything + X1 = X + + n_samples, n_features = X.shape + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + n_iter = np.empty(y.shape[1], dtype=np.int32) + + # According to the lsqr documentation, alpha = damp^2. 
+ sqrt_alpha = np.sqrt(alpha) + + for i in range(y.shape[1]): + y_column = y[:, i] + info = sp_linalg.lsqr( + X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter + ) + coefs[i] = info[0] + n_iter[i] = info[2] + + return coefs, n_iter + + +def _solve_cholesky(X, y, alpha): + # w = inv(X^t X + alpha*Id) * X.T y + n_features = X.shape[1] + n_targets = y.shape[1] + + A = safe_sparse_dot(X.T, X, dense_output=True) + Xy = safe_sparse_dot(X.T, y, dense_output=True) + + one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]]) + + if one_alpha: + A.flat[:: n_features + 1] += alpha[0] + return linalg.solve(A, Xy, assume_a="pos", overwrite_a=True).T + else: + coefs = np.empty([n_targets, n_features], dtype=X.dtype) + for coef, target, current_alpha in zip(coefs, Xy.T, alpha): + A.flat[:: n_features + 1] += current_alpha + coef[:] = linalg.solve(A, target, assume_a="pos", overwrite_a=False).ravel() + A.flat[:: n_features + 1] -= current_alpha + return coefs + + +def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False): + # dual_coef = inv(X X^t + alpha*Id) y + n_samples = K.shape[0] + n_targets = y.shape[1] + + if copy: + K = K.copy() + + alpha = np.atleast_1d(alpha) + one_alpha = (alpha == alpha[0]).all() + has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None] + + if has_sw: + # Unlike other solvers, we need to support sample_weight directly + # because K might be a pre-computed kernel. + sw = np.sqrt(np.atleast_1d(sample_weight)) + y = y * sw[:, np.newaxis] + K *= np.outer(sw, sw) + + if one_alpha: + # Only one penalty, we can solve multi-target problems in one time. + K.flat[:: n_samples + 1] += alpha[0] + + try: + # Note: we must use overwrite_a=False in order to be able to + # use the fall-back solution below in case a LinAlgError + # is raised + dual_coef = linalg.solve(K, y, assume_a="pos", overwrite_a=False) + except np.linalg.LinAlgError: + warnings.warn( + "Singular matrix in solving dual problem. Using " + "least-squares solution instead." + ) + dual_coef = linalg.lstsq(K, y)[0] + + # K is expensive to compute and store in memory so change it back in + # case it was user-given. + K.flat[:: n_samples + 1] -= alpha[0] + + if has_sw: + dual_coef *= sw[:, np.newaxis] + + return dual_coef + else: + # One penalty per target. We need to solve each target separately. + dual_coefs = np.empty([n_targets, n_samples], K.dtype) + + for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha): + K.flat[:: n_samples + 1] += current_alpha + + dual_coef[:] = linalg.solve( + K, target, assume_a="pos", overwrite_a=False + ).ravel() + + K.flat[:: n_samples + 1] -= current_alpha + + if has_sw: + dual_coefs *= sw[np.newaxis, :] + + return dual_coefs.T + + +def _solve_svd(X, y, alpha): + U, s, Vt = linalg.svd(X, full_matrices=False) + idx = s > 1e-15 # same default value as scipy.linalg.pinv + s_nnz = s[idx][:, np.newaxis] + UTy = np.dot(U.T, y) + d = np.zeros((s.size, alpha.size), dtype=X.dtype) + d[idx] = s_nnz / (s_nnz**2 + alpha) + d_UT_y = d * UTy + return np.dot(Vt.T, d_UT_y).T + + +def _solve_lbfgs( + X, + y, + alpha, + positive=True, + max_iter=None, + tol=1e-4, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + """Solve ridge regression with LBFGS. + + The main purpose is fitting with forcing coefficients to be positive. + For unconstrained ridge regression, there are faster dedicated solver methods. + Note that with positive bounds on the coefficients, LBFGS seems faster + than scipy.optimize.lsq_linear. 
+ """ + n_samples, n_features = X.shape + + options = {} + if max_iter is not None: + options["maxiter"] = max_iter + config = { + "method": "L-BFGS-B", + "tol": tol, + "jac": True, + "options": options, + } + if positive: + config["bounds"] = [(0, np.inf)] * n_features + + if X_offset is not None and X_scale is not None: + X_offset_scale = X_offset / X_scale + else: + X_offset_scale = None + + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + + for i in range(y.shape[1]): + x0 = np.zeros((n_features,)) + y_column = y[:, i] + + def func(w): + residual = X.dot(w) - y_column + if X_offset_scale is not None: + residual -= sample_weight_sqrt * w.dot(X_offset_scale) + f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w) + grad = X.T @ residual + alpha[i] * w + if X_offset_scale is not None: + grad -= X_offset_scale * residual.dot(sample_weight_sqrt) + + return f, grad + + result = optimize.minimize(func, x0, **config) + if not result["success"]: + warnings.warn( + ( + "The lbfgs solver did not converge. Try increasing max_iter " + f"or tol. Currently: max_iter={max_iter} and tol={tol}" + ), + ConvergenceWarning, + ) + coefs[i] = result["x"] + + return coefs + + +def _get_valid_accept_sparse(is_X_sparse, solver): + if is_X_sparse and solver in ["auto", "sag", "saga"]: + return "csr" + else: + return ["csr", "csc", "coo"] + + +@validate_params( + { + "X": ["array-like", "sparse matrix", sp_linalg.LinearOperator], + "y": ["array-like"], + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "sample_weight": [ + Interval(Real, None, None, closed="neither"), + "array-like", + None, + ], + "solver": [ + StrOptions( + {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"} + ) + ], + "max_iter": [Interval(Integral, 0, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left")], + "verbose": ["verbose"], + "positive": ["boolean"], + "random_state": ["random_state"], + "return_n_iter": ["boolean"], + "return_intercept": ["boolean"], + "check_input": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def ridge_regression( + X, + y, + alpha, + *, + sample_weight=None, + solver="auto", + max_iter=None, + tol=1e-4, + verbose=0, + positive=False, + random_state=None, + return_n_iter=False, + return_intercept=False, + check_input=True, +): + """Solve the ridge equation by the method of normal equations. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix, LinearOperator} of shape \ + (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + alpha : float or array-like of shape (n_targets,) + Constant that multiplies the L2 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Ridge` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + If an array is passed, penalties are assumed to be specific to the + targets. Hence they must correspond in number. + + sample_weight : float or array-like of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. 
If sample_weight is not None and + solver='auto', the solver will be set to 'cholesky'. + + .. versionadded:: 0.17 + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. + + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution via a Cholesky decomposition of + dot(X.T, X) + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its improved, unbiased version named SAGA. Both methods also use an + iterative procedure, and are often faster than other solvers when + both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + All solvers except 'svd' support both dense and sparse data. However, only + 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when + `fit_intercept` is True. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + For the 'sparse_cg' and 'lsqr' solvers, the default value is determined + by scipy.sparse.linalg. For 'sag' and saga solver, the default value is + 1000. For 'lbfgs' solver, the default value is 15000. + + tol : float, default=1e-4 + Precision of the solution. Note that `tol` has no effect for solvers 'svd' and + 'cholesky'. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + verbose : int, default=0 + Verbosity level. Setting verbose > 0 will display additional + information depending on the solver used. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + return_n_iter : bool, default=False + If True, the method also returns `n_iter`, the actual number of + iteration performed by the solver. + + .. versionadded:: 0.17 + + return_intercept : bool, default=False + If True and if X is sparse, the method also returns the intercept, + and the solver is automatically changed to 'sag'. This is only a + temporary fix for fitting the intercept with sparse data. For dense + data, use sklearn.linear_model._preprocess_data before your regression. + + .. versionadded:: 0.17 + + check_input : bool, default=True + If False, the input arrays X and y will not be checked. + + .. 
versionadded:: 0.21 + + Returns + ------- + coef : ndarray of shape (n_features,) or (n_targets, n_features) + Weight vector(s). + + n_iter : int, optional + The actual number of iteration performed by the solver. + Only returned if `return_n_iter` is True. + + intercept : float or ndarray of shape (n_targets,) + The intercept of the model. Only returned if `return_intercept` + is True and if X is a scipy sparse array. + + Notes + ----- + This function won't compute the intercept. + + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to ``1 / (2C)`` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_regression + >>> from sklearn.linear_model import ridge_regression + >>> rng = np.random.RandomState(0) + >>> X = rng.randn(100, 4) + >>> y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + 0.1 * rng.standard_normal(100) + >>> coef, intercept = ridge_regression(X, y, alpha=1.0, return_intercept=True) + >>> list(coef) + [1.9..., -1.0..., -0.0..., -0.0...] + >>> intercept + -0.0... + """ + return _ridge_regression( + X, + y, + alpha, + sample_weight=sample_weight, + solver=solver, + max_iter=max_iter, + tol=tol, + verbose=verbose, + positive=positive, + random_state=random_state, + return_n_iter=return_n_iter, + return_intercept=return_intercept, + X_scale=None, + X_offset=None, + check_input=check_input, + ) + + +def _ridge_regression( + X, + y, + alpha, + sample_weight=None, + solver="auto", + max_iter=None, + tol=1e-4, + verbose=0, + positive=False, + random_state=None, + return_n_iter=False, + return_intercept=False, + X_scale=None, + X_offset=None, + check_input=True, + fit_intercept=False, +): + has_sw = sample_weight is not None + + if solver == "auto": + if positive: + solver = "lbfgs" + elif return_intercept: + # sag supports fitting intercept directly + solver = "sag" + elif not sparse.issparse(X): + solver = "cholesky" + else: + solver = "sparse_cg" + + if solver not in ("sparse_cg", "cholesky", "svd", "lsqr", "sag", "saga", "lbfgs"): + raise ValueError( + "Known solvers are 'sparse_cg', 'cholesky', 'svd'" + " 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver + ) + + if positive and solver != "lbfgs": + raise ValueError( + "When positive=True, only 'lbfgs' solver can be used. " + f"Please change solver {solver} to 'lbfgs' " + "or set positive=False." + ) + + if solver == "lbfgs" and not positive: + raise ValueError( + "'lbfgs' solver can be used only when positive=True. " + "Please use another solver." + ) + + if return_intercept and solver != "sag": + raise ValueError( + "In Ridge, only 'sag' solver can directly fit the " + "intercept. Please change solver to 'sag' or set " + "return_intercept=False." 
+ ) + + if check_input: + _dtype = [np.float64, np.float32] + _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver) + X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C") + y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None) + check_consistent_length(X, y) + + n_samples, n_features = X.shape + + if y.ndim > 2: + raise ValueError("Target y has the wrong shape %s" % str(y.shape)) + + ravel = False + if y.ndim == 1: + y = y.reshape(-1, 1) + ravel = True + + n_samples_, n_targets = y.shape + + if n_samples != n_samples_: + raise ValueError( + "Number of samples in X and y does not correspond: %d != %d" + % (n_samples, n_samples_) + ) + + if has_sw: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if solver not in ["sag", "saga"]: + # SAG supports sample_weight directly. For other solvers, + # we implement sample_weight via a simple rescaling. + X, y, sample_weight_sqrt = _rescale_data(X, y, sample_weight) + + # Some callers of this method might pass alpha as single + # element array which already has been validated. + if alpha is not None and not isinstance(alpha, np.ndarray): + alpha = check_scalar( + alpha, + "alpha", + target_type=numbers.Real, + min_val=0.0, + include_boundaries="left", + ) + + # There should be either 1 or n_targets penalties + alpha = np.asarray(alpha, dtype=X.dtype).ravel() + if alpha.size not in [1, n_targets]: + raise ValueError( + "Number of targets and number of penalties do not correspond: %d != %d" + % (alpha.size, n_targets) + ) + + if alpha.size == 1 and n_targets > 1: + alpha = np.repeat(alpha, n_targets) + + n_iter = None + if solver == "sparse_cg": + coef = _solve_sparse_cg( + X, + y, + alpha, + max_iter=max_iter, + tol=tol, + verbose=verbose, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + elif solver == "lsqr": + coef, n_iter = _solve_lsqr( + X, + y, + alpha=alpha, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + elif solver == "cholesky": + if n_features > n_samples: + K = safe_sparse_dot(X, X.T, dense_output=True) + try: + dual_coef = _solve_cholesky_kernel(K, y, alpha) + + coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T + except linalg.LinAlgError: + # use SVD solver if matrix is singular + solver = "svd" + else: + try: + coef = _solve_cholesky(X, y, alpha) + except linalg.LinAlgError: + # use SVD solver if matrix is singular + solver = "svd" + + elif solver in ["sag", "saga"]: + # precompute max_squared_sum for all targets + max_squared_sum = row_norms(X, squared=True).max() + + coef = np.empty((y.shape[1], n_features), dtype=X.dtype) + n_iter = np.empty(y.shape[1], dtype=np.int32) + intercept = np.zeros((y.shape[1],), dtype=X.dtype) + for i, (alpha_i, target) in enumerate(zip(alpha, y.T)): + init = { + "coef": np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype) + } + coef_, n_iter_, _ = sag_solver( + X, + target.ravel(), + sample_weight, + "squared", + alpha_i, + 0, + max_iter, + tol, + verbose, + random_state, + False, + max_squared_sum, + init, + is_saga=solver == "saga", + ) + if return_intercept: + coef[i] = coef_[:-1] + intercept[i] = coef_[-1] + else: + coef[i] = coef_ + n_iter[i] = n_iter_ + + if intercept.shape[0] == 1: + intercept = intercept[0] + coef = np.asarray(coef) + + elif solver == "lbfgs": + coef = _solve_lbfgs( + X, + y, + alpha, + positive=positive, + 
tol=tol, + max_iter=max_iter, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + if solver == "svd": + if sparse.issparse(X): + raise TypeError("SVD solver does not support sparse inputs currently") + coef = _solve_svd(X, y, alpha) + + if ravel: + # When y was passed as a 1d-array, we flatten the coefficients. + coef = coef.ravel() + + if return_n_iter and return_intercept: + return coef, n_iter, intercept + elif return_intercept: + return coef, intercept + elif return_n_iter: + return coef, n_iter + else: + return coef + + +class _BaseRidge(LinearModel, metaclass=ABCMeta): + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray], + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left")], + "solver": [ + StrOptions( + {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"} + ) + ], + "positive": ["boolean"], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + solver="auto", + positive=False, + random_state=None, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.max_iter = max_iter + self.tol = tol + self.solver = solver + self.positive = positive + self.random_state = random_state + + def fit(self, X, y, sample_weight=None): + if self.solver == "lbfgs" and not self.positive: + raise ValueError( + "'lbfgs' solver can be used only when positive=True. " + "Please use another solver." + ) + + if self.positive: + if self.solver not in ["auto", "lbfgs"]: + raise ValueError( + f"solver='{self.solver}' does not support positive fitting. Please" + " set the solver to 'auto' or 'lbfgs', or set `positive=False`" + ) + else: + solver = self.solver + elif sparse.issparse(X) and self.fit_intercept: + if self.solver not in ["auto", "lbfgs", "lsqr", "sag", "sparse_cg"]: + raise ValueError( + "solver='{}' does not support fitting the intercept " + "on sparse data. Please set the solver to 'auto' or " + "'lsqr', 'sparse_cg', 'sag', 'lbfgs' " + "or set `fit_intercept=False`".format(self.solver) + ) + if self.solver in ["lsqr", "lbfgs"]: + solver = self.solver + elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4: + warnings.warn( + '"sag" solver requires many iterations to fit ' + "an intercept with sparse inputs. Either set the " + 'solver to "auto" or "sparse_cg", or set a low ' + '"tol" and a high "max_iter" (especially if inputs are ' + "not standardized)." 
+ ) + solver = "sag" + else: + solver = "sparse_cg" + else: + solver = self.solver + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # when X is sparse we only remove offset from y + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + if solver == "sag" and sparse.issparse(X) and self.fit_intercept: + self.coef_, self.n_iter_, self.intercept_ = _ridge_regression( + X, + y, + alpha=self.alpha, + sample_weight=sample_weight, + max_iter=self.max_iter, + tol=self.tol, + solver="sag", + positive=self.positive, + random_state=self.random_state, + return_n_iter=True, + return_intercept=True, + check_input=False, + ) + # add the offset which was subtracted by _preprocess_data + self.intercept_ += y_offset + + else: + if sparse.issparse(X) and self.fit_intercept: + # required to fit intercept with sparse_cg and lbfgs solver + params = {"X_offset": X_offset, "X_scale": X_scale} + else: + # for dense matrices or when intercept is set to 0 + params = {} + + self.coef_, self.n_iter_ = _ridge_regression( + X, + y, + alpha=self.alpha, + sample_weight=sample_weight, + max_iter=self.max_iter, + tol=self.tol, + solver=solver, + positive=self.positive, + random_state=self.random_state, + return_n_iter=True, + return_intercept=False, + check_input=False, + fit_intercept=self.fit_intercept, + **params, + ) + self._set_intercept(X_offset, y_offset, X_scale) + + return self + + +class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge): + """Linear least squares with l2 regularization. + + Minimizes the objective function:: + + ||y - Xw||^2_2 + alpha * ||w||^2_2 + + This model solves a regression model where the loss function is + the linear least squares function and regularization is given by + the l2-norm. Also known as Ridge Regression or Tikhonov regularization. + This estimator has built-in support for multi-variate regression + (i.e., when y is a 2d-array of shape (n_samples, n_targets)). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : {float, ndarray of shape (n_targets,)}, default=1.0 + Constant that multiplies the L2 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Ridge` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + If an array is passed, penalties are assumed to be specific to the + targets. Hence they must correspond in number. + + fit_intercept : bool, default=True + Whether to fit the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. ``X`` and ``y`` are expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + For 'sparse_cg' and 'lsqr' solvers, the default value is determined + by scipy.sparse.linalg. For 'sag' solver, the default value is 1000. + For 'lbfgs' solver, the default value is 15000. + + tol : float, default=1e-4 + The precision of the solution (`coef_`) is determined by `tol` which + specifies a different convergence criterion for each solver: + + - 'svd': `tol` has no impact. + + - 'cholesky': `tol` has no impact. 
+ + - 'sparse_cg': norm of residuals smaller than `tol`. + + - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr, + which control the norm of the residual vector in terms of the norms of + matrix and coefficients. + + - 'sag' and 'saga': relative change of coef smaller than `tol`. + + - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals| + smaller than `tol`. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. + + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution. + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its improved, unbiased version named SAGA. Both methods also use an + iterative procedure, and are often faster than other solvers when + both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + All solvers except 'svd' support both dense and sparse data. However, only + 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when + `fit_intercept` is True. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + .. versionadded:: 0.17 + `random_state` to support Stochastic Average Gradient. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Weight vector(s). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + n_iter_ : None or ndarray of shape (n_targets,) + Actual number of iterations for each target. Available only for + sag and lsqr solvers. Other solvers will return None. + + .. versionadded:: 0.17 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + RidgeClassifier : Ridge classifier. + RidgeCV : Ridge regression with built-in cross validation. 
+ :class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression + combines ridge regression with the kernel trick. + + Notes + ----- + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to ``1 / (2C)`` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + + Examples + -------- + >>> from sklearn.linear_model import Ridge + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> clf = Ridge(alpha=1.0) + >>> clf.fit(X, y) + Ridge() + """ + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + solver="auto", + positive=False, + random_state=None, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + solver=solver, + positive=positive, + random_state=random_state, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + """ + _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver) + X, y = self._validate_data( + X, + y, + accept_sparse=_accept_sparse, + dtype=[np.float64, np.float32], + multi_output=True, + y_numeric=True, + ) + return super().fit(X, y, sample_weight=sample_weight) + + +class _RidgeClassifierMixin(LinearClassifierMixin): + def _prepare_data(self, X, y, sample_weight, solver): + """Validate `X` and `y` and binarize `y`. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + solver : str + The solver used in `Ridge` to know which sparse format to support. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Validated training data. + + y : ndarray of shape (n_samples,) + Validated target values. + + sample_weight : ndarray of shape (n_samples,) + Validated sample weights. + + Y : ndarray of shape (n_samples, n_classes) + The binarized version of `y`. + """ + accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver) + X, y = self._validate_data( + X, + y, + accept_sparse=accept_sparse, + multi_output=True, + y_numeric=False, + ) + + self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) + Y = self._label_binarizer.fit_transform(y) + if not self._label_binarizer.y_type_.startswith("multilabel"): + y = column_or_1d(y, warn=True) + + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + if self.class_weight: + sample_weight = sample_weight * compute_sample_weight(self.class_weight, y) + return X, y, sample_weight, Y + + def predict(self, X): + """Predict class labels for samples in `X`. 
+ + Parameters + ---------- + X : {array-like, spare matrix} of shape (n_samples, n_features) + The data matrix for which we want to predict the targets. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs) + Vector or matrix containing the predictions. In binary and + multiclass problems, this is a vector containing `n_samples`. In + a multilabel problem, it returns a matrix of shape + `(n_samples, n_outputs)`. + """ + check_is_fitted(self, attributes=["_label_binarizer"]) + if self._label_binarizer.y_type_.startswith("multilabel"): + # Threshold such that the negative label is -1 and positive label + # is 1 to use the inverse transform of the label binarizer fitted + # during fit. + scores = 2 * (self.decision_function(X) > 0) - 1 + return self._label_binarizer.inverse_transform(scores) + return super().predict(X) + + @property + def classes_(self): + """Classes labels.""" + return self._label_binarizer.classes_ + + def _more_tags(self): + return {"multilabel": True} + + +class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge): + """Classifier using Ridge regression. + + This classifier first converts the target values into ``{-1, 1}`` and + then treats the problem as a regression task (multi-output regression in + the multiclass case). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set to false, no + intercept will be used in calculations (e.g. data is expected to be + already centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + The default value is determined by scipy.sparse.linalg. + + tol : float, default=1e-4 + The precision of the solution (`coef_`) is determined by `tol` which + specifies a different convergence criterion for each solver: + + - 'svd': `tol` has no impact. + + - 'cholesky': `tol` has no impact. + + - 'sparse_cg': norm of residuals smaller than `tol`. + + - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr, + which control the norm of the residual vector in terms of the norms of + matrix and coefficients. + + - 'sag' and 'saga': relative change of coef smaller than `tol`. + + - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals| + smaller than `tol`. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. 
+ + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution. + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its unbiased and more flexible version named SAGA. Both methods + use an iterative procedure, and are often faster than other solvers + when both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) + Coefficient of the features in the decision function. + + ``coef_`` is of shape (1, n_features) when the given problem is binary. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + n_iter_ : None or ndarray of shape (n_targets,) + Actual number of iterations for each target. Available only for + sag and lsqr solvers. Other solvers will return None. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifierCV : Ridge classifier with built-in cross validation. + + Notes + ----- + For multi-class classification, n_class classifiers are trained in + a one-versus-all approach. Concretely, this is implemented by taking + advantage of the multi-variate response support in Ridge. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.linear_model import RidgeClassifier + >>> X, y = load_breast_cancer(return_X_y=True) + >>> clf = RidgeClassifier().fit(X, y) + >>> clf.score(X, y) + 0.9595... 
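+
+    The decision scores themselves depend on the training data, so only
+    their shape is shown here; for a binary problem such as this one they
+    form a one-dimensional array:
+
+    >>> clf.decision_function(X[:2]).shape
+    (2,)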
+ """ + + _parameter_constraints: dict = { + **_BaseRidge._parameter_constraints, + "class_weight": [dict, StrOptions({"balanced"}), None], + } + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + class_weight=None, + solver="auto", + positive=False, + random_state=None, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + solver=solver, + positive=positive, + random_state=random_state, + ) + self.class_weight = class_weight + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge classifier model. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + .. versionadded:: 0.17 + *sample_weight* support to RidgeClassifier. + + Returns + ------- + self : object + Instance of the estimator. + """ + X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver) + + super().fit(X, Y, sample_weight=sample_weight) + return self + + +def _check_gcv_mode(X, gcv_mode): + if gcv_mode in ["eigen", "svd"]: + return gcv_mode + # if X has more rows than columns, use decomposition of X^T.X, + # otherwise X.X^T + if X.shape[0] > X.shape[1]: + return "svd" + return "eigen" + + +def _find_smallest_angle(query, vectors): + """Find the column of vectors that is most aligned with the query. + + Both query and the columns of vectors must have their l2 norm equal to 1. + + Parameters + ---------- + query : ndarray of shape (n_samples,) + Normalized query vector. + + vectors : ndarray of shape (n_samples, n_features) + Vectors to which we compare query, as columns. Must be normalized. + """ + abs_cosine = np.abs(query.dot(vectors)) + index = np.argmax(abs_cosine) + return index + + +class _X_CenterStackOp(sparse.linalg.LinearOperator): + """Behaves as centered and scaled X with an added intercept column. + + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]) + """ + + def __init__(self, X, X_mean, sqrt_sw): + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_samples, n_features + 1)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + return ( + safe_sparse_dot(self.X, v[:-1], dense_output=True) + - self.sqrt_sw * self.X_mean.dot(v[:-1]) + + v[-1] * self.sqrt_sw + ) + + def _matmat(self, v): + return ( + safe_sparse_dot(self.X, v[:-1], dense_output=True) + - self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + + v[-1] * self.sqrt_sw[:, None] + ) + + def _transpose(self): + return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw) + + +class _XT_CenterStackOp(sparse.linalg.LinearOperator): + """Behaves as transposed centered and scaled X with an intercept column. 
+ + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T + """ + + def __init__(self, X, X_mean, sqrt_sw): + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_features + 1, n_samples)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + n_features = self.shape[0] + res = np.empty(n_features, dtype=self.X.dtype) + res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - ( + self.X_mean * self.sqrt_sw.dot(v) + ) + res[-1] = np.dot(v, self.sqrt_sw) + return res + + def _matmat(self, v): + n_features = self.shape[0] + res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype) + res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[ + :, None + ] * self.sqrt_sw.dot(v) + res[-1] = np.dot(self.sqrt_sw, v) + return res + + +class _IdentityRegressor: + """Fake regressor which will directly output the prediction.""" + + def decision_function(self, y_predict): + return y_predict + + def predict(self, y_predict): + return y_predict + + +class _IdentityClassifier(LinearClassifierMixin): + """Fake classifier which will directly output the prediction. + + We inherit from LinearClassifierMixin to get the proper shape for the + output `y`. + """ + + def __init__(self, classes): + self.classes_ = classes + + def decision_function(self, y_predict): + return y_predict + + +class _RidgeGCV(LinearModel): + """Ridge regression with built-in Leave-one-out Cross-Validation. + + This class is not intended to be used directly. Use RidgeCV instead. + + Notes + ----- + + We want to solve (K + alpha*Id)c = y, + where K = X X^T is the kernel matrix. + + Let G = (K + alpha*Id). + + Dual solution: c = G^-1y + Primal solution: w = X^T c + + Compute eigendecomposition K = Q V Q^T. + Then G^-1 = Q (V + alpha*Id)^-1 Q^T, + where (V + alpha*Id) is diagonal. + It is thus inexpensive to inverse for many alphas. + + Let loov be the vector of prediction values for each example + when the model was fitted with all examples but this example. + + loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1) + + Let looe be the vector of prediction errors for each example + when the model was fitted with all examples but this example. + + looe = y - loov = c / diag(G^-1) + + The best score (negative mean squared error or user-provided scoring) is + stored in the `best_score_` attribute, and the selected hyperparameter in + `alpha_`. + + References + ---------- + http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf + https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf + """ + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + copy_X=True, + gcv_mode=None, + store_cv_values=False, + is_clf=False, + alpha_per_target=False, + ): + self.alphas = alphas + self.fit_intercept = fit_intercept + self.scoring = scoring + self.copy_X = copy_X + self.gcv_mode = gcv_mode + self.store_cv_values = store_cv_values + self.is_clf = is_clf + self.alpha_per_target = alpha_per_target + + @staticmethod + def _decomp_diag(v_prime, Q): + # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T)) + return (v_prime * Q**2).sum(axis=-1) + + @staticmethod + def _diag_dot(D, B): + # compute dot(diag(D), B) + if len(B.shape) > 1: + # handle case where B is > 1-d + D = D[(slice(None),) + (np.newaxis,) * (len(B.shape) - 1)] + return D * B + + def _compute_gram(self, X, sqrt_sw): + """Computes the Gram matrix XX^T with possible centering. 
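+
+        This is the kernel matrix ``K = X X^T`` from the class docstring; it
+        is the matrix that the 'eigen' (dual) path of the leave-one-out
+        solver eigendecomposes.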
+ + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The preprocessed design matrix. + + sqrt_sw : ndarray of shape (n_samples,) + square roots of sample weights + + Returns + ------- + gram : ndarray of shape (n_samples, n_samples) + The Gram matrix. + X_mean : ndarray of shape (n_feature,) + The weighted mean of ``X`` for each feature. + + Notes + ----- + When X is dense the centering has been done in preprocessing + so the mean is 0 and we just compute XX^T. + + When X is sparse it has not been centered in preprocessing, but it has + been scaled by sqrt(sample weights). + + When self.fit_intercept is False no centering is done. + + The centered X is never actually computed because centering would break + the sparsity of X. + """ + center = self.fit_intercept and sparse.issparse(X) + if not center: + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X, X.T, dense_output=True), X_mean + # X is sparse + n_samples = X.shape[0] + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples) + ) + X_weighted = sample_weight_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean *= n_samples / sqrt_sw.dot(sqrt_sw) + X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True) + X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean) + return ( + safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, + X_mean, + ) + + def _compute_covariance(self, X, sqrt_sw): + """Computes covariance matrix X^TX with possible centering. + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + The preprocessed design matrix. + + sqrt_sw : ndarray of shape (n_samples,) + square roots of sample weights + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + The covariance matrix. + X_mean : ndarray of shape (n_feature,) + The weighted mean of ``X`` for each feature. + + Notes + ----- + Since X is sparse it has not been centered in preprocessing, but it has + been scaled by sqrt(sample weights). + + When self.fit_intercept is False no centering is done. + + The centered X is never actually computed because centering would break + the sparsity of X. + """ + if not self.fit_intercept: + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X.T, X, dense_output=True), X_mean + # this function only gets called for sparse X + n_samples = X.shape[0] + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples) + ) + X_weighted = sample_weight_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw) + weight_sum = sqrt_sw.dot(sqrt_sw) + return ( + safe_sparse_dot(X.T, X, dense_output=True) + - weight_sum * np.outer(X_mean, X_mean), + X_mean, + ) + + def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): + """Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) + without explicitly centering X nor computing X.dot(A) + when X is sparse. 
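+
+        The result is the diagonal of the ridge hat matrix associated with
+        the (implicitly) centered design, which the sparse covariance
+        solvers use to form the leave-one-out residuals. Rows are processed
+        in batches so that only one batch of rows of ``X`` is densified at
+        a time.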
+ + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + + A : ndarray of shape (n_features, n_features) + + X_mean : ndarray of shape (n_features,) + + sqrt_sw : ndarray of shape (n_features,) + square roots of sample weights + + Returns + ------- + diag : np.ndarray, shape (n_samples,) + The computed diagonal. + """ + intercept_col = scale = sqrt_sw + batch_size = X.shape[1] + diag = np.empty(X.shape[0], dtype=X.dtype) + for start in range(0, X.shape[0], batch_size): + batch = slice(start, min(X.shape[0], start + batch_size), 1) + X_batch = np.empty( + (X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype + ) + if self.fit_intercept: + X_batch[:, :-1] = X[batch].toarray() - X_mean * scale[batch][:, None] + X_batch[:, -1] = intercept_col[batch] + else: + X_batch = X[batch].toarray() + diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) + return diag + + def _eigen_decompose_gram(self, X, y, sqrt_sw): + """Eigendecomposition of X.X^T, used when n_samples <= n_features.""" + # if X is dense it has already been centered in preprocessing + K, X_mean = self._compute_gram(X, sqrt_sw) + if self.fit_intercept: + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + K += np.outer(sqrt_sw, sqrt_sw) + eigvals, Q = linalg.eigh(K) + QT_y = np.dot(Q.T, y) + return X_mean, eigvals, Q, QT_y + + def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X.X^T (n_samples <= n_features). + """ + w = 1.0 / (eigvals + alpha) + if self.fit_intercept: + # the vector containing the square roots of the sample weights (1 + # when no sample weights) is the eigenvector of XX^T which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weight). + normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, Q) + w[intercept_dim] = 0 # cancel regularization for the intercept + + c = np.dot(Q, self._diag_dot(w, QT_y)) + G_inverse_diag = self._decomp_diag(w, Q) + # handle case where y is 2-d + if len(y.shape) != 1: + G_inverse_diag = G_inverse_diag[:, np.newaxis] + return G_inverse_diag, c + + def _eigen_decompose_covariance(self, X, y, sqrt_sw): + """Eigendecomposition of X^T.X, used when n_samples > n_features + and X is sparse. + """ + n_samples, n_features = X.shape + cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype) + cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw) + if not self.fit_intercept: + cov = cov[:-1, :-1] + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + # when all samples have the same weight we add a column of 1 + else: + cov[-1] = 0 + cov[:, -1] = 0 + cov[-1, -1] = sqrt_sw.dot(sqrt_sw) + nullspace_dim = max(0, n_features - n_samples) + eigvals, V = linalg.eigh(cov) + # remove eigenvalues and vectors in the null space of X^T.X + eigvals = eigvals[nullspace_dim:] + V = V[:, nullspace_dim:] + return X_mean, eigvals, V, X + + def _solve_eigen_covariance_no_intercept( + self, alpha, y, sqrt_sw, X_mean, eigvals, V, X + ): + """Compute dual coefficients and diagonal of G^-1. 
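+
+        The returned pair ``((1 - hat_diag) / alpha, (y - y_hat) / alpha)``
+        plays the same role as ``(diag(G^-1), c)`` in the identities of the
+        class docstring, since ``I - H = alpha * G^-1``; their ratio is the
+        usual leave-one-out residual ``(y - y_hat) / (1 - hat_diag)``.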
+ + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse), and not fitting an intercept. + """ + w = 1 / (eigvals + alpha) + A = (V * w).dot(V.T) + AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) + y_hat = safe_sparse_dot(X, AXy, dense_output=True) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _solve_eigen_covariance_intercept( + self, alpha, y, sqrt_sw, X_mean, eigvals, V, X + ): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse), + and we are fitting an intercept. + """ + # the vector [0, 0, ..., 0, 1] + # is the eigenvector of X^TX which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weight), e.g. n when uniform sample weights. + intercept_sv = np.zeros(V.shape[0]) + intercept_sv[-1] = 1 + intercept_dim = _find_smallest_angle(intercept_sv, V) + w = 1 / (eigvals + alpha) + w[intercept_dim] = 1 / eigvals[intercept_dim] + A = (V * w).dot(V.T) + # add a column to X containing the square roots of sample weights + X_op = _X_CenterStackOp(X, X_mean, sqrt_sw) + AXy = A.dot(X_op.T.dot(y)) + y_hat = X_op.dot(AXy) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) + # return (1 - hat_diag), (y - y_hat) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse). + """ + if self.fit_intercept: + return self._solve_eigen_covariance_intercept( + alpha, y, sqrt_sw, X_mean, eigvals, V, X + ) + return self._solve_eigen_covariance_no_intercept( + alpha, y, sqrt_sw, X_mean, eigvals, V, X + ) + + def _svd_decompose_design_matrix(self, X, y, sqrt_sw): + # X already centered + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + if self.fit_intercept: + # to emulate fit_intercept=True situation, add a column + # containing the square roots of the sample weights + # by centering, the other columns are orthogonal to that one + intercept_column = sqrt_sw[:, None] + X = np.hstack((X, intercept_column)) + U, singvals, _ = linalg.svd(X, full_matrices=0) + singvals_sq = singvals**2 + UT_y = np.dot(U.T, y) + return X_mean, singvals_sq, U, UT_y + + def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have an SVD decomposition of X + (n_samples > n_features and X is dense). + """ + w = ((singvals_sq + alpha) ** -1) - (alpha**-1) + if self.fit_intercept: + # detect intercept column + normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, U) + # cancel the regularization for the intercept + w[intercept_dim] = -(alpha**-1) + c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y + G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1) + if len(y.shape) != 1: + # handle case where y is 2-d + G_inverse_diag = G_inverse_diag[:, np.newaxis] + return G_inverse_diag, c + + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model with gcv. 
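+
+        The design matrix is decomposed only once (eigendecomposition of the
+        Gram matrix, eigendecomposition of the covariance matrix, or an SVD,
+        depending on ``gcv_mode`` and on whether ``X`` is sparse); that
+        decomposition is then reused to evaluate the leave-one-out errors or
+        scores for every candidate value in ``alphas``.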
+ + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. Will be cast to float64 if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to float64 if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. Note that the scale of `sample_weight` + has an impact on the loss; i.e. multiplying all weights by `k` + is equivalent to setting `alpha / k`. + + Returns + ------- + self : object + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc", "coo"], + dtype=[np.float64], + multi_output=True, + y_numeric=True, + ) + + # alpha_per_target cannot be used in classifier mode. All subclasses + # of _RidgeGCV that are classifiers keep alpha_per_target at its + # default value: False, so the condition below should never happen. + assert not (self.is_clf and self.alpha_per_target) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + self.alphas = np.asarray(self.alphas) + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + gcv_mode = _check_gcv_mode(X, self.gcv_mode) + + if gcv_mode == "eigen": + decompose = self._eigen_decompose_gram + solve = self._solve_eigen_gram + elif gcv_mode == "svd": + if sparse.issparse(X): + decompose = self._eigen_decompose_covariance + solve = self._solve_eigen_covariance + else: + decompose = self._svd_decompose_design_matrix + solve = self._solve_svd_design_matrix + + n_samples = X.shape[0] + + if sample_weight is not None: + X, y, sqrt_sw = _rescale_data(X, y, sample_weight) + else: + sqrt_sw = np.ones(n_samples, dtype=X.dtype) + + X_mean, *decomposition = decompose(X, y, sqrt_sw) + + scorer = check_scoring(self, scoring=self.scoring, allow_none=True) + error = scorer is None + + n_y = 1 if len(y.shape) == 1 else y.shape[1] + n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas) + + if self.store_cv_values: + self.cv_values_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype) + + best_coef, best_score, best_alpha = None, None, None + + for i, alpha in enumerate(np.atleast_1d(self.alphas)): + G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition) + if error: + squared_errors = (c / G_inverse_diag) ** 2 + if self.alpha_per_target: + alpha_score = -squared_errors.mean(axis=0) + else: + alpha_score = -squared_errors.mean() + if self.store_cv_values: + self.cv_values_[:, i] = squared_errors.ravel() + else: + predictions = y - (c / G_inverse_diag) + if self.store_cv_values: + self.cv_values_[:, i] = predictions.ravel() + + if self.is_clf: + identity_estimator = _IdentityClassifier(classes=np.arange(n_y)) + alpha_score = scorer( + identity_estimator, predictions, y.argmax(axis=1) + ) + else: + identity_estimator = _IdentityRegressor() + if self.alpha_per_target: + alpha_score = np.array( + [ + scorer(identity_estimator, predictions[:, j], y[:, j]) + for j in range(n_y) + ] + ) + else: + alpha_score = scorer( + identity_estimator, predictions.ravel(), y.ravel() + ) + + # Keep track of the best model + if best_score is None: + # initialize + if self.alpha_per_target and n_y > 1: + best_coef = c + best_score = np.atleast_1d(alpha_score) + best_alpha = np.full(n_y, alpha) + else: + best_coef = c + best_score = alpha_score + best_alpha = 
alpha + else: + # update + if self.alpha_per_target and n_y > 1: + to_update = alpha_score > best_score + best_coef[:, to_update] = c[:, to_update] + best_score[to_update] = alpha_score[to_update] + best_alpha[to_update] = alpha + elif alpha_score > best_score: + best_coef, best_score, best_alpha = c, alpha_score, alpha + + self.alpha_ = best_alpha + self.best_score_ = best_score + self.dual_coef_ = best_coef + self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) + + if sparse.issparse(X): + X_offset = X_mean * X_scale + else: + X_offset += X_mean * X_scale + self._set_intercept(X_offset, y_offset, X_scale) + + if self.store_cv_values: + if len(y.shape) == 1: + cv_values_shape = n_samples, n_alphas + else: + cv_values_shape = n_samples, n_y, n_alphas + self.cv_values_ = self.cv_values_.reshape(cv_values_shape) + + return self + + +class _BaseRidgeCV(LinearModel): + _parameter_constraints: dict = { + "alphas": ["array-like", Interval(Real, 0, None, closed="neither")], + "fit_intercept": ["boolean"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "cv": ["cv_object"], + "gcv_mode": [StrOptions({"auto", "svd", "eigen"}), None], + "store_cv_values": ["boolean"], + "alpha_per_target": ["boolean"], + } + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + cv=None, + gcv_mode=None, + store_cv_values=False, + alpha_per_target=False, + ): + self.alphas = alphas + self.fit_intercept = fit_intercept + self.scoring = scoring + self.cv = cv + self.gcv_mode = gcv_mode + self.store_cv_values = store_cv_values + self.alpha_per_target = alpha_per_target + + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model with cv. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. If using GCV, will be cast to float64 + if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + When sample_weight is provided, the selected hyperparameter may depend + on whether we use leave-one-out cross-validation (cv=None or cv='auto') + or another form of cross-validation, because only leave-one-out + cross-validation takes the sample weights into account when computing + the validation score. 
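+
+        With ``cv=None`` the efficient leave-one-out estimator ``_RidgeGCV``
+        is fitted directly; for any other ``cv`` the candidate alphas are
+        searched with :class:`~sklearn.model_selection.GridSearchCV` wrapped
+        around :class:`Ridge` or :class:`RidgeClassifier`, and
+        ``store_cv_values`` / ``alpha_per_target`` are not supported.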
+ """ + cv = self.cv + + check_scalar_alpha = partial( + check_scalar, + target_type=numbers.Real, + min_val=0.0, + include_boundaries="neither", + ) + + if isinstance(self.alphas, (np.ndarray, list, tuple)): + n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas) + if n_alphas != 1: + for index, alpha in enumerate(self.alphas): + alpha = check_scalar_alpha(alpha, f"alphas[{index}]") + else: + self.alphas[0] = check_scalar_alpha(self.alphas[0], "alphas") + alphas = np.asarray(self.alphas) + + if cv is None: + estimator = _RidgeGCV( + alphas, + fit_intercept=self.fit_intercept, + scoring=self.scoring, + gcv_mode=self.gcv_mode, + store_cv_values=self.store_cv_values, + is_clf=is_classifier(self), + alpha_per_target=self.alpha_per_target, + ) + estimator.fit(X, y, sample_weight=sample_weight) + self.alpha_ = estimator.alpha_ + self.best_score_ = estimator.best_score_ + if self.store_cv_values: + self.cv_values_ = estimator.cv_values_ + else: + if self.store_cv_values: + raise ValueError("cv!=None and store_cv_values=True are incompatible") + if self.alpha_per_target: + raise ValueError("cv!=None and alpha_per_target=True are incompatible") + + parameters = {"alpha": alphas} + solver = "sparse_cg" if sparse.issparse(X) else "auto" + model = RidgeClassifier if is_classifier(self) else Ridge + gs = GridSearchCV( + model( + fit_intercept=self.fit_intercept, + solver=solver, + ), + parameters, + cv=cv, + scoring=self.scoring, + ) + gs.fit(X, y, sample_weight=sample_weight) + estimator = gs.best_estimator_ + self.alpha_ = gs.best_estimator_.alpha + self.best_score_ = gs.best_score_ + + self.coef_ = estimator.coef_ + self.intercept_ = estimator.intercept_ + self.n_features_in_ = estimator.n_features_in_ + if hasattr(estimator, "feature_names_in_"): + self.feature_names_in_ = estimator.feature_names_in_ + + return self + + +class RidgeCV( + _RoutingNotSupportedMixin, MultiOutputMixin, RegressorMixin, _BaseRidgeCV +): + """Ridge regression with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + By default, it performs efficient Leave-One-Out Cross-Validation. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0) + Array of alpha values to try. + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + If using Leave-One-Out cross-validation, alphas must be positive. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + scoring : str, callable, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + If None, the negative mean squared error if cv is 'auto' or None + (i.e. when using leave-one-out cross-validation), and r2 score + otherwise. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the efficient Leave-One-Out cross-validation + - integer, to specify the number of folds. 
+ - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used, else, + :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + gcv_mode : {'auto', 'svd', 'eigen'}, default='auto' + Flag indicating which strategy to use when performing + Leave-One-Out Cross-Validation. Options are:: + + 'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen' + 'svd' : force use of singular value decomposition of X when X is + dense, eigenvalue decomposition of X^T.X when X is sparse. + 'eigen' : force computation via eigendecomposition of X.X^T + + The 'auto' mode is the default and is intended to pick the cheaper + option of the two depending on the shape of the training data. + + store_cv_values : bool, default=False + Flag indicating if the cross-validation values corresponding to + each alpha should be stored in the ``cv_values_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + alpha_per_target : bool, default=False + Flag indicating whether to optimize the alpha value (picked from the + `alphas` parameter list) for each target separately (for multi-output + settings: multiple prediction targets). When set to `True`, after + fitting, the `alpha_` attribute will contain a value for each target. + When set to `False`, a single alpha is used for all targets. + + .. versionadded:: 0.24 + + Attributes + ---------- + cv_values_ : ndarray of shape (n_samples, n_alphas) or \ + shape (n_samples, n_targets, n_alphas), optional + Cross-validation values for each alpha (only available if + ``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been + called, this attribute will contain the mean squared errors if + `scoring is None` otherwise it will contain standardized per point + prediction values. + + coef_ : ndarray of shape (n_features) or (n_targets, n_features) + Weight vector(s). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + alpha_ : float or ndarray of shape (n_targets,) + Estimated regularization parameter, or, if ``alpha_per_target=True``, + the estimated regularization parameter for each target. + + best_score_ : float or ndarray of shape (n_targets,) + Score of base estimator with best alpha, or, if + ``alpha_per_target=True``, a score for each target. + + .. versionadded:: 0.23 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifier : Classifier based on ridge regression on {-1, 1} labels. + RidgeClassifierCV : Ridge classifier with built-in cross validation. + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.linear_model import RidgeCV + >>> X, y = load_diabetes(return_X_y=True) + >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y) + >>> clf.score(X, y) + 0.5166... + """ + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model with cv. 
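+
+        This method only guards against the unsupported metadata-routing
+        case for ``sample_weight`` and then delegates to the shared
+        ``_BaseRidgeCV`` implementation described above.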
+ + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. If using GCV, will be cast to float64 + if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + When sample_weight is provided, the selected hyperparameter may depend + on whether we use leave-one-out cross-validation (cv=None or cv='auto') + or another form of cross-validation, because only leave-one-out + cross-validation takes the sample weights into account when computing + the validation score. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + super().fit(X, y, sample_weight=sample_weight) + return self + + +class RidgeClassifierCV(_RoutingNotSupportedMixin, _RidgeClassifierMixin, _BaseRidgeCV): + """Ridge classifier with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + By default, it performs Leave-One-Out Cross-Validation. Currently, + only the n_features > n_samples case is handled efficiently. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0) + Array of alpha values to try. + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + scoring : str, callable, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the efficient Leave-One-Out cross-validation + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + store_cv_values : bool, default=False + Flag indicating if the cross-validation values corresponding to + each alpha should be stored in the ``cv_values_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + Attributes + ---------- + cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional + Cross-validation values for each alpha (only if ``store_cv_values=True`` and + ``cv=None``). 
After ``fit()`` has been called, this attribute will + contain the mean squared errors if `scoring is None` otherwise it + will contain standardized per point prediction values. + + coef_ : ndarray of shape (1, n_features) or (n_targets, n_features) + Coefficient of the features in the decision function. + + ``coef_`` is of shape (1, n_features) when the given problem is binary. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + alpha_ : float + Estimated regularization parameter. + + best_score_ : float + Score of base estimator with best alpha. + + .. versionadded:: 0.23 + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifier : Ridge classifier. + RidgeCV : Ridge regression with built-in cross validation. + + Notes + ----- + For multi-class classification, n_class classifiers are trained in + a one-versus-all approach. Concretely, this is implemented by taking + advantage of the multi-variate response support in Ridge. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.linear_model import RidgeClassifierCV + >>> X, y = load_breast_cancer(return_X_y=True) + >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y) + >>> clf.score(X, y) + 0.9630... + """ + + _parameter_constraints: dict = { + **_BaseRidgeCV._parameter_constraints, + "class_weight": [dict, StrOptions({"balanced"}), None], + } + for param in ("gcv_mode", "alpha_per_target"): + _parameter_constraints.pop(param) + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + cv=None, + class_weight=None, + store_cv_values=False, + ): + super().__init__( + alphas=alphas, + fit_intercept=fit_intercept, + scoring=scoring, + cv=cv, + store_cv_values=store_cv_values, + ) + self.class_weight = class_weight + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge classifier with cv. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples + and `n_features` is the number of features. When using GCV, + will be cast to float64 if necessary. + + y : ndarray of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + # `RidgeClassifier` does not accept "sag" or "saga" solver and thus support + # csr, csc, and coo sparse matrices. By using solver="eigen" we force to accept + # all sparse format. + X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen") + + # If cv is None, gcv mode will be used and we used the binarized Y + # since y will not be binarized in _RidgeGCV estimator. + # If cv is not None, a GridSearchCV with some RidgeClassifier + # estimators are used where y will be binarized. 
Thus, we pass y + # instead of the binarized Y. + target = Y if self.cv is None else y + super().fit(X, target, sample_weight=sample_weight) + return self + + def _more_tags(self): + return { + "multilabel": True, + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..369ac5527a5bb1b4c85b388da195945cee93f5aa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb145d56843e888393bcc3e7ba95d6619afb893c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e19372055f69a4f9fae39acac25aa222453f826 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b552d2e9206e9cf8f7c9b657d25d85466254cdc1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44ab852b9ef0236438f5d830f63f28e034f9de38 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad8828586b3e2ba5359207dc6617ad0e56104030 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc 
differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ccbd616bf70fd2b44619dcb12fc88ddcc99c2d5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66b225b610e3cbcab4625a075982e285ef9da441 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1bfa0b411e3abe107650a5b6c9582fc697aabb8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cef27390c63a35fc033769b8aefb039622e6ade6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd6b6bd7e019c54e8134ffcbeda7e035f9839105 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e42cf0ab309591e890cfb03e438300533aff2a62 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..588fae57fff3a5f36a234a03a0111d92fe87cb0c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..745b9ce6910da8f3de01e19f6c72896e21ca2289 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..902738b738d372d8077f4bb2e435947d94551565 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..645d418646a9c8ab4f11f5b93f68e11c8891a841 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_theil_sen.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_theil_sen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..541bd8dc7221b50c1327e85ca7c99deccab7636f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_theil_sen.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7c9f734dcf5b587c72e6549ca2c437e8b2c0bab2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py @@ -0,0 +1,789 @@ +# Author: Alexandre Gramfort +# Fabian Pedregosa +# Maria Telenczuk +# +# License: BSD 3 clause + +import warnings + +import numpy as np +import pytest +from scipy import linalg, sparse + +from sklearn.datasets import load_iris, make_regression, make_sparse_uncorrelated +from sklearn.linear_model import LinearRegression +from sklearn.linear_model._base import ( + _preprocess_data, + _rescale_data, + make_dataset, +) +from sklearn.preprocessing import add_dummy_feature +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + LIL_CONTAINERS, +) + +rtol = 1e-6 + + +def test_linear_regression(): + # Test LinearRegression on a simple dataset. 
+ # a simple dataset + X = [[1], [2]] + Y = [1, 2] + + reg = LinearRegression() + reg.fit(X, Y) + + assert_array_almost_equal(reg.coef_, [1]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [1, 2]) + + # test it also for degenerate input + X = [[1]] + Y = [0] + + reg = LinearRegression() + reg.fit(X, Y) + assert_array_almost_equal(reg.coef_, [0]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [0]) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_linear_regression_sample_weights( + sparse_container, fit_intercept, global_random_seed +): + rng = np.random.RandomState(global_random_seed) + + # It would not work with under-determined systems + n_samples, n_features = 6, 5 + + X = rng.normal(size=(n_samples, n_features)) + if sparse_container is not None: + X = sparse_container(X) + y = rng.normal(size=n_samples) + + sample_weight = 1.0 + rng.uniform(size=n_samples) + + # LinearRegression with explicit sample_weight + reg = LinearRegression(fit_intercept=fit_intercept) + reg.fit(X, y, sample_weight=sample_weight) + coefs1 = reg.coef_ + inter1 = reg.intercept_ + + assert reg.coef_.shape == (X.shape[1],) # sanity checks + + # Closed form of the weighted least square + # theta = (X^T W X)^(-1) @ X^T W y + W = np.diag(sample_weight) + X_aug = X if not fit_intercept else add_dummy_feature(X) + + Xw = X_aug.T @ W @ X_aug + yw = X_aug.T @ W @ y + coefs2 = linalg.solve(Xw, yw) + + if not fit_intercept: + assert_allclose(coefs1, coefs2) + else: + assert_allclose(coefs1, coefs2[1:]) + assert_allclose(inter1, coefs2[0]) + + +def test_raises_value_error_if_positive_and_sparse(): + error_msg = "Sparse data was passed for X, but dense data is required." + # X must not be sparse if positive == True + X = sparse.eye(10) + y = np.ones(10) + + reg = LinearRegression(positive=True) + + with pytest.raises(TypeError, match=error_msg): + reg.fit(X, y) + + +@pytest.mark.parametrize("n_samples, n_features", [(2, 3), (3, 2)]) +def test_raises_value_error_if_sample_weights_greater_than_1d(n_samples, n_features): + # Sample weights must be either scalar or 1D + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + sample_weights_OK = rng.randn(n_samples) ** 2 + 1 + sample_weights_OK_1 = 1.0 + sample_weights_OK_2 = 2.0 + + reg = LinearRegression() + + # make sure the "OK" sample weights actually work + reg.fit(X, y, sample_weights_OK) + reg.fit(X, y, sample_weights_OK_1) + reg.fit(X, y, sample_weights_OK_2) + + +def test_fit_intercept(): + # Test assertions on betas shape. 
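+    # Fitting with or without an intercept should not change the shape of
+    # ``coef_``; this is checked below on a 2-feature and a 3-feature design.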
+ X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]]) + X3 = np.array( + [[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]] + ) + y = np.array([1, 1]) + + lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y) + lr2_with_intercept = LinearRegression().fit(X2, y) + + lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y) + lr3_with_intercept = LinearRegression().fit(X3, y) + + assert lr2_with_intercept.coef_.shape == lr2_without_intercept.coef_.shape + assert lr3_with_intercept.coef_.shape == lr3_without_intercept.coef_.shape + assert lr2_without_intercept.coef_.ndim == lr3_without_intercept.coef_.ndim + + +def test_linear_regression_sparse(global_random_seed): + # Test that linear regression also works with sparse data + rng = np.random.RandomState(global_random_seed) + n = 100 + X = sparse.eye(n, n) + beta = rng.rand(n) + y = X @ beta + + ols = LinearRegression() + ols.fit(X, y.ravel()) + assert_array_almost_equal(beta, ols.coef_ + ols.intercept_) + + assert_array_almost_equal(ols.predict(X) - y.ravel(), 0) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_linear_regression_sparse_equal_dense(fit_intercept, csr_container): + # Test that linear regression agrees between sparse and dense + rng = np.random.RandomState(0) + n_samples = 200 + n_features = 2 + X = rng.randn(n_samples, n_features) + X[X < 0.1] = 0.0 + Xcsr = csr_container(X) + y = rng.rand(n_samples) + params = dict(fit_intercept=fit_intercept) + clf_dense = LinearRegression(**params) + clf_sparse = LinearRegression(**params) + clf_dense.fit(X, y) + clf_sparse.fit(Xcsr, y) + assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_) + assert_allclose(clf_dense.coef_, clf_sparse.coef_) + + +def test_linear_regression_multiple_outcome(): + # Test multiple-outcome linear regressions + rng = np.random.RandomState(0) + X, y = make_regression(random_state=rng) + + Y = np.vstack((y, y)).T + n_features = X.shape[1] + + reg = LinearRegression() + reg.fit((X), Y) + assert reg.coef_.shape == (2, n_features) + Y_pred = reg.predict(X) + reg.fit(X, y) + y_pred = reg.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_linear_regression_sparse_multiple_outcome(global_random_seed, coo_container): + # Test multiple-outcome linear regressions with sparse data + rng = np.random.RandomState(global_random_seed) + X, y = make_sparse_uncorrelated(random_state=rng) + X = coo_container(X) + Y = np.vstack((y, y)).T + n_features = X.shape[1] + + ols = LinearRegression() + ols.fit(X, Y) + assert ols.coef_.shape == (2, n_features) + Y_pred = ols.predict(X) + ols.fit(X, y.ravel()) + y_pred = ols.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +def test_linear_regression_positive(): + # Test nonnegative LinearRegression on a simple dataset. 
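+    # With positive=True the coefficients are constrained to be non-negative.
+    # On this exactly solvable toy problem (y = x) the unconstrained solution
+    # is already non-negative, so a slope of 1 and an intercept of 0 should
+    # still be recovered.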
+    X = [[1], [2]]
+    y = [1, 2]
+
+    reg = LinearRegression(positive=True)
+    reg.fit(X, y)
+
+    assert_array_almost_equal(reg.coef_, [1])
+    assert_array_almost_equal(reg.intercept_, [0])
+    assert_array_almost_equal(reg.predict(X), [1, 2])
+
+    # test it also for degenerate input
+    X = [[1]]
+    y = [0]
+
+    reg = LinearRegression(positive=True)
+    reg.fit(X, y)
+    assert_allclose(reg.coef_, [0])
+    assert_allclose(reg.intercept_, [0])
+    assert_allclose(reg.predict(X), [0])
+
+
+def test_linear_regression_positive_multiple_outcome(global_random_seed):
+    # Test multiple-outcome nonnegative linear regressions
+    rng = np.random.RandomState(global_random_seed)
+    X, y = make_sparse_uncorrelated(random_state=rng)
+    Y = np.vstack((y, y)).T
+    n_features = X.shape[1]
+
+    ols = LinearRegression(positive=True)
+    ols.fit(X, Y)
+    assert ols.coef_.shape == (2, n_features)
+    assert np.all(ols.coef_ >= 0.0)
+    Y_pred = ols.predict(X)
+    ols.fit(X, y.ravel())
+    y_pred = ols.predict(X)
+    assert_allclose(np.vstack((y_pred, y_pred)).T, Y_pred)
+
+
+def test_linear_regression_positive_vs_nonpositive(global_random_seed):
+    # Test differences with LinearRegression when positive=False.
+    rng = np.random.RandomState(global_random_seed)
+    X, y = make_sparse_uncorrelated(random_state=rng)
+
+    reg = LinearRegression(positive=True)
+    reg.fit(X, y)
+    regn = LinearRegression(positive=False)
+    regn.fit(X, y)
+
+    assert np.mean((reg.coef_ - regn.coef_) ** 2) > 1e-3
+
+
+def test_linear_regression_positive_vs_nonpositive_when_positive(global_random_seed):
+    # Test LinearRegression fitted coefficients
+    # when the problem is positive.
+    rng = np.random.RandomState(global_random_seed)
+    n_samples = 200
+    n_features = 4
+    X = rng.rand(n_samples, n_features)
+    y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 1.5 * X[:, 3]
+
+    reg = LinearRegression(positive=True)
+    reg.fit(X, y)
+    regn = LinearRegression(positive=False)
+    regn.fit(X, y)
+
+    assert np.mean((reg.coef_ - regn.coef_) ** 2) < 1e-6
+
+
+@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
+@pytest.mark.parametrize("use_sw", [True, False])
+def test_inplace_data_preprocessing(sparse_container, use_sw, global_random_seed):
+    # Check that the data is not modified inplace by the linear regression
+    # estimator.
+    rng = np.random.RandomState(global_random_seed)
+    original_X_data = rng.randn(10, 12)
+    original_y_data = rng.randn(10, 2)
+    original_sw_data = rng.rand(10)
+
+    if sparse_container is not None:
+        X = sparse_container(original_X_data)
+    else:
+        X = original_X_data.copy()
+    y = original_y_data.copy()
+    # XXX: Note that y_sparse is not supported (broken?) in the current
+    # implementation of LinearRegression.
+
+    if use_sw:
+        sample_weight = original_sw_data.copy()
+    else:
+        sample_weight = None
+
+    # Do not allow inplace preprocessing of X and y:
+    reg = LinearRegression()
+    reg.fit(X, y, sample_weight=sample_weight)
+    if sparse_container is not None:
+        assert_allclose(X.toarray(), original_X_data)
+    else:
+        assert_allclose(X, original_X_data)
+    assert_allclose(y, original_y_data)
+
+    if use_sw:
+        assert_allclose(sample_weight, original_sw_data)
+
+    # Allow inplace preprocessing of X and y
+    reg = LinearRegression(copy_X=False)
+    reg.fit(X, y, sample_weight=sample_weight)
+    if sparse_container is not None:
+        # No optimization relying on the inplace modification of sparse input
+        # data has been implemented at this time.
+        assert_allclose(X.toarray(), original_X_data)
+    else:
+        # X has been offset (and optionally rescaled by sample weights)
+        # inplace. The 0.42 threshold is arbitrary and has been found to be
+        # robust to any random seed in the admissible range.
+        assert np.linalg.norm(X - original_X_data) > 0.42
+
+    # y should not have been modified inplace by LinearRegression.fit.
+    assert_allclose(y, original_y_data)
+
+    if use_sw:
+        # Sample weights have no reason to ever be modified inplace.
+        assert_allclose(sample_weight, original_sw_data)
+
+
+def test_linear_regression_pd_sparse_dataframe_warning():
+    pd = pytest.importorskip("pandas")
+
+    # Warning is raised only when some of the columns are sparse
+    df = pd.DataFrame({"0": np.random.randn(10)})
+    for col in range(1, 4):
+        arr = np.random.randn(10)
+        arr[:8] = 0
+        # all columns but the first are sparse
+        if col != 0:
+            arr = pd.arrays.SparseArray(arr, fill_value=0)
+        df[str(col)] = arr
+
+    msg = "pandas.DataFrame with sparse columns found."
+
+    reg = LinearRegression()
+    with pytest.warns(UserWarning, match=msg):
+        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])
+
+    # does not warn when the whole dataframe is sparse
+    df["0"] = pd.arrays.SparseArray(df["0"], fill_value=0)
+    assert hasattr(df, "sparse")
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", UserWarning)
+        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])
+
+
+def test_preprocess_data(global_random_seed):
+    rng = np.random.RandomState(global_random_seed)
+    n_samples = 200
+    n_features = 2
+    X = rng.rand(n_samples, n_features)
+    y = rng.rand(n_samples)
+    expected_X_mean = np.mean(X, axis=0)
+    expected_y_mean = np.mean(y, axis=0)
+
+    Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=False)
+    assert_array_almost_equal(X_mean, np.zeros(n_features))
+    assert_array_almost_equal(y_mean, 0)
+    assert_array_almost_equal(X_scale, np.ones(n_features))
+    assert_array_almost_equal(Xt, X)
+    assert_array_almost_equal(yt, y)
+
+    Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=True)
+    assert_array_almost_equal(X_mean, expected_X_mean)
+    assert_array_almost_equal(y_mean, expected_y_mean)
+    assert_array_almost_equal(X_scale, np.ones(n_features))
+    assert_array_almost_equal(Xt, X - expected_X_mean)
+    assert_array_almost_equal(yt, y - expected_y_mean)
+
+
+@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS)
+def test_preprocess_data_multioutput(global_random_seed, sparse_container):
+    rng = np.random.RandomState(global_random_seed)
+    n_samples = 200
+    n_features = 3
+    n_outputs = 2
+    X = rng.rand(n_samples, n_features)
+    y = rng.rand(n_samples, n_outputs)
+    expected_y_mean = np.mean(y, axis=0)
+
+    if sparse_container is not None:
+        X = sparse_container(X)
+
+    _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False)
+    assert_array_almost_equal(y_mean, np.zeros(n_outputs))
+    assert_array_almost_equal(yt, y)
+
+    _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True)
+    assert_array_almost_equal(y_mean, expected_y_mean)
+    assert_array_almost_equal(yt, y - y_mean)
+
+
+@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
+def test_preprocess_data_weighted(sparse_container, global_random_seed):
+    rng = np.random.RandomState(global_random_seed)
+    n_samples = 200
+    n_features = 4
+    # Generate random data with 50% of zero values to make sure
+    # that the sparse variant of this test is actually sparse. This also
+    # shifts the mean value of each column in X further away from
+    # zero.
+ X = rng.rand(n_samples, n_features) + X[X < 0.5] = 0.0 + + # Scale the first feature of X to be 10 larger than the other to + # better check the impact of feature scaling. + X[:, 0] *= 10 + + # Constant non-zero feature. + X[:, 2] = 1.0 + + # Constant zero feature (non-materialized in the sparse case) + X[:, 3] = 0.0 + y = rng.rand(n_samples) + + sample_weight = rng.rand(n_samples) + expected_X_mean = np.average(X, axis=0, weights=sample_weight) + expected_y_mean = np.average(y, axis=0, weights=sample_weight) + + X_sample_weight_avg = np.average(X, weights=sample_weight, axis=0) + X_sample_weight_var = np.average( + (X - X_sample_weight_avg) ** 2, weights=sample_weight, axis=0 + ) + constant_mask = X_sample_weight_var < 10 * np.finfo(X.dtype).eps + assert_array_equal(constant_mask, [0, 0, 1, 1]) + expected_X_scale = np.sqrt(X_sample_weight_var) * np.sqrt(sample_weight.sum()) + + # near constant features should not be scaled + expected_X_scale[constant_mask] = 1 + + if sparse_container is not None: + X = sparse_container(X) + + # normalize is False + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data( + X, + y, + fit_intercept=True, + sample_weight=sample_weight, + ) + assert_array_almost_equal(X_mean, expected_X_mean) + assert_array_almost_equal(y_mean, expected_y_mean) + assert_array_almost_equal(X_scale, np.ones(n_features)) + if sparse_container is not None: + assert_array_almost_equal(Xt.toarray(), X.toarray()) + else: + assert_array_almost_equal(Xt, X - expected_X_mean) + assert_array_almost_equal(yt, y - expected_y_mean) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_sparse_preprocess_data_offsets(global_random_seed, lil_container): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = sparse.rand(n_samples, n_features, density=0.5, random_state=rng) + X = lil_container(X) + y = rng.rand(n_samples) + XA = X.toarray() + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=False) + assert_array_almost_equal(X_mean, np.zeros(n_features)) + assert_array_almost_equal(y_mean, 0) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt.toarray(), XA) + assert_array_almost_equal(yt, y) + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=True) + assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) + assert_array_almost_equal(y_mean, np.mean(y, axis=0)) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt.toarray(), XA) + assert_array_almost_equal(yt, y - np.mean(y, axis=0)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_preprocess_data(csr_container): + # Test output format of _preprocess_data, when input is csr + X, y = make_regression() + X[X < 2.5] = 0.0 + csr = csr_container(X) + csr_, y, _, _, _ = _preprocess_data(csr, y, fit_intercept=True) + assert csr_.format == "csr" + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("to_copy", (True, False)) +def test_preprocess_copy_data_no_checks(sparse_container, to_copy): + X, y = make_regression() + X[X < 2.5] = 0.0 + + if sparse_container is not None: + X = sparse_container(X) + + X_, y_, _, _, _ = _preprocess_data( + X, y, fit_intercept=True, copy=to_copy, check_input=False + ) + + if to_copy and sparse_container is not None: + assert not np.may_share_memory(X_.data, X.data) + elif to_copy: + assert not np.may_share_memory(X_, X) + elif sparse_container is not None: + assert 
np.may_share_memory(X_.data, X.data) + else: + assert np.may_share_memory(X_, X) + + +def test_dtype_preprocess_data(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + X_32 = np.asarray(X, dtype=np.float32) + y_32 = np.asarray(y, dtype=np.float32) + X_64 = np.asarray(X, dtype=np.float64) + y_64 = np.asarray(y, dtype=np.float64) + + for fit_intercept in [True, False]: + Xt_32, yt_32, X_mean_32, y_mean_32, X_scale_32 = _preprocess_data( + X_32, + y_32, + fit_intercept=fit_intercept, + ) + + Xt_64, yt_64, X_mean_64, y_mean_64, X_scale_64 = _preprocess_data( + X_64, + y_64, + fit_intercept=fit_intercept, + ) + + Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_scale_3264 = _preprocess_data( + X_32, + y_64, + fit_intercept=fit_intercept, + ) + + Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_scale_6432 = _preprocess_data( + X_64, + y_32, + fit_intercept=fit_intercept, + ) + + assert Xt_32.dtype == np.float32 + assert yt_32.dtype == np.float32 + assert X_mean_32.dtype == np.float32 + assert y_mean_32.dtype == np.float32 + assert X_scale_32.dtype == np.float32 + + assert Xt_64.dtype == np.float64 + assert yt_64.dtype == np.float64 + assert X_mean_64.dtype == np.float64 + assert y_mean_64.dtype == np.float64 + assert X_scale_64.dtype == np.float64 + + assert Xt_3264.dtype == np.float32 + assert yt_3264.dtype == np.float32 + assert X_mean_3264.dtype == np.float32 + assert y_mean_3264.dtype == np.float32 + assert X_scale_3264.dtype == np.float32 + + assert Xt_6432.dtype == np.float64 + assert yt_6432.dtype == np.float64 + assert X_mean_6432.dtype == np.float64 + assert y_mean_6432.dtype == np.float64 + assert X_scale_6432.dtype == np.float64 + + assert X_32.dtype == np.float32 + assert y_32.dtype == np.float32 + assert X_64.dtype == np.float64 + assert y_64.dtype == np.float64 + + assert_array_almost_equal(Xt_32, Xt_64) + assert_array_almost_equal(yt_32, yt_64) + assert_array_almost_equal(X_mean_32, X_mean_64) + assert_array_almost_equal(y_mean_32, y_mean_64) + assert_array_almost_equal(X_scale_32, X_scale_64) + + +@pytest.mark.parametrize("n_targets", [None, 2]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_rescale_data(n_targets, sparse_container, global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + + sample_weight = 1.0 + rng.rand(n_samples) + X = rng.rand(n_samples, n_features) + if n_targets is None: + y = rng.rand(n_samples) + else: + y = rng.rand(n_samples, n_targets) + + expected_sqrt_sw = np.sqrt(sample_weight) + expected_rescaled_X = X * expected_sqrt_sw[:, np.newaxis] + + if n_targets is None: + expected_rescaled_y = y * expected_sqrt_sw + else: + expected_rescaled_y = y * expected_sqrt_sw[:, np.newaxis] + + if sparse_container is not None: + X = sparse_container(X) + if n_targets is None: + y = sparse_container(y.reshape(-1, 1)) + else: + y = sparse_container(y) + + rescaled_X, rescaled_y, sqrt_sw = _rescale_data(X, y, sample_weight) + + assert_allclose(sqrt_sw, expected_sqrt_sw) + + if sparse_container is not None: + rescaled_X = rescaled_X.toarray() + rescaled_y = rescaled_y.toarray() + if n_targets is None: + rescaled_y = rescaled_y.ravel() + + assert_allclose(rescaled_X, expected_rescaled_X) + assert_allclose(rescaled_y, expected_rescaled_y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_fused_types_make_dataset(csr_container): + iris = load_iris() + + 
X_32 = iris.data.astype(np.float32) + y_32 = iris.target.astype(np.float32) + X_csr_32 = csr_container(X_32) + sample_weight_32 = np.arange(y_32.size, dtype=np.float32) + + X_64 = iris.data.astype(np.float64) + y_64 = iris.target.astype(np.float64) + X_csr_64 = csr_container(X_64) + sample_weight_64 = np.arange(y_64.size, dtype=np.float64) + + # array + dataset_32, _ = make_dataset(X_32, y_32, sample_weight_32) + dataset_64, _ = make_dataset(X_64, y_64, sample_weight_64) + xi_32, yi_32, _, _ = dataset_32._next_py() + xi_64, yi_64, _, _ = dataset_64._next_py() + xi_data_32, _, _ = xi_32 + xi_data_64, _, _ = xi_64 + + assert xi_data_32.dtype == np.float32 + assert xi_data_64.dtype == np.float64 + assert_allclose(yi_64, yi_32, rtol=rtol) + + # csr + datasetcsr_32, _ = make_dataset(X_csr_32, y_32, sample_weight_32) + datasetcsr_64, _ = make_dataset(X_csr_64, y_64, sample_weight_64) + xicsr_32, yicsr_32, _, _ = datasetcsr_32._next_py() + xicsr_64, yicsr_64, _, _ = datasetcsr_64._next_py() + xicsr_data_32, _, _ = xicsr_32 + xicsr_data_64, _, _ = xicsr_64 + + assert xicsr_data_32.dtype == np.float32 + assert xicsr_data_64.dtype == np.float64 + + assert_allclose(xicsr_data_64, xicsr_data_32, rtol=rtol) + assert_allclose(yicsr_64, yicsr_32, rtol=rtol) + + assert_array_equal(xi_data_32, xicsr_data_32) + assert_array_equal(xi_data_64, xicsr_data_64) + assert_array_equal(yi_32, yicsr_32) + assert_array_equal(yi_64, yicsr_64) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_linear_regression_sample_weight_consistency( + sparse_container, fit_intercept, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weights_invariance alone and also tests sparse X. + It is very similar to test_enet_sample_weight_consistency. + """ + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict(fit_intercept=fit_intercept) + + reg = LinearRegression(**params).fit(X, y, sample_weight=None) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # 1) sample_weight=np.ones(..) must be equivalent to sample_weight=None + # same check as check_sample_weights_invariance(name, reg, kind="ones"), but we also + # test with sparse input. + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect, cf. 
np.average() + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + reg = reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + reg.fit(X, y, sample_weight=np.pi * sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6 if sparse_container is None else 1e-5) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 4) setting elements of sample_weight to 0 is equivalent to removing these samples + sample_weight_0 = sample_weight.copy() + sample_weight_0[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight_0) + coef_0 = reg.coef_.copy() + if fit_intercept: + intercept_0 = reg.intercept_ + reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5]) + if fit_intercept and sparse_container is None: + # FIXME: https://github.com/scikit-learn/scikit-learn/issues/26164 + # This often fails, e.g. when calling + # SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all" pytest \ + # sklearn/linear_model/tests/test_base.py\ + # ::test_linear_regression_sample_weight_consistency + pass + else: + assert_allclose(reg.coef_, coef_0, rtol=1e-5) + if fit_intercept: + assert_allclose(reg.intercept_, intercept_0) + + # 5) check that multiplying sample_weight by 2 is equivalent to repeating + # corresponding samples twice + if sparse_container is not None: + X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc") + else: + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + + reg1 = LinearRegression(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = LinearRegression(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6) + if fit_intercept: + assert_allclose(reg1.intercept_, reg2.intercept_) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_bayes.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_bayes.py new file mode 100644 index 0000000000000000000000000000000000000000..a700a98dbbc45625a71d67b0acbb469926d2da30 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_bayes.py @@ -0,0 +1,329 @@ +# Author: Alexandre Gramfort +# Fabian Pedregosa +# +# License: BSD 3 clause + +from math import log + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.linear_model import ARDRegression, BayesianRidge, Ridge +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + _convert_container, + assert_almost_equal, + assert_array_almost_equal, + assert_array_less, +) +from sklearn.utils.extmath import fast_logdet + +diabetes = datasets.load_diabetes() + + +def test_bayesian_ridge_scores(): + """Check scores attribute shape""" + X, y = diabetes.data, diabetes.target + + clf = BayesianRidge(compute_score=True) + clf.fit(X, y) + + assert clf.scores_.shape == (clf.n_iter_ + 1,) + + +def test_bayesian_ridge_score_values(): + """Check value of score on toy example. 
+
+    Compute log marginal likelihood with equation (36) in Sparse Bayesian
+    Learning and the Relevance Vector Machine (Tipping, 2001):
+
+    - 0.5 * (log |Id/alpha + X.X^T/lambda|
+             + y^T.(Id/alpha + X.X^T/lambda)^-1.y + n * log(2 * pi))
+    + lambda_1 * log(lambda) - lambda_2 * lambda
+    + alpha_1 * log(alpha) - alpha_2 * alpha
+
+    and check equality with the score computed during training.
+    """
+
+    X, y = diabetes.data, diabetes.target
+    n_samples = X.shape[0]
+    # check with initial values of alpha and lambda (see code for the values)
+    eps = np.finfo(np.float64).eps
+    alpha_ = 1.0 / (np.var(y) + eps)
+    lambda_ = 1.0
+
+    # value of the parameters of the Gamma hyperpriors
+    alpha_1 = 0.1
+    alpha_2 = 0.1
+    lambda_1 = 0.1
+    lambda_2 = 0.1
+
+    # compute score using formula of docstring
+    score = lambda_1 * log(lambda_) - lambda_2 * lambda_
+    score += alpha_1 * log(alpha_) - alpha_2 * alpha_
+    M = 1.0 / alpha_ * np.eye(n_samples) + 1.0 / lambda_ * np.dot(X, X.T)
+    M_inv_dot_y = np.linalg.solve(M, y)
+    score += -0.5 * (
+        fast_logdet(M) + np.dot(y.T, M_inv_dot_y) + n_samples * log(2 * np.pi)
+    )
+
+    # compute score with BayesianRidge
+    clf = BayesianRidge(
+        alpha_1=alpha_1,
+        alpha_2=alpha_2,
+        lambda_1=lambda_1,
+        lambda_2=lambda_2,
+        max_iter=1,
+        fit_intercept=False,
+        compute_score=True,
+    )
+    clf.fit(X, y)
+
+    assert_almost_equal(clf.scores_[0], score, decimal=9)
+
+
+def test_bayesian_ridge_parameter():
+    # Test correctness of lambda_ and alpha_ parameters (GitHub issue #8224)
+    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
+    y = np.array([1, 2, 3, 2, 0, 4, 5]).T
+
+    # A Ridge regression model using an alpha value equal to the ratio of
+    # lambda_ and alpha_ from the Bayesian Ridge model must be identical
+    br_model = BayesianRidge(compute_score=True).fit(X, y)
+    rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)
+    assert_array_almost_equal(rr_model.coef_, br_model.coef_)
+    assert_almost_equal(rr_model.intercept_, br_model.intercept_)
+
+
+def test_bayesian_sample_weights():
+    # Test correctness of sample_weight handling
+    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
+    y = np.array([1, 2, 3, 2, 0, 4, 5]).T
+    w = np.array([4, 3, 3, 1, 1, 2, 3]).T
+
+    # A Ridge regression model using an alpha value equal to the ratio of
+    # lambda_ and alpha_ from the Bayesian Ridge model must be identical
+    br_model = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)
+    rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(
+        X, y, sample_weight=w
+    )
+    assert_array_almost_equal(rr_model.coef_, br_model.coef_)
+    assert_almost_equal(rr_model.intercept_, br_model.intercept_)
+
+
+def test_toy_bayesian_ridge_object():
+    # Test BayesianRidge on a toy dataset
+    X = np.array([[1], [2], [6], [8], [10]])
+    Y = np.array([1, 2, 6, 8, 10])
+    clf = BayesianRidge(compute_score=True)
+    clf.fit(X, Y)
+
+    # Check that the model could approximately learn the identity function
+    test = [[1], [3], [4]]
+    assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
+
+
+def test_bayesian_initial_params():
+    # Test BayesianRidge with initial values (alpha_init, lambda_init)
+    X = np.vander(np.linspace(0, 4, 5), 4)
+    y = np.array([0.0, 1.0, 0.0, -1.0, 0.0])  # y = (x^3 - 6x^2 + 8x) / 3
+
+    # In this case, starting from the default initial values will increase
+    # the bias of the fitted curve. So, lambda_init should be small.
+    reg = BayesianRidge(alpha_init=1.0, lambda_init=1e-3)
+    # Check that the R2 score is nearly equal to one.
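+    # (The small lambda_init corresponds to a weak, low-precision prior on the
+    # weights, so the cubic target can be recovered almost exactly.)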
+ r2 = reg.fit(X, y).score(X, y) + assert_almost_equal(r2, 1.0) + + +def test_prediction_bayesian_ridge_ard_with_constant_input(): + # Test BayesianRidge and ARDRegression predictions for edge case of + # constant target vectors + n_samples = 4 + n_features = 5 + random_state = check_random_state(42) + constant_value = random_state.rand() + X = random_state.random_sample((n_samples, n_features)) + y = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype) + expected = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype) + + for clf in [BayesianRidge(), ARDRegression()]: + y_pred = clf.fit(X, y).predict(X) + assert_array_almost_equal(y_pred, expected) + + +def test_std_bayesian_ridge_ard_with_constant_input(): + # Test BayesianRidge and ARDRegression standard dev. for edge case of + # constant target vector + # The standard dev. should be relatively small (< 0.01 is tested here) + n_samples = 10 + n_features = 5 + random_state = check_random_state(42) + constant_value = random_state.rand() + X = random_state.random_sample((n_samples, n_features)) + y = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype) + expected_upper_boundary = 0.01 + + for clf in [BayesianRidge(), ARDRegression()]: + _, y_std = clf.fit(X, y).predict(X, return_std=True) + assert_array_less(y_std, expected_upper_boundary) + + +def test_update_of_sigma_in_ard(): + # Checks that `sigma_` is updated correctly after the last iteration + # of the ARDRegression algorithm. See issue #10128. + X = np.array([[1, 0], [0, 0]]) + y = np.array([0, 0]) + clf = ARDRegression(max_iter=1) + clf.fit(X, y) + # With the inputs above, ARDRegression prunes both of the two coefficients + # in the first iteration. Hence, the expected shape of `sigma_` is (0, 0). 
+ assert clf.sigma_.shape == (0, 0) + # Ensure that no error is thrown at prediction stage + clf.predict(X, return_std=True) + + +def test_toy_ard_object(): + # Test BayesianRegression ARD classifier + X = np.array([[1], [2], [3]]) + Y = np.array([1, 2, 3]) + clf = ARDRegression(compute_score=True) + clf.fit(X, Y) + + # Check that the model could approximately learn the identity function + test = [[1], [3], [4]] + assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2) + + +@pytest.mark.parametrize("n_samples, n_features", ((10, 100), (100, 10))) +def test_ard_accuracy_on_easy_problem(global_random_seed, n_samples, n_features): + # Check that ARD converges with reasonable accuracy on an easy problem + # (Github issue #14055) + X = np.random.RandomState(global_random_seed).normal(size=(250, 3)) + y = X[:, 1] + + regressor = ARDRegression() + regressor.fit(X, y) + + abs_coef_error = np.abs(1 - regressor.coef_[1]) + assert abs_coef_error < 1e-10 + + +@pytest.mark.parametrize("constructor_name", ["array", "dataframe"]) +def test_return_std(constructor_name): + # Test return_std option for both Bayesian regressors + def f(X): + return np.dot(X, w) + b + + def f_noise(X, noise_mult): + return f(X) + np.random.randn(X.shape[0]) * noise_mult + + d = 5 + n_train = 50 + n_test = 10 + + w = np.array([1.0, 0.0, 1.0, -1.0, 0.0]) + b = 1.0 + + X = np.random.random((n_train, d)) + X = _convert_container(X, constructor_name) + + X_test = np.random.random((n_test, d)) + X_test = _convert_container(X_test, constructor_name) + + for decimal, noise_mult in enumerate([1, 0.1, 0.01]): + y = f_noise(X, noise_mult) + + m1 = BayesianRidge() + m1.fit(X, y) + y_mean1, y_std1 = m1.predict(X_test, return_std=True) + assert_array_almost_equal(y_std1, noise_mult, decimal=decimal) + + m2 = ARDRegression() + m2.fit(X, y) + y_mean2, y_std2 = m2.predict(X_test, return_std=True) + assert_array_almost_equal(y_std2, noise_mult, decimal=decimal) + + +def test_update_sigma(global_random_seed): + # make sure the two update_sigma() helpers are equivalent. The woodbury + # formula is used when n_samples < n_features, and the other one is used + # otherwise. + + rng = np.random.RandomState(global_random_seed) + + # set n_samples == n_features to avoid instability issues when inverting + # the matrices. 
Using the woodbury formula would be unstable when + # n_samples > n_features + n_samples = n_features = 10 + X = rng.randn(n_samples, n_features) + alpha = 1 + lmbda = np.arange(1, n_features + 1) + keep_lambda = np.array([True] * n_features) + + reg = ARDRegression() + + sigma = reg._update_sigma(X, alpha, lmbda, keep_lambda) + sigma_woodbury = reg._update_sigma_woodbury(X, alpha, lmbda, keep_lambda) + + np.testing.assert_allclose(sigma, sigma_woodbury) + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_dtype_match(dtype, Estimator): + # Test that np.float32 input data is not cast to np.float64 when possible + X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]], dtype=dtype) + y = np.array([1, 2, 3, 2, 0, 4, 5]).T + + model = Estimator() + # check type consistency + model.fit(X, y) + attributes = ["coef_", "sigma_"] + for attribute in attributes: + assert getattr(model, attribute).dtype == X.dtype + + y_mean, y_std = model.predict(X, return_std=True) + assert y_mean.dtype == X.dtype + assert y_std.dtype == X.dtype + + +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_dtype_correctness(Estimator): + X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]]) + y = np.array([1, 2, 3, 2, 0, 4, 5]).T + model = Estimator() + coef_32 = model.fit(X.astype(np.float32), y).coef_ + coef_64 = model.fit(X.astype(np.float64), y).coef_ + np.testing.assert_allclose(coef_32, coef_64, rtol=1e-4) + + +# TODO(1.5) remove +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_bayesian_ridge_ard_n_iter_deprecated(Estimator): + """Check the deprecation warning of `n_iter`.""" + depr_msg = ( + "'n_iter' was renamed to 'max_iter' in version 1.3 and will be removed in 1.5" + ) + X, y = diabetes.data, diabetes.target + model = Estimator(n_iter=5) + + with pytest.warns(FutureWarning, match=depr_msg): + model.fit(X, y) + + +# TODO(1.5) remove +@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression]) +def test_bayesian_ridge_ard_max_iter_and_n_iter_both_set(Estimator): + """Check that a ValueError is raised when both `max_iter` and `n_iter` are set.""" + err_msg = ( + "Both `n_iter` and `max_iter` attributes were set. Attribute" + " `n_iter` was deprecated in version 1.3 and will be removed in" + " 1.5. To avoid this error, only set the `max_iter` attribute." 
+ ) + X, y = diabetes.data, diabetes.target + model = Estimator(n_iter=5, max_iter=5) + + with pytest.raises(ValueError, match=err_msg): + model.fit(X, y) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_common.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..ff9d7aad146f3db6d838203ec56a3f4a0bbb6eb8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_common.py @@ -0,0 +1,147 @@ +# License: BSD 3 clause + +import inspect + +import numpy as np +import pytest + +from sklearn.base import is_classifier +from sklearn.datasets import make_low_rank_matrix +from sklearn.linear_model import ( + ARDRegression, + BayesianRidge, + ElasticNet, + ElasticNetCV, + Lars, + LarsCV, + Lasso, + LassoCV, + LassoLarsCV, + LassoLarsIC, + LinearRegression, + LogisticRegression, + LogisticRegressionCV, + MultiTaskElasticNet, + MultiTaskElasticNetCV, + MultiTaskLasso, + MultiTaskLassoCV, + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + PoissonRegressor, + Ridge, + RidgeCV, + SGDRegressor, + TweedieRegressor, +) + + +# Note: GammaRegressor() and TweedieRegressor(power != 1) have a non-canonical link. +@pytest.mark.parametrize( + "model", + [ + ARDRegression(), + BayesianRidge(), + ElasticNet(), + ElasticNetCV(), + Lars(), + LarsCV(), + Lasso(), + LassoCV(), + LassoLarsCV(), + LassoLarsIC(), + LinearRegression(), + # TODO: FIx SAGA which fails badly with sample_weights. + # This is a known limitation, see: + # https://github.com/scikit-learn/scikit-learn/issues/21305 + pytest.param( + LogisticRegression( + penalty="elasticnet", solver="saga", l1_ratio=0.5, tol=1e-15 + ), + marks=pytest.mark.xfail(reason="Missing importance sampling scheme"), + ), + LogisticRegressionCV(tol=1e-6), + MultiTaskElasticNet(), + MultiTaskElasticNetCV(), + MultiTaskLasso(), + MultiTaskLassoCV(), + OrthogonalMatchingPursuit(), + OrthogonalMatchingPursuitCV(), + PoissonRegressor(), + Ridge(), + RidgeCV(), + pytest.param( + SGDRegressor(tol=1e-15), + marks=pytest.mark.xfail(reason="Insufficient precision."), + ), + SGDRegressor(penalty="elasticnet", max_iter=10_000), + TweedieRegressor(power=0), # same as Ridge + ], + ids=lambda x: x.__class__.__name__, +) +@pytest.mark.parametrize("with_sample_weight", [False, True]) +def test_balance_property(model, with_sample_weight, global_random_seed): + # Test that sum(y_predicted) == sum(y_observed) on the training set. + # This must hold for all linear models with deviance of an exponential disperson + # family as loss and the corresponding canonical link if fit_intercept=True. + # Examples: + # - squared error and identity link (most linear models) + # - Poisson deviance with log link + # - log loss with logit link + # This is known as balance property or unconditional calibration/unbiasedness. + # For reference, see Corollary 3.18, 3.20 and Chapter 5.1.5 of + # M.V. Wuthrich and M. Merz, "Statistical Foundations of Actuarial Learning and its + # Applications" (June 3, 2022). 
http://doi.org/10.2139/ssrn.3822407 + + if ( + with_sample_weight + and "sample_weight" not in inspect.signature(model.fit).parameters.keys() + ): + pytest.skip("Estimator does not support sample_weight.") + + rel = 2e-4 # test precision + if isinstance(model, SGDRegressor): + rel = 1e-1 + elif hasattr(model, "solver") and model.solver == "saga": + rel = 1e-2 + + rng = np.random.RandomState(global_random_seed) + n_train, n_features, n_targets = 100, 10, None + if isinstance( + model, + (MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLasso, MultiTaskLassoCV), + ): + n_targets = 3 + X = make_low_rank_matrix(n_samples=n_train, n_features=n_features, random_state=rng) + if n_targets: + coef = ( + rng.uniform(low=-2, high=2, size=(n_features, n_targets)) + / np.max(X, axis=0)[:, None] + ) + else: + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + + expectation = np.exp(X @ coef + 0.5) + y = rng.poisson(lam=expectation) + 1 # strict positive, i.e. y > 0 + if is_classifier(model): + y = (y > expectation + 1).astype(np.float64) + + if with_sample_weight: + sw = rng.uniform(low=1, high=10, size=y.shape[0]) + else: + sw = None + + model.set_params(fit_intercept=True) # to be sure + if with_sample_weight: + model.fit(X, y, sample_weight=sw) + else: + model.fit(X, y) + + # Assert balance property. + if is_classifier(model): + assert np.average(model.predict_proba(X)[:, 1], weights=sw) == pytest.approx( + np.average(y, weights=sw), rel=rel + ) + else: + assert np.average(model.predict(X), weights=sw, axis=0) == pytest.approx( + np.average(y, weights=sw, axis=0), rel=rel + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..fe5f17ca75d00d033222cea3ff0d875b1df039c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py @@ -0,0 +1,1633 @@ +# Authors: Olivier Grisel +# Alexandre Gramfort +# License: BSD 3 clause + +import warnings +from copy import deepcopy + +import joblib +import numpy as np +import pytest +from scipy import interpolate, sparse + +from sklearn.base import clone, is_classifier +from sklearn.datasets import load_diabetes, make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + LassoLars, + LassoLarsCV, + LinearRegression, + MultiTaskElasticNet, + MultiTaskElasticNetCV, + MultiTaskLasso, + MultiTaskLassoCV, + Ridge, + RidgeClassifier, + RidgeClassifierCV, + RidgeCV, + enet_path, + lars_path, + lasso_path, +) +from sklearn.linear_model._coordinate_descent import _set_order +from sklearn.model_selection import ( + BaseCrossValidator, + GridSearchCV, + LeaveOneGroupOut, +) +from sklearn.model_selection._split import GroupsConsumerMixin +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils import check_array +from sklearn.utils._testing import ( + TempMemmap, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + + +@pytest.mark.parametrize("order", ["C", "F"]) +@pytest.mark.parametrize("input_order", ["C", "F"]) +def test_set_order_dense(order, input_order): + """Check that _set_order 
returns arrays with promised order.""" + X = np.array([[0], [0], [0]], order=input_order) + y = np.array([0, 0, 0], order=input_order) + X2, y2 = _set_order(X, y, order=order) + if order == "C": + assert X2.flags["C_CONTIGUOUS"] + assert y2.flags["C_CONTIGUOUS"] + elif order == "F": + assert X2.flags["F_CONTIGUOUS"] + assert y2.flags["F_CONTIGUOUS"] + + if order == input_order: + assert X is X2 + assert y is y2 + + +@pytest.mark.parametrize("order", ["C", "F"]) +@pytest.mark.parametrize("input_order", ["C", "F"]) +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_set_order_sparse(order, input_order, coo_container): + """Check that _set_order returns sparse matrices in promised format.""" + X = coo_container(np.array([[0], [0], [0]])) + y = coo_container(np.array([0, 0, 0])) + sparse_format = "csc" if input_order == "F" else "csr" + X = X.asformat(sparse_format) + y = X.asformat(sparse_format) + X2, y2 = _set_order(X, y, order=order) + + format = "csc" if order == "F" else "csr" + assert sparse.issparse(X2) and X2.format == format + assert sparse.issparse(y2) and y2.format == format + + +def test_lasso_zero(): + # Check that the lasso can handle zero data without crashing + X = [[0], [0], [0]] + y = [0, 0, 0] + clf = Lasso(alpha=0.1).fit(X, y) + pred = clf.predict([[1], [2], [3]]) + assert_array_almost_equal(clf.coef_, [0]) + assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_enet_nonfinite_params(): + # Check ElasticNet throws ValueError when dealing with non-finite parameter + # values + rng = np.random.RandomState(0) + n_samples = 10 + fmax = np.finfo(np.float64).max + X = fmax * rng.uniform(size=(n_samples, 2)) + y = rng.randint(0, 2, size=n_samples) + + clf = ElasticNet(alpha=0.1) + msg = "Coordinate descent iterations resulted in non-finite parameter values" + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_lasso_toy(): + # Test Lasso on a toy example for various values of alpha. + # When validating this against glmnet notice that glmnet divides it + # against nobs. + + X = [[-1], [0], [1]] + Y = [-1, 0, 1] # just a straight line + T = [[2], [3], [4]] # test sample + + clf = Lasso(alpha=1e-8) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=0.1) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.85]) + assert_array_almost_equal(pred, [1.7, 2.55, 3.4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.25]) + assert_array_almost_equal(pred, [0.5, 0.75, 1.0]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=1) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.0]) + assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_enet_toy(): + # Test ElasticNet for various parameters of alpha and l1_ratio. + # Actually, the parameters alpha = 0 should not be allowed. However, + # we test it as a border case. 
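+    # (For ElasticNet, alpha=0 removes the penalty entirely, i.e. plain
+    # ordinary least squares, for which LinearRegression is the recommended
+    # estimator.)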
+ # ElasticNet is tested with and without precomputed Gram matrix + + X = np.array([[-1.0], [0.0], [1.0]]) + Y = [-1, 0, 1] # just a straight line + T = [[2.0], [3.0], [4.0]] # test sample + + # this should be the same as lasso + clf = ElasticNet(alpha=1e-8, l1_ratio=1.0) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100, precompute=False) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf.set_params(max_iter=100, precompute=True) + clf.fit(X, Y) # with Gram + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf.set_params(max_iter=100, precompute=np.dot(X.T, X)) + clf.fit(X, Y) # with Gram + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.45454], 3) + assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_lasso_dual_gap(): + """ + Check that Lasso.dual_gap_ matches its objective formulation, with the + datafit normalized by n_samples + """ + X, y, _, _ = build_dataset(n_samples=10, n_features=30) + n_samples = len(y) + alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples + clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y) + w = clf.coef_ + R = y - X @ w + primal = 0.5 * np.mean(R**2) + clf.alpha * np.sum(np.abs(w)) + # dual pt: R / n_samples, dual constraint: norm(X.T @ theta, inf) <= alpha + R /= np.max(np.abs(X.T @ R) / (n_samples * alpha)) + dual = 0.5 * (np.mean(y**2) - np.mean((y - R) ** 2)) + assert_allclose(clf.dual_gap_, primal - dual) + + +def build_dataset(n_samples=50, n_features=200, n_informative_features=10, n_targets=1): + """ + build an ill-posed linear regression problem with many noisy features and + comparatively few samples + """ + random_state = np.random.RandomState(0) + if n_targets > 1: + w = random_state.randn(n_features, n_targets) + else: + w = random_state.randn(n_features) + w[n_informative_features:] = 0.0 + X = random_state.randn(n_samples, n_features) + y = np.dot(X, w) + X_test = random_state.randn(n_samples, n_features) + y_test = np.dot(X_test, w) + return X, y, X_test, y_test + + +def test_lasso_cv(): + X, y, X_test, y_test = build_dataset() + max_iter = 150 + clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.056, 2) + + clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True, cv=3) + clf.fit(X, y) + assert_almost_equal(clf.alpha_, 0.056, 2) + + # Check that the lars and the coordinate descent implementation + # select a similar alpha + lars = LassoLarsCV(max_iter=30, cv=3).fit(X, y) + # for this we check that they don't fall in the grid of + # clf.alphas further than 1 + assert ( + np.abs( + np.searchsorted(clf.alphas_[::-1], lars.alpha_) + - np.searchsorted(clf.alphas_[::-1], clf.alpha_) + ) + <= 1 + ) + # check that they also give a similar MSE + mse_lars = 
interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T) + np.testing.assert_approx_equal( + mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), significant=2 + ) + + # test set + assert clf.score(X_test, y_test) > 0.99 + + +def test_lasso_cv_with_some_model_selection(): + from sklearn import datasets + from sklearn.model_selection import ShuffleSplit + + diabetes = datasets.load_diabetes() + X = diabetes.data + y = diabetes.target + + pipe = make_pipeline(StandardScaler(), LassoCV(cv=ShuffleSplit(random_state=0))) + pipe.fit(X, y) + + +def test_lasso_cv_positive_constraint(): + X, y, X_test, y_test = build_dataset() + max_iter = 500 + + # Ensure the unconstrained fit has a negative coefficient + clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) + clf_unconstrained.fit(X, y) + assert min(clf_unconstrained.coef_) < 0 + + # On same data, constrained fit has non-negative coefficients + clf_constrained = LassoCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, positive=True, cv=2, n_jobs=1 + ) + clf_constrained.fit(X, y) + assert min(clf_constrained.coef_) >= 0 + + +@pytest.mark.parametrize( + "alphas, err_type, err_msg", + [ + ((1, -1, -100), ValueError, r"alphas\[1\] == -1, must be >= 0.0."), + ( + (-0.1, -1.0, -10.0), + ValueError, + r"alphas\[0\] == -0.1, must be >= 0.0.", + ), + ( + (1, 1.0, "1"), + TypeError, + r"alphas\[2\] must be an instance of float, not str", + ), + ], +) +def test_lassocv_alphas_validation(alphas, err_type, err_msg): + """Check the `alphas` validation in LassoCV.""" + + n_samples, n_features = 5, 5 + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + y = rng.randint(0, 2, n_samples) + lassocv = LassoCV(alphas=alphas) + with pytest.raises(err_type, match=err_msg): + lassocv.fit(X, y) + + +def _scale_alpha_inplace(estimator, n_samples): + """Rescale the parameter alpha from when the estimator is evoked with + normalize set to True as if it were evoked in a Pipeline with normalize set + to False and with a StandardScaler. + """ + if ("alpha" not in estimator.get_params()) and ( + "alphas" not in estimator.get_params() + ): + return + + if isinstance(estimator, (RidgeCV, RidgeClassifierCV)): + # alphas is not validated at this point and can be a list. + # We convert it to a np.ndarray to make sure broadcasting + # is used. 
+ alphas = np.asarray(estimator.alphas) * n_samples + return estimator.set_params(alphas=alphas) + if isinstance(estimator, (Lasso, LassoLars, MultiTaskLasso)): + alpha = estimator.alpha * np.sqrt(n_samples) + if isinstance(estimator, (Ridge, RidgeClassifier)): + alpha = estimator.alpha * n_samples + if isinstance(estimator, (ElasticNet, MultiTaskElasticNet)): + if estimator.l1_ratio == 1: + alpha = estimator.alpha * np.sqrt(n_samples) + elif estimator.l1_ratio == 0: + alpha = estimator.alpha * n_samples + else: + # To avoid silent errors in case of refactoring + raise NotImplementedError + + estimator.set_params(alpha=alpha) + + +@pytest.mark.parametrize( + "LinearModel, params", + [ + (Lasso, {"tol": 1e-16, "alpha": 0.1}), + (LassoCV, {"tol": 1e-16}), + (ElasticNetCV, {}), + (RidgeClassifier, {"solver": "sparse_cg", "alpha": 0.1}), + (ElasticNet, {"tol": 1e-16, "l1_ratio": 1, "alpha": 0.01}), + (ElasticNet, {"tol": 1e-16, "l1_ratio": 0, "alpha": 0.01}), + (Ridge, {"solver": "sparse_cg", "tol": 1e-12, "alpha": 0.1}), + (LinearRegression, {}), + (RidgeCV, {}), + (RidgeClassifierCV, {}), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_model_pipeline_same_dense_and_sparse(LinearModel, params, csr_container): + # Test that linear model preceded by StandardScaler in the pipeline and + # with normalize set to False gives the same y_pred and the same .coef_ + # given X sparse or dense + + model_dense = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params)) + + model_sparse = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params)) + + # prepare the data + rng = np.random.RandomState(0) + n_samples = 200 + n_features = 2 + X = rng.randn(n_samples, n_features) + X[X < 0.1] = 0.0 + + X_sparse = csr_container(X) + y = rng.rand(n_samples) + + if is_classifier(model_dense): + y = np.sign(y) + + model_dense.fit(X, y) + model_sparse.fit(X_sparse, y) + + assert_allclose(model_sparse[1].coef_, model_dense[1].coef_) + y_pred_dense = model_dense.predict(X) + y_pred_sparse = model_sparse.predict(X_sparse) + assert_allclose(y_pred_dense, y_pred_sparse) + + assert_allclose(model_dense[1].intercept_, model_sparse[1].intercept_) + + +def test_lasso_path_return_models_vs_new_return_gives_same_coefficients(): + # Test that lasso_path with lars_path style output gives the + # same result + + # Some toy data + X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T + y = np.array([1, 2, 3.1]) + alphas = [5.0, 1.0, 0.5] + + # Use lars_path and lasso_path(new output) with 1D linear interpolation + # to compute the same path + alphas_lars, _, coef_path_lars = lars_path(X, y, method="lasso") + coef_path_cont_lars = interpolate.interp1d( + alphas_lars[::-1], coef_path_lars[:, ::-1] + ) + alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas) + coef_path_cont_lasso = interpolate.interp1d( + alphas_lasso2[::-1], coef_path_lasso2[:, ::-1] + ) + + assert_array_almost_equal( + coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas), decimal=1 + ) + + +def test_enet_path(): + # We use a large number of samples and of informative features so that + # the l1_ratio selected is more toward ridge than lasso + X, y, X_test, y_test = build_dataset( + n_samples=200, n_features=100, n_informative_features=100 + ) + max_iter = 150 + + # Here we have a small number of iterations, and thus the + # ElasticNet might not converge. 
This is to speed up tests + clf = ElasticNetCV( + alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter + ) + ignore_warnings(clf.fit)(X, y) + # Well-conditioned settings, we should have selected our + # smallest penalty + assert_almost_equal(clf.alpha_, min(clf.alphas_)) + # Non-sparse ground truth: we should have selected an elastic-net + # that is closer to ridge than to lasso + assert clf.l1_ratio_ == min(clf.l1_ratio) + + clf = ElasticNetCV( + alphas=[0.01, 0.05, 0.1], + eps=2e-3, + l1_ratio=[0.5, 0.7], + cv=3, + max_iter=max_iter, + precompute=True, + ) + ignore_warnings(clf.fit)(X, y) + + # Well-conditioned settings, we should have selected our + # smallest penalty + assert_almost_equal(clf.alpha_, min(clf.alphas_)) + # Non-sparse ground truth: we should have selected an elastic-net + # that is closer to ridge than to lasso + assert clf.l1_ratio_ == min(clf.l1_ratio) + + # We are in well-conditioned settings with low noise: we should + # have a good test-set performance + assert clf.score(X_test, y_test) > 0.99 + + # Multi-output/target case + X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3) + clf = MultiTaskElasticNetCV( + n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter + ) + ignore_warnings(clf.fit)(X, y) + # We are in well-conditioned settings with low noise: we should + # have a good test-set performance + assert clf.score(X_test, y_test) > 0.99 + assert clf.coef_.shape == (3, 10) + + # Mono-output should have same cross-validated alpha_ and l1_ratio_ + # in both cases. + X, y, _, _ = build_dataset(n_features=10) + clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf1.fit(X, y) + clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf2.fit(X, y[:, np.newaxis]) + assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_) + assert_almost_equal(clf1.alpha_, clf2.alpha_) + + +def test_path_parameters(): + X, y, _, _ = build_dataset() + max_iter = 100 + + clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter, l1_ratio=0.5, tol=1e-3) + clf.fit(X, y) # new params + assert_almost_equal(0.5, clf.l1_ratio) + assert 50 == clf.n_alphas + assert 50 == len(clf.alphas_) + + +def test_warm_start(): + X, y, _, _ = build_dataset() + clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True) + ignore_warnings(clf.fit)(X, y) + ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations + + clf2 = ElasticNet(alpha=0.1, max_iter=10) + ignore_warnings(clf2.fit)(X, y) + assert_array_almost_equal(clf2.coef_, clf.coef_) + + +def test_lasso_alpha_warning(): + X = [[-1], [0], [1]] + Y = [-1, 0, 1] # just a straight line + + clf = Lasso(alpha=0) + warning_message = ( + "With alpha=0, this algorithm does not " + "converge well. 
You are advised to use the " + "LinearRegression estimator" + ) + with pytest.warns(UserWarning, match=warning_message): + clf.fit(X, Y) + + +def test_lasso_positive_constraint(): + X = [[-1], [0], [1]] + y = [1, 0, -1] # just a straight line with negative slope + + lasso = Lasso(alpha=0.1, positive=True) + lasso.fit(X, y) + assert min(lasso.coef_) >= 0 + + lasso = Lasso(alpha=0.1, precompute=True, positive=True) + lasso.fit(X, y) + assert min(lasso.coef_) >= 0 + + +def test_enet_positive_constraint(): + X = [[-1], [0], [1]] + y = [1, 0, -1] # just a straight line with negative slope + + enet = ElasticNet(alpha=0.1, positive=True) + enet.fit(X, y) + assert min(enet.coef_) >= 0 + + +def test_enet_cv_positive_constraint(): + X, y, X_test, y_test = build_dataset() + max_iter = 500 + + # Ensure the unconstrained fit has a negative coefficient + enetcv_unconstrained = ElasticNetCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1 + ) + enetcv_unconstrained.fit(X, y) + assert min(enetcv_unconstrained.coef_) < 0 + + # On same data, constrained fit has non-negative coefficients + enetcv_constrained = ElasticNetCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1 + ) + enetcv_constrained.fit(X, y) + assert min(enetcv_constrained.coef_) >= 0 + + +def test_uniform_targets(): + enet = ElasticNetCV(n_alphas=3) + m_enet = MultiTaskElasticNetCV(n_alphas=3) + lasso = LassoCV(n_alphas=3) + m_lasso = MultiTaskLassoCV(n_alphas=3) + + models_single_task = (enet, lasso) + models_multi_task = (m_enet, m_lasso) + + rng = np.random.RandomState(0) + + X_train = rng.random_sample(size=(10, 3)) + X_test = rng.random_sample(size=(10, 3)) + + y1 = np.empty(10) + y2 = np.empty((10, 2)) + + for model in models_single_task: + for y_values in (0, 5): + y1.fill(y_values) + assert_array_equal(model.fit(X_train, y1).predict(X_test), y1) + assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3) + + for model in models_multi_task: + for y_values in (0, 5): + y2[:, 0].fill(y_values) + y2[:, 1].fill(2 * y_values) + assert_array_equal(model.fit(X_train, y2).predict(X_test), y2) + assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3) + + +def test_multi_task_lasso_and_enet(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + # Y_test = np.c_[y_test, y_test] + clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1) + warning_message = ( + "Objective did not converge. You might want to " + "increase the number of iterations." 
+ ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, Y) + + +def test_lasso_readonly_data(): + X = np.array([[-1], [0], [1]]) + Y = np.array([-1, 0, 1]) # just a straight line + T = np.array([[2], [3], [4]]) # test sample + with TempMemmap((X, Y)) as (X, Y): + clf = Lasso(alpha=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.25]) + assert_array_almost_equal(pred, [0.5, 0.75, 1.0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_multi_task_lasso_readonly_data(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + with TempMemmap((X, Y)) as (X, Y): + Y = np.c_[y, y] + clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + +def test_enet_multitarget(): + n_targets = 3 + X, y, _, _ = build_dataset( + n_samples=10, n_features=8, n_informative_features=10, n_targets=n_targets + ) + estimator = ElasticNet(alpha=0.01) + estimator.fit(X, y) + coef, intercept, dual_gap = ( + estimator.coef_, + estimator.intercept_, + estimator.dual_gap_, + ) + + for k in range(n_targets): + estimator.fit(X, y[:, k]) + assert_array_almost_equal(coef[k, :], estimator.coef_) + assert_array_almost_equal(intercept[k], estimator.intercept_) + assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) + + +def test_multioutput_enetcv_error(): + rng = np.random.RandomState(0) + X = rng.randn(10, 2) + y = rng.randn(10, 2) + clf = ElasticNetCV() + with pytest.raises(ValueError): + clf.fit(X, y) + + +def test_multitask_enet_and_lasso_cv(): + X, y, _, _ = build_dataset(n_features=50, n_targets=3) + clf = MultiTaskElasticNetCV(cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.00556, 3) + clf = MultiTaskLassoCV(cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.00278, 3) + + X, y, _, _ = build_dataset(n_targets=3) + clf = MultiTaskElasticNetCV( + n_alphas=10, eps=1e-3, max_iter=100, l1_ratio=[0.3, 0.5], tol=1e-3, cv=3 + ) + clf.fit(X, y) + assert 0.5 == clf.l1_ratio_ + assert (3, X.shape[1]) == clf.coef_.shape + assert (3,) == clf.intercept_.shape + assert (2, 10, 3) == clf.mse_path_.shape + assert (2, 10) == clf.alphas_.shape + + X, y, _, _ = build_dataset(n_targets=3) + clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3, cv=3) + clf.fit(X, y) + assert (3, X.shape[1]) == clf.coef_.shape + assert (3,) == clf.intercept_.shape + assert (10, 3) == clf.mse_path_.shape + assert 10 == len(clf.alphas_) + + +def test_1d_multioutput_enet_and_multitask_enet_cv(): + X, y, _, _ = build_dataset(n_features=10) + y = y[:, np.newaxis] + clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf.fit(X, y[:, 0]) + clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf1.fit(X, y) + assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_) + assert_almost_equal(clf.alpha_, clf1.alpha_) + assert_almost_equal(clf.coef_, clf1.coef_[0]) + assert_almost_equal(clf.intercept_, clf1.intercept_[0]) + + +def test_1d_multioutput_lasso_and_multitask_lasso_cv(): + X, y, _, _ = build_dataset(n_features=10) + y = y[:, np.newaxis] + clf = LassoCV(n_alphas=5, eps=2e-3) + clf.fit(X, y[:, 0]) + clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3) + clf1.fit(X, y) + assert_almost_equal(clf.alpha_, clf1.alpha_) + assert_almost_equal(clf.coef_, clf1.coef_[0]) + assert_almost_equal(clf.intercept_, clf1.intercept_[0]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_dtype_enet_and_lassocv(csr_container): + X, y, _, _ = 
build_dataset(n_features=10) + clf = ElasticNetCV(n_alphas=5) + clf.fit(csr_container(X), y) + clf1 = ElasticNetCV(n_alphas=5) + clf1.fit(csr_container(X, dtype=np.float32), y) + assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) + assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) + + clf = LassoCV(n_alphas=5) + clf.fit(csr_container(X), y) + clf1 = LassoCV(n_alphas=5) + clf1.fit(csr_container(X, dtype=np.float32), y) + assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) + assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) + + +def test_elasticnet_precompute_incorrect_gram(): + # check that passing an invalid precomputed Gram matrix will raise an + # error. + X, y, _, _ = build_dataset() + + rng = np.random.RandomState(0) + + X_centered = X - np.average(X, axis=0) + garbage = rng.standard_normal(X.shape) + precompute = np.dot(garbage.T, garbage) + + clf = ElasticNet(alpha=0.01, precompute=precompute) + msg = "Gram matrix.*did not pass validation.*" + with pytest.raises(ValueError, match=msg): + clf.fit(X_centered, y) + + +def test_elasticnet_precompute_gram_weighted_samples(): + # check the equivalence between passing a precomputed Gram matrix and + # internal computation using sample weights. + X, y, _, _ = build_dataset() + + rng = np.random.RandomState(0) + sample_weight = rng.lognormal(size=y.shape) + + w_norm = sample_weight * (y.shape / np.sum(sample_weight)) + X_c = X - np.average(X, axis=0, weights=w_norm) + X_r = X_c * np.sqrt(w_norm)[:, np.newaxis] + gram = np.dot(X_r.T, X_r) + + clf1 = ElasticNet(alpha=0.01, precompute=gram) + clf1.fit(X_c, y, sample_weight=sample_weight) + + clf2 = ElasticNet(alpha=0.01, precompute=False) + clf2.fit(X, y, sample_weight=sample_weight) + + assert_allclose(clf1.coef_, clf2.coef_) + + +def test_elasticnet_precompute_gram(): + # Check the dtype-aware check for a precomputed Gram matrix + # (see https://github.com/scikit-learn/scikit-learn/pull/22059 + # and https://github.com/scikit-learn/scikit-learn/issues/21997). + # Here: (X_c.T, X_c)[2, 3] is not equal to np.dot(X_c[:, 2], X_c[:, 3]) + # but within tolerance for np.float32 + + rng = np.random.RandomState(58) + X = rng.binomial(1, 0.25, (1000, 4)).astype(np.float32) + y = rng.rand(1000).astype(np.float32) + + X_c = X - np.average(X, axis=0) + gram = np.dot(X_c.T, X_c) + + clf1 = ElasticNet(alpha=0.01, precompute=gram) + clf1.fit(X_c, y) + + clf2 = ElasticNet(alpha=0.01, precompute=False) + clf2.fit(X, y) + + assert_allclose(clf1.coef_, clf2.coef_) + + +def test_warm_start_convergence(): + X, y, _, _ = build_dataset() + model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y) + n_iter_reference = model.n_iter_ + + # This dataset is not trivial enough for the model to converge in one pass. + assert n_iter_reference > 2 + + # Check that n_iter_ is invariant to multiple calls to fit + # when warm_start=False, all else being equal. 
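+ # A cold-start refit repeats the full optimization from scratch, so the
+ # iteration count is expected to match the reference fit above exactly.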
+ model.fit(X, y) + n_iter_cold_start = model.n_iter_ + assert n_iter_cold_start == n_iter_reference + + # Fit the same model again, using a warm start: the optimizer just performs + # a single pass before checking that it has already converged + model.set_params(warm_start=True) + model.fit(X, y) + n_iter_warm_start = model.n_iter_ + assert n_iter_warm_start == 1 + + +def test_warm_start_convergence_with_regularizer_decrement(): + X, y = load_diabetes(return_X_y=True) + + # Train a model to converge on a lightly regularized problem + final_alpha = 1e-5 + low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y) + + # Fitting a new model on a more regularized version of the same problem. + # Fitting with high regularization is easier it should converge faster + # in general. + high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y) + assert low_reg_model.n_iter_ > high_reg_model.n_iter_ + + # Fit the solution to the original, less regularized version of the + # problem but from the solution of the highly regularized variant of + # the problem as a better starting point. This should also converge + # faster than the original model that starts from zero. + warm_low_reg_model = deepcopy(high_reg_model) + warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha) + warm_low_reg_model.fit(X, y) + assert low_reg_model.n_iter_ > warm_low_reg_model.n_iter_ + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_random_descent(csr_container): + # Test that both random and cyclic selection give the same results. + # Ensure that the test models fully converge and check a wide + # range of conditions. + + # This uses the coordinate descent algo using the gram trick. + X, y, _, _ = build_dataset(n_samples=50, n_features=20) + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X, y) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X, y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # This uses the descent algo without the gram trick + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X.T, y[:20]) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X.T, y[:20]) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # Sparse Case + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(csr_container(X), y) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(csr_container(X), y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # Multioutput case. 
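+ # The same cyclic vs. random equivalence is checked below for the
+ # multi-task variant of the estimator.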
+ new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) + clf_cyclic = MultiTaskElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X, new_y) + clf_random = MultiTaskElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X, new_y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + +def test_enet_path_positive(): + # Test positive parameter + + X, Y, _, _ = build_dataset(n_samples=50, n_features=50, n_targets=2) + + # For mono output + # Test that the coefs returned by positive=True in enet_path are positive + for path in [enet_path, lasso_path]: + pos_path_coef = path(X, Y[:, 0], positive=True)[1] + assert np.all(pos_path_coef >= 0) + + # For multi output, positive parameter is not allowed + # Test that an error is raised + for path in [enet_path, lasso_path]: + with pytest.raises(ValueError): + path(X, Y, positive=True) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_dense_descent_paths(csr_container): + # Test that dense and sparse input give the same input for descent paths. + X, y, _, _ = build_dataset(n_samples=50, n_features=20) + csr = csr_container(X) + for path in [enet_path, lasso_path]: + _, coefs, _ = path(X, y) + _, sparse_coefs, _ = path(csr, y) + assert_array_almost_equal(coefs, sparse_coefs) + + +@pytest.mark.parametrize("path_func", [enet_path, lasso_path]) +def test_path_unknown_parameter(path_func): + """Check that passing parameter not used by the coordinate descent solver + will raise an error.""" + X, y, _, _ = build_dataset(n_samples=50, n_features=20) + err_msg = "Unexpected parameters in params" + with pytest.raises(ValueError, match=err_msg): + path_func(X, y, normalize=True, fit_intercept=True) + + +def test_check_input_false(): + X, y, _, _ = build_dataset(n_samples=20, n_features=10) + X = check_array(X, order="F", dtype="float64") + y = check_array(X, order="F", dtype="float64") + clf = ElasticNet(selection="cyclic", tol=1e-8) + # Check that no error is raised if data is provided in the right format + clf.fit(X, y, check_input=False) + # With check_input=False, an exhaustive check is not made on y but its + # dtype is still cast in _preprocess_data to X's dtype. So the test should + # pass anyway + X = check_array(X, order="F", dtype="float32") + clf.fit(X, y, check_input=False) + # With no input checking, providing X in C order should result in false + # computation + X = check_array(X, order="C", dtype="float64") + with pytest.raises(ValueError): + clf.fit(X, y, check_input=False) + + +@pytest.mark.parametrize("check_input", [True, False]) +def test_enet_copy_X_True(check_input): + X, y, _, _ = build_dataset() + X = X.copy(order="F") + + original_X = X.copy() + enet = ElasticNet(copy_X=True) + enet.fit(X, y, check_input=check_input) + + assert_array_equal(original_X, X) + + +def test_enet_copy_X_False_check_input_False(): + X, y, _, _ = build_dataset() + X = X.copy(order="F") + + original_X = X.copy() + enet = ElasticNet(copy_X=False) + enet.fit(X, y, check_input=False) + + # No copying, X is overwritten + assert np.any(np.not_equal(original_X, X)) + + +def test_overrided_gram_matrix(): + X, y, _, _ = build_dataset(n_samples=20, n_features=10) + Gram = X.T.dot(X) + clf = ElasticNet(selection="cyclic", tol=1e-8, precompute=Gram) + warning_message = ( + "Gram matrix was provided but X was centered" + " to fit intercept: recomputing Gram matrix." 
+ ) + with pytest.warns(UserWarning, match=warning_message): + clf.fit(X, y) + + +@pytest.mark.parametrize("model", [ElasticNet, Lasso]) +def test_lasso_non_float_y(model): + X = [[0, 0], [1, 1], [-1, -1]] + y = [0, 1, 2] + y_float = [0.0, 1.0, 2.0] + + clf = model(fit_intercept=False) + clf.fit(X, y) + clf_float = model(fit_intercept=False) + clf_float.fit(X, y_float) + assert_array_equal(clf.coef_, clf_float.coef_) + + +def test_enet_float_precision(): + # Generate dataset + X, y, X_test, y_test = build_dataset(n_samples=20, n_features=10) + # Here we have a small number of iterations, and thus the + # ElasticNet might not converge. This is to speed up tests + + for fit_intercept in [True, False]: + coef = {} + intercept = {} + for dtype in [np.float64, np.float32]: + clf = ElasticNet( + alpha=0.5, + max_iter=100, + precompute=False, + fit_intercept=fit_intercept, + ) + + X = dtype(X) + y = dtype(y) + ignore_warnings(clf.fit)(X, y) + + coef[("simple", dtype)] = clf.coef_ + intercept[("simple", dtype)] = clf.intercept_ + + assert clf.coef_.dtype == dtype + + # test precompute Gram array + Gram = X.T.dot(X) + clf_precompute = ElasticNet( + alpha=0.5, + max_iter=100, + precompute=Gram, + fit_intercept=fit_intercept, + ) + ignore_warnings(clf_precompute.fit)(X, y) + assert_array_almost_equal(clf.coef_, clf_precompute.coef_) + assert_array_almost_equal(clf.intercept_, clf_precompute.intercept_) + + # test multi task enet + multi_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) + clf_multioutput = MultiTaskElasticNet( + alpha=0.5, + max_iter=100, + fit_intercept=fit_intercept, + ) + clf_multioutput.fit(X, multi_y) + coef[("multi", dtype)] = clf_multioutput.coef_ + intercept[("multi", dtype)] = clf_multioutput.intercept_ + assert clf.coef_.dtype == dtype + + for v in ["simple", "multi"]: + assert_array_almost_equal( + coef[(v, np.float32)], coef[(v, np.float64)], decimal=4 + ) + assert_array_almost_equal( + intercept[(v, np.float32)], intercept[(v, np.float64)], decimal=4 + ) + + +def test_enet_l1_ratio(): + # Test that an error message is raised if an estimator that + # uses _alpha_grid is called with l1_ratio=0 + msg = ( + "Automatic alpha grid generation is not supported for l1_ratio=0. " + "Please supply a grid by providing your estimator with the " + "appropriate `alphas=` argument." + ) + X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T + y = np.array([12, 10, 11, 21, 5]) + + with pytest.raises(ValueError, match=msg): + ElasticNetCV(l1_ratio=0, random_state=42).fit(X, y) + + with pytest.raises(ValueError, match=msg): + MultiTaskElasticNetCV(l1_ratio=0, random_state=42).fit(X, y[:, None]) + + # Test that l1_ratio=0 with alpha>0 produces user warning + warning_message = ( + "Coordinate descent without L1 regularization may " + "lead to unexpected results and is discouraged. " + "Set l1_ratio > 0 to add L1 regularization." 
+ ) + est = ElasticNetCV(l1_ratio=[0], alphas=[1]) + with pytest.warns(UserWarning, match=warning_message): + est.fit(X, y) + + # Test that l1_ratio=0 is allowed if we supply a grid manually + alphas = [0.1, 10] + estkwds = {"alphas": alphas, "random_state": 42} + est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds) + est = ElasticNetCV(l1_ratio=0, **estkwds) + with ignore_warnings(): + est_desired.fit(X, y) + est.fit(X, y) + assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5) + + est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds) + est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds) + with ignore_warnings(): + est.fit(X, y[:, None]) + est_desired.fit(X, y[:, None]) + assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5) + + +def test_coef_shape_not_zero(): + est_no_intercept = Lasso(fit_intercept=False) + est_no_intercept.fit(np.c_[np.ones(3)], np.ones(3)) + assert est_no_intercept.coef_.shape == (1,) + + +def test_warm_start_multitask_lasso(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + clf = MultiTaskLasso(alpha=0.1, max_iter=5, warm_start=True) + ignore_warnings(clf.fit)(X, Y) + ignore_warnings(clf.fit)(X, Y) # do a second round with 5 iterations + + clf2 = MultiTaskLasso(alpha=0.1, max_iter=10) + ignore_warnings(clf2.fit)(X, Y) + assert_array_almost_equal(clf2.coef_, clf.coef_) + + +@pytest.mark.parametrize( + "klass, n_classes, kwargs", + [ + (Lasso, 1, dict(precompute=True)), + (Lasso, 1, dict(precompute=False)), + (MultiTaskLasso, 2, dict()), + (MultiTaskLasso, 2, dict()), + ], +) +def test_enet_coordinate_descent(klass, n_classes, kwargs): + """Test that a warning is issued if model does not converge""" + clf = klass(max_iter=2, **kwargs) + n_samples = 5 + n_features = 2 + X = np.ones((n_samples, n_features)) * 1e50 + y = np.ones((n_samples, n_classes)) + if klass == Lasso: + y = y.ravel() + warning_message = ( + "Objective did not converge. You might want to" + " increase the number of iterations." 
+ ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, y) + + +def test_convergence_warnings(): + random_state = np.random.RandomState(0) + X = random_state.standard_normal((1000, 500)) + y = random_state.standard_normal((1000, 3)) + + # check that the model converges w/o convergence warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + MultiTaskElasticNet().fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_convergence_warning(csr_container): + X, y, _, _ = build_dataset(n_samples=1000, n_features=500) + + with pytest.warns(ConvergenceWarning): + ElasticNet(max_iter=1, tol=0).fit(csr_container(X, dtype=np.float32), y) + + # check that the model converges w/o convergence warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + Lasso().fit(csr_container(X, dtype=np.float32), y) + + +@pytest.mark.parametrize( + "precompute, inner_precompute", + [ + (True, True), + ("auto", False), + (False, False), + ], +) +def test_lassoCV_does_not_set_precompute(monkeypatch, precompute, inner_precompute): + X, y, _, _ = build_dataset() + calls = 0 + + class LassoMock(Lasso): + def fit(self, X, y): + super().fit(X, y) + nonlocal calls + calls += 1 + assert self.precompute == inner_precompute + + monkeypatch.setattr("sklearn.linear_model._coordinate_descent.Lasso", LassoMock) + clf = LassoCV(precompute=precompute) + clf.fit(X, y) + assert calls > 0 + + +def test_multi_task_lasso_cv_dtype(): + n_samples, n_features = 10, 3 + rng = np.random.RandomState(42) + X = rng.binomial(1, 0.5, size=(n_samples, n_features)) + X = X.astype(int) # make it explicit that X is int + y = X[:, [0, 0]].copy() + est = MultiTaskLassoCV(n_alphas=5, fit_intercept=True).fit(X, y) + assert_array_almost_equal(est.coef_, [[1, 0, 0]] * 2, decimal=3) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("alpha", [0.01]) +@pytest.mark.parametrize("precompute", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_enet_sample_weight_consistency( + fit_intercept, alpha, precompute, sparse_container, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weights_invariance alone and also tests sparse X. + """ + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + precompute=precompute, + tol=1e-6, + l1_ratio=0.5, + ) + + reg = ElasticNet(**params).fit(X, y) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # 1) sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect, cf. 
np.average() + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + reg = reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + reg.fit(X, y, sample_weight=np.pi * sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 4) setting elements of sample_weight to 0 is equivalent to removing these samples + sample_weight_0 = sample_weight.copy() + sample_weight_0[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight_0) + coef_0 = reg.coef_.copy() + if fit_intercept: + intercept_0 = reg.intercept_ + reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5]) + assert_allclose(reg.coef_, coef_0, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept_0) + + # 5) check that multiplying sample_weight by 2 is equivalent to repeating + # corresponding samples twice + if sparse_container is not None: + X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc") + else: + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + + reg1 = ElasticNet(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = ElasticNet(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_enet_cv_sample_weight_correctness(fit_intercept, sparse_container): + """Test that ElasticNetCV with sample weights gives correct results.""" + rng = np.random.RandomState(42) + n_splits, n_samples, n_features = 3, 10, 5 + X = rng.rand(n_splits * n_samples, n_features) + beta = rng.rand(n_features) + beta[0:2] = 0 + y = X @ beta + rng.rand(n_splits * n_samples) + sw = np.ones_like(y) + if sparse_container is not None: + X = sparse_container(X) + params = dict(tol=1e-6) + + # Set alphas, otherwise the two cv models might use different ones. + if fit_intercept: + alphas = np.linspace(0.001, 0.01, num=91) + else: + alphas = np.linspace(0.01, 0.1, num=91) + + # We weight the first fold 2 times more. + sw[:n_samples] = 2 + groups_sw = np.r_[ + np.full(n_samples, 0), np.full(n_samples, 1), np.full(n_samples, 2) + ] + splits_sw = list(LeaveOneGroupOut().split(X, groups=groups_sw)) + reg_sw = ElasticNetCV( + alphas=alphas, cv=splits_sw, fit_intercept=fit_intercept, **params + ) + reg_sw.fit(X, y, sample_weight=sw) + + # We repeat the first fold 2 times and provide splits ourselves + if sparse_container is not None: + X = X.toarray() + X = np.r_[X[:n_samples], X] + if sparse_container is not None: + X = sparse_container(X) + y = np.r_[y[:n_samples], y] + groups = np.r_[ + np.full(2 * n_samples, 0), np.full(n_samples, 1), np.full(n_samples, 2) + ] + splits = list(LeaveOneGroupOut().split(X, groups=groups)) + reg = ElasticNetCV(alphas=alphas, cv=splits, fit_intercept=fit_intercept, **params) + reg.fit(X, y) + + # ensure that we chose meaningful alphas, i.e. 
not boundaries + assert alphas[0] < reg.alpha_ < alphas[-1] + assert reg_sw.alpha_ == reg.alpha_ + assert_allclose(reg_sw.coef_, reg.coef_) + assert reg_sw.intercept_ == pytest.approx(reg.intercept_) + + +@pytest.mark.parametrize("sample_weight", [False, True]) +def test_enet_cv_grid_search(sample_weight): + """Test that ElasticNetCV gives same result as GridSearchCV.""" + n_samples, n_features = 200, 10 + cv = 5 + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + effective_rank=10, + n_informative=n_features - 4, + noise=10, + random_state=0, + ) + if sample_weight: + sample_weight = np.linspace(1, 5, num=n_samples) + else: + sample_weight = None + + alphas = np.logspace(np.log10(1e-5), np.log10(1), num=10) + l1_ratios = [0.1, 0.5, 0.9] + reg = ElasticNetCV(cv=cv, alphas=alphas, l1_ratio=l1_ratios) + reg.fit(X, y, sample_weight=sample_weight) + + param = {"alpha": alphas, "l1_ratio": l1_ratios} + gs = GridSearchCV( + estimator=ElasticNet(), + param_grid=param, + cv=cv, + scoring="neg_mean_squared_error", + ).fit(X, y, sample_weight=sample_weight) + + assert reg.l1_ratio_ == pytest.approx(gs.best_params_["l1_ratio"]) + assert reg.alpha_ == pytest.approx(gs.best_params_["alpha"]) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("l1_ratio", [0, 0.5, 1]) +@pytest.mark.parametrize("precompute", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_enet_cv_sample_weight_consistency( + fit_intercept, l1_ratio, precompute, sparse_container +): + """Test that the impact of sample_weight is consistent.""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = X.sum(axis=1) + rng.rand(n_samples) + params = dict( + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + precompute=precompute, + tol=1e-6, + cv=3, + ) + if sparse_container is not None: + X = sparse_container(X) + + if l1_ratio == 0: + params.pop("l1_ratio", None) + reg = LassoCV(**params).fit(X, y) + else: + reg = ElasticNetCV(**params).fit(X, y) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # scaling of sample_weight should have no effect, cf. np.average() + sample_weight = 2 * np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + +@pytest.mark.parametrize("estimator", [ElasticNetCV, LassoCV]) +def test_linear_models_cv_fit_with_loky(estimator): + # LinearModelsCV.fit performs inplace operations on fancy-indexed memmapped + # data when using the loky backend, causing an error due to unexpected + # behavior of fancy indexing of read-only memmaps (cf. numpy#14132). + + # Create a problem sufficiently large to cause memmapping (1MB). + # Unfortunately the scikit-learn and joblib APIs do not make it possible to + # change the max_nbyte of the inner Parallel call. 
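+ # Note: the ~1 MB size below is assumed to exceed joblib's default
+ # auto-memmapping threshold (max_nbytes="1M"), so the fancy-indexed CV
+ # folds end up backed by read-only memmaps under the loky backend.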
+ X, y = make_regression(int(1e6) // 8 + 1, 1) + assert X.nbytes > 1e6 # 1 MB + with joblib.parallel_backend("loky"): + estimator(n_jobs=2, cv=3).fit(X, y) + + +@pytest.mark.parametrize("check_input", [True, False]) +def test_enet_sample_weight_does_not_overwrite_sample_weight(check_input): + """Check that ElasticNet does not overwrite sample_weights.""" + + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + sample_weight_1_25 = 1.25 * np.ones_like(y) + sample_weight = sample_weight_1_25.copy() + + reg = ElasticNet() + reg.fit(X, y, sample_weight=sample_weight, check_input=check_input) + + assert_array_equal(sample_weight, sample_weight_1_25) + + +@pytest.mark.parametrize("ridge_alpha", [1e-1, 1.0, 1e6]) +def test_enet_ridge_consistency(ridge_alpha): + # Check that ElasticNet(l1_ratio=0) converges to the same solution as Ridge + # provided that the value of alpha is adapted. + # + # XXX: this test does not pass for weaker regularization (lower values of + # ridge_alpha): it could be either a problem of ElasticNet or Ridge (less + # likely) and depends on the dataset statistics: lower values for + # effective_rank are more problematic in particular. + + rng = np.random.RandomState(42) + n_samples = 300 + X, y = make_regression( + n_samples=n_samples, + n_features=100, + effective_rank=10, + n_informative=50, + random_state=rng, + ) + sw = rng.uniform(low=0.01, high=10, size=X.shape[0]) + alpha = 1.0 + common_params = dict( + tol=1e-12, + ) + ridge = Ridge(alpha=alpha, **common_params).fit(X, y, sample_weight=sw) + + alpha_enet = alpha / sw.sum() + enet = ElasticNet(alpha=alpha_enet, l1_ratio=0, **common_params).fit( + X, y, sample_weight=sw + ) + assert_allclose(ridge.coef_, enet.coef_) + assert_allclose(ridge.intercept_, enet.intercept_) + + +@pytest.mark.parametrize( + "estimator", + [ + Lasso(alpha=1.0), + ElasticNet(alpha=1.0, l1_ratio=0.1), + ], +) +def test_sample_weight_invariance(estimator): + rng = np.random.RandomState(42) + X, y = make_regression( + n_samples=100, + n_features=300, + effective_rank=10, + n_informative=50, + random_state=rng, + ) + sw = rng.uniform(low=0.01, high=2, size=X.shape[0]) + params = dict(tol=1e-12) + + # Check that setting some weights to 0 is equivalent to trimming the + # samples: + cutoff = X.shape[0] // 3 + sw_with_null = sw.copy() + sw_with_null[:cutoff] = 0.0 + X_trimmed, y_trimmed = X[cutoff:, :], y[cutoff:] + sw_trimmed = sw[cutoff:] + + reg_trimmed = ( + clone(estimator) + .set_params(**params) + .fit(X_trimmed, y_trimmed, sample_weight=sw_trimmed) + ) + reg_null_weighted = ( + clone(estimator).set_params(**params).fit(X, y, sample_weight=sw_with_null) + ) + assert_allclose(reg_null_weighted.coef_, reg_trimmed.coef_) + assert_allclose(reg_null_weighted.intercept_, reg_trimmed.intercept_) + + # Check that duplicating the training dataset is equivalent to multiplying + # the weights by 2: + X_dup = np.concatenate([X, X], axis=0) + y_dup = np.concatenate([y, y], axis=0) + sw_dup = np.concatenate([sw, sw], axis=0) + + reg_2sw = clone(estimator).set_params(**params).fit(X, y, sample_weight=2 * sw) + reg_dup = ( + clone(estimator).set_params(**params).fit(X_dup, y_dup, sample_weight=sw_dup) + ) + + assert_allclose(reg_2sw.coef_, reg_dup.coef_) + assert_allclose(reg_2sw.intercept_, reg_dup.intercept_) + + +def test_read_only_buffer(): + """Test that sparse coordinate descent works for read-only buffers""" + + rng = np.random.RandomState(0) + clf = ElasticNet(alpha=0.1, 
copy_X=True, random_state=rng) + X = np.asfortranarray(rng.uniform(size=(100, 10))) + X.setflags(write=False) + + y = rng.rand(100) + clf.fit(X, y) + + +@pytest.mark.parametrize( + "EstimatorCV", + [ElasticNetCV, LassoCV, MultiTaskElasticNetCV, MultiTaskLassoCV], +) +def test_cv_estimators_reject_params_with_no_routing_enabled(EstimatorCV): + """Check that the models inheriting from class:`LinearModelCV` raise an + error when any `params` are passed when routing is not enabled. + """ + X, y = make_regression(random_state=42) + groups = np.array([0, 1] * (len(y) // 2)) + estimator = EstimatorCV() + msg = "is only supported if enable_metadata_routing=True" + with pytest.raises(ValueError, match=msg): + estimator.fit(X, y, groups=groups) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "MultiTaskEstimatorCV", + [MultiTaskElasticNetCV, MultiTaskLassoCV], +) +def test_multitask_cv_estimators_with_sample_weight(MultiTaskEstimatorCV): + """Check that for :class:`MultiTaskElasticNetCV` and + class:`MultiTaskLassoCV` if `sample_weight` is passed and the + CV splitter does not support `sample_weight` an error is raised. + On the other hand if the splitter does support `sample_weight` + while `sample_weight` is passed there is no error and process + completes smoothly as before. + """ + + class CVSplitter(BaseCrossValidator, GroupsConsumerMixin): + def get_n_splits(self, X=None, y=None, groups=None, metadata=None): + pass # pragma: nocover + + class CVSplitterSampleWeight(CVSplitter): + def split(self, X, y=None, groups=None, sample_weight=None): + split_index = len(X) // 2 + train_indices = list(range(0, split_index)) + test_indices = list(range(split_index, len(X))) + yield test_indices, train_indices + yield train_indices, test_indices + + X, y = make_regression(random_state=42, n_targets=2) + sample_weight = np.ones(X.shape[0]) + + # If CV splitter does not support sample_weight an error is raised + splitter = CVSplitter().set_split_request(groups=True) + estimator = MultiTaskEstimatorCV(cv=splitter) + msg = "do not support sample weights" + with pytest.raises(ValueError, match=msg): + estimator.fit(X, y, sample_weight=sample_weight) + + # If CV splitter does support sample_weight no error is raised + splitter = CVSplitterSampleWeight().set_split_request( + groups=True, sample_weight=True + ) + estimator = MultiTaskEstimatorCV(cv=splitter) + estimator.fit(X, y, sample_weight=sample_weight) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py new file mode 100644 index 0000000000000000000000000000000000000000..3856d74464f0b31851095d5298c91b8cf79fd9fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py @@ -0,0 +1,216 @@ +# Authors: Manoj Kumar mks542@nyu.edu +# License: BSD 3 clause + +import numpy as np +import pytest +from scipy import optimize + +from sklearn.datasets import make_regression +from sklearn.linear_model import HuberRegressor, LinearRegression, Ridge, SGDRegressor +from sklearn.linear_model._huber import _huber_loss_and_gradient +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def make_regression_with_outliers(n_samples=50, n_features=20): + rng = np.random.RandomState(0) + # Generate data with outliers by replacing 10% of the samples with noise. 
+ X, y = make_regression(
+ n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05
+ )
+
+ # Replace 10% of the samples with noise.
+ num_noise = int(0.1 * n_samples)
+ random_samples = rng.randint(0, n_samples, num_noise)
+ X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
+ return X, y
+
+
+ def test_huber_equals_lr_for_high_epsilon():
+ # Test that HuberRegressor matches LinearRegression for large epsilon
+ X, y = make_regression_with_outliers()
+ lr = LinearRegression()
+ lr.fit(X, y)
+ huber = HuberRegressor(epsilon=1e3, alpha=0.0)
+ huber.fit(X, y)
+ assert_almost_equal(huber.coef_, lr.coef_, 3)
+ assert_almost_equal(huber.intercept_, lr.intercept_, 2)
+
+
+ def test_huber_max_iter():
+ X, y = make_regression_with_outliers()
+ huber = HuberRegressor(max_iter=1)
+ huber.fit(X, y)
+ assert huber.n_iter_ == huber.max_iter
+
+
+ def test_huber_gradient():
+ # Test that the gradient calculated by _huber_loss_and_gradient is correct
+ rng = np.random.RandomState(1)
+ X, y = make_regression_with_outliers()
+ sample_weight = rng.randint(1, 3, (y.shape[0]))
+
+ def loss_func(x, *args):
+ return _huber_loss_and_gradient(x, *args)[0]
+
+ def grad_func(x, *args):
+ return _huber_loss_and_gradient(x, *args)[1]
+
+ # Check using optimize.check_grad that the gradients are equal.
+ for _ in range(5):
+ # Check both with and without fit_intercept.
+ for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
+ w = rng.randn(n_features)
+ w[-1] = np.abs(w[-1])
+ grad_same = optimize.check_grad(
+ loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight
+ )
+ assert_almost_equal(grad_same, 1e-6, 4)
+
+
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+ def test_huber_sample_weights(csr_container):
+ # Test the sample_weight implementation in HuberRegressor.
+
+ X, y = make_regression_with_outliers()
+ huber = HuberRegressor()
+ huber.fit(X, y)
+ huber_coef = huber.coef_
+ huber_intercept = huber.intercept_
+
+ # Rescale coefs before comparing with assert_array_almost_equal to make
+ # sure that the number of decimal places used is somewhat insensitive to
+ # the amplitude of the coefficients and therefore to the scale of the
+ # data and the regularization parameter
+ scale = max(np.mean(np.abs(huber.coef_)), np.mean(np.abs(huber.intercept_)))
+
+ huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
+ assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
+ assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
+
+ X, y = make_regression_with_outliers(n_samples=5, n_features=20)
+ X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
+ y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
+ huber.fit(X_new, y_new)
+ huber_coef = huber.coef_
+ huber_intercept = huber.intercept_
+ sample_weight = np.ones(X.shape[0])
+ sample_weight[1] = 3
+ sample_weight[3] = 2
+ huber.fit(X, y, sample_weight=sample_weight)
+
+ assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
+ assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
+
+ # Test sparse implementation with sample weights.
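+ # The sparse fit with the same weights should recover (approximately)
+ # the dense weighted coefficients computed above.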
+ X_csr = csr_container(X)
+ huber_sparse = HuberRegressor()
+ huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
+ assert_array_almost_equal(huber_sparse.coef_ / scale, huber_coef / scale)
+
+
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+ def test_huber_sparse(csr_container):
+ X, y = make_regression_with_outliers()
+ huber = HuberRegressor(alpha=0.1)
+ huber.fit(X, y)
+
+ X_csr = csr_container(X)
+ huber_sparse = HuberRegressor(alpha=0.1)
+ huber_sparse.fit(X_csr, y)
+ assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
+ assert_array_equal(huber.outliers_, huber_sparse.outliers_)
+
+
+ def test_huber_scaling_invariant():
+ # Test that outliers filtering is scaling independent.
+ X, y = make_regression_with_outliers()
+ huber = HuberRegressor(fit_intercept=False, alpha=0.0)
+ huber.fit(X, y)
+ n_outliers_mask_1 = huber.outliers_
+ assert not np.all(n_outliers_mask_1)
+
+ huber.fit(X, 2.0 * y)
+ n_outliers_mask_2 = huber.outliers_
+ assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
+
+ huber.fit(2.0 * X, 2.0 * y)
+ n_outliers_mask_3 = huber.outliers_
+ assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
+
+
+ def test_huber_and_sgd_same_results():
+ # Test that HuberRegressor and SGDRegressor with huber loss converge to
+ # the same coefficients for the same parameters
+
+ X, y = make_regression_with_outliers(n_samples=10, n_features=2)
+
+ # Fit once to find out the scale parameter. Scale down X and y by scale
+ # so that the scale parameter is optimized to 1.0
+ huber = HuberRegressor(fit_intercept=False, alpha=0.0, epsilon=1.35)
+ huber.fit(X, y)
+ X_scale = X / huber.scale_
+ y_scale = y / huber.scale_
+ huber.fit(X_scale, y_scale)
+ assert_almost_equal(huber.scale_, 1.0, 3)
+
+ sgdreg = SGDRegressor(
+ alpha=0.0,
+ loss="huber",
+ shuffle=True,
+ random_state=0,
+ max_iter=10000,
+ fit_intercept=False,
+ epsilon=1.35,
+ tol=None,
+ )
+ sgdreg.fit(X_scale, y_scale)
+ assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
+
+
+ def test_huber_warm_start():
+ X, y = make_regression_with_outliers()
+ huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
+
+ huber_warm.fit(X, y)
+ huber_warm_coef = huber_warm.coef_.copy()
+ huber_warm.fit(X, y)
+
+ # SciPy performs the tol check after doing the coef updates, so
+ # these would be almost the same but not equal.
+ assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
+
+ assert huber_warm.n_iter_ == 0
+
+
+ def test_huber_better_r2_score():
+ # Test that Huber gets a better r2 score than Ridge on the non-outliers.
+ X, y = make_regression_with_outliers()
+ huber = HuberRegressor(alpha=0.01)
+ huber.fit(X, y)
+ linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
+ mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
+ huber_score = huber.score(X[mask], y[mask])
+ huber_outlier_score = huber.score(X[~mask], y[~mask])
+
+ # The Ridge regressor should be influenced by the outliers and hence
+ # give a worse score on the non-outliers as compared to the Huber
+ # regressor.
+ ridge = Ridge(alpha=0.01)
+ ridge.fit(X, y)
+ ridge_score = ridge.score(X[mask], y[mask])
+ ridge_outlier_score = ridge.score(X[~mask], y[~mask])
+ assert huber_score > ridge_score
+
+ # The Huber model should also fit poorly on the outliers.
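+ # i.e. Ridge, which is pulled towards the outliers, scores better on
+ # them than the Huber model does.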
+ assert ridge_outlier_score > huber_outlier_score + + +def test_huber_bool(): + # Test that it does not crash with bool data + X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0) + X_bool = X > 0 + HuberRegressor().fit(X_bool, y) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_least_angle.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_least_angle.py new file mode 100644 index 0000000000000000000000000000000000000000..50c6a7a95626e58163c1ec5bbd5bbed8922b9fcc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_least_angle.py @@ -0,0 +1,870 @@ +import warnings + +import numpy as np +import pytest +from scipy import linalg + +from sklearn import datasets, linear_model +from sklearn.base import clone +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + Lars, + LarsCV, + LassoLars, + LassoLarsCV, + LassoLarsIC, + lars_path, +) +from sklearn.linear_model._least_angle import _lars_path_residues +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils._testing import ( + TempMemmap, + assert_allclose, + assert_array_almost_equal, + ignore_warnings, +) + +# TODO: use another dataset that has multiple drops +diabetes = datasets.load_diabetes() +X, y = diabetes.data, diabetes.target +G = np.dot(X.T, X) +Xy = np.dot(X.T, y) +n_samples = y.size + + +def test_simple(): + # Principle of Lars is to keep covariances tied and decreasing + + # also test verbose output + import sys + from io import StringIO + + old_stdout = sys.stdout + try: + sys.stdout = StringIO() + + _, _, coef_path_ = linear_model.lars_path(X, y, method="lar", verbose=10) + + sys.stdout = old_stdout + + for i, coef_ in enumerate(coef_path_.T): + res = y - np.dot(X, coef_) + cov = np.dot(X.T, res) + C = np.max(abs(cov)) + eps = 1e-3 + ocur = len(cov[C - eps < abs(cov)]) + if i < X.shape[1]: + assert ocur == i + 1 + else: + # no more than max_pred variables can go into the active set + assert ocur == X.shape[1] + finally: + sys.stdout = old_stdout + + +def test_simple_precomputed(): + # The same, with precomputed Gram matrix + + _, _, coef_path_ = linear_model.lars_path(X, y, Gram=G, method="lar") + + for i, coef_ in enumerate(coef_path_.T): + res = y - np.dot(X, coef_) + cov = np.dot(X.T, res) + C = np.max(abs(cov)) + eps = 1e-3 + ocur = len(cov[C - eps < abs(cov)]) + if i < X.shape[1]: + assert ocur == i + 1 + else: + # no more than max_pred variables can go into the active set + assert ocur == X.shape[1] + + +def _assert_same_lars_path_result(output1, output2): + assert len(output1) == len(output2) + for o1, o2 in zip(output1, output2): + assert_allclose(o1, o2) + + +@pytest.mark.parametrize("method", ["lar", "lasso"]) +@pytest.mark.parametrize("return_path", [True, False]) +def test_lars_path_gram_equivalent(method, return_path): + _assert_same_lars_path_result( + linear_model.lars_path_gram( + Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path + ), + linear_model.lars_path(X, y, Gram=G, method=method, return_path=return_path), + ) + + +def test_x_none_gram_none_raises_value_error(): + # Test that lars_path with no X and Gram raises exception + Xy = np.dot(X.T, y) + with pytest.raises(ValueError, match="X and Gram cannot both be unspecified"): + linear_model.lars_path(None, y, Gram=None, Xy=Xy) + + +def test_all_precomputed(): + # 
Test that lars_path with precomputed Gram and Xy gives the right answer + G = np.dot(X.T, X) + Xy = np.dot(X.T, y) + for method in "lar", "lasso": + output = linear_model.lars_path(X, y, method=method) + output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) + for expected, got in zip(output, output_pre): + assert_array_almost_equal(expected, got) + + +@pytest.mark.filterwarnings("ignore: `rcond` parameter will change") +# numpy deprecation +def test_lars_lstsq(): + # Test that Lars gives least square solution at the end + # of the path + X1 = 3 * X # use un-normalized dataset + clf = linear_model.LassoLars(alpha=0.0) + clf.fit(X1, y) + coef_lstsq = np.linalg.lstsq(X1, y, rcond=None)[0] + assert_array_almost_equal(clf.coef_, coef_lstsq) + + +@pytest.mark.filterwarnings("ignore:`rcond` parameter will change") +# numpy deprecation +def test_lasso_gives_lstsq_solution(): + # Test that Lars Lasso gives least square solution at the end + # of the path + _, _, coef_path_ = linear_model.lars_path(X, y, method="lasso") + coef_lstsq = np.linalg.lstsq(X, y)[0] + assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) + + +def test_collinearity(): + # Check that lars_path is robust to collinearity in input + X = np.array([[3.0, 3.0, 1.0], [2.0, 2.0, 0.0], [1.0, 1.0, 0]]) + y = np.array([1.0, 0.0, 0]) + rng = np.random.RandomState(0) + + f = ignore_warnings + _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) + assert not np.isnan(coef_path_).any() + residual = np.dot(X, coef_path_[:, -1]) - y + assert (residual**2).sum() < 1.0 # just make sure it's bounded + + n_samples = 10 + X = rng.rand(n_samples, 5) + y = np.zeros(n_samples) + _, _, coef_path_ = linear_model.lars_path( + X, + y, + Gram="auto", + copy_X=False, + copy_Gram=False, + alpha_min=0.0, + method="lasso", + verbose=0, + max_iter=500, + ) + assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) + + +def test_no_path(): + # Test that the ``return_path=False`` option returns the correct output + alphas_, _, coef_path_ = linear_model.lars_path(X, y, method="lar") + alpha_, _, coef = linear_model.lars_path(X, y, method="lar", return_path=False) + + assert_array_almost_equal(coef, coef_path_[:, -1]) + assert alpha_ == alphas_[-1] + + +def test_no_path_precomputed(): + # Test that the ``return_path=False`` option with Gram remains correct + alphas_, _, coef_path_ = linear_model.lars_path(X, y, method="lar", Gram=G) + alpha_, _, coef = linear_model.lars_path( + X, y, method="lar", Gram=G, return_path=False + ) + + assert_array_almost_equal(coef, coef_path_[:, -1]) + assert alpha_ == alphas_[-1] + + +def test_no_path_all_precomputed(): + # Test that the ``return_path=False`` option with Gram and Xy remains + # correct + X, y = 3 * diabetes.data, diabetes.target + G = np.dot(X.T, X) + Xy = np.dot(X.T, y) + alphas_, _, coef_path_ = linear_model.lars_path( + X, y, method="lasso", Xy=Xy, Gram=G, alpha_min=0.9 + ) + alpha_, _, coef = linear_model.lars_path( + X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False + ) + + assert_array_almost_equal(coef, coef_path_[:, -1]) + assert alpha_ == alphas_[-1] + + +@pytest.mark.parametrize( + "classifier", [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC] +) +def test_lars_precompute(classifier): + # Check for different values of precompute + G = np.dot(X.T, X) + + clf = classifier(precompute=G) + output_1 = ignore_warnings(clf.fit)(X, y).coef_ + for precompute in [True, False, "auto", None]: + clf = classifier(precompute=precompute) + 
output_2 = clf.fit(X, y).coef_ + assert_array_almost_equal(output_1, output_2, decimal=8) + + +def test_singular_matrix(): + # Test when input is a singular matrix + X1 = np.array([[1, 1.0], [1.0, 1.0]]) + y1 = np.array([1, 1]) + _, _, coef_path = linear_model.lars_path(X1, y1) + assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) + + +def test_rank_deficient_design(): + # consistency test that checks that LARS Lasso is handling rank + # deficient input data (with n_features < rank) in the same way + # as coordinate descent Lasso + y = [5, 0, 5] + for X in ([[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]]): + # To be able to use the coefs to compute the objective function, + # we need to turn off normalization + lars = linear_model.LassoLars(0.1) + coef_lars_ = lars.fit(X, y).coef_ + obj_lars = 1.0 / (2.0 * 3.0) * linalg.norm( + y - np.dot(X, coef_lars_) + ) ** 2 + 0.1 * linalg.norm(coef_lars_, 1) + coord_descent = linear_model.Lasso(0.1, tol=1e-6) + coef_cd_ = coord_descent.fit(X, y).coef_ + obj_cd = (1.0 / (2.0 * 3.0)) * linalg.norm( + y - np.dot(X, coef_cd_) + ) ** 2 + 0.1 * linalg.norm(coef_cd_, 1) + assert obj_lars < obj_cd * (1.0 + 1e-8) + + +def test_lasso_lars_vs_lasso_cd(): + # Test that LassoLars and Lasso using coordinate descent give the + # same results. + X = 3 * diabetes.data + + alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso") + lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) + for c, a in zip(lasso_path.T, alphas): + if a == 0: + continue + lasso_cd.alpha = a + lasso_cd.fit(X, y) + error = linalg.norm(c - lasso_cd.coef_) + assert error < 0.01 + + # similar test, with the classifiers + for alpha in np.linspace(1e-2, 1 - 1e-2, 20): + clf1 = linear_model.LassoLars(alpha=alpha).fit(X, y) + clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8).fit(X, y) + err = linalg.norm(clf1.coef_ - clf2.coef_) + assert err < 1e-3 + + # same test, with normalized data + X = diabetes.data + X = X - X.sum(axis=0) + X /= np.linalg.norm(X, axis=0) + alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso") + lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) + for c, a in zip(lasso_path.T, alphas): + if a == 0: + continue + lasso_cd.alpha = a + lasso_cd.fit(X, y) + error = linalg.norm(c - lasso_cd.coef_) + assert error < 0.01 + + +def test_lasso_lars_vs_lasso_cd_early_stopping(): + # Test that LassoLars and Lasso using coordinate descent give the + # same results when early stopping is used. 
+ # (test : before, in the middle, and in the last part of the path) + alphas_min = [10, 0.9, 1e-4] + + X = diabetes.data + + for alpha_min in alphas_min: + alphas, _, lasso_path = linear_model.lars_path( + X, y, method="lasso", alpha_min=alpha_min + ) + lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) + lasso_cd.alpha = alphas[-1] + lasso_cd.fit(X, y) + error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) + assert error < 0.01 + + # same test, with normalization + X = diabetes.data - diabetes.data.sum(axis=0) + X /= np.linalg.norm(X, axis=0) + + for alpha_min in alphas_min: + alphas, _, lasso_path = linear_model.lars_path( + X, y, method="lasso", alpha_min=alpha_min + ) + lasso_cd = linear_model.Lasso(tol=1e-8) + lasso_cd.alpha = alphas[-1] + lasso_cd.fit(X, y) + error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) + assert error < 0.01 + + +def test_lasso_lars_path_length(): + # Test that the path length of the LassoLars is right + lasso = linear_model.LassoLars() + lasso.fit(X, y) + lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) + lasso2.fit(X, y) + assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) + # Also check that the sequence of alphas is always decreasing + assert np.all(np.diff(lasso.alphas_) < 0) + + +def test_lasso_lars_vs_lasso_cd_ill_conditioned(): + # Test lasso lars on a very ill-conditioned design, and check that + # it does not blow up, and stays somewhat close to a solution given + # by the coordinate descent solver + # Also test that lasso_path (using lars_path output style) gives + # the same result as lars_path and previous lasso output style + # under these conditions. + rng = np.random.RandomState(42) + + # Generate data + n, m = 70, 100 + k = 5 + X = rng.randn(n, m) + w = np.zeros((m, 1)) + i = np.arange(0, m) + rng.shuffle(i) + supp = i[:k] + w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) + y = np.dot(X, w) + sigma = 0.2 + y += sigma * rng.rand(*y.shape) + y = y.squeeze() + lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method="lasso") + + _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6) + + assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) + + +def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): + # Create an ill-conditioned situation in which the LARS has to go + # far in the path to converge, and check that LARS and coordinate + # descent give the same answers + # Note it used to be the case that Lars had to use the drop for good + # strategy for this but this is no longer the case with the + # equality_tolerance checks + X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] + y = [10, 10, 1] + alpha = 0.0001 + + def objective_function(coef): + return 1.0 / (2.0 * len(X)) * linalg.norm( + y - np.dot(X, coef) + ) ** 2 + alpha * linalg.norm(coef, 1) + + lars = linear_model.LassoLars(alpha=alpha) + warning_message = "Regressors in active set degenerate." 
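+ # The degenerate design is expected to trigger a ConvergenceWarning; the
+ # resulting objective is still compared against coordinate descent below.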
+ with pytest.warns(ConvergenceWarning, match=warning_message): + lars.fit(X, y) + lars_coef_ = lars.coef_ + lars_obj = objective_function(lars_coef_) + + coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4) + cd_coef_ = coord_descent.fit(X, y).coef_ + cd_obj = objective_function(cd_coef_) + + assert lars_obj < cd_obj * (1.0 + 1e-8) + + +def test_lars_add_features(): + # assure that at least some features get added if necessary + # test for 6d2b4c + # Hilbert matrix + n = 5 + H = 1.0 / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) + clf = linear_model.Lars(fit_intercept=False).fit(H, np.arange(n)) + assert np.all(np.isfinite(clf.coef_)) + + +def test_lars_n_nonzero_coefs(verbose=False): + lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) + lars.fit(X, y) + assert len(lars.coef_.nonzero()[0]) == 6 + # The path should be of length 6 + 1 in a Lars going down to 6 + # non-zero coefs + assert len(lars.alphas_) == 7 + + +@ignore_warnings +def test_multitarget(): + # Assure that estimators receiving multidimensional y do the right thing + Y = np.vstack([y, y**2]).T + n_targets = Y.shape[1] + estimators = [ + linear_model.LassoLars(), + linear_model.Lars(), + # regression test for gh-1615 + linear_model.LassoLars(fit_intercept=False), + linear_model.Lars(fit_intercept=False), + ] + + for estimator in estimators: + estimator.fit(X, Y) + Y_pred = estimator.predict(X) + alphas, active, coef, path = ( + estimator.alphas_, + estimator.active_, + estimator.coef_, + estimator.coef_path_, + ) + for k in range(n_targets): + estimator.fit(X, Y[:, k]) + y_pred = estimator.predict(X) + assert_array_almost_equal(alphas[k], estimator.alphas_) + assert_array_almost_equal(active[k], estimator.active_) + assert_array_almost_equal(coef[k], estimator.coef_) + assert_array_almost_equal(path[k], estimator.coef_path_) + assert_array_almost_equal(Y_pred[:, k], y_pred) + + +def test_lars_cv(): + # Test the LassoLarsCV object by checking that the optimal alpha + # increases as the number of samples increases. + # This property is not actually guaranteed in general and is just a + # property of the given dataset, with the given steps chosen. + old_alpha = 0 + lars_cv = linear_model.LassoLarsCV() + for length in (400, 200, 100): + X = diabetes.data[:length] + y = diabetes.target[:length] + lars_cv.fit(X, y) + np.testing.assert_array_less(old_alpha, lars_cv.alpha_) + old_alpha = lars_cv.alpha_ + assert not hasattr(lars_cv, "n_nonzero_coefs") + + +def test_lars_cv_max_iter(recwarn): + warnings.simplefilter("always") + with np.errstate(divide="raise", invalid="raise"): + X = diabetes.data + y = diabetes.target + rng = np.random.RandomState(42) + x = rng.randn(len(y)) + X = diabetes.data + X = np.c_[X, x, x] # add correlated features + X = StandardScaler().fit_transform(X) + lars_cv = linear_model.LassoLarsCV(max_iter=5, cv=5) + lars_cv.fit(X, y) + + # Check that there is no warning in general and no ConvergenceWarning + # in particular. + # Materialize the string representation of the warning to get a more + # informative error message in case of AssertionError. + recorded_warnings = [str(w) for w in recwarn] + assert len(recorded_warnings) == 0 + + +def test_lasso_lars_ic(): + # Test the LassoLarsIC object by checking that + # - some good features are selected. 
+ # - alpha_bic > alpha_aic + # - n_nonzero_bic < n_nonzero_aic + lars_bic = linear_model.LassoLarsIC("bic") + lars_aic = linear_model.LassoLarsIC("aic") + rng = np.random.RandomState(42) + X = diabetes.data + X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features + X = StandardScaler().fit_transform(X) + lars_bic.fit(X, y) + lars_aic.fit(X, y) + nonzero_bic = np.where(lars_bic.coef_)[0] + nonzero_aic = np.where(lars_aic.coef_)[0] + assert lars_bic.alpha_ > lars_aic.alpha_ + assert len(nonzero_bic) < len(nonzero_aic) + assert np.max(nonzero_bic) < diabetes.data.shape[1] + + +def test_lars_path_readonly_data(): + # When using automated memory mapping on large input, the + # fold data is in read-only mode + # This is a non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/4597 + splitted_data = train_test_split(X, y, random_state=42) + with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): + # The following should not fail despite copy=False + _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) + + +def test_lars_path_positive_constraint(): + # this is the main test for the positive parameter on the lars_path method + # the estimator classes just make use of this function + + # we do the test on the diabetes dataset + + # ensure that we get negative coefficients when positive=False + # and all positive when positive=True + # for method 'lar' (default) and lasso + + err_msg = "Positive constraint not supported for 'lar' coding method." + with pytest.raises(ValueError, match=err_msg): + linear_model.lars_path( + diabetes["data"], diabetes["target"], method="lar", positive=True + ) + + method = "lasso" + _, _, coefs = linear_model.lars_path( + X, y, return_path=True, method=method, positive=False + ) + assert coefs.min() < 0 + + _, _, coefs = linear_model.lars_path( + X, y, return_path=True, method=method, positive=True + ) + assert coefs.min() >= 0 + + +# now we gonna test the positive option for all estimator classes + +default_parameter = {"fit_intercept": False} + +estimator_parameter_map = { + "LassoLars": {"alpha": 0.1}, + "LassoLarsCV": {}, + "LassoLarsIC": {}, +} + + +def test_estimatorclasses_positive_constraint(): + # testing the transmissibility for the positive option of all estimator + # classes in this same function here + default_parameter = {"fit_intercept": False} + + estimator_parameter_map = { + "LassoLars": {"alpha": 0.1}, + "LassoLarsCV": {}, + "LassoLarsIC": {}, + } + for estname in estimator_parameter_map: + params = default_parameter.copy() + params.update(estimator_parameter_map[estname]) + estimator = getattr(linear_model, estname)(positive=False, **params) + estimator.fit(X, y) + assert estimator.coef_.min() < 0 + estimator = getattr(linear_model, estname)(positive=True, **params) + estimator.fit(X, y) + assert min(estimator.coef_) >= 0 + + +def test_lasso_lars_vs_lasso_cd_positive(): + # Test that LassoLars and Lasso using coordinate descent give the + # same results when using the positive option + + # This test is basically a copy of the above with additional positive + # option. However for the middle part, the comparison of coefficient values + # for a range of alphas, we had to make an adaptations. See below. 
+ + # not normalized data + X = 3 * diabetes.data + + alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso", positive=True) + lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) + for c, a in zip(lasso_path.T, alphas): + if a == 0: + continue + lasso_cd.alpha = a + lasso_cd.fit(X, y) + error = linalg.norm(c - lasso_cd.coef_) + assert error < 0.01 + + # The range of alphas chosen for coefficient comparison here is restricted + # as compared with the above test without the positive option. This is due + # to the circumstance that the Lars-Lasso algorithm does not converge to + # the least-squares-solution for small alphas, see 'Least Angle Regression' + # by Efron et al 2004. The coefficients are typically in congruence up to + # the smallest alpha reached by the Lars-Lasso algorithm and start to + # diverge thereafter. See + # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff + + for alpha in np.linspace(6e-1, 1 - 1e-2, 20): + clf1 = linear_model.LassoLars( + fit_intercept=False, alpha=alpha, positive=True + ).fit(X, y) + clf2 = linear_model.Lasso( + fit_intercept=False, alpha=alpha, tol=1e-8, positive=True + ).fit(X, y) + err = linalg.norm(clf1.coef_ - clf2.coef_) + assert err < 1e-3 + + # normalized data + X = diabetes.data - diabetes.data.sum(axis=0) + X /= np.linalg.norm(X, axis=0) + alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso", positive=True) + lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) + for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 + lasso_cd.alpha = a + lasso_cd.fit(X, y) + error = linalg.norm(c - lasso_cd.coef_) + assert error < 0.01 + + +def test_lasso_lars_vs_R_implementation(): + # Test that sklearn LassoLars implementation agrees with the LassoLars + # implementation available in R (lars library) when fit_intercept=False. 
+ + # Let's generate the data used in the bug report 7778 + y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) + x = np.array( + [ + [0.47299829, 0, 0, 0, 0], + [0.08239882, 0.85784863, 0, 0, 0], + [0.30114139, -0.07501577, 0.80895216, 0, 0], + [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], + [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291], + ] + ) + + X = x.T + + # The R result was obtained using the following code: + # + # library(lars) + # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE, + # trace=TRUE, normalize=FALSE) + # r = t(model_lasso_lars$beta) + # + + r = np.array( + [ + [ + 0, + 0, + 0, + 0, + 0, + -79.810362809499026, + -83.528788732782829, + -83.777653739190711, + -83.784156932888934, + -84.033390591756657, + ], + [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936], + [ + 0, + -3.577397088285891, + -4.702795355871871, + -7.016748621359461, + -7.614898471899412, + -0.336938391359179, + 0, + 0, + 0.001213370600853, + 0.048162321585148, + ], + [ + 0, + 0, + 0, + 2.231558436628169, + 2.723267514525966, + 2.811549786389614, + 2.813766976061531, + 2.817462468949557, + 2.817368178703816, + 2.816221090636795, + ], + [ + 0, + 0, + -1.218422599914637, + -3.457726183014808, + -4.021304522060710, + -45.827461592423745, + -47.776608869312305, + -47.911561610746404, + -47.914845922736234, + -48.039562334265717, + ], + ] + ) + + model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False) + model_lasso_lars.fit(X, y) + skl_betas = model_lasso_lars.coef_path_ + + assert_array_almost_equal(r, skl_betas, decimal=12) + + +@pytest.mark.parametrize("copy_X", [True, False]) +def test_lasso_lars_copyX_behaviour(copy_X): + """ + Test that user input regarding copy_X is not being overridden (it was until + at least version 0.21) + + """ + lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) + rng = np.random.RandomState(0) + X = rng.normal(0, 1, (100, 5)) + X_copy = X.copy() + y = X[:, 2] + lasso_lars.fit(X, y) + assert copy_X == np.array_equal(X, X_copy) + + +@pytest.mark.parametrize("copy_X", [True, False]) +def test_lasso_lars_fit_copyX_behaviour(copy_X): + """ + Test that user input to .fit for copy_X overrides default __init__ value + + """ + lasso_lars = LassoLarsIC(precompute=False) + rng = np.random.RandomState(0) + X = rng.normal(0, 1, (100, 5)) + X_copy = X.copy() + y = X[:, 2] + lasso_lars.fit(X, y, copy_X=copy_X) + assert copy_X == np.array_equal(X, X_copy) + + +@pytest.mark.parametrize("est", (LassoLars(alpha=1e-3), Lars())) +def test_lars_with_jitter(est): + # Test that a small amount of jitter helps stability, + # using example provided in issue #2746 + + X = np.array([[0.0, 0.0, 0.0, -1.0, 0.0], [0.0, -1.0, 0.0, 0.0, 0.0]]) + y = [-2.5, -2.5] + expected_coef = [0, 2.5, 0, 2.5, 0] + + # set to fit_intercept to False since target is constant and we want check + # the value of coef. coef would be all zeros otherwise. 
+ est.set_params(fit_intercept=False) + est_jitter = clone(est).set_params(jitter=10e-8, random_state=0) + + est.fit(X, y) + est_jitter.fit(X, y) + + assert np.mean((est.coef_ - est_jitter.coef_) ** 2) > 0.1 + np.testing.assert_allclose(est_jitter.coef_, expected_coef, rtol=1e-3) + + +def test_X_none_gram_not_none(): + with pytest.raises(ValueError, match="X cannot be None if Gram is not None"): + lars_path(X=None, y=np.array([1]), Gram=True) + + +def test_copy_X_with_auto_gram(): + # Non-regression test for #17789, `copy_X=True` and Gram='auto' does not + # overwrite X + rng = np.random.RandomState(42) + X = rng.rand(6, 6) + y = rng.rand(6) + + X_before = X.copy() + linear_model.lars_path(X, y, Gram="auto", copy_X=True, method="lasso") + # X did not change + assert_allclose(X, X_before) + + +@pytest.mark.parametrize( + "LARS, has_coef_path, args", + ( + (Lars, True, {}), + (LassoLars, True, {}), + (LassoLarsIC, False, {}), + (LarsCV, True, {}), + # max_iter=5 is for avoiding ConvergenceWarning + (LassoLarsCV, True, {"max_iter": 5}), + ), +) +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +def test_lars_dtype_match(LARS, has_coef_path, args, dtype): + # The test ensures that the fit method preserves input dtype + rng = np.random.RandomState(0) + X = rng.rand(20, 6).astype(dtype) + y = rng.rand(20).astype(dtype) + + model = LARS(**args) + model.fit(X, y) + assert model.coef_.dtype == dtype + if has_coef_path: + assert model.coef_path_.dtype == dtype + assert model.intercept_.dtype == dtype + + +@pytest.mark.parametrize( + "LARS, has_coef_path, args", + ( + (Lars, True, {}), + (LassoLars, True, {}), + (LassoLarsIC, False, {}), + (LarsCV, True, {}), + # max_iter=5 is for avoiding ConvergenceWarning + (LassoLarsCV, True, {"max_iter": 5}), + ), +) +def test_lars_numeric_consistency(LARS, has_coef_path, args): + # The test ensures numerical consistency between trained coefficients + # of float32 and float64. + rtol = 1e-5 + atol = 1e-5 + + rng = np.random.RandomState(0) + X_64 = rng.rand(10, 6) + y_64 = rng.rand(10) + + model_64 = LARS(**args).fit(X_64, y_64) + model_32 = LARS(**args).fit(X_64.astype(np.float32), y_64.astype(np.float32)) + + assert_allclose(model_64.coef_, model_32.coef_, rtol=rtol, atol=atol) + if has_coef_path: + assert_allclose(model_64.coef_path_, model_32.coef_path_, rtol=rtol, atol=atol) + assert_allclose(model_64.intercept_, model_32.intercept_, rtol=rtol, atol=atol) + + +@pytest.mark.parametrize("criterion", ["aic", "bic"]) +def test_lassolarsic_alpha_selection(criterion): + """Check that we properly compute the AIC and BIC score. + + In this test, we reproduce the example of the Fig. 2 of Zou et al. + (reference [1] in LassoLarsIC) In this example, only 7 features should be + selected. 
+ """ + model = make_pipeline(StandardScaler(), LassoLarsIC(criterion=criterion)) + model.fit(X, y) + + best_alpha_selected = np.argmin(model[-1].criterion_) + assert best_alpha_selected == 7 + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_lassolarsic_noise_variance(fit_intercept): + """Check the behaviour when `n_samples` < `n_features` and that one needs + to provide the noise variance.""" + rng = np.random.RandomState(0) + X, y = datasets.make_regression( + n_samples=10, n_features=11 - fit_intercept, random_state=rng + ) + + model = make_pipeline(StandardScaler(), LassoLarsIC(fit_intercept=fit_intercept)) + + err_msg = ( + "You are using LassoLarsIC in the case where the number of samples is smaller" + " than the number of features" + ) + with pytest.raises(ValueError, match=err_msg): + model.fit(X, y) + + model.set_params(lassolarsic__noise_variance=1.0) + model.fit(X, y).predict(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_linear_loss.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_linear_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..659ff134198db778070f7941837b1a0b32a80a5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_linear_loss.py @@ -0,0 +1,356 @@ +""" +Tests for LinearModelLoss + +Note that correctness of losses (which compose LinearModelLoss) is already well +covered in the _loss module. +""" +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import linalg, optimize + +from sklearn._loss.loss import ( + HalfBinomialLoss, + HalfMultinomialLoss, + HalfPoissonLoss, +) +from sklearn.datasets import make_low_rank_matrix +from sklearn.linear_model._linear_loss import LinearModelLoss +from sklearn.utils.extmath import squared_norm +from sklearn.utils.fixes import CSR_CONTAINERS + +# We do not need to test all losses, just what LinearModelLoss does on top of the +# base losses. +LOSSES = [HalfBinomialLoss, HalfMultinomialLoss, HalfPoissonLoss] + + +def random_X_y_coef( + linear_model_loss, n_samples, n_features, coef_bound=(-2, 2), seed=42 +): + """Random generate y, X and coef in valid range.""" + rng = np.random.RandomState(seed) + n_dof = n_features + linear_model_loss.fit_intercept + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features, + random_state=rng, + ) + coef = linear_model_loss.init_zero_coef(X) + + if linear_model_loss.base_loss.is_multiclass: + n_classes = linear_model_loss.base_loss.n_classes + coef.flat[:] = rng.uniform( + low=coef_bound[0], + high=coef_bound[1], + size=n_classes * n_dof, + ) + if linear_model_loss.fit_intercept: + raw_prediction = X @ coef[:, :-1].T + coef[:, -1] + else: + raw_prediction = X @ coef.T + proba = linear_model_loss.base_loss.link.inverse(raw_prediction) + + # y = rng.choice(np.arange(n_classes), p=proba) does not work. 
+ # See https://stackoverflow.com/a/34190035/16761084 + def choice_vectorized(items, p): + s = p.cumsum(axis=1) + r = rng.rand(p.shape[0])[:, None] + k = (s < r).sum(axis=1) + return items[k] + + y = choice_vectorized(np.arange(n_classes), p=proba).astype(np.float64) + else: + coef.flat[:] = rng.uniform( + low=coef_bound[0], + high=coef_bound[1], + size=n_dof, + ) + if linear_model_loss.fit_intercept: + raw_prediction = X @ coef[:-1] + coef[-1] + else: + raw_prediction = X @ coef + y = linear_model_loss.base_loss.link.inverse( + raw_prediction + rng.uniform(low=-1, high=1, size=n_samples) + ) + + return X, y, coef + + +@pytest.mark.parametrize("base_loss", LOSSES) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("n_features", [0, 1, 10]) +@pytest.mark.parametrize("dtype", [None, np.float32, np.float64, np.int64]) +def test_init_zero_coef(base_loss, fit_intercept, n_features, dtype): + """Test that init_zero_coef initializes coef correctly.""" + loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept) + rng = np.random.RandomState(42) + X = rng.normal(size=(5, n_features)) + coef = loss.init_zero_coef(X, dtype=dtype) + if loss.base_loss.is_multiclass: + n_classes = loss.base_loss.n_classes + assert coef.shape == (n_classes, n_features + fit_intercept) + assert coef.flags["F_CONTIGUOUS"] + else: + assert coef.shape == (n_features + fit_intercept,) + + if dtype is None: + assert coef.dtype == X.dtype + else: + assert coef.dtype == dtype + + assert np.count_nonzero(coef) == 0 + + +@pytest.mark.parametrize("base_loss", LOSSES) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +@pytest.mark.parametrize("l2_reg_strength", [0, 1]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_loss_grad_hess_are_the_same( + base_loss, fit_intercept, sample_weight, l2_reg_strength, csr_container +): + """Test that loss and gradient are the same across different functions.""" + loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept) + X, y, coef = random_X_y_coef( + linear_model_loss=loss, n_samples=10, n_features=5, seed=42 + ) + + if sample_weight == "range": + sample_weight = np.linspace(1, y.shape[0], num=y.shape[0]) + + l1 = loss.loss( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + g1 = loss.gradient( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + l2, g2 = loss.loss_gradient( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + g3, h3 = loss.gradient_hessian_product( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + if not base_loss.is_multiclass: + g4, h4, _ = loss.gradient_hessian( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + else: + with pytest.raises(NotImplementedError): + loss.gradient_hessian( + coef, + X, + y, + sample_weight=sample_weight, + l2_reg_strength=l2_reg_strength, + ) + + assert_allclose(l1, l2) + assert_allclose(g1, g2) + assert_allclose(g1, g3) + if not base_loss.is_multiclass: + assert_allclose(g1, g4) + assert_allclose(h4 @ g4, h3(g3)) + + # same for sparse X + X = csr_container(X) + l1_sp = loss.loss( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + g1_sp = loss.gradient( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + l2_sp, g2_sp = loss.loss_gradient( + coef, X, y, sample_weight=sample_weight, 
l2_reg_strength=l2_reg_strength + ) + g3_sp, h3_sp = loss.gradient_hessian_product( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + if not base_loss.is_multiclass: + g4_sp, h4_sp, _ = loss.gradient_hessian( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + + assert_allclose(l1, l1_sp) + assert_allclose(l1, l2_sp) + assert_allclose(g1, g1_sp) + assert_allclose(g1, g2_sp) + assert_allclose(g1, g3_sp) + assert_allclose(h3(g1), h3_sp(g1_sp)) + if not base_loss.is_multiclass: + assert_allclose(g1, g4_sp) + assert_allclose(h4 @ g4, h4_sp @ g1_sp) + + +@pytest.mark.parametrize("base_loss", LOSSES) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +@pytest.mark.parametrize("l2_reg_strength", [0, 1]) +@pytest.mark.parametrize("X_container", CSR_CONTAINERS + [None]) +def test_loss_gradients_hessp_intercept( + base_loss, sample_weight, l2_reg_strength, X_container +): + """Test that loss and gradient handle intercept correctly.""" + loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=False) + loss_inter = LinearModelLoss(base_loss=base_loss(), fit_intercept=True) + n_samples, n_features = 10, 5 + X, y, coef = random_X_y_coef( + linear_model_loss=loss, n_samples=n_samples, n_features=n_features, seed=42 + ) + + X[:, -1] = 1 # make last column of 1 to mimic intercept term + X_inter = X[ + :, :-1 + ] # exclude intercept column as it is added automatically by loss_inter + + if X_container is not None: + X = X_container(X) + + if sample_weight == "range": + sample_weight = np.linspace(1, y.shape[0], num=y.shape[0]) + + l, g = loss.loss_gradient( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + _, hessp = loss.gradient_hessian_product( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + l_inter, g_inter = loss_inter.loss_gradient( + coef, X_inter, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + _, hessp_inter = loss_inter.gradient_hessian_product( + coef, X_inter, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + + # Note, that intercept gets no L2 penalty. + assert l == pytest.approx( + l_inter + 0.5 * l2_reg_strength * squared_norm(coef.T[-1]) + ) + + g_inter_corrected = g_inter + g_inter_corrected.T[-1] += l2_reg_strength * coef.T[-1] + assert_allclose(g, g_inter_corrected) + + s = np.random.RandomState(42).randn(*coef.shape) + h = hessp(s) + h_inter = hessp_inter(s) + h_inter_corrected = h_inter + h_inter_corrected.T[-1] += l2_reg_strength * s.T[-1] + assert_allclose(h, h_inter_corrected) + + +@pytest.mark.parametrize("base_loss", LOSSES) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +@pytest.mark.parametrize("l2_reg_strength", [0, 1]) +def test_gradients_hessians_numerically( + base_loss, fit_intercept, sample_weight, l2_reg_strength +): + """Test gradients and hessians with numerical derivatives. + + Gradient should equal the numerical derivatives of the loss function. + Hessians should equal the numerical derivatives of gradients. + """ + loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept) + n_samples, n_features = 10, 5 + X, y, coef = random_X_y_coef( + linear_model_loss=loss, n_samples=n_samples, n_features=n_features, seed=42 + ) + coef = coef.ravel(order="F") # this is important only for multinomial loss + + if sample_weight == "range": + sample_weight = np.linspace(1, y.shape[0], num=y.shape[0]) + + # 1. 
Check gradients numerically + eps = 1e-6 + g, hessp = loss.gradient_hessian_product( + coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength + ) + # Use a trick to get central finite difference of accuracy 4 (five-point stencil) + # https://en.wikipedia.org/wiki/Numerical_differentiation + # https://en.wikipedia.org/wiki/Finite_difference_coefficient + # approx_g1 = (f(x + eps) - f(x - eps)) / (2*eps) + approx_g1 = optimize.approx_fprime( + coef, + lambda coef: loss.loss( + coef - eps, + X, + y, + sample_weight=sample_weight, + l2_reg_strength=l2_reg_strength, + ), + 2 * eps, + ) + # approx_g2 = (f(x + 2*eps) - f(x - 2*eps)) / (4*eps) + approx_g2 = optimize.approx_fprime( + coef, + lambda coef: loss.loss( + coef - 2 * eps, + X, + y, + sample_weight=sample_weight, + l2_reg_strength=l2_reg_strength, + ), + 4 * eps, + ) + # Five-point stencil approximation + # See: https://en.wikipedia.org/wiki/Five-point_stencil#1D_first_derivative + approx_g = (4 * approx_g1 - approx_g2) / 3 + assert_allclose(g, approx_g, rtol=1e-2, atol=1e-8) + + # 2. Check hessp numerically along the second direction of the gradient + vector = np.zeros_like(g) + vector[1] = 1 + hess_col = hessp(vector) + # Computation of the Hessian is particularly fragile to numerical errors when doing + # simple finite differences. Here we compute the grad along a path in the direction + # of the vector and then use a least-square regression to estimate the slope + eps = 1e-3 + d_x = np.linspace(-eps, eps, 30) + d_grad = np.array( + [ + loss.gradient( + coef + t * vector, + X, + y, + sample_weight=sample_weight, + l2_reg_strength=l2_reg_strength, + ) + for t in d_x + ] + ) + d_grad -= d_grad.mean(axis=0) + approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel() + assert_allclose(approx_hess_col, hess_col, rtol=1e-3) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_multinomial_coef_shape(fit_intercept): + """Test that multinomial LinearModelLoss respects shape of coef.""" + loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=fit_intercept) + n_samples, n_features = 10, 5 + X, y, coef = random_X_y_coef( + linear_model_loss=loss, n_samples=n_samples, n_features=n_features, seed=42 + ) + s = np.random.RandomState(42).randn(*coef.shape) + + l, g = loss.loss_gradient(coef, X, y) + g1 = loss.gradient(coef, X, y) + g2, hessp = loss.gradient_hessian_product(coef, X, y) + h = hessp(s) + assert g.shape == coef.shape + assert h.shape == coef.shape + assert_allclose(g, g1) + assert_allclose(g, g2) + + coef_r = coef.ravel(order="F") + s_r = s.ravel(order="F") + l_r, g_r = loss.loss_gradient(coef_r, X, y) + g1_r = loss.gradient(coef_r, X, y) + g2_r, hessp_r = loss.gradient_hessian_product(coef_r, X, y) + h_r = hessp_r(s_r) + assert g_r.shape == coef_r.shape + assert h_r.shape == coef_r.shape + assert_allclose(g_r, g1_r) + assert_allclose(g_r, g2_r) + + assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order="F")) + assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order="F")) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py new file mode 100644 index 0000000000000000000000000000000000000000..9974090135ac501da0935ee3048a112f305eebcf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py @@ -0,0 +1,2194 @@ +import itertools +import os +import warnings +from functools import 
partial + +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from scipy import sparse + +from sklearn import config_context +from sklearn.base import clone +from sklearn.datasets import load_iris, make_classification +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import SGDClassifier +from sklearn.linear_model._logistic import ( + LogisticRegression as LogisticRegressionDefault, +) +from sklearn.linear_model._logistic import ( + LogisticRegressionCV as LogisticRegressionCVDefault, +) +from sklearn.linear_model._logistic import ( + _log_reg_scoring_path, + _logistic_regression_path, +) +from sklearn.metrics import get_scorer, log_loss +from sklearn.model_selection import ( + GridSearchCV, + StratifiedKFold, + cross_val_score, + train_test_split, +) +from sklearn.preprocessing import LabelEncoder, StandardScaler, scale +from sklearn.svm import l1_min_c +from sklearn.utils import _IS_32BIT, compute_class_weight, shuffle +from sklearn.utils._testing import ignore_warnings, skip_if_no_parallel +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +pytestmark = pytest.mark.filterwarnings( + "error::sklearn.exceptions.ConvergenceWarning:sklearn.*" +) +# Fixing random_state helps prevent ConvergenceWarnings +LogisticRegression = partial(LogisticRegressionDefault, random_state=0) +LogisticRegressionCV = partial(LogisticRegressionCVDefault, random_state=0) + + +SOLVERS = ("lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga") +X = [[-1, 0], [0, 1], [1, 1]] +Y1 = [0, 1, 1] +Y2 = [2, 1, 0] +iris = load_iris() + + +def check_predictions(clf, X, y): + """Check that the model is able to fit the classification data""" + n_samples = len(y) + classes = np.unique(y) + n_classes = classes.shape[0] + + predicted = clf.fit(X, y).predict(X) + assert_array_equal(clf.classes_, classes) + + assert predicted.shape == (n_samples,) + assert_array_equal(predicted, y) + + probabilities = clf.predict_proba(X) + assert probabilities.shape == (n_samples, n_classes) + assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) + assert_array_equal(probabilities.argmax(axis=1), y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_2_classes(csr_container): + # Simple sanity check on a 2 classes dataset + # Make sure it predicts the correct result on simple datasets. 
+ check_predictions(LogisticRegression(random_state=0), X, Y1) + check_predictions(LogisticRegression(random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(C=100, random_state=0), X, Y1) + check_predictions(LogisticRegression(C=100, random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1) + check_predictions( + LogisticRegression(fit_intercept=False, random_state=0), csr_container(X), Y1 + ) + + +def test_logistic_cv_mock_scorer(): + class MockScorer: + def __init__(self): + self.calls = 0 + self.scores = [0.1, 0.4, 0.8, 0.5] + + def __call__(self, model, X, y, sample_weight=None): + score = self.scores[self.calls % len(self.scores)] + self.calls += 1 + return score + + mock_scorer = MockScorer() + Cs = [1, 2, 3, 4] + cv = 2 + + lr = LogisticRegressionCV(Cs=Cs, scoring=mock_scorer, cv=cv) + X, y = make_classification(random_state=0) + lr.fit(X, y) + + # Cs[2] has the highest score (0.8) from MockScorer + assert lr.C_[0] == Cs[2] + + # scorer called 8 times (cv*len(Cs)) + assert mock_scorer.calls == cv * len(Cs) + + # reset mock_scorer + mock_scorer.calls = 0 + custom_score = lr.score(X, lr.predict(X)) + + assert custom_score == mock_scorer.scores[0] + assert mock_scorer.calls == 1 + + +@skip_if_no_parallel +def test_lr_liblinear_warning(): + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + lr = LogisticRegression(solver="liblinear", n_jobs=2) + warning_message = ( + "'n_jobs' > 1 does not have any effect when" + " 'solver' is set to 'liblinear'. Got 'n_jobs'" + " = 2." + ) + with pytest.warns(UserWarning, match=warning_message): + lr.fit(iris.data, target) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_3_classes(csr_container): + check_predictions(LogisticRegression(C=10), X, Y2) + check_predictions(LogisticRegression(C=10), csr_container(X), Y2) + + +@pytest.mark.parametrize( + "clf", + [ + LogisticRegression(C=len(iris.data), solver="liblinear", multi_class="ovr"), + LogisticRegression(C=len(iris.data), solver="lbfgs", multi_class="multinomial"), + LogisticRegression( + C=len(iris.data), solver="newton-cg", multi_class="multinomial" + ), + LogisticRegression( + C=len(iris.data), solver="sag", tol=1e-2, multi_class="ovr", random_state=42 + ), + LogisticRegression( + C=len(iris.data), + solver="saga", + tol=1e-2, + multi_class="ovr", + random_state=42, + ), + LogisticRegression( + C=len(iris.data), solver="newton-cholesky", multi_class="ovr" + ), + ], +) +def test_predict_iris(clf): + """Test logistic regression with the iris dataset. + + Test that both multinomial and OvR solvers handle multiclass data correctly and + give good accuracy score (>0.95) for the training data. 
+    """
+    n_samples, n_features = iris.data.shape
+    target = iris.target_names[iris.target]
+
+    if clf.solver == "lbfgs":
+        # lbfgs has convergence issues on the iris data with its default max_iter=100
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", ConvergenceWarning)
+            clf.fit(iris.data, target)
+    else:
+        clf.fit(iris.data, target)
+    assert_array_equal(np.unique(target), clf.classes_)
+
+    pred = clf.predict(iris.data)
+    assert np.mean(pred == target) > 0.95
+
+    probabilities = clf.predict_proba(iris.data)
+    assert_allclose(probabilities.sum(axis=1), np.ones(n_samples))
+
+    pred = iris.target_names[probabilities.argmax(axis=1)]
+    assert np.mean(pred == target) > 0.95
+
+
+@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV])
+def test_check_solver_option(LR):
+    X, y = iris.data, iris.target
+
+    # only the 'liblinear' and 'newton-cholesky' solvers reject the multinomial option
+    for solver in ["liblinear", "newton-cholesky"]:
+        msg = f"Solver {solver} does not support a multinomial backend."
+        lr = LR(solver=solver, multi_class="multinomial")
+        with pytest.raises(ValueError, match=msg):
+            lr.fit(X, y)
+
+    # all solvers except 'liblinear' and 'saga'
+    for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag"]:
+        msg = "Solver %s supports only 'l2' or None penalties," % solver
+        lr = LR(solver=solver, penalty="l1", multi_class="ovr")
+        with pytest.raises(ValueError, match=msg):
+            lr.fit(X, y)
+    for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag", "saga"]:
+        msg = "Solver %s supports only dual=False, got dual=True" % solver
+        lr = LR(solver=solver, dual=True, multi_class="ovr")
+        with pytest.raises(ValueError, match=msg):
+            lr.fit(X, y)
+
+    # only saga supports elasticnet. We only test for liblinear because the
+    # error is raised earlier for the other solvers (solver %s supports only l2
+    # penalties)
+    for solver in ["liblinear"]:
+        msg = f"Only 'saga' solver supports elasticnet penalty, got solver={solver}."
+        lr = LR(solver=solver, penalty="elasticnet")
+        with pytest.raises(ValueError, match=msg):
+            lr.fit(X, y)
+
+    # liblinear does not support penalty='none'
+    # (LogisticRegressionCV does not support penalty='none' at all)
+    if LR is LogisticRegression:
+        msg = "penalty=None is not supported for the liblinear solver"
+        lr = LR(penalty=None, solver="liblinear")
+        with pytest.raises(ValueError, match=msg):
+            lr.fit(X, y)
+
+
+@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV])
+def test_elasticnet_l1_ratio_err_helpful(LR):
+    # Check that an informative error message is raised when penalty="elasticnet"
+    # but l1_ratio is not specified.
+    model = LR(penalty="elasticnet", solver="saga")
+    with pytest.raises(ValueError, match=r".*l1_ratio.*"):
+        model.fit(np.array([[1, 2], [3, 4]]), np.array([0, 1]))
+
+
+@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"])
+def test_multinomial_binary(solver):
+    # Test multinomial LR on a binary problem.
+ target = (iris.target > 0).astype(np.intp) + target = np.array(["setosa", "not-setosa"])[target] + + clf = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, max_iter=2000 + ) + clf.fit(iris.data, target) + + assert clf.coef_.shape == (1, iris.data.shape[1]) + assert clf.intercept_.shape == (1,) + assert_array_equal(clf.predict(iris.data), target) + + mlr = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, fit_intercept=False + ) + mlr.fit(iris.data, target) + pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)] + assert np.mean(pred == target) > 0.9 + + +def test_multinomial_binary_probabilities(global_random_seed): + # Test multinomial LR gives expected probabilities based on the + # decision function, for a binary problem. + X, y = make_classification(random_state=global_random_seed) + clf = LogisticRegression( + multi_class="multinomial", + solver="saga", + tol=1e-3, + random_state=global_random_seed, + ) + clf.fit(X, y) + + decision = clf.decision_function(X) + proba = clf.predict_proba(X) + + expected_proba_class_1 = np.exp(decision) / (np.exp(decision) + np.exp(-decision)) + expected_proba = np.c_[1 - expected_proba_class_1, expected_proba_class_1] + + assert_almost_equal(proba, expected_proba) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_sparsify(coo_container): + # Test sparsify and densify members. + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + X = scale(iris.data) + clf = LogisticRegression(random_state=0).fit(X, target) + + pred_d_d = clf.decision_function(X) + + clf.sparsify() + assert sparse.issparse(clf.coef_) + pred_s_d = clf.decision_function(X) + + sp_data = coo_container(X) + pred_s_s = clf.decision_function(sp_data) + + clf.densify() + pred_d_s = clf.decision_function(sp_data) + + assert_array_almost_equal(pred_d_d, pred_s_d) + assert_array_almost_equal(pred_d_d, pred_s_s) + assert_array_almost_equal(pred_d_d, pred_d_s) + + +def test_inconsistent_input(): + # Test that an exception is raised on inconsistent input + rng = np.random.RandomState(0) + X_ = rng.random_sample((5, 10)) + y_ = np.ones(X_.shape[0]) + y_[0] = 0 + + clf = LogisticRegression(random_state=0) + + # Wrong dimensions for training data + y_wrong = y_[:-1] + + with pytest.raises(ValueError): + clf.fit(X, y_wrong) + + # Wrong dimensions for test data + with pytest.raises(ValueError): + clf.fit(X_, y_).predict(rng.random_sample((3, 12))) + + +def test_write_parameters(): + # Test that we can write to coef_ and intercept_ + clf = LogisticRegression(random_state=0) + clf.fit(X, Y1) + clf.coef_[:] = 0 + clf.intercept_[:] = 0 + assert_array_almost_equal(clf.decision_function(X), 0) + + +def test_nan(): + # Test proper NaN handling. + # Regression test for Issue #252: fit used to go into an infinite loop. 
+ Xnan = np.array(X, dtype=np.float64) + Xnan[0, 1] = np.nan + logistic = LogisticRegression(random_state=0) + + with pytest.raises(ValueError): + logistic.fit(Xnan, Y1) + + +def test_consistency_path(): + # Test that the path algorithm is consistent + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = np.logspace(0, 4, 10) + + f = ignore_warnings + # can't test with fit_intercept=True since LIBLINEAR + # penalizes the intercept + for solver in ["sag", "saga"]: + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + fit_intercept=False, + tol=1e-5, + solver=solver, + max_iter=1000, + multi_class="ovr", + random_state=0, + ) + for i, C in enumerate(Cs): + lr = LogisticRegression( + C=C, + fit_intercept=False, + tol=1e-5, + solver=solver, + multi_class="ovr", + random_state=0, + max_iter=1000, + ) + lr.fit(X, y) + lr_coef = lr.coef_.ravel() + assert_array_almost_equal( + lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver + ) + + # test for fit_intercept=True + for solver in ("lbfgs", "newton-cg", "newton-cholesky", "liblinear", "sag", "saga"): + Cs = [1e3] + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + tol=1e-6, + solver=solver, + intercept_scaling=10000.0, + random_state=0, + multi_class="ovr", + ) + lr = LogisticRegression( + C=Cs[0], + tol=1e-6, + intercept_scaling=10000.0, + random_state=0, + multi_class="ovr", + solver=solver, + ) + lr.fit(X, y) + lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_]) + assert_array_almost_equal( + lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver + ) + + +def test_logistic_regression_path_convergence_fail(): + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = [1e3] + + # Check that the convergence message points to both a model agnostic + # advice (scaling the data) and to the logistic regression specific + # documentation that includes hints on the solver configuration. 
+ with pytest.warns(ConvergenceWarning) as record: + _logistic_regression_path( + X, y, Cs=Cs, tol=0.0, max_iter=1, random_state=0, verbose=0 + ) + + assert len(record) == 1 + warn_msg = record[0].message.args[0] + assert "lbfgs failed to converge" in warn_msg + assert "Increase the number of iterations" in warn_msg + assert "scale the data" in warn_msg + assert "linear_model.html#logistic-regression" in warn_msg + + +def test_liblinear_dual_random_state(): + # random_state is relevant for liblinear solver only if dual=True + X, y = make_classification(n_samples=20, random_state=0) + lr1 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr1.fit(X, y) + lr2 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr2.fit(X, y) + lr3 = LogisticRegression( + random_state=8, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr3.fit(X, y) + + # same result for same random state + assert_array_almost_equal(lr1.coef_, lr2.coef_) + # different results for different random states + msg = "Arrays are not almost equal to 6 decimals" + with pytest.raises(AssertionError, match=msg): + assert_array_almost_equal(lr1.coef_, lr3.coef_) + + +def test_logistic_cv(): + # test for LogisticRegressionCV object + n_samples, n_features = 50, 5 + rng = np.random.RandomState(0) + X_ref = rng.randn(n_samples, n_features) + y = np.sign(X_ref.dot(5 * rng.randn(n_features))) + X_ref -= X_ref.mean() + X_ref /= X_ref.std() + lr_cv = LogisticRegressionCV( + Cs=[1.0], fit_intercept=False, solver="liblinear", multi_class="ovr", cv=3 + ) + lr_cv.fit(X_ref, y) + lr = LogisticRegression( + C=1.0, fit_intercept=False, solver="liblinear", multi_class="ovr" + ) + lr.fit(X_ref, y) + assert_array_almost_equal(lr.coef_, lr_cv.coef_) + + assert_array_equal(lr_cv.coef_.shape, (1, n_features)) + assert_array_equal(lr_cv.classes_, [-1, 1]) + assert len(lr_cv.classes_) == 2 + + coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values())) + assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features)) + assert_array_equal(lr_cv.Cs_.shape, (1,)) + scores = np.asarray(list(lr_cv.scores_.values())) + assert_array_equal(scores.shape, (1, 3, 1)) + + +@pytest.mark.parametrize( + "scoring, multiclass_agg_list", + [ + ("accuracy", [""]), + ("precision", ["_macro", "_weighted"]), + # no need to test for micro averaging because it + # is the same as accuracy for f1, precision, + # and recall (see https://github.com/ + # scikit-learn/scikit-learn/pull/ + # 11578#discussion_r203250062) + ("f1", ["_macro", "_weighted"]), + ("neg_log_loss", [""]), + ("recall", ["_macro", "_weighted"]), + ], +) +def test_logistic_cv_multinomial_score(scoring, multiclass_agg_list): + # test that LogisticRegressionCV uses the right score to compute its + # cross-validation scores when using a multinomial scoring + # see https://github.com/scikit-learn/scikit-learn/issues/8720 + X, y = make_classification( + n_samples=100, random_state=0, n_classes=3, n_informative=6 + ) + train, test = np.arange(80), np.arange(80, 100) + lr = LogisticRegression(C=1.0, multi_class="multinomial") + # we use lbfgs to support multinomial + params = lr.get_params() + # we store the params to set them further in _log_reg_scoring_path + for key in ["C", "n_jobs", "warm_start"]: + del params[key] + lr.fit(X[train], y[train]) + for averaging in multiclass_agg_list: + scorer = get_scorer(scoring + averaging) + assert_array_almost_equal( + 
_log_reg_scoring_path( + X, + y, + train, + test, + Cs=[1.0], + scoring=scorer, + pos_class=None, + max_squared_sum=None, + sample_weight=None, + score_params=None, + **params, + )[2][0], + scorer(lr, X[test], y[test]), + ) + + +def test_multinomial_logistic_regression_string_inputs(): + # Test with string labels for LogisticRegression(CV) + n_samples, n_features, n_classes = 50, 5, 3 + X_ref, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_classes=n_classes, + n_informative=3, + random_state=0, + ) + y_str = LabelEncoder().fit(["bar", "baz", "foo"]).inverse_transform(y) + # For numerical labels, let y values be taken from set (-1, 0, 1) + y = np.array(y) - 1 + # Test for string labels + lr = LogisticRegression(multi_class="multinomial") + lr_cv = LogisticRegressionCV(multi_class="multinomial", Cs=3) + lr_str = LogisticRegression(multi_class="multinomial") + lr_cv_str = LogisticRegressionCV(multi_class="multinomial", Cs=3) + + lr.fit(X_ref, y) + lr_cv.fit(X_ref, y) + lr_str.fit(X_ref, y_str) + lr_cv_str.fit(X_ref, y_str) + + assert_array_almost_equal(lr.coef_, lr_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert sorted(lr_cv_str.classes_) == ["bar", "baz", "foo"] + + # The predictions should be in original labels + assert sorted(np.unique(lr_str.predict(X_ref))) == ["bar", "baz", "foo"] + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz", "foo"] + + # Make sure class weights can be given with string labels + lr_cv_str = LogisticRegression( + class_weight={"bar": 1, "baz": 2, "foo": 0}, multi_class="multinomial" + ).fit(X_ref, y_str) + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz"] + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logistic_cv_sparse(csr_container): + X, y = make_classification(n_samples=50, n_features=5, random_state=0) + X[X < 1.0] = 0.0 + csr = csr_container(X) + + clf = LogisticRegressionCV() + clf.fit(X, y) + clfs = LogisticRegressionCV() + clfs.fit(csr, y) + assert_array_almost_equal(clfs.coef_, clf.coef_) + assert_array_almost_equal(clfs.intercept_, clf.intercept_) + assert clfs.C_ == clf.C_ + + +def test_ovr_multinomial_iris(): + # Test that OvR and multinomial are correct using the iris dataset. + train, target = iris.data, iris.target + n_samples, n_features = train.shape + + # The cv indices from stratified kfold (where stratification is done based + # on the fine-grained iris classes, i.e, before the classes 0 and 1 are + # conflated) is used for both clf and clf1 + n_cv = 2 + cv = StratifiedKFold(n_cv) + precomputed_folds = list(cv.split(train, target)) + + # Train clf on the original dataset where classes 0 and 1 are separated + clf = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + clf.fit(train, target) + + # Conflate classes 0 and 1 and train clf1 on this modified dataset + clf1 = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + target_copy = target.copy() + target_copy[target_copy == 0] = 1 + clf1.fit(train, target_copy) + + # Ensure that what OvR learns for class2 is same regardless of whether + # classes 0 and 1 are separated or not + assert_allclose(clf.scores_[2], clf1.scores_[2]) + assert_allclose(clf.intercept_[2:], clf1.intercept_) + assert_allclose(clf.coef_[2][np.newaxis, :], clf1.coef_) + + # Test the shape of various attributes. 
+ assert clf.coef_.shape == (3, n_features) + assert_array_equal(clf.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf.Cs_.shape == (10,) + scores = np.asarray(list(clf.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + # Test that for the iris data multinomial gives a better accuracy than OvR + for solver in ["lbfgs", "newton-cg", "sag", "saga"]: + max_iter = 500 if solver in ["sag", "saga"] else 30 + clf_multi = LogisticRegressionCV( + solver=solver, + multi_class="multinomial", + max_iter=max_iter, + random_state=42, + tol=1e-3 if solver in ["sag", "saga"] else 1e-2, + cv=2, + ) + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + train = scale(train) + + clf_multi.fit(train, target) + multi_score = clf_multi.score(train, target) + ovr_score = clf.score(train, target) + assert multi_score > ovr_score + + # Test attributes of LogisticRegressionCV + assert clf.coef_.shape == clf_multi.coef_.shape + assert_array_equal(clf_multi.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf_multi.Cs_.shape == (10,) + scores = np.asarray(list(clf_multi.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + +def test_logistic_regression_solvers(): + """Test solvers converge to the same result.""" + X, y = make_classification(n_features=10, n_informative=5, random_state=0) + + params = dict(fit_intercept=False, random_state=42, multi_class="ovr") + + regressors = { + solver: LogisticRegression(solver=solver, **params).fit(X, y) + for solver in SOLVERS + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_array_almost_equal( + regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=3 + ) + + +def test_logistic_regression_solvers_multiclass(): + """Test solvers converge to the same result for multiclass problems.""" + X, y = make_classification( + n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0 + ) + tol = 1e-7 + params = dict(fit_intercept=False, tol=tol, random_state=42, multi_class="ovr") + + # Override max iteration count for specific solvers to allow for + # proper convergence. 
+ solver_max_iter = {"sag": 1000, "saga": 10000} + + regressors = { + solver: LogisticRegression( + solver=solver, max_iter=solver_max_iter.get(solver, 100), **params + ).fit(X, y) + for solver in SOLVERS + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_allclose( + regressors[solver_1].coef_, + regressors[solver_2].coef_, + rtol=5e-3 if solver_2 == "saga" else 1e-3, + err_msg=f"{solver_1} vs {solver_2}", + ) + + +@pytest.mark.parametrize("weight", [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]) +@pytest.mark.parametrize("class_weight", ["weight", "balanced"]) +def test_logistic_regressioncv_class_weights(weight, class_weight, global_random_seed): + """Test class_weight for LogisticRegressionCV.""" + n_classes = len(weight) + if class_weight == "weight": + class_weight = weight + + X, y = make_classification( + n_samples=30, + n_features=3, + n_repeated=0, + n_informative=3, + n_redundant=0, + n_classes=n_classes, + random_state=global_random_seed, + ) + params = dict( + Cs=1, + fit_intercept=False, + multi_class="ovr", + class_weight=class_weight, + tol=1e-8, + ) + clf_lbfgs = LogisticRegressionCV(solver="lbfgs", **params) + + # XXX: lbfgs' line search can fail and cause a ConvergenceWarning for some + # 10% of the random seeds, but only on specific platforms (in particular + # when using Atlas BLAS/LAPACK implementation). Doubling the maxls internal + # parameter of the solver does not help. However this lack of proper + # convergence does not seem to prevent the assertion to pass, so we ignore + # the warning for now. + # See: https://github.com/scikit-learn/scikit-learn/pull/27649 + with ignore_warnings(category=ConvergenceWarning): + clf_lbfgs.fit(X, y) + + for solver in set(SOLVERS) - set(["lbfgs"]): + clf = LogisticRegressionCV(solver=solver, **params) + if solver in ("sag", "saga"): + clf.set_params( + tol=1e-18, max_iter=10000, random_state=global_random_seed + 1 + ) + clf.fit(X, y) + + assert_allclose( + clf.coef_, clf_lbfgs.coef_, rtol=1e-3, err_msg=f"{solver} vs lbfgs" + ) + + +def test_logistic_regression_sample_weights(): + X, y = make_classification( + n_samples=20, n_features=5, n_informative=3, n_classes=2, random_state=0 + ) + sample_weight = y + 1 + + for LR in [LogisticRegression, LogisticRegressionCV]: + kw = {"random_state": 42, "fit_intercept": False, "multi_class": "ovr"} + if LR is LogisticRegressionCV: + kw.update({"Cs": 3, "cv": 3}) + + # Test that passing sample_weight as ones is the same as + # not passing them at all (default None) + for solver in ["lbfgs", "liblinear"]: + clf_sw_none = LR(solver=solver, **kw) + clf_sw_ones = LR(solver=solver, **kw) + clf_sw_none.fit(X, y) + clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0])) + assert_allclose(clf_sw_none.coef_, clf_sw_ones.coef_, rtol=1e-4) + + # Test that sample weights work the same with the lbfgs, + # newton-cg, newton-cholesky and 'sag' solvers + clf_sw_lbfgs = LR(**kw, tol=1e-5) + clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight) + for solver in set(SOLVERS) - set(("lbfgs", "saga")): + clf_sw = LR(solver=solver, tol=1e-10 if solver == "sag" else 1e-5, **kw) + # ignore convergence warning due to small dataset with sag + with ignore_warnings(): + clf_sw.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf_sw_lbfgs.coef_, clf_sw.coef_, rtol=1e-4) + + # Test that passing class_weight as [1,2] is the same as + # passing class weight = [1,1] but adjusting sample weights + # to be 2 for all instances of class 2 + for solver in ["lbfgs", "liblinear"]: + clf_cw_12 = 
LR(solver=solver, class_weight={0: 1, 1: 2}, **kw) + clf_cw_12.fit(X, y) + clf_sw_12 = LR(solver=solver, **kw) + clf_sw_12.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf_cw_12.coef_, clf_sw_12.coef_, rtol=1e-4) + + # Test the above for l1 penalty and l2 penalty with dual=True. + # since the patched liblinear code is different. + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l1", + tol=1e-5, + random_state=42, + multi_class="ovr", + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l1", + tol=1e-5, + random_state=42, + multi_class="ovr", + ) + clf_sw.fit(X, y, sample_weight) + assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) + + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l2", + dual=True, + random_state=42, + multi_class="ovr", + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l2", + dual=True, + random_state=42, + multi_class="ovr", + ) + clf_sw.fit(X, y, sample_weight) + assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) + + +def _compute_class_weight_dictionary(y): + # helper for returning a dictionary instead of an array + classes = np.unique(y) + class_weight = compute_class_weight("balanced", classes=classes, y=y) + class_weight_dict = dict(zip(classes, class_weight)) + return class_weight_dict + + +def test_logistic_regression_class_weights(): + # Scale data to avoid convergence warnings with the lbfgs solver + X_iris = scale(iris.data) + # Multinomial case: remove 90% of class 0 + X = X_iris[45:, :] + y = iris.target[45:] + solvers = ("lbfgs", "newton-cg") + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in solvers: + clf1 = LogisticRegression( + solver=solver, multi_class="multinomial", class_weight="balanced" + ) + clf2 = LogisticRegression( + solver=solver, multi_class="multinomial", class_weight=class_weight_dict + ) + clf1.fit(X, y) + clf2.fit(X, y) + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4) + + # Binary case: remove 90% of class 0 and 100% of class 2 + X = X_iris[45:100, :] + y = iris.target[45:100] + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in set(SOLVERS) - set(("sag", "saga")): + clf1 = LogisticRegression( + solver=solver, multi_class="ovr", class_weight="balanced" + ) + clf2 = LogisticRegression( + solver=solver, multi_class="ovr", class_weight=class_weight_dict + ) + clf1.fit(X, y) + clf2.fit(X, y) + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6) + + +def test_logistic_regression_multinomial(): + # Tests for the multinomial option in logistic regression + + # Some basic attributes of Logistic Regression + n_samples, n_features, n_classes = 50, 20, 3 + X, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_informative=10, + n_classes=n_classes, + random_state=0, + ) + + X = StandardScaler(with_mean=False).fit_transform(X) + + # 'lbfgs' is used as a referenced + solver = "lbfgs" + ref_i = LogisticRegression(solver=solver, multi_class="multinomial", tol=1e-6) + ref_w = LogisticRegression( + solver=solver, multi_class="multinomial", fit_intercept=False, tol=1e-6 + ) + ref_i.fit(X, y) + ref_w.fit(X, y) + assert ref_i.coef_.shape == (n_classes, n_features) + assert ref_w.coef_.shape == (n_classes, n_features) + for solver in ["sag", "saga", "newton-cg"]: + clf_i = 
LogisticRegression(
+            solver=solver,
+            multi_class="multinomial",
+            random_state=42,
+            max_iter=2000,
+            tol=1e-7,
+        )
+        clf_w = LogisticRegression(
+            solver=solver,
+            multi_class="multinomial",
+            random_state=42,
+            max_iter=2000,
+            tol=1e-7,
+            fit_intercept=False,
+        )
+        clf_i.fit(X, y)
+        clf_w.fit(X, y)
+        assert clf_i.coef_.shape == (n_classes, n_features)
+        assert clf_w.coef_.shape == (n_classes, n_features)
+
+        # Compare solutions between lbfgs and the other solvers
+        assert_allclose(ref_i.coef_, clf_i.coef_, rtol=1e-3)
+        assert_allclose(ref_w.coef_, clf_w.coef_, rtol=1e-2)
+        assert_allclose(ref_i.intercept_, clf_i.intercept_, rtol=1e-3)
+
+    # Test that the path gives almost the same results. However, since in this
+    # case we take the average of the coefs after fitting across all the
+    # folds, it need not be exactly the same.
+    for solver in ["lbfgs", "newton-cg", "sag", "saga"]:
+        clf_path = LogisticRegressionCV(
+            solver=solver, max_iter=2000, tol=1e-6, multi_class="multinomial", Cs=[1.0]
+        )
+        clf_path.fit(X, y)
+        assert_allclose(clf_path.coef_, ref_i.coef_, rtol=1e-2)
+        assert_allclose(clf_path.intercept_, ref_i.intercept_, rtol=1e-2)
+
+
+def test_liblinear_decision_function_zero():
+    # Test negative prediction when decision_function values are zero.
+    # Liblinear predicts the positive class when decision_function values
+    # are zero. This is a test to verify that we do not do the same.
+    # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
+    # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
+    X, y = make_classification(n_samples=5, n_features=5, random_state=0)
+    clf = LogisticRegression(fit_intercept=False, solver="liblinear", multi_class="ovr")
+    clf.fit(X, y)
+
+    # Dummy data such that the decision function becomes zero.
+    X = np.zeros((5, 5))
+    assert_array_equal(clf.predict(X), np.zeros(5))
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_liblinear_logregcv_sparse(csr_container):
+    # Test LogRegCV with solver='liblinear' works for sparse matrices
+
+    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
+    clf = LogisticRegressionCV(solver="liblinear", multi_class="ovr")
+    clf.fit(csr_container(X), y)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_saga_sparse(csr_container):
+    # Test LogRegCV with solver='saga' works for sparse matrices
+
+    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
+    clf = LogisticRegressionCV(solver="saga", tol=1e-2)
+    clf.fit(csr_container(X), y)
+
+
+def test_logreg_intercept_scaling_zero():
+    # Test that intercept_scaling is ignored when fit_intercept is False
+
+    clf = LogisticRegression(fit_intercept=False)
+    clf.fit(X, Y1)
+    assert clf.intercept_ == 0.0
+
+
+def test_logreg_l1():
+    # Because liblinear penalizes the intercept and saga does not, we do not
+    # fit the intercept to make it possible to compare the coefficients of
+    # the two models at convergence.
+ rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(size=(n_samples, 3)) + X_constant = np.ones(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + multi_class="ovr", + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logreg_l1_sparse_data(csr_container): + # Because liblinear penalizes the intercept and saga does not, we do not + # fit the intercept to make it possible to compare the coefficients of + # the two models at convergence. + rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(scale=0.1, size=(n_samples, 3)) + X_constant = np.zeros(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + X[X < 1] = 0 + X = csr_container(X) + + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + multi_class="ovr", + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + # Check that solving on the sparse and dense data yield the same results + lr_saga_dense = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga_dense.fit(X.toarray(), y) + assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_) + + +@pytest.mark.parametrize("random_seed", [42]) +@pytest.mark.parametrize("penalty", ["l1", "l2"]) +def test_logistic_regression_cv_refit(random_seed, penalty): + # Test that when refit=True, logistic regression cv with the saga solver + # converges to the same solution as logistic regression with a fixed + # regularization parameter. + # Internally the LogisticRegressionCV model uses a warm start to refit on + # the full data model with the optimal C found by CV. As the penalized + # logistic regression loss is convex, we should still recover exactly + # the same solution as long as the stopping criterion is strict enough (and + # that there are no exactly duplicated features when penalty='l1'). 
+ X, y = make_classification(n_samples=100, n_features=20, random_state=random_seed) + common_params = dict( + solver="saga", + penalty=penalty, + random_state=random_seed, + max_iter=1000, + tol=1e-12, + ) + lr_cv = LogisticRegressionCV(Cs=[1.0], refit=True, **common_params) + lr_cv.fit(X, y) + lr = LogisticRegression(C=1.0, **common_params) + lr.fit(X, y) + assert_array_almost_equal(lr_cv.coef_, lr.coef_) + + +def test_logreg_predict_proba_multinomial(): + X, y = make_classification( + n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10 + ) + + # Predicted probabilities using the true-entropy loss should give a + # smaller loss than those using the ovr method. + clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs") + clf_multi.fit(X, y) + clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs") + clf_ovr.fit(X, y) + clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X)) + assert clf_ovr_loss > clf_multi_loss + + # Predicted probabilities using the soft-max function should give a + # smaller loss than those using the logistic function. + clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X)) + assert clf_wrong_loss > clf_multi_loss + + +@pytest.mark.parametrize("max_iter", np.arange(1, 5)) +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver, message", + [ + ( + "newton-cg", + "newton-cg failed to converge. Increase the number of iterations.", + ), + ( + "liblinear", + "Liblinear failed to converge, increase the number of iterations.", + ), + ("sag", "The max_iter was reached which means the coef_ did not converge"), + ("saga", "The max_iter was reached which means the coef_ did not converge"), + ("lbfgs", "lbfgs failed to converge"), + ("newton-cholesky", "Newton solver did not converge after [0-9]* iterations"), + ], +) +def test_max_iter(max_iter, multi_class, solver, message): + # Test that the maximum number of iteration is reached + X, y_bin = iris.data, iris.target.copy() + y_bin[y_bin == 2] = 0 + + if solver in ("liblinear", "newton-cholesky") and multi_class == "multinomial": + pytest.skip("'multinomial' is not supported by liblinear and newton-cholesky") + if solver == "newton-cholesky" and max_iter > 1: + pytest.skip("solver newton-cholesky might converge very fast") + + lr = LogisticRegression( + max_iter=max_iter, + tol=1e-15, + multi_class=multi_class, + random_state=0, + solver=solver, + ) + with pytest.warns(ConvergenceWarning, match=message): + lr.fit(X, y_bin) + + assert lr.n_iter_[0] == max_iter + + +@pytest.mark.parametrize("solver", SOLVERS) +def test_n_iter(solver): + # Test that self.n_iter_ has the correct format. + X, y = iris.data, iris.target + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + X = scale(X) + + n_classes = np.unique(y).shape[0] + assert n_classes == 3 + + # Also generate a binary classification sub-problem. 
+ y_bin = y.copy() + y_bin[y_bin == 2] = 0 + + n_Cs = 4 + n_cv_fold = 2 + + # Binary classification case + clf = LogisticRegression(tol=1e-2, C=1.0, solver=solver, random_state=42) + clf.fit(X, y_bin) + assert clf.n_iter_.shape == (1,) + + clf_cv = LogisticRegressionCV( + tol=1e-2, solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42 + ) + clf_cv.fit(X, y_bin) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + # OvR case + clf.set_params(multi_class="ovr").fit(X, y) + assert clf.n_iter_.shape == (n_classes,) + + clf_cv.set_params(multi_class="ovr").fit(X, y) + assert clf_cv.n_iter_.shape == (n_classes, n_cv_fold, n_Cs) + + # multinomial case + if solver in ("liblinear", "newton-cholesky"): + # This solver only supports one-vs-rest multiclass classification. + return + + # When using the multinomial objective function, there is a single + # optimization problem to solve for all classes at once: + clf.set_params(multi_class="multinomial").fit(X, y) + assert clf.n_iter_.shape == (1,) + + clf_cv.set_params(multi_class="multinomial").fit(X, y) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + +@pytest.mark.parametrize("solver", sorted(set(SOLVERS) - set(["liblinear"]))) +@pytest.mark.parametrize("warm_start", (True, False)) +@pytest.mark.parametrize("fit_intercept", (True, False)) +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +def test_warm_start(solver, warm_start, fit_intercept, multi_class): + # A 1-iteration second fit on same data should give almost same result + # with warm starting, and quite different result without warm starting. + # Warm starting does not work with liblinear solver. + X, y = iris.data, iris.target + + if solver == "newton-cholesky" and multi_class == "multinomial": + # solver does only support OvR + return + + clf = LogisticRegression( + tol=1e-4, + multi_class=multi_class, + warm_start=warm_start, + solver=solver, + random_state=42, + fit_intercept=fit_intercept, + ) + with ignore_warnings(category=ConvergenceWarning): + clf.fit(X, y) + coef_1 = clf.coef_ + + clf.max_iter = 1 + clf.fit(X, y) + cum_diff = np.sum(np.abs(coef_1 - clf.coef_)) + msg = ( + "Warm starting issue with %s solver in %s mode " + "with fit_intercept=%s and warm_start=%s" + % (solver, multi_class, str(fit_intercept), str(warm_start)) + ) + if warm_start: + assert 2.0 > cum_diff, msg + else: + assert cum_diff > 2.0, msg + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_saga_vs_liblinear(csr_container): + iris = load_iris() + X, y = iris.data, iris.target + X = np.concatenate([X] * 3) + y = np.concatenate([y] * 3) + + X_bin = X[y <= 1] + y_bin = y[y <= 1] * 2 - 1 + + X_sparse, y_sparse = make_classification( + n_samples=50, n_features=20, random_state=0 + ) + X_sparse = csr_container(X_sparse) + + for X, y in ((X_bin, y_bin), (X_sparse, y_sparse)): + for penalty in ["l1", "l2"]: + n_samples = X.shape[0] + # alpha=1e-3 is time consuming + for alpha in np.logspace(-1, 1, 3): + saga = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="saga", + multi_class="ovr", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + liblinear = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="liblinear", + multi_class="ovr", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + saga.fit(X, y) + liblinear.fit(X, y) + # Convergence for alpha=1e-3 is very slow + assert_array_almost_equal(saga.coef_, liblinear.coef_, 3) + + 
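# --- Illustrative sketch (editorial aside, not part of the patch) -------------
# test_saga_vs_liblinear above (and test_elastic_net_versus_sgd further down)
# set C = 1 / (n_samples * alpha) to compare a "C times summed loss" objective
# with an "averaged loss plus alpha times penalty" objective. A closed-form
# ridge-style toy problem, with made-up numbers, shows that the two
# parametrisations share the same minimiser under that mapping.
import numpy as np

rng_toy = np.random.RandomState(0)
y_toy = rng_toy.randn(20)
n, alpha = y_toy.size, 0.1
C = 1.0 / (n * alpha)

# argmin_w  C * sum_i (y_i - w)**2 + w**2
w_from_C = C * y_toy.sum() / (C * n + 1.0)
# argmin_w  mean_i (y_i - w)**2 + alpha * w**2
w_from_alpha = y_toy.mean() / (1.0 + alpha)

assert np.isclose(w_from_C, w_from_alpha)
# -------------------------------------------------------------------------------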
+@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver", ["liblinear", "newton-cg", "newton-cholesky", "saga"] +) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dtype_match(solver, multi_class, fit_intercept, csr_container): + # Test that np.float32 input data is not cast to np.float64 when possible + # and that the output is approximately the same no matter the input format. + + if solver in ("liblinear", "newton-cholesky") and multi_class == "multinomial": + pytest.skip(f"Solver={solver} does not support multinomial logistic.") + + out32_type = np.float64 if solver == "liblinear" else np.float32 + + X_32 = np.array(X).astype(np.float32) + y_32 = np.array(Y1).astype(np.float32) + X_64 = np.array(X).astype(np.float64) + y_64 = np.array(Y1).astype(np.float64) + X_sparse_32 = csr_container(X, dtype=np.float32) + X_sparse_64 = csr_container(X, dtype=np.float64) + solver_tol = 5e-4 + + lr_templ = LogisticRegression( + solver=solver, + multi_class=multi_class, + random_state=42, + tol=solver_tol, + fit_intercept=fit_intercept, + ) + + # Check 32-bit type consistency + lr_32 = clone(lr_templ) + lr_32.fit(X_32, y_32) + assert lr_32.coef_.dtype == out32_type + + # Check 32-bit type consistency with sparsity + lr_32_sparse = clone(lr_templ) + lr_32_sparse.fit(X_sparse_32, y_32) + assert lr_32_sparse.coef_.dtype == out32_type + + # Check 64-bit type consistency + lr_64 = clone(lr_templ) + lr_64.fit(X_64, y_64) + assert lr_64.coef_.dtype == np.float64 + + # Check 64-bit type consistency with sparsity + lr_64_sparse = clone(lr_templ) + lr_64_sparse.fit(X_sparse_64, y_64) + assert lr_64_sparse.coef_.dtype == np.float64 + + # solver_tol bounds the norm of the loss gradient + # dw ~= inv(H)*grad ==> |dw| ~= |inv(H)| * solver_tol, where H - hessian + # + # See https://github.com/scikit-learn/scikit-learn/pull/13645 + # + # with Z = np.hstack((np.ones((3,1)), np.array(X))) + # In [8]: np.linalg.norm(np.diag([0,2,2]) + np.linalg.inv((Z.T @ Z)/4)) + # Out[8]: 1.7193336918135917 + + # factor of 2 to get the ball diameter + atol = 2 * 1.72 * solver_tol + if os.name == "nt" and _IS_32BIT: + # FIXME + atol = 1e-2 + + # Check accuracy consistency + assert_allclose(lr_32.coef_, lr_64.coef_.astype(np.float32), atol=atol) + + if solver == "saga" and fit_intercept: + # FIXME: SAGA on sparse data fits the intercept inaccurately with the + # default tol and max_iter parameters. + atol = 1e-1 + + assert_allclose(lr_32.coef_, lr_32_sparse.coef_, atol=atol) + assert_allclose(lr_64.coef_, lr_64_sparse.coef_, atol=atol) + + +def test_warm_start_converge_LR(): + # Test to see that the logistic regression converges on warm start, + # with multi_class='multinomial'. 
Non-regressive test for #10836 + + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = np.array([1] * 100 + [-1] * 100) + lr_no_ws = LogisticRegression( + multi_class="multinomial", solver="sag", warm_start=False, random_state=0 + ) + lr_ws = LogisticRegression( + multi_class="multinomial", solver="sag", warm_start=True, random_state=0 + ) + + lr_no_ws_loss = log_loss(y, lr_no_ws.fit(X, y).predict_proba(X)) + for i in range(5): + lr_ws.fit(X, y) + lr_ws_loss = log_loss(y, lr_ws.predict_proba(X)) + assert_allclose(lr_no_ws_loss, lr_ws_loss, rtol=1e-5) + + +def test_elastic_net_coeffs(): + # make sure elasticnet penalty gives different coefficients from l1 and l2 + # with saga solver (l1_ratio different from 0 or 1) + X, y = make_classification(random_state=0) + + C = 2.0 + l1_ratio = 0.5 + coeffs = list() + for penalty, ratio in (("elasticnet", l1_ratio), ("l1", None), ("l2", None)): + lr = LogisticRegression( + penalty=penalty, + C=C, + solver="saga", + random_state=0, + l1_ratio=ratio, + tol=1e-3, + max_iter=200, + ) + lr.fit(X, y) + coeffs.append(lr.coef_) + + elastic_net_coeffs, l1_coeffs, l2_coeffs = coeffs + # make sure coeffs differ by at least .1 + assert not np.allclose(elastic_net_coeffs, l1_coeffs, rtol=0, atol=0.1) + assert not np.allclose(elastic_net_coeffs, l2_coeffs, rtol=0, atol=0.1) + assert not np.allclose(l2_coeffs, l1_coeffs, rtol=0, atol=0.1) + + +@pytest.mark.parametrize("C", [0.001, 0.1, 1, 10, 100, 1000, 1e6]) +@pytest.mark.parametrize("penalty, l1_ratio", [("l1", 1), ("l2", 0)]) +def test_elastic_net_l1_l2_equivalence(C, penalty, l1_ratio): + # Make sure elasticnet is equivalent to l1 when l1_ratio=1 and to l2 when + # l1_ratio=0. + X, y = make_classification(random_state=0) + + lr_enet = LogisticRegression( + penalty="elasticnet", + C=C, + l1_ratio=l1_ratio, + solver="saga", + random_state=0, + tol=1e-2, + ) + lr_expected = LogisticRegression( + penalty=penalty, C=C, solver="saga", random_state=0, tol=1e-2 + ) + lr_enet.fit(X, y) + lr_expected.fit(X, y) + + assert_array_almost_equal(lr_enet.coef_, lr_expected.coef_) + + +@pytest.mark.parametrize("C", [0.001, 1, 100, 1e6]) +def test_elastic_net_vs_l1_l2(C): + # Make sure that elasticnet with grid search on l1_ratio gives same or + # better results than just l1 or just l2. + + X, y = make_classification(500, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + param_grid = {"l1_ratio": np.linspace(0, 1, 5)} + + enet_clf = LogisticRegression( + penalty="elasticnet", C=C, solver="saga", random_state=0, tol=1e-2 + ) + gs = GridSearchCV(enet_clf, param_grid, refit=True) + + l1_clf = LogisticRegression( + penalty="l1", C=C, solver="saga", random_state=0, tol=1e-2 + ) + l2_clf = LogisticRegression( + penalty="l2", C=C, solver="saga", random_state=0, tol=1e-2 + ) + + for clf in (gs, l1_clf, l2_clf): + clf.fit(X_train, y_train) + + assert gs.score(X_test, y_test) >= l1_clf.score(X_test, y_test) + assert gs.score(X_test, y_test) >= l2_clf.score(X_test, y_test) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_LogisticRegression_elastic_net_objective(C, l1_ratio): + # Check that training with a penalty matching the objective leads + # to a lower objective. + # Here we train a logistic regression with l2 (a) and elasticnet (b) + # penalties, and compute the elasticnet objective. That of a should be + # greater than that of b (both objectives are convex). 
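# --- Illustrative sketch (editorial aside, not part of the patch) -------------
# The elastic-net tests above treat the penalty as a convex mix of l1 and l2
# terms, pen(w) = l1_ratio * ||w||_1 + (1 - l1_ratio) * 0.5 * ||w||_2**2, the
# same form as the enet_objective helper defined a few lines below. A tiny
# check, on a made-up coefficient vector, that the mix collapses to pure l1 at
# l1_ratio=1 and to the halved squared l2 norm at l1_ratio=0.
import numpy as np

def elasticnet_penalty(coef, l1_ratio):
    coef = np.ravel(coef)
    return l1_ratio * np.sum(np.abs(coef)) + (1.0 - l1_ratio) * 0.5 * (coef @ coef)

w_toy = np.array([1.0, -2.0, 0.5])
assert np.isclose(elasticnet_penalty(w_toy, 1.0), np.abs(w_toy).sum())
assert np.isclose(elasticnet_penalty(w_toy, 0.0), 0.5 * (w_toy @ w_toy))
# -------------------------------------------------------------------------------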
+ X, y = make_classification( + n_samples=1000, + n_classes=2, + n_features=20, + n_informative=10, + n_redundant=0, + n_repeated=0, + random_state=0, + ) + X = scale(X) + + lr_enet = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + C=C, + l1_ratio=l1_ratio, + fit_intercept=False, + ) + lr_l2 = LogisticRegression( + penalty="l2", solver="saga", random_state=0, C=C, fit_intercept=False + ) + lr_enet.fit(X, y) + lr_l2.fit(X, y) + + def enet_objective(lr): + coef = lr.coef_.ravel() + obj = C * log_loss(y, lr.predict_proba(X)) + obj += l1_ratio * np.sum(np.abs(coef)) + obj += (1.0 - l1_ratio) * 0.5 * np.dot(coef, coef) + return obj + + assert enet_objective(lr_enet) < enet_objective(lr_l2) + + +@pytest.mark.parametrize("multi_class", ("ovr", "multinomial")) +def test_LogisticRegressionCV_GridSearchCV_elastic_net(multi_class): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet + + if multi_class == "ovr": + # This is actually binary classification, ovr multiclass is treated in + # test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr + X, y = make_classification(random_state=0) + else: + X, y = make_classification( + n_samples=100, n_classes=3, n_informative=3, random_state=0 + ) + + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + multi_class=multi_class, + tol=1e-2, + ) + lrcv.fit(X, y) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + multi_class=multi_class, + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X, y) + + assert gs.best_params_["l1_ratio"] == lrcv.l1_ratio_[0] + assert gs.best_params_["C"] == lrcv.C_[0] + + +def test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr(): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet and multiclass is ovr. We can't + # compare best_params like in the previous test because + # LogisticRegressionCV with multi_class='ovr' will have one C and one + # l1_param for each class, while LogisticRegression will share the + # parameters over the *n_classes* classifiers. 
+ + X, y = make_classification( + n_samples=100, n_classes=3, n_informative=3, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + lrcv.fit(X_train, y_train) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X_train, y_train) + + # Check that predictions are 80% the same + assert (lrcv.predict(X_train) == gs.predict(X_train)).mean() >= 0.8 + assert (lrcv.predict(X_test) == gs.predict(X_test)).mean() >= 0.8 + + +@pytest.mark.parametrize("penalty", ("l2", "elasticnet")) +@pytest.mark.parametrize("multi_class", ("ovr", "multinomial", "auto")) +def test_LogisticRegressionCV_no_refit(penalty, multi_class): + # Test LogisticRegressionCV attribute shapes when refit is False + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + if penalty == "elasticnet": + l1_ratios = np.linspace(0, 1, 2) + else: + l1_ratios = None + + lrcv = LogisticRegressionCV( + penalty=penalty, + Cs=Cs, + solver="saga", + l1_ratios=l1_ratios, + random_state=0, + multi_class=multi_class, + tol=1e-2, + refit=False, + ) + lrcv.fit(X, y) + assert lrcv.C_.shape == (n_classes,) + assert lrcv.l1_ratio_.shape == (n_classes,) + assert lrcv.coef_.shape == (n_classes, n_features) + + +def test_LogisticRegressionCV_elasticnet_attribute_shapes(): + # Make sure the shapes of scores_ and coefs_paths_ attributes are correct + # when using elasticnet (added one dimension for l1_ratios) + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + l1_ratios = np.linspace(0, 1, 2) + + n_folds = 2 + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=n_folds, + l1_ratios=l1_ratios, + multi_class="ovr", + random_state=0, + tol=1e-2, + ) + lrcv.fit(X, y) + coefs_paths = np.asarray(list(lrcv.coefs_paths_.values())) + assert coefs_paths.shape == ( + n_classes, + n_folds, + Cs.size, + l1_ratios.size, + n_features + 1, + ) + scores = np.asarray(list(lrcv.scores_.values())) + assert scores.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + assert lrcv.n_iter_.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + +def test_l1_ratio_non_elasticnet(): + msg = ( + r"l1_ratio parameter is only used when penalty is" + r" 'elasticnet'\. 
Got \(penalty=l1\)" + ) + with pytest.warns(UserWarning, match=msg): + LogisticRegression(penalty="l1", solver="saga", l1_ratio=0.5).fit(X, Y1) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_elastic_net_versus_sgd(C, l1_ratio): + # Compare elasticnet penalty in LogisticRegression() and SGD(loss='log') + n_samples = 500 + X, y = make_classification( + n_samples=n_samples, + n_classes=2, + n_features=5, + n_informative=5, + n_redundant=0, + n_repeated=0, + random_state=1, + ) + X = scale(X) + + sgd = SGDClassifier( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=None, + max_iter=2000, + l1_ratio=l1_ratio, + alpha=1.0 / C / n_samples, + loss="log_loss", + ) + log = LogisticRegression( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=1e-5, + max_iter=1000, + l1_ratio=l1_ratio, + C=C, + solver="saga", + ) + + sgd.fit(X, y) + log.fit(X, y) + assert_array_almost_equal(sgd.coef_, log.coef_, decimal=1) + + +def test_logistic_regression_path_coefs_multinomial(): + # Make sure that the returned coefs by logistic_regression_path when + # multi_class='multinomial' don't override each other (used to be a + # bug). + X, y = make_classification( + n_samples=200, + n_classes=3, + n_informative=2, + n_redundant=0, + n_clusters_per_class=1, + random_state=0, + n_features=2, + ) + Cs = [0.00001, 1, 10000] + coefs, _, _ = _logistic_regression_path( + X, + y, + penalty="l1", + Cs=Cs, + solver="saga", + random_state=0, + multi_class="multinomial", + ) + + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[1], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[2], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[1], coefs[2], decimal=1) + + +@pytest.mark.parametrize( + "est", + [ + LogisticRegression(random_state=0, max_iter=500), + LogisticRegressionCV(random_state=0, cv=3, Cs=3, tol=1e-3, max_iter=500), + ], + ids=lambda x: x.__class__.__name__, +) +@pytest.mark.parametrize("solver", SOLVERS) +def test_logistic_regression_multi_class_auto(est, solver): + # check multi_class='auto' => multi_class='ovr' + # iff binary y or liblinear or newton-cholesky + + def fit(X, y, **kw): + return clone(est).set_params(**kw).fit(X, y) + + scaled_data = scale(iris.data) + X = scaled_data[::10] + X2 = scaled_data[1::10] + y_multi = iris.target[::10] + y_bin = y_multi == 0 + est_auto_bin = fit(X, y_bin, multi_class="auto", solver=solver) + est_ovr_bin = fit(X, y_bin, multi_class="ovr", solver=solver) + assert_allclose(est_auto_bin.coef_, est_ovr_bin.coef_) + assert_allclose(est_auto_bin.predict_proba(X2), est_ovr_bin.predict_proba(X2)) + + est_auto_multi = fit(X, y_multi, multi_class="auto", solver=solver) + if solver in ("liblinear", "newton-cholesky"): + est_ovr_multi = fit(X, y_multi, multi_class="ovr", solver=solver) + assert_allclose(est_auto_multi.coef_, est_ovr_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_ovr_multi.predict_proba(X2) + ) + else: + est_multi_multi = fit(X, y_multi, multi_class="multinomial", solver=solver) + assert_allclose(est_auto_multi.coef_, est_multi_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_multi_multi.predict_proba(X2) + ) + + # Make sure multi_class='ovr' is distinct from ='multinomial' + assert not np.allclose( + est_auto_bin.coef_, + fit(X, y_bin, multi_class="multinomial", solver=solver).coef_, + ) + assert not 
np.allclose( + est_auto_bin.coef_, + fit(X, y_multi, multi_class="multinomial", solver=solver).coef_, + ) + + +@pytest.mark.parametrize("solver", sorted(set(SOLVERS) - set(["liblinear"]))) +def test_penalty_none(solver): + # - Make sure warning is raised if penalty=None and C is set to a + # non-default value. + # - Make sure setting penalty=None is equivalent to setting C=np.inf with + # l2 penalty. + X, y = make_classification(n_samples=1000, n_redundant=0, random_state=0) + + msg = "Setting penalty=None will ignore the C" + lr = LogisticRegression(penalty=None, solver=solver, C=4) + with pytest.warns(UserWarning, match=msg): + lr.fit(X, y) + + lr_none = LogisticRegression(penalty=None, solver=solver, random_state=0) + lr_l2_C_inf = LogisticRegression( + penalty="l2", C=np.inf, solver=solver, random_state=0 + ) + pred_none = lr_none.fit(X, y).predict(X) + pred_l2_C_inf = lr_l2_C_inf.fit(X, y).predict(X) + assert_array_equal(pred_none, pred_l2_C_inf) + + +@pytest.mark.parametrize( + "params", + [ + {"penalty": "l1", "dual": False, "tol": 1e-6, "max_iter": 1000}, + {"penalty": "l2", "dual": True, "tol": 1e-12, "max_iter": 1000}, + {"penalty": "l2", "dual": False, "tol": 1e-12, "max_iter": 1000}, + ], +) +def test_logisticregression_liblinear_sample_weight(params): + # check that we support sample_weight with liblinear in all possible cases: + # l1-primal, l2-primal, l2-dual + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.dtype("float"), + ) + y = np.array( + [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int") + ) + + X2 = np.vstack([X, X]) + y2 = np.hstack([y, 3 - y]) + sample_weight = np.ones(shape=len(y) * 2) + sample_weight[len(y) :] = 0 + X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0) + + base_clf = LogisticRegression(solver="liblinear", random_state=42) + base_clf.set_params(**params) + clf_no_weight = clone(base_clf).fit(X, y) + clf_with_weight = clone(base_clf).fit(X2, y2, sample_weight=sample_weight) + + for method in ("predict", "predict_proba", "decision_function"): + X_clf_no_weight = getattr(clf_no_weight, method)(X) + X_clf_with_weight = getattr(clf_with_weight, method)(X) + assert_allclose(X_clf_no_weight, X_clf_with_weight) + + +def test_scores_attribute_layout_elasticnet(): + # Non regression test for issue #14955. + # when penalty is elastic net the scores_ attribute has shape + # (n_classes, n_Cs, n_l1_ratios) + # We here make sure that the second dimension indeed corresponds to Cs and + # the third dimension corresponds to l1_ratios. 
+ + X, y = make_classification(n_samples=1000, random_state=0) + cv = StratifiedKFold(n_splits=5) + + l1_ratios = [0.1, 0.9] + Cs = [0.1, 1, 10] + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + solver="saga", + l1_ratios=l1_ratios, + Cs=Cs, + cv=cv, + random_state=0, + max_iter=250, + tol=1e-3, + ) + lrcv.fit(X, y) + + avg_scores_lrcv = lrcv.scores_[1].mean(axis=0) # average over folds + + for i, C in enumerate(Cs): + for j, l1_ratio in enumerate(l1_ratios): + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + C=C, + l1_ratio=l1_ratio, + random_state=0, + max_iter=250, + tol=1e-3, + ) + + avg_score_lr = cross_val_score(lr, X, y, cv=cv).mean() + assert avg_scores_lrcv[i, j] == pytest.approx(avg_score_lr) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_multinomial_identifiability_on_iris(fit_intercept): + """Test that the multinomial classification is identifiable. + + A multinomial with c classes can be modeled with + probability_k = exp(X@coef_k) / sum(exp(X@coef_l), l=1..c) for k=1..c. + This is not identifiable, unless one chooses a further constraint. + According to [1], the maximum of the L2 penalized likelihood automatically + satisfies the symmetric constraint: + sum(coef_k, k=1..c) = 0 + + Further details can be found in [2]. + + Reference + --------- + .. [1] :doi:`Zhu, Ji and Trevor J. Hastie. "Classification of gene microarrays by + penalized logistic regression". Biostatistics 5 3 (2004): 427-43. + <10.1093/biostatistics/kxg046>` + + .. [2] :arxiv:`Noah Simon and Jerome Friedman and Trevor Hastie. (2013) + "A Blockwise Descent Algorithm for Group-penalized Multiresponse and + Multinomial Regression". <1311.6529>` + """ + # Test logistic regression with the iris dataset + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + clf = LogisticRegression( + C=len(iris.data), + solver="lbfgs", + multi_class="multinomial", + fit_intercept=fit_intercept, + ) + # Scaling X to ease convergence. + X_scaled = scale(iris.data) + clf.fit(X_scaled, target) + + # axis=0 is sum over classes + assert_allclose(clf.coef_.sum(axis=0), 0, atol=1e-10) + if fit_intercept: + clf.intercept_.sum(axis=0) == pytest.approx(0, abs=1e-15) + + +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial", "auto"]) +@pytest.mark.parametrize("class_weight", [{0: 1.0, 1: 10.0, 2: 1.0}, "balanced"]) +def test_sample_weight_not_modified(multi_class, class_weight): + X, y = load_iris(return_X_y=True) + n_features = len(X) + W = np.ones(n_features) + W[: n_features // 2] = 2 + + expected = W.copy() + + clf = LogisticRegression( + random_state=0, class_weight=class_weight, max_iter=200, multi_class=multi_class + ) + clf.fit(X, y, sample_weight=W) + assert_allclose(expected, W) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_large_sparse_matrix(solver, global_random_seed, csr_container): + # Solvers either accept large sparse matrices, or raise helpful error. + # Non-regression test for pull-request #21093. 
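# --- Illustrative sketch (editorial aside, not part of the patch) -------------
# test_multinomial_identifiability_on_iris above relies on the fact that the
# softmax parametrisation is invariant to adding the same vector to every
# class's coefficients, which is why a constraint such as sum_k coef_k = 0 is
# needed to pin down a unique solution. A small NumPy demonstration with
# made-up shapes and values:
import numpy as np

rng_toy = np.random.RandomState(0)
X_toy = rng_toy.randn(4, 3)                 # 4 samples, 3 features
coef = rng_toy.randn(5, 3)                  # 5 classes
shift = rng_toy.randn(3)                    # same shift applied to every class

def softmax(z):
    z = z - z.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

np.testing.assert_allclose(
    softmax(X_toy @ coef.T), softmax(X_toy @ (coef + shift).T)
)
# -------------------------------------------------------------------------------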
+ + # generate sparse matrix with int64 indices + X = csr_container(sparse.rand(20, 10, random_state=global_random_seed)) + for attr in ["indices", "indptr"]: + setattr(X, attr, getattr(X, attr).astype("int64")) + rng = np.random.RandomState(global_random_seed) + y = rng.randint(2, size=X.shape[0]) + + if solver in ["liblinear", "sag", "saga"]: + msg = "Only sparse matrices with 32-bit integer indices" + with pytest.raises(ValueError, match=msg): + LogisticRegression(solver=solver).fit(X, y) + else: + LogisticRegression(solver=solver).fit(X, y) + + +def test_single_feature_newton_cg(): + # Test that Newton-CG works with a single feature and intercept. + # Non-regression test for issue #23605. + + X = np.array([[0.5, 0.65, 1.1, 1.25, 0.8, 0.54, 0.95, 0.7]]).T + y = np.array([1, 1, 0, 0, 1, 1, 0, 1]) + assert X.shape[1] == 1 + LogisticRegression(solver="newton-cg", fit_intercept=True).fit(X, y) + + +def test_liblinear_not_stuck(): + # Non-regression https://github.com/scikit-learn/scikit-learn/issues/18264 + X = iris.data.copy() + y = iris.target.copy() + X = X[y != 2] + y = y[y != 2] + X_prep = StandardScaler().fit_transform(X) + + C = l1_min_c(X, y, loss="log") * 10 ** (10 / 29) + clf = LogisticRegression( + penalty="l1", + solver="liblinear", + tol=1e-6, + max_iter=100, + intercept_scaling=10000.0, + random_state=0, + C=C, + ) + + # test that the fit does not raise a ConvergenceWarning + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + clf.fit(X_prep, y) + + +@pytest.mark.usefixtures("enable_slep006") +def test_lr_cv_scores_differ_when_sample_weight_is_requested(): + """Test that `sample_weight` is correctly passed to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` by + checking the difference in scores with the case when `sample_weight` + is not requested. 
+ """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + + assert not np.allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert not np.allclose(score_1, score_2) + + +def test_lr_cv_scores_without_enabling_metadata_routing(): + """Test that `sample_weight` is passed correctly to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` even + when `enable_metadata_routing=False` + """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + with config_context(enable_metadata_routing=False): + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + + with config_context(enable_metadata_routing=True): + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert_allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + assert_allclose(score_1, score_2) + + +@pytest.mark.parametrize("solver", SOLVERS) +def test_zero_max_iter(solver): + # Make sure we can inspect the state of LogisticRegression right after + # initialization (before the first weight update). + X, y = load_iris(return_X_y=True) + y = y == 2 + with ignore_warnings(category=ConvergenceWarning): + clf = LogisticRegression(solver=solver, max_iter=0).fit(X, y) + if solver not in ["saga", "sag"]: + # XXX: sag and saga have n_iter_ = [1]... + assert clf.n_iter_ == 0 + + if solver != "lbfgs": + # XXX: lbfgs has already started to update the coefficients... 
+ assert_allclose(clf.coef_, np.zeros_like(clf.coef_)) + assert_allclose( + clf.decision_function(X), + np.full(shape=X.shape[0], fill_value=clf.intercept_), + ) + assert_allclose( + clf.predict_proba(X), + np.full(shape=(X.shape[0], 2), fill_value=0.5), + ) + assert clf.score(X, y) < 0.7 + + +def test_passing_params_without_enabling_metadata_routing(): + """Test that the right error message is raised when metadata params + are passed while not supported when `enable_metadata_routing=False`.""" + X, y = make_classification(n_samples=10, random_state=0) + lr_cv = LogisticRegressionCV() + msg = "is only supported if enable_metadata_routing=True" + + with config_context(enable_metadata_routing=False): + params = {"extra_param": 1.0} + + with pytest.raises(ValueError, match=msg): + lr_cv.fit(X, y, **params) + + with pytest.raises(ValueError, match=msg): + lr_cv.score(X, y, **params) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py new file mode 100644 index 0000000000000000000000000000000000000000..7f4354fc803d24c2396f5105a5a4ce52c0a3e9fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py @@ -0,0 +1,262 @@ +# Author: Vlad Niculae +# License: BSD 3 clause + + +import numpy as np +import pytest + +from sklearn.datasets import make_sparse_coded_signal +from sklearn.linear_model import ( + LinearRegression, + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + orthogonal_mp, + orthogonal_mp_gram, +) +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) + +n_samples, n_features, n_nonzero_coefs, n_targets = 25, 35, 5, 3 +y, X, gamma = make_sparse_coded_signal( + n_samples=n_targets, + n_components=n_features, + n_features=n_samples, + n_nonzero_coefs=n_nonzero_coefs, + random_state=0, +) +y, X, gamma = y.T, X.T, gamma.T +# Make X not of norm 1 for testing +X *= 10 +y *= 10 +G, Xy = np.dot(X.T, X), np.dot(X.T, y) +# this makes X (n_samples, n_features) +# and y (n_samples, 3) + + +def test_correct_shapes(): + assert orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp(X, y, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_correct_shapes_gram(): + assert orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_n_nonzero_coefs(): + assert np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)) <= 5 + assert ( + np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5, precompute=True)) + <= 5 + ) + + +def test_tol(): + tol = 0.5 + gamma = orthogonal_mp(X, y[:, 0], tol=tol) + gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True) + assert np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol + assert np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol + + +def test_with_without_gram(): + assert_array_almost_equal( + orthogonal_mp(X, y, n_nonzero_coefs=5), + orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True), + ) + + +def test_with_without_gram_tol(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=1.0), orthogonal_mp(X, y, tol=1.0, precompute=True) + ) + + +def test_unreachable_accuracy(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0), orthogonal_mp(X, y, n_nonzero_coefs=n_features) + ) + warning_message = ( 
+ "Orthogonal matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0, precompute=True), + orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_features), + ) + + +@pytest.mark.parametrize("positional_params", [(X, y), (G, Xy)]) +@pytest.mark.parametrize( + "keyword_params", + [{"n_nonzero_coefs": n_features + 1}], +) +def test_bad_input(positional_params, keyword_params): + with pytest.raises(ValueError): + orthogonal_mp(*positional_params, **keyword_params) + + +def test_perfect_signal_recovery(): + (idx,) = gamma[:, 0].nonzero() + gamma_rec = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5) + gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5) + assert_array_equal(idx, np.flatnonzero(gamma_rec)) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_orthogonal_mp_gram_readonly(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/5956 + (idx,) = gamma[:, 0].nonzero() + G_readonly = G.copy() + G_readonly.setflags(write=False) + Xy_readonly = Xy.copy() + Xy_readonly.setflags(write=False) + gamma_gram = orthogonal_mp_gram( + G_readonly, Xy_readonly[:, 0], n_nonzero_coefs=5, copy_Gram=False, copy_Xy=False + ) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_estimator(): + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs) + omp.fit(X, y[:, 0]) + assert omp.coef_.shape == (n_features,) + assert omp.intercept_.shape == () + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_.shape == (n_targets,) + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + coef_normalized = omp.coef_[0].copy() + omp.set_params(fit_intercept=True) + omp.fit(X, y[:, 0]) + assert_array_almost_equal(coef_normalized, omp.coef_) + + omp.set_params(fit_intercept=False) + omp.fit(X, y[:, 0]) + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + assert omp.coef_.shape == (n_features,) + assert omp.intercept_ == 0 + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_ == 0 + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + +def test_identical_regressors(): + newX = X.copy() + newX[:, 1] = newX[:, 0] + gamma = np.zeros(n_features) + gamma[0] = gamma[1] = 1.0 + newy = np.dot(newX, gamma) + warning_message = ( + "Orthogonal matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." 
+ ) + with pytest.warns(RuntimeWarning, match=warning_message): + orthogonal_mp(newX, newy, n_nonzero_coefs=2) + + +def test_swapped_regressors(): + gamma = np.zeros(n_features) + # X[:, 21] should be selected first, then X[:, 0] selected second, + # which will take X[:, 21]'s place in case the algorithm does + # column swapping for optimization (which is the case at the moment) + gamma[21] = 1.0 + gamma[0] = 0.5 + new_y = np.dot(X, gamma) + new_Xy = np.dot(X.T, new_y) + gamma_hat = orthogonal_mp(X, new_y, n_nonzero_coefs=2) + gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, n_nonzero_coefs=2) + assert_array_equal(np.flatnonzero(gamma_hat), [0, 21]) + assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21]) + + +def test_no_atoms(): + y_empty = np.zeros_like(y) + Xy_empty = np.dot(X.T, y_empty) + gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, n_nonzero_coefs=1) + gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, n_nonzero_coefs=1) + assert np.all(gamma_empty == 0) + assert np.all(gamma_empty_gram == 0) + + +def test_omp_path(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_return_path_prop_with_gram(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True, precompute=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False, precompute=True) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_cv(): + y_ = y[:, 0] + gamma_ = gamma[:, 0] + ompcv = OrthogonalMatchingPursuitCV(fit_intercept=False, max_iter=10) + ompcv.fit(X, y_) + assert ompcv.n_nonzero_coefs_ == n_nonzero_coefs + assert_array_almost_equal(ompcv.coef_, gamma_) + omp = OrthogonalMatchingPursuit( + fit_intercept=False, n_nonzero_coefs=ompcv.n_nonzero_coefs_ + ) + omp.fit(X, y_) + assert_array_almost_equal(ompcv.coef_, omp.coef_) + + +def test_omp_reaches_least_squares(): + # Use small simple data; it's a sanity check but OMP can stop early + rng = check_random_state(0) + n_samples, n_features = (10, 8) + n_targets = 3 + X = rng.randn(n_samples, n_features) + Y = rng.randn(n_samples, n_targets) + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features) + lstsq = LinearRegression() + omp.fit(X, Y) + lstsq.fit(X, Y) + assert_array_almost_equal(omp.coef_, lstsq.coef_) + + +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +def test_omp_gram_dtype_match(data_type): + # verify matching input data type and output data type + coef = orthogonal_mp_gram( + G.astype(data_type), Xy.astype(data_type), n_nonzero_coefs=5 + ) + assert coef.dtype == data_type + + +def test_omp_gram_numerical_consistency(): + # verify numericaly consistency among np.float32 and np.float64 + coef_32 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float32), n_nonzero_coefs=5 + ) + coef_64 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float64), n_nonzero_coefs=5 + ) + assert_allclose(coef_32, coef_64) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py 
b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py new file mode 100644 index 0000000000000000000000000000000000000000..bcfd58b1eab2b51ecd8cc1097bd48577e2babe0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py @@ -0,0 +1,268 @@ +import numpy as np +import pytest + +from sklearn.base import ClassifierMixin +from sklearn.datasets import load_iris +from sklearn.linear_model import PassiveAggressiveClassifier, PassiveAggressiveRegressor +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() +random_state = check_random_state(12) +indices = np.arange(iris.data.shape[0]) +random_state.shuffle(indices) +X = iris.data[indices] +y = iris.target[indices] + + +class MyPassiveAggressive(ClassifierMixin): + def __init__( + self, + C=1.0, + epsilon=0.01, + loss="hinge", + fit_intercept=True, + n_iter=1, + random_state=None, + ): + self.C = C + self.epsilon = epsilon + self.loss = loss + self.fit_intercept = fit_intercept + self.n_iter = n_iter + + def fit(self, X, y): + n_samples, n_features = X.shape + self.w = np.zeros(n_features, dtype=np.float64) + self.b = 0.0 + + for t in range(self.n_iter): + for i in range(n_samples): + p = self.project(X[i]) + if self.loss in ("hinge", "squared_hinge"): + loss = max(1 - y[i] * p, 0) + else: + loss = max(np.abs(p - y[i]) - self.epsilon, 0) + + sqnorm = np.dot(X[i], X[i]) + + if self.loss in ("hinge", "epsilon_insensitive"): + step = min(self.C, loss / sqnorm) + elif self.loss in ("squared_hinge", "squared_epsilon_insensitive"): + step = loss / (sqnorm + 1.0 / (2 * self.C)) + + if self.loss in ("hinge", "squared_hinge"): + step *= y[i] + else: + step *= np.sign(y[i] - p) + + self.w += step * X[i] + if self.fit_intercept: + self.b += step + + def project(self, X): + return np.dot(X, self.w) + self.b + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_classifier_accuracy(csr_container, fit_intercept, average): + data = csr_container(X) if csr_container is not None else X + clf = PassiveAggressiveClassifier( + C=1.0, + max_iter=30, + fit_intercept=fit_intercept, + random_state=1, + average=average, + tol=None, + ) + clf.fit(data, y) + score = clf.score(data, y) + assert score > 0.79 + if average: + assert hasattr(clf, "_average_coef") + assert hasattr(clf, "_average_intercept") + assert hasattr(clf, "_standard_intercept") + assert hasattr(clf, "_standard_coef") + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_classifier_partial_fit(csr_container, average): + classes = np.unique(y) + data = csr_container(X) if csr_container is not None else X + clf = PassiveAggressiveClassifier(random_state=0, average=average, max_iter=5) + for t in range(30): + clf.partial_fit(data, y, classes) + score = clf.score(data, y) + assert score > 0.79 + if average: + assert hasattr(clf, "_average_coef") + assert hasattr(clf, "_average_intercept") + assert hasattr(clf, "_standard_intercept") + assert hasattr(clf, "_standard_coef") + + +def test_classifier_refit(): + # Classifier can be retrained on different labels and features. 
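# --- Illustrative sketch (editorial aside, not part of the patch) -------------
# One hinge-loss passive-aggressive update, the same closed-form step that
# MyPassiveAggressive above computes: tau = min(C, loss / ||x||^2). When the
# step is not capped by C, the updated weights satisfy the margin exactly,
# which is the "aggressive" part of the algorithm. The numbers below are made
# up for illustration.
import numpy as np

w = np.zeros(2)
x_i, y_i, C = np.array([1.0, 2.0]), 1, 1.0

loss = max(1.0 - y_i * (w @ x_i), 0.0)       # hinge loss, equals 1 at w = 0
tau = min(C, loss / (x_i @ x_i))             # closed-form PA step size
w = w + tau * y_i * x_i

assert np.isclose(y_i * (w @ x_i), 1.0)      # margin of exactly 1 after the update
# -------------------------------------------------------------------------------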
+ clf = PassiveAggressiveClassifier(max_iter=5).fit(X, y) + assert_array_equal(clf.classes_, np.unique(y)) + + clf.fit(X[:, :-1], iris.target_names[y]) + assert_array_equal(clf.classes_, iris.target_names) + + +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize("loss", ("hinge", "squared_hinge")) +def test_classifier_correctness(loss, csr_container): + y_bin = y.copy() + y_bin[y != 1] = -1 + + clf1 = MyPassiveAggressive(loss=loss, n_iter=2) + clf1.fit(X, y_bin) + + data = csr_container(X) if csr_container is not None else X + clf2 = PassiveAggressiveClassifier(loss=loss, max_iter=2, shuffle=False, tol=None) + clf2.fit(data, y_bin) + + assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2) + + +@pytest.mark.parametrize( + "response_method", ["predict_proba", "predict_log_proba", "transform"] +) +def test_classifier_undefined_methods(response_method): + clf = PassiveAggressiveClassifier(max_iter=100) + with pytest.raises(AttributeError): + getattr(clf, response_method) + + +def test_class_weights(): + # Test class weights. + X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y2 = [1, 1, 1, -1, -1] + + clf = PassiveAggressiveClassifier( + C=0.1, max_iter=100, class_weight=None, random_state=100 + ) + clf.fit(X2, y2) + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) + + # we give a small weights to class 1 + clf = PassiveAggressiveClassifier( + C=0.1, max_iter=100, class_weight={1: 0.001}, random_state=100 + ) + clf.fit(X2, y2) + + # now the hyperplane should rotate clock-wise and + # the prediction on this point should shift + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) + + +def test_partial_fit_weight_class_balanced(): + # partial_fit with class_weight='balanced' not supported + clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100) + with pytest.raises(ValueError): + clf.partial_fit(X, y, classes=np.unique(y)) + + +def test_equal_class_weight(): + X2 = [[1, 0], [1, 0], [0, 1], [0, 1]] + y2 = [0, 0, 1, 1] + clf = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight=None) + clf.fit(X2, y2) + + # Already balanced, so "balanced" weights should have no effect + clf_balanced = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight="balanced") + clf_balanced.fit(X2, y2) + + clf_weighted = PassiveAggressiveClassifier( + C=0.1, tol=None, class_weight={0: 0.5, 1: 0.5} + ) + clf_weighted.fit(X2, y2) + + # should be similar up to some epsilon due to learning rate schedule + assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) + assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2) + + +def test_wrong_class_weight_label(): + # ValueError due to wrong class_weight label. 
+ X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y2 = [1, 1, 1, -1, -1] + + clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100) + with pytest.raises(ValueError): + clf.fit(X2, y2) + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_regressor_mse(csr_container, fit_intercept, average): + y_bin = y.copy() + y_bin[y != 1] = -1 + + data = csr_container(X) if csr_container is not None else X + reg = PassiveAggressiveRegressor( + C=1.0, + fit_intercept=fit_intercept, + random_state=0, + average=average, + max_iter=5, + ) + reg.fit(data, y_bin) + pred = reg.predict(data) + assert np.mean((pred - y_bin) ** 2) < 1.7 + if average: + assert hasattr(reg, "_average_coef") + assert hasattr(reg, "_average_intercept") + assert hasattr(reg, "_standard_intercept") + assert hasattr(reg, "_standard_coef") + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_regressor_partial_fit(csr_container, average): + y_bin = y.copy() + y_bin[y != 1] = -1 + + data = csr_container(X) if csr_container is not None else X + reg = PassiveAggressiveRegressor(random_state=0, average=average, max_iter=100) + for t in range(50): + reg.partial_fit(data, y_bin) + pred = reg.predict(data) + assert np.mean((pred - y_bin) ** 2) < 1.7 + if average: + assert hasattr(reg, "_average_coef") + assert hasattr(reg, "_average_intercept") + assert hasattr(reg, "_standard_intercept") + assert hasattr(reg, "_standard_coef") + + +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize("loss", ("epsilon_insensitive", "squared_epsilon_insensitive")) +def test_regressor_correctness(loss, csr_container): + y_bin = y.copy() + y_bin[y != 1] = -1 + + reg1 = MyPassiveAggressive(loss=loss, n_iter=2) + reg1.fit(X, y_bin) + + data = csr_container(X) if csr_container is not None else X + reg2 = PassiveAggressiveRegressor(tol=None, loss=loss, max_iter=2, shuffle=False) + reg2.fit(data, y_bin) + + assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2) + + +def test_regressor_undefined_methods(): + reg = PassiveAggressiveRegressor(max_iter=100) + with pytest.raises(AttributeError): + reg.transform(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_perceptron.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..71456ae72132ccebc76da96aea9213fd55f47c9d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_perceptron.py @@ -0,0 +1,88 @@ +import numpy as np +import pytest + +from sklearn.datasets import load_iris +from sklearn.linear_model import Perceptron +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose, assert_array_almost_equal +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() +random_state = check_random_state(12) +indices = np.arange(iris.data.shape[0]) +random_state.shuffle(indices) +X = iris.data[indices] +y = iris.target[indices] + + +class MyPerceptron: + def __init__(self, n_iter=1): + self.n_iter = n_iter + + def fit(self, X, y): + n_samples, n_features = X.shape + self.w = np.zeros(n_features, dtype=np.float64) + self.b = 0.0 + + for t in range(self.n_iter): + for i in range(n_samples): + 
if self.predict(X[i])[0] != y[i]: + self.w += y[i] * X[i] + self.b += y[i] + + def project(self, X): + return np.dot(X, self.w) + self.b + + def predict(self, X): + X = np.atleast_2d(X) + return np.sign(self.project(X)) + + +@pytest.mark.parametrize("container", CSR_CONTAINERS + [np.array]) +def test_perceptron_accuracy(container): + data = container(X) + clf = Perceptron(max_iter=100, tol=None, shuffle=False) + clf.fit(data, y) + score = clf.score(data, y) + assert score > 0.7 + + +def test_perceptron_correctness(): + y_bin = y.copy() + y_bin[y != 1] = -1 + + clf1 = MyPerceptron(n_iter=2) + clf1.fit(X, y_bin) + + clf2 = Perceptron(max_iter=2, shuffle=False, tol=None) + clf2.fit(X, y_bin) + + assert_array_almost_equal(clf1.w, clf2.coef_.ravel()) + + +def test_undefined_methods(): + clf = Perceptron(max_iter=100) + for meth in ("predict_proba", "predict_log_proba"): + with pytest.raises(AttributeError): + getattr(clf, meth) + + +def test_perceptron_l1_ratio(): + """Check that `l1_ratio` has an impact when `penalty='elasticnet'`""" + clf1 = Perceptron(l1_ratio=0, penalty="elasticnet") + clf1.fit(X, y) + + clf2 = Perceptron(l1_ratio=0.15, penalty="elasticnet") + clf2.fit(X, y) + + assert clf1.score(X, y) != clf2.score(X, y) + + # check that the bounds of elastic net which should correspond to an l1 or + # l2 penalty depending of `l1_ratio` value. + clf_l1 = Perceptron(penalty="l1").fit(X, y) + clf_elasticnet = Perceptron(l1_ratio=1, penalty="elasticnet").fit(X, y) + assert_allclose(clf_l1.coef_, clf_elasticnet.coef_) + + clf_l2 = Perceptron(penalty="l2").fit(X, y) + clf_elasticnet = Perceptron(l1_ratio=0, penalty="elasticnet").fit(X, y) + assert_allclose(clf_l2.coef_, clf_elasticnet.coef_) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_quantile.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..53c1e1f071dcb11792163003bb3a3f3a290bb0aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_quantile.py @@ -0,0 +1,306 @@ +# Authors: David Dale +# Christian Lorentzen +# License: BSD 3 clause + +import numpy as np +import pytest +from pytest import approx +from scipy.optimize import minimize + +from sklearn.datasets import make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import HuberRegressor, QuantileRegressor +from sklearn.metrics import mean_pinball_loss +from sklearn.utils._testing import assert_allclose, skip_if_32bit +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + parse_version, + sp_version, +) + + +@pytest.fixture +def X_y_data(): + X, y = make_regression(n_samples=10, n_features=1, random_state=0, noise=1) + return X, y + + +@pytest.fixture +def default_solver(): + return "highs" if sp_version >= parse_version("1.6.0") else "interior-point" + + +@pytest.mark.skipif( + parse_version(sp_version.base_version) >= parse_version("1.11"), + reason="interior-point solver is not available in SciPy 1.11", +) +@pytest.mark.parametrize("solver", ["interior-point", "revised simplex"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_incompatible_solver_for_sparse_input(X_y_data, solver, csc_container): + X, y = X_y_data + X_sparse = csc_container(X) + err_msg = ( + f"Solver {solver} does not support sparse X. Use solver 'highs' for example." 
+ ) + with pytest.raises(ValueError, match=err_msg): + QuantileRegressor(solver=solver).fit(X_sparse, y) + + +@pytest.mark.parametrize("solver", ("highs-ds", "highs-ipm", "highs")) +@pytest.mark.skipif( + sp_version >= parse_version("1.6.0"), + reason="Solvers are available as of scipy 1.6.0", +) +def test_too_new_solver_methods_raise_error(X_y_data, solver): + """Test that highs solver raises for scipy<1.6.0.""" + X, y = X_y_data + with pytest.raises(ValueError, match="scipy>=1.6.0"): + QuantileRegressor(solver=solver).fit(X, y) + + +@pytest.mark.parametrize( + "quantile, alpha, intercept, coef", + [ + # for 50% quantile w/o regularization, any slope in [1, 10] is okay + [0.5, 0, 1, None], + # if positive error costs more, the slope is maximal + [0.51, 0, 1, 10], + # if negative error costs more, the slope is minimal + [0.49, 0, 1, 1], + # for a small lasso penalty, the slope is also minimal + [0.5, 0.01, 1, 1], + # for a large lasso penalty, the model predicts the constant median + [0.5, 100, 2, 0], + ], +) +def test_quantile_toy_example(quantile, alpha, intercept, coef, default_solver): + # test how different parameters affect a small intuitive example + X = [[0], [1], [1]] + y = [1, 2, 11] + model = QuantileRegressor( + quantile=quantile, alpha=alpha, solver=default_solver + ).fit(X, y) + assert_allclose(model.intercept_, intercept, atol=1e-2) + if coef is not None: + assert_allclose(model.coef_[0], coef, atol=1e-2) + if alpha < 100: + assert model.coef_[0] >= 1 + assert model.coef_[0] <= 10 + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_quantile_equals_huber_for_low_epsilon(fit_intercept, default_solver): + X, y = make_regression(n_samples=100, n_features=20, random_state=0, noise=1.0) + alpha = 1e-4 + huber = HuberRegressor( + epsilon=1 + 1e-4, alpha=alpha, fit_intercept=fit_intercept + ).fit(X, y) + quant = QuantileRegressor( + alpha=alpha, fit_intercept=fit_intercept, solver=default_solver + ).fit(X, y) + assert_allclose(huber.coef_, quant.coef_, atol=1e-1) + if fit_intercept: + assert huber.intercept_ == approx(quant.intercept_, abs=1e-1) + # check that we still predict fraction + assert np.mean(y < quant.predict(X)) == approx(0.5, abs=1e-1) + + +@pytest.mark.parametrize("q", [0.5, 0.9, 0.05]) +def test_quantile_estimates_calibration(q, default_solver): + # Test that model estimates percentage of points below the prediction + X, y = make_regression(n_samples=1000, n_features=20, random_state=0, noise=1.0) + quant = QuantileRegressor( + quantile=q, + alpha=0, + solver=default_solver, + ).fit(X, y) + assert np.mean(y < quant.predict(X)) == approx(q, abs=1e-2) + + +def test_quantile_sample_weight(default_solver): + # test that with unequal sample weights we still estimate weighted fraction + n = 1000 + X, y = make_regression(n_samples=n, n_features=5, random_state=0, noise=10.0) + weight = np.ones(n) + # when we increase weight of upper observations, + # estimate of quantile should go up + weight[y > y.mean()] = 100 + quant = QuantileRegressor(quantile=0.5, alpha=1e-8, solver=default_solver) + quant.fit(X, y, sample_weight=weight) + fraction_below = np.mean(y < quant.predict(X)) + assert fraction_below > 0.5 + weighted_fraction_below = np.average(y < quant.predict(X), weights=weight) + assert weighted_fraction_below == approx(0.5, abs=3e-2) + + +@pytest.mark.skipif( + sp_version < parse_version("1.6.0"), + reason="The `highs` solver is available from the 1.6.0 scipy version", +) +@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8]) +def 
test_asymmetric_error(quantile, default_solver): + """Test quantile regression for asymmetric distributed targets.""" + n_samples = 1000 + rng = np.random.RandomState(42) + X = np.concatenate( + ( + np.abs(rng.randn(n_samples)[:, None]), + -rng.randint(2, size=(n_samples, 1)), + ), + axis=1, + ) + intercept = 1.23 + coef = np.array([0.5, -2]) + # Take care that X @ coef + intercept > 0 + assert np.min(X @ coef + intercept) > 0 + # For an exponential distribution with rate lambda, e.g. exp(-lambda * x), + # the quantile at level q is: + # quantile(q) = - log(1 - q) / lambda + # scale = 1/lambda = -quantile(q) / log(1 - q) + y = rng.exponential( + scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples + ) + model = QuantileRegressor( + quantile=quantile, + alpha=0, + solver=default_solver, + ).fit(X, y) + # This test can be made to pass with any solver but in the interest + # of sparing continuous integration resources, the test is performed + # with the fastest solver only. + + assert model.intercept_ == approx(intercept, rel=0.2) + assert_allclose(model.coef_, coef, rtol=0.6) + assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2) + + # Now compare to Nelder-Mead optimization with L1 penalty + alpha = 0.01 + model.set_params(alpha=alpha).fit(X, y) + model_coef = np.r_[model.intercept_, model.coef_] + + def func(coef): + loss = mean_pinball_loss(y, X @ coef[1:] + coef[0], alpha=quantile) + L1 = np.sum(np.abs(coef[1:])) + return loss + alpha * L1 + + res = minimize( + fun=func, + x0=[1, 0, -1], + method="Nelder-Mead", + tol=1e-12, + options={"maxiter": 2000}, + ) + + assert func(model_coef) == approx(func(res.x)) + assert_allclose(model.intercept_, res.x[0]) + assert_allclose(model.coef_, res.x[1:]) + assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2) + + +@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8]) +def test_equivariance(quantile, default_solver): + """Test equivariace of quantile regression. + + See Koenker (2005) Quantile Regression, Chapter 2.2.3. 
+ """ + rng = np.random.RandomState(42) + n_samples, n_features = 100, 5 + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + noise=0, + random_state=rng, + shuffle=False, + ) + # make y asymmetric + y += rng.exponential(scale=100, size=y.shape) + params = dict(alpha=0, solver=default_solver) + model1 = QuantileRegressor(quantile=quantile, **params).fit(X, y) + + # coef(q; a*y, X) = a * coef(q; y, X) + a = 2.5 + model2 = QuantileRegressor(quantile=quantile, **params).fit(X, a * y) + assert model2.intercept_ == approx(a * model1.intercept_, rel=1e-5) + assert_allclose(model2.coef_, a * model1.coef_, rtol=1e-5) + + # coef(1-q; -a*y, X) = -a * coef(q; y, X) + model2 = QuantileRegressor(quantile=1 - quantile, **params).fit(X, -a * y) + assert model2.intercept_ == approx(-a * model1.intercept_, rel=1e-5) + assert_allclose(model2.coef_, -a * model1.coef_, rtol=1e-5) + + # coef(q; y + X @ g, X) = coef(q; y, X) + g + g_intercept, g_coef = rng.randn(), rng.randn(n_features) + model2 = QuantileRegressor(quantile=quantile, **params) + model2.fit(X, y + X @ g_coef + g_intercept) + assert model2.intercept_ == approx(model1.intercept_ + g_intercept) + assert_allclose(model2.coef_, model1.coef_ + g_coef, rtol=1e-6) + + # coef(q; y, X @ A) = A^-1 @ coef(q; y, X) + A = rng.randn(n_features, n_features) + model2 = QuantileRegressor(quantile=quantile, **params) + model2.fit(X @ A, y) + assert model2.intercept_ == approx(model1.intercept_, rel=1e-5) + assert_allclose(model2.coef_, np.linalg.solve(A, model1.coef_), rtol=1e-5) + + +@pytest.mark.skipif( + parse_version(sp_version.base_version) >= parse_version("1.11"), + reason="interior-point solver is not available in SciPy 1.11", +) +@pytest.mark.filterwarnings("ignore:`method='interior-point'` is deprecated") +def test_linprog_failure(): + """Test that linprog fails.""" + X = np.linspace(0, 10, num=10).reshape(-1, 1) + y = np.linspace(0, 10, num=10) + reg = QuantileRegressor( + alpha=0, solver="interior-point", solver_options={"maxiter": 1} + ) + + msg = "Linear programming for QuantileRegressor did not succeed." + with pytest.warns(ConvergenceWarning, match=msg): + reg.fit(X, y) + + +@skip_if_32bit +@pytest.mark.skipif( + sp_version <= parse_version("1.6.0"), + reason="Solvers are available as of scipy 1.6.0", +) +@pytest.mark.parametrize( + "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS +) +@pytest.mark.parametrize("solver", ["highs", "highs-ds", "highs-ipm"]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_sparse_input(sparse_container, solver, fit_intercept, default_solver): + """Test that sparse and dense X give same results.""" + X, y = make_regression(n_samples=100, n_features=20, random_state=1, noise=1.0) + X_sparse = sparse_container(X) + alpha = 1e-4 + quant_dense = QuantileRegressor( + alpha=alpha, fit_intercept=fit_intercept, solver=default_solver + ).fit(X, y) + quant_sparse = QuantileRegressor( + alpha=alpha, fit_intercept=fit_intercept, solver=solver + ).fit(X_sparse, y) + assert_allclose(quant_sparse.coef_, quant_dense.coef_, rtol=1e-2) + if fit_intercept: + assert quant_sparse.intercept_ == approx(quant_dense.intercept_) + # check that we still predict fraction + assert 0.45 <= np.mean(y < quant_sparse.predict(X_sparse)) <= 0.57 + + +def test_error_interior_point_future(X_y_data, monkeypatch): + """Check that we will raise a proper error when requesting + `solver='interior-point'` in SciPy >= 1.11. 
+ """ + X, y = X_y_data + import sklearn.linear_model._quantile + + with monkeypatch.context() as m: + m.setattr(sklearn.linear_model._quantile, "sp_version", parse_version("1.11.0")) + err_msg = "Solver interior-point is not anymore available in SciPy >= 1.11.0." + with pytest.raises(ValueError, match=err_msg): + QuantileRegressor(solver="interior-point").fit(X, y) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py new file mode 100644 index 0000000000000000000000000000000000000000..b442f6b207e708c7f7b2b989afd0c34ff492eddf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py @@ -0,0 +1,545 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal + +from sklearn.datasets import make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + LinearRegression, + OrthogonalMatchingPursuit, + RANSACRegressor, + Ridge, +) +from sklearn.linear_model._ransac import _dynamic_max_trials +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +# Generate coordinates of line +X = np.arange(-200, 200) +y = 0.2 * X + 20 +data = np.column_stack([X, y]) + +# Add some faulty data +rng = np.random.RandomState(1000) +outliers = np.unique(rng.randint(len(X), size=200)) +data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10 + +X = data[:, 0][:, np.newaxis] +y = data[:, 1] + + +def test_ransac_inliers_outliers(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, y) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_is_data_valid(): + def is_data_valid(X, y): + assert X.shape[0] == 2 + assert y.shape[0] == 2 + return False + + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + y = rng.rand(10, 1) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + is_data_valid=is_data_valid, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + +def test_ransac_is_model_valid(): + def is_model_valid(estimator, X, y): + assert X.shape[0] == 2 + assert y.shape[0] == 2 + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + is_model_valid=is_model_valid, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + +def test_ransac_max_trials(): + estimator = LinearRegression() + + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + max_trials=0, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + # there is a 1e-9 chance it will take these many trials. 
No good reason + # 1e-2 isn't enough, can still happen + # 2 is the what ransac defines as min_samples = X.shape[1] + 1 + max_trials = _dynamic_max_trials(len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9) + ransac_estimator = RANSACRegressor(estimator, min_samples=2) + for i in range(50): + ransac_estimator.set_params(min_samples=2, random_state=i) + ransac_estimator.fit(X, y) + assert ransac_estimator.n_trials_ < max_trials + 1 + + +def test_ransac_stop_n_inliers(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + stop_n_inliers=2, + random_state=0, + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.n_trials_ == 1 + + +def test_ransac_stop_score(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + stop_score=0, + random_state=0, + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.n_trials_ == 1 + + +def test_ransac_score(): + X = np.arange(100)[:, None] + y = np.zeros((100,)) + y[0] = 1 + y[1] = 100 + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=0.5, random_state=0 + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.score(X[2:], y[2:]) == 1 + assert ransac_estimator.score(X[:2], y[:2]) < 1 + + +def test_ransac_predict(): + X = np.arange(100)[:, None] + y = np.zeros((100,)) + y[0] = 1 + y[1] = 100 + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=0.5, random_state=0 + ) + ransac_estimator.fit(X, y) + + assert_array_equal(ransac_estimator.predict(X), np.zeros(100)) + + +def test_ransac_no_valid_data(): + def is_data_valid(X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_trials=5 + ) + + msg = "RANSAC could not find a valid consensus set" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 5 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +def test_ransac_no_valid_model(): + def is_model_valid(estimator, X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_model_valid=is_model_valid, max_trials=5 + ) + + msg = "RANSAC could not find a valid consensus set" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 0 + assert ransac_estimator.n_skips_invalid_model_ == 5 + + +def test_ransac_exceed_max_skips(): + def is_data_valid(X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_trials=5, max_skips=3 + ) + + msg = "RANSAC skipped more iterations than `max_skips`" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 4 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +def test_ransac_warn_exceed_max_skips(): + global cause_skip + cause_skip = False + + def is_data_valid(X, y): + global cause_skip + if not cause_skip: + cause_skip = True + return True + else: + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, 
is_data_valid=is_data_valid, max_skips=3, max_trials=5 + ) + warning_message = ( + "RANSAC found a valid consensus set but exited " + "early due to skipping more iterations than " + "`max_skips`. See estimator attributes for " + "diagnostics." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 4 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSR_CONTAINERS + CSC_CONTAINERS +) +def test_ransac_sparse(sparse_container): + X_sparse = sparse_container(X) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator.fit(X_sparse, y) + + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_none_estimator(): + estimator = LinearRegression() + + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_none_estimator = RANSACRegressor( + None, min_samples=2, residual_threshold=5, random_state=0 + ) + + ransac_estimator.fit(X, y) + ransac_none_estimator.fit(X, y) + + assert_array_almost_equal( + ransac_estimator.predict(X), ransac_none_estimator.predict(X) + ) + + +def test_ransac_min_n_samples(): + estimator = LinearRegression() + ransac_estimator1 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator2 = RANSACRegressor( + estimator, + min_samples=2.0 / X.shape[0], + residual_threshold=5, + random_state=0, + ) + ransac_estimator5 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator6 = RANSACRegressor(estimator, residual_threshold=5, random_state=0) + ransac_estimator7 = RANSACRegressor( + estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0 + ) + # GH #19390 + ransac_estimator8 = RANSACRegressor( + Ridge(), min_samples=None, residual_threshold=5, random_state=0 + ) + + ransac_estimator1.fit(X, y) + ransac_estimator2.fit(X, y) + ransac_estimator5.fit(X, y) + ransac_estimator6.fit(X, y) + + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator2.predict(X) + ) + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator5.predict(X) + ) + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator6.predict(X) + ) + + with pytest.raises(ValueError): + ransac_estimator7.fit(X, y) + + err_msg = "`min_samples` needs to be explicitly set" + with pytest.raises(ValueError, match=err_msg): + ransac_estimator8.fit(X, y) + + +def test_ransac_multi_dimensional_targets(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + + # 3-D target values + yyy = np.column_stack([y, y, y]) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, yyy) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_residual_loss(): + def loss_multi1(y_true, y_pred): + return np.sum(np.abs(y_true - y_pred), axis=1) + + def 
loss_multi2(y_true, y_pred): + return np.sum((y_true - y_pred) ** 2, axis=1) + + def loss_mono(y_true, y_pred): + return np.abs(y_true - y_pred) + + yyy = np.column_stack([y, y, y]) + + estimator = LinearRegression() + ransac_estimator0 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator1 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss=loss_multi1, + ) + ransac_estimator2 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss=loss_multi2, + ) + + # multi-dimensional + ransac_estimator0.fit(X, yyy) + ransac_estimator1.fit(X, yyy) + ransac_estimator2.fit(X, yyy) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator1.predict(X) + ) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + + # one-dimensional + ransac_estimator0.fit(X, y) + ransac_estimator2.loss = loss_mono + ransac_estimator2.fit(X, y) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + ransac_estimator3 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss="squared_error", + ) + ransac_estimator3.fit(X, y) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + + +def test_ransac_default_residual_threshold(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor(estimator, min_samples=2, random_state=0) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, y) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_dynamic_max_trials(): + # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in + # Hartley, R.~I. 
and Zisserman, A., 2004, + # Multiple View Geometry in Computer Vision, Second Edition, + # Cambridge University Press, ISBN: 0521540518 + + # e = 0%, min_samples = X + assert _dynamic_max_trials(100, 100, 2, 0.99) == 1 + + # e = 5%, min_samples = 2 + assert _dynamic_max_trials(95, 100, 2, 0.99) == 2 + # e = 10%, min_samples = 2 + assert _dynamic_max_trials(90, 100, 2, 0.99) == 3 + # e = 30%, min_samples = 2 + assert _dynamic_max_trials(70, 100, 2, 0.99) == 7 + # e = 50%, min_samples = 2 + assert _dynamic_max_trials(50, 100, 2, 0.99) == 17 + + # e = 5%, min_samples = 8 + assert _dynamic_max_trials(95, 100, 8, 0.99) == 5 + # e = 10%, min_samples = 8 + assert _dynamic_max_trials(90, 100, 8, 0.99) == 9 + # e = 30%, min_samples = 8 + assert _dynamic_max_trials(70, 100, 8, 0.99) == 78 + # e = 50%, min_samples = 8 + assert _dynamic_max_trials(50, 100, 8, 0.99) == 1177 + + # e = 0%, min_samples = 10 + assert _dynamic_max_trials(1, 100, 10, 0) == 0 + assert _dynamic_max_trials(1, 100, 10, 1) == float("inf") + + +def test_ransac_fit_sample_weight(): + ransac_estimator = RANSACRegressor(random_state=0) + n_samples = y.shape[0] + weights = np.ones(n_samples) + ransac_estimator.fit(X, y, weights) + # sanity check + assert ransac_estimator.inlier_mask_.shape[0] == n_samples + + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + # check that mask is correct + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where + # X = X1 repeated n1 times, X2 repeated n2 times and so forth + random_state = check_random_state(0) + X_ = random_state.randint(0, 200, [10, 1]) + y_ = np.ndarray.flatten(0.2 * X_ + 2) + sample_weight = random_state.randint(0, 10, 10) + outlier_X = random_state.randint(0, 1000, [1, 1]) + outlier_weight = random_state.randint(0, 10, 1) + outlier_y = random_state.randint(-1000, 0, 1) + + X_flat = np.append( + np.repeat(X_, sample_weight, axis=0), + np.repeat(outlier_X, outlier_weight, axis=0), + axis=0, + ) + y_flat = np.ndarray.flatten( + np.append( + np.repeat(y_, sample_weight, axis=0), + np.repeat(outlier_y, outlier_weight, axis=0), + axis=0, + ) + ) + ransac_estimator.fit(X_flat, y_flat) + ref_coef_ = ransac_estimator.estimator_.coef_ + + sample_weight = np.append(sample_weight, outlier_weight) + X_ = np.append(X_, outlier_X, axis=0) + y_ = np.append(y_, outlier_y) + ransac_estimator.fit(X_, y_, sample_weight) + + assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_) + + # check that if estimator.fit doesn't support + # sample_weight, raises error + estimator = OrthogonalMatchingPursuit() + ransac_estimator = RANSACRegressor(estimator, min_samples=10) + + err_msg = f"{estimator.__class__.__name__} does not support sample_weight." 
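+    # OrthogonalMatchingPursuit.fit takes no sample_weight argument, so passing
+    # weights through RANSACRegressor is expected to raise the error matched below.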
+ with pytest.raises(ValueError, match=err_msg): + ransac_estimator.fit(X, y, weights) + + +def test_ransac_final_model_fit_sample_weight(): + X, y = make_regression(n_samples=1000, random_state=10) + rng = check_random_state(42) + sample_weight = rng.randint(1, 4, size=y.shape[0]) + sample_weight = sample_weight / sample_weight.sum() + ransac = RANSACRegressor(estimator=LinearRegression(), random_state=0) + ransac.fit(X, y, sample_weight=sample_weight) + + final_model = LinearRegression() + mask_samples = ransac.inlier_mask_ + final_model.fit( + X[mask_samples], y[mask_samples], sample_weight=sample_weight[mask_samples] + ) + + assert_allclose(ransac.estimator_.coef_, final_model.coef_, atol=1e-12) + + +def test_perfect_horizontal_line(): + """Check that we can fit a line where all samples are inliers. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19497 + """ + X = np.arange(100)[:, None] + y = np.zeros((100,)) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor(estimator, random_state=0) + ransac_estimator.fit(X, y) + + assert_allclose(ransac_estimator.estimator_.coef_, 0.0) + assert_allclose(ransac_estimator.estimator_.intercept_, 0.0) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ridge.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..19ff441a068127c4bfcf4b2a67afb00cbeff1409 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ridge.py @@ -0,0 +1,2064 @@ +import warnings +from itertools import product + +import numpy as np +import pytest +from scipy import linalg + +from sklearn import datasets +from sklearn.datasets import ( + make_classification, + make_low_rank_matrix, + make_multilabel_classification, + make_regression, +) +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + LinearRegression, + Ridge, + RidgeClassifier, + RidgeClassifierCV, + RidgeCV, + ridge_regression, +) +from sklearn.linear_model._ridge import ( + _check_gcv_mode, + _RidgeGCV, + _solve_cholesky, + _solve_cholesky_kernel, + _solve_lbfgs, + _solve_svd, + _X_CenterStackOp, +) +from sklearn.metrics import get_scorer, make_scorer, mean_squared_error +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + KFold, + LeaveOneOut, + cross_val_predict, +) +from sklearn.preprocessing import minmax_scale +from sklearn.utils import _IS_32BIT, check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +SOLVERS = ["svd", "sparse_cg", "cholesky", "lsqr", "sag", "saga"] +SPARSE_SOLVERS_WITH_INTERCEPT = ("sparse_cg", "sag") +SPARSE_SOLVERS_WITHOUT_INTERCEPT = ("sparse_cg", "cholesky", "lsqr", "sag", "saga") + +diabetes = datasets.load_diabetes() +X_diabetes, y_diabetes = diabetes.data, diabetes.target +ind = np.arange(X_diabetes.shape[0]) +rng = np.random.RandomState(0) +rng.shuffle(ind) +ind = ind[:200] +X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind] + +iris = datasets.load_iris() +X_iris, y_iris = iris.data, iris.target + + +def _accuracy_callable(y_test, y_pred): + return np.mean(y_test == y_pred) + + +def _mean_squared_error_callable(y_test, y_pred): + return ((y_test - y_pred) ** 2).mean() 
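+
+# Note: the two callables above use the plain metric signature (y_true, y_pred)
+# and are not scorer objects; tests below wrap them when a scorer is required,
+# e.g. `make_scorer(scoring) if callable(scoring) else scoring`.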
+ + +@pytest.fixture(params=["long", "wide"]) +def ols_ridge_dataset(global_random_seed, request): + """Dataset with OLS and Ridge solutions, well conditioned X. + + The construction is based on the SVD decomposition of X = U S V'. + + Parameters + ---------- + type : {"long", "wide"} + If "long", then n_samples > n_features. + If "wide", then n_features > n_samples. + + For "wide", we return the minimum norm solution w = X' (XX')^-1 y: + + min ||w||_2 subject to X w = y + + Returns + ------- + X : ndarray + Last column of 1, i.e. intercept. + y : ndarray + coef_ols : ndarray of shape + Minimum norm OLS solutions, i.e. min ||X w - y||_2_2 (with minimum ||w||_2 in + case of ambiguity) + Last coefficient is intercept. + coef_ridge : ndarray of shape (5,) + Ridge solution with alpha=1, i.e. min ||X w - y||_2_2 + ||w||_2^2. + Last coefficient is intercept. + """ + # Make larger dim more than double as big as the smaller one. + # This helps when constructing singular matrices like (X, X). + if request.param == "long": + n_samples, n_features = 12, 4 + else: + n_samples, n_features = 4, 12 + k = min(n_samples, n_features) + rng = np.random.RandomState(global_random_seed) + X = make_low_rank_matrix( + n_samples=n_samples, n_features=n_features, effective_rank=k, random_state=rng + ) + X[:, -1] = 1 # last columns acts as intercept + U, s, Vt = linalg.svd(X) + assert np.all(s > 1e-3) # to be sure + U1, U2 = U[:, :k], U[:, k:] + Vt1, _ = Vt[:k, :], Vt[k:, :] + + if request.param == "long": + # Add a term that vanishes in the product X'y + coef_ols = rng.uniform(low=-10, high=10, size=n_features) + y = X @ coef_ols + y += U2 @ rng.normal(size=n_samples - n_features) ** 2 + else: + y = rng.uniform(low=-10, high=10, size=n_samples) + # w = X'(XX')^-1 y = V s^-1 U' y + coef_ols = Vt1.T @ np.diag(1 / s) @ U1.T @ y + + # Add penalty alpha * ||coef||_2^2 for alpha=1 and solve via normal equations. + # Note that the problem is well conditioned such that we get accurate results. + alpha = 1 + d = alpha * np.identity(n_features) + d[-1, -1] = 0 # intercept gets no penalty + coef_ridge = linalg.solve(X.T @ X + d, X.T @ y) + + # To be sure + R_OLS = y - X @ coef_ols + R_Ridge = y - X @ coef_ridge + assert np.linalg.norm(R_OLS) < np.linalg.norm(R_Ridge) + + return X, y, coef_ols, coef_ridge + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression(solver, fit_intercept, ols_ridge_dataset, global_random_seed): + """Test that Ridge converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + """ + X, y, _, coef = ols_ridge_dataset + alpha = 1.0 # because ols_ridge_dataset uses this. + params = dict( + alpha=alpha, + fit_intercept=True, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + + # Calculate residuals and R2. + res_null = y - np.mean(y) + res_Ridge = y - X @ coef + R2_Ridge = 1 - np.sum(res_Ridge**2) / np.sum(res_null**2) + + model = Ridge(**params) + X = X[:, :-1] # remove intercept + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + model.fit(X, y) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + assert model.score(X, y) == pytest.approx(R2_Ridge) + + # Same with sample_weight. 
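+    # A unit weight for every sample is the neutral weighting, so the refit below
+    # must reproduce the unweighted coefficients, intercept and R2 score.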
+ model = Ridge(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + assert model.score(X, y) == pytest.approx(R2_Ridge) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_hstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that Ridge converges for all solvers to correct solution on hstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2. + For long X, [X, X] is a singular matrix. + """ + X, y, _, coef = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 1.0 # because ols_ridge_dataset uses this. + + model = Ridge( + alpha=alpha / 2, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1) + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + model.fit(X, y) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + # coefficients are not all on the same magnitude, adding a small atol to + # make this test less brittle + assert_allclose(model.coef_, np.r_[coef, coef], atol=1e-8) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_vstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that Ridge converges for all solvers to correct solution on vstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X], [y] + [X], [y] with 2 * alpha. + For wide X, [X', X'] is a singular matrix. + """ + X, y, _, coef = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 1.0 # because ols_ridge_dataset uses this. + + model = Ridge( + alpha=2 * alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + model.fit(X, y) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + # coefficients are not all on the same magnitude, adding a small atol to + # make this test less brittle + assert_allclose(model.coef_, coef, atol=1e-8) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_unpenalized( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that unpenalized Ridge = OLS converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + Note: This checks the minimum norm solution for wide X, i.e. 
+ n_samples < n_features: + min ||w||_2 subject to X w = y + """ + X, y, coef, _ = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 0 # OLS + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + + model = Ridge(**params) + # Note that cholesky might give a warning: "Singular matrix in solving dual + # problem. Using least-squares solution instead." + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + model.fit(X, y) + + # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails + # for the wide/fat case with n_features > n_samples. The current Ridge solvers do + # NOT return the minimum norm solution with fit_intercept=True. + if n_samples > n_features or not fit_intercept: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + else: + # As it is an underdetermined problem, residuals = 0. This shows that we get + # a solution to X w = y .... + assert_allclose(model.predict(X), y) + assert_allclose(X @ coef + intercept, y) + # But it is not the minimum norm solution. (This should be equal.) + assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm( + np.r_[intercept, coef] + ) + + pytest.xfail(reason="Ridge does not provide the minimum norm solution.") + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_unpenalized_hstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that unpenalized Ridge = OLS converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + OLS fit on [X] is the same as fit on [X, X]/2. + For long X, [X, X] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to min ||X w - y||_2 + """ + X, y, coef, _ = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 0 # OLS + + model = Ridge( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + model.fit(X, y) + + if n_samples > n_features or not fit_intercept: + assert model.intercept_ == pytest.approx(intercept) + if solver == "cholesky": + # Cholesky is a bad choice for singular X. + pytest.skip() + assert_allclose(model.coef_, np.r_[coef, coef]) + else: + # FIXME: Same as in test_ridge_regression_unpenalized. + # As it is an underdetermined problem, residuals = 0. This shows that we get + # a solution to X w = y .... + assert_allclose(model.predict(X), y) + # But it is not the minimum norm solution. (This should be equal.) 
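+        # The reference vector [intercept, coef, coef] also reproduces y exactly in
+        # this wide setting, which is why it serves as the minimum norm benchmark.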
+ assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm( + np.r_[intercept, coef, coef] + ) + + pytest.xfail(reason="Ridge does not provide the minimum norm solution.") + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, np.r_[coef, coef]) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_ridge_regression_unpenalized_vstacked_X( + solver, fit_intercept, ols_ridge_dataset, global_random_seed +): + """Test that unpenalized Ridge = OLS converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + OLS fit on [X] is the same as fit on [X], [y] + [X], [y]. + For wide X, [X', X'] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to X w = y + """ + X, y, coef, _ = ols_ridge_dataset + n_samples, n_features = X.shape + alpha = 0 # OLS + + model = Ridge( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ("sag", "saga") else 1e-10, + random_state=global_random_seed, + ) + + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + model.fit(X, y) + + if n_samples > n_features or not fit_intercept: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + else: + # FIXME: Same as in test_ridge_regression_unpenalized. + # As it is an underdetermined problem, residuals = 0. This shows that we get + # a solution to X w = y .... + assert_allclose(model.predict(X), y) + # But it is not the minimum norm solution. (This should be equal.) + assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm( + np.r_[intercept, coef] + ) + + pytest.xfail(reason="Ridge does not provide the minimum norm solution.") + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("alpha", [1.0, 1e-2]) +def test_ridge_regression_sample_weights( + solver, + fit_intercept, + sparse_container, + alpha, + ols_ridge_dataset, + global_random_seed, +): + """Test that Ridge with sample weights gives correct results. 
+ + We use the following trick: + ||y - Xw||_2 = (z - Aw)' W (z - Aw) + for z=[y, y], A' = [X', X'] (vstacked), and W[:n/2] + W[n/2:] = 1, W=diag(W) + """ + if sparse_container is not None: + if fit_intercept and solver not in SPARSE_SOLVERS_WITH_INTERCEPT: + pytest.skip() + elif not fit_intercept and solver not in SPARSE_SOLVERS_WITHOUT_INTERCEPT: + pytest.skip() + X, y, _, coef = ols_ridge_dataset + n_samples, n_features = X.shape + sw = rng.uniform(low=0, high=1, size=n_samples) + + model = Ridge( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-15 if solver in ["sag", "saga"] else 1e-10, + max_iter=100_000, + random_state=global_random_seed, + ) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + y = np.r_[y, y] + sw = np.r_[sw, 1 - sw] * alpha + if fit_intercept: + intercept = coef[-1] + else: + X = X - X.mean(axis=0) + y = y - y.mean() + intercept = 0 + if sparse_container is not None: + X = sparse_container(X) + model.fit(X, y, sample_weight=sw) + coef = coef[:-1] + + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef) + + +def test_primal_dual_relationship(): + y = y_diabetes.reshape(-1, 1) + coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2]) + K = np.dot(X_diabetes, X_diabetes.T) + dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2]) + coef2 = np.dot(X_diabetes.T, dual_coef).T + assert_array_almost_equal(coef, coef2) + + +def test_ridge_regression_convergence_fail(): + rng = np.random.RandomState(0) + y = rng.randn(5) + X = rng.randn(5, 10) + warning_message = r"sparse_cg did not converge after" r" [0-9]+ iterations." + with pytest.warns(ConvergenceWarning, match=warning_message): + ridge_regression( + X, y, alpha=1.0, solver="sparse_cg", tol=0.0, max_iter=None, verbose=1 + ) + + +def test_ridge_shapes_type(): + # Test shape of coef_ and intercept_ + rng = np.random.RandomState(0) + n_samples, n_features = 5, 10 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + Y1 = y[:, np.newaxis] + Y = np.c_[y, 1 + y] + + ridge = Ridge() + + ridge.fit(X, y) + assert ridge.coef_.shape == (n_features,) + assert ridge.intercept_.shape == () + assert isinstance(ridge.coef_, np.ndarray) + assert isinstance(ridge.intercept_, float) + + ridge.fit(X, Y1) + assert ridge.coef_.shape == (1, n_features) + assert ridge.intercept_.shape == (1,) + assert isinstance(ridge.coef_, np.ndarray) + assert isinstance(ridge.intercept_, np.ndarray) + + ridge.fit(X, Y) + assert ridge.coef_.shape == (2, n_features) + assert ridge.intercept_.shape == (2,) + assert isinstance(ridge.coef_, np.ndarray) + assert isinstance(ridge.intercept_, np.ndarray) + + +def test_ridge_intercept(): + # Test intercept with multiple targets GH issue #708 + rng = np.random.RandomState(0) + n_samples, n_features = 5, 10 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + Y = np.c_[y, 1.0 + y] + + ridge = Ridge() + + ridge.fit(X, y) + intercept = ridge.intercept_ + + ridge.fit(X, Y) + assert_almost_equal(ridge.intercept_[0], intercept) + assert_almost_equal(ridge.intercept_[1], intercept + 1.0) + + +def test_ridge_vs_lstsq(): + # On alpha=0., Ridge and OLS yield the same solution. 
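+    # With fit_intercept=False and more samples than features (the random X below
+    # is full column rank), the least squares problem has a unique solution, so
+    # both estimators must agree on it.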
+ + rng = np.random.RandomState(0) + # we need more samples than features + n_samples, n_features = 5, 4 + y = rng.randn(n_samples) + X = rng.randn(n_samples, n_features) + + ridge = Ridge(alpha=0.0, fit_intercept=False) + ols = LinearRegression(fit_intercept=False) + + ridge.fit(X, y) + ols.fit(X, y) + assert_almost_equal(ridge.coef_, ols.coef_) + + ridge.fit(X, y) + ols.fit(X, y) + assert_almost_equal(ridge.coef_, ols.coef_) + + +def test_ridge_individual_penalties(): + # Tests the ridge object using individual penalties + + rng = np.random.RandomState(42) + + n_samples, n_features, n_targets = 20, 10, 5 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples, n_targets) + + penalties = np.arange(n_targets) + + coef_cholesky = np.array( + [ + Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_ + for alpha, target in zip(penalties, y.T) + ] + ) + + coefs_indiv_pen = [ + Ridge(alpha=penalties, solver=solver, tol=1e-12).fit(X, y).coef_ + for solver in ["svd", "sparse_cg", "lsqr", "cholesky", "sag", "saga"] + ] + for coef_indiv_pen in coefs_indiv_pen: + assert_array_almost_equal(coef_cholesky, coef_indiv_pen) + + # Test error is raised when number of targets and penalties do not match. + ridge = Ridge(alpha=penalties[:-1]) + err_msg = "Number of targets and number of penalties do not correspond: 4 != 5" + with pytest.raises(ValueError, match=err_msg): + ridge.fit(X, y) + + +@pytest.mark.parametrize("n_col", [(), (1,), (3,)]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_X_CenterStackOp(n_col, csr_container): + rng = np.random.RandomState(0) + X = rng.randn(11, 8) + X_m = rng.randn(8) + sqrt_sw = rng.randn(len(X)) + Y = rng.randn(11, *n_col) + A = rng.randn(9, *n_col) + operator = _X_CenterStackOp(csr_container(X), X_m, sqrt_sw) + reference_operator = np.hstack([X - sqrt_sw[:, None] * X_m, sqrt_sw[:, None]]) + assert_allclose(reference_operator.dot(A), operator.dot(A)) + assert_allclose(reference_operator.T.dot(Y), operator.T.dot(Y)) + + +@pytest.mark.parametrize("shape", [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)]) +@pytest.mark.parametrize("uniform_weights", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_compute_gram(shape, uniform_weights, csr_container): + rng = np.random.RandomState(0) + X = rng.randn(*shape) + if uniform_weights: + sw = np.ones(X.shape[0]) + else: + sw = rng.chisquare(1, shape[0]) + sqrt_sw = np.sqrt(sw) + X_mean = np.average(X, axis=0, weights=sw) + X_centered = (X - X_mean) * sqrt_sw[:, None] + true_gram = X_centered.dot(X_centered.T) + X_sparse = csr_container(X * sqrt_sw[:, None]) + gcv = _RidgeGCV(fit_intercept=True) + computed_gram, computed_mean = gcv._compute_gram(X_sparse, sqrt_sw) + assert_allclose(X_mean, computed_mean) + assert_allclose(true_gram, computed_gram) + + +@pytest.mark.parametrize("shape", [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)]) +@pytest.mark.parametrize("uniform_weights", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_compute_covariance(shape, uniform_weights, csr_container): + rng = np.random.RandomState(0) + X = rng.randn(*shape) + if uniform_weights: + sw = np.ones(X.shape[0]) + else: + sw = rng.chisquare(1, shape[0]) + sqrt_sw = np.sqrt(sw) + X_mean = np.average(X, axis=0, weights=sw) + X_centered = (X - X_mean) * sqrt_sw[:, None] + true_covariance = X_centered.T.dot(X_centered) + X_sparse = csr_container(X * sqrt_sw[:, None]) + gcv = _RidgeGCV(fit_intercept=True) + computed_cov, computed_mean = 
gcv._compute_covariance(X_sparse, sqrt_sw) + assert_allclose(X_mean, computed_mean) + assert_allclose(true_covariance, computed_cov) + + +def _make_sparse_offset_regression( + n_samples=100, + n_features=100, + proportion_nonzero=0.5, + n_informative=10, + n_targets=1, + bias=13.0, + X_offset=30.0, + noise=30.0, + shuffle=True, + coef=False, + positive=False, + random_state=None, +): + X, y, c = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_informative, + n_targets=n_targets, + bias=bias, + noise=noise, + shuffle=shuffle, + coef=True, + random_state=random_state, + ) + if n_features == 1: + c = np.asarray([c]) + X += X_offset + mask = ( + np.random.RandomState(random_state).binomial(1, proportion_nonzero, X.shape) > 0 + ) + removed_X = X.copy() + X[~mask] = 0.0 + removed_X[mask] = 0.0 + y -= removed_X.dot(c) + if positive: + y += X.dot(np.abs(c) + 1 - c) + c = np.abs(c) + 1 + if n_features == 1: + c = c[0] + if coef: + return X, y, c + return X, y + + +@pytest.mark.parametrize( + "solver, sparse_container", + ( + (solver, sparse_container) + for (solver, sparse_container) in product( + ["cholesky", "sag", "sparse_cg", "lsqr", "saga", "ridgecv"], + [None] + CSR_CONTAINERS, + ) + if sparse_container is None or solver in ["sparse_cg", "ridgecv"] + ), +) +@pytest.mark.parametrize( + "n_samples,dtype,proportion_nonzero", + [(20, "float32", 0.1), (40, "float32", 1.0), (20, "float64", 0.2)], +) +@pytest.mark.parametrize("seed", np.arange(3)) +def test_solver_consistency( + solver, proportion_nonzero, n_samples, dtype, sparse_container, seed +): + alpha = 1.0 + noise = 50.0 if proportion_nonzero > 0.9 else 500.0 + X, y = _make_sparse_offset_regression( + bias=10, + n_features=30, + proportion_nonzero=proportion_nonzero, + noise=noise, + random_state=seed, + n_samples=n_samples, + ) + + # Manually scale the data to avoid pathological cases. We use + # minmax_scale to deal with the sparse case without breaking + # the sparsity pattern. 
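+    # The dense "svd" reference fit below is computed before the optional dtype
+    # cast and sparse conversion, so each solver is checked against the same target.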
+ X = minmax_scale(X) + + svd_ridge = Ridge(solver="svd", alpha=alpha).fit(X, y) + X = X.astype(dtype, copy=False) + y = y.astype(dtype, copy=False) + if sparse_container is not None: + X = sparse_container(X) + if solver == "ridgecv": + ridge = RidgeCV(alphas=[alpha]) + else: + ridge = Ridge(solver=solver, tol=1e-10, alpha=alpha) + ridge.fit(X, y) + assert_allclose(ridge.coef_, svd_ridge.coef_, atol=1e-3, rtol=1e-3) + assert_allclose(ridge.intercept_, svd_ridge.intercept_, atol=1e-3, rtol=1e-3) + + +@pytest.mark.parametrize("gcv_mode", ["svd", "eigen"]) +@pytest.mark.parametrize("X_container", [np.asarray] + CSR_CONTAINERS) +@pytest.mark.parametrize("X_shape", [(11, 8), (11, 20)]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize( + "y_shape, noise", + [ + ((11,), 1.0), + ((11, 1), 30.0), + ((11, 3), 150.0), + ], +) +def test_ridge_gcv_vs_ridge_loo_cv( + gcv_mode, X_container, X_shape, y_shape, fit_intercept, noise +): + n_samples, n_features = X_shape + n_targets = y_shape[-1] if len(y_shape) == 2 else 1 + X, y = _make_sparse_offset_regression( + n_samples=n_samples, + n_features=n_features, + n_targets=n_targets, + random_state=0, + shuffle=False, + noise=noise, + n_informative=5, + ) + y = y.reshape(y_shape) + + alphas = [1e-3, 0.1, 1.0, 10.0, 1e3] + loo_ridge = RidgeCV( + cv=n_samples, + fit_intercept=fit_intercept, + alphas=alphas, + scoring="neg_mean_squared_error", + ) + gcv_ridge = RidgeCV( + gcv_mode=gcv_mode, + fit_intercept=fit_intercept, + alphas=alphas, + ) + + loo_ridge.fit(X, y) + + X_gcv = X_container(X) + gcv_ridge.fit(X_gcv, y) + + assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_) + assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-3) + + +def test_ridge_loo_cv_asym_scoring(): + # checking on asymmetric scoring + scoring = "explained_variance" + n_samples, n_features = 10, 5 + n_targets = 1 + X, y = _make_sparse_offset_regression( + n_samples=n_samples, + n_features=n_features, + n_targets=n_targets, + random_state=0, + shuffle=False, + noise=1, + n_informative=5, + ) + + alphas = [1e-3, 0.1, 1.0, 10.0, 1e3] + loo_ridge = RidgeCV( + cv=n_samples, fit_intercept=True, alphas=alphas, scoring=scoring + ) + + gcv_ridge = RidgeCV(fit_intercept=True, alphas=alphas, scoring=scoring) + + loo_ridge.fit(X, y) + gcv_ridge.fit(X, y) + + assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_) + assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-3) + + +@pytest.mark.parametrize("gcv_mode", ["svd", "eigen"]) +@pytest.mark.parametrize("X_container", [np.asarray] + CSR_CONTAINERS) +@pytest.mark.parametrize("n_features", [8, 20]) +@pytest.mark.parametrize( + "y_shape, fit_intercept, noise", + [ + ((11,), True, 1.0), + ((11, 1), True, 20.0), + ((11, 3), True, 150.0), + ((11, 3), False, 30.0), + ], +) +def test_ridge_gcv_sample_weights( + gcv_mode, X_container, fit_intercept, n_features, y_shape, noise +): + alphas = [1e-3, 0.1, 1.0, 10.0, 1e3] + rng = np.random.RandomState(0) + n_targets = y_shape[-1] if len(y_shape) == 2 else 1 + X, y = _make_sparse_offset_regression( + n_samples=11, + n_features=n_features, + n_targets=n_targets, + random_state=0, + shuffle=False, + noise=noise, + ) + y = y.reshape(y_shape) + + sample_weight = 3 * rng.randn(len(X)) + sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) + indices = np.repeat(np.arange(X.shape[0]), sample_weight) + 
sample_weight = sample_weight.astype(float) + X_tiled, y_tiled = X[indices], y[indices] + + cv = GroupKFold(n_splits=X.shape[0]) + splits = cv.split(X_tiled, y_tiled, groups=indices) + kfold = RidgeCV( + alphas=alphas, + cv=splits, + scoring="neg_mean_squared_error", + fit_intercept=fit_intercept, + ) + kfold.fit(X_tiled, y_tiled) + + ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept) + splits = cv.split(X_tiled, y_tiled, groups=indices) + predictions = cross_val_predict(ridge_reg, X_tiled, y_tiled, cv=splits) + kfold_errors = (y_tiled - predictions) ** 2 + kfold_errors = [ + np.sum(kfold_errors[indices == i], axis=0) for i in np.arange(X.shape[0]) + ] + kfold_errors = np.asarray(kfold_errors) + + X_gcv = X_container(X) + gcv_ridge = RidgeCV( + alphas=alphas, + store_cv_values=True, + gcv_mode=gcv_mode, + fit_intercept=fit_intercept, + ) + gcv_ridge.fit(X_gcv, y, sample_weight=sample_weight) + if len(y_shape) == 2: + gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] + else: + gcv_errors = gcv_ridge.cv_values_[:, alphas.index(kfold.alpha_)] + + assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_) + assert_allclose(gcv_errors, kfold_errors, rtol=1e-3) + assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-3) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize( + "mode, mode_n_greater_than_p, mode_p_greater_than_n", + [ + (None, "svd", "eigen"), + ("auto", "svd", "eigen"), + ("eigen", "eigen", "eigen"), + ("svd", "svd", "svd"), + ], +) +def test_check_gcv_mode_choice( + sparse_container, mode, mode_n_greater_than_p, mode_p_greater_than_n +): + X, _ = make_regression(n_samples=5, n_features=2) + if sparse_container is not None: + X = sparse_container(X) + assert _check_gcv_mode(X, mode) == mode_n_greater_than_p + assert _check_gcv_mode(X.T, mode) == mode_p_greater_than_n + + +def _test_ridge_loo(sparse_container): + # test that can work with both dense or sparse matrices + n_samples = X_diabetes.shape[0] + + ret = [] + + if sparse_container is None: + X, fit_intercept = X_diabetes, True + else: + X, fit_intercept = sparse_container(X_diabetes), False + ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept) + + # check best alpha + ridge_gcv.fit(X, y_diabetes) + alpha_ = ridge_gcv.alpha_ + ret.append(alpha_) + + # check that we get same best alpha with custom loss_func + f = ignore_warnings + scoring = make_scorer(mean_squared_error, greater_is_better=False) + ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring) + f(ridge_gcv2.fit)(X, y_diabetes) + assert ridge_gcv2.alpha_ == pytest.approx(alpha_) + + # check that we get same best alpha with custom score_func + def func(x, y): + return -mean_squared_error(x, y) + + scoring = make_scorer(func) + ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring) + f(ridge_gcv3.fit)(X, y_diabetes) + assert ridge_gcv3.alpha_ == pytest.approx(alpha_) + + # check that we get same best alpha with a scorer + scorer = get_scorer("neg_mean_squared_error") + ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer) + ridge_gcv4.fit(X, y_diabetes) + assert ridge_gcv4.alpha_ == pytest.approx(alpha_) + + # check that we get same best alpha with sample weights + if sparse_container is None: + ridge_gcv.fit(X, y_diabetes, sample_weight=np.ones(n_samples)) + assert ridge_gcv.alpha_ == pytest.approx(alpha_) + + # simulate several responses + Y = np.vstack((y_diabetes, y_diabetes)).T + + ridge_gcv.fit(X, Y) + Y_pred 
= ridge_gcv.predict(X) + ridge_gcv.fit(X, y_diabetes) + y_pred = ridge_gcv.predict(X) + + assert_allclose(np.vstack((y_pred, y_pred)).T, Y_pred, rtol=1e-5) + + return ret + + +def _test_ridge_cv(sparse_container): + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + ridge_cv = RidgeCV() + ridge_cv.fit(X, y_diabetes) + ridge_cv.predict(X) + + assert len(ridge_cv.coef_.shape) == 1 + assert type(ridge_cv.intercept_) == np.float64 + + cv = KFold(5) + ridge_cv.set_params(cv=cv) + ridge_cv.fit(X, y_diabetes) + ridge_cv.predict(X) + + assert len(ridge_cv.coef_.shape) == 1 + assert type(ridge_cv.intercept_) == np.float64 + + +@pytest.mark.parametrize( + "ridge, make_dataset", + [ + (RidgeCV(store_cv_values=False), make_regression), + (RidgeClassifierCV(store_cv_values=False), make_classification), + ], +) +def test_ridge_gcv_cv_values_not_stored(ridge, make_dataset): + # Check that `cv_values_` is not stored when store_cv_values is False + X, y = make_dataset(n_samples=6, random_state=42) + ridge.fit(X, y) + assert not hasattr(ridge, "cv_values_") + + +@pytest.mark.parametrize( + "ridge, make_dataset", + [(RidgeCV(), make_regression), (RidgeClassifierCV(), make_classification)], +) +@pytest.mark.parametrize("cv", [None, 3]) +def test_ridge_best_score(ridge, make_dataset, cv): + # check that the best_score_ is store + X, y = make_dataset(n_samples=6, random_state=42) + ridge.set_params(store_cv_values=False, cv=cv) + ridge.fit(X, y) + assert hasattr(ridge, "best_score_") + assert isinstance(ridge.best_score_, float) + + +def test_ridge_cv_individual_penalties(): + # Tests the ridge_cv object optimizing individual penalties for each target + + rng = np.random.RandomState(42) + + # Create random dataset with multiple targets. Each target should have + # a different optimal alpha. + n_samples, n_features, n_targets = 20, 5, 3 + y = rng.randn(n_samples, n_targets) + X = ( + np.dot(y[:, [0]], np.ones((1, n_features))) + + np.dot(y[:, [1]], 0.05 * np.ones((1, n_features))) + + np.dot(y[:, [2]], 0.001 * np.ones((1, n_features))) + + rng.randn(n_samples, n_features) + ) + + alphas = (1, 100, 1000) + + # Find optimal alpha for each target + optimal_alphas = [RidgeCV(alphas=alphas).fit(X, target).alpha_ for target in y.T] + + # Find optimal alphas for all targets simultaneously + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True).fit(X, y) + assert_array_equal(optimal_alphas, ridge_cv.alpha_) + + # The resulting regression weights should incorporate the different + # alpha values. 
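+    # (Ridge accepts an array of per-target alphas, so refitting with the alphas
+    # selected by RidgeCV must reproduce the cross-validated coefficients)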
+ assert_array_almost_equal( + Ridge(alpha=ridge_cv.alpha_).fit(X, y).coef_, ridge_cv.coef_ + ) + + # Test shape of alpha_ and cv_values_ + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True, store_cv_values=True).fit( + X, y + ) + assert ridge_cv.alpha_.shape == (n_targets,) + assert ridge_cv.best_score_.shape == (n_targets,) + assert ridge_cv.cv_values_.shape == (n_samples, len(alphas), n_targets) + + # Test edge case of there being only one alpha value + ridge_cv = RidgeCV(alphas=1, alpha_per_target=True, store_cv_values=True).fit(X, y) + assert ridge_cv.alpha_.shape == (n_targets,) + assert ridge_cv.best_score_.shape == (n_targets,) + assert ridge_cv.cv_values_.shape == (n_samples, n_targets, 1) + + # Test edge case of there being only one target + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True, store_cv_values=True).fit( + X, y[:, 0] + ) + assert np.isscalar(ridge_cv.alpha_) + assert np.isscalar(ridge_cv.best_score_) + assert ridge_cv.cv_values_.shape == (n_samples, len(alphas)) + + # Try with a custom scoring function + ridge_cv = RidgeCV(alphas=alphas, alpha_per_target=True, scoring="r2").fit(X, y) + assert_array_equal(optimal_alphas, ridge_cv.alpha_) + assert_array_almost_equal( + Ridge(alpha=ridge_cv.alpha_).fit(X, y).coef_, ridge_cv.coef_ + ) + + # Using a custom CV object should throw an error in combination with + # alpha_per_target=True + ridge_cv = RidgeCV(alphas=alphas, cv=LeaveOneOut(), alpha_per_target=True) + msg = "cv!=None and alpha_per_target=True are incompatible" + with pytest.raises(ValueError, match=msg): + ridge_cv.fit(X, y) + ridge_cv = RidgeCV(alphas=alphas, cv=6, alpha_per_target=True) + with pytest.raises(ValueError, match=msg): + ridge_cv.fit(X, y) + + +def _test_ridge_diabetes(sparse_container): + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + ridge = Ridge(fit_intercept=False) + ridge.fit(X, y_diabetes) + return np.round(ridge.score(X, y_diabetes), 5) + + +def _test_multi_ridge_diabetes(sparse_container): + # simulate several responses + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + Y = np.vstack((y_diabetes, y_diabetes)).T + n_features = X_diabetes.shape[1] + + ridge = Ridge(fit_intercept=False) + ridge.fit(X, Y) + assert ridge.coef_.shape == (2, n_features) + Y_pred = ridge.predict(X) + ridge.fit(X, y_diabetes) + y_pred = ridge.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +def _test_ridge_classifiers(sparse_container): + n_classes = np.unique(y_iris).shape[0] + n_features = X_iris.shape[1] + X = X_iris if sparse_container is None else sparse_container(X_iris) + + for reg in (RidgeClassifier(), RidgeClassifierCV()): + reg.fit(X, y_iris) + assert reg.coef_.shape == (n_classes, n_features) + y_pred = reg.predict(X) + assert np.mean(y_iris == y_pred) > 0.79 + + cv = KFold(5) + reg = RidgeClassifierCV(cv=cv) + reg.fit(X, y_iris) + y_pred = reg.predict(X) + assert np.mean(y_iris == y_pred) >= 0.8 + + +@pytest.mark.parametrize("scoring", [None, "accuracy", _accuracy_callable]) +@pytest.mark.parametrize("cv", [None, KFold(5)]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_ridge_classifier_with_scoring(sparse_container, scoring, cv): + # non-regression test for #14672 + # check that RidgeClassifierCV works with all sort of scoring and + # cross-validation + X = X_iris if sparse_container is None else sparse_container(X_iris) + scoring_ = make_scorer(scoring) if callable(scoring) else scoring + clf = 
RidgeClassifierCV(scoring=scoring_, cv=cv) + # Smoke test to check that fit/predict does not raise error + clf.fit(X, y_iris).predict(X) + + +@pytest.mark.parametrize("cv", [None, KFold(5)]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_ridge_regression_custom_scoring(sparse_container, cv): + # check that custom scoring is working as expected + # check the tie breaking strategy (keep the first alpha tried) + + def _dummy_score(y_test, y_pred): + return 0.42 + + X = X_iris if sparse_container is None else sparse_container(X_iris) + alphas = np.logspace(-2, 2, num=5) + clf = RidgeClassifierCV(alphas=alphas, scoring=make_scorer(_dummy_score), cv=cv) + clf.fit(X, y_iris) + assert clf.best_score_ == pytest.approx(0.42) + # In case of tie score, the first alphas will be kept + assert clf.alpha_ == pytest.approx(alphas[0]) + + +def _test_tolerance(sparse_container): + X = X_diabetes if sparse_container is None else sparse_container(X_diabetes) + + ridge = Ridge(tol=1e-5, fit_intercept=False) + ridge.fit(X, y_diabetes) + score = ridge.score(X, y_diabetes) + + ridge2 = Ridge(tol=1e-3, fit_intercept=False) + ridge2.fit(X, y_diabetes) + score2 = ridge2.score(X, y_diabetes) + + assert score >= score2 + + +@pytest.mark.parametrize( + "test_func", + ( + _test_ridge_loo, + _test_ridge_cv, + _test_ridge_diabetes, + _test_multi_ridge_diabetes, + _test_ridge_classifiers, + _test_tolerance, + ), +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dense_sparse(test_func, csr_container): + # test dense matrix + ret_dense = test_func(None) + # test sparse matrix + ret_sparse = test_func(csr_container) + # test that the outputs are the same + if ret_dense is not None and ret_sparse is not None: + assert_array_almost_equal(ret_dense, ret_sparse, decimal=3) + + +def test_class_weights(): + # Test class weights. + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + reg = RidgeClassifier(class_weight=None) + reg.fit(X, y) + assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1])) + + # we give a small weights to class 1 + reg = RidgeClassifier(class_weight={1: 0.001}) + reg.fit(X, y) + + # now the hyperplane should rotate clock-wise and + # the prediction on this point should shift + assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([-1])) + + # check if class_weight = 'balanced' can handle negative labels. 
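+    # ("balanced" makes class weights inversely proportional to class frequencies;
+    # the labels {-1, 1} are only identifiers and should not break that computation)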
+ reg = RidgeClassifier(class_weight="balanced") + reg.fit(X, y) + assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1])) + + # class_weight = 'balanced', and class_weight = None should return + # same values when y has equal number of all labels + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0]]) + y = [1, 1, -1, -1] + reg = RidgeClassifier(class_weight=None) + reg.fit(X, y) + rega = RidgeClassifier(class_weight="balanced") + rega.fit(X, y) + assert len(rega.classes_) == 2 + assert_array_almost_equal(reg.coef_, rega.coef_) + assert_array_almost_equal(reg.intercept_, rega.intercept_) + + +@pytest.mark.parametrize("reg", (RidgeClassifier, RidgeClassifierCV)) +def test_class_weight_vs_sample_weight(reg): + """Check class_weights resemble sample_weights behavior.""" + + # Iris is balanced, so no effect expected for using 'balanced' weights + reg1 = reg() + reg1.fit(iris.data, iris.target) + reg2 = reg(class_weight="balanced") + reg2.fit(iris.data, iris.target) + assert_almost_equal(reg1.coef_, reg2.coef_) + + # Inflate importance of class 1, check against user-defined weights + sample_weight = np.ones(iris.target.shape) + sample_weight[iris.target == 1] *= 100 + class_weight = {0: 1.0, 1: 100.0, 2: 1.0} + reg1 = reg() + reg1.fit(iris.data, iris.target, sample_weight) + reg2 = reg(class_weight=class_weight) + reg2.fit(iris.data, iris.target) + assert_almost_equal(reg1.coef_, reg2.coef_) + + # Check that sample_weight and class_weight are multiplicative + reg1 = reg() + reg1.fit(iris.data, iris.target, sample_weight**2) + reg2 = reg(class_weight=class_weight) + reg2.fit(iris.data, iris.target, sample_weight) + assert_almost_equal(reg1.coef_, reg2.coef_) + + +def test_class_weights_cv(): + # Test class weights for cross validated ridge classifier. 
+ X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + reg = RidgeClassifierCV(class_weight=None, alphas=[0.01, 0.1, 1]) + reg.fit(X, y) + + # we give a small weights to class 1 + reg = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[0.01, 0.1, 1, 10]) + reg.fit(X, y) + + assert_array_equal(reg.predict([[-0.2, 2]]), np.array([-1])) + + +@pytest.mark.parametrize( + "scoring", [None, "neg_mean_squared_error", _mean_squared_error_callable] +) +def test_ridgecv_store_cv_values(scoring): + rng = np.random.RandomState(42) + + n_samples = 8 + n_features = 5 + x = rng.randn(n_samples, n_features) + alphas = [1e-1, 1e0, 1e1] + n_alphas = len(alphas) + + scoring_ = make_scorer(scoring) if callable(scoring) else scoring + + r = RidgeCV(alphas=alphas, cv=None, store_cv_values=True, scoring=scoring_) + + # with len(y.shape) == 1 + y = rng.randn(n_samples) + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_alphas) + + # with len(y.shape) == 2 + n_targets = 3 + y = rng.randn(n_samples, n_targets) + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_targets, n_alphas) + + r = RidgeCV(cv=3, store_cv_values=True, scoring=scoring) + with pytest.raises(ValueError, match="cv!=None and store_cv_values"): + r.fit(x, y) + + +@pytest.mark.parametrize("scoring", [None, "accuracy", _accuracy_callable]) +def test_ridge_classifier_cv_store_cv_values(scoring): + x = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = np.array([1, 1, 1, -1, -1]) + + n_samples = x.shape[0] + alphas = [1e-1, 1e0, 1e1] + n_alphas = len(alphas) + + scoring_ = make_scorer(scoring) if callable(scoring) else scoring + + r = RidgeClassifierCV( + alphas=alphas, cv=None, store_cv_values=True, scoring=scoring_ + ) + + # with len(y.shape) == 1 + n_targets = 1 + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_targets, n_alphas) + + # with len(y.shape) == 2 + y = np.array( + [[1, 1, 1, -1, -1], [1, -1, 1, -1, 1], [-1, -1, 1, -1, -1]] + ).transpose() + n_targets = y.shape[1] + r.fit(x, y) + assert r.cv_values_.shape == (n_samples, n_targets, n_alphas) + + +@pytest.mark.parametrize("Estimator", [RidgeCV, RidgeClassifierCV]) +def test_ridgecv_alphas_conversion(Estimator): + rng = np.random.RandomState(0) + alphas = (0.1, 1.0, 10.0) + + n_samples, n_features = 5, 5 + if Estimator is RidgeCV: + y = rng.randn(n_samples) + else: + y = rng.randint(0, 2, n_samples) + X = rng.randn(n_samples, n_features) + + ridge_est = Estimator(alphas=alphas) + assert ( + ridge_est.alphas is alphas + ), f"`alphas` was mutated in `{Estimator.__name__}.__init__`" + + ridge_est.fit(X, y) + assert_array_equal(ridge_est.alphas, np.asarray(alphas)) + + +def test_ridgecv_sample_weight(): + rng = np.random.RandomState(0) + alphas = (0.1, 1.0, 10.0) + + # There are different algorithms for n_samples > n_features + # and the opposite, so test them both. 
+ for n_samples, n_features in ((6, 5), (5, 10)): + y = rng.randn(n_samples) + X = rng.randn(n_samples, n_features) + sample_weight = 1.0 + rng.rand(n_samples) + + cv = KFold(5) + ridgecv = RidgeCV(alphas=alphas, cv=cv) + ridgecv.fit(X, y, sample_weight=sample_weight) + + # Check using GridSearchCV directly + parameters = {"alpha": alphas} + gs = GridSearchCV(Ridge(), parameters, cv=cv) + gs.fit(X, y, sample_weight=sample_weight) + + assert ridgecv.alpha_ == gs.best_estimator_.alpha + assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_) + + +def test_raises_value_error_if_sample_weights_greater_than_1d(): + # Sample weights must be either scalar or 1D + + n_sampless = [2, 3] + n_featuress = [3, 2] + + rng = np.random.RandomState(42) + + for n_samples, n_features in zip(n_sampless, n_featuress): + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + sample_weights_OK = rng.randn(n_samples) ** 2 + 1 + sample_weights_OK_1 = 1.0 + sample_weights_OK_2 = 2.0 + sample_weights_not_OK = sample_weights_OK[:, np.newaxis] + sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :] + + ridge = Ridge(alpha=1) + + # make sure the "OK" sample weights actually work + ridge.fit(X, y, sample_weights_OK) + ridge.fit(X, y, sample_weights_OK_1) + ridge.fit(X, y, sample_weights_OK_2) + + def fit_ridge_not_ok(): + ridge.fit(X, y, sample_weights_not_OK) + + def fit_ridge_not_ok_2(): + ridge.fit(X, y, sample_weights_not_OK_2) + + err_msg = "Sample weights must be 1D array or scalar" + with pytest.raises(ValueError, match=err_msg): + fit_ridge_not_ok() + + err_msg = "Sample weights must be 1D array or scalar" + with pytest.raises(ValueError, match=err_msg): + fit_ridge_not_ok_2() + + +@pytest.mark.parametrize("n_samples,n_features", [[2, 3], [3, 2]]) +@pytest.mark.parametrize( + "sparse_container", + COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS, +) +def test_sparse_design_with_sample_weights(n_samples, n_features, sparse_container): + # Sample weights must work with sparse matrices + rng = np.random.RandomState(42) + + sparse_ridge = Ridge(alpha=1.0, fit_intercept=False) + dense_ridge = Ridge(alpha=1.0, fit_intercept=False) + + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + sample_weights = rng.randn(n_samples) ** 2 + 1 + X_sparse = sparse_container(X) + sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights) + dense_ridge.fit(X, y, sample_weight=sample_weights) + + assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_, decimal=6) + + +def test_ridgecv_int_alphas(): + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + # Integers + ridge = RidgeCV(alphas=(1, 10, 100)) + ridge.fit(X, y) + + +@pytest.mark.parametrize("Estimator", [RidgeCV, RidgeClassifierCV]) +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + ({"alphas": (1, -1, -100)}, ValueError, r"alphas\[1\] == -1, must be > 0.0"), + ( + {"alphas": (-0.1, -1.0, -10.0)}, + ValueError, + r"alphas\[0\] == -0.1, must be > 0.0", + ), + ( + {"alphas": (1, 1.0, "1")}, + TypeError, + r"alphas\[2\] must be an instance of float, not str", + ), + ], +) +def test_ridgecv_alphas_validation(Estimator, params, err_type, err_msg): + """Check the `alphas` validation in RidgeCV and RidgeClassifierCV.""" + + n_samples, n_features = 5, 5 + X = rng.randn(n_samples, n_features) + y = rng.randint(0, 2, n_samples) + + with pytest.raises(err_type, match=err_msg): + Estimator(**params).fit(X, y) + + 
+@pytest.mark.parametrize("Estimator", [RidgeCV, RidgeClassifierCV]) +def test_ridgecv_alphas_scalar(Estimator): + """Check the case when `alphas` is a scalar. + This case was supported in the past when `alphas` where converted + into array in `__init__`. + We add this test to ensure backward compatibility. + """ + + n_samples, n_features = 5, 5 + X = rng.randn(n_samples, n_features) + if Estimator is RidgeCV: + y = rng.randn(n_samples) + else: + y = rng.randint(0, 2, n_samples) + + Estimator(alphas=1).fit(X, y) + + +def test_sparse_cg_max_iter(): + reg = Ridge(solver="sparse_cg", max_iter=1) + reg.fit(X_diabetes, y_diabetes) + assert reg.coef_.shape[0] == X_diabetes.shape[1] + + +@ignore_warnings +def test_n_iter(): + # Test that self.n_iter_ is correct. + n_targets = 2 + X, y = X_diabetes, y_diabetes + y_n = np.tile(y, (n_targets, 1)).T + + for max_iter in range(1, 4): + for solver in ("sag", "saga", "lsqr"): + reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12) + reg.fit(X, y_n) + assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets)) + + for solver in ("sparse_cg", "svd", "cholesky"): + reg = Ridge(solver=solver, max_iter=1, tol=1e-1) + reg.fit(X, y_n) + assert reg.n_iter_ is None + + +@pytest.mark.parametrize("solver", ["lsqr", "sparse_cg", "lbfgs", "auto"]) +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ridge_fit_intercept_sparse( + solver, with_sample_weight, global_random_seed, csr_container +): + """Check that ridge finds the same coefs and intercept on dense and sparse input + in the presence of sample weights. + + For now only sparse_cg and lbfgs can correctly fit an intercept + with sparse X with default tol and max_iter. + 'sag' is tested separately in test_ridge_fit_intercept_sparse_sag because it + requires more iterations and should raise a warning if default max_iter is used. 
+ Other solvers raise an exception, as checked in + test_ridge_fit_intercept_sparse_error + """ + positive = solver == "lbfgs" + X, y = _make_sparse_offset_regression( + n_features=20, random_state=global_random_seed, positive=positive + ) + + sample_weight = None + if with_sample_weight: + rng = np.random.RandomState(global_random_seed) + sample_weight = 1.0 + rng.uniform(size=X.shape[0]) + + # "auto" should switch to "sparse_cg" when X is sparse + # so the reference we use for both ("auto" and "sparse_cg") is + # Ridge(solver="sparse_cg"), fitted using the dense representation (note + # that "sparse_cg" can fit sparse or dense data) + dense_solver = "sparse_cg" if solver == "auto" else solver + dense_ridge = Ridge(solver=dense_solver, tol=1e-12, positive=positive) + sparse_ridge = Ridge(solver=solver, tol=1e-12, positive=positive) + + dense_ridge.fit(X, y, sample_weight=sample_weight) + sparse_ridge.fit(csr_container(X), y, sample_weight=sample_weight) + + assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_) + assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=5e-7) + + +@pytest.mark.parametrize("solver", ["saga", "svd", "cholesky"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ridge_fit_intercept_sparse_error(solver, csr_container): + X, y = _make_sparse_offset_regression(n_features=20, random_state=0) + X_csr = csr_container(X) + sparse_ridge = Ridge(solver=solver) + err_msg = "solver='{}' does not support".format(solver) + with pytest.raises(ValueError, match=err_msg): + sparse_ridge.fit(X_csr, y) + + +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ridge_fit_intercept_sparse_sag( + with_sample_weight, global_random_seed, csr_container +): + X, y = _make_sparse_offset_regression( + n_features=5, n_samples=20, random_state=global_random_seed, X_offset=5.0 + ) + if with_sample_weight: + rng = np.random.RandomState(global_random_seed) + sample_weight = 1.0 + rng.uniform(size=X.shape[0]) + else: + sample_weight = None + X_csr = csr_container(X) + + params = dict( + alpha=1.0, solver="sag", fit_intercept=True, tol=1e-10, max_iter=100000 + ) + dense_ridge = Ridge(**params) + sparse_ridge = Ridge(**params) + dense_ridge.fit(X, y, sample_weight=sample_weight) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + sparse_ridge.fit(X_csr, y, sample_weight=sample_weight) + assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_, rtol=1e-4) + assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=1e-4) + with pytest.warns(UserWarning, match='"sag" solver requires.*'): + Ridge(solver="sag", fit_intercept=True, tol=1e-3, max_iter=None).fit(X_csr, y) + + +@pytest.mark.parametrize("return_intercept", [False, True]) +@pytest.mark.parametrize("sample_weight", [None, np.ones(1000)]) +@pytest.mark.parametrize("container", [np.array] + CSR_CONTAINERS) +@pytest.mark.parametrize( + "solver", ["auto", "sparse_cg", "cholesky", "lsqr", "sag", "saga", "lbfgs"] +) +def test_ridge_regression_check_arguments_validity( + return_intercept, sample_weight, container, solver +): + """check if all combinations of arguments give valid estimations""" + + # test excludes 'svd' solver because it raises exception for sparse inputs + + rng = check_random_state(42) + X = rng.rand(1000, 3) + true_coefs = [1, 2, 0.1] + y = np.dot(X, true_coefs) + true_intercept = 0.0 + if return_intercept: + true_intercept = 10000.0 + y += true_intercept + X_testing = container(X) 
+ + alpha, tol = 1e-3, 1e-6 + atol = 1e-3 if _IS_32BIT else 1e-4 + + positive = solver == "lbfgs" + + if solver not in ["sag", "auto"] and return_intercept: + with pytest.raises(ValueError, match="In Ridge, only 'sag' solver"): + ridge_regression( + X_testing, + y, + alpha=alpha, + solver=solver, + sample_weight=sample_weight, + return_intercept=return_intercept, + positive=positive, + tol=tol, + ) + return + + out = ridge_regression( + X_testing, + y, + alpha=alpha, + solver=solver, + sample_weight=sample_weight, + positive=positive, + return_intercept=return_intercept, + tol=tol, + ) + + if return_intercept: + coef, intercept = out + assert_allclose(coef, true_coefs, rtol=0, atol=atol) + assert_allclose(intercept, true_intercept, rtol=0, atol=atol) + else: + assert_allclose(out, true_coefs, rtol=0, atol=atol) + + +@pytest.mark.parametrize( + "solver", ["svd", "sparse_cg", "cholesky", "lsqr", "sag", "saga", "lbfgs"] +) +def test_dtype_match(solver): + rng = np.random.RandomState(0) + alpha = 1.0 + positive = solver == "lbfgs" + + n_samples, n_features = 6, 5 + X_64 = rng.randn(n_samples, n_features) + y_64 = rng.randn(n_samples) + X_32 = X_64.astype(np.float32) + y_32 = y_64.astype(np.float32) + + tol = 2 * np.finfo(np.float32).resolution + # Check type consistency 32bits + ridge_32 = Ridge( + alpha=alpha, solver=solver, max_iter=500, tol=tol, positive=positive + ) + ridge_32.fit(X_32, y_32) + coef_32 = ridge_32.coef_ + + # Check type consistency 64 bits + ridge_64 = Ridge( + alpha=alpha, solver=solver, max_iter=500, tol=tol, positive=positive + ) + ridge_64.fit(X_64, y_64) + coef_64 = ridge_64.coef_ + + # Do the actual checks at once for easier debug + assert coef_32.dtype == X_32.dtype + assert coef_64.dtype == X_64.dtype + assert ridge_32.predict(X_32).dtype == X_32.dtype + assert ridge_64.predict(X_64).dtype == X_64.dtype + assert_allclose(ridge_32.coef_, ridge_64.coef_, rtol=1e-4, atol=5e-4) + + +def test_dtype_match_cholesky(): + # Test different alphas in cholesky solver to ensure full coverage. + # This test is separated from test_dtype_match for clarity. 
+ rng = np.random.RandomState(0) + alpha = np.array([1.0, 0.5]) + + n_samples, n_features, n_target = 6, 7, 2 + X_64 = rng.randn(n_samples, n_features) + y_64 = rng.randn(n_samples, n_target) + X_32 = X_64.astype(np.float32) + y_32 = y_64.astype(np.float32) + + # Check type consistency 32bits + ridge_32 = Ridge(alpha=alpha, solver="cholesky") + ridge_32.fit(X_32, y_32) + coef_32 = ridge_32.coef_ + + # Check type consistency 64 bits + ridge_64 = Ridge(alpha=alpha, solver="cholesky") + ridge_64.fit(X_64, y_64) + coef_64 = ridge_64.coef_ + + # Do all the checks at once, like this is easier to debug + assert coef_32.dtype == X_32.dtype + assert coef_64.dtype == X_64.dtype + assert ridge_32.predict(X_32).dtype == X_32.dtype + assert ridge_64.predict(X_64).dtype == X_64.dtype + assert_almost_equal(ridge_32.coef_, ridge_64.coef_, decimal=5) + + +@pytest.mark.parametrize( + "solver", ["svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"] +) +@pytest.mark.parametrize("seed", range(1)) +def test_ridge_regression_dtype_stability(solver, seed): + random_state = np.random.RandomState(seed) + n_samples, n_features = 6, 5 + X = random_state.randn(n_samples, n_features) + coef = random_state.randn(n_features) + y = np.dot(X, coef) + 0.01 * random_state.randn(n_samples) + alpha = 1.0 + positive = solver == "lbfgs" + results = dict() + # XXX: Sparse CG seems to be far less numerically stable than the + # others, maybe we should not enable float32 for this one. + atol = 1e-3 if solver == "sparse_cg" else 1e-5 + for current_dtype in (np.float32, np.float64): + results[current_dtype] = ridge_regression( + X.astype(current_dtype), + y.astype(current_dtype), + alpha=alpha, + solver=solver, + random_state=random_state, + sample_weight=None, + positive=positive, + max_iter=500, + tol=1e-10, + return_n_iter=False, + return_intercept=False, + ) + + assert results[np.float32].dtype == np.float32 + assert results[np.float64].dtype == np.float64 + assert_allclose(results[np.float32], results[np.float64], atol=atol) + + +def test_ridge_sag_with_X_fortran(): + # check that Fortran array are converted when using SAG solver + X, y = make_regression(random_state=42) + # for the order of X and y to not be C-ordered arrays + X = np.asfortranarray(X) + X = X[::2, :] + y = y[::2] + Ridge(solver="sag").fit(X, y) + + +@pytest.mark.parametrize( + "Classifier, params", + [ + (RidgeClassifier, {}), + (RidgeClassifierCV, {"cv": None}), + (RidgeClassifierCV, {"cv": 3}), + ], +) +def test_ridgeclassifier_multilabel(Classifier, params): + """Check that multilabel classification is supported and give meaningful + results.""" + X, y = make_multilabel_classification(n_classes=1, random_state=0) + y = y.reshape(-1, 1) + Y = np.concatenate([y, y], axis=1) + clf = Classifier(**params).fit(X, Y) + Y_pred = clf.predict(X) + + assert Y_pred.shape == Y.shape + assert_array_equal(Y_pred[:, 0], Y_pred[:, 1]) + Ridge(solver="sag").fit(X, y) + + +@pytest.mark.parametrize("solver", ["auto", "lbfgs"]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0]) +def test_ridge_positive_regression_test(solver, fit_intercept, alpha): + """Test that positive Ridge finds true positive coefficients.""" + X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + coef = np.array([1, -10]) + if fit_intercept: + intercept = 20 + y = X.dot(coef) + intercept + else: + y = X.dot(coef) + + model = Ridge( + alpha=alpha, positive=True, solver=solver, fit_intercept=fit_intercept + ) + model.fit(X, y) + assert 
np.all(model.coef_ >= 0) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0]) +def test_ridge_ground_truth_positive_test(fit_intercept, alpha): + """Test that Ridge w/wo positive converges to the same solution. + + Ridge with positive=True and positive=False must give the same + when the ground truth coefs are all positive. + """ + rng = np.random.RandomState(42) + X = rng.randn(300, 100) + coef = rng.uniform(0.1, 1.0, size=X.shape[1]) + if fit_intercept: + intercept = 1 + y = X @ coef + intercept + else: + y = X @ coef + y += rng.normal(size=X.shape[0]) * 0.01 + + results = [] + for positive in [True, False]: + model = Ridge( + alpha=alpha, positive=positive, fit_intercept=fit_intercept, tol=1e-10 + ) + results.append(model.fit(X, y).coef_) + assert_allclose(*results, atol=1e-6, rtol=0) + + +@pytest.mark.parametrize( + "solver", ["svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga"] +) +def test_ridge_positive_error_test(solver): + """Test input validation for positive argument in Ridge.""" + alpha = 0.1 + X = np.array([[1, 2], [3, 4]]) + coef = np.array([1, -1]) + y = X @ coef + + model = Ridge(alpha=alpha, positive=True, solver=solver, fit_intercept=False) + with pytest.raises(ValueError, match="does not support positive"): + model.fit(X, y) + + with pytest.raises(ValueError, match="only 'lbfgs' solver can be used"): + _, _ = ridge_regression( + X, y, alpha, positive=True, solver=solver, return_intercept=False + ) + + +@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0]) +def test_positive_ridge_loss(alpha): + """Check ridge loss consistency when positive argument is enabled.""" + X, y = make_regression(n_samples=300, n_features=300, random_state=42) + alpha = 0.10 + n_checks = 100 + + def ridge_loss(model, random_state=None, noise_scale=1e-8): + intercept = model.intercept_ + if random_state is not None: + rng = np.random.RandomState(random_state) + coef = model.coef_ + rng.uniform(0, noise_scale, size=model.coef_.shape) + else: + coef = model.coef_ + + return 0.5 * np.sum((y - X @ coef - intercept) ** 2) + 0.5 * alpha * np.sum( + coef**2 + ) + + model = Ridge(alpha=alpha).fit(X, y) + model_positive = Ridge(alpha=alpha, positive=True).fit(X, y) + + # Check 1: + # Loss for solution found by Ridge(positive=False) + # is lower than that for solution found by Ridge(positive=True) + loss = ridge_loss(model) + loss_positive = ridge_loss(model_positive) + assert loss <= loss_positive + + # Check 2: + # Loss for solution found by Ridge(positive=True) + # is lower than that for small random positive perturbation + # of the positive solution. 
+ for random_state in range(n_checks): + loss_perturbed = ridge_loss(model_positive, random_state=random_state) + assert loss_positive <= loss_perturbed + + +@pytest.mark.parametrize("alpha", [1e-3, 1e-2, 0.1, 1.0]) +def test_lbfgs_solver_consistency(alpha): + """Test that LBGFS gets almost the same coef of svd when positive=False.""" + X, y = make_regression(n_samples=300, n_features=300, random_state=42) + y = np.expand_dims(y, 1) + alpha = np.asarray([alpha]) + config = { + "positive": False, + "tol": 1e-16, + "max_iter": 500000, + } + + coef_lbfgs = _solve_lbfgs(X, y, alpha, **config) + coef_cholesky = _solve_svd(X, y, alpha) + assert_allclose(coef_lbfgs, coef_cholesky, atol=1e-4, rtol=0) + + +def test_lbfgs_solver_error(): + """Test that LBFGS solver raises ConvergenceWarning.""" + X = np.array([[1, -1], [1, 1]]) + y = np.array([-1e10, 1e10]) + + model = Ridge( + alpha=0.01, + solver="lbfgs", + fit_intercept=False, + tol=1e-12, + positive=True, + max_iter=1, + ) + with pytest.warns(ConvergenceWarning, match="lbfgs solver did not converge"): + model.fit(X, y) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("data", ["tall", "wide"]) +@pytest.mark.parametrize("solver", SOLVERS + ["lbfgs"]) +def test_ridge_sample_weight_consistency( + fit_intercept, sparse_container, data, solver, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weights_invariance alone. + """ + # filter out solver that do not support sparse input + if sparse_container is not None: + if solver == "svd" or (solver in ("cholesky", "saga") and fit_intercept): + pytest.skip("unsupported configuration") + + # XXX: this test is quite sensitive to the seed used to generate the data: + # ideally we would like the test to pass for any global_random_seed but this is not + # the case at the moment. + rng = np.random.RandomState(42) + n_samples = 12 + if data == "tall": + n_features = n_samples // 2 + else: + n_features = n_samples * 2 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict( + fit_intercept=fit_intercept, + alpha=1.0, + solver=solver, + positive=(solver == "lbfgs"), + random_state=global_random_seed, # for sag/saga + tol=1e-12, + ) + + # 1) sample_weight=np.ones(..) should be equivalent to sample_weight=None + # same check as check_sample_weights_invariance(name, reg, kind="ones"), but we also + # test with sparse input. 
+ reg = Ridge(**params).fit(X, y, sample_weight=None) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) setting elements of sample_weight to 0 is equivalent to removing these samples + # same check as check_sample_weights_invariance(name, reg, kind="zeros"), but we + # also test with sparse input + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + sample_weight[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + reg.fit(X[:-5, :], y[:-5], sample_weight=sample_weight[:-5]) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect + # Note: For models with penalty, scaling the penalty term might work. + reg2 = Ridge(**params).set_params(alpha=np.pi * params["alpha"]) + reg2.fit(X, y, sample_weight=np.pi * sample_weight) + if solver in ("sag", "saga") and not fit_intercept: + pytest.xfail(f"Solver {solver} does fail test for scaling of sample_weight.") + assert_allclose(reg2.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg2.intercept_, intercept) + + # 4) check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + if sparse_container is not None: + X = X.toarray() + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + if sparse_container is not None: + X = sparse_container(X) + X2 = sparse_container(X2) + reg1 = Ridge(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = Ridge(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_) + if fit_intercept: + assert_allclose(reg1.intercept_, reg2.intercept_) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py new file mode 100644 index 0000000000000000000000000000000000000000..96f8a79726833ccdc36585b25da4464e5486e809 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py @@ -0,0 +1,1026 @@ +# Authors: Danny Sullivan +# Tom Dupre la Tour +# +# License: BSD 3 clause + +import math +import re + +import numpy as np +import pytest +from scipy.special import logsumexp + +from sklearn._loss.loss import HalfMultinomialLoss +from sklearn.base import clone +from sklearn.datasets import load_iris, make_blobs, make_classification +from sklearn.linear_model import LogisticRegression, Ridge +from sklearn.linear_model._base import make_dataset +from sklearn.linear_model._linear_loss import LinearModelLoss +from sklearn.linear_model._sag import get_auto_step_size +from sklearn.linear_model._sag_fast import _multinomial_grad_loss_all_samples +from sklearn.preprocessing import LabelBinarizer, LabelEncoder +from sklearn.utils import check_random_state, compute_class_weight +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, +) +from 
sklearn.utils.extmath import row_norms +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() + + +# this is used for sag classification +def log_dloss(p, y): + z = p * y + # approximately equal and saves the computation of the log + if z > 18.0: + return math.exp(-z) * -y + if z < -18.0: + return -y + return -y / (math.exp(z) + 1.0) + + +def log_loss(p, y): + return np.mean(np.log(1.0 + np.exp(-y * p))) + + +# this is used for sag regression +def squared_dloss(p, y): + return p - y + + +def squared_loss(p, y): + return np.mean(0.5 * (p - y) * (p - y)) + + +# function for measuring the log loss +def get_pobj(w, alpha, myX, myy, loss): + w = w.ravel() + pred = np.dot(myX, w) + p = loss(pred, myy) + p += alpha * w.dot(w) / 2.0 + return p + + +def sag( + X, + y, + step_size, + alpha, + n_iter=1, + dloss=None, + sparse=False, + sample_weight=None, + fit_intercept=True, + saga=False, +): + n_samples, n_features = X.shape[0], X.shape[1] + + weights = np.zeros(X.shape[1]) + sum_gradient = np.zeros(X.shape[1]) + gradient_memory = np.zeros((n_samples, n_features)) + + intercept = 0.0 + intercept_sum_gradient = 0.0 + intercept_gradient_memory = np.zeros(n_samples) + + rng = np.random.RandomState(77) + decay = 1.0 + seen = set() + + # sparse data has a fixed decay of .01 + if sparse: + decay = 0.01 + + for epoch in range(n_iter): + for k in range(n_samples): + idx = int(rng.rand() * n_samples) + # idx = k + entry = X[idx] + seen.add(idx) + p = np.dot(entry, weights) + intercept + gradient = dloss(p, y[idx]) + if sample_weight is not None: + gradient *= sample_weight[idx] + update = entry * gradient + alpha * weights + gradient_correction = update - gradient_memory[idx] + sum_gradient += gradient_correction + gradient_memory[idx] = update + if saga: + weights -= gradient_correction * step_size * (1 - 1.0 / len(seen)) + + if fit_intercept: + gradient_correction = gradient - intercept_gradient_memory[idx] + intercept_gradient_memory[idx] = gradient + intercept_sum_gradient += gradient_correction + gradient_correction *= step_size * (1.0 - 1.0 / len(seen)) + if saga: + intercept -= ( + step_size * intercept_sum_gradient / len(seen) * decay + ) + gradient_correction + else: + intercept -= step_size * intercept_sum_gradient / len(seen) * decay + + weights -= step_size * sum_gradient / len(seen) + + return weights, intercept + + +def sag_sparse( + X, + y, + step_size, + alpha, + n_iter=1, + dloss=None, + sample_weight=None, + sparse=False, + fit_intercept=True, + saga=False, + random_state=0, +): + if step_size * alpha == 1.0: + raise ZeroDivisionError( + "Sparse sag does not handle the case step_size * alpha == 1" + ) + n_samples, n_features = X.shape[0], X.shape[1] + + weights = np.zeros(n_features) + sum_gradient = np.zeros(n_features) + last_updated = np.zeros(n_features, dtype=int) + gradient_memory = np.zeros(n_samples) + rng = check_random_state(random_state) + intercept = 0.0 + intercept_sum_gradient = 0.0 + wscale = 1.0 + decay = 1.0 + seen = set() + + c_sum = np.zeros(n_iter * n_samples) + + # sparse data has a fixed decay of .01 + if sparse: + decay = 0.01 + + counter = 0 + for epoch in range(n_iter): + for k in range(n_samples): + # idx = k + idx = int(rng.rand() * n_samples) + entry = X[idx] + seen.add(idx) + + if counter >= 1: + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter - 1] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter - 1] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + last_updated[j] = counter + + p = (wscale * 
np.dot(entry, weights)) + intercept + gradient = dloss(p, y[idx]) + + if sample_weight is not None: + gradient *= sample_weight[idx] + + update = entry * gradient + gradient_correction = update - (gradient_memory[idx] * entry) + sum_gradient += gradient_correction + if saga: + for j in range(n_features): + weights[j] -= ( + gradient_correction[j] + * step_size + * (1 - 1.0 / len(seen)) + / wscale + ) + + if fit_intercept: + gradient_correction = gradient - gradient_memory[idx] + intercept_sum_gradient += gradient_correction + gradient_correction *= step_size * (1.0 - 1.0 / len(seen)) + if saga: + intercept -= ( + step_size * intercept_sum_gradient / len(seen) * decay + ) + gradient_correction + else: + intercept -= step_size * intercept_sum_gradient / len(seen) * decay + + gradient_memory[idx] = gradient + + wscale *= 1.0 - alpha * step_size + if counter == 0: + c_sum[0] = step_size / (wscale * len(seen)) + else: + c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen)) + + if counter >= 1 and wscale < 1e-9: + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + last_updated[j] = counter + 1 + c_sum[counter] = 0 + weights *= wscale + wscale = 1.0 + + counter += 1 + + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter - 1] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter - 1] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + weights *= wscale + return weights, intercept + + +def get_step_size(X, alpha, fit_intercept, classification=True): + if classification: + return 4.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + 4.0 * alpha) + else: + return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha) + + +def test_classifier_matching(): + n_samples = 20 + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + y[y == 0] = -1 + alpha = 1.1 + fit_intercept = True + step_size = get_step_size(X, alpha, fit_intercept) + for solver in ["sag", "saga"]: + if solver == "sag": + n_iter = 80 + else: + # SAGA variance w.r.t. 
stream order is higher + n_iter = 300 + clf = LogisticRegression( + solver=solver, + fit_intercept=fit_intercept, + tol=1e-11, + C=1.0 / alpha / n_samples, + max_iter=n_iter, + random_state=10, + multi_class="ovr", + ) + clf.fit(X, y) + + weights, intercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + saga=solver == "saga", + ) + weights2, intercept2 = sag( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + saga=solver == "saga", + ) + weights = np.atleast_2d(weights) + intercept = np.atleast_1d(intercept) + weights2 = np.atleast_2d(weights2) + intercept2 = np.atleast_1d(intercept2) + + assert_array_almost_equal(weights, clf.coef_, decimal=9) + assert_array_almost_equal(intercept, clf.intercept_, decimal=9) + assert_array_almost_equal(weights2, clf.coef_, decimal=9) + assert_array_almost_equal(intercept2, clf.intercept_, decimal=9) + + +def test_regressor_matching(): + n_samples = 10 + n_features = 5 + + rng = np.random.RandomState(10) + X = rng.normal(size=(n_samples, n_features)) + true_w = rng.normal(size=n_features) + y = X.dot(true_w) + + alpha = 1.0 + n_iter = 100 + fit_intercept = True + + step_size = get_step_size(X, alpha, fit_intercept, classification=False) + clf = Ridge( + fit_intercept=fit_intercept, + tol=0.00000000001, + solver="sag", + alpha=alpha * n_samples, + max_iter=n_iter, + ) + clf.fit(X, y) + + weights1, intercept1 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + ) + weights2, intercept2 = sag( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + ) + + assert_allclose(weights1, clf.coef_) + assert_allclose(intercept1, clf.intercept_) + assert_allclose(weights2, clf.coef_) + assert_allclose(intercept2, clf.intercept_) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_pobj_matches_logistic_regression(csr_container): + """tests if the sag pobj matches log reg""" + n_samples = 100 + alpha = 1.0 + max_iter = 20 + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + + clf1 = LogisticRegression( + solver="sag", + fit_intercept=False, + tol=0.0000001, + C=1.0 / alpha / n_samples, + max_iter=max_iter, + random_state=10, + multi_class="ovr", + ) + clf2 = clone(clf1) + clf3 = LogisticRegression( + fit_intercept=False, + tol=0.0000001, + C=1.0 / alpha / n_samples, + max_iter=max_iter, + random_state=10, + multi_class="ovr", + ) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + clf3.fit(X, y) + + pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss) + pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss) + pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss) + + assert_array_almost_equal(pobj1, pobj2, decimal=4) + assert_array_almost_equal(pobj2, pobj3, decimal=4) + assert_array_almost_equal(pobj3, pobj1, decimal=4) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_pobj_matches_ridge_regression(csr_container): + """tests if the sag pobj matches ridge reg""" + n_samples = 100 + n_features = 10 + alpha = 1.0 + n_iter = 100 + fit_intercept = False + rng = np.random.RandomState(10) + X = rng.normal(size=(n_samples, n_features)) + true_w = rng.normal(size=n_features) + y = X.dot(true_w) + + clf1 = Ridge( + fit_intercept=fit_intercept, + 
tol=0.00000000001, + solver="sag", + alpha=alpha, + max_iter=n_iter, + random_state=42, + ) + clf2 = clone(clf1) + clf3 = Ridge( + fit_intercept=fit_intercept, + tol=0.00001, + solver="lsqr", + alpha=alpha, + max_iter=n_iter, + random_state=42, + ) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + clf3.fit(X, y) + + pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss) + pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss) + pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss) + + assert_array_almost_equal(pobj1, pobj2, decimal=4) + assert_array_almost_equal(pobj1, pobj3, decimal=4) + assert_array_almost_equal(pobj3, pobj2, decimal=4) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_regressor_computed_correctly(csr_container): + """tests if the sag regressor is computed correctly""" + alpha = 0.1 + n_features = 10 + n_samples = 40 + max_iter = 100 + tol = 0.000001 + fit_intercept = True + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + y = np.dot(X, w) + 2.0 + step_size = get_step_size(X, alpha, fit_intercept, classification=False) + + clf1 = Ridge( + fit_intercept=fit_intercept, + tol=tol, + solver="sag", + alpha=alpha * n_samples, + max_iter=max_iter, + random_state=rng, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + spweights1, spintercept1 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=max_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + random_state=rng, + ) + + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=max_iter, + dloss=squared_dloss, + sparse=True, + fit_intercept=fit_intercept, + random_state=rng, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights1.ravel(), decimal=3) + assert_almost_equal(clf1.intercept_, spintercept1, decimal=1) + + # TODO: uncomment when sparse Ridge with intercept will be fixed (#4710) + # assert_array_almost_equal(clf2.coef_.ravel(), + # spweights2.ravel(), + # decimal=3) + # assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)''' + + +def test_get_auto_step_size(): + X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64) + alpha = 1.2 + fit_intercept = False + # sum the squares of the second sample because that's the largest + max_squared_sum = 4 + 9 + 16 + max_squared_sum_ = row_norms(X, squared=True).max() + n_samples = X.shape[0] + assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4) + + for saga in [True, False]: + for fit_intercept in (True, False): + if saga: + L_sqr = max_squared_sum + alpha + int(fit_intercept) + L_log = (max_squared_sum + 4.0 * alpha + int(fit_intercept)) / 4.0 + mun_sqr = min(2 * n_samples * alpha, L_sqr) + mun_log = min(2 * n_samples * alpha, L_log) + step_size_sqr = 1 / (2 * L_sqr + mun_sqr) + step_size_log = 1 / (2 * L_log + mun_log) + else: + step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept)) + step_size_log = 4.0 / ( + max_squared_sum + 4.0 * alpha + int(fit_intercept) + ) + + step_size_sqr_ = get_auto_step_size( + max_squared_sum_, + alpha, + "squared", + fit_intercept, + n_samples=n_samples, + is_saga=saga, + ) + step_size_log_ = get_auto_step_size( + max_squared_sum_, + alpha, + "log", + fit_intercept, + n_samples=n_samples, + is_saga=saga, + ) + + assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4) + assert_almost_equal(step_size_log, step_size_log_, decimal=4) + + msg = "Unknown loss function 
for SAG solver, got wrong instead of" + with pytest.raises(ValueError, match=msg): + get_auto_step_size(max_squared_sum_, alpha, "wrong", fit_intercept) + + +@pytest.mark.parametrize("seed", range(3)) # locally tested with 1000 seeds +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_regressor(seed, csr_container): + """tests if the sag regressor performs well""" + xmin, xmax = -5, 5 + n_samples = 300 + tol = 0.001 + max_iter = 100 + alpha = 0.1 + rng = np.random.RandomState(seed) + X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) + + # simple linear function without noise + y = 0.5 * X.ravel() + + clf1 = Ridge( + tol=tol, + solver="sag", + max_iter=max_iter, + alpha=alpha * n_samples, + random_state=rng, + ) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + score1 = clf1.score(X, y) + score2 = clf2.score(X, y) + assert score1 > 0.98 + assert score2 > 0.98 + + # simple linear function with noise + y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() + + clf1 = Ridge(tol=tol, solver="sag", max_iter=max_iter, alpha=alpha * n_samples) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + score1 = clf1.score(X, y) + score2 = clf2.score(X, y) + assert score1 > 0.45 + assert score2 > 0.45 + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_classifier_computed_correctly(csr_container): + """tests if the binary classifier is computed correctly""" + alpha = 0.1 + n_samples = 50 + n_iter = 50 + tol = 0.00001 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + y_tmp = np.ones(n_samples) + y_tmp[y != classes[1]] = -1 + y = y_tmp + + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=n_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + spweights, spintercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sparse=True, + fit_intercept=fit_intercept, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2) + assert_almost_equal(clf1.intercept_, spintercept, decimal=1) + + assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2) + assert_almost_equal(clf2.intercept_, spintercept2, decimal=1) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_multiclass_computed_correctly(csr_container): + """tests if the multiclass classifier is computed correctly""" + alpha = 0.1 + n_samples = 20 + tol = 0.00001 + max_iter = 40 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + coef1 = [] + intercept1 = [] + coef2 = [] + intercept2 = [] + for 
cl in classes: + y_encoded = np.ones(n_samples) + y_encoded[y != cl] = -1 + + spweights1, spintercept1 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + dloss=log_dloss, + n_iter=max_iter, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + dloss=log_dloss, + n_iter=max_iter, + sparse=True, + fit_intercept=fit_intercept, + ) + coef1.append(spweights1) + intercept1.append(spintercept1) + + coef2.append(spweights2) + intercept2.append(spintercept2) + + coef1 = np.vstack(coef1) + intercept1 = np.array(intercept1) + coef2 = np.vstack(coef2) + intercept2 = np.array(intercept2) + + for i, cl in enumerate(classes): + assert_array_almost_equal(clf1.coef_[i].ravel(), coef1[i].ravel(), decimal=2) + assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1) + + assert_array_almost_equal(clf2.coef_[i].ravel(), coef2[i].ravel(), decimal=2) + assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_classifier_results(csr_container): + """tests if classifier results match target""" + alpha = 0.1 + n_features = 20 + n_samples = 10 + tol = 0.01 + max_iter = 200 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + y = np.dot(X, w) + y = np.sign(y) + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + pred1 = clf1.predict(X) + pred2 = clf2.predict(X) + assert_almost_equal(pred1, y, decimal=12) + assert_almost_equal(pred2, y, decimal=12) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_binary_classifier_class_weight(csr_container): + """tests binary classifier with classweights for each class""" + alpha = 0.1 + n_samples = 50 + n_iter = 20 + tol = 0.00001 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + y_tmp = np.ones(n_samples) + y_tmp[y != classes[1]] = -1 + y = y_tmp + + class_weight = {1: 0.45, -1: 0.55} + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=n_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + class_weight=class_weight, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + le = LabelEncoder() + class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y) + sample_weight = class_weight_[le.fit_transform(y)] + spweights, spintercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sample_weight=sample_weight, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sparse=True, + sample_weight=sample_weight, + fit_intercept=fit_intercept, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2) + assert_almost_equal(clf1.intercept_, spintercept, decimal=1) + + assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2) + assert_almost_equal(clf2.intercept_, spintercept2, decimal=1) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", 
CSR_CONTAINERS) +def test_multiclass_classifier_class_weight(csr_container): + """tests multiclass with classweights for each class""" + alpha = 0.1 + n_samples = 20 + tol = 0.00001 + max_iter = 50 + class_weight = {0: 0.45, 1: 0.55, 2: 0.75} + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + multi_class="ovr", + class_weight=class_weight, + ) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + le = LabelEncoder() + class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y) + sample_weight = class_weight_[le.fit_transform(y)] + + coef1 = [] + intercept1 = [] + coef2 = [] + intercept2 = [] + for cl in classes: + y_encoded = np.ones(n_samples) + y_encoded[y != cl] = -1 + + spweights1, spintercept1 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + n_iter=max_iter, + dloss=log_dloss, + sample_weight=sample_weight, + ) + spweights2, spintercept2 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + n_iter=max_iter, + dloss=log_dloss, + sample_weight=sample_weight, + sparse=True, + ) + coef1.append(spweights1) + intercept1.append(spintercept1) + coef2.append(spweights2) + intercept2.append(spintercept2) + + coef1 = np.vstack(coef1) + intercept1 = np.array(intercept1) + coef2 = np.vstack(coef2) + intercept2 = np.array(intercept2) + + for i, cl in enumerate(classes): + assert_array_almost_equal(clf1.coef_[i].ravel(), coef1[i].ravel(), decimal=2) + assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1) + + assert_array_almost_equal(clf2.coef_[i].ravel(), coef2[i].ravel(), decimal=2) + assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1) + + +def test_classifier_single_class(): + """tests if ValueError is thrown with only one class""" + X = [[1, 2], [3, 4]] + y = [1, 1] + + msg = "This solver needs samples of at least 2 classes in the data" + with pytest.raises(ValueError, match=msg): + LogisticRegression(solver="sag").fit(X, y) + + +def test_step_size_alpha_error(): + X = [[0, 0], [0, 0]] + y = [1, -1] + fit_intercept = False + alpha = 1.0 + msg = re.escape( + "Current sag implementation does not handle the case" + " step_size * alpha_scaled == 1" + ) + + clf1 = LogisticRegression(solver="sag", C=1.0 / alpha, fit_intercept=fit_intercept) + with pytest.raises(ZeroDivisionError, match=msg): + clf1.fit(X, y) + + clf2 = Ridge(fit_intercept=fit_intercept, solver="sag", alpha=alpha) + with pytest.raises(ZeroDivisionError, match=msg): + clf2.fit(X, y) + + +def test_multinomial_loss(): + # test if the multinomial loss and gradient computations are consistent + X, y = iris.data, iris.target.astype(np.float64) + n_samples, n_features = X.shape + n_classes = len(np.unique(y)) + + rng = check_random_state(42) + weights = rng.randn(n_features, n_classes) + intercept = rng.randn(n_classes) + sample_weights = np.abs(rng.randn(n_samples)) + + # compute loss and gradient like in multinomial SAG + dataset, _ = make_dataset(X, y, sample_weights, random_state=42) + loss_1, grad_1 = _multinomial_grad_loss_all_samples( + dataset, weights, intercept, n_samples, n_features, n_classes + ) + # compute loss and gradient like in multinomial LogisticRegression + loss = LinearModelLoss( + base_loss=HalfMultinomialLoss(n_classes=n_classes), + 
fit_intercept=True, + ) + weights_intercept = np.vstack((weights, intercept)).T + loss_2, grad_2 = loss.loss_gradient( + weights_intercept, X, y, l2_reg_strength=0.0, sample_weight=sample_weights + ) + grad_2 = grad_2[:, :-1].T + # convert to same convention, i.e. LinearModelLoss uses average(loss, weight=sw) + loss_2 *= np.sum(sample_weights) + grad_2 *= np.sum(sample_weights) + + # comparison + assert_array_almost_equal(grad_1, grad_2) + assert_almost_equal(loss_1, loss_2) + + +def test_multinomial_loss_ground_truth(): + # n_samples, n_features, n_classes = 4, 2, 3 + n_classes = 3 + X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]]) + y = np.array([0, 1, 2, 0], dtype=np.float64) + lbin = LabelBinarizer() + Y_bin = lbin.fit_transform(y) + + weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]]) + intercept = np.array([1.0, 0, -0.2]) + sample_weights = np.array([0.8, 1, 1, 0.8]) + + prediction = np.dot(X, weights) + intercept + logsumexp_prediction = logsumexp(prediction, axis=1) + p = prediction - logsumexp_prediction[:, np.newaxis] + loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum() + diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin) + grad_1 = np.dot(X.T, diff) + + loss = LinearModelLoss( + base_loss=HalfMultinomialLoss(n_classes=n_classes), + fit_intercept=True, + ) + weights_intercept = np.vstack((weights, intercept)).T + loss_2, grad_2 = loss.loss_gradient( + weights_intercept, X, y, l2_reg_strength=0.0, sample_weight=sample_weights + ) + grad_2 = grad_2[:, :-1].T + # convert to same convention, i.e. LinearModelLoss uses average(loss, weight=sw) + loss_2 *= np.sum(sample_weights) + grad_2 *= np.sum(sample_weights) + + assert_almost_equal(loss_1, loss_2) + assert_array_almost_equal(grad_1, grad_2) + + # ground truth + loss_gt = 11.680360354325961 + grad_gt = np.array( + [[-0.557487, -1.619151, +2.176638], [-0.903942, +5.258745, -4.354803]] + ) + assert_almost_equal(loss_1, loss_gt) + assert_array_almost_equal(grad_1, grad_gt) + + +@pytest.mark.parametrize("solver", ["sag", "saga"]) +def test_sag_classifier_raises_error(solver): + # Following #13316, the error handling behavior changed in cython sag. This + # is simply a non-regression test to make sure numerical errors are + # properly raised. 
+ + # Train a classifier on a simple problem + rng = np.random.RandomState(42) + X, y = make_classification(random_state=rng) + clf = LogisticRegression(solver=solver, random_state=rng, warm_start=True) + clf.fit(X, y) + + # Trigger a numerical error by: + # - corrupting the fitted coefficients of the classifier + # - fit it again starting from its current state thanks to warm_start + clf.coef_[:] = np.nan + + with pytest.raises(ValueError, match="Floating-point under-/overflow"): + clf.fit(X, y) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sgd.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..d68eaa6d9d12f315e22e7e6b04f3505dfa0f09f2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sgd.py @@ -0,0 +1,2224 @@ +import pickle +from unittest.mock import Mock + +import joblib +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn import datasets, linear_model, metrics +from sklearn.base import clone, is_classifier +from sklearn.exceptions import ConvergenceWarning +from sklearn.kernel_approximation import Nystroem +from sklearn.linear_model import _sgd_fast as sgd_fast +from sklearn.linear_model import _stochastic_gradient +from sklearn.model_selection import ( + RandomizedSearchCV, + ShuffleSplit, + StratifiedShuffleSplit, +) +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, scale +from sklearn.svm import OneClassSVM +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) + + +def _update_kwargs(kwargs): + if "random_state" not in kwargs: + kwargs["random_state"] = 42 + + if "tol" not in kwargs: + kwargs["tol"] = None + if "max_iter" not in kwargs: + kwargs["max_iter"] = 5 + + +class _SparseSGDClassifier(linear_model.SGDClassifier): + def fit(self, X, y, *args, **kw): + X = sp.csr_matrix(X) + return super().fit(X, y, *args, **kw) + + def partial_fit(self, X, y, *args, **kw): + X = sp.csr_matrix(X) + return super().partial_fit(X, y, *args, **kw) + + def decision_function(self, X): + X = sp.csr_matrix(X) + return super().decision_function(X) + + def predict_proba(self, X): + X = sp.csr_matrix(X) + return super().predict_proba(X) + + +class _SparseSGDRegressor(linear_model.SGDRegressor): + def fit(self, X, y, *args, **kw): + X = sp.csr_matrix(X) + return linear_model.SGDRegressor.fit(self, X, y, *args, **kw) + + def partial_fit(self, X, y, *args, **kw): + X = sp.csr_matrix(X) + return linear_model.SGDRegressor.partial_fit(self, X, y, *args, **kw) + + def decision_function(self, X, *args, **kw): + # XXX untested as of v0.22 + X = sp.csr_matrix(X) + return linear_model.SGDRegressor.decision_function(self, X, *args, **kw) + + +class _SparseSGDOneClassSVM(linear_model.SGDOneClassSVM): + def fit(self, X, *args, **kw): + X = sp.csr_matrix(X) + return linear_model.SGDOneClassSVM.fit(self, X, *args, **kw) + + def partial_fit(self, X, *args, **kw): + X = sp.csr_matrix(X) + return linear_model.SGDOneClassSVM.partial_fit(self, X, *args, **kw) + + def decision_function(self, X, *args, **kw): + X = sp.csr_matrix(X) + return linear_model.SGDOneClassSVM.decision_function(self, X, *args, **kw) + + +def SGDClassifier(**kwargs): + _update_kwargs(kwargs) + return linear_model.SGDClassifier(**kwargs) + + +def SGDRegressor(**kwargs): + 
_update_kwargs(kwargs) + return linear_model.SGDRegressor(**kwargs) + + +def SGDOneClassSVM(**kwargs): + _update_kwargs(kwargs) + return linear_model.SGDOneClassSVM(**kwargs) + + +def SparseSGDClassifier(**kwargs): + _update_kwargs(kwargs) + return _SparseSGDClassifier(**kwargs) + + +def SparseSGDRegressor(**kwargs): + _update_kwargs(kwargs) + return _SparseSGDRegressor(**kwargs) + + +def SparseSGDOneClassSVM(**kwargs): + _update_kwargs(kwargs) + return _SparseSGDOneClassSVM(**kwargs) + + +# Test Data + +# test sample 1 +X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) +Y = [1, 1, 1, 2, 2, 2] +T = np.array([[-1, -1], [2, 2], [3, 2]]) +true_result = [1, 2, 2] + +# test sample 2; string class labels +X2 = np.array( + [ + [-1, 1], + [-0.75, 0.5], + [-1.5, 1.5], + [1, 1], + [0.75, 0.5], + [1.5, 1.5], + [-1, -1], + [0, -0.5], + [1, -1], + ] +) +Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3 +T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]]) +true_result2 = ["one", "two", "three"] + +# test sample 3 +X3 = np.array( + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + ] +) +Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) + +# test sample 4 - two more or less redundant feature groups +X4 = np.array( + [ + [1, 0.9, 0.8, 0, 0, 0], + [1, 0.84, 0.98, 0, 0, 0], + [1, 0.96, 0.88, 0, 0, 0], + [1, 0.91, 0.99, 0, 0, 0], + [0, 0, 0, 0.89, 0.91, 1], + [0, 0, 0, 0.79, 0.84, 1], + [0, 0, 0, 0.91, 0.95, 1], + [0, 0, 0, 0.93, 1, 1], + ] +) +Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) + +iris = datasets.load_iris() + +# test sample 5 - test sample 1 as binary classification problem +X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) +Y5 = [1, 1, 1, 2, 2, 2] +true_result5 = [0, 1, 1] + + +############################################################################### +# Common Test Case to classification and regression + + +# a simple implementation of ASGD to use for testing +# uses squared loss to find the gradient +def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0): + if weight_init is None: + weights = np.zeros(X.shape[1]) + else: + weights = weight_init + + average_weights = np.zeros(X.shape[1]) + intercept = intercept_init + average_intercept = 0.0 + decay = 1.0 + + # sparse data has a fixed decay of .01 + if klass in (SparseSGDClassifier, SparseSGDRegressor): + decay = 0.01 + + for i, entry in enumerate(X): + p = np.dot(entry, weights) + p += intercept + gradient = p - y[i] + weights *= 1.0 - (eta * alpha) + weights += -(eta * gradient * entry) + intercept += -(eta * gradient) * decay + + average_weights *= i + average_weights += weights + average_weights /= i + 1.0 + + average_intercept *= i + average_intercept += intercept + average_intercept /= i + 1.0 + + return average_weights, average_intercept + + +def _test_warm_start(klass, X, Y, lr): + # Test that explicit warm restart... + clf = klass(alpha=0.01, eta0=0.01, shuffle=False, learning_rate=lr) + clf.fit(X, Y) + + clf2 = klass(alpha=0.001, eta0=0.01, shuffle=False, learning_rate=lr) + clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy()) + + # ... and implicit warm restart are equivalent. 
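+ # warm_start=True makes the next call to fit start from the previously
+ # learned coef_ and intercept_ instead of re-initializing them.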
+ clf3 = klass( + alpha=0.01, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr + ) + clf3.fit(X, Y) + + assert clf3.t_ == clf.t_ + assert_array_almost_equal(clf3.coef_, clf.coef_) + + clf3.set_params(alpha=0.001) + clf3.fit(X, Y) + + assert clf3.t_ == clf2.t_ + assert_array_almost_equal(clf3.coef_, clf2.coef_) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"]) +def test_warm_start(klass, lr): + _test_warm_start(klass, X, Y, lr) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_input_format(klass): + # Input format tests. + clf = klass(alpha=0.01, shuffle=False) + clf.fit(X, Y) + Y_ = np.array(Y)[:, np.newaxis] + + Y_ = np.c_[Y_, Y_] + with pytest.raises(ValueError): + clf.fit(X, Y_) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_clone(klass): + # Test whether clone works ok. + clf = klass(alpha=0.01, penalty="l1") + clf = clone(clf) + clf.set_params(penalty="l2") + clf.fit(X, Y) + + clf2 = klass(alpha=0.01, penalty="l2") + clf2.fit(X, Y) + + assert_array_equal(clf.coef_, clf2.coef_) + + +@pytest.mark.parametrize( + "klass", + [ + SGDClassifier, + SparseSGDClassifier, + SGDRegressor, + SparseSGDRegressor, + SGDOneClassSVM, + SparseSGDOneClassSVM, + ], +) +def test_plain_has_no_average_attr(klass): + clf = klass(average=True, eta0=0.01) + clf.fit(X, Y) + + assert hasattr(clf, "_average_coef") + assert hasattr(clf, "_average_intercept") + assert hasattr(clf, "_standard_intercept") + assert hasattr(clf, "_standard_coef") + + clf = klass() + clf.fit(X, Y) + + assert not hasattr(clf, "_average_coef") + assert not hasattr(clf, "_average_intercept") + assert not hasattr(clf, "_standard_intercept") + assert not hasattr(clf, "_standard_coef") + + +@pytest.mark.parametrize( + "klass", + [ + SGDClassifier, + SparseSGDClassifier, + SGDRegressor, + SparseSGDRegressor, + SGDOneClassSVM, + SparseSGDOneClassSVM, + ], +) +def test_late_onset_averaging_not_reached(klass): + clf1 = klass(average=600) + clf2 = klass() + for _ in range(100): + if is_classifier(clf1): + clf1.partial_fit(X, Y, classes=np.unique(Y)) + clf2.partial_fit(X, Y, classes=np.unique(Y)) + else: + clf1.partial_fit(X, Y) + clf2.partial_fit(X, Y) + + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16) + if klass in [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]: + assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16) + elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]: + assert_allclose(clf1.offset_, clf2.offset_) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_late_onset_averaging_reached(klass): + eta0 = 0.001 + alpha = 0.0001 + Y_encode = np.array(Y) + Y_encode[Y_encode == 1] = -1.0 + Y_encode[Y_encode == 2] = 1.0 + + clf1 = klass( + average=7, + learning_rate="constant", + loss="squared_error", + eta0=eta0, + alpha=alpha, + max_iter=2, + shuffle=False, + ) + clf2 = klass( + average=0, + learning_rate="constant", + loss="squared_error", + eta0=eta0, + alpha=alpha, + max_iter=1, + shuffle=False, + ) + + clf1.fit(X, Y_encode) + clf2.fit(X, Y_encode) + + average_weights, average_intercept = asgd( + klass, + X, + Y_encode, + eta0, + alpha, + weight_init=clf2.coef_.ravel(), + intercept_init=clf2.intercept_, + ) 
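+ # average=7 exceeds the 6 training samples, so clf1 only starts averaging
+ # during its second epoch; running asgd from clf2's one-epoch solution
+ # should therefore reproduce clf1's averaged coefficients.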
+ + assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16) + assert_almost_equal(clf1.intercept_, average_intercept, decimal=16) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_early_stopping(klass): + X = iris.data[iris.target > 0] + Y = iris.target[iris.target > 0] + for early_stopping in [True, False]: + max_iter = 1000 + clf = klass(early_stopping=early_stopping, tol=1e-3, max_iter=max_iter).fit( + X, Y + ) + assert clf.n_iter_ < max_iter + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_adaptive_longer_than_constant(klass): + clf1 = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3, max_iter=100) + clf1.fit(iris.data, iris.target) + clf2 = klass(learning_rate="constant", eta0=0.01, tol=1e-3, max_iter=100) + clf2.fit(iris.data, iris.target) + assert clf1.n_iter_ > clf2.n_iter_ + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_validation_set_not_used_for_training(klass): + X, Y = iris.data, iris.target + validation_fraction = 0.4 + seed = 42 + shuffle = False + max_iter = 10 + clf1 = klass( + early_stopping=True, + random_state=np.random.RandomState(seed), + validation_fraction=validation_fraction, + learning_rate="constant", + eta0=0.01, + tol=None, + max_iter=max_iter, + shuffle=shuffle, + ) + clf1.fit(X, Y) + assert clf1.n_iter_ == max_iter + + clf2 = klass( + early_stopping=False, + random_state=np.random.RandomState(seed), + learning_rate="constant", + eta0=0.01, + tol=None, + max_iter=max_iter, + shuffle=shuffle, + ) + + if is_classifier(clf2): + cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=seed) + else: + cv = ShuffleSplit(test_size=validation_fraction, random_state=seed) + idx_train, idx_val = next(cv.split(X, Y)) + idx_train = np.sort(idx_train) # remove shuffling + clf2.fit(X[idx_train], Y[idx_train]) + assert clf2.n_iter_ == max_iter + + assert_array_equal(clf1.coef_, clf2.coef_) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_n_iter_no_change(klass): + X, Y = iris.data, iris.target + # test that n_iter_ increases monotonically with n_iter_no_change + for early_stopping in [True, False]: + n_iter_list = [ + klass( + early_stopping=early_stopping, + n_iter_no_change=n_iter_no_change, + tol=1e-4, + max_iter=1000, + ) + .fit(X, Y) + .n_iter_ + for n_iter_no_change in [2, 3, 10] + ] + assert_array_equal(n_iter_list, sorted(n_iter_list)) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_not_enough_sample_for_early_stopping(klass): + # test an error is raised if the training or validation set is empty + clf = klass(early_stopping=True, validation_fraction=0.99) + with pytest.raises(ValueError): + clf.fit(X3, Y3) + + +############################################################################### +# Classification Test Case + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_clf(klass): + # Check that SGD gives any results :-) + + for loss in ("hinge", "squared_hinge", "log_loss", "modified_huber"): + clf = klass( + penalty="l2", + alpha=0.01, + fit_intercept=True, + loss=loss, + max_iter=10, + shuffle=True, + ) + clf.fit(X, Y) + # assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7) + 
assert_array_equal(clf.predict(T), true_result) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM] +) +def test_provide_coef(klass): + """Check that the shape of `coef_init` is validated.""" + with pytest.raises(ValueError, match="Provided coef_init does not match dataset"): + klass().fit(X, Y, coef_init=np.zeros((3,))) + + +@pytest.mark.parametrize( + "klass, fit_params", + [ + (SGDClassifier, {"intercept_init": np.zeros((3,))}), + (SparseSGDClassifier, {"intercept_init": np.zeros((3,))}), + (SGDOneClassSVM, {"offset_init": np.zeros((3,))}), + (SparseSGDOneClassSVM, {"offset_init": np.zeros((3,))}), + ], +) +def test_set_intercept_offset(klass, fit_params): + """Check that `intercept_init` or `offset_init` is validated.""" + sgd_estimator = klass() + with pytest.raises(ValueError, match="does not match dataset"): + sgd_estimator.fit(X, Y, **fit_params) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor] +) +def test_sgd_early_stopping_with_partial_fit(klass): + """Check that we raise an error for `early_stopping` used with + `partial_fit`. + """ + err_msg = "early_stopping should be False with partial_fit" + with pytest.raises(ValueError, match=err_msg): + klass(early_stopping=True).partial_fit(X, Y) + + +@pytest.mark.parametrize( + "klass, fit_params", + [ + (SGDClassifier, {"intercept_init": 0}), + (SparseSGDClassifier, {"intercept_init": 0}), + (SGDOneClassSVM, {"offset_init": 0}), + (SparseSGDOneClassSVM, {"offset_init": 0}), + ], +) +def test_set_intercept_offset_binary(klass, fit_params): + """Check that we can pass a scaler with binary classification to + `intercept_init` or `offset_init`.""" + klass().fit(X5, Y5, **fit_params) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_average_binary_computed_correctly(klass): + # Checks the SGDClassifier correctly computes the average weights + eta = 0.1 + alpha = 2.0 + n_samples = 20 + n_features = 10 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + + clf = klass( + loss="squared_error", + learning_rate="constant", + eta0=eta, + alpha=alpha, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + # simple linear function without noise + y = np.dot(X, w) + y = np.sign(y) + + clf.fit(X, y) + + average_weights, average_intercept = asgd(klass, X, y, eta, alpha) + average_weights = average_weights.reshape(1, -1) + assert_array_almost_equal(clf.coef_, average_weights, decimal=14) + assert_almost_equal(clf.intercept_, average_intercept, decimal=14) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_set_intercept_to_intercept(klass): + # Checks intercept_ shape consistency for the warm starts + # Inconsistent intercept_ shape. 
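+ # A fitted binary classifier exposes intercept_ with shape (1,); feeding it
+ # back through intercept_init must be accepted without a shape error.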
+ clf = klass().fit(X5, Y5) + klass().fit(X5, Y5, intercept_init=clf.intercept_) + clf = klass().fit(X, Y) + klass().fit(X, Y, intercept_init=clf.intercept_) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_at_least_two_labels(klass): + # Target must have at least two labels + clf = klass(alpha=0.01, max_iter=20) + with pytest.raises(ValueError): + clf.fit(X2, np.ones(9)) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_partial_fit_weight_class_balanced(klass): + # partial_fit with class_weight='balanced' not supported""" + regex = ( + r"class_weight 'balanced' is not supported for " + r"partial_fit\. In order to use 'balanced' weights, " + r"use compute_class_weight\('balanced', classes=classes, y=y\). " + r"In place of y you can use a large enough sample " + r"of the full training set target to properly " + r"estimate the class frequency distributions\. " + r"Pass the resulting weights as the class_weight " + r"parameter\." + ) + with pytest.raises(ValueError, match=regex): + klass(class_weight="balanced").partial_fit(X, Y, classes=np.unique(Y)) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_multiclass(klass): + # Multi-class test case + clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2) + assert clf.coef_.shape == (3, 2) + assert clf.intercept_.shape == (3,) + assert clf.decision_function([[0, 0]]).shape == (1, 3) + pred = clf.predict(T2) + assert_array_equal(pred, true_result2) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_multiclass_average(klass): + eta = 0.001 + alpha = 0.01 + # Multi-class average test case + clf = klass( + loss="squared_error", + learning_rate="constant", + eta0=eta, + alpha=alpha, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + np_Y2 = np.array(Y2) + clf.fit(X2, np_Y2) + classes = np.unique(np_Y2) + + for i, cl in enumerate(classes): + y_i = np.ones(np_Y2.shape[0]) + y_i[np_Y2 != cl] = -1 + average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha) + assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16) + assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_multiclass_with_init_coef(klass): + # Multi-class test case + clf = klass(alpha=0.01, max_iter=20) + clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3)) + assert clf.coef_.shape == (3, 2) + assert clf.intercept_.shape, (3,) + pred = clf.predict(T2) + assert_array_equal(pred, true_result2) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_multiclass_njobs(klass): + # Multi-class test case with multi-core support + clf = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2) + assert clf.coef_.shape == (3, 2) + assert clf.intercept_.shape == (3,) + assert clf.decision_function([[0, 0]]).shape == (1, 3) + pred = clf.predict(T2) + assert_array_equal(pred, true_result2) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_set_coef_multiclass(klass): + # Checks coef_init and intercept_init shape for multi-class + # problems + # Provided coef_ does not match dataset + clf = klass() + with pytest.raises(ValueError): + clf.fit(X2, Y2, coef_init=np.zeros((2, 2))) + + # Provided coef_ does match dataset + clf = klass().fit(X2, Y2, coef_init=np.zeros((3, 2))) + + # Provided intercept_ does not match dataset + clf 
= klass() + with pytest.raises(ValueError): + clf.fit(X2, Y2, intercept_init=np.zeros((1,))) + + # Provided intercept_ does match dataset. + clf = klass().fit(X2, Y2, intercept_init=np.zeros((3,))) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_predict_proba_method_access(klass): + # Checks that SGDClassifier predict_proba and predict_log_proba methods + # can either be accessed or raise an appropriate error message + # otherwise. See + # https://github.com/scikit-learn/scikit-learn/issues/10938 for more + # details. + for loss in linear_model.SGDClassifier.loss_functions: + clf = SGDClassifier(loss=loss) + if loss in ("log_loss", "modified_huber"): + assert hasattr(clf, "predict_proba") + assert hasattr(clf, "predict_log_proba") + else: + inner_msg = "probability estimates are not available for loss={!r}".format( + loss + ) + assert not hasattr(clf, "predict_proba") + assert not hasattr(clf, "predict_log_proba") + with pytest.raises( + AttributeError, match="has no attribute 'predict_proba'" + ) as exec_info: + clf.predict_proba + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + with pytest.raises( + AttributeError, match="has no attribute 'predict_log_proba'" + ) as exec_info: + clf.predict_log_proba + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_proba(klass): + # Check SGD.predict_proba + + # Hinge loss does not allow for conditional prob estimate. + # We cannot use the factory here, because it defines predict_proba + # anyway. + clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=10, tol=None).fit(X, Y) + assert not hasattr(clf, "predict_proba") + assert not hasattr(clf, "predict_log_proba") + + # log and modified_huber losses can output probability estimates + # binary case + for loss in ["log_loss", "modified_huber"]: + clf = klass(loss=loss, alpha=0.01, max_iter=10) + clf.fit(X, Y) + p = clf.predict_proba([[3, 2]]) + assert p[0, 1] > 0.5 + p = clf.predict_proba([[-1, -1]]) + assert p[0, 1] < 0.5 + + # If predict_proba is 0, we get "RuntimeWarning: divide by zero encountered + # in log". We avoid it here. + with np.errstate(divide="ignore"): + p = clf.predict_log_proba([[3, 2]]) + assert p[0, 1] > p[0, 0] + p = clf.predict_log_proba([[-1, -1]]) + assert p[0, 1] < p[0, 0] + + # log loss multiclass probability estimates + clf = klass(loss="log_loss", alpha=0.01, max_iter=10).fit(X2, Y2) + + d = clf.decision_function([[0.1, -0.1], [0.3, 0.2]]) + p = clf.predict_proba([[0.1, -0.1], [0.3, 0.2]]) + assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1)) + assert_almost_equal(p[0].sum(), 1) + assert np.all(p[0] >= 0) + + p = clf.predict_proba([[-1, -1]]) + d = clf.decision_function([[-1, -1]]) + assert_array_equal(np.argsort(p[0]), np.argsort(d[0])) + + lp = clf.predict_log_proba([[3, 2]]) + p = clf.predict_proba([[3, 2]]) + assert_array_almost_equal(np.log(p), lp) + + lp = clf.predict_log_proba([[-1, -1]]) + p = clf.predict_proba([[-1, -1]]) + assert_array_almost_equal(np.log(p), lp) + + # Modified Huber multiclass probability estimates; requires a separate + # test because the hard zero/one probabilities may destroy the + # ordering present in decision_function output. 
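+ # modified_huber clips decision values to [-1, 1], so scores beyond the
+ # margin become hard 0/1 class probabilities before normalization and the
+ # argmax of predict_proba can legitimately differ from decision_function's.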
+ clf = klass(loss="modified_huber", alpha=0.01, max_iter=10) + clf.fit(X2, Y2) + d = clf.decision_function([[3, 2]]) + p = clf.predict_proba([[3, 2]]) + if klass != SparseSGDClassifier: + assert np.argmax(d, axis=1) == np.argmax(p, axis=1) + else: # XXX the sparse test gets a different X2 (?) + assert np.argmin(d, axis=1) == np.argmin(p, axis=1) + + # the following sample produces decision_function values < -1, + # which would cause naive normalization to fail (see comment + # in SGDClassifier.predict_proba) + x = X.mean(axis=0) + d = clf.decision_function([x]) + if np.all(d < -1): # XXX not true in sparse test case (why?) + p = clf.predict_proba([x]) + assert_array_almost_equal(p[0], [1 / 3.0] * 3) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sgd_l1(klass): + # Test L1 regularization + n = len(X4) + rng = np.random.RandomState(13) + idx = np.arange(n) + rng.shuffle(idx) + + X = X4[idx, :] + Y = Y4[idx] + + clf = klass( + penalty="l1", + alpha=0.2, + fit_intercept=False, + max_iter=2000, + tol=None, + shuffle=False, + ) + clf.fit(X, Y) + assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,))) + pred = clf.predict(X) + assert_array_equal(pred, Y) + + # test sparsify with dense inputs + clf.sparsify() + assert sp.issparse(clf.coef_) + pred = clf.predict(X) + assert_array_equal(pred, Y) + + # pickle and unpickle with sparse coef_ + clf = pickle.loads(pickle.dumps(clf)) + assert sp.issparse(clf.coef_) + pred = clf.predict(X) + assert_array_equal(pred, Y) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_class_weights(klass): + # Test class weights. + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight=None) + clf.fit(X, y) + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) + + # we give a small weights to class 1 + clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight={1: 0.001}) + clf.fit(X, y) + + # now the hyperplane should rotate clock-wise and + # the prediction on this point should shift + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_equal_class_weight(klass): + # Test if equal class weights approx. equals no class weights. + X = [[1, 0], [1, 0], [0, 1], [0, 1]] + y = [0, 0, 1, 1] + clf = klass(alpha=0.1, max_iter=1000, class_weight=None) + clf.fit(X, y) + + X = [[1, 0], [0, 1]] + y = [0, 1] + clf_weighted = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5, 1: 0.5}) + clf_weighted.fit(X, y) + + # should be similar up to some epsilon due to learning rate schedule + assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_wrong_class_weight_label(klass): + # ValueError due to not existing class label. 
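+ # The class_weight dict refers to label 0, which never occurs in Y ({1, 2}),
+ # so fit must reject it.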
+ clf = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5}) + with pytest.raises(ValueError): + clf.fit(X, Y) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_weights_multiplied(klass): + # Tests that class_weight and sample_weight are multiplicative + class_weights = {1: 0.6, 2: 0.3} + rng = np.random.RandomState(0) + sample_weights = rng.random_sample(Y4.shape[0]) + multiplied_together = np.copy(sample_weights) + multiplied_together[Y4 == 1] *= class_weights[1] + multiplied_together[Y4 == 2] *= class_weights[2] + + clf1 = klass(alpha=0.1, max_iter=20, class_weight=class_weights) + clf2 = klass(alpha=0.1, max_iter=20) + + clf1.fit(X4, Y4, sample_weight=sample_weights) + clf2.fit(X4, Y4, sample_weight=multiplied_together) + + assert_almost_equal(clf1.coef_, clf2.coef_) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_balanced_weight(klass): + # Test class weights for imbalanced data""" + # compute reference metrics on iris dataset that is quite balanced by + # default + X, y = iris.data, iris.target + X = scale(X) + idx = np.arange(X.shape[0]) + rng = np.random.RandomState(6) + rng.shuffle(idx) + X = X[idx] + y = y[idx] + clf = klass(alpha=0.0001, max_iter=1000, class_weight=None, shuffle=False).fit(X, y) + f1 = metrics.f1_score(y, clf.predict(X), average="weighted") + assert_almost_equal(f1, 0.96, decimal=1) + + # make the same prediction using balanced class_weight + clf_balanced = klass( + alpha=0.0001, max_iter=1000, class_weight="balanced", shuffle=False + ).fit(X, y) + f1 = metrics.f1_score(y, clf_balanced.predict(X), average="weighted") + assert_almost_equal(f1, 0.96, decimal=1) + + # Make sure that in the balanced case it does not change anything + # to use "balanced" + assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6) + + # build an very very imbalanced dataset out of iris data + X_0 = X[y == 0, :] + y_0 = y[y == 0] + + X_imbalanced = np.vstack([X] + [X_0] * 10) + y_imbalanced = np.concatenate([y] + [y_0] * 10) + + # fit a model on the imbalanced data without class weight info + clf = klass(max_iter=1000, class_weight=None, shuffle=False) + clf.fit(X_imbalanced, y_imbalanced) + y_pred = clf.predict(X) + assert metrics.f1_score(y, y_pred, average="weighted") < 0.96 + + # fit a model with balanced class_weight enabled + clf = klass(max_iter=1000, class_weight="balanced", shuffle=False) + clf.fit(X_imbalanced, y_imbalanced) + y_pred = clf.predict(X) + assert metrics.f1_score(y, y_pred, average="weighted") > 0.96 + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_sample_weights(klass): + # Test weights on individual samples + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = [1, 1, 1, -1, -1] + + clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False) + clf.fit(X, y) + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) + + # we give a small weights to class 1 + clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2) + + # now the hyperplane should rotate clock-wise and + # the prediction on this point should shift + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) + + +@pytest.mark.parametrize( + "klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM] +) +def test_wrong_sample_weights(klass): + # Test if ValueError is raised if sample_weight has wrong shape + if klass in [SGDClassifier, SparseSGDClassifier]: + clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False) + elif 
klass in [SGDOneClassSVM, SparseSGDOneClassSVM]: + clf = klass(nu=0.1, max_iter=1000, fit_intercept=False) + # provided sample_weight too long + with pytest.raises(ValueError): + clf.fit(X, Y, sample_weight=np.arange(7)) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_partial_fit_exception(klass): + clf = klass(alpha=0.01) + # classes was not specified + with pytest.raises(ValueError): + clf.partial_fit(X3, Y3) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_partial_fit_binary(klass): + third = X.shape[0] // 3 + clf = klass(alpha=0.01) + classes = np.unique(Y) + + clf.partial_fit(X[:third], Y[:third], classes=classes) + assert clf.coef_.shape == (1, X.shape[1]) + assert clf.intercept_.shape == (1,) + assert clf.decision_function([[0, 0]]).shape == (1,) + id1 = id(clf.coef_.data) + + clf.partial_fit(X[third:], Y[third:]) + id2 = id(clf.coef_.data) + # check that coef_ haven't been re-allocated + assert id1, id2 + + y_pred = clf.predict(T) + assert_array_equal(y_pred, true_result) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_partial_fit_multiclass(klass): + third = X2.shape[0] // 3 + clf = klass(alpha=0.01) + classes = np.unique(Y2) + + clf.partial_fit(X2[:third], Y2[:third], classes=classes) + assert clf.coef_.shape == (3, X2.shape[1]) + assert clf.intercept_.shape == (3,) + assert clf.decision_function([[0, 0]]).shape == (1, 3) + id1 = id(clf.coef_.data) + + clf.partial_fit(X2[third:], Y2[third:]) + id2 = id(clf.coef_.data) + # check that coef_ haven't been re-allocated + assert id1, id2 + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_partial_fit_multiclass_average(klass): + third = X2.shape[0] // 3 + clf = klass(alpha=0.01, average=X2.shape[0]) + classes = np.unique(Y2) + + clf.partial_fit(X2[:third], Y2[:third], classes=classes) + assert clf.coef_.shape == (3, X2.shape[1]) + assert clf.intercept_.shape == (3,) + + clf.partial_fit(X2[third:], Y2[third:]) + assert clf.coef_.shape == (3, X2.shape[1]) + assert clf.intercept_.shape == (3,) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_fit_then_partial_fit(klass): + # Partial_fit should work after initial fit in the multiclass case. + # Non-regression test for #2496; fit would previously produce a + # Fortran-ordered coef_ that subsequent partial_fit couldn't handle. 
+ clf = klass() + clf.fit(X2, Y2) + clf.partial_fit(X2, Y2) # no exception here + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"]) +def test_partial_fit_equal_fit_classif(klass, lr): + for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)): + clf = klass(alpha=0.01, eta0=0.01, max_iter=2, learning_rate=lr, shuffle=False) + clf.fit(X_, Y_) + y_pred = clf.decision_function(T_) + t = clf.t_ + + classes = np.unique(Y_) + clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) + for i in range(2): + clf.partial_fit(X_, Y_, classes=classes) + y_pred2 = clf.decision_function(T_) + + assert clf.t_ == t + assert_array_almost_equal(y_pred, y_pred2, decimal=2) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_regression_losses(klass): + random_state = np.random.RandomState(1) + clf = klass( + alpha=0.01, + learning_rate="constant", + eta0=0.1, + loss="epsilon_insensitive", + random_state=random_state, + ) + clf.fit(X, Y) + assert 1.0 == np.mean(clf.predict(X) == Y) + + clf = klass( + alpha=0.01, + learning_rate="constant", + eta0=0.1, + loss="squared_epsilon_insensitive", + random_state=random_state, + ) + clf.fit(X, Y) + assert 1.0 == np.mean(clf.predict(X) == Y) + + clf = klass(alpha=0.01, loss="huber", random_state=random_state) + clf.fit(X, Y) + assert 1.0 == np.mean(clf.predict(X) == Y) + + clf = klass( + alpha=0.01, + learning_rate="constant", + eta0=0.01, + loss="squared_error", + random_state=random_state, + ) + clf.fit(X, Y) + assert 1.0 == np.mean(clf.predict(X) == Y) + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_warm_start_multiclass(klass): + _test_warm_start(klass, X2, Y2, "optimal") + + +@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier]) +def test_multiple_fit(klass): + # Test multiple calls of fit w/ different shaped inputs. + clf = klass(alpha=0.01, shuffle=False) + clf.fit(X, Y) + assert hasattr(clf, "coef_") + + # Non-regression test: try fitting with a different label set. + y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)] + clf.fit(X[:, :-1], y) + + +############################################################################### +# Regression Test Case + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_sgd_reg(klass): + # Check that SGD gives any results. 
+ clf = klass(alpha=0.1, max_iter=2, fit_intercept=False) + clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2]) + assert clf.coef_[0] == clf.coef_[1] + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_sgd_averaged_computed_correctly(klass): + # Tests the average regressor matches the naive implementation + + eta = 0.001 + alpha = 0.01 + n_samples = 20 + n_features = 10 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + + # simple linear function without noise + y = np.dot(X, w) + + clf = klass( + loss="squared_error", + learning_rate="constant", + eta0=eta, + alpha=alpha, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + clf.fit(X, y) + average_weights, average_intercept = asgd(klass, X, y, eta, alpha) + + assert_array_almost_equal(clf.coef_, average_weights, decimal=16) + assert_almost_equal(clf.intercept_, average_intercept, decimal=16) + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_sgd_averaged_partial_fit(klass): + # Tests whether the partial fit yields the same average as the fit + eta = 0.001 + alpha = 0.01 + n_samples = 20 + n_features = 10 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + + # simple linear function without noise + y = np.dot(X, w) + + clf = klass( + loss="squared_error", + learning_rate="constant", + eta0=eta, + alpha=alpha, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + clf.partial_fit(X[: int(n_samples / 2)][:], y[: int(n_samples / 2)]) + clf.partial_fit(X[int(n_samples / 2) :][:], y[int(n_samples / 2) :]) + average_weights, average_intercept = asgd(klass, X, y, eta, alpha) + + assert_array_almost_equal(clf.coef_, average_weights, decimal=16) + assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16) + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_average_sparse(klass): + # Checks the average weights on data with 0s + + eta = 0.001 + alpha = 0.01 + clf = klass( + loss="squared_error", + learning_rate="constant", + eta0=eta, + alpha=alpha, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + n_samples = Y3.shape[0] + + clf.partial_fit(X3[: int(n_samples / 2)][:], Y3[: int(n_samples / 2)]) + clf.partial_fit(X3[int(n_samples / 2) :][:], Y3[int(n_samples / 2) :]) + average_weights, average_intercept = asgd(klass, X3, Y3, eta, alpha) + + assert_array_almost_equal(clf.coef_, average_weights, decimal=16) + assert_almost_equal(clf.intercept_, average_intercept, decimal=16) + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_sgd_least_squares_fit(klass): + xmin, xmax = -5, 5 + n_samples = 100 + rng = np.random.RandomState(0) + X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) + + # simple linear function without noise + y = 0.5 * X.ravel() + + clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False) + clf.fit(X, y) + score = clf.score(X, y) + assert score > 0.99 + + # simple linear function with noise + y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() + + clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False) + clf.fit(X, y) + score = clf.score(X, y) + assert score > 0.5 + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_sgd_epsilon_insensitive(klass): + xmin, xmax = -5, 5 + n_samples = 100 + rng = np.random.RandomState(0) + 
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) + + # simple linear function without noise + y = 0.5 * X.ravel() + + clf = klass( + loss="epsilon_insensitive", + epsilon=0.01, + alpha=0.1, + max_iter=20, + fit_intercept=False, + ) + clf.fit(X, y) + score = clf.score(X, y) + assert score > 0.99 + + # simple linear function with noise + y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() + + clf = klass( + loss="epsilon_insensitive", + epsilon=0.01, + alpha=0.1, + max_iter=20, + fit_intercept=False, + ) + clf.fit(X, y) + score = clf.score(X, y) + assert score > 0.5 + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_sgd_huber_fit(klass): + xmin, xmax = -5, 5 + n_samples = 100 + rng = np.random.RandomState(0) + X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) + + # simple linear function without noise + y = 0.5 * X.ravel() + + clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False) + clf.fit(X, y) + score = clf.score(X, y) + assert score > 0.99 + + # simple linear function with noise + y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() + + clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False) + clf.fit(X, y) + score = clf.score(X, y) + assert score > 0.5 + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_elasticnet_convergence(klass): + # Check that the SGD output is consistent with coordinate descent + + n_samples, n_features = 1000, 5 + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + # ground_truth linear model that generate y from X and to which the + # models should converge if the regularizer would be set to 0.0 + ground_truth_coef = rng.randn(n_features) + y = np.dot(X, ground_truth_coef) + + # XXX: alpha = 0.1 seems to cause convergence problems + for alpha in [0.01, 0.001]: + for l1_ratio in [0.5, 0.8, 1.0]: + cd = linear_model.ElasticNet( + alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False + ) + cd.fit(X, y) + sgd = klass( + penalty="elasticnet", + max_iter=50, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=False, + ) + sgd.fit(X, y) + err_msg = ( + "cd and sgd did not converge to comparable " + "results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio) + ) + assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg) + + +@ignore_warnings +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_partial_fit(klass): + third = X.shape[0] // 3 + clf = klass(alpha=0.01) + + clf.partial_fit(X[:third], Y[:third]) + assert clf.coef_.shape == (X.shape[1],) + assert clf.intercept_.shape == (1,) + assert clf.predict([[0, 0]]).shape == (1,) + id1 = id(clf.coef_.data) + + clf.partial_fit(X[third:], Y[third:]) + id2 = id(clf.coef_.data) + # check that coef_ haven't been re-allocated + assert id1, id2 + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"]) +def test_partial_fit_equal_fit(klass, lr): + clf = klass(alpha=0.01, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False) + clf.fit(X, Y) + y_pred = clf.predict(T) + t = clf.t_ + + clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) + for i in range(2): + clf.partial_fit(X, Y) + y_pred2 = clf.predict(T) + + assert clf.t_ == t + assert_array_almost_equal(y_pred, y_pred2, decimal=2) + + +@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor]) +def test_loss_function_epsilon(klass): + clf = 
klass(epsilon=0.9) + clf.set_params(epsilon=0.1) + assert clf.loss_functions["huber"][1] == 0.1 + + +############################################################################### +# SGD One Class SVM Test Case + + +# a simple implementation of ASGD to use for testing SGDOneClassSVM +def asgd_oneclass(klass, X, eta, nu, coef_init=None, offset_init=0.0): + if coef_init is None: + coef = np.zeros(X.shape[1]) + else: + coef = coef_init + + average_coef = np.zeros(X.shape[1]) + offset = offset_init + intercept = 1 - offset + average_intercept = 0.0 + decay = 1.0 + + # sparse data has a fixed decay of .01 + if klass == SparseSGDOneClassSVM: + decay = 0.01 + + for i, entry in enumerate(X): + p = np.dot(entry, coef) + p += intercept + if p <= 1.0: + gradient = -1 + else: + gradient = 0 + coef *= max(0, 1.0 - (eta * nu / 2)) + coef += -(eta * gradient * entry) + intercept += -(eta * (nu + gradient)) * decay + + average_coef *= i + average_coef += coef + average_coef /= i + 1.0 + + average_intercept *= i + average_intercept += intercept + average_intercept /= i + 1.0 + + return average_coef, 1 - average_intercept + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +def _test_warm_start_oneclass(klass, X, lr): + # Test that explicit warm restart... + clf = klass(nu=0.5, eta0=0.01, shuffle=False, learning_rate=lr) + clf.fit(X) + + clf2 = klass(nu=0.1, eta0=0.01, shuffle=False, learning_rate=lr) + clf2.fit(X, coef_init=clf.coef_.copy(), offset_init=clf.offset_.copy()) + + # ... and implicit warm restart are equivalent. + clf3 = klass(nu=0.5, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr) + clf3.fit(X) + + assert clf3.t_ == clf.t_ + assert_allclose(clf3.coef_, clf.coef_) + + clf3.set_params(nu=0.1) + clf3.fit(X) + + assert clf3.t_ == clf2.t_ + assert_allclose(clf3.coef_, clf2.coef_) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"]) +def test_warm_start_oneclass(klass, lr): + _test_warm_start_oneclass(klass, X, lr) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +def test_clone_oneclass(klass): + # Test whether clone works ok. 
+ clf = klass(nu=0.5) + clf = clone(clf) + clf.set_params(nu=0.1) + clf.fit(X) + + clf2 = klass(nu=0.1) + clf2.fit(X) + + assert_array_equal(clf.coef_, clf2.coef_) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +def test_partial_fit_oneclass(klass): + third = X.shape[0] // 3 + clf = klass(nu=0.1) + + clf.partial_fit(X[:third]) + assert clf.coef_.shape == (X.shape[1],) + assert clf.offset_.shape == (1,) + assert clf.predict([[0, 0]]).shape == (1,) + previous_coefs = clf.coef_ + + clf.partial_fit(X[third:]) + # check that coef_ haven't been re-allocated + assert clf.coef_ is previous_coefs + + # raises ValueError if number of features does not match previous data + with pytest.raises(ValueError): + clf.partial_fit(X[:, 1]) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"]) +def test_partial_fit_equal_fit_oneclass(klass, lr): + clf = klass(nu=0.05, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False) + clf.fit(X) + y_scores = clf.decision_function(T) + t = clf.t_ + coef = clf.coef_ + offset = clf.offset_ + + clf = klass(nu=0.05, eta0=0.01, max_iter=1, learning_rate=lr, shuffle=False) + for _ in range(2): + clf.partial_fit(X) + y_scores2 = clf.decision_function(T) + + assert clf.t_ == t + assert_allclose(y_scores, y_scores2) + assert_allclose(clf.coef_, coef) + assert_allclose(clf.offset_, offset) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +def test_late_onset_averaging_reached_oneclass(klass): + # Test average + eta0 = 0.001 + nu = 0.05 + + # 2 passes over the training set but average only at second pass + clf1 = klass( + average=7, learning_rate="constant", eta0=eta0, nu=nu, max_iter=2, shuffle=False + ) + # 1 pass over the training set with no averaging + clf2 = klass( + average=0, learning_rate="constant", eta0=eta0, nu=nu, max_iter=1, shuffle=False + ) + + clf1.fit(X) + clf2.fit(X) + + # Start from clf2 solution, compute averaging using asgd function and + # compare with clf1 solution + average_coef, average_offset = asgd_oneclass( + klass, X, eta0, nu, coef_init=clf2.coef_.ravel(), offset_init=clf2.offset_ + ) + + assert_allclose(clf1.coef_.ravel(), average_coef.ravel()) + assert_allclose(clf1.offset_, average_offset) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +def test_sgd_averaged_computed_correctly_oneclass(klass): + # Tests the average SGD One-Class SVM matches the naive implementation + eta = 0.001 + nu = 0.05 + n_samples = 20 + n_features = 10 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + + clf = klass( + learning_rate="constant", + eta0=eta, + nu=nu, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + clf.fit(X) + average_coef, average_offset = asgd_oneclass(klass, X, eta, nu) + + assert_allclose(clf.coef_, average_coef) + assert_allclose(clf.offset_, average_offset) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +def test_sgd_averaged_partial_fit_oneclass(klass): + # Tests whether the partial fit yields the same average as the fit + eta = 0.001 + nu = 0.05 + n_samples = 20 + n_features = 10 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + + clf = klass( + learning_rate="constant", + eta0=eta, + nu=nu, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + clf.partial_fit(X[: int(n_samples / 2)][:]) + 
clf.partial_fit(X[int(n_samples / 2) :][:]) + average_coef, average_offset = asgd_oneclass(klass, X, eta, nu) + + assert_allclose(clf.coef_, average_coef) + assert_allclose(clf.offset_, average_offset) + + +@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM]) +def test_average_sparse_oneclass(klass): + # Checks the average coef on data with 0s + eta = 0.001 + nu = 0.01 + clf = klass( + learning_rate="constant", + eta0=eta, + nu=nu, + fit_intercept=True, + max_iter=1, + average=True, + shuffle=False, + ) + + n_samples = X3.shape[0] + + clf.partial_fit(X3[: int(n_samples / 2)]) + clf.partial_fit(X3[int(n_samples / 2) :]) + average_coef, average_offset = asgd_oneclass(klass, X3, eta, nu) + + assert_allclose(clf.coef_, average_coef) + assert_allclose(clf.offset_, average_offset) + + +def test_sgd_oneclass(): + # Test fit, decision_function, predict and score_samples on a toy + # dataset + X_train = np.array([[-2, -1], [-1, -1], [1, 1]]) + X_test = np.array([[0.5, -2], [2, 2]]) + clf = SGDOneClassSVM( + nu=0.5, eta0=1, learning_rate="constant", shuffle=False, max_iter=1 + ) + clf.fit(X_train) + assert_allclose(clf.coef_, np.array([-0.125, 0.4375])) + assert clf.offset_[0] == -0.5 + + scores = clf.score_samples(X_test) + assert_allclose(scores, np.array([-0.9375, 0.625])) + + dec = clf.score_samples(X_test) - clf.offset_ + assert_allclose(clf.decision_function(X_test), dec) + + pred = clf.predict(X_test) + assert_array_equal(pred, np.array([-1, 1])) + + +def test_ocsvm_vs_sgdocsvm(): + # Checks SGDOneClass SVM gives a good approximation of kernelized + # One-Class SVM + nu = 0.05 + gamma = 2.0 + random_state = 42 + + # Generate train and test data + rng = np.random.RandomState(random_state) + X = 0.3 * rng.randn(500, 2) + X_train = np.r_[X + 2, X - 2] + X = 0.3 * rng.randn(100, 2) + X_test = np.r_[X + 2, X - 2] + + # One-Class SVM + clf = OneClassSVM(gamma=gamma, kernel="rbf", nu=nu) + clf.fit(X_train) + y_pred_ocsvm = clf.predict(X_test) + dec_ocsvm = clf.decision_function(X_test).reshape(1, -1) + + # SGDOneClassSVM using kernel approximation + max_iter = 15 + transform = Nystroem(gamma=gamma, random_state=random_state) + clf_sgd = SGDOneClassSVM( + nu=nu, + shuffle=True, + fit_intercept=True, + max_iter=max_iter, + random_state=random_state, + tol=None, + ) + pipe_sgd = make_pipeline(transform, clf_sgd) + pipe_sgd.fit(X_train) + y_pred_sgdocsvm = pipe_sgd.predict(X_test) + dec_sgdocsvm = pipe_sgd.decision_function(X_test).reshape(1, -1) + + assert np.mean(y_pred_sgdocsvm == y_pred_ocsvm) >= 0.99 + corrcoef = np.corrcoef(np.concatenate((dec_ocsvm, dec_sgdocsvm)))[0, 1] + assert corrcoef >= 0.9 + + +def test_l1_ratio(): + # Test if l1 ratio extremes match L1 and L2 penalty settings. 
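+ # SGD's elasticnet penalty mixes l1_ratio * L1 + (1 - l1_ratio) * L2, so
+ # l1_ratio close to 1 should behave like penalty="l1" and l1_ratio close
+ # to 0 like penalty="l2".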
+ X, y = datasets.make_classification( + n_samples=1000, n_features=100, n_informative=20, random_state=1234 + ) + + # test if elasticnet with l1_ratio near 1 gives same result as pure l1 + est_en = SGDClassifier( + alpha=0.001, + penalty="elasticnet", + tol=None, + max_iter=6, + l1_ratio=0.9999999999, + random_state=42, + ).fit(X, y) + est_l1 = SGDClassifier( + alpha=0.001, penalty="l1", max_iter=6, random_state=42, tol=None + ).fit(X, y) + assert_array_almost_equal(est_en.coef_, est_l1.coef_) + + # test if elasticnet with l1_ratio near 0 gives same result as pure l2 + est_en = SGDClassifier( + alpha=0.001, + penalty="elasticnet", + tol=None, + max_iter=6, + l1_ratio=0.0000000001, + random_state=42, + ).fit(X, y) + est_l2 = SGDClassifier( + alpha=0.001, penalty="l2", max_iter=6, random_state=42, tol=None + ).fit(X, y) + assert_array_almost_equal(est_en.coef_, est_l2.coef_) + + +def test_underflow_or_overlow(): + with np.errstate(all="raise"): + # Generate some weird data with hugely unscaled features + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 10 + + X = rng.normal(size=(n_samples, n_features)) + X[:, :2] *= 1e300 + assert np.isfinite(X).all() + + # Use MinMaxScaler to scale the data without introducing a numerical + # instability (computing the standard deviation naively is not possible + # on this data) + X_scaled = MinMaxScaler().fit_transform(X) + assert np.isfinite(X_scaled).all() + + # Define a ground truth on the scaled data + ground_truth = rng.normal(size=n_features) + y = (np.dot(X_scaled, ground_truth) > 0.0).astype(np.int32) + assert_array_equal(np.unique(y), [0, 1]) + + model = SGDClassifier(alpha=0.1, loss="squared_hinge", max_iter=500) + + # smoke test: model is stable on scaled data + model.fit(X_scaled, y) + assert np.isfinite(model.coef_).all() + + # model is numerically unstable on unscaled data + msg_regxp = ( + r"Floating-point under-/overflow occurred at epoch #.*" + " Scaling input data with StandardScaler or MinMaxScaler" + " might help." 
+ ) + with pytest.raises(ValueError, match=msg_regxp): + model.fit(X, y) + + +def test_numerical_stability_large_gradient(): + # Non regression test case for numerical stability on scaled problems + # where the gradient can still explode with some losses + model = SGDClassifier( + loss="squared_hinge", + max_iter=10, + shuffle=True, + penalty="elasticnet", + l1_ratio=0.3, + alpha=0.01, + eta0=0.001, + random_state=0, + tol=None, + ) + with np.errstate(all="raise"): + model.fit(iris.data, iris.target) + assert np.isfinite(model.coef_).all() + + +@pytest.mark.parametrize("penalty", ["l2", "l1", "elasticnet"]) +def test_large_regularization(penalty): + # Non regression tests for numerical stability issues caused by large + # regularization parameters + model = SGDClassifier( + alpha=1e5, + learning_rate="constant", + eta0=0.1, + penalty=penalty, + shuffle=False, + tol=None, + max_iter=6, + ) + with np.errstate(all="raise"): + model.fit(iris.data, iris.target) + assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_)) + + +def test_tol_parameter(): + # Test that the tol parameter behaves as expected + X = StandardScaler().fit_transform(iris.data) + y = iris.target == 1 + + # With tol is None, the number of iteration should be equal to max_iter + max_iter = 42 + model_0 = SGDClassifier(tol=None, random_state=0, max_iter=max_iter) + model_0.fit(X, y) + assert max_iter == model_0.n_iter_ + + # If tol is not None, the number of iteration should be less than max_iter + max_iter = 2000 + model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter) + model_1.fit(X, y) + assert max_iter > model_1.n_iter_ + assert model_1.n_iter_ > 5 + + # A larger tol should yield a smaller number of iteration + model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter) + model_2.fit(X, y) + assert model_1.n_iter_ > model_2.n_iter_ + assert model_2.n_iter_ > 3 + + # Strict tolerance and small max_iter should trigger a warning + model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0) + warning_message = ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." 
+ ) + with pytest.warns(ConvergenceWarning, match=warning_message): + model_3.fit(X, y) + assert model_3.n_iter_ == 3 + + +def _test_loss_common(loss_function, cases): + # Test the different loss functions + # cases is a list of (p, y, expected) + for p, y, expected_loss, expected_dloss in cases: + assert_almost_equal(loss_function.py_loss(p, y), expected_loss) + assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss) + + +def test_loss_hinge(): + # Test Hinge (hinge / perceptron) + # hinge + loss = sgd_fast.Hinge(1.0) + cases = [ + # (p, y, expected_loss, expected_dloss) + (1.1, 1.0, 0.0, 0.0), + (-2.0, -1.0, 0.0, 0.0), + (1.0, 1.0, 0.0, -1.0), + (-1.0, -1.0, 0.0, 1.0), + (0.5, 1.0, 0.5, -1.0), + (2.0, -1.0, 3.0, 1.0), + (-0.5, -1.0, 0.5, 1.0), + (0.0, 1.0, 1, -1.0), + ] + _test_loss_common(loss, cases) + + # perceptron + loss = sgd_fast.Hinge(0.0) + cases = [ + # (p, y, expected_loss, expected_dloss) + (1.0, 1.0, 0.0, 0.0), + (-0.1, -1.0, 0.0, 0.0), + (0.0, 1.0, 0.0, -1.0), + (0.0, -1.0, 0.0, 1.0), + (0.5, -1.0, 0.5, 1.0), + (2.0, -1.0, 2.0, 1.0), + (-0.5, 1.0, 0.5, -1.0), + (-1.0, 1.0, 1.0, -1.0), + ] + _test_loss_common(loss, cases) + + +def test_gradient_squared_hinge(): + # Test SquaredHinge + loss = sgd_fast.SquaredHinge(1.0) + cases = [ + # (p, y, expected_loss, expected_dloss) + (1.0, 1.0, 0.0, 0.0), + (-2.0, -1.0, 0.0, 0.0), + (1.0, -1.0, 4.0, 4.0), + (-1.0, 1.0, 4.0, -4.0), + (0.5, 1.0, 0.25, -1.0), + (0.5, -1.0, 2.25, 3.0), + ] + _test_loss_common(loss, cases) + + +def test_loss_log(): + # Test Log (logistic loss) + loss = sgd_fast.Log() + cases = [ + # (p, y, expected_loss, expected_dloss) + (1.0, 1.0, np.log(1.0 + np.exp(-1.0)), -1.0 / (np.exp(1.0) + 1.0)), + (1.0, -1.0, np.log(1.0 + np.exp(1.0)), 1.0 / (np.exp(-1.0) + 1.0)), + (-1.0, -1.0, np.log(1.0 + np.exp(-1.0)), 1.0 / (np.exp(1.0) + 1.0)), + (-1.0, 1.0, np.log(1.0 + np.exp(1.0)), -1.0 / (np.exp(-1.0) + 1.0)), + (0.0, 1.0, np.log(2), -0.5), + (0.0, -1.0, np.log(2), 0.5), + (17.9, -1.0, 17.9, 1.0), + (-17.9, 1.0, 17.9, -1.0), + ] + _test_loss_common(loss, cases) + assert_almost_equal(loss.py_dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16) + assert_almost_equal(loss.py_loss(18.1, 1.0), np.exp(-18.1), 16) + assert_almost_equal(loss.py_dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16) + assert_almost_equal(loss.py_loss(-18.1, 1.0), 18.1, 16) + + +def test_loss_squared_loss(): + # Test SquaredLoss + loss = sgd_fast.SquaredLoss() + cases = [ + # (p, y, expected_loss, expected_dloss) + (0.0, 0.0, 0.0, 0.0), + (1.0, 1.0, 0.0, 0.0), + (1.0, 0.0, 0.5, 1.0), + (0.5, -1.0, 1.125, 1.5), + (-2.5, 2.0, 10.125, -4.5), + ] + _test_loss_common(loss, cases) + + +def test_loss_huber(): + # Test Huber + loss = sgd_fast.Huber(0.1) + cases = [ + # (p, y, expected_loss, expected_dloss) + (0.0, 0.0, 0.0, 0.0), + (0.1, 0.0, 0.005, 0.1), + (0.0, 0.1, 0.005, -0.1), + (3.95, 4.0, 0.00125, -0.05), + (5.0, 2.0, 0.295, 0.1), + (-1.0, 5.0, 0.595, -0.1), + ] + _test_loss_common(loss, cases) + + +def test_loss_modified_huber(): + # (p, y, expected_loss, expected_dloss) + loss = sgd_fast.ModifiedHuber() + cases = [ + # (p, y, expected_loss, expected_dloss) + (1.0, 1.0, 0.0, 0.0), + (-1.0, -1.0, 0.0, 0.0), + (2.0, 1.0, 0.0, 0.0), + (0.0, 1.0, 1.0, -2.0), + (-1.0, 1.0, 4.0, -4.0), + (0.5, -1.0, 2.25, 3.0), + (-2.0, 1.0, 8, -4.0), + (-3.0, 1.0, 12, -4.0), + ] + _test_loss_common(loss, cases) + + +def test_loss_epsilon_insensitive(): + # Test EpsilonInsensitive + loss = sgd_fast.EpsilonInsensitive(0.1) + cases = [ + # (p, y, expected_loss, expected_dloss) + 
(0.0, 0.0, 0.0, 0.0), + (0.1, 0.0, 0.0, 0.0), + (-2.05, -2.0, 0.0, 0.0), + (3.05, 3.0, 0.0, 0.0), + (2.2, 2.0, 0.1, 1.0), + (2.0, -1.0, 2.9, 1.0), + (2.0, 2.2, 0.1, -1.0), + (-2.0, 1.0, 2.9, -1.0), + ] + _test_loss_common(loss, cases) + + +def test_loss_squared_epsilon_insensitive(): + # Test SquaredEpsilonInsensitive + loss = sgd_fast.SquaredEpsilonInsensitive(0.1) + cases = [ + # (p, y, expected_loss, expected_dloss) + (0.0, 0.0, 0.0, 0.0), + (0.1, 0.0, 0.0, 0.0), + (-2.05, -2.0, 0.0, 0.0), + (3.05, 3.0, 0.0, 0.0), + (2.2, 2.0, 0.01, 0.2), + (2.0, -1.0, 8.41, 5.8), + (2.0, 2.2, 0.01, -0.2), + (-2.0, 1.0, 8.41, -5.8), + ] + _test_loss_common(loss, cases) + + +def test_multi_thread_multi_class_and_early_stopping(): + # This is a non-regression test for a bad interaction between + # early stopping internal attribute and thread-based parallelism. + clf = SGDClassifier( + alpha=1e-3, + tol=1e-3, + max_iter=1000, + early_stopping=True, + n_iter_no_change=100, + random_state=0, + n_jobs=2, + ) + clf.fit(iris.data, iris.target) + assert clf.n_iter_ > clf.n_iter_no_change + assert clf.n_iter_ < clf.n_iter_no_change + 20 + assert clf.score(iris.data, iris.target) > 0.8 + + +def test_multi_core_gridsearch_and_early_stopping(): + # This is a non-regression test for a bad interaction between + # early stopping internal attribute and process-based multi-core + # parallelism. + param_grid = { + "alpha": np.logspace(-4, 4, 9), + "n_iter_no_change": [5, 10, 50], + } + + clf = SGDClassifier(tol=1e-2, max_iter=1000, early_stopping=True, random_state=0) + search = RandomizedSearchCV(clf, param_grid, n_iter=5, n_jobs=2, random_state=0) + search.fit(iris.data, iris.target) + assert search.best_score_ > 0.8 + + +@pytest.mark.parametrize("backend", ["loky", "multiprocessing", "threading"]) +def test_SGDClassifier_fit_for_all_backends(backend): + # This is a non-regression smoke test. In the multi-class case, + # SGDClassifier.fit fits each class in a one-versus-all fashion using + # joblib.Parallel. However, each OvA step updates the coef_ attribute of + # the estimator in-place. Internally, SGDClassifier calls Parallel using + # require='sharedmem'. This test makes sure SGDClassifier.fit works + # consistently even when the user asks for a backend that does not provide + # sharedmem semantics. + + # We further test a case where memmapping would have been used if + # SGDClassifier.fit was called from a loky or multiprocessing backend. In + # this specific case, in-place modification of clf.coef_ would have caused + # a segmentation fault when trying to write in a readonly memory mapped + # buffer. + + random_state = np.random.RandomState(42) + + # Create a classification problem with 50000 features and 20 classes. Using + # loky or multiprocessing this make the clf.coef_ exceed the threshold + # above which memmaping is used in joblib and loky (1MB as of 2018/11/1). 
+ X = sp.random(500, 2000, density=0.02, format="csr", random_state=random_state) + y = random_state.choice(20, 500) + + # Begin by fitting a SGD classifier sequentially + clf_sequential = SGDClassifier(max_iter=1000, n_jobs=1, random_state=42) + clf_sequential.fit(X, y) + + # Fit a SGDClassifier using the specified backend, and make sure the + # coefficients are equal to those obtained using a sequential fit + clf_parallel = SGDClassifier(max_iter=1000, n_jobs=4, random_state=42) + with joblib.parallel_backend(backend=backend): + clf_parallel.fit(X, y) + assert_array_almost_equal(clf_sequential.coef_, clf_parallel.coef_) + + +@pytest.mark.parametrize( + "Estimator", [linear_model.SGDClassifier, linear_model.SGDRegressor] +) +def test_sgd_random_state(Estimator, global_random_seed): + # Train the same model on the same data without converging and check that we + # get reproducible results by fixing the random seed. + if Estimator == linear_model.SGDRegressor: + X, y = datasets.make_regression(random_state=global_random_seed) + else: + X, y = datasets.make_classification(random_state=global_random_seed) + + # Fitting twice a model with the same hyper-parameters on the same training + # set with the same seed leads to the same results deterministically. + + est = Estimator(random_state=global_random_seed, max_iter=1) + with pytest.warns(ConvergenceWarning): + coef_same_seed_a = est.fit(X, y).coef_ + assert est.n_iter_ == 1 + + est = Estimator(random_state=global_random_seed, max_iter=1) + with pytest.warns(ConvergenceWarning): + coef_same_seed_b = est.fit(X, y).coef_ + assert est.n_iter_ == 1 + + assert_allclose(coef_same_seed_a, coef_same_seed_b) + + # Fitting twice a model with the same hyper-parameters on the same training + # set but with different random seed leads to different results after one + # epoch because of the random shuffling of the dataset. + + est = Estimator(random_state=global_random_seed + 1, max_iter=1) + with pytest.warns(ConvergenceWarning): + coef_other_seed = est.fit(X, y).coef_ + assert est.n_iter_ == 1 + + assert np.abs(coef_same_seed_a - coef_other_seed).max() > 1.0 + + +def test_validation_mask_correctly_subsets(monkeypatch): + """Test that data passed to validation callback correctly subsets. + + Non-regression test for #23255. + """ + X, Y = iris.data, iris.target + n_samples = X.shape[0] + validation_fraction = 0.2 + clf = linear_model.SGDClassifier( + early_stopping=True, + tol=1e-3, + max_iter=1000, + validation_fraction=validation_fraction, + ) + + mock = Mock(side_effect=_stochastic_gradient._ValidationScoreCallback) + monkeypatch.setattr(_stochastic_gradient, "_ValidationScoreCallback", mock) + clf.fit(X, Y) + + X_val, y_val = mock.call_args[0][1:3] + assert X_val.shape[0] == int(n_samples * validation_fraction) + assert y_val.shape[0] == int(n_samples * validation_fraction) + + +def test_sgd_error_on_zero_validation_weight(): + # Test that SGDClassifier raises error when all the validation samples + # have zero sample_weight. Non-regression test for #17229. + X, Y = iris.data, iris.target + sample_weight = np.zeros_like(Y) + validation_fraction = 0.4 + + clf = linear_model.SGDClassifier( + early_stopping=True, validation_fraction=validation_fraction, random_state=0 + ) + + error_message = ( + "The sample weights for validation set are all zero, consider using a" + " different random state." 
+ ) + with pytest.raises(ValueError, match=error_message): + clf.fit(X, Y, sample_weight=sample_weight) + + +@pytest.mark.parametrize("Estimator", [SGDClassifier, SGDRegressor]) +def test_sgd_verbose(Estimator): + """non-regression test for gh #25249""" + Estimator(verbose=1).fit(X, Y) + + +@pytest.mark.parametrize( + "SGDEstimator", + [ + SGDClassifier, + SparseSGDClassifier, + SGDRegressor, + SparseSGDRegressor, + SGDOneClassSVM, + SparseSGDOneClassSVM, + ], +) +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +def test_sgd_dtype_match(SGDEstimator, data_type): + _X = X.astype(data_type) + _Y = np.array(Y, dtype=data_type) + sgd_model = SGDEstimator() + sgd_model.fit(_X, _Y) + assert sgd_model.coef_.dtype == data_type + + +@pytest.mark.parametrize( + "SGDEstimator", + [ + SGDClassifier, + SparseSGDClassifier, + SGDRegressor, + SparseSGDRegressor, + SGDOneClassSVM, + SparseSGDOneClassSVM, + ], +) +def test_sgd_numerical_consistency(SGDEstimator): + X_64 = X.astype(dtype=np.float64) + Y_64 = np.array(Y, dtype=np.float64) + + X_32 = X.astype(dtype=np.float32) + Y_32 = np.array(Y, dtype=np.float32) + + sgd_64 = SGDEstimator(max_iter=20) + sgd_64.fit(X_64, Y_64) + + sgd_32 = SGDEstimator(max_iter=20) + sgd_32.fit(X_32, Y_32) + + assert_allclose(sgd_64.coef_, sgd_32.coef_) + + +# TODO(1.6): remove +@pytest.mark.parametrize("Estimator", [SGDClassifier, SGDOneClassSVM]) +def test_loss_attribute_deprecation(Estimator): + # Check that we raise the proper deprecation warning if accessing + # `loss_function_`. + X = np.array([[1, 2], [3, 4]]) + y = np.array([1, 0]) + est = Estimator().fit(X, y) + + with pytest.warns(FutureWarning, match="`loss_function_` was deprecated"): + est.loss_function_ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sparse_coordinate_descent.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sparse_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..1aab9babeeb40fcc3eac3f443c1ba7a9e1bdf9d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sparse_coordinate_descent.py @@ -0,0 +1,384 @@ +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose + +from sklearn.datasets import make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ElasticNet, ElasticNetCV, Lasso, LassoCV +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + create_memmap_backed_data, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, LIL_CONTAINERS + + +def test_sparse_coef(): + # Check that the sparse_coef property works + clf = ElasticNet() + clf.coef_ = [1, 2, 3] + + assert sp.issparse(clf.sparse_coef_) + assert clf.sparse_coef_.toarray().tolist()[0] == clf.coef_ + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_lasso_zero(csc_container): + # Check that the sparse lasso can handle zero data without crashing + X = csc_container((3, 1)) + y = [0, 0, 0] + T = np.array([[1], [2], [3]]) + clf = Lasso().fit(X, y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0]) + assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_enet_toy_list_input(with_sample_weight, csc_container): + # Test ElasticNet 
for various values of alpha and l1_ratio with list X + + X = np.array([[-1], [0], [1]]) + X = csc_container(X) + Y = [-1, 0, 1] # just a straight line + T = np.array([[2], [3], [4]]) # test sample + if with_sample_weight: + sw = np.array([2.0, 2, 2]) + else: + sw = None + + # this should be the same as unregularized least squares + clf = ElasticNet(alpha=0, l1_ratio=1.0) + # catch warning about alpha=0. + # this is discouraged but should work. + ignore_warnings(clf.fit)(X, Y, sample_weight=sw) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.3) + clf.fit(X, Y, sample_weight=sw) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.5) + clf.fit(X, Y, sample_weight=sw) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.45454], 3) + assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) + assert_almost_equal(clf.dual_gap_, 0) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_enet_toy_explicit_sparse_input(lil_container): + # Test ElasticNet for various values of alpha and l1_ratio with sparse X + f = ignore_warnings + # training samples + X = lil_container((3, 1)) + X[0, 0] = -1 + # X[1, 0] = 0 + X[2, 0] = 1 + Y = [-1, 0, 1] # just a straight line (the identity function) + + # test samples + T = lil_container((3, 1)) + T[0, 0] = 2 + T[1, 0] = 3 + T[2, 0] = 4 + + # this should be the same as lasso + clf = ElasticNet(alpha=0, l1_ratio=1.0) + f(clf.fit)(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.3) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.45454], 3) + assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) + assert_almost_equal(clf.dual_gap_, 0) + + +def make_sparse_data( + sparse_container, + n_samples=100, + n_features=100, + n_informative=10, + seed=42, + positive=False, + n_targets=1, +): + random_state = np.random.RandomState(seed) + + # build an ill-posed linear regression problem with many noisy features and + # comparatively few samples + + # generate a ground truth model + w = random_state.randn(n_features, n_targets) + w[n_informative:] = 0.0 # only the top features are impacting the model + if positive: + w = np.abs(w) + + X = random_state.randn(n_samples, n_features) + rnd = random_state.uniform(size=(n_samples, n_features)) + X[rnd > 0.5] = 0.0 # 50% of zeros in input signal + + # generate training ground truth labels + y = np.dot(X, w) + X = sparse_container(X) + if n_targets == 1: + y = np.ravel(y) + return X, y + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +@pytest.mark.parametrize( + "alpha, fit_intercept, positive", + [(0.1, False, False), (0.1, True, False), (1e-3, False, True), (1e-3, True, True)], +) +def test_sparse_enet_not_as_toy_dataset(csc_container, alpha, fit_intercept, positive): + n_samples, n_features, max_iter = 100, 100, 
1000 + n_informative = 10 + + X, y = make_sparse_data( + csc_container, n_samples, n_features, n_informative, positive=positive + ) + + X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2] + y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2] + + s_clf = ElasticNet( + alpha=alpha, + l1_ratio=0.8, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=1e-7, + positive=positive, + warm_start=True, + ) + s_clf.fit(X_train, y_train) + + assert_almost_equal(s_clf.dual_gap_, 0, 4) + assert s_clf.score(X_test, y_test) > 0.85 + + # check the convergence is the same as the dense version + d_clf = ElasticNet( + alpha=alpha, + l1_ratio=0.8, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=1e-7, + positive=positive, + warm_start=True, + ) + d_clf.fit(X_train.toarray(), y_train) + + assert_almost_equal(d_clf.dual_gap_, 0, 4) + assert d_clf.score(X_test, y_test) > 0.85 + + assert_almost_equal(s_clf.coef_, d_clf.coef_, 5) + assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5) + + # check that the coefs are sparse + assert np.sum(s_clf.coef_ != 0.0) < 2 * n_informative + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_lasso_not_as_toy_dataset(csc_container): + n_samples = 100 + max_iter = 1000 + n_informative = 10 + X, y = make_sparse_data( + csc_container, n_samples=n_samples, n_informative=n_informative + ) + + X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2] + y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2] + + s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) + s_clf.fit(X_train, y_train) + assert_almost_equal(s_clf.dual_gap_, 0, 4) + assert s_clf.score(X_test, y_test) > 0.85 + + # check the convergence is the same as the dense version + d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) + d_clf.fit(X_train.toarray(), y_train) + assert_almost_equal(d_clf.dual_gap_, 0, 4) + assert d_clf.score(X_test, y_test) > 0.85 + + # check that the coefs are sparse + assert np.sum(s_clf.coef_ != 0.0) == n_informative + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_enet_multitarget(csc_container): + n_targets = 3 + X, y = make_sparse_data(csc_container, n_targets=n_targets) + + estimator = ElasticNet(alpha=0.01, precompute=False) + # XXX: There is a bug when precompute is not False! 
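+ # Fit jointly on all n_targets columns of y, then refit one column at a time + # and check that row k of coef_ (and the k-th intercept_ / dual_gap_) matches.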
+ estimator.fit(X, y) + coef, intercept, dual_gap = ( + estimator.coef_, + estimator.intercept_, + estimator.dual_gap_, + ) + + for k in range(n_targets): + estimator.fit(X, y[:, k]) + assert_array_almost_equal(coef[k, :], estimator.coef_) + assert_array_almost_equal(intercept[k], estimator.intercept_) + assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_path_parameters(csc_container): + X, y = make_sparse_data(csc_container) + max_iter = 50 + n_alphas = 10 + clf = ElasticNetCV( + n_alphas=n_alphas, + eps=1e-3, + max_iter=max_iter, + l1_ratio=0.5, + fit_intercept=False, + ) + ignore_warnings(clf.fit)(X, y) # new params + assert_almost_equal(0.5, clf.l1_ratio) + assert n_alphas == clf.n_alphas + assert n_alphas == len(clf.alphas_) + sparse_mse_path = clf.mse_path_ + ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data + assert_almost_equal(clf.mse_path_, sparse_mse_path) + + +@pytest.mark.parametrize("Model", [Lasso, ElasticNet, LassoCV, ElasticNetCV]) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("n_samples, n_features", [(24, 6), (6, 24)]) +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_dense_equality( + Model, fit_intercept, n_samples, n_features, with_sample_weight, csc_container +): + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + effective_rank=n_features // 2, + n_informative=n_features // 2, + bias=4 * fit_intercept, + noise=1, + random_state=42, + ) + if with_sample_weight: + sw = np.abs(np.random.RandomState(42).normal(scale=10, size=y.shape)) + else: + sw = None + Xs = csc_container(X) + params = {"fit_intercept": fit_intercept} + reg_dense = Model(**params).fit(X, y, sample_weight=sw) + reg_sparse = Model(**params).fit(Xs, y, sample_weight=sw) + if fit_intercept: + assert reg_sparse.intercept_ == pytest.approx(reg_dense.intercept_) + # balance property + assert np.average(reg_sparse.predict(X), weights=sw) == pytest.approx( + np.average(y, weights=sw) + ) + assert_allclose(reg_sparse.coef_, reg_dense.coef_) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_same_output_sparse_dense_lasso_and_enet_cv(csc_container): + X, y = make_sparse_data(csc_container, n_samples=40, n_features=10) + clfs = ElasticNetCV(max_iter=100) + clfs.fit(X, y) + clfd = ElasticNetCV(max_iter=100) + clfd.fit(X.toarray(), y) + assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) + assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) + assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) + assert_array_almost_equal(clfs.alphas_, clfd.alphas_) + + clfs = LassoCV(max_iter=100, cv=4) + clfs.fit(X, y) + clfd = LassoCV(max_iter=100, cv=4) + clfd.fit(X.toarray(), y) + assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) + assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) + assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) + assert_array_almost_equal(clfs.alphas_, clfd.alphas_) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_same_multiple_output_sparse_dense(coo_container): + l = ElasticNet() + X = [ + [0, 1, 2, 3, 4], + [0, 2, 5, 8, 11], + [9, 10, 11, 12, 13], + [10, 11, 12, 13, 14], + ] + y = [ + [1, 2, 3, 4, 5], + [1, 3, 6, 9, 12], + [10, 11, 12, 13, 14], + [11, 12, 13, 14, 15], + ] + l.fit(X, y) + sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1) + predict_dense = l.predict(sample) + + l_sp = 
ElasticNet() + X_sp = coo_container(X) + l_sp.fit(X_sp, y) + sample_sparse = coo_container(sample) + predict_sparse = l_sp.predict(sample_sparse) + + assert_array_almost_equal(predict_sparse, predict_dense) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_enet_coordinate_descent(csc_container): + """Test that a warning is issued if model does not converge""" + clf = Lasso(max_iter=2) + n_samples = 5 + n_features = 2 + X = csc_container((n_samples, n_features)) * 1e50 + y = np.ones(n_samples) + warning_message = ( + "Objective did not converge. You might want " + "to increase the number of iterations." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, y) + + +@pytest.mark.parametrize("copy_X", (True, False)) +def test_sparse_read_only_buffer(copy_X): + """Test that sparse coordinate descent works for read-only buffers""" + rng = np.random.RandomState(0) + + clf = ElasticNet(alpha=0.1, copy_X=copy_X, random_state=rng) + X = sp.random(100, 20, format="csc", random_state=rng) + + # Make X.data read-only + X.data = create_memmap_backed_data(X.data) + + y = rng.rand(100) + clf.fit(X, y) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py new file mode 100644 index 0000000000000000000000000000000000000000..c8415d02be80aea775334c09c3f845ee4c040886 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py @@ -0,0 +1,294 @@ +""" +Testing for Theil-Sen module (sklearn.linear_model.theil_sen) +""" + +# Author: Florian Wilhelm +# License: BSD 3 clause +import os +import re +import sys +from contextlib import contextmanager + +import numpy as np +import pytest +from numpy.testing import ( + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) +from scipy.linalg import norm +from scipy.optimize import fmin_bfgs + +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import LinearRegression, TheilSenRegressor +from sklearn.linear_model._theil_sen import ( + _breakdown_point, + _modified_weiszfeld_step, + _spatial_median, +) +from sklearn.utils._testing import assert_almost_equal + + +@contextmanager +def no_stdout_stderr(): + old_stdout = sys.stdout + old_stderr = sys.stderr + with open(os.devnull, "w") as devnull: + sys.stdout = devnull + sys.stderr = devnull + yield + devnull.flush() + sys.stdout = old_stdout + sys.stderr = old_stderr + + +def gen_toy_problem_1d(intercept=True): + random_state = np.random.RandomState(0) + # Linear model y = 3*x + N(2, 0.1**2) + w = 3.0 + if intercept: + c = 2.0 + n_samples = 50 + else: + c = 0.1 + n_samples = 100 + x = random_state.normal(size=n_samples) + noise = 0.1 * random_state.normal(size=n_samples) + y = w * x + c + noise + # Add some outliers + if intercept: + x[42], y[42] = (-2, 4) + x[43], y[43] = (-2.5, 8) + x[33], y[33] = (2.5, 1) + x[49], y[49] = (2.1, 2) + else: + x[42], y[42] = (-2, 4) + x[43], y[43] = (-2.5, 8) + x[53], y[53] = (2.5, 1) + x[60], y[60] = (2.1, 2) + x[72], y[72] = (1.8, -7) + return x[:, np.newaxis], y, w, c + + +def gen_toy_problem_2d(): + random_state = np.random.RandomState(0) + n_samples = 100 + # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2) + X = random_state.normal(size=(n_samples, 2)) + w = np.array([5.0, 10.0]) + c = 1.0 + noise = 0.1 * random_state.normal(size=n_samples) + y = np.dot(X, w) + c + noise + # Add some outliers + n_outliers = 
n_samples // 10 + ix = random_state.randint(0, n_samples, size=n_outliers) + y[ix] = 50 * random_state.normal(size=n_outliers) + return X, y, w, c + + +def gen_toy_problem_4d(): + random_state = np.random.RandomState(0) + n_samples = 10000 + # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2) + X = random_state.normal(size=(n_samples, 4)) + w = np.array([5.0, 10.0, 42.0, 7.0]) + c = 1.0 + noise = 0.1 * random_state.normal(size=n_samples) + y = np.dot(X, w) + c + noise + # Add some outliers + n_outliers = n_samples // 10 + ix = random_state.randint(0, n_samples, size=n_outliers) + y[ix] = 50 * random_state.normal(size=n_outliers) + return X, y, w, c + + +def test_modweiszfeld_step_1d(): + X = np.array([1.0, 2.0, 3.0]).reshape(3, 1) + # Check startvalue is element of X and solution + median = 2.0 + new_y = _modified_weiszfeld_step(X, median) + assert_array_almost_equal(new_y, median) + # Check startvalue is not the solution + y = 2.5 + new_y = _modified_weiszfeld_step(X, y) + assert_array_less(median, new_y) + assert_array_less(new_y, y) + # Check startvalue is not the solution but element of X + y = 3.0 + new_y = _modified_weiszfeld_step(X, y) + assert_array_less(median, new_y) + assert_array_less(new_y, y) + # Check that a single vector is identity + X = np.array([1.0, 2.0, 3.0]).reshape(1, 3) + y = X[0] + new_y = _modified_weiszfeld_step(X, y) + assert_array_equal(y, new_y) + + +def test_modweiszfeld_step_2d(): + X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2) + y = np.array([0.5, 0.5]) + # Check first two iterations + new_y = _modified_weiszfeld_step(X, y) + assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3])) + new_y = _modified_weiszfeld_step(X, new_y) + assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592])) + # Check fix point + y = np.array([0.21132505, 0.78867497]) + new_y = _modified_weiszfeld_step(X, y) + assert_array_almost_equal(new_y, y) + + +def test_spatial_median_1d(): + X = np.array([1.0, 2.0, 3.0]).reshape(3, 1) + true_median = 2.0 + _, median = _spatial_median(X) + assert_array_almost_equal(median, true_median) + # Test larger problem and for exact solution in 1d case + random_state = np.random.RandomState(0) + X = random_state.randint(100, size=(1000, 1)) + true_median = np.median(X.ravel()) + _, median = _spatial_median(X) + assert_array_equal(median, true_median) + + +def test_spatial_median_2d(): + X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2) + _, median = _spatial_median(X, max_iter=100, tol=1.0e-6) + + def cost_func(y): + dists = np.array([norm(x - y) for x in X]) + return np.sum(dists) + + # Check if median is solution of the Fermat-Weber location problem + fermat_weber = fmin_bfgs(cost_func, median, disp=False) + assert_array_almost_equal(median, fermat_weber) + # Check when maximum iteration is exceeded a warning is emitted + warning_message = "Maximum number of iterations 30 reached in spatial median." 
+ with pytest.warns(ConvergenceWarning, match=warning_message): + _spatial_median(X, max_iter=30, tol=0.0) + + +def test_theil_sen_1d(): + X, y, w, c = gen_toy_problem_1d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert np.abs(lstq.coef_ - w) > 0.9 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_theil_sen_1d_no_intercept(): + X, y, w, c = gen_toy_problem_1d(intercept=False) + # Check that Least Squares fails + lstq = LinearRegression(fit_intercept=False).fit(X, y) + assert np.abs(lstq.coef_ - w - c) > 0.5 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w + c, 1) + assert_almost_equal(theil_sen.intercept_, 0.0) + + # non-regression test for #18104 + theil_sen.score(X, y) + + +def test_theil_sen_2d(): + X, y, w, c = gen_toy_problem_2d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert norm(lstq.coef_ - w) > 1.0 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_calc_breakdown_point(): + bp = _breakdown_point(1e10, 2) + assert np.abs(bp - 1 + 1 / (np.sqrt(2))) < 1.0e-6 + + +@pytest.mark.parametrize( + "param, ExceptionCls, match", + [ + ( + {"n_subsamples": 1}, + ValueError, + re.escape("Invalid parameter since n_features+1 > n_subsamples (2 > 1)"), + ), + ( + {"n_subsamples": 101}, + ValueError, + re.escape("Invalid parameter since n_subsamples > n_samples (101 > 50)"), + ), + ], +) +def test_checksubparams_invalid_input(param, ExceptionCls, match): + X, y, w, c = gen_toy_problem_1d() + theil_sen = TheilSenRegressor(**param, random_state=0) + with pytest.raises(ExceptionCls, match=match): + theil_sen.fit(X, y) + + +def test_checksubparams_n_subsamples_if_less_samples_than_features(): + random_state = np.random.RandomState(0) + n_samples, n_features = 10, 20 + X = random_state.normal(size=(n_samples, n_features)) + y = random_state.normal(size=n_samples) + theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0) + with pytest.raises(ValueError): + theil_sen.fit(X, y) + + +def test_subpopulation(): + X, y, w, c = gen_toy_problem_4d() + theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_subsamples(): + X, y, w, c = gen_toy_problem_4d() + theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) + lstq = LinearRegression().fit(X, y) + # Check for exact the same results as Least Squares + assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9) + + +def test_verbosity(): + X, y, w, c = gen_toy_problem_1d() + # Check that Theil-Sen can be verbose + with no_stdout_stderr(): + TheilSenRegressor(verbose=True, random_state=0).fit(X, y) + TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y) + + +def test_theil_sen_parallel(): + X, y, w, c = gen_toy_problem_2d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert norm(lstq.coef_ - w) > 1.0 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(n_jobs=2, random_state=0, max_subpopulation=2e3).fit( + X, y + 
) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_less_samples_than_features(): + random_state = np.random.RandomState(0) + n_samples, n_features = 10, 20 + X = random_state.normal(size=(n_samples, n_features)) + y = random_state.normal(size=n_samples) + # Check that Theil-Sen falls back to Least Squares if fit_intercept=False + theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) + lstq = LinearRegression(fit_intercept=False).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12) + # Check fit_intercept=True case. This will not be equal to the Least + # Squares solution since the intercept is calculated differently. + theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y) + y_pred = theil_sen.predict(X) + assert_array_almost_equal(y_pred, y, 12) diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..44045565e87ba72e92e03ecbbb72572d4746c019 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e4732f10ce97c9f5e903774dcb953c0f1518f40b27e287992075311835cefce +size 11815960
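The loss tables in the SGD tests above follow the convention of the loss classes in the sgd_fast extension module that the tests exercise: py_loss(p, y) returns the loss for prediction p and target y, and py_dloss(p, y) its derivative with respect to p. Below is a minimal pure-Python sketch of three of those conventions, written only so the expected values in the tables are easy to recompute by hand; the function names (hinge, epsilon_insensitive, squared_epsilon_insensitive) are illustrative and are not the library implementation.

from math import isclose

def hinge(p, y, threshold=1.0):
    # max(0, threshold - p*y); derivative w.r.t. p is -y on the active side.
    z = p * y
    if z <= threshold:
        return threshold - z, -y
    return 0.0, 0.0

def epsilon_insensitive(p, y, epsilon=0.1):
    # |p - y| is only penalized beyond the epsilon tube.
    loss = max(0.0, abs(p - y) - epsilon)
    dloss = 0.0 if loss == 0.0 else (1.0 if p > y else -1.0)
    return loss, dloss

def squared_epsilon_insensitive(p, y, epsilon=0.1):
    # Squared version of the above; derivative is 2*z*sign(p - y).
    z = max(0.0, abs(p - y) - epsilon)
    return z * z, 2.0 * z * (1.0 if p > y else -1.0)

# A few rows of the test tables, reproduced by the sketch:
assert hinge(2.0, -1.0) == (3.0, 1.0)                            # (2.0, -1.0, 3.0, 1.0)
assert hinge(0.5, 1.0) == (0.5, -1.0)                            # (0.5, 1.0, 0.5, -1.0)
assert isclose(epsilon_insensitive(2.2, 2.0)[0], 0.1)            # (2.2, 2.0, 0.1, 1.0)
assert isclose(squared_epsilon_insensitive(2.0, -1.0)[0], 8.41)  # (2.0, -1.0, 8.41, 5.8)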