diff --git a/ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b5851bdbc2ea5e9492e805540bfa555af0fb3136 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad37a0acc46ab46511ce568a17497d8c55fa5c71a08bcf004b3438ea60054721 +size 16778396 diff --git a/ckpts/universal/global_step40/zero/19.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/19.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..a60307ae42281f13e70e5510473acab4951f0f58 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b00e249ac56688ceb8f4c14ab09acb2661b3eb3af6f3d5b8339599114a1ddabd +size 16778317 diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc0d902b45b18638c707b3bece1a0fcb4f12d13a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py @@ -0,0 +1,15 @@ +# Author: Jan Hendrik Metzen +# Vincent Dubourg +# (mostly translation, see implementation details) +# License: BSD 3 clause + +""" +The :mod:`sklearn.gaussian_process` module implements Gaussian Process +based regression and classification. +""" + +from . import kernels +from ._gpc import GaussianProcessClassifier +from ._gpr import GaussianProcessRegressor + +__all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "kernels"] diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71171943672834770a7840b218f7ca5cde746125 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8865834da68a7ed78c0b40eeb145b12e9265d5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..537dbbf265ba4096605ed24441cc272e479e82bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0779ca126509eeb2a21c4a19b10ce2107a4f683c Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py new file mode 100644 index 0000000000000000000000000000000000000000..013815795a853af84f2e1fd0aef7be5026c5f441 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py @@ -0,0 +1,902 @@ +"""Gaussian processes classification.""" + +# Authors: Jan Hendrik Metzen +# +# License: BSD 3 clause + +from numbers import Integral +from operator import itemgetter + +import numpy as np +import scipy.optimize +from scipy.linalg import cho_solve, cholesky, solve +from scipy.special import erf, expit + +from ..base import BaseEstimator, ClassifierMixin, _fit_context, clone +from ..multiclass import OneVsOneClassifier, OneVsRestClassifier +from ..preprocessing import LabelEncoder +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.optimize import _check_optimize_result +from ..utils.validation import check_is_fitted +from .kernels import RBF, CompoundKernel, Kernel +from .kernels import ConstantKernel as C + +# Values required for approximating the logistic sigmoid by +# error functions. coefs are obtained via: +# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf]) +# b = logistic(x) +# A = (erf(np.dot(x, self.lambdas)) + 1) / 2 +# coefs = lstsq(A, b)[0] +LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis] +COEFS = np.array( + [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654] +)[:, np.newaxis] + + +class _BinaryGaussianProcessClassifierLaplace(BaseEstimator): + """Binary Gaussian process classification based on Laplace approximation. + + The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_. + + Internally, the Laplace approximation is used for approximating the + non-Gaussian posterior by a Gaussian. + + Currently, the implementation is restricted to using the logistic link + function. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : kernel instance, default=None + The kernel specifying the covariance function of the GP. If None is + passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that + the kernel's hyperparameters are optimized during fitting. + + optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b' + Can either be one of the internally supported optimizers for optimizing + the kernel's parameters, specified by a string, or an externally + defined optimizer passed as a callable. If a callable is passed, it + must have the signature:: + + def optimizer(obj_func, initial_theta, bounds): + # * 'obj_func' is the objective function to be maximized, which + # takes the hyperparameters theta as parameter and an + # optional flag eval_gradient, which determines if the + # gradient is returned additionally to the function value + # * 'initial_theta': the initial value for theta, which can be + # used by local optimizers + # * 'bounds': the bounds on the values of theta + .... + # Returned are the best found hyperparameters theta and + # the corresponding value of the target function. + return theta_opt, func_min + + Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize + is used. If None is passed, the kernel's parameters are kept fixed. 
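For illustration, a custom optimizer callable matching the signature documented just above might look like the following minimal standalone sketch; it simply mirrors the internal L-BFGS-B call made in `_constrained_optimization` further down, and the name `custom_optimizer` and the iteration cap are illustrative, not part of this file:

import scipy.optimize

def custom_optimizer(obj_func, initial_theta, bounds):
    # obj_func(theta) returns the negated log-marginal likelihood and its
    # gradient (as built inside fit()), so jac=True can be used directly.
    opt_res = scipy.optimize.minimize(
        obj_func,
        initial_theta,
        method="L-BFGS-B",
        jac=True,
        bounds=bounds,
        options={"maxiter": 50},  # illustrative iteration budget
    )
    return opt_res.x, opt_res.fun

# usage sketch: GaussianProcessClassifier(optimizer=custom_optimizer)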
+ Available internal optimizers are:: + + 'fmin_l_bfgs_b' + + n_restarts_optimizer : int, default=0 + The number of restarts of the optimizer for finding the kernel's + parameters which maximize the log-marginal likelihood. The first run + of the optimizer is performed from the kernel's initial parameters, + the remaining ones (if any) from thetas sampled log-uniform randomly + from the space of allowed theta-values. If greater than 0, all bounds + must be finite. Note that n_restarts_optimizer=0 implies that one + run is performed. + + max_iter_predict : int, default=100 + The maximum number of iterations in Newton's method for approximating + the posterior during predict. Smaller values will reduce computation + time at the cost of worse results. + + warm_start : bool, default=False + If warm-starts are enabled, the solution of the last Newton iteration + on the Laplace approximation of the posterior mode is used as + initialization for the next call of _posterior_mode(). This can speed + up convergence when _posterior_mode is called several times on similar + problems as in hyperparameter optimization. See :term:`the Glossary + `. + + copy_X_train : bool, default=True + If True, a persistent copy of the training data is stored in the + object. Otherwise, just a reference to the training data is stored, + which might cause predictions to change if the data is modified + externally. + + random_state : int, RandomState instance or None, default=None + Determines random number generation used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + X_train_ : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data (also + required for prediction). + + y_train_ : array-like of shape (n_samples,) + Target values in training data (also required for prediction) + + classes_ : array-like of shape (n_classes,) + Unique class labels. + + kernel_ : kernl instance + The kernel used for prediction. The structure of the kernel is the + same as the one passed as parameter but with optimized hyperparameters + + L_ : array-like of shape (n_samples, n_samples) + Lower-triangular Cholesky decomposition of the kernel in X_train_ + + pi_ : array-like of shape (n_samples,) + The probabilities of the positive class for the training points + X_train_ + + W_sr_ : array-like of shape (n_samples,) + Square root of W, the Hessian of log-likelihood of the latent function + values for the observed labels. Since W is diagonal, only the diagonal + of sqrt(W) is stored. + + log_marginal_likelihood_value_ : float + The log-marginal-likelihood of ``self.kernel_.theta`` + + References + ---------- + .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, + "Gaussian Processes for Machine Learning", + MIT Press 2006 `_ + """ + + def __init__( + self, + kernel=None, + *, + optimizer="fmin_l_bfgs_b", + n_restarts_optimizer=0, + max_iter_predict=100, + warm_start=False, + copy_X_train=True, + random_state=None, + ): + self.kernel = kernel + self.optimizer = optimizer + self.n_restarts_optimizer = n_restarts_optimizer + self.max_iter_predict = max_iter_predict + self.warm_start = warm_start + self.copy_X_train = copy_X_train + self.random_state = random_state + + def fit(self, X, y): + """Fit Gaussian process classification model. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data. + + y : array-like of shape (n_samples,) + Target values, must be binary. + + Returns + ------- + self : returns an instance of self. + """ + if self.kernel is None: # Use an RBF kernel as default + self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + else: + self.kernel_ = clone(self.kernel) + + self.rng = check_random_state(self.random_state) + + self.X_train_ = np.copy(X) if self.copy_X_train else X + + # Encode class labels and check that it is a binary classification + # problem + label_encoder = LabelEncoder() + self.y_train_ = label_encoder.fit_transform(y) + self.classes_ = label_encoder.classes_ + if self.classes_.size > 2: + raise ValueError( + "%s supports only binary classification. y contains classes %s" + % (self.__class__.__name__, self.classes_) + ) + elif self.classes_.size == 1: + raise ValueError( + "{0:s} requires 2 classes; got {1:d} class".format( + self.__class__.__name__, self.classes_.size + ) + ) + + if self.optimizer is not None and self.kernel_.n_dims > 0: + # Choose hyperparameters based on maximizing the log-marginal + # likelihood (potentially starting from several initial values) + def obj_func(theta, eval_gradient=True): + if eval_gradient: + lml, grad = self.log_marginal_likelihood( + theta, eval_gradient=True, clone_kernel=False + ) + return -lml, -grad + else: + return -self.log_marginal_likelihood(theta, clone_kernel=False) + + # First optimize starting from theta specified in kernel + optima = [ + self._constrained_optimization( + obj_func, self.kernel_.theta, self.kernel_.bounds + ) + ] + + # Additional runs are performed from log-uniform chosen initial + # theta + if self.n_restarts_optimizer > 0: + if not np.isfinite(self.kernel_.bounds).all(): + raise ValueError( + "Multiple optimizer restarts (n_restarts_optimizer>0) " + "requires that all bounds are finite." + ) + bounds = self.kernel_.bounds + for iteration in range(self.n_restarts_optimizer): + theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1])) + optima.append( + self._constrained_optimization(obj_func, theta_initial, bounds) + ) + # Select result from run with minimal (negative) log-marginal + # likelihood + lml_values = list(map(itemgetter(1), optima)) + self.kernel_.theta = optima[np.argmin(lml_values)][0] + self.kernel_._check_bounds_params() + + self.log_marginal_likelihood_value_ = -np.min(lml_values) + else: + self.log_marginal_likelihood_value_ = self.log_marginal_likelihood( + self.kernel_.theta + ) + + # Precompute quantities required for predictions which are independent + # of actual query points + K = self.kernel_(self.X_train_) + + _, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode( + K, return_temporaries=True + ) + + return self + + def predict(self, X): + """Perform classification on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. 
+ + Returns + ------- + C : ndarray of shape (n_samples,) + Predicted target values for X, values are from ``classes_`` + """ + check_is_fitted(self) + + # As discussed on Section 3.4.2 of GPML, for making hard binary + # decisions, it is enough to compute the MAP of the posterior and + # pass it through the link function + K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) + f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4 + + return np.where(f_star > 0, self.classes_[1], self.classes_[0]) + + def predict_proba(self, X): + """Return probability estimates for the test vector X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute ``classes_``. + """ + check_is_fitted(self) + + # Based on Algorithm 3.2 of GPML + K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) + f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4 + v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5 + # Line 6 (compute np.diag(v.T.dot(v)) via einsum) + var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v) + + # Line 7: + # Approximate \int log(z) * N(z | f_star, var_f_star) + # Approximation is due to Williams & Barber, "Bayesian Classification + # with Gaussian Processes", Appendix A: Approximate the logistic + # sigmoid by a linear combination of 5 error functions. + # For information on how this integral can be computed see + # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html + alpha = 1 / (2 * var_f_star) + gamma = LAMBDAS * f_star + integrals = ( + np.sqrt(np.pi / alpha) + * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) + / (2 * np.sqrt(var_f_star * 2 * np.pi)) + ) + pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum() + + return np.vstack((1 - pi_star, pi_star)).T + + def log_marginal_likelihood( + self, theta=None, eval_gradient=False, clone_kernel=True + ): + """Returns log-marginal likelihood of theta for training data. + + Parameters + ---------- + theta : array-like of shape (n_kernel_params,), default=None + Kernel hyperparameters for which the log-marginal likelihood is + evaluated. If None, the precomputed log_marginal_likelihood + of ``self.kernel_.theta`` is returned. + + eval_gradient : bool, default=False + If True, the gradient of the log-marginal likelihood with respect + to the kernel hyperparameters at position theta is returned + additionally. If True, theta must not be None. + + clone_kernel : bool, default=True + If True, the kernel attribute is copied. If False, the kernel + attribute is modified, but may result in a performance improvement. + + Returns + ------- + log_likelihood : float + Log-marginal likelihood of theta for training data. + + log_likelihood_gradient : ndarray of shape (n_kernel_params,), \ + optional + Gradient of the log-marginal likelihood with respect to the kernel + hyperparameters at position theta. + Only returned when `eval_gradient` is True. 
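The five-term error-function approximation of the logistic sigmoid used in `predict_proba` above (via the module-level LAMBDAS and COEFS) can be sanity-checked numerically; a small standalone sketch, not part of this file:

import numpy as np
from scipy.special import erf, expit

LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array(
    [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]
)[:, np.newaxis]

f = np.linspace(-6.0, 6.0, 241)[np.newaxis, :]
# sigmoid(f) ~= sum_i coefs_i * (erf(lambda_i * f) + 1) / 2
sigmoid_approx = (COEFS * (erf(LAMBDAS * f) + 1) / 2).sum(axis=0)
max_err = np.max(np.abs(sigmoid_approx - expit(f)))
print(max_err)  # expected to be small, since the coefficients were least-squares fitted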
+ """ + if theta is None: + if eval_gradient: + raise ValueError("Gradient can only be evaluated for theta!=None") + return self.log_marginal_likelihood_value_ + + if clone_kernel: + kernel = self.kernel_.clone_with_theta(theta) + else: + kernel = self.kernel_ + kernel.theta = theta + + if eval_gradient: + K, K_gradient = kernel(self.X_train_, eval_gradient=True) + else: + K = kernel(self.X_train_) + + # Compute log-marginal-likelihood Z and also store some temporaries + # which can be reused for computing Z's gradient + Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True) + + if not eval_gradient: + return Z + + # Compute gradient based on Algorithm 5.1 of GPML + d_Z = np.empty(theta.shape[0]) + # XXX: Get rid of the np.diag() in the next line + R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7 + C = solve(L, W_sr[:, np.newaxis] * K) # Line 8 + # Line 9: (use einsum to compute np.diag(C.T.dot(C)))) + s_2 = ( + -0.5 + * (np.diag(K) - np.einsum("ij, ij -> j", C, C)) + * (pi * (1 - pi) * (1 - 2 * pi)) + ) # third derivative + + for j in range(d_Z.shape[0]): + C = K_gradient[:, :, j] # Line 11 + # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C))) + s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel()) + + b = C.dot(self.y_train_ - pi) # Line 13 + s_3 = b - K.dot(R.dot(b)) # Line 14 + + d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15 + + return Z, d_Z + + def _posterior_mode(self, K, return_temporaries=False): + """Mode-finding for binary Laplace GPC and fixed kernel. + + This approximates the posterior of the latent function values for given + inputs and target observations with a Gaussian approximation and uses + Newton's iteration to find the mode of this approximation. + """ + # Based on Algorithm 3.1 of GPML + + # If warm_start are enabled, we reuse the last solution for the + # posterior mode as initialization; otherwise, we initialize with 0 + if ( + self.warm_start + and hasattr(self, "f_cached") + and self.f_cached.shape == self.y_train_.shape + ): + f = self.f_cached + else: + f = np.zeros_like(self.y_train_, dtype=np.float64) + + # Use Newton's iteration method to find mode of Laplace approximation + log_marginal_likelihood = -np.inf + for _ in range(self.max_iter_predict): + # Line 4 + pi = expit(f) + W = pi * (1 - pi) + # Line 5 + W_sr = np.sqrt(W) + W_sr_K = W_sr[:, np.newaxis] * K + B = np.eye(W.shape[0]) + W_sr_K * W_sr + L = cholesky(B, lower=True) + # Line 6 + b = W * f + (self.y_train_ - pi) + # Line 7 + a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b)) + # Line 8 + f = K.dot(a) + + # Line 10: Compute log marginal likelihood in loop and use as + # convergence criterion + lml = ( + -0.5 * a.T.dot(f) + - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum() + - np.log(np.diag(L)).sum() + ) + # Check if we have converged (log marginal likelihood does + # not decrease) + # XXX: more complex convergence criterion + if lml - log_marginal_likelihood < 1e-10: + break + log_marginal_likelihood = lml + + self.f_cached = f # Remember solution for later warm-starts + if return_temporaries: + return log_marginal_likelihood, (pi, W_sr, L, b, a) + else: + return log_marginal_likelihood + + def _constrained_optimization(self, obj_func, initial_theta, bounds): + if self.optimizer == "fmin_l_bfgs_b": + opt_res = scipy.optimize.minimize( + obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds + ) + _check_optimize_result("lbfgs", opt_res) + theta_opt, func_min = opt_res.x, opt_res.fun + elif callable(self.optimizer): + 
theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds) + else: + raise ValueError("Unknown optimizer %s." % self.optimizer) + + return theta_opt, func_min + + +class GaussianProcessClassifier(ClassifierMixin, BaseEstimator): + """Gaussian process classification (GPC) based on Laplace approximation. + + The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_. + + Internally, the Laplace approximation is used for approximating the + non-Gaussian posterior by a Gaussian. + + Currently, the implementation is restricted to using the logistic link + function. For multi-class classification, several binary one-versus rest + classifiers are fitted. Note that this class thus does not implement + a true multi-class Laplace approximation. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : kernel instance, default=None + The kernel specifying the covariance function of the GP. If None is + passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that + the kernel's hyperparameters are optimized during fitting. Also kernel + cannot be a `CompoundKernel`. + + optimizer : 'fmin_l_bfgs_b', callable or None, default='fmin_l_bfgs_b' + Can either be one of the internally supported optimizers for optimizing + the kernel's parameters, specified by a string, or an externally + defined optimizer passed as a callable. If a callable is passed, it + must have the signature:: + + def optimizer(obj_func, initial_theta, bounds): + # * 'obj_func' is the objective function to be maximized, which + # takes the hyperparameters theta as parameter and an + # optional flag eval_gradient, which determines if the + # gradient is returned additionally to the function value + # * 'initial_theta': the initial value for theta, which can be + # used by local optimizers + # * 'bounds': the bounds on the values of theta + .... + # Returned are the best found hyperparameters theta and + # the corresponding value of the target function. + return theta_opt, func_min + + Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize + is used. If None is passed, the kernel's parameters are kept fixed. + Available internal optimizers are:: + + 'fmin_l_bfgs_b' + + n_restarts_optimizer : int, default=0 + The number of restarts of the optimizer for finding the kernel's + parameters which maximize the log-marginal likelihood. The first run + of the optimizer is performed from the kernel's initial parameters, + the remaining ones (if any) from thetas sampled log-uniform randomly + from the space of allowed theta-values. If greater than 0, all bounds + must be finite. Note that n_restarts_optimizer=0 implies that one + run is performed. + + max_iter_predict : int, default=100 + The maximum number of iterations in Newton's method for approximating + the posterior during predict. Smaller values will reduce computation + time at the cost of worse results. + + warm_start : bool, default=False + If warm-starts are enabled, the solution of the last Newton iteration + on the Laplace approximation of the posterior mode is used as + initialization for the next call of _posterior_mode(). This can speed + up convergence when _posterior_mode is called several times on similar + problems as in hyperparameter optimization. See :term:`the Glossary + `. + + copy_X_train : bool, default=True + If True, a persistent copy of the training data is stored in the + object. 
Otherwise, just a reference to the training data is stored, + which might cause predictions to change if the data is modified + externally. + + random_state : int, RandomState instance or None, default=None + Determines random number generation used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest' + Specifies how multi-class classification problems are handled. + Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest', + one binary Gaussian process classifier is fitted for each class, which + is trained to separate this class from the rest. In 'one_vs_one', one + binary Gaussian process classifier is fitted for each pair of classes, + which is trained to separate these two classes. The predictions of + these binary predictors are combined into multi-class predictions. + Note that 'one_vs_one' does not support predicting probability + estimates. + + n_jobs : int, default=None + The number of jobs to use for the computation: the specified + multiclass problems are computed in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + base_estimator_ : ``Estimator`` instance + The estimator instance that defines the likelihood function + using the observed data. + + kernel_ : kernel instance + The kernel used for prediction. In case of binary classification, + the structure of the kernel is the same as the one passed as parameter + but with optimized hyperparameters. In case of multi-class + classification, a CompoundKernel is returned which consists of the + different kernels used in the one-versus-rest classifiers. + + log_marginal_likelihood_value_ : float + The log-marginal-likelihood of ``self.kernel_.theta`` + + classes_ : array-like of shape (n_classes,) + Unique class labels. + + n_classes_ : int + The number of classes in the training data + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianProcessRegressor : Gaussian process regression (GPR). + + References + ---------- + .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, + "Gaussian Processes for Machine Learning", + MIT Press 2006 `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import RBF + >>> X, y = load_iris(return_X_y=True) + >>> kernel = 1.0 * RBF(1.0) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9866... 
+ >>> gpc.predict_proba(X[:2,:]) + array([[0.83548752, 0.03228706, 0.13222543], + [0.79064206, 0.06525643, 0.14410151]]) + """ + + _parameter_constraints: dict = { + "kernel": [Kernel, None], + "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None], + "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")], + "max_iter_predict": [Interval(Integral, 1, None, closed="left")], + "warm_start": ["boolean"], + "copy_X_train": ["boolean"], + "random_state": ["random_state"], + "multi_class": [StrOptions({"one_vs_rest", "one_vs_one"})], + "n_jobs": [Integral, None], + } + + def __init__( + self, + kernel=None, + *, + optimizer="fmin_l_bfgs_b", + n_restarts_optimizer=0, + max_iter_predict=100, + warm_start=False, + copy_X_train=True, + random_state=None, + multi_class="one_vs_rest", + n_jobs=None, + ): + self.kernel = kernel + self.optimizer = optimizer + self.n_restarts_optimizer = n_restarts_optimizer + self.max_iter_predict = max_iter_predict + self.warm_start = warm_start + self.copy_X_train = copy_X_train + self.random_state = random_state + self.multi_class = multi_class + self.n_jobs = n_jobs + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit Gaussian process classification model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data. + + y : array-like of shape (n_samples,) + Target values, must be binary. + + Returns + ------- + self : object + Returns an instance of self. + """ + if isinstance(self.kernel, CompoundKernel): + raise ValueError("kernel cannot be a CompoundKernel") + + if self.kernel is None or self.kernel.requires_vector_input: + X, y = self._validate_data( + X, y, multi_output=False, ensure_2d=True, dtype="numeric" + ) + else: + X, y = self._validate_data( + X, y, multi_output=False, ensure_2d=False, dtype=None + ) + + self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace( + kernel=self.kernel, + optimizer=self.optimizer, + n_restarts_optimizer=self.n_restarts_optimizer, + max_iter_predict=self.max_iter_predict, + warm_start=self.warm_start, + copy_X_train=self.copy_X_train, + random_state=self.random_state, + ) + + self.classes_ = np.unique(y) + self.n_classes_ = self.classes_.size + if self.n_classes_ == 1: + raise ValueError( + "GaussianProcessClassifier requires 2 or more " + "distinct classes; got %d class (only class %s " + "is present)" % (self.n_classes_, self.classes_[0]) + ) + if self.n_classes_ > 2: + if self.multi_class == "one_vs_rest": + self.base_estimator_ = OneVsRestClassifier( + self.base_estimator_, n_jobs=self.n_jobs + ) + elif self.multi_class == "one_vs_one": + self.base_estimator_ = OneVsOneClassifier( + self.base_estimator_, n_jobs=self.n_jobs + ) + else: + raise ValueError("Unknown multi-class mode %s" % self.multi_class) + + self.base_estimator_.fit(X, y) + + if self.n_classes_ > 2: + self.log_marginal_likelihood_value_ = np.mean( + [ + estimator.log_marginal_likelihood() + for estimator in self.base_estimator_.estimators_ + ] + ) + else: + self.log_marginal_likelihood_value_ = ( + self.base_estimator_.log_marginal_likelihood() + ) + + return self + + def predict(self, X): + """Perform classification on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. 
+ + Returns + ------- + C : ndarray of shape (n_samples,) + Predicted target values for X, values are from ``classes_``. + """ + check_is_fitted(self) + + if self.kernel is None or self.kernel.requires_vector_input: + X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False) + else: + X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False) + + return self.base_estimator_.predict(X) + + def predict_proba(self, X): + """Return probability estimates for the test vector X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + """ + check_is_fitted(self) + if self.n_classes_ > 2 and self.multi_class == "one_vs_one": + raise ValueError( + "one_vs_one multi-class mode does not support " + "predicting probability estimates. Use " + "one_vs_rest mode instead." + ) + + if self.kernel is None or self.kernel.requires_vector_input: + X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False) + else: + X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False) + + return self.base_estimator_.predict_proba(X) + + @property + def kernel_(self): + """Return the kernel of the base estimator.""" + if self.n_classes_ == 2: + return self.base_estimator_.kernel_ + else: + return CompoundKernel( + [estimator.kernel_ for estimator in self.base_estimator_.estimators_] + ) + + def log_marginal_likelihood( + self, theta=None, eval_gradient=False, clone_kernel=True + ): + """Return log-marginal likelihood of theta for training data. + + In the case of multi-class classification, the mean log-marginal + likelihood of the one-versus-rest classifiers are returned. + + Parameters + ---------- + theta : array-like of shape (n_kernel_params,), default=None + Kernel hyperparameters for which the log-marginal likelihood is + evaluated. In the case of multi-class classification, theta may + be the hyperparameters of the compound kernel or of an individual + kernel. In the latter case, all individual kernel get assigned the + same theta values. If None, the precomputed log_marginal_likelihood + of ``self.kernel_.theta`` is returned. + + eval_gradient : bool, default=False + If True, the gradient of the log-marginal likelihood with respect + to the kernel hyperparameters at position theta is returned + additionally. Note that gradient computation is not supported + for non-binary classification. If True, theta must not be None. + + clone_kernel : bool, default=True + If True, the kernel attribute is copied. If False, the kernel + attribute is modified, but may result in a performance improvement. + + Returns + ------- + log_likelihood : float + Log-marginal likelihood of theta for training data. + + log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional + Gradient of the log-marginal likelihood with respect to the kernel + hyperparameters at position theta. + Only returned when `eval_gradient` is True. 
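A hedged usage sketch of the theta conventions described above for the multi-class case, reusing the iris setup from the class docstring (variable names are illustrative):

from sklearn.datasets import load_iris
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X, y = load_iris(return_X_y=True)  # 3 classes -> one-vs-rest sub-estimators
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0), random_state=0).fit(X, y)

n_dims = gpc.base_estimator_.estimators_[0].kernel_.n_dims  # theta size per sub-kernel
gpc.log_marginal_likelihood()                              # precomputed mean LML
gpc.log_marginal_likelihood(gpc.kernel_.theta)             # compound theta: n_classes * n_dims
gpc.log_marginal_likelihood(gpc.kernel_.theta[:n_dims])    # same theta reused for every sub-kernel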
+ """ + check_is_fitted(self) + + if theta is None: + if eval_gradient: + raise ValueError("Gradient can only be evaluated for theta!=None") + return self.log_marginal_likelihood_value_ + + theta = np.asarray(theta) + if self.n_classes_ == 2: + return self.base_estimator_.log_marginal_likelihood( + theta, eval_gradient, clone_kernel=clone_kernel + ) + else: + if eval_gradient: + raise NotImplementedError( + "Gradient of log-marginal-likelihood not implemented for " + "multi-class GPC." + ) + estimators = self.base_estimator_.estimators_ + n_dims = estimators[0].kernel_.n_dims + if theta.shape[0] == n_dims: # use same theta for all sub-kernels + return np.mean( + [ + estimator.log_marginal_likelihood( + theta, clone_kernel=clone_kernel + ) + for i, estimator in enumerate(estimators) + ] + ) + elif theta.shape[0] == n_dims * self.classes_.shape[0]: + # theta for compound kernel + return np.mean( + [ + estimator.log_marginal_likelihood( + theta[n_dims * i : n_dims * (i + 1)], + clone_kernel=clone_kernel, + ) + for i, estimator in enumerate(estimators) + ] + ) + else: + raise ValueError( + "Shape of theta must be either %d or %d. " + "Obtained theta with shape %d." + % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0]) + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py new file mode 100644 index 0000000000000000000000000000000000000000..d3723016be127adea0a63ed19ba5d5e306f74abc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py @@ -0,0 +1,673 @@ +"""Gaussian processes regression.""" + +# Authors: Jan Hendrik Metzen +# Modified by: Pete Green +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real +from operator import itemgetter + +import numpy as np +import scipy.optimize +from scipy.linalg import cho_solve, cholesky, solve_triangular + +from ..base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context, clone +from ..preprocessing._data import _handle_zeros_in_scale +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.optimize import _check_optimize_result +from .kernels import RBF, Kernel +from .kernels import ConstantKernel as C + +GPR_CHOLESKY_LOWER = True + + +class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Gaussian process regression (GPR). + + The implementation is based on Algorithm 2.1 of [RW2006]_. + + In addition to standard scikit-learn estimator API, + :class:`GaussianProcessRegressor`: + + * allows prediction without prior fitting (based on the GP prior) + * provides an additional method `sample_y(X)`, which evaluates samples + drawn from the GPR (prior or posterior) at given inputs + * exposes a method `log_marginal_likelihood(theta)`, which can be used + externally for other ways of selecting hyperparameters, e.g., via + Markov chain Monte Carlo. + + To learn the difference between a point-estimate approach vs. a more + Bayesian modelling approach, refer to the example entitled + :ref:`sphx_glr_auto_examples_gaussian_process_plot_compare_gpr_krr.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : kernel instance, default=None + The kernel specifying the covariance function of the GP. If None is + passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed") + * RBF(1.0, length_scale_bounds="fixed")`` is used as default. 
Note that + the kernel hyperparameters are optimized during fitting unless the + bounds are marked as "fixed". + + alpha : float or ndarray of shape (n_samples,), default=1e-10 + Value added to the diagonal of the kernel matrix during fitting. + This can prevent a potential numerical issue during fitting, by + ensuring that the calculated values form a positive definite matrix. + It can also be interpreted as the variance of additional Gaussian + measurement noise on the training observations. Note that this is + different from using a `WhiteKernel`. If an array is passed, it must + have the same number of entries as the data used for fitting and is + used as datapoint-dependent noise level. Allowing to specify the + noise level directly as a parameter is mainly for convenience and + for consistency with :class:`~sklearn.linear_model.Ridge`. + + optimizer : "fmin_l_bfgs_b", callable or None, default="fmin_l_bfgs_b" + Can either be one of the internally supported optimizers for optimizing + the kernel's parameters, specified by a string, or an externally + defined optimizer passed as a callable. If a callable is passed, it + must have the signature:: + + def optimizer(obj_func, initial_theta, bounds): + # * 'obj_func': the objective function to be minimized, which + # takes the hyperparameters theta as a parameter and an + # optional flag eval_gradient, which determines if the + # gradient is returned additionally to the function value + # * 'initial_theta': the initial value for theta, which can be + # used by local optimizers + # * 'bounds': the bounds on the values of theta + .... + # Returned are the best found hyperparameters theta and + # the corresponding value of the target function. + return theta_opt, func_min + + Per default, the L-BFGS-B algorithm from `scipy.optimize.minimize` + is used. If None is passed, the kernel's parameters are kept fixed. + Available internal optimizers are: `{'fmin_l_bfgs_b'}`. + + n_restarts_optimizer : int, default=0 + The number of restarts of the optimizer for finding the kernel's + parameters which maximize the log-marginal likelihood. The first run + of the optimizer is performed from the kernel's initial parameters, + the remaining ones (if any) from thetas sampled log-uniform randomly + from the space of allowed theta-values. If greater than 0, all bounds + must be finite. Note that `n_restarts_optimizer == 0` implies that one + run is performed. + + normalize_y : bool, default=False + Whether or not to normalize the target values `y` by removing the mean + and scaling to unit-variance. This is recommended for cases where + zero-mean, unit-variance priors are used. Note that, in this + implementation, the normalisation is reversed before the GP predictions + are reported. + + .. versionchanged:: 0.23 + + copy_X_train : bool, default=True + If True, a persistent copy of the training data is stored in the + object. Otherwise, just a reference to the training data is stored, + which might cause predictions to change if the data is modified + externally. + + n_targets : int, default=None + The number of dimensions of the target values. Used to decide the number + of outputs when sampling from the prior distributions (i.e. calling + :meth:`sample_y` before :meth:`fit`). This parameter is ignored once + :meth:`fit` has been called. + + .. versionadded:: 1.3 + + random_state : int, RandomState instance or None, default=None + Determines random number generation used to initialize the centers. 
+ Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + X_train_ : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data (also + required for prediction). + + y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values in training data (also required for prediction). + + kernel_ : kernel instance + The kernel used for prediction. The structure of the kernel is the + same as the one passed as parameter but with optimized hyperparameters. + + L_ : array-like of shape (n_samples, n_samples) + Lower-triangular Cholesky decomposition of the kernel in ``X_train_``. + + alpha_ : array-like of shape (n_samples,) + Dual coefficients of training data points in kernel space. + + log_marginal_likelihood_value_ : float + The log-marginal-likelihood of ``self.kernel_.theta``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianProcessClassifier : Gaussian process classification (GPC) + based on Laplace approximation. + + References + ---------- + .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, + "Gaussian Processes for Machine Learning", + MIT Press 2006 `_ + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = DotProduct() + WhiteKernel() + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3680... + >>> gpr.predict(X[:2,:], return_std=True) + (array([653.0..., 592.1...]), array([316.6..., 316.6...])) + """ + + _parameter_constraints: dict = { + "kernel": [None, Kernel], + "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray], + "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None], + "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")], + "normalize_y": ["boolean"], + "copy_X_train": ["boolean"], + "n_targets": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + } + + def __init__( + self, + kernel=None, + *, + alpha=1e-10, + optimizer="fmin_l_bfgs_b", + n_restarts_optimizer=0, + normalize_y=False, + copy_X_train=True, + n_targets=None, + random_state=None, + ): + self.kernel = kernel + self.alpha = alpha + self.optimizer = optimizer + self.n_restarts_optimizer = n_restarts_optimizer + self.normalize_y = normalize_y + self.copy_X_train = copy_X_train + self.n_targets = n_targets + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit Gaussian process regression model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Returns + ------- + self : object + GaussianProcessRegressor class instance. 
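To make the note in the parameters above about `alpha` versus a `WhiteKernel` concrete, a minimal sketch (data and values are illustrative, not from this file):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

rng = np.random.RandomState(0)
X = np.sort(rng.uniform(0, 5, 40))[:, np.newaxis]
y = np.sin(X).ravel() + rng.normal(0, 0.2, X.shape[0])

# Fixed, user-specified noise variance added to the kernel diagonal at fit time:
gpr_alpha = GaussianProcessRegressor(kernel=RBF(1.0), alpha=0.2**2).fit(X, y)

# Noise level treated as a kernel hyperparameter and optimized during fitting:
gpr_white = GaussianProcessRegressor(kernel=RBF(1.0) + WhiteKernel(0.1)).fit(X, y)
print(gpr_white.kernel_)  # learned noise_level, typically close to the simulated variance here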
+ """ + if self.kernel is None: # Use an RBF kernel as default + self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + else: + self.kernel_ = clone(self.kernel) + + self._rng = check_random_state(self.random_state) + + if self.kernel_.requires_vector_input: + dtype, ensure_2d = "numeric", True + else: + dtype, ensure_2d = None, False + X, y = self._validate_data( + X, + y, + multi_output=True, + y_numeric=True, + ensure_2d=ensure_2d, + dtype=dtype, + ) + + n_targets_seen = y.shape[1] if y.ndim > 1 else 1 + if self.n_targets is not None and n_targets_seen != self.n_targets: + raise ValueError( + "The number of targets seen in `y` is different from the parameter " + f"`n_targets`. Got {n_targets_seen} != {self.n_targets}." + ) + + # Normalize target value + if self.normalize_y: + self._y_train_mean = np.mean(y, axis=0) + self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False) + + # Remove mean and make unit variance + y = (y - self._y_train_mean) / self._y_train_std + + else: + shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1 + self._y_train_mean = np.zeros(shape=shape_y_stats) + self._y_train_std = np.ones(shape=shape_y_stats) + + if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]: + if self.alpha.shape[0] == 1: + self.alpha = self.alpha[0] + else: + raise ValueError( + "alpha must be a scalar or an array with same number of " + f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})" + ) + + self.X_train_ = np.copy(X) if self.copy_X_train else X + self.y_train_ = np.copy(y) if self.copy_X_train else y + + if self.optimizer is not None and self.kernel_.n_dims > 0: + # Choose hyperparameters based on maximizing the log-marginal + # likelihood (potentially starting from several initial values) + def obj_func(theta, eval_gradient=True): + if eval_gradient: + lml, grad = self.log_marginal_likelihood( + theta, eval_gradient=True, clone_kernel=False + ) + return -lml, -grad + else: + return -self.log_marginal_likelihood(theta, clone_kernel=False) + + # First optimize starting from theta specified in kernel + optima = [ + ( + self._constrained_optimization( + obj_func, self.kernel_.theta, self.kernel_.bounds + ) + ) + ] + + # Additional runs are performed from log-uniform chosen initial + # theta + if self.n_restarts_optimizer > 0: + if not np.isfinite(self.kernel_.bounds).all(): + raise ValueError( + "Multiple optimizer restarts (n_restarts_optimizer>0) " + "requires that all bounds are finite." + ) + bounds = self.kernel_.bounds + for iteration in range(self.n_restarts_optimizer): + theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1]) + optima.append( + self._constrained_optimization(obj_func, theta_initial, bounds) + ) + # Select result from run with minimal (negative) log-marginal + # likelihood + lml_values = list(map(itemgetter(1), optima)) + self.kernel_.theta = optima[np.argmin(lml_values)][0] + self.kernel_._check_bounds_params() + + self.log_marginal_likelihood_value_ = -np.min(lml_values) + else: + self.log_marginal_likelihood_value_ = self.log_marginal_likelihood( + self.kernel_.theta, clone_kernel=False + ) + + # Precompute quantities required for predictions which are independent + # of actual query points + # Alg. 
2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) + K = self.kernel_(self.X_train_) + K[np.diag_indices_from(K)] += self.alpha + try: + self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) + except np.linalg.LinAlgError as exc: + exc.args = ( + ( + f"The kernel, {self.kernel_}, is not returning a positive " + "definite matrix. Try gradually increasing the 'alpha' " + "parameter of your GaussianProcessRegressor estimator." + ), + ) + exc.args + raise + # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) + self.alpha_ = cho_solve( + (self.L_, GPR_CHOLESKY_LOWER), + self.y_train_, + check_finite=False, + ) + return self + + def predict(self, X, return_std=False, return_cov=False): + """Predict using the Gaussian process regression model. + + We can also predict based on an unfitted model by using the GP prior. + In addition to the mean of the predictive distribution, optionally also + returns its standard deviation (`return_std=True`) or covariance + (`return_cov=True`). Note that at most one of the two can be requested. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated. + + return_std : bool, default=False + If True, the standard-deviation of the predictive distribution at + the query points is returned along with the mean. + + return_cov : bool, default=False + If True, the covariance of the joint predictive distribution at + the query points is returned along with the mean. + + Returns + ------- + y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets) + Mean of predictive distribution a query points. + + y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional + Standard deviation of predictive distribution at query points. + Only returned when `return_std` is True. + + y_cov : ndarray of shape (n_samples, n_samples) or \ + (n_samples, n_samples, n_targets), optional + Covariance of joint predictive distribution a query points. + Only returned when `return_cov` is True. + """ + if return_std and return_cov: + raise RuntimeError( + "At most one of return_std or return_cov can be requested." + ) + + if self.kernel is None or self.kernel.requires_vector_input: + dtype, ensure_2d = "numeric", True + else: + dtype, ensure_2d = None, False + + X = self._validate_data(X, ensure_2d=ensure_2d, dtype=dtype, reset=False) + + if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior + if self.kernel is None: + kernel = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + else: + kernel = self.kernel + + n_targets = self.n_targets if self.n_targets is not None else 1 + y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze() + + if return_cov: + y_cov = kernel(X) + if n_targets > 1: + y_cov = np.repeat( + np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1 + ) + return y_mean, y_cov + elif return_std: + y_var = kernel.diag(X) + if n_targets > 1: + y_var = np.repeat( + np.expand_dims(y_var, -1), repeats=n_targets, axis=-1 + ) + return y_mean, np.sqrt(y_var) + else: + return y_mean + else: # Predict based on GP posterior + # Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . 
alpha + K_trans = self.kernel_(X, self.X_train_) + y_mean = K_trans @ self.alpha_ + + # undo normalisation + y_mean = self._y_train_std * y_mean + self._y_train_mean + + # if y_mean has shape (n_samples, 1), reshape to (n_samples,) + if y_mean.ndim > 1 and y_mean.shape[1] == 1: + y_mean = np.squeeze(y_mean, axis=1) + + # Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T + V = solve_triangular( + self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False + ) + + if return_cov: + # Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v + y_cov = self.kernel_(X) - V.T @ V + + # undo normalisation + y_cov = np.outer(y_cov, self._y_train_std**2).reshape( + *y_cov.shape, -1 + ) + # if y_cov has shape (n_samples, n_samples, 1), reshape to + # (n_samples, n_samples) + if y_cov.shape[2] == 1: + y_cov = np.squeeze(y_cov, axis=2) + + return y_mean, y_cov + elif return_std: + # Compute variance of predictive distribution + # Use einsum to avoid explicitly forming the large matrix + # V^T @ V just to extract its diagonal afterward. + y_var = self.kernel_.diag(X).copy() + y_var -= np.einsum("ij,ji->i", V.T, V) + + # Check if any of the variances is negative because of + # numerical issues. If yes: set the variance to 0. + y_var_negative = y_var < 0 + if np.any(y_var_negative): + warnings.warn( + "Predicted variances smaller than 0. " + "Setting those variances to 0." + ) + y_var[y_var_negative] = 0.0 + + # undo normalisation + y_var = np.outer(y_var, self._y_train_std**2).reshape( + *y_var.shape, -1 + ) + + # if y_var has shape (n_samples, 1), reshape to (n_samples,) + if y_var.shape[1] == 1: + y_var = np.squeeze(y_var, axis=1) + + return y_mean, np.sqrt(y_var) + else: + return y_mean + + def sample_y(self, X, n_samples=1, random_state=0): + """Draw samples from Gaussian process and evaluate at X. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Query points where the GP is evaluated. + + n_samples : int, default=1 + Number of samples drawn from the Gaussian process per query point. + + random_state : int, RandomState instance or None, default=0 + Determines random number generation to randomly draw samples. + Pass an int for reproducible results across multiple function + calls. + See :term:`Glossary `. + + Returns + ------- + y_samples : ndarray of shape (n_samples_X, n_samples), or \ + (n_samples_X, n_targets, n_samples) + Values of n_samples samples drawn from Gaussian process and + evaluated at query points. + """ + rng = check_random_state(random_state) + + y_mean, y_cov = self.predict(X, return_cov=True) + if y_mean.ndim == 1: + y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T + else: + y_samples = [ + rng.multivariate_normal( + y_mean[:, target], y_cov[..., target], n_samples + ).T[:, np.newaxis] + for target in range(y_mean.shape[1]) + ] + y_samples = np.hstack(y_samples) + return y_samples + + def log_marginal_likelihood( + self, theta=None, eval_gradient=False, clone_kernel=True + ): + """Return log-marginal likelihood of theta for training data. + + Parameters + ---------- + theta : array-like of shape (n_kernel_params,) default=None + Kernel hyperparameters for which the log-marginal likelihood is + evaluated. If None, the precomputed log_marginal_likelihood + of ``self.kernel_.theta`` is returned. + + eval_gradient : bool, default=False + If True, the gradient of the log-marginal likelihood with respect + to the kernel hyperparameters at position theta is returned + additionally. If True, theta must not be None. 
+ + clone_kernel : bool, default=True + If True, the kernel attribute is copied. If False, the kernel + attribute is modified, but may result in a performance improvement. + + Returns + ------- + log_likelihood : float + Log-marginal likelihood of theta for training data. + + log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional + Gradient of the log-marginal likelihood with respect to the kernel + hyperparameters at position theta. + Only returned when eval_gradient is True. + """ + if theta is None: + if eval_gradient: + raise ValueError("Gradient can only be evaluated for theta!=None") + return self.log_marginal_likelihood_value_ + + if clone_kernel: + kernel = self.kernel_.clone_with_theta(theta) + else: + kernel = self.kernel_ + kernel.theta = theta + + if eval_gradient: + K, K_gradient = kernel(self.X_train_, eval_gradient=True) + else: + K = kernel(self.X_train_) + + # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) + K[np.diag_indices_from(K)] += self.alpha + try: + L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) + except np.linalg.LinAlgError: + return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf + + # Support multi-dimensional output of self.y_train_ + y_train = self.y_train_ + if y_train.ndim == 1: + y_train = y_train[:, np.newaxis] + + # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) + alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False) + + # Alg 2.1, page 19, line 7 + # -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi) + # y is originally thought to be a (1, n_samples) row vector. However, + # in multioutputs, y is of shape (n_samples, 2) and we need to compute + # y^T . alpha for each output, independently using einsum. Thus, it + # is equivalent to: + # for output_idx in range(n_outputs): + # log_likelihood_dims[output_idx] = ( + # y_train[:, [output_idx]] @ alpha[:, [output_idx]] + # ) + log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha) + log_likelihood_dims -= np.log(np.diag(L)).sum() + log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi) + # the log likehood is sum-up across the outputs + log_likelihood = log_likelihood_dims.sum(axis=-1) + + if eval_gradient: + # Eq. 5.9, p. 114, and footnote 5 in p. 114 + # 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient) + # alpha is supposed to be a vector of (n_samples,) elements. With + # multioutputs, alpha is a matrix of size (n_samples, n_outputs). + # Therefore, we want to construct a matrix of + # (n_samples, n_samples, n_outputs) equivalent to + # for output_idx in range(n_outputs): + # output_alpha = alpha[:, [output_idx]] + # inner_term[..., output_idx] = output_alpha @ output_alpha.T + inner_term = np.einsum("ik,jk->ijk", alpha, alpha) + # compute K^-1 of shape (n_samples, n_samples) + K_inv = cho_solve( + (L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False + ) + # create a new axis to use broadcasting between inner_term and + # K_inv + inner_term -= K_inv[..., np.newaxis] + # Since we are interested about the trace of + # inner_term @ K_gradient, we don't explicitly compute the + # matrix-by-matrix operation and instead use an einsum. 
Therefore + # it is equivalent to: + # for param_idx in range(n_kernel_params): + # for output_idx in range(n_output): + # log_likehood_gradient_dims[param_idx, output_idx] = ( + # inner_term[..., output_idx] @ + # K_gradient[..., param_idx] + # ) + log_likelihood_gradient_dims = 0.5 * np.einsum( + "ijl,jik->kl", inner_term, K_gradient + ) + # the log likehood gradient is the sum-up across the outputs + log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1) + + if eval_gradient: + return log_likelihood, log_likelihood_gradient + else: + return log_likelihood + + def _constrained_optimization(self, obj_func, initial_theta, bounds): + if self.optimizer == "fmin_l_bfgs_b": + opt_res = scipy.optimize.minimize( + obj_func, + initial_theta, + method="L-BFGS-B", + jac=True, + bounds=bounds, + ) + _check_optimize_result("lbfgs", opt_res) + theta_opt, func_min = opt_res.x, opt_res.fun + elif callable(self.optimizer): + theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds) + else: + raise ValueError(f"Unknown optimizer {self.optimizer}.") + + return theta_opt, func_min + + def _more_tags(self): + return {"requires_fit": False} diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py new file mode 100644 index 0000000000000000000000000000000000000000..3b995c48b1f712ec928fc569f4354fa74492e463 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py @@ -0,0 +1,2415 @@ +""" +The :mod:`sklearn.gaussian_process.kernels` module implements a set of kernels that +can be combined by operators and used in Gaussian processes. +""" + +# Kernels for Gaussian process regression and classification. +# +# The kernels in this module allow kernel-engineering, i.e., they can be +# combined via the "+" and "*" operators or be exponentiated with a scalar +# via "**". These sum and product expressions can also contain scalar values, +# which are automatically converted to a constant kernel. +# +# All kernels allow (analytic) gradient-based hyperparameter optimization. +# The space of hyperparameters can be specified by giving lower und upper +# boundaries for the value of each hyperparameter (the search space is thus +# rectangular). Instead of specifying bounds, hyperparameters can also be +# declared to be "fixed", which causes these hyperparameters to be excluded from +# optimization. + + +# Author: Jan Hendrik Metzen +# License: BSD 3 clause + +# Note: this module is strongly inspired by the kernel module of the george +# package. 
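# Illustrative usage sketch (not part of this module): the header above describes
# the kernel algebra — kernels combine via "+", "*" and "**", and scalars are
# promoted to ConstantKernel. Printed values below are indicative only.
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

k = 2.0 * RBF(length_scale=1.5) + WhiteKernel(noise_level=0.1)
print(k)               # e.g. 1.41**2 * RBF(length_scale=1.5) + WhiteKernel(noise_level=0.1)
print(k.theta)         # log-transformed free hyperparameters (3 entries here)
print(k.bounds.shape)  # (3, 2): log-transformed search-space bounds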
+ +import math +import warnings +from abc import ABCMeta, abstractmethod +from collections import namedtuple +from inspect import signature + +import numpy as np +from scipy.spatial.distance import cdist, pdist, squareform +from scipy.special import gamma, kv + +from ..base import clone +from ..exceptions import ConvergenceWarning +from ..metrics.pairwise import pairwise_kernels +from ..utils.validation import _num_samples + + +def _check_length_scale(X, length_scale): + length_scale = np.squeeze(length_scale).astype(float) + if np.ndim(length_scale) > 1: + raise ValueError("length_scale cannot be of dimension greater than 1") + if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]: + raise ValueError( + "Anisotropic kernel must have the same number of " + "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1]) + ) + return length_scale + + +class Hyperparameter( + namedtuple( + "Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed") + ) +): + """A kernel hyperparameter's specification in form of a namedtuple. + + .. versionadded:: 0.18 + + Attributes + ---------- + name : str + The name of the hyperparameter. Note that a kernel using a + hyperparameter with name "x" must have the attributes self.x and + self.x_bounds + + value_type : str + The type of the hyperparameter. Currently, only "numeric" + hyperparameters are supported. + + bounds : pair of floats >= 0 or "fixed" + The lower and upper bound on the parameter. If n_elements>1, a pair + of 1d array with n_elements each may be given alternatively. If + the string "fixed" is passed as bounds, the hyperparameter's value + cannot be changed. + + n_elements : int, default=1 + The number of elements of the hyperparameter value. Defaults to 1, + which corresponds to a scalar hyperparameter. n_elements > 1 + corresponds to a hyperparameter which is vector-valued, + such as, e.g., anisotropic length-scales. + + fixed : bool, default=None + Whether the value of this hyperparameter is fixed, i.e., cannot be + changed during hyperparameter tuning. If None is passed, the "fixed" is + derived based on the given bounds. + + Examples + -------- + >>> from sklearn.gaussian_process.kernels import ConstantKernel + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import Hyperparameter + >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0) + >>> kernel = ConstantKernel(constant_value=1.0, + ... constant_value_bounds=(0.0, 10.0)) + + We can access each hyperparameter: + + >>> for hyperparameter in kernel.hyperparameters: + ... print(hyperparameter) + Hyperparameter(name='constant_value', value_type='numeric', + bounds=array([[ 0., 10.]]), n_elements=1, fixed=False) + + >>> params = kernel.get_params() + >>> for key in sorted(params): print(f"{key} : {params[key]}") + constant_value : 1.0 + constant_value_bounds : (0.0, 10.0) + """ + + # A raw namedtuple is very memory efficient as it packs the attributes + # in a struct to get rid of the __dict__ of attributes in particular it + # does not copy the string for the keys on each instance. + # By deriving a namedtuple class just to introduce the __init__ method we + # would also reintroduce the __dict__ on the instance. By telling the + # Python interpreter that this subclass uses static __slots__ instead of + # dynamic attributes. Furthermore we don't need any additional slot in the + # subclass so we set __slots__ to the empty tuple. 
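# Illustrative sketch (not part of this class body): how a Hyperparameter
# specification behaves — numeric bounds are promoted to a 2-D array, and
# `fixed` is derived from the special string "fixed" when not given explicitly.
from sklearn.gaussian_process.kernels import Hyperparameter

free = Hyperparameter("length_scale", "numeric", (1e-5, 1e5))
frozen = Hyperparameter("length_scale", "numeric", "fixed")
print(free.bounds)   # [[1.e-05 1.e+05]]
print(free.fixed)    # False
print(frozen.fixed)  # True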
+ __slots__ = () + + def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None): + if not isinstance(bounds, str) or bounds != "fixed": + bounds = np.atleast_2d(bounds) + if n_elements > 1: # vector-valued parameter + if bounds.shape[0] == 1: + bounds = np.repeat(bounds, n_elements, 0) + elif bounds.shape[0] != n_elements: + raise ValueError( + "Bounds on %s should have either 1 or " + "%d dimensions. Given are %d" + % (name, n_elements, bounds.shape[0]) + ) + + if fixed is None: + fixed = isinstance(bounds, str) and bounds == "fixed" + return super(Hyperparameter, cls).__new__( + cls, name, value_type, bounds, n_elements, fixed + ) + + # This is mainly a testing utility to check that two hyperparameters + # are equal. + def __eq__(self, other): + return ( + self.name == other.name + and self.value_type == other.value_type + and np.all(self.bounds == other.bounds) + and self.n_elements == other.n_elements + and self.fixed == other.fixed + ) + + +class Kernel(metaclass=ABCMeta): + """Base class for all kernels. + + .. versionadded:: 0.18 + + Examples + -------- + >>> from sklearn.gaussian_process.kernels import Kernel, RBF + >>> import numpy as np + >>> class CustomKernel(Kernel): + ... def __init__(self, length_scale=1.0): + ... self.length_scale = length_scale + ... def __call__(self, X, Y=None): + ... if Y is None: + ... Y = X + ... return np.inner(X, X if Y is None else Y) ** 2 + ... def diag(self, X): + ... return np.ones(X.shape[0]) + ... def is_stationary(self): + ... return True + >>> kernel = CustomKernel(length_scale=2.0) + >>> X = np.array([[1, 2], [3, 4]]) + >>> print(kernel(X)) + [[ 25 121] + [121 625]] + """ + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + params = dict() + + # introspect the constructor arguments to find the model parameters + # to represent + cls = self.__class__ + init = getattr(cls.__init__, "deprecated_original", cls.__init__) + init_sign = signature(init) + args, varargs = [], [] + for parameter in init_sign.parameters.values(): + if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self": + args.append(parameter.name) + if parameter.kind == parameter.VAR_POSITIONAL: + varargs.append(parameter.name) + + if len(varargs) != 0: + raise RuntimeError( + "scikit-learn kernels should always " + "specify their parameters in the signature" + " of their __init__ (no varargs)." + " %s doesn't follow this convention." % (cls,) + ) + for arg in args: + params[arg] = getattr(self, arg) + + return params + + def set_params(self, **params): + """Set the parameters of this kernel. + + The method works on simple kernels as well as on nested kernels. + The latter have parameters of the form ``__`` + so that it's possible to update each component of a nested object. + + Returns + ------- + self + """ + if not params: + # Simple optimisation to gain speed (inspect is slow) + return self + valid_params = self.get_params(deep=True) + for key, value in params.items(): + split = key.split("__", 1) + if len(split) > 1: + # nested objects case + name, sub_name = split + if name not in valid_params: + raise ValueError( + "Invalid parameter %s for kernel %s. " + "Check the list of available parameters " + "with `kernel.get_params().keys()`." 
% (name, self) + ) + sub_object = valid_params[name] + sub_object.set_params(**{sub_name: value}) + else: + # simple objects case + if key not in valid_params: + raise ValueError( + "Invalid parameter %s for kernel %s. " + "Check the list of available parameters " + "with `kernel.get_params().keys()`." + % (key, self.__class__.__name__) + ) + setattr(self, key, value) + return self + + def clone_with_theta(self, theta): + """Returns a clone of self with given hyperparameters theta. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The hyperparameters + """ + cloned = clone(self) + cloned.theta = theta + return cloned + + @property + def n_dims(self): + """Returns the number of non-fixed hyperparameters of the kernel.""" + return self.theta.shape[0] + + @property + def hyperparameters(self): + """Returns a list of all hyperparameter specifications.""" + r = [ + getattr(self, attr) + for attr in dir(self) + if attr.startswith("hyperparameter_") + ] + return r + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. + + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + theta = [] + params = self.get_params() + for hyperparameter in self.hyperparameters: + if not hyperparameter.fixed: + theta.append(params[hyperparameter.name]) + if len(theta) > 0: + return np.log(np.hstack(theta)) + else: + return np.array([]) + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + params = self.get_params() + i = 0 + for hyperparameter in self.hyperparameters: + if hyperparameter.fixed: + continue + if hyperparameter.n_elements > 1: + # vector-valued parameter + params[hyperparameter.name] = np.exp( + theta[i : i + hyperparameter.n_elements] + ) + i += hyperparameter.n_elements + else: + params[hyperparameter.name] = np.exp(theta[i]) + i += 1 + + if i != len(theta): + raise ValueError( + "theta has not the correct number of entries." + " Should be %d; given are %d" % (i, len(theta)) + ) + self.set_params(**params) + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. 
+ + Returns + ------- + bounds : ndarray of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + bounds = [ + hyperparameter.bounds + for hyperparameter in self.hyperparameters + if not hyperparameter.fixed + ] + if len(bounds) > 0: + return np.log(np.vstack(bounds)) + else: + return np.array([]) + + def __add__(self, b): + if not isinstance(b, Kernel): + return Sum(self, ConstantKernel(b)) + return Sum(self, b) + + def __radd__(self, b): + if not isinstance(b, Kernel): + return Sum(ConstantKernel(b), self) + return Sum(b, self) + + def __mul__(self, b): + if not isinstance(b, Kernel): + return Product(self, ConstantKernel(b)) + return Product(self, b) + + def __rmul__(self, b): + if not isinstance(b, Kernel): + return Product(ConstantKernel(b), self) + return Product(b, self) + + def __pow__(self, b): + return Exponentiation(self, b) + + def __eq__(self, b): + if type(self) != type(b): + return False + params_a = self.get_params() + params_b = b.get_params() + for key in set(list(params_a.keys()) + list(params_b.keys())): + if np.any(params_a.get(key, None) != params_b.get(key, None)): + return False + return True + + def __repr__(self): + return "{0}({1})".format( + self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta)) + ) + + @abstractmethod + def __call__(self, X, Y=None, eval_gradient=False): + """Evaluate the kernel.""" + + @abstractmethod + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples,) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + + @abstractmethod + def is_stationary(self): + """Returns whether the kernel is stationary.""" + + @property + def requires_vector_input(self): + """Returns whether the kernel is defined on fixed-length feature + vectors or generic objects. Defaults to True for backward + compatibility.""" + return True + + def _check_bounds_params(self): + """Called after fitting to warn if bounds may have been too tight.""" + list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T) + idx = 0 + for hyp in self.hyperparameters: + if hyp.fixed: + continue + for dim in range(hyp.n_elements): + if list_close[idx, 0]: + warnings.warn( + "The optimal value found for " + "dimension %s of parameter %s is " + "close to the specified lower " + "bound %s. Decreasing the bound and" + " calling fit again may find a " + "better value." % (dim, hyp.name, hyp.bounds[dim][0]), + ConvergenceWarning, + ) + elif list_close[idx, 1]: + warnings.warn( + "The optimal value found for " + "dimension %s of parameter %s is " + "close to the specified upper " + "bound %s. Increasing the bound and" + " calling fit again may find a " + "better value." % (dim, hyp.name, hyp.bounds[dim][1]), + ConvergenceWarning, + ) + idx += 1 + + +class NormalizedKernelMixin: + """Mixin for kernels which are normalized: k(X, X)=1. + + .. versionadded:: 0.18 + """ + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. 
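# Illustrative sketch (not library code): `theta` exposes the log of the free
# hyperparameters, and `clone_with_theta` returns a modified copy without
# touching the original kernel. Numbers are only indicative.
import numpy as np
from sklearn.gaussian_process.kernels import RBF

k = RBF(length_scale=2.0)
print(k.theta)                     # [log(2.0)] ~= [0.693]
k_new = k.clone_with_theta(np.log([0.5]))
print(k_new.length_scale)          # 0.5 (the clone)
print(k.length_scale)              # 2.0 (original unchanged)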
+ + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return np.ones(X.shape[0]) + + +class StationaryKernelMixin: + """Mixin for kernels which are stationary: k(X, Y)= f(X-Y). + + .. versionadded:: 0.18 + """ + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return True + + +class GenericKernelMixin: + """Mixin for kernels which operate on generic objects such as variable- + length sequences, trees, and graphs. + + .. versionadded:: 0.22 + """ + + @property + def requires_vector_input(self): + """Whether the kernel works only on fixed-length feature vectors.""" + return False + + +class CompoundKernel(Kernel): + """Kernel which is composed of a set of other kernels. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernels : list of Kernels + The other kernels + + Examples + -------- + >>> from sklearn.gaussian_process.kernels import WhiteKernel + >>> from sklearn.gaussian_process.kernels import RBF + >>> from sklearn.gaussian_process.kernels import CompoundKernel + >>> kernel = CompoundKernel( + ... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)]) + >>> print(kernel.bounds) + [[-11.51292546 11.51292546] + [-11.51292546 11.51292546]] + >>> print(kernel.n_dims) + 2 + >>> print(kernel.theta) + [1.09861229 0.69314718] + """ + + def __init__(self, kernels): + self.kernels = kernels + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + return dict(kernels=self.kernels) + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. + + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + return np.hstack([kernel.theta for kernel in self.kernels]) + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : array of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + k_dims = self.k1.n_dims + for i, kernel in enumerate(self.kernels): + kernel.theta = theta[i * k_dims : (i + 1) * k_dims] + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. + + Returns + ------- + bounds : array of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + return np.vstack([kernel.bounds for kernel in self.kernels]) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Note that this compound kernel returns the results of all simple kernel + stacked along an additional axis. 
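# Illustrative sketch (shapes are assumptions): CompoundKernel stacks the
# individual kernels along a trailing axis, as noted above — m kernels evaluated
# on n points give an (n, n, m) array, and diag() gives (n, m).
import numpy as np
from sklearn.gaussian_process.kernels import CompoundKernel, RBF, WhiteKernel

X = np.array([[0.0], [1.0], [2.0]])
ck = CompoundKernel([RBF(length_scale=1.0), WhiteKernel(noise_level=0.5)])
print(ck(X).shape)       # (3, 3, 2)
print(ck.diag(X).shape)  # (3, 2)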
+ + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object, \ + default=None + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object, \ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of the + kernel hyperparameter is computed. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels) + Kernel k(X, Y) + + K_gradient : ndarray of shape \ + (n_samples_X, n_samples_X, n_dims, n_kernels), optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K = [] + K_grad = [] + for kernel in self.kernels: + K_single, K_grad_single = kernel(X, Y, eval_gradient) + K.append(K_single) + K_grad.append(K_grad_single[..., np.newaxis]) + return np.dstack(K), np.concatenate(K_grad, 3) + else: + return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels]) + + def __eq__(self, b): + if type(self) != type(b) or len(self.kernels) != len(b.kernels): + return False + return np.all( + [self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))] + ) + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return np.all([kernel.is_stationary() for kernel in self.kernels]) + + @property + def requires_vector_input(self): + """Returns whether the kernel is defined on discrete structures.""" + return np.any([kernel.requires_vector_input for kernel in self.kernels]) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to `np.diag(self(X))`; however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X, n_kernels) + Diagonal of kernel k(X, X) + """ + return np.vstack([kernel.diag(X) for kernel in self.kernels]).T + + +class KernelOperator(Kernel): + """Base class for all kernel operators. + + .. versionadded:: 0.18 + """ + + def __init__(self, k1, k2): + self.k1 = k1 + self.k2 = k2 + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. 
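# Illustrative sketch (not library code): kernel operators expose their
# components under "k1__"/"k2__" prefixes when deep=True, and the same prefixed
# names are accepted by set_params. The key list shown is indicative.
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

k = RBF(length_scale=1.0) + WhiteKernel(noise_level=0.2)   # a Sum kernel operator
print(sorted(k.get_params(deep=True)))
# ['k1', 'k1__length_scale', 'k1__length_scale_bounds',
#  'k2', 'k2__noise_level', 'k2__noise_level_bounds']
k.set_params(k1__length_scale=3.0)
print(k.k1.length_scale)  # 3.0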
+ """ + params = dict(k1=self.k1, k2=self.k2) + if deep: + deep_items = self.k1.get_params().items() + params.update(("k1__" + k, val) for k, val in deep_items) + deep_items = self.k2.get_params().items() + params.update(("k2__" + k, val) for k, val in deep_items) + + return params + + @property + def hyperparameters(self): + """Returns a list of all hyperparameter.""" + r = [ + Hyperparameter( + "k1__" + hyperparameter.name, + hyperparameter.value_type, + hyperparameter.bounds, + hyperparameter.n_elements, + ) + for hyperparameter in self.k1.hyperparameters + ] + + for hyperparameter in self.k2.hyperparameters: + r.append( + Hyperparameter( + "k2__" + hyperparameter.name, + hyperparameter.value_type, + hyperparameter.bounds, + hyperparameter.n_elements, + ) + ) + return r + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. + + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + return np.append(self.k1.theta, self.k2.theta) + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + k1_dims = self.k1.n_dims + self.k1.theta = theta[:k1_dims] + self.k2.theta = theta[k1_dims:] + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. + + Returns + ------- + bounds : ndarray of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + if self.k1.bounds.size == 0: + return self.k2.bounds + if self.k2.bounds.size == 0: + return self.k1.bounds + return np.vstack((self.k1.bounds, self.k2.bounds)) + + def __eq__(self, b): + if type(self) != type(b): + return False + return (self.k1 == b.k1 and self.k2 == b.k2) or ( + self.k1 == b.k2 and self.k2 == b.k1 + ) + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return self.k1.is_stationary() and self.k2.is_stationary() + + @property + def requires_vector_input(self): + """Returns whether the kernel is stationary.""" + return self.k1.requires_vector_input or self.k2.requires_vector_input + + +class Sum(KernelOperator): + """The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2` + and combines them via + + .. math:: + k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y) + + Note that the `__add__` magic method is overridden, so + `Sum(RBF(), RBF())` is equivalent to using the + operator + with `RBF() + RBF()`. + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + k1 : Kernel + The first base-kernel of the sum-kernel + + k2 : Kernel + The second base-kernel of the sum-kernel + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = Sum(ConstantKernel(2), RBF()) + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... 
random_state=0).fit(X, y) + >>> gpr.score(X, y) + 1.0 + >>> kernel + 1.41**2 + RBF(length_scale=1) + """ + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K1, K1_gradient = self.k1(X, Y, eval_gradient=True) + K2, K2_gradient = self.k2(X, Y, eval_gradient=True) + return K1 + K2, np.dstack((K1_gradient, K2_gradient)) + else: + return self.k1(X, Y) + self.k2(X, Y) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to `np.diag(self(X))`; however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.k1.diag(X) + self.k2.diag(X) + + def __repr__(self): + return "{0} + {1}".format(self.k1, self.k2) + + +class Product(KernelOperator): + """The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2` + and combines them via + + .. math:: + k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y) + + Note that the `__mul__` magic method is overridden, so + `Product(RBF(), RBF())` is equivalent to using the * operator + with `RBF() * RBF()`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + k1 : Kernel + The first base-kernel of the product-kernel + + k2 : Kernel + The second base-kernel of the product-kernel + + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import (RBF, Product, + ... ConstantKernel) + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = Product(ConstantKernel(2), RBF()) + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 1.0 + >>> kernel + 1.41**2 * RBF(length_scale=1) + """ + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_Y, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. 
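# Illustrative sketch (shapes are assumptions): Sum.__call__ above stacks the
# component gradients along the last axis, so the gradient of the sum has one
# slice per free hyperparameter of k1 followed by those of k2.
import numpy as np
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

X = np.array([[0.0], [1.0], [2.0]])
k = RBF(length_scale=1.0) + WhiteKernel(noise_level=0.3)
K, K_grad = k(X, eval_gradient=True)
print(K.shape)       # (3, 3)
print(K_grad.shape)  # (3, 3, 2): one slice for the RBF, one for the WhiteKernel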
+ + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K1, K1_gradient = self.k1(X, Y, eval_gradient=True) + K2, K2_gradient = self.k2(X, Y, eval_gradient=True) + return K1 * K2, np.dstack( + (K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis]) + ) + else: + return self.k1(X, Y) * self.k2(X, Y) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.k1.diag(X) * self.k2.diag(X) + + def __repr__(self): + return "{0} * {1}".format(self.k1, self.k2) + + +class Exponentiation(Kernel): + """The Exponentiation kernel takes one base kernel and a scalar parameter + :math:`p` and combines them via + + .. math:: + k_{exp}(X, Y) = k(X, Y) ^p + + Note that the `__pow__` magic method is overridden, so + `Exponentiation(RBF(), 2)` is equivalent to using the ** operator + with `RBF() ** 2`. + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : Kernel + The base kernel + + exponent : float + The exponent for the base kernel + + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import (RationalQuadratic, + ... Exponentiation) + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = Exponentiation(RationalQuadratic(), exponent=2) + >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.419... + >>> gpr.predict(X[:1,:], return_std=True) + (array([635.5...]), array([0.559...])) + """ + + def __init__(self, kernel, exponent): + self.kernel = kernel + self.exponent = exponent + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + params = dict(kernel=self.kernel, exponent=self.exponent) + if deep: + deep_items = self.kernel.get_params().items() + params.update(("kernel__" + k, val) for k, val in deep_items) + return params + + @property + def hyperparameters(self): + """Returns a list of all hyperparameter.""" + r = [] + for hyperparameter in self.kernel.hyperparameters: + r.append( + Hyperparameter( + "kernel__" + hyperparameter.name, + hyperparameter.value_type, + hyperparameter.bounds, + hyperparameter.n_elements, + ) + ) + return r + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. 
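# Illustrative sketch (not library code): as the Exponentiation docstring above
# states, `RBF() ** 2` builds Exponentiation(RBF(), 2), which simply raises the
# base kernel's values to the given power.
import numpy as np
from sklearn.gaussian_process.kernels import RBF, Exponentiation

X = np.array([[0.0], [1.0]])
k = RBF(length_scale=1.0) ** 2
assert isinstance(k, Exponentiation)
assert np.allclose(k(X), RBF(length_scale=1.0)(X) ** 2)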
+ + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + return self.kernel.theta + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + self.kernel.theta = theta + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. + + Returns + ------- + bounds : ndarray of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + return self.kernel.bounds + + def __eq__(self, b): + if type(self) != type(b): + return False + return self.kernel == b.kernel and self.exponent == b.exponent + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_Y, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K, K_gradient = self.kernel(X, Y, eval_gradient=True) + K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1) + return K**self.exponent, K_gradient + else: + K = self.kernel(X, Y, eval_gradient=False) + return K**self.exponent + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.kernel.diag(X) ** self.exponent + + def __repr__(self): + return "{0} ** {1}".format(self.kernel, self.exponent) + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return self.kernel.is_stationary() + + @property + def requires_vector_input(self): + """Returns whether the kernel is defined on discrete structures.""" + return self.kernel.requires_vector_input + + +class ConstantKernel(StationaryKernelMixin, GenericKernelMixin, Kernel): + """Constant kernel. + + Can be used as part of a product-kernel where it scales the magnitude of + the other factor (kernel) or as part of a sum-kernel, where it modifies + the mean of the Gaussian process. + + .. 
math:: + k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2 + + Adding a constant kernel is equivalent to adding a constant:: + + kernel = RBF() + ConstantKernel(constant_value=2) + + is the same as:: + + kernel = RBF() + 2 + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + constant_value : float, default=1.0 + The constant value which defines the covariance: + k(x_1, x_2) = constant_value + + constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on `constant_value`. + If set to "fixed", `constant_value` cannot be changed during + hyperparameter tuning. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = RBF() + ConstantKernel(constant_value=2) + >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3696... + >>> gpr.predict(X[:1,:], return_std=True) + (array([606.1...]), array([0.24...])) + """ + + def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)): + self.constant_value = constant_value + self.constant_value_bounds = constant_value_bounds + + @property + def hyperparameter_constant_value(self): + return Hyperparameter("constant_value", "numeric", self.constant_value_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object, \ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when eval_gradient + is True. + """ + if Y is None: + Y = X + elif eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + + K = np.full( + (_num_samples(X), _num_samples(Y)), + self.constant_value, + dtype=np.array(self.constant_value).dtype, + ) + if eval_gradient: + if not self.hyperparameter_constant_value.fixed: + return ( + K, + np.full( + (_num_samples(X), _num_samples(X), 1), + self.constant_value, + dtype=np.array(self.constant_value).dtype, + ), + ) + else: + return K, np.empty((_num_samples(X), _num_samples(X), 0)) + else: + return K + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. 
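# Illustrative sketch (not library code): the equivalence stated in the
# ConstantKernel docstring above — adding the kernel and adding a plain scalar
# build the same sum kernel.
import numpy as np
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

X = np.array([[0.0], [1.0], [3.0]])
k_explicit = RBF() + ConstantKernel(constant_value=2.0)
k_scalar = RBF() + 2.0
assert k_explicit == k_scalar
assert np.allclose(k_explicit(X), k_scalar(X))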
+ + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return np.full( + _num_samples(X), + self.constant_value, + dtype=np.array(self.constant_value).dtype, + ) + + def __repr__(self): + return "{0:.3g}**2".format(np.sqrt(self.constant_value)) + + +class WhiteKernel(StationaryKernelMixin, GenericKernelMixin, Kernel): + """White kernel. + + The main use-case of this kernel is as part of a sum-kernel where it + explains the noise of the signal as independently and identically + normally-distributed. The parameter noise_level equals the variance of this + noise. + + .. math:: + k(x_1, x_2) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0 + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + noise_level : float, default=1.0 + Parameter controlling the noise level (variance) + + noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'noise_level'. + If set to "fixed", 'noise_level' cannot be changed during + hyperparameter tuning. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = DotProduct() + WhiteKernel(noise_level=0.5) + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3680... + >>> gpr.predict(X[:2,:], return_std=True) + (array([653.0..., 592.1... ]), array([316.6..., 316.6...])) + """ + + def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)): + self.noise_level = noise_level + self.noise_level_bounds = noise_level_bounds + + @property + def hyperparameter_noise_level(self): + return Hyperparameter("noise_level", "numeric", self.noise_level_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when eval_gradient + is True. + """ + if Y is not None and eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + + if Y is None: + K = self.noise_level * np.eye(_num_samples(X)) + if eval_gradient: + if not self.hyperparameter_noise_level.fixed: + return ( + K, + self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis], + ) + else: + return K, np.empty((_num_samples(X), _num_samples(X), 0)) + else: + return K + else: + return np.zeros((_num_samples(X), _num_samples(Y))) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). 
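# Illustrative sketch (values are assumptions): WhiteKernel behaves as documented
# above — on a single set of inputs it puts noise_level on the diagonal only,
# and it is identically zero between two distinct sets of points.
import numpy as np
from sklearn.gaussian_process.kernels import WhiteKernel

X = np.zeros((3, 1))
Y = np.ones((2, 1))
k = WhiteKernel(noise_level=0.5)
assert np.allclose(k(X), 0.5 * np.eye(3))
assert np.allclose(k(X, Y), 0.0)
assert np.allclose(k.diag(X), 0.5)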
+ + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return np.full( + _num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype + ) + + def __repr__(self): + return "{0}(noise_level={1:.3g})".format( + self.__class__.__name__, self.noise_level + ) + + +class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel): + """Radial basis function kernel (aka squared-exponential kernel). + + The RBF kernel is a stationary kernel. It is also known as the + "squared exponential" kernel. It is parameterized by a length scale + parameter :math:`l>0`, which can either be a scalar (isotropic variant + of the kernel) or a vector with the same number of dimensions as the inputs + X (anisotropic variant of the kernel). The kernel is given by: + + .. math:: + k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right) + + where :math:`l` is the length scale of the kernel and + :math:`d(\\cdot,\\cdot)` is the Euclidean distance. + For advice on how to set the length scale parameter, see e.g. [1]_. + + This kernel is infinitely differentiable, which implies that GPs with this + kernel as covariance function have mean square derivatives of all orders, + and are thus very smooth. + See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + length_scale : float or ndarray of shape (n_features,), default=1.0 + The length scale of the kernel. If a float, an isotropic kernel is + used. If an array, an anisotropic kernel is used where each dimension + of l defines the length-scale of the respective feature dimension. + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + References + ---------- + .. [1] `David Duvenaud (2014). "The Kernel Cookbook: + Advice on Covariance functions". + `_ + + .. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006). + "Gaussian Processes for Machine Learning". The MIT Press. + `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import RBF + >>> X, y = load_iris(return_X_y=True) + >>> kernel = 1.0 * RBF(1.0) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9866... 
+ >>> gpc.predict_proba(X[:2,:]) + array([[0.8354..., 0.03228..., 0.1322...], + [0.7906..., 0.0652..., 0.1441...]]) + """ + + def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)): + self.length_scale = length_scale + self.length_scale_bounds = length_scale_bounds + + @property + def anisotropic(self): + return np.iterable(self.length_scale) and len(self.length_scale) > 1 + + @property + def hyperparameter_length_scale(self): + if self.anisotropic: + return Hyperparameter( + "length_scale", + "numeric", + self.length_scale_bounds, + len(self.length_scale), + ) + return Hyperparameter("length_scale", "numeric", self.length_scale_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + X = np.atleast_2d(X) + length_scale = _check_length_scale(X, self.length_scale) + if Y is None: + dists = pdist(X / length_scale, metric="sqeuclidean") + K = np.exp(-0.5 * dists) + # convert from upper-triangular matrix to square matrix + K = squareform(K) + np.fill_diagonal(K, 1) + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean") + K = np.exp(-0.5 * dists) + + if eval_gradient: + if self.hyperparameter_length_scale.fixed: + # Hyperparameter l kept fixed + return K, np.empty((X.shape[0], X.shape[0], 0)) + elif not self.anisotropic or length_scale.shape[0] == 1: + K_gradient = (K * squareform(dists))[:, :, np.newaxis] + return K, K_gradient + elif self.anisotropic: + # We need to recompute the pairwise dimension-wise distances + K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / ( + length_scale**2 + ) + K_gradient *= K[..., np.newaxis] + return K, K_gradient + else: + return K + + def __repr__(self): + if self.anisotropic: + return "{0}(length_scale=[{1}])".format( + self.__class__.__name__, + ", ".join(map("{0:.3g}".format, self.length_scale)), + ) + else: # isotropic + return "{0}(length_scale={1:.3g})".format( + self.__class__.__name__, np.ravel(self.length_scale)[0] + ) + + +class Matern(RBF): + """Matern kernel. + + The class of Matern kernels is a generalization of the :class:`RBF`. + It has an additional parameter :math:`\\nu` which controls the + smoothness of the resulting function. The smaller :math:`\\nu`, + the less smooth the approximated function is. + As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to + the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel + becomes identical to the absolute exponential kernel. + Important intermediate values are + :math:`\\nu=1.5` (once differentiable functions) + and :math:`\\nu=2.5` (twice differentiable functions). + + The kernel is given by: + + .. 
math:: + k(x_i, x_j) = \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg( + \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j ) + \\Bigg)^\\nu K_\\nu\\Bigg( + \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )\\Bigg) + + + + where :math:`d(\\cdot,\\cdot)` is the Euclidean distance, + :math:`K_{\\nu}(\\cdot)` is a modified Bessel function and + :math:`\\Gamma(\\cdot)` is the gamma function. + See [1]_, Chapter 4, Section 4.2, for details regarding the different + variants of the Matern kernel. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + length_scale : float or ndarray of shape (n_features,), default=1.0 + The length scale of the kernel. If a float, an isotropic kernel is + used. If an array, an anisotropic kernel is used where each dimension + of l defines the length-scale of the respective feature dimension. + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + nu : float, default=1.5 + The parameter nu controlling the smoothness of the learned function. + The smaller nu, the less smooth the approximated function is. + For nu=inf, the kernel becomes equivalent to the RBF kernel and for + nu=0.5 to the absolute exponential kernel. Important intermediate + values are nu=1.5 (once differentiable functions) and nu=2.5 + (twice differentiable functions). Note that values of nu not in + [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost + (appr. 10 times higher) since they require to evaluate the modified + Bessel function. Furthermore, in contrast to l, nu is kept fixed to + its initial value and not optimized. + + References + ---------- + .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006). + "Gaussian Processes for Machine Learning". The MIT Press. + `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import Matern + >>> X, y = load_iris(return_X_y=True) + >>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9866... + >>> gpc.predict_proba(X[:2,:]) + array([[0.8513..., 0.0368..., 0.1117...], + [0.8086..., 0.0693..., 0.1220...]]) + """ + + def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5): + super().__init__(length_scale, length_scale_bounds) + self.nu = nu + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. 
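# Illustrative sketch (not library code): the limiting cases named in the Matern
# docstring above — nu=0.5 reduces to the absolute exponential kernel exp(-d/l),
# and nu=inf coincides with the RBF kernel.
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import RBF, Matern

X = np.array([[0.0], [0.7], [2.0]])
d = cdist(X, X)
assert np.allclose(Matern(length_scale=1.0, nu=0.5)(X), np.exp(-d))
assert np.allclose(Matern(length_scale=1.0, nu=np.inf)(X), RBF(length_scale=1.0)(X))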
+ """ + X = np.atleast_2d(X) + length_scale = _check_length_scale(X, self.length_scale) + if Y is None: + dists = pdist(X / length_scale, metric="euclidean") + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X / length_scale, Y / length_scale, metric="euclidean") + + if self.nu == 0.5: + K = np.exp(-dists) + elif self.nu == 1.5: + K = dists * math.sqrt(3) + K = (1.0 + K) * np.exp(-K) + elif self.nu == 2.5: + K = dists * math.sqrt(5) + K = (1.0 + K + K**2 / 3.0) * np.exp(-K) + elif self.nu == np.inf: + K = np.exp(-(dists**2) / 2.0) + else: # general case; expensive to evaluate + K = dists + K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan + tmp = math.sqrt(2 * self.nu) * K + K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu)) + K *= tmp**self.nu + K *= kv(self.nu, tmp) + + if Y is None: + # convert from upper-triangular matrix to square matrix + K = squareform(K) + np.fill_diagonal(K, 1) + + if eval_gradient: + if self.hyperparameter_length_scale.fixed: + # Hyperparameter l kept fixed + K_gradient = np.empty((X.shape[0], X.shape[0], 0)) + return K, K_gradient + + # We need to recompute the pairwise dimension-wise distances + if self.anisotropic: + D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / ( + length_scale**2 + ) + else: + D = squareform(dists**2)[:, :, np.newaxis] + + if self.nu == 0.5: + denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis] + divide_result = np.zeros_like(D) + np.divide( + D, + denominator, + out=divide_result, + where=denominator != 0, + ) + K_gradient = K[..., np.newaxis] * divide_result + elif self.nu == 1.5: + K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis] + elif self.nu == 2.5: + tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis] + K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp) + elif self.nu == np.inf: + K_gradient = D * K[..., np.newaxis] + else: + # approximate gradient numerically + def f(theta): # helper function + return self.clone_with_theta(theta)(X, Y) + + return K, _approx_fprime(self.theta, f, 1e-10) + + if not self.anisotropic: + return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis] + else: + return K, K_gradient + else: + return K + + def __repr__(self): + if self.anisotropic: + return "{0}(length_scale=[{1}], nu={2:.3g})".format( + self.__class__.__name__, + ", ".join(map("{0:.3g}".format, self.length_scale)), + self.nu, + ) + else: + return "{0}(length_scale={1:.3g}, nu={2:.3g})".format( + self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu + ) + + +class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel): + """Rational Quadratic kernel. + + The RationalQuadratic kernel can be seen as a scale mixture (an infinite + sum) of RBF kernels with different characteristic length scales. It is + parameterized by a length scale parameter :math:`l>0` and a scale + mixture parameter :math:`\\alpha>0`. Only the isotropic variant + where length_scale :math:`l` is a scalar is supported at the moment. + The kernel is given by: + + .. math:: + k(x_i, x_j) = \\left( + 1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha l^2}\\right)^{-\\alpha} + + where :math:`\\alpha` is the scale mixture parameter, :math:`l` is + the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the + Euclidean distance. + For advice on how to set the parameters, see e.g. [1]_. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + length_scale : float > 0, default=1.0 + The length scale of the kernel. 
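# Illustrative sketch (values are assumptions): the RationalQuadratic formula
# given above, k = (1 + d^2 / (2*alpha*l^2)) ** (-alpha), checked directly, plus
# the scale-mixture intuition — for very large alpha the kernel approaches RBF.
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import RBF, RationalQuadratic

X = np.array([[0.0], [1.0], [2.0]])
d2 = cdist(X, X, metric="sqeuclidean")
alpha, l = 1.5, 0.8
K = RationalQuadratic(length_scale=l, alpha=alpha)(X)
assert np.allclose(K, (1 + d2 / (2 * alpha * l**2)) ** -alpha)
assert np.allclose(
    RationalQuadratic(length_scale=l, alpha=1e6)(X), RBF(length_scale=l)(X), atol=1e-4
)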
+ + alpha : float > 0, default=1.0 + Scale mixture parameter + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'alpha'. + If set to "fixed", 'alpha' cannot be changed during + hyperparameter tuning. + + References + ---------- + .. [1] `David Duvenaud (2014). "The Kernel Cookbook: + Advice on Covariance functions". + `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import RationalQuadratic + >>> X, y = load_iris(return_X_y=True) + >>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9733... + >>> gpc.predict_proba(X[:2,:]) + array([[0.8881..., 0.0566..., 0.05518...], + [0.8678..., 0.0707... , 0.0614...]]) + """ + + def __init__( + self, + length_scale=1.0, + alpha=1.0, + length_scale_bounds=(1e-5, 1e5), + alpha_bounds=(1e-5, 1e5), + ): + self.length_scale = length_scale + self.alpha = alpha + self.length_scale_bounds = length_scale_bounds + self.alpha_bounds = alpha_bounds + + @property + def hyperparameter_length_scale(self): + return Hyperparameter("length_scale", "numeric", self.length_scale_bounds) + + @property + def hyperparameter_alpha(self): + return Hyperparameter("alpha", "numeric", self.alpha_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims) + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when eval_gradient + is True. 
+ """ + if len(np.atleast_1d(self.length_scale)) > 1: + raise AttributeError( + "RationalQuadratic kernel only supports isotropic version, " + "please use a single scalar for length_scale" + ) + X = np.atleast_2d(X) + if Y is None: + dists = squareform(pdist(X, metric="sqeuclidean")) + tmp = dists / (2 * self.alpha * self.length_scale**2) + base = 1 + tmp + K = base**-self.alpha + np.fill_diagonal(K, 1) + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X, Y, metric="sqeuclidean") + K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha + + if eval_gradient: + # gradient with respect to length_scale + if not self.hyperparameter_length_scale.fixed: + length_scale_gradient = dists * K / (self.length_scale**2 * base) + length_scale_gradient = length_scale_gradient[:, :, np.newaxis] + else: # l is kept fixed + length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) + + # gradient with respect to alpha + if not self.hyperparameter_alpha.fixed: + alpha_gradient = K * ( + -self.alpha * np.log(base) + + dists / (2 * self.length_scale**2 * base) + ) + alpha_gradient = alpha_gradient[:, :, np.newaxis] + else: # alpha is kept fixed + alpha_gradient = np.empty((K.shape[0], K.shape[1], 0)) + + return K, np.dstack((alpha_gradient, length_scale_gradient)) + else: + return K + + def __repr__(self): + return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format( + self.__class__.__name__, self.alpha, self.length_scale + ) + + +class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel): + r"""Exp-Sine-Squared kernel (aka periodic kernel). + + The ExpSineSquared kernel allows one to model functions which repeat + themselves exactly. It is parameterized by a length scale + parameter :math:`l>0` and a periodicity parameter :math:`p>0`. + Only the isotropic variant where :math:`l` is a scalar is + supported at the moment. The kernel is given by: + + .. math:: + k(x_i, x_j) = \text{exp}\left(- + \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^ 2} \right) + + where :math:`l` is the length scale of the kernel, :math:`p` the + periodicity of the kernel and :math:`d(\cdot,\cdot)` is the + Euclidean distance. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + + length_scale : float > 0, default=1.0 + The length scale of the kernel. + + periodicity : float > 0, default=1.0 + The periodicity of the kernel. + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'periodicity'. + If set to "fixed", 'periodicity' cannot be changed during + hyperparameter tuning. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import ExpSineSquared + >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0) + >>> kernel = ExpSineSquared(length_scale=1, periodicity=1) + >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.0144... 
+ >>> gpr.predict(X[:2,:], return_std=True) + (array([425.6..., 457.5...]), array([0.3894..., 0.3467...])) + """ + + def __init__( + self, + length_scale=1.0, + periodicity=1.0, + length_scale_bounds=(1e-5, 1e5), + periodicity_bounds=(1e-5, 1e5), + ): + self.length_scale = length_scale + self.periodicity = periodicity + self.length_scale_bounds = length_scale_bounds + self.periodicity_bounds = periodicity_bounds + + @property + def hyperparameter_length_scale(self): + """Returns the length scale""" + return Hyperparameter("length_scale", "numeric", self.length_scale_bounds) + + @property + def hyperparameter_periodicity(self): + return Hyperparameter("periodicity", "numeric", self.periodicity_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + X = np.atleast_2d(X) + if Y is None: + dists = squareform(pdist(X, metric="euclidean")) + arg = np.pi * dists / self.periodicity + sin_of_arg = np.sin(arg) + K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2) + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X, Y, metric="euclidean") + K = np.exp( + -2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2 + ) + + if eval_gradient: + cos_of_arg = np.cos(arg) + # gradient with respect to length_scale + if not self.hyperparameter_length_scale.fixed: + length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K + length_scale_gradient = length_scale_gradient[:, :, np.newaxis] + else: # length_scale is kept fixed + length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) + # gradient with respect to p + if not self.hyperparameter_periodicity.fixed: + periodicity_gradient = ( + 4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K + ) + periodicity_gradient = periodicity_gradient[:, :, np.newaxis] + else: # p is kept fixed + periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0)) + + return K, np.dstack((length_scale_gradient, periodicity_gradient)) + else: + return K + + def __repr__(self): + return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format( + self.__class__.__name__, self.length_scale, self.periodicity + ) + + +class DotProduct(Kernel): + r"""Dot-Product kernel. + + The DotProduct kernel is non-stationary and can be obtained from linear + regression by putting :math:`N(0, 1)` priors on the coefficients + of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)` + on the bias. The DotProduct kernel is invariant to a rotation of + the coordinates about the origin, but not translations. + It is parameterized by a parameter sigma_0 :math:`\sigma` + which controls the inhomogenity of the kernel. 
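# --- Editor's sketch (not part of the upstream diff) ---------------------------
# The ExpSineSquared kernel implemented above has a vanishing exponent whenever
# two inputs are separated by an exact multiple of the periodicity, so such
# points are perfectly correlated. A minimal illustration:
import numpy as np
from sklearn.gaussian_process.kernels import ExpSineSquared

k = ExpSineSquared(length_scale=1.0, periodicity=2.0)
X_demo = np.array([[0.0], [2.0], [4.0], [0.7]])
K = k(X_demo)
assert np.allclose(K[0, 1], 1.0) and np.allclose(K[0, 2], 1.0)  # one and two periods apart
assert K[0, 3] < 1.0  # a non-multiple of the period is less than fully correlated
# --------------------------------------------------------------------------------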
For :math:`\sigma_0^2 =0`, + the kernel is called the homogeneous linear kernel, otherwise + it is inhomogeneous. The kernel is given by + + .. math:: + k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j + + The DotProduct kernel is commonly combined with exponentiation. + + See [1]_, Chapter 4, Section 4.2, for further details regarding the + DotProduct kernel. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + sigma_0 : float >= 0, default=1.0 + Parameter controlling the inhomogenity of the kernel. If sigma_0=0, + the kernel is homogeneous. + + sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'sigma_0'. + If set to "fixed", 'sigma_0' cannot be changed during + hyperparameter tuning. + + References + ---------- + .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006). + "Gaussian Processes for Machine Learning". The MIT Press. + `_ + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = DotProduct() + WhiteKernel() + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3680... + >>> gpr.predict(X[:2,:], return_std=True) + (array([653.0..., 592.1...]), array([316.6..., 316.6...])) + """ + + def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)): + self.sigma_0 = sigma_0 + self.sigma_0_bounds = sigma_0_bounds + + @property + def hyperparameter_sigma_0(self): + return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + X = np.atleast_2d(X) + if Y is None: + K = np.inner(X, X) + self.sigma_0**2 + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + K = np.inner(X, Y) + self.sigma_0**2 + + if eval_gradient: + if not self.hyperparameter_sigma_0.fixed: + K_gradient = np.empty((K.shape[0], K.shape[1], 1)) + K_gradient[..., 0] = 2 * self.sigma_0**2 + return K, K_gradient + else: + return K, np.empty((X.shape[0], X.shape[0], 0)) + else: + return K + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y). 
+ + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X). + """ + return np.einsum("ij,ij->i", X, X) + self.sigma_0**2 + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return False + + def __repr__(self): + return "{0}(sigma_0={1:.3g})".format(self.__class__.__name__, self.sigma_0) + + +# adapted from scipy/optimize/optimize.py for functions with 2d output +def _approx_fprime(xk, f, epsilon, args=()): + f0 = f(*((xk,) + args)) + grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float) + ei = np.zeros((len(xk),), float) + for k in range(len(xk)): + ei[k] = 1.0 + d = epsilon * ei + grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k] + ei[k] = 0.0 + return grad + + +class PairwiseKernel(Kernel): + """Wrapper for kernels in sklearn.metrics.pairwise. + + A thin wrapper around the functionality of the kernels in + sklearn.metrics.pairwise. + + Note: Evaluation of eval_gradient is not analytic but numeric and all + kernels support only isotropic distances. The parameter gamma is + considered to be a hyperparameter and may be optimized. The other + kernel parameters are set directly at initialization and are kept + fixed. + + .. versionadded:: 0.18 + + Parameters + ---------- + gamma : float, default=1.0 + Parameter gamma of the pairwise kernel specified by metric. It should + be positive. + + gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'gamma'. + If set to "fixed", 'gamma' cannot be changed during + hyperparameter tuning. + + metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \ + "rbf", "laplacian", "sigmoid", "cosine"} or callable, \ + default="linear" + The metric to use when calculating kernel between instances in a + feature array. If metric is a string, it must be one of the metrics + in pairwise.PAIRWISE_KERNEL_FUNCTIONS. + If metric is "precomputed", X is assumed to be a kernel matrix. + Alternatively, if metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays from X as input and return a value indicating + the distance between them. + + pairwise_kernels_kwargs : dict, default=None + All entries of this dict (if any) are passed as keyword arguments to + the pairwise kernel function. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import PairwiseKernel + >>> X, y = load_iris(return_X_y=True) + >>> kernel = PairwiseKernel(metric='rbf') + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9733... + >>> gpc.predict_proba(X[:2,:]) + array([[0.8880..., 0.05663..., 0.05532...], + [0.8676..., 0.07073..., 0.06165...]]) + """ + + def __init__( + self, + gamma=1.0, + gamma_bounds=(1e-5, 1e5), + metric="linear", + pairwise_kernels_kwargs=None, + ): + self.gamma = gamma + self.gamma_bounds = gamma_bounds + self.metric = metric + self.pairwise_kernels_kwargs = pairwise_kernels_kwargs + + @property + def hyperparameter_gamma(self): + return Hyperparameter("gamma", "numeric", self.gamma_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. 
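# --- Editor's sketch (not part of the upstream diff) ---------------------------
# Two properties of the DotProduct kernel defined above: diag(X) matches
# np.diag(k(X)) without building the full Gram matrix, and the gradient with
# respect to log(sigma_0) is the constant 2 * sigma_0**2 used in __call__.
import numpy as np
from sklearn.gaussian_process.kernels import DotProduct

rng = np.random.RandomState(0)
X_demo = rng.randn(6, 3)
k = DotProduct(sigma_0=2.0)
assert np.allclose(k.diag(X_demo), np.diag(k(X_demo)))
K, K_grad = k(X_demo, eval_gradient=True)
assert np.allclose(K_grad[..., 0], 2 * k.sigma_0**2)
# --------------------------------------------------------------------------------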
+ + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + pairwise_kernels_kwargs = self.pairwise_kernels_kwargs + if self.pairwise_kernels_kwargs is None: + pairwise_kernels_kwargs = {} + + X = np.atleast_2d(X) + K = pairwise_kernels( + X, + Y, + metric=self.metric, + gamma=self.gamma, + filter_params=True, + **pairwise_kernels_kwargs, + ) + if eval_gradient: + if self.hyperparameter_gamma.fixed: + return K, np.empty((X.shape[0], X.shape[0], 0)) + else: + # approximate gradient numerically + def f(gamma): # helper function + return pairwise_kernels( + X, + Y, + metric=self.metric, + gamma=np.exp(gamma), + filter_params=True, + **pairwise_kernels_kwargs, + ) + + return K, _approx_fprime(self.theta, f, 1e-10) + else: + return K + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + # We have to fall back to slow way of computing diagonal + return np.apply_along_axis(self, 1, X).ravel() + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return self.metric in ["rbf"] + + def __repr__(self): + return "{0}(gamma={1}, metric={2})".format( + self.__class__.__name__, self.gamma, self.metric + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3acadd973584658bdeea208b4017ee58f77d8f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81ff57f96073e7c7f6e27d7bd055b3cb30adec68 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..852f3352624ff7d9a04abefdded46a05878d05d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91f4a6df09d790c70eb146c6870dfd382cc2eeda Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb356c6e16b796540846f15e328261a391e01aad Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..4667329aff9b8dbeffa90bb0c40c98a708fcc205 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py @@ -0,0 +1,54 @@ +import numpy as np + +from sklearn.base import clone +from sklearn.gaussian_process.kernels import ( + GenericKernelMixin, + Hyperparameter, + Kernel, + StationaryKernelMixin, +) + + +class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel): + """ + A minimal (but valid) convolutional kernel for sequences of variable + length. 
+ """ + + def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)): + self.baseline_similarity = baseline_similarity + self.baseline_similarity_bounds = baseline_similarity_bounds + + @property + def hyperparameter_baseline_similarity(self): + return Hyperparameter( + "baseline_similarity", "numeric", self.baseline_similarity_bounds + ) + + def _f(self, s1, s2): + return sum( + [1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2] + ) + + def _g(self, s1, s2): + return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2]) + + def __call__(self, X, Y=None, eval_gradient=False): + if Y is None: + Y = X + + if eval_gradient: + return ( + np.array([[self._f(x, y) for y in Y] for x in X]), + np.array([[[self._g(x, y)] for y in Y] for x in X]), + ) + else: + return np.array([[self._f(x, y) for y in Y] for x in X]) + + def diag(self, X): + return np.array([self._f(x, x) for x in X]) + + def clone_with_theta(self, theta): + cloned = clone(self) + cloned.theta = theta + return cloned diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py new file mode 100644 index 0000000000000000000000000000000000000000..842159f13ac0406c36357e4cc9b1060ff560c334 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py @@ -0,0 +1,288 @@ +"""Testing for Gaussian process classification """ + +# Author: Jan Hendrik Metzen +# License: BSD 3 clause + +import warnings + +import numpy as np +import pytest +from scipy.optimize import approx_fprime + +from sklearn.exceptions import ConvergenceWarning +from sklearn.gaussian_process import GaussianProcessClassifier +from sklearn.gaussian_process.kernels import ( + RBF, + CompoundKernel, + WhiteKernel, +) +from sklearn.gaussian_process.kernels import ( + ConstantKernel as C, +) +from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel +from sklearn.utils._testing import assert_almost_equal, assert_array_equal + + +def f(x): + return np.sin(x) + + +X = np.atleast_2d(np.linspace(0, 10, 30)).T +X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T +y = np.array(f(X).ravel() > 0, dtype=int) +fX = f(X).ravel() +y_mc = np.empty(y.shape, dtype=int) # multi-class +y_mc[fX < -0.35] = 0 +y_mc[(fX >= -0.35) & (fX < 0.35)] = 1 +y_mc[fX > 0.35] = 2 + + +fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed") +kernels = [ + RBF(length_scale=0.1), + fixed_kernel, + RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), + C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), +] +non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel] + + +@pytest.mark.parametrize("kernel", kernels) +def test_predict_consistent(kernel): + # Check binary predict decision has also predicted probability above 0.5. + gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) + assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5) + + +def test_predict_consistent_structured(): + # Check binary predict decision has also predicted probability above 0.5. 
+ X = ["A", "AB", "B"] + y = np.array([True, False, True]) + kernel = MiniSeqKernel(baseline_similarity_bounds="fixed") + gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) + assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_lml_improving(kernel): + # Test that hyperparameter-tuning improves log-marginal likelihood. + gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) + assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood( + kernel.theta + ) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_precomputed(kernel): + # Test that lml of optimized kernel is stored correctly. + gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) + assert_almost_equal( + gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7 + ) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_without_cloning_kernel(kernel): + # Test that clone_kernel=False has side-effects of kernel.theta. + gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) + input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64) + + gpc.log_marginal_likelihood(input_theta, clone_kernel=False) + assert_almost_equal(gpc.kernel_.theta, input_theta, 7) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_converged_to_local_maximum(kernel): + # Test that we are in local maximum after hyperparameter-optimization. + gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) + + lml, lml_gradient = gpc.log_marginal_likelihood(gpc.kernel_.theta, True) + + assert np.all( + (np.abs(lml_gradient) < 1e-4) + | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) + | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]) + ) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_gradient(kernel): + # Compare analytic and numeric gradient of log marginal likelihood. + gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) + + lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True) + lml_gradient_approx = approx_fprime( + kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10 + ) + + assert_almost_equal(lml_gradient, lml_gradient_approx, 3) + + +def test_random_starts(global_random_seed): + # Test that an increasing number of random-starts of GP fitting only + # increases the log marginal likelihood of the chosen theta. + n_samples, n_features = 25, 2 + rng = np.random.RandomState(global_random_seed) + X = rng.randn(n_samples, n_features) * 2 - 1 + y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0 + + kernel = C(1.0, (1e-2, 1e2)) * RBF( + length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features + ) + last_lml = -np.inf + for n_restarts_optimizer in range(5): + gp = GaussianProcessClassifier( + kernel=kernel, + n_restarts_optimizer=n_restarts_optimizer, + random_state=global_random_seed, + ).fit(X, y) + lml = gp.log_marginal_likelihood(gp.kernel_.theta) + assert lml > last_lml - np.finfo(np.float32).eps + last_lml = lml + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_custom_optimizer(kernel, global_random_seed): + # Test that GPC can use externally defined optimizers. 
+ # Define a dummy optimizer that simply tests 10 random hyperparameters + def optimizer(obj_func, initial_theta, bounds): + rng = np.random.RandomState(global_random_seed) + theta_opt, func_min = initial_theta, obj_func( + initial_theta, eval_gradient=False + ) + for _ in range(10): + theta = np.atleast_1d( + rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1])) + ) + f = obj_func(theta, eval_gradient=False) + if f < func_min: + theta_opt, func_min = theta, f + return theta_opt, func_min + + gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer) + gpc.fit(X, y_mc) + # Checks that optimizer improved marginal likelihood + assert gpc.log_marginal_likelihood( + gpc.kernel_.theta + ) >= gpc.log_marginal_likelihood(kernel.theta) + + +@pytest.mark.parametrize("kernel", kernels) +def test_multi_class(kernel): + # Test GPC for multi-class classification problems. + gpc = GaussianProcessClassifier(kernel=kernel) + gpc.fit(X, y_mc) + + y_prob = gpc.predict_proba(X2) + assert_almost_equal(y_prob.sum(1), 1) + + y_pred = gpc.predict(X2) + assert_array_equal(np.argmax(y_prob, 1), y_pred) + + +@pytest.mark.parametrize("kernel", kernels) +def test_multi_class_n_jobs(kernel): + # Test that multi-class GPC produces identical results with n_jobs>1. + gpc = GaussianProcessClassifier(kernel=kernel) + gpc.fit(X, y_mc) + + gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2) + gpc_2.fit(X, y_mc) + + y_prob = gpc.predict_proba(X2) + y_prob_2 = gpc_2.predict_proba(X2) + assert_almost_equal(y_prob, y_prob_2) + + +def test_warning_bounds(): + kernel = RBF(length_scale_bounds=[1e-5, 1e-3]) + gpc = GaussianProcessClassifier(kernel=kernel) + warning_message = ( + "The optimal value found for dimension 0 of parameter " + "length_scale is close to the specified upper bound " + "0.001. Increasing the bound and calling fit again may " + "find a better value." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + gpc.fit(X, y) + + kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF( + length_scale_bounds=[1e3, 1e5] + ) + gpc_sum = GaussianProcessClassifier(kernel=kernel_sum) + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + gpc_sum.fit(X, y) + + assert len(record) == 2 + + assert issubclass(record[0].category, ConvergenceWarning) + assert ( + record[0].message.args[0] + == "The optimal value found for " + "dimension 0 of parameter " + "k1__noise_level is close to the " + "specified upper bound 0.001. " + "Increasing the bound and calling " + "fit again may find a better value." + ) + + assert issubclass(record[1].category, ConvergenceWarning) + assert ( + record[1].message.args[0] + == "The optimal value found for " + "dimension 0 of parameter " + "k2__length_scale is close to the " + "specified lower bound 1000.0. " + "Decreasing the bound and calling " + "fit again may find a better value." + ) + + X_tile = np.tile(X, 2) + kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2]) + gpc_dims = GaussianProcessClassifier(kernel=kernel_dims) + + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + gpc_dims.fit(X_tile, y) + + assert len(record) == 2 + + assert issubclass(record[0].category, ConvergenceWarning) + assert ( + record[0].message.args[0] + == "The optimal value found for " + "dimension 0 of parameter " + "length_scale is close to the " + "specified upper bound 100.0. " + "Increasing the bound and calling " + "fit again may find a better value." 
+ ) + + assert issubclass(record[1].category, ConvergenceWarning) + assert ( + record[1].message.args[0] + == "The optimal value found for " + "dimension 1 of parameter " + "length_scale is close to the " + "specified upper bound 100.0. " + "Increasing the bound and calling " + "fit again may find a better value." + ) + + +@pytest.mark.parametrize( + "params, error_type, err_msg", + [ + ( + {"kernel": CompoundKernel(0)}, + ValueError, + "kernel cannot be a CompoundKernel", + ) + ], +) +def test_gpc_fit_error(params, error_type, err_msg): + """Check that expected error are raised during fit.""" + gpc = GaussianProcessClassifier(**params) + with pytest.raises(error_type, match=err_msg): + gpc.fit(X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py new file mode 100644 index 0000000000000000000000000000000000000000..d890dc05d9f02af09d1bbedf08a4e55757cdc481 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py @@ -0,0 +1,853 @@ +"""Testing for Gaussian process regression """ + +# Author: Jan Hendrik Metzen +# Modified by: Pete Green +# License: BSD 3 clause + +import re +import sys +import warnings + +import numpy as np +import pytest +from scipy.optimize import approx_fprime + +from sklearn.exceptions import ConvergenceWarning +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import ( + RBF, + DotProduct, + ExpSineSquared, + WhiteKernel, +) +from sklearn.gaussian_process.kernels import ( + ConstantKernel as C, +) +from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_less, +) + + +def f(x): + return x * np.sin(x) + + +X = np.atleast_2d([1.0, 3.0, 5.0, 6.0, 7.0, 8.0]).T +X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T +y = f(X).ravel() + +fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed") +kernels = [ + RBF(length_scale=1.0), + fixed_kernel, + RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), + C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), + C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) + + C(1e-5, (1e-5, 1e2)), + C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) + + C(1e-5, (1e-5, 1e2)), +] +non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel] + + +@pytest.mark.parametrize("kernel", kernels) +def test_gpr_interpolation(kernel): + if sys.maxsize <= 2**32: + pytest.xfail("This test may fail on 32 bit Python") + + # Test the interpolating property for different kernels. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + y_pred, y_cov = gpr.predict(X, return_cov=True) + + assert_almost_equal(y_pred, y) + assert_almost_equal(np.diag(y_cov), 0.0) + + +def test_gpr_interpolation_structured(): + # Test the interpolating property for different kernels. 
+ kernel = MiniSeqKernel(baseline_similarity_bounds="fixed") + X = ["A", "B", "C"] + y = np.array([1, 2, 3]) + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + y_pred, y_cov = gpr.predict(X, return_cov=True) + + assert_almost_equal( + kernel(X, eval_gradient=True)[1].ravel(), (1 - np.eye(len(X))).ravel() + ) + assert_almost_equal(y_pred, y) + assert_almost_equal(np.diag(y_cov), 0.0) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_lml_improving(kernel): + if sys.maxsize <= 2**32: + pytest.xfail("This test may fail on 32 bit Python") + + # Test that hyperparameter-tuning improves log-marginal likelihood. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood( + kernel.theta + ) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_precomputed(kernel): + # Test that lml of optimized kernel is stored correctly. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + assert gpr.log_marginal_likelihood(gpr.kernel_.theta) == pytest.approx( + gpr.log_marginal_likelihood() + ) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_without_cloning_kernel(kernel): + # Test that lml of optimized kernel is stored correctly. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64) + + gpr.log_marginal_likelihood(input_theta, clone_kernel=False) + assert_almost_equal(gpr.kernel_.theta, input_theta, 7) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_converged_to_local_maximum(kernel): + # Test that we are in local maximum after hyperparameter-optimization. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + lml, lml_gradient = gpr.log_marginal_likelihood(gpr.kernel_.theta, True) + + assert np.all( + (np.abs(lml_gradient) < 1e-4) + | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) + | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1]) + ) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_solution_inside_bounds(kernel): + # Test that hyperparameter-optimization remains in bounds# + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + bounds = gpr.kernel_.bounds + max_ = np.finfo(gpr.kernel_.theta.dtype).max + tiny = 1e-10 + bounds[~np.isfinite(bounds[:, 1]), 1] = max_ + + assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny) + assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_gradient(kernel): + # Compare analytic and numeric gradient of log marginal likelihood. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True) + lml_gradient_approx = approx_fprime( + kernel.theta, lambda theta: gpr.log_marginal_likelihood(theta, False), 1e-10 + ) + + assert_almost_equal(lml_gradient, lml_gradient_approx, 3) + + +@pytest.mark.parametrize("kernel", kernels) +def test_prior(kernel): + # Test that GP prior has mean 0 and identical variances. 
+ gpr = GaussianProcessRegressor(kernel=kernel) + + y_mean, y_cov = gpr.predict(X, return_cov=True) + + assert_almost_equal(y_mean, 0, 5) + if len(gpr.kernel.theta) > 1: + # XXX: quite hacky, works only for current kernels + assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5) + else: + assert_almost_equal(np.diag(y_cov), 1, 5) + + +@pytest.mark.parametrize("kernel", kernels) +def test_sample_statistics(kernel): + # Test that statistics of samples drawn from GP are correct. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + y_mean, y_cov = gpr.predict(X2, return_cov=True) + + samples = gpr.sample_y(X2, 300000) + + # More digits accuracy would require many more samples + assert_almost_equal(y_mean, np.mean(samples, 1), 1) + assert_almost_equal( + np.diag(y_cov) / np.diag(y_cov).max(), + np.var(samples, 1) / np.diag(y_cov).max(), + 1, + ) + + +def test_no_optimizer(): + # Test that kernel parameters are unmodified when optimizer is None. + kernel = RBF(1.0) + gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y) + assert np.exp(gpr.kernel_.theta) == 1.0 + + +@pytest.mark.parametrize("kernel", kernels) +@pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)]) +def test_predict_cov_vs_std(kernel, target): + if sys.maxsize <= 2**32: + pytest.xfail("This test may fail on 32 bit Python") + + # Test that predicted std.-dev. is consistent with cov's diagonal. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + y_mean, y_cov = gpr.predict(X2, return_cov=True) + y_mean, y_std = gpr.predict(X2, return_std=True) + assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std) + + +def test_anisotropic_kernel(): + # Test that GPR can identify meaningful anisotropic length-scales. + # We learn a function which varies in one dimension ten-times slower + # than in the other. The corresponding length-scales should differ by at + # least a factor 5 + rng = np.random.RandomState(0) + X = rng.uniform(-1, 1, (50, 2)) + y = X[:, 0] + 0.1 * X[:, 1] + + kernel = RBF([1.0, 1.0]) + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + assert np.exp(gpr.kernel_.theta[1]) > np.exp(gpr.kernel_.theta[0]) * 5 + + +def test_random_starts(): + # Test that an increasing number of random-starts of GP fitting only + # increases the log marginal likelihood of the chosen theta. + n_samples, n_features = 25, 2 + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) * 2 - 1 + y = ( + np.sin(X).sum(axis=1) + + np.sin(3 * X).sum(axis=1) + + rng.normal(scale=0.1, size=n_samples) + ) + + kernel = C(1.0, (1e-2, 1e2)) * RBF( + length_scale=[1.0] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features + ) + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1)) + last_lml = -np.inf + for n_restarts_optimizer in range(5): + gp = GaussianProcessRegressor( + kernel=kernel, + n_restarts_optimizer=n_restarts_optimizer, + random_state=0, + ).fit(X, y) + lml = gp.log_marginal_likelihood(gp.kernel_.theta) + assert lml > last_lml - np.finfo(np.float32).eps + last_lml = lml + + +@pytest.mark.parametrize("kernel", kernels) +def test_y_normalization(kernel): + """ + Test normalization of the target values in GP + + Fitting non-normalizing GP on normalized y and fitting normalizing GP + on unnormalized y should yield identical results. Note that, here, + 'normalized y' refers to y that has been made zero mean and unit + variance. 
+ + """ + + y_mean = np.mean(y) + y_std = np.std(y) + y_norm = (y - y_mean) / y_std + + # Fit non-normalizing GP on normalized y + gpr = GaussianProcessRegressor(kernel=kernel) + gpr.fit(X, y_norm) + + # Fit normalizing GP on unnormalized y + gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr_norm.fit(X, y) + + # Compare predicted mean, std-devs and covariances + y_pred, y_pred_std = gpr.predict(X2, return_std=True) + y_pred = y_pred * y_std + y_mean + y_pred_std = y_pred_std * y_std + y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True) + + assert_almost_equal(y_pred, y_pred_norm) + assert_almost_equal(y_pred_std, y_pred_std_norm) + + _, y_cov = gpr.predict(X2, return_cov=True) + y_cov = y_cov * y_std**2 + _, y_cov_norm = gpr_norm.predict(X2, return_cov=True) + + assert_almost_equal(y_cov, y_cov_norm) + + +def test_large_variance_y(): + """ + Here we test that, when noramlize_y=True, our GP can produce a + sensible fit to training data whose variance is significantly + larger than unity. This test was made in response to issue #15612. + + GP predictions are verified against predictions that were made + using GPy which, here, is treated as the 'gold standard'. Note that we + only investigate the RBF kernel here, as that is what was used in the + GPy implementation. + + The following code can be used to recreate the GPy data: + + -------------------------------------------------------------------------- + import GPy + + kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.) + gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy) + gpy.optimize() + y_pred_gpy, y_var_gpy = gpy.predict(X2) + y_pred_std_gpy = np.sqrt(y_var_gpy) + -------------------------------------------------------------------------- + """ + + # Here we utilise a larger variance version of the training data + y_large = 10 * y + + # Standard GP with normalize_y=True + RBF_params = {"length_scale": 1.0} + kernel = RBF(**RBF_params) + gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr.fit(X, y_large) + y_pred, y_pred_std = gpr.predict(X2, return_std=True) + + # 'Gold standard' mean predictions from GPy + y_pred_gpy = np.array( + [15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589] + ) + + # 'Gold standard' std predictions from GPy + y_pred_std_gpy = np.array( + [7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042] + ) + + # Based on numerical experiments, it's reasonable to expect our + # GP's mean predictions to get within 7% of predictions of those + # made by GPy. + assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0) + + # Based on numerical experiments, it's reasonable to expect our + # GP's std predictions to get within 15% of predictions of those + # made by GPy. 
+ assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0) + + +def test_y_multioutput(): + # Test that GPR can deal with multi-dimensional target values + y_2d = np.vstack((y, y * 2)).T + + # Test for fixed kernel that first dimension of 2d GP equals the output + # of 1d GP and that second dimension is twice as large + kernel = RBF(length_scale=1.0) + + gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False) + gpr.fit(X, y) + + gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False) + gpr_2d.fit(X, y_2d) + + y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True) + y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True) + _, y_cov_1d = gpr.predict(X2, return_cov=True) + _, y_cov_2d = gpr_2d.predict(X2, return_cov=True) + + assert_almost_equal(y_pred_1d, y_pred_2d[:, 0]) + assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2) + + # Standard deviation and covariance do not depend on output + for target in range(y_2d.shape[1]): + assert_almost_equal(y_std_1d, y_std_2d[..., target]) + assert_almost_equal(y_cov_1d, y_cov_2d[..., target]) + + y_sample_1d = gpr.sample_y(X2, n_samples=10) + y_sample_2d = gpr_2d.sample_y(X2, n_samples=10) + + assert y_sample_1d.shape == (5, 10) + assert y_sample_2d.shape == (5, 2, 10) + # Only the first target will be equal + assert_almost_equal(y_sample_1d, y_sample_2d[:, 0, :]) + + # Test hyperparameter optimization + for kernel in kernels: + gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr.fit(X, y) + + gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr_2d.fit(X, np.vstack((y, y)).T) + + assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_custom_optimizer(kernel): + # Test that GPR can use externally defined optimizers. + # Define a dummy optimizer that simply tests 50 random hyperparameters + def optimizer(obj_func, initial_theta, bounds): + rng = np.random.RandomState(0) + theta_opt, func_min = initial_theta, obj_func( + initial_theta, eval_gradient=False + ) + for _ in range(50): + theta = np.atleast_1d( + rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1])) + ) + f = obj_func(theta, eval_gradient=False) + if f < func_min: + theta_opt, func_min = theta, f + return theta_opt, func_min + + gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer) + gpr.fit(X, y) + # Checks that optimizer improved marginal likelihood + assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood( + gpr.kernel.theta + ) + + +def test_gpr_correct_error_message(): + X = np.arange(12).reshape(6, -1) + y = np.ones(6) + kernel = DotProduct() + gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0) + message = ( + "The kernel, %s, is not returning a " + "positive definite matrix. Try gradually increasing " + "the 'alpha' parameter of your " + "GaussianProcessRegressor estimator." % kernel + ) + with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)): + gpr.fit(X, y) + + +@pytest.mark.parametrize("kernel", kernels) +def test_duplicate_input(kernel): + # Test GPR can handle two different output-values for the same input. 
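# --- Editor's sketch (not part of the upstream diff) ---------------------------
# Illustrates the advice in the error message checked above: with a rank-deficient
# DotProduct Gram matrix, a small diagonal jitter via `alpha` lets the Cholesky
# factorization succeed (optimizer=None keeps sigma_0 fixed for this toy example).
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct

X_demo = np.arange(12, dtype=float).reshape(6, -1)  # only 2 features -> low-rank Gram
y_demo = np.ones(6)
gpr = GaussianProcessRegressor(kernel=DotProduct(), alpha=1e-2, optimizer=None)
gpr.fit(X_demo, y_demo)  # no LinAlgError thanks to the jitter
# --------------------------------------------------------------------------------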
+ gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2) + gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2) + + X_ = np.vstack((X, X[0])) + y_ = np.hstack((y, y[0] + 1)) + gpr_equal_inputs.fit(X_, y_) + + X_ = np.vstack((X, X[0] + 1e-15)) + y_ = np.hstack((y, y[0] + 1)) + gpr_similar_inputs.fit(X_, y_) + + X_test = np.linspace(0, 10, 100)[:, None] + y_pred_equal, y_std_equal = gpr_equal_inputs.predict(X_test, return_std=True) + y_pred_similar, y_std_similar = gpr_similar_inputs.predict(X_test, return_std=True) + + assert_almost_equal(y_pred_equal, y_pred_similar) + assert_almost_equal(y_std_equal, y_std_similar) + + +def test_no_fit_default_predict(): + # Test that GPR predictions without fit does not break by default. + default_kernel = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + gpr1 = GaussianProcessRegressor() + _, y_std1 = gpr1.predict(X, return_std=True) + _, y_cov1 = gpr1.predict(X, return_cov=True) + + gpr2 = GaussianProcessRegressor(kernel=default_kernel) + _, y_std2 = gpr2.predict(X, return_std=True) + _, y_cov2 = gpr2.predict(X, return_cov=True) + + assert_array_almost_equal(y_std1, y_std2) + assert_array_almost_equal(y_cov1, y_cov2) + + +def test_warning_bounds(): + kernel = RBF(length_scale_bounds=[1e-5, 1e-3]) + gpr = GaussianProcessRegressor(kernel=kernel) + warning_message = ( + "The optimal value found for dimension 0 of parameter " + "length_scale is close to the specified upper bound " + "0.001. Increasing the bound and calling fit again may " + "find a better value." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + gpr.fit(X, y) + + kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF( + length_scale_bounds=[1e3, 1e5] + ) + gpr_sum = GaussianProcessRegressor(kernel=kernel_sum) + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + gpr_sum.fit(X, y) + + assert len(record) == 2 + + assert issubclass(record[0].category, ConvergenceWarning) + assert ( + record[0].message.args[0] + == "The optimal value found for " + "dimension 0 of parameter " + "k1__noise_level is close to the " + "specified upper bound 0.001. " + "Increasing the bound and calling " + "fit again may find a better value." + ) + + assert issubclass(record[1].category, ConvergenceWarning) + assert ( + record[1].message.args[0] + == "The optimal value found for " + "dimension 0 of parameter " + "k2__length_scale is close to the " + "specified lower bound 1000.0. " + "Decreasing the bound and calling " + "fit again may find a better value." + ) + + X_tile = np.tile(X, 2) + kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2]) + gpr_dims = GaussianProcessRegressor(kernel=kernel_dims) + + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + gpr_dims.fit(X_tile, y) + + assert len(record) == 2 + + assert issubclass(record[0].category, ConvergenceWarning) + assert ( + record[0].message.args[0] + == "The optimal value found for " + "dimension 0 of parameter " + "length_scale is close to the " + "specified lower bound 10.0. " + "Decreasing the bound and calling " + "fit again may find a better value." + ) + + assert issubclass(record[1].category, ConvergenceWarning) + assert ( + record[1].message.args[0] + == "The optimal value found for " + "dimension 1 of parameter " + "length_scale is close to the " + "specified lower bound 10.0. " + "Decreasing the bound and calling " + "fit again may find a better value." 
+ ) + + +def test_bound_check_fixed_hyperparameter(): + # Regression test for issue #17943 + # Check that having a hyperparameter with fixed bounds doesn't cause an + # error + k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend + k2 = ExpSineSquared( + length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed" + ) # seasonal component + kernel = k1 + k2 + GaussianProcessRegressor(kernel=kernel).fit(X, y) + + +@pytest.mark.parametrize("kernel", kernels) +def test_constant_target(kernel): + """Check that the std. dev. is affected to 1 when normalizing a constant + feature. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/18318 + NaN where affected to the target when scaling due to null std. dev. with + constant target. + """ + y_constant = np.ones(X.shape[0], dtype=np.float64) + + gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr.fit(X, y_constant) + assert gpr._y_train_std == pytest.approx(1.0) + + y_pred, y_cov = gpr.predict(X, return_cov=True) + assert_allclose(y_pred, y_constant) + # set atol because we compare to zero + assert_allclose(np.diag(y_cov), 0.0, atol=1e-9) + + # Test multi-target data + n_samples, n_targets = X.shape[0], 2 + rng = np.random.RandomState(0) + y = np.concatenate( + [ + rng.normal(size=(n_samples, 1)), # non-constant target + np.full(shape=(n_samples, 1), fill_value=2), # constant target + ], + axis=1, + ) + + gpr.fit(X, y) + Y_pred, Y_cov = gpr.predict(X, return_cov=True) + + assert_allclose(Y_pred[:, 1], 2) + assert_allclose(np.diag(Y_cov[..., 1]), 0.0, atol=1e-9) + + assert Y_pred.shape == (n_samples, n_targets) + assert Y_cov.shape == (n_samples, n_samples, n_targets) + + +def test_gpr_consistency_std_cov_non_invertible_kernel(): + """Check the consistency between the returned std. dev. and the covariance. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19936 + Inconsistencies were observed when the kernel cannot be inverted (or + numerically stable). 
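# --- Editor's sketch (not part of the upstream diff) ---------------------------
# The constant-target behaviour checked above, in miniature: with normalize_y=True
# a constant target is reproduced exactly and the predictive uncertainty at the
# training points is essentially zero.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X_demo = np.linspace(0.0, 1.0, 8).reshape(-1, 1)
y_const = np.full(8, 3.0)
gpr = GaussianProcessRegressor(kernel=RBF(), normalize_y=True).fit(X_demo, y_const)
y_pred, y_std = gpr.predict(X_demo, return_std=True)
assert np.allclose(y_pred, 3.0)
assert np.allclose(y_std, 0.0, atol=1e-4)
# --------------------------------------------------------------------------------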
+ """ + kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF( + [5.91326520e02, 1.32584051e03], (1e-12, 1e12) + ) + WhiteKernel(noise_level=1e-5) + gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None) + X_train = np.array( + [ + [0.0, 0.0], + [1.54919334, -0.77459667], + [-1.54919334, 0.0], + [0.0, -1.54919334], + [0.77459667, 0.77459667], + [-0.77459667, 1.54919334], + ] + ) + y_train = np.array( + [ + [-2.14882017e-10], + [-4.66975823e00], + [4.01823986e00], + [-1.30303674e00], + [-1.35760156e00], + [3.31215668e00], + ] + ) + gpr.fit(X_train, y_train) + X_test = np.array( + [ + [-1.93649167, -1.93649167], + [1.93649167, -1.93649167], + [-1.93649167, 1.93649167], + [1.93649167, 1.93649167], + ] + ) + pred1, std = gpr.predict(X_test, return_std=True) + pred2, cov = gpr.predict(X_test, return_cov=True) + assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5) + + +@pytest.mark.parametrize( + "params, TypeError, err_msg", + [ + ( + {"alpha": np.zeros(100)}, + ValueError, + "alpha must be a scalar or an array with same number of entries as y", + ), + ( + { + "kernel": WhiteKernel(noise_level_bounds=(-np.inf, np.inf)), + "n_restarts_optimizer": 2, + }, + ValueError, + "requires that all bounds are finite", + ), + ], +) +def test_gpr_fit_error(params, TypeError, err_msg): + """Check that expected error are raised during fit.""" + gpr = GaussianProcessRegressor(**params) + with pytest.raises(TypeError, match=err_msg): + gpr.fit(X, y) + + +def test_gpr_lml_error(): + """Check that we raise the proper error in the LML method.""" + gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y) + + err_msg = "Gradient can only be evaluated for theta!=None" + with pytest.raises(ValueError, match=err_msg): + gpr.log_marginal_likelihood(eval_gradient=True) + + +def test_gpr_predict_error(): + """Check that we raise the proper error during predict.""" + gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y) + + err_msg = "At most one of return_std or return_cov can be requested." + with pytest.raises(RuntimeError, match=err_msg): + gpr.predict(X, return_cov=True, return_std=True) + + +@pytest.mark.parametrize("normalize_y", [True, False]) +@pytest.mark.parametrize("n_targets", [None, 1, 10]) +def test_predict_shapes(normalize_y, n_targets): + """Check the shapes of y_mean, y_std, and y_cov in single-output + (n_targets=None) and multi-output settings, including the edge case when + n_targets=1, where the sklearn convention is to squeeze the predictions. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/17394 + https://github.com/scikit-learn/scikit-learn/issues/18065 + https://github.com/scikit-learn/scikit-learn/issues/22174 + """ + rng = np.random.RandomState(1234) + + n_features, n_samples_train, n_samples_test = 6, 9, 7 + + y_train_shape = (n_samples_train,) + if n_targets is not None: + y_train_shape = y_train_shape + (n_targets,) + + # By convention single-output data is squeezed upon prediction + y_test_shape = (n_samples_test,) + if n_targets is not None and n_targets > 1: + y_test_shape = y_test_shape + (n_targets,) + + X_train = rng.randn(n_samples_train, n_features) + X_test = rng.randn(n_samples_test, n_features) + y_train = rng.randn(*y_train_shape) + + model = GaussianProcessRegressor(normalize_y=normalize_y) + model.fit(X_train, y_train) + + y_pred, y_std = model.predict(X_test, return_std=True) + _, y_cov = model.predict(X_test, return_cov=True) + + assert y_pred.shape == y_test_shape + assert y_std.shape == y_test_shape + assert y_cov.shape == (n_samples_test,) + y_test_shape + + +@pytest.mark.parametrize("normalize_y", [True, False]) +@pytest.mark.parametrize("n_targets", [None, 1, 10]) +def test_sample_y_shapes(normalize_y, n_targets): + """Check the shapes of y_samples in single-output (n_targets=0) and + multi-output settings, including the edge case when n_targets=1, where the + sklearn convention is to squeeze the predictions. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/22175 + """ + rng = np.random.RandomState(1234) + + n_features, n_samples_train = 6, 9 + # Number of spatial locations to predict at + n_samples_X_test = 7 + # Number of sample predictions per test point + n_samples_y_test = 5 + + y_train_shape = (n_samples_train,) + if n_targets is not None: + y_train_shape = y_train_shape + (n_targets,) + + # By convention single-output data is squeezed upon prediction + if n_targets is not None and n_targets > 1: + y_test_shape = (n_samples_X_test, n_targets, n_samples_y_test) + else: + y_test_shape = (n_samples_X_test, n_samples_y_test) + + X_train = rng.randn(n_samples_train, n_features) + X_test = rng.randn(n_samples_X_test, n_features) + y_train = rng.randn(*y_train_shape) + + model = GaussianProcessRegressor(normalize_y=normalize_y) + + # FIXME: before fitting, the estimator does not have information regarding + # the number of targets and default to 1. This is inconsistent with the shape + # provided after `fit`. 
This assert should be made once the following issue + # is fixed: + # https://github.com/scikit-learn/scikit-learn/issues/22430 + # y_samples = model.sample_y(X_test, n_samples=n_samples_y_test) + # assert y_samples.shape == y_test_shape + + model.fit(X_train, y_train) + + y_samples = model.sample_y(X_test, n_samples=n_samples_y_test) + assert y_samples.shape == y_test_shape + + +@pytest.mark.parametrize("n_targets", [None, 1, 2, 3]) +@pytest.mark.parametrize("n_samples", [1, 5]) +def test_sample_y_shape_with_prior(n_targets, n_samples): + """Check the output shape of `sample_y` is consistent before and after `fit`.""" + rng = np.random.RandomState(1024) + + X = rng.randn(10, 3) + y = rng.randn(10, n_targets if n_targets is not None else 1) + + model = GaussianProcessRegressor(n_targets=n_targets) + shape_before_fit = model.sample_y(X, n_samples=n_samples).shape + model.fit(X, y) + shape_after_fit = model.sample_y(X, n_samples=n_samples).shape + assert shape_before_fit == shape_after_fit + + +@pytest.mark.parametrize("n_targets", [None, 1, 2, 3]) +def test_predict_shape_with_prior(n_targets): + """Check the output shape of `predict` with prior distribution.""" + rng = np.random.RandomState(1024) + + n_sample = 10 + X = rng.randn(n_sample, 3) + y = rng.randn(n_sample, n_targets if n_targets is not None else 1) + + model = GaussianProcessRegressor(n_targets=n_targets) + mean_prior, cov_prior = model.predict(X, return_cov=True) + _, std_prior = model.predict(X, return_std=True) + + model.fit(X, y) + mean_post, cov_post = model.predict(X, return_cov=True) + _, std_post = model.predict(X, return_std=True) + + assert mean_prior.shape == mean_post.shape + assert cov_prior.shape == cov_post.shape + assert std_prior.shape == std_post.shape + + +def test_n_targets_error(): + """Check that an error is raised when the number of targets seen at fit is + inconsistent with n_targets. + """ + rng = np.random.RandomState(0) + X = rng.randn(10, 3) + y = rng.randn(10, 2) + + model = GaussianProcessRegressor(n_targets=1) + with pytest.raises(ValueError, match="The number of targets seen in `y`"): + model.fit(X, y) + + +class CustomKernel(C): + """ + A custom kernel that has a diag method that returns the first column of the + input matrix X. This is a helper for the test to check that the input + matrix X is not mutated. + """ + + def diag(self, X): + return X[:, 0] + + +def test_gpr_predict_input_not_modified(): + """ + Check that the input X is not modified by the predict method of the + GaussianProcessRegressor when setting return_std=True. 
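# --- Editor's sketch (not part of the upstream diff) ---------------------------
# The shape conventions exercised above, in one place: with n_targets declared up
# front, sample_y returns (n_points, n_targets, n_samples) both from the prior
# and after fitting, so downstream code does not need to special-case `fit`.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.RandomState(0)
X_demo = rng.randn(10, 3)
model = GaussianProcessRegressor(n_targets=2)
assert model.sample_y(X_demo, n_samples=4).shape == (10, 2, 4)
model.fit(X_demo, rng.randn(10, 2))
assert model.sample_y(X_demo, n_samples=4).shape == (10, 2, 4)
# --------------------------------------------------------------------------------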
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/24340 + """ + gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y) + + X2_copy = np.copy(X2) + _, _ = gpr.predict(X2, return_std=True) + + assert_allclose(X2, X2_copy) diff --git a/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py new file mode 100644 index 0000000000000000000000000000000000000000..8733f94c94e06b2b2d205200cf5e08eb2fe20a22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py @@ -0,0 +1,388 @@ +"""Testing for kernels for Gaussian processes.""" + +# Author: Jan Hendrik Metzen +# License: BSD 3 clause + +from inspect import signature + +import numpy as np +import pytest + +from sklearn.base import clone +from sklearn.gaussian_process.kernels import ( + RBF, + CompoundKernel, + ConstantKernel, + DotProduct, + Exponentiation, + ExpSineSquared, + KernelOperator, + Matern, + PairwiseKernel, + RationalQuadratic, + WhiteKernel, + _approx_fprime, +) +from sklearn.metrics.pairwise import ( + PAIRWISE_KERNEL_FUNCTIONS, + euclidean_distances, + pairwise_kernels, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +X = np.random.RandomState(0).normal(0, 1, (5, 2)) +Y = np.random.RandomState(0).normal(0, 1, (6, 2)) + +kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0) +kernels = [ + RBF(length_scale=2.0), + RBF(length_scale_bounds=(0.5, 2.0)), + ConstantKernel(constant_value=10.0), + 2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"), + 2.0 * RBF(length_scale=0.5), + kernel_rbf_plus_white, + 2.0 * RBF(length_scale=[0.5, 2.0]), + 2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"), + 2.0 * Matern(length_scale=0.5, nu=0.5), + 2.0 * Matern(length_scale=1.5, nu=1.5), + 2.0 * Matern(length_scale=2.5, nu=2.5), + 2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5), + 3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5), + 4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5), + RationalQuadratic(length_scale=0.5, alpha=1.5), + ExpSineSquared(length_scale=0.5, periodicity=1.5), + DotProduct(sigma_0=2.0), + DotProduct(sigma_0=2.0) ** 2, + RBF(length_scale=[2.0]), + Matern(length_scale=[2.0]), +] +for metric in PAIRWISE_KERNEL_FUNCTIONS: + if metric in ["additive_chi2", "chi2"]: + continue + kernels.append(PairwiseKernel(gamma=1.0, metric=metric)) + + +@pytest.mark.parametrize("kernel", kernels) +def test_kernel_gradient(kernel): + # Compare analytic and numeric gradient of kernels. + K, K_gradient = kernel(X, eval_gradient=True) + + assert K_gradient.shape[0] == X.shape[0] + assert K_gradient.shape[1] == X.shape[0] + assert K_gradient.shape[2] == kernel.theta.shape[0] + + def eval_kernel_for_theta(theta): + kernel_clone = kernel.clone_with_theta(theta) + K = kernel_clone(X, eval_gradient=False) + return K + + K_gradient_approx = _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10) + + assert_almost_equal(K_gradient, K_gradient_approx, 4) + + +@pytest.mark.parametrize( + "kernel", + [ + kernel + for kernel in kernels + # skip non-basic kernels + if not (isinstance(kernel, (KernelOperator, Exponentiation))) + ], +) +def test_kernel_theta(kernel): + # Check that parameter vector theta of kernel is set correctly. 
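+ #
+ # Background for the checks below: ``kernel.theta`` is the flattened vector
+ # of the natural logarithms of all non-fixed hyperparameter values, e.g. (an
+ # illustration only, not exercised by this test):
+ #
+ #     RBF(length_scale=2.0).theta        -> np.log([2.0])
+ #     (2.0 * RBF([1.0, 3.0])).theta      -> np.log([2.0, 1.0, 3.0])
+ #
+ # The log transform lets optimizers search on an unconstrained scale.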
+ theta = kernel.theta + _, K_gradient = kernel(X, eval_gradient=True) + + # Determine kernel parameters that contribute to theta + init_sign = signature(kernel.__class__.__init__).parameters.values() + args = [p.name for p in init_sign if p.name != "self"] + theta_vars = map( + lambda s: s[0 : -len("_bounds")], filter(lambda s: s.endswith("_bounds"), args) + ) + assert set(hyperparameter.name for hyperparameter in kernel.hyperparameters) == set( + theta_vars + ) + + # Check that values returned in theta are consistent with + # hyperparameter values (being their logarithms) + for i, hyperparameter in enumerate(kernel.hyperparameters): + assert theta[i] == np.log(getattr(kernel, hyperparameter.name)) + + # Fixed kernel parameters must be excluded from theta and gradient. + for i, hyperparameter in enumerate(kernel.hyperparameters): + # create copy with certain hyperparameter fixed + params = kernel.get_params() + params[hyperparameter.name + "_bounds"] = "fixed" + kernel_class = kernel.__class__ + new_kernel = kernel_class(**params) + # Check that theta and K_gradient are identical with the fixed + # dimension left out + _, K_gradient_new = new_kernel(X, eval_gradient=True) + assert theta.shape[0] == new_kernel.theta.shape[0] + 1 + assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1 + if i > 0: + assert theta[:i] == new_kernel.theta[:i] + assert_array_equal(K_gradient[..., :i], K_gradient_new[..., :i]) + if i + 1 < len(kernel.hyperparameters): + assert theta[i + 1 :] == new_kernel.theta[i:] + assert_array_equal(K_gradient[..., i + 1 :], K_gradient_new[..., i:]) + + # Check that values of theta are modified correctly + for i, hyperparameter in enumerate(kernel.hyperparameters): + theta[i] = np.log(42) + kernel.theta = theta + assert_almost_equal(getattr(kernel, hyperparameter.name), 42) + + setattr(kernel, hyperparameter.name, 43) + assert_almost_equal(kernel.theta[i], np.log(43)) + + +@pytest.mark.parametrize( + "kernel", + [ + kernel + for kernel in kernels + # Identity is not satisfied on diagonal + if kernel != kernel_rbf_plus_white + ], +) +def test_auto_vs_cross(kernel): + # Auto-correlation and cross-correlation should be consistent. + K_auto = kernel(X) + K_cross = kernel(X, X) + assert_almost_equal(K_auto, K_cross, 5) + + +@pytest.mark.parametrize("kernel", kernels) +def test_kernel_diag(kernel): + # Test that diag method of kernel returns consistent results. + K_call_diag = np.diag(kernel(X)) + K_diag = kernel.diag(X) + assert_almost_equal(K_call_diag, K_diag, 5) + + +def test_kernel_operator_commutative(): + # Adding kernels and multiplying kernels should be commutative. + # Check addition + assert_almost_equal((RBF(2.0) + 1.0)(X), (1.0 + RBF(2.0))(X)) + + # Check multiplication + assert_almost_equal((3.0 * RBF(2.0))(X), (RBF(2.0) * 3.0)(X)) + + +def test_kernel_anisotropic(): + # Anisotropic kernel should be consistent with isotropic kernels. + kernel = 3.0 * RBF([0.5, 2.0]) + + K = kernel(X) + X1 = np.array(X) + X1[:, 0] *= 4 + K1 = 3.0 * RBF(2.0)(X1) + assert_almost_equal(K, K1) + + X2 = np.array(X) + X2[:, 1] /= 4 + K2 = 3.0 * RBF(0.5)(X2) + assert_almost_equal(K, K2) + + # Check getting and setting via theta + kernel.theta = kernel.theta + np.log(2) + assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0])) + assert_array_equal(kernel.k2.length_scale, [1.0, 4.0]) + + +@pytest.mark.parametrize( + "kernel", [kernel for kernel in kernels if kernel.is_stationary()] +) +def test_kernel_stationary(kernel): + # Test stationarity of kernels. 
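+ #
+ # For a stationary kernel, k(x, y) depends only on the difference x - y.
+ # Every pair (X[i], X[i] + 1) below has the same difference vector of ones,
+ # so all diagonal entries of kernel(X, X + 1) must coincide. As a concrete
+ # illustration (not asserted here): for an isotropic RBF with length_scale=1
+ # on the 2-feature X above, each such entry equals exp(-||(1, 1)||**2 / 2),
+ # i.e. exp(-1).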
+ K = kernel(X, X + 1) + assert_almost_equal(K[0, 0], np.diag(K)) + + +@pytest.mark.parametrize("kernel", kernels) +def test_kernel_input_type(kernel): + # Test whether kernels is for vectors or structured data + if isinstance(kernel, Exponentiation): + assert kernel.requires_vector_input == kernel.kernel.requires_vector_input + if isinstance(kernel, KernelOperator): + assert kernel.requires_vector_input == ( + kernel.k1.requires_vector_input or kernel.k2.requires_vector_input + ) + + +def test_compound_kernel_input_type(): + kernel = CompoundKernel([WhiteKernel(noise_level=3.0)]) + assert not kernel.requires_vector_input + + kernel = CompoundKernel([WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)]) + assert kernel.requires_vector_input + + +def check_hyperparameters_equal(kernel1, kernel2): + # Check that hyperparameters of two kernels are equal + for attr in set(dir(kernel1) + dir(kernel2)): + if attr.startswith("hyperparameter_"): + attr_value1 = getattr(kernel1, attr) + attr_value2 = getattr(kernel2, attr) + assert attr_value1 == attr_value2 + + +@pytest.mark.parametrize("kernel", kernels) +def test_kernel_clone(kernel): + # Test that sklearn's clone works correctly on kernels. + kernel_cloned = clone(kernel) + + # XXX: Should this be fixed? + # This differs from the sklearn's estimators equality check. + assert kernel == kernel_cloned + assert id(kernel) != id(kernel_cloned) + + # Check that all constructor parameters are equal. + assert kernel.get_params() == kernel_cloned.get_params() + + # Check that all hyperparameters are equal. + check_hyperparameters_equal(kernel, kernel_cloned) + + +@pytest.mark.parametrize("kernel", kernels) +def test_kernel_clone_after_set_params(kernel): + # This test is to verify that using set_params does not + # break clone on kernels. + # This used to break because in kernels such as the RBF, non-trivial + # logic that modified the length scale used to be in the constructor + # See https://github.com/scikit-learn/scikit-learn/issues/6961 + # for more details. + bounds = (1e-5, 1e5) + kernel_cloned = clone(kernel) + params = kernel.get_params() + # RationalQuadratic kernel is isotropic. + isotropic_kernels = (ExpSineSquared, RationalQuadratic) + if "length_scale" in params and not isinstance(kernel, isotropic_kernels): + length_scale = params["length_scale"] + if np.iterable(length_scale): + # XXX unreached code as of v0.22 + params["length_scale"] = length_scale[0] + params["length_scale_bounds"] = bounds + else: + params["length_scale"] = [length_scale] * 2 + params["length_scale_bounds"] = bounds * 2 + kernel_cloned.set_params(**params) + kernel_cloned_clone = clone(kernel_cloned) + assert kernel_cloned_clone.get_params() == kernel_cloned.get_params() + assert id(kernel_cloned_clone) != id(kernel_cloned) + check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone) + + +def test_matern_kernel(): + # Test consistency of Matern kernel for special values of nu. 
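+ #
+ # Closed forms relied on below, with distance d and length scale l:
+ #   nu=0.5 : k(d) = exp(-d / l)                         (absolute exponential)
+ #   nu=1.5 : k(d) = (1 + sqrt(3) * d / l) * exp(-sqrt(3) * d / l)
+ #   nu=2.5 : k(d) = (1 + sqrt(5) * d / l + 5 * d**2 / (3 * l**2)) * exp(-sqrt(5) * d / l)
+ #   nu=inf : k(d) = exp(-d**2 / (2 * l**2))             (RBF limit)
+ # These are standard identities; the assertions below use the nu=0.5 and
+ # nu=inf cases directly and numerical closeness for the remaining ones.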
+ K = Matern(nu=1.5, length_scale=1.0)(X) + # the diagonal elements of a matern kernel are 1 + assert_array_almost_equal(np.diag(K), np.ones(X.shape[0])) + # matern kernel for coef0==0.5 is equal to absolute exponential kernel + K_absexp = np.exp(-euclidean_distances(X, X, squared=False)) + K = Matern(nu=0.5, length_scale=1.0)(X) + assert_array_almost_equal(K, K_absexp) + # matern kernel with coef0==inf is equal to RBF kernel + K_rbf = RBF(length_scale=1.0)(X) + K = Matern(nu=np.inf, length_scale=1.0)(X) + assert_array_almost_equal(K, K_rbf) + assert_allclose(K, K_rbf) + # test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5]) + # result in nearly identical results as the general case for coef0 in + # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny] + tiny = 1e-10 + for nu in [0.5, 1.5, 2.5]: + K1 = Matern(nu=nu, length_scale=1.0)(X) + K2 = Matern(nu=nu + tiny, length_scale=1.0)(X) + assert_array_almost_equal(K1, K2) + # test that coef0==large is close to RBF + large = 100 + K1 = Matern(nu=large, length_scale=1.0)(X) + K2 = RBF(length_scale=1.0)(X) + assert_array_almost_equal(K1, K2, decimal=2) + + +@pytest.mark.parametrize("kernel", kernels) +def test_kernel_versus_pairwise(kernel): + # Check that GP kernels can also be used as pairwise kernels. + + # Test auto-kernel + if kernel != kernel_rbf_plus_white: + # For WhiteKernel: k(X) != k(X,X). This is assumed by + # pairwise_kernels + K1 = kernel(X) + K2 = pairwise_kernels(X, metric=kernel) + assert_array_almost_equal(K1, K2) + + # Test cross-kernel + K1 = kernel(X, Y) + K2 = pairwise_kernels(X, Y, metric=kernel) + assert_array_almost_equal(K1, K2) + + +@pytest.mark.parametrize("kernel", kernels) +def test_set_get_params(kernel): + # Check that set_params()/get_params() is consistent with kernel.theta. + + # Test get_params() + index = 0 + params = kernel.get_params() + for hyperparameter in kernel.hyperparameters: + if isinstance("string", type(hyperparameter.bounds)): + if hyperparameter.bounds == "fixed": + continue + size = hyperparameter.n_elements + if size > 1: # anisotropic kernels + assert_almost_equal( + np.exp(kernel.theta[index : index + size]), params[hyperparameter.name] + ) + index += size + else: + assert_almost_equal( + np.exp(kernel.theta[index]), params[hyperparameter.name] + ) + index += 1 + # Test set_params() + index = 0 + value = 10 # arbitrary value + for hyperparameter in kernel.hyperparameters: + if isinstance("string", type(hyperparameter.bounds)): + if hyperparameter.bounds == "fixed": + continue + size = hyperparameter.n_elements + if size > 1: # anisotropic kernels + kernel.set_params(**{hyperparameter.name: [value] * size}) + assert_almost_equal( + np.exp(kernel.theta[index : index + size]), [value] * size + ) + index += size + else: + kernel.set_params(**{hyperparameter.name: value}) + assert_almost_equal(np.exp(kernel.theta[index]), value) + index += 1 + + +@pytest.mark.parametrize("kernel", kernels) +def test_repr_kernels(kernel): + # Smoke-test for repr in kernels. 
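+ #
+ # A smoke test only checks that the call does not raise. For orientation,
+ # kernel reprs are composed from their hyperparameters, so
+ # repr(RBF(length_scale=2.0)) is expected to render roughly as
+ # "RBF(length_scale=2)"; the exact formatting is deliberately not asserted.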
+ + repr(kernel) + + +def test_rational_quadratic_kernel(): + kernel = RationalQuadratic(length_scale=[1.0, 1.0]) + message = ( + "RationalQuadratic kernel only supports isotropic " + "version, please use a single " + "scalar for length_scale" + ) + with pytest.raises(AttributeError, match=message): + kernel(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa71199c84db85dec6ed25c549580bf6c1f1e05c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1704754449e2319280783de8e60691a3c94b7ae0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52b88d369ffeab14b0726e6fb5f25ea5d071755f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fd351e2f0c1c4283263288fd006e58b7ed6a10e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f722d3f0bf7eeeb271808eb0f0c0d9c19c7a87dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50baba5a8fc832617670716eb6fe07c502a76381 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28c03626659949f1d41747a347c93846ab2a9d1d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4debd0dbca6e94a8ae6a24c85b1ae2ba04e096b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6366d84ebbe1071189742783f799c0327d11b280 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36289dec6ba2ef499685b9ef9abd83fa358eea35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ee49b26860456874d318567c682fb80efd8e73b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abca1189e39d8e09369a1e28120dc2ddf7ae97d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55b31f55efe974060630ff1c027de56f07367445 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d38943f7cfac1aeeae5489a42b025bd9a207c175 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..907e56fe5d0b1f80c7b28b08f9bf0798a5419b1b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c951902519333953aa238191e5da1c11c172f2fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21e1f21801c606518acb67142d7afac1b3e1aca6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b82bbd77bcf9a16040ac2cebb3f655811bbff84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py @@ -0,0 +1,15 @@ +# License: BSD 3 clause + +from .glm import ( + GammaRegressor, + PoissonRegressor, + TweedieRegressor, + _GeneralizedLinearRegressor, +) + +__all__ = [ + "_GeneralizedLinearRegressor", + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f28c68f7bc19fa6dcbe2c5cb083e375670db26bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91257d83c8d5b7346fef13c79e20398b9f10e0a3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6c35e6b2a8ba120a9c4ea4002b876e4bdc5847e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9b431fd2377dba50a6fabd703ae7c0334033e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py @@ -0,0 +1,525 @@ +""" +Newton solver for Generalized Linear Models +""" + +# Author: Christian Lorentzen +# License: BSD 3 clause + +import warnings +from abc import ABC, abstractmethod + +import numpy as np +import scipy.linalg +import scipy.optimize + +from ..._loss.loss import HalfSquaredError +from ...exceptions import ConvergenceWarning +from ...utils.optimize import 
_check_optimize_result +from .._linear_loss import LinearModelLoss + + +class NewtonSolver(ABC): + """Newton solver for GLMs. + + This class implements Newton/2nd-order optimization routines for GLMs. Each Newton + iteration aims at finding the Newton step which is done by the inner solver. With + Hessian H, gradient g and coefficients coef, one step solves: + + H @ coef_newton = -g + + For our GLM / LinearModelLoss, we have gradient g and Hessian H: + + g = X.T @ loss.gradient + l2_reg_strength * coef + H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity + + Backtracking line search updates coef = coef_old + t * coef_newton for some t in + (0, 1]. + + This is a base class, actual implementations (child classes) may deviate from the + above pattern and use structure specific tricks. + + Usage pattern: + - initialize solver: sol = NewtonSolver(...) + - solve the problem: sol.solve(X, y, sample_weight) + + References + ---------- + - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization" + 2nd edition + https://doi.org/10.1007/978-0-387-40065-5 + + - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization." + Cambridge University Press, 2004. + https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Initial coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + + linear_loss : LinearModelLoss + The loss to be minimized. + + l2_reg_strength : float, default=0.0 + L2 regularization strength. + + tol : float, default=1e-4 + The optimization problem is solved when each of the following condition is + fulfilled: + 1. maximum |gradient| <= tol + 2. Newton decrement d: 1/2 * d^2 <= tol + + max_iter : int, default=100 + Maximum number of Newton steps allowed. + + n_threads : int, default=1 + Number of OpenMP threads to use for the computation of the Hessian and gradient + of the loss function. + + Attributes + ---------- + coef_old : ndarray of shape coef.shape + Coefficient of previous iteration. + + coef_newton : ndarray of shape coef.shape + Newton step. + + gradient : ndarray of shape coef.shape + Gradient of the loss w.r.t. the coefficients. + + gradient_old : ndarray of shape coef.shape + Gradient of previous iteration. + + loss_value : float + Value of objective function = loss + penalty. + + loss_value_old : float + Value of objective function of previous itertion. + + raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes) + + converged : bool + Indicator for convergence of the solver. + + iteration : int + Number of Newton steps, i.e. calls to inner_solve + + use_fallback_lbfgs_solve : bool + If set to True, the solver will resort to call LBFGS to finish the optimisation + procedure in case of convergence issues. + + gradient_times_newton : float + gradient @ coef_newton, set in inner_solve and used by line_search. If the + Newton step is a descent direction, this is negative. 
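+
+ Examples
+ --------
+ A minimal sketch (illustrative only): it uses the concrete
+ NewtonCholeskySolver defined below, mirrors how glm.py drives the solver,
+ and assumes hypothetical training data ``X, y``::
+
+ loss = LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True)
+ coef = loss.init_zero_coef(X, dtype=X.dtype)
+ solver = NewtonCholeskySolver(coef=coef, linear_loss=loss, l2_reg_strength=1.0)
+ coef = solver.solve(X, y, sample_weight=None)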
+ """ + + def __init__( + self, + *, + coef, + linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True), + l2_reg_strength=0.0, + tol=1e-4, + max_iter=100, + n_threads=1, + verbose=0, + ): + self.coef = coef + self.linear_loss = linear_loss + self.l2_reg_strength = l2_reg_strength + self.tol = tol + self.max_iter = max_iter + self.n_threads = n_threads + self.verbose = verbose + + def setup(self, X, y, sample_weight): + """Precomputations + + If None, initializes: + - self.coef + Sets: + - self.raw_prediction + - self.loss_value + """ + _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X) + self.loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=self.raw_prediction, + ) + + @abstractmethod + def update_gradient_hessian(self, X, y, sample_weight): + """Update gradient and Hessian.""" + + @abstractmethod + def inner_solve(self, X, y, sample_weight): + """Compute Newton step. + + Sets: + - self.coef_newton + - self.gradient_times_newton + """ + + def fallback_lbfgs_solve(self, X, y, sample_weight): + """Fallback solver in case of emergency. + + If a solver detects convergence problems, it may fall back to this methods in + the hope to exit with success instead of raising an error. + + Sets: + - self.coef + - self.converged + """ + opt_res = scipy.optimize.minimize( + self.linear_loss.loss_gradient, + self.coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + "ftol": 64 * np.finfo(np.float64).eps, + }, + args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res) + self.coef = opt_res.x + self.converged = opt_res.status == 0 + + def line_search(self, X, y, sample_weight): + """Backtracking line search. + + Sets: + - self.coef_old + - self.coef + - self.loss_value_old + - self.loss_value + - self.gradient_old + - self.gradient + - self.raw_prediction + """ + # line search parameters + beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11 + eps = 16 * np.finfo(self.loss_value.dtype).eps + t = 1 # step size + + # gradient_times_newton = self.gradient @ self.coef_newton + # was computed in inner_solve. + armijo_term = sigma * self.gradient_times_newton + _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw( + self.coef_newton, X + ) + + self.coef_old = self.coef + self.loss_value_old = self.loss_value + self.gradient_old = self.gradient + + # np.sum(np.abs(self.gradient_old)) + sum_abs_grad_old = -1 + + is_verbose = self.verbose >= 2 + if is_verbose: + print(" Backtracking Line Search") + print(f" eps=10 * finfo.eps={eps}") + + for i in range(21): # until and including t = beta**20 ~ 1e-6 + self.coef = self.coef_old + t * self.coef_newton + raw = self.raw_prediction + t * raw_prediction_newton + self.loss_value, self.gradient = self.linear_loss.loss_gradient( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=raw, + ) + # Note: If coef_newton is too large, loss_gradient may produce inf values, + # potentially accompanied by a RuntimeWarning. + # This case will be captured by the Armijo condition. + + # 1. Check Armijo / sufficient decrease condition. + # The smaller (more negative) the better. 
+ loss_improvement = self.loss_value - self.loss_value_old + check = loss_improvement <= t * armijo_term + if is_verbose: + print( + f" line search iteration={i+1}, step size={t}\n" + f" check loss improvement <= armijo term: {loss_improvement} " + f"<= {t * armijo_term} {check}" + ) + if check: + break + # 2. Deal with relative loss differences around machine precision. + tiny_loss = np.abs(self.loss_value_old * eps) + check = np.abs(loss_improvement) <= tiny_loss + if is_verbose: + print( + " check loss |improvement| <= eps * |loss_old|:" + f" {np.abs(loss_improvement)} <= {tiny_loss} {check}" + ) + if check: + if sum_abs_grad_old < 0: + sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1) + # 2.1 Check sum of absolute gradients as alternative condition. + sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1) + check = sum_abs_grad < sum_abs_grad_old + if is_verbose: + print( + " check sum(|gradient|) < sum(|gradient_old|): " + f"{sum_abs_grad} < {sum_abs_grad_old} {check}" + ) + if check: + break + + t *= beta + else: + warnings.warn( + ( + f"Line search of Newton solver {self.__class__.__name__} at" + f" iteration #{self.iteration} did not converge after 21 line search" + " refinement iterations. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print(" Line search did not converge and resorts to lbfgs instead.") + self.use_fallback_lbfgs_solve = True + return + + self.raw_prediction = raw + + def check_convergence(self, X, y, sample_weight): + """Check for convergence. + + Sets self.converged. + """ + if self.verbose: + print(" Check Convergence") + # Note: Checking maximum relative change of coefficient <= tol is a bad + # convergence criterion because even a large step could have brought us close + # to the true minimum. + # coef_step = self.coef - self.coef_old + # check = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old))) + + # 1. Criterion: maximum |gradient| <= tol + # The gradient was already updated in line_search() + check = np.max(np.abs(self.gradient)) + if self.verbose: + print(f" 1. max |gradient| {check} <= {self.tol}") + if check > self.tol: + return + + # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol + # d = sqrt(grad @ hessian^-1 @ grad) + # = sqrt(coef_newton @ hessian @ coef_newton) + # See Boyd, Vandenberghe (2004) "Convex Optimization" Chapter 9.5.1. + d2 = self.coef_newton @ self.hessian @ self.coef_newton + if self.verbose: + print(f" 2. Newton decrement {0.5 * d2} <= {self.tol}") + if 0.5 * d2 > self.tol: + return + + if self.verbose: + loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + ) + print(f" Solver did converge at loss = {loss_value}.") + self.converged = True + + def finalize(self, X, y, sample_weight): + """Finalize the solver's results. + + Some solvers may need this, others not. + """ + pass + + def solve(self, X, y, sample_weight): + """Solve the optimization problem. + + This is the main routine. + + Order of calls: + self.setup() + while iteration: + self.update_gradient_hessian() + self.inner_solve() + self.line_search() + self.check_convergence() + self.finalize() + + Returns + ------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Solution of the optimization problem.
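+
+ Notes
+ -----
+ Besides returning ``coef``, ``solve`` records its progress on the instance:
+ ``self.converged`` reports whether a convergence criterion was met and
+ ``self.iteration`` holds the number of Newton steps performed; when the
+ lbfgs fallback is triggered, ``self.n_iter_`` is set by that code path
+ instead.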
+ """ + # setup usually: + # - initializes self.coef if needed + # - initializes and calculates self.raw_predictions, self.loss_value + self.setup(X=X, y=y, sample_weight=sample_weight) + + self.iteration = 1 + self.converged = False + self.use_fallback_lbfgs_solve = False + + while self.iteration <= self.max_iter and not self.converged: + if self.verbose: + print(f"Newton iter={self.iteration}") + + self.use_fallback_lbfgs_solve = False # Fallback solver. + + # 1. Update Hessian and gradient + self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight) + + # TODO: + # if iteration == 1: + # We might stop early, e.g. we already are close to the optimum, + # usually detected by zero gradients at this stage. + + # 2. Inner solver + # Calculate Newton step/direction + # This usually sets self.coef_newton and self.gradient_times_newton. + self.inner_solve(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 3. Backtracking line search + # This usually sets self.coef_old, self.coef, self.loss_value_old + # self.loss_value, self.gradient_old, self.gradient, + # self.raw_prediction. + self.line_search(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 4. Check convergence + # Sets self.converged. + self.check_convergence(X=X, y=y, sample_weight=sample_weight) + + # 5. Next iteration + self.iteration += 1 + + if not self.converged: + if self.use_fallback_lbfgs_solve: + # Note: The fallback solver circumvents check_convergence and relies on + # the convergence checks of lbfgs instead. Enough warnings have been + # raised on the way. + self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight) + else: + warnings.warn( + ( + f"Newton solver did not converge after {self.iteration - 1} " + "iterations." + ), + ConvergenceWarning, + ) + + self.iteration -= 1 + self.finalize(X=X, y=y, sample_weight=sample_weight) + return self.coef + + +class NewtonCholeskySolver(NewtonSolver): + """Cholesky based Newton solver. + + Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear + solver. + """ + + def setup(self, X, y, sample_weight): + super().setup(X=X, y=y, sample_weight=sample_weight) + n_dof = X.shape[1] + if self.linear_loss.fit_intercept: + n_dof += 1 + self.gradient = np.empty_like(self.coef) + self.hessian = np.empty_like(self.coef, shape=(n_dof, n_dof)) + + def update_gradient_hessian(self, X, y, sample_weight): + _, _, self.hessian_warning = self.linear_loss.gradient_hessian( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + gradient_out=self.gradient, + hessian_out=self.hessian, + raw_prediction=self.raw_prediction, # this was updated in line_search + ) + + def inner_solve(self, X, y, sample_weight): + if self.hessian_warning: + warnings.warn( + ( + f"The inner solver of {self.__class__.__name__} detected a " + "pointwise hessian with many negative values at iteration " + f"#{self.iteration}. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print( + " The inner solver detected a pointwise Hessian with many " + "negative values and resorts to lbfgs instead." 
+ ) + self.use_fallback_lbfgs_solve = True + return + + try: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + self.coef_newton = scipy.linalg.solve( + self.hessian, -self.gradient, check_finite=False, assume_a="sym" + ) + self.gradient_times_newton = self.gradient @ self.coef_newton + if self.gradient_times_newton > 0: + if self.verbose: + print( + " The inner solver found a Newton step that is not a " + "descent direction and resorts to LBFGS steps instead." + ) + self.use_fallback_lbfgs_solve = True + return + except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e: + warnings.warn( + f"The inner solver of {self.__class__.__name__} stumbled upon a " + "singular or very ill-conditioned Hessian matrix at iteration " + f"#{self.iteration}. It will now resort to lbfgs instead.\n" + "Further options are to use another solver or to avoid such situation " + "in the first place. Possible remedies are removing collinear features" + " of X or increasing the penalization strengths.\n" + "The original Linear Algebra message was:\n" + + str(e), + scipy.linalg.LinAlgWarning, + ) + # Possible causes: + # 1. hess_pointwise is negative. But this is already taken care in + # LinearModelLoss.gradient_hessian. + # 2. X is singular or ill-conditioned + # This might be the most probable cause. + # + # There are many possible ways to deal with this situation. Most of them + # add, explicitly or implicitly, a matrix to the hessian to make it + # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed. + # Instead, we resort to lbfgs. + if self.verbose: + print( + " The inner solver stumbled upon an singular or ill-conditioned " + "Hessian matrix and resorts to LBFGS instead." + ) + self.use_fallback_lbfgs_solve = True + return diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py new file mode 100644 index 0000000000000000000000000000000000000000..4cac889a4da518e3116c3243be5d3701c34d1b68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py @@ -0,0 +1,904 @@ +""" +Generalized Linear Models with Exponential Dispersion Family +""" + +# Author: Christian Lorentzen +# some parts and tricks stolen from other sklearn files. +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np +import scipy.optimize + +from ..._loss.loss import ( + HalfGammaLoss, + HalfPoissonLoss, + HalfSquaredError, + HalfTweedieLoss, + HalfTweedieLossIdentity, +) +from ...base import BaseEstimator, RegressorMixin, _fit_context +from ...utils import check_array +from ...utils._openmp_helpers import _openmp_effective_n_threads +from ...utils._param_validation import Hidden, Interval, StrOptions +from ...utils.optimize import _check_optimize_result +from ...utils.validation import _check_sample_weight, check_is_fitted +from .._linear_loss import LinearModelLoss +from ._newton_solver import NewtonCholeskySolver, NewtonSolver + + +class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator): + """Regression via a penalized Generalized Linear Model (GLM). + + GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and + predicting the mean of the target y as y_pred=h(X*w) with coefficients w. 
+ Therefore, the fit minimizes the following objective function with L2 priors as + regularizer:: + + 1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w)) + 1/2 * alpha * ||w||_2^2 + + with inverse link function h, s=sample_weight and per observation (unit) deviance + deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative + log-likelihood up to a constant (in w) term. + The parameter ``alpha`` corresponds to the lambda parameter in glmnet. + + Instead of implementing the EDM family and a link function separately, we directly + use the loss functions `from sklearn._loss` which have the link functions included + in them for performance reasons. We pick the loss functions that implement + (1/2 times) EDM deviances. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the penalty term and thus determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (X @ coef + intercept). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_``. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + _base_loss : BaseLoss, default=HalfSquaredError() + This is set during fit via `self._get_loss()`. + A `_base_loss` contains a specific loss function as well as the link + function. The loss to be minimized specifies the distributional assumption of + the GLM, i.e. the distribution from the EDM. 
Here are some examples: + + ======================= ======== ========================== + _base_loss Link Target Domain + ======================= ======== ========================== + HalfSquaredError identity y any real number + HalfPoissonLoss log 0 <= y + HalfGammaLoss log 0 < y + HalfTweedieLoss log dependent on tweedie power + HalfTweedieLossIdentity identity dependent on tweedie power + ======================= ======== ========================== + + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link, + we have `y_pred = exp(X @ coeff + intercept)`. + """ + + # We allow for NewtonSolver classes for the "solver" parameter but do not + # make them public in the docstrings. This facilitates testing and + # benchmarking. + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0.0, None, closed="left")], + "fit_intercept": ["boolean"], + "solver": [ + StrOptions({"lbfgs", "newton-cholesky"}), + Hidden(type), + ], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "warm_start": ["boolean"], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.solver = solver + self.max_iter = max_iter + self.tol = tol + self.warm_start = warm_start + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit a Generalized Linear Model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Fitted model. + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csc", "csr"], + dtype=[np.float64, np.float32], + y_numeric=True, + multi_output=False, + ) + + # required by losses + if self.solver == "lbfgs": + # lbfgs will force coef and therefore raw_prediction to be float64. The + # base_loss needs y, X @ coef and sample_weight all of same dtype + # (and contiguous). + loss_dtype = np.float64 + else: + loss_dtype = min(max(y.dtype, X.dtype), np.float64) + y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. + sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype) + + n_samples, n_features = X.shape + self._base_loss = self._get_loss() + + linear_loss = LinearModelLoss( + base_loss=self._base_loss, + fit_intercept=self.fit_intercept, + ) + + if not linear_loss.base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {self._base_loss.__class__.__name__!r}." + ) + + # TODO: if alpha=0 check that X is not rank deficient + + # NOTE: Rescaling of sample_weight: + # We want to minimize + # obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance) + # + 1/2 * alpha * L2, + # with + # deviance = 2 * loss. + # The objective is invariant to multiplying sample_weight by a constant. We + # could choose this constant such that sum(sample_weight) = 1 in order to end + # up with + # obj = sum(sample_weight * loss) + 1/2 * alpha * L2. 
+ # But LinearModelLoss.loss() already computes + # average(loss, weights=sample_weight) + # Thus, without rescaling, we have + # obj = LinearModelLoss.loss(...) + + if self.warm_start and hasattr(self, "coef_"): + if self.fit_intercept: + # LinearModelLoss needs intercept at the end of coefficient array. + coef = np.concatenate((self.coef_, np.array([self.intercept_]))) + else: + coef = self.coef_ + coef = coef.astype(loss_dtype, copy=False) + else: + coef = linear_loss.init_zero_coef(X, dtype=loss_dtype) + if self.fit_intercept: + coef[-1] = linear_loss.base_loss.link.link( + np.average(y, weights=sample_weight) + ) + + l2_reg_strength = self.alpha + n_threads = _openmp_effective_n_threads() + + # Algorithms for optimization: + # Note again that our losses implement 1/2 * deviance. + if self.solver == "lbfgs": + func = linear_loss.loss_gradient + + opt_res = scipy.optimize.minimize( + func, + coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + # The constant 64 was found empirically to pass the test suite. + # The point is that ftol is very small, but a bit larger than + # machine precision for float64, which is the dtype used by lbfgs. + "ftol": 64 * np.finfo(float).eps, + }, + args=(X, y, sample_weight, l2_reg_strength, n_threads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res) + coef = opt_res.x + elif self.solver == "newton-cholesky": + sol = NewtonCholeskySolver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + verbose=self.verbose, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + elif issubclass(self.solver, NewtonSolver): + sol = self.solver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + else: + raise ValueError(f"Invalid solver={self.solver}.") + + if self.fit_intercept: + self.intercept_ = coef[-1] + self.coef_ = coef[:-1] + else: + # set intercept to zero as the other linear models do + self.intercept_ = 0.0 + self.coef_ = coef + + return self + + def _linear_predictor(self, X): + """Compute the linear_predictor = `X @ coef_ + intercept_`. + + Note that we often use the term raw_prediction instead of linear predictor. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values of linear predictor. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "coo"], + dtype=[np.float64, np.float32], + ensure_2d=True, + allow_nd=False, + reset=False, + ) + return X @ self.coef_ + self.intercept_ + + def predict(self, X): + """Predict using GLM with feature matrix X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values. + """ + # check_array is done in _linear_predictor + raw_prediction = self._linear_predictor(X) + y_pred = self._base_loss.link.inverse(raw_prediction) + return y_pred + + def score(self, X, y, sample_weight=None): + """Compute D^2, the percentage of deviance explained. + + D^2 is a generalization of the coefficient of determination R^2. 
+ R^2 uses squared error and D^2 uses the deviance of this GLM, see the + :ref:`User Guide `. + + D^2 is defined as + :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, + :math:`D_{null}` is the null deviance, i.e. the deviance of a model + with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. + The mean :math:`\\bar{y}` is averaged by sample_weight. + Best possible score is 1.0 and it can be negative (because the model + can be arbitrarily worse). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) + True values of target. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + D^2 of self.predict(X) w.r.t. y. + """ + # TODO: Adapt link to User Guide in the docstring, once + # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. + # + # Note, default score defined in RegressorMixin is R^2 score. + # TODO: make D^2 a score function in module metrics (and thereby get + # input validation and so on) + raw_prediction = self._linear_predictor(X) # validates X + # required by losses + y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. + sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) + + base_loss = self._base_loss + + if not base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {base_loss.__name__}." + ) + + constant = np.average( + base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None), + weights=sample_weight, + ) + + # Missing factor of 2 in deviance cancels out. + deviance = base_loss( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=1, + ) + y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) + deviance_null = base_loss( + y_true=y, + raw_prediction=np.tile(y_mean, y.shape[0]), + sample_weight=sample_weight, + n_threads=1, + ) + return 1 - (deviance + constant) / (deviance_null + constant) + + def _more_tags(self): + try: + # Create instance of BaseLoss if fit wasn't called yet. This is necessary as + # TweedieRegressor might set the used loss during fit different from + # self._base_loss. + base_loss = self._get_loss() + return {"requires_positive_y": not base_loss.in_y_true_range(-1.0)} + except (ValueError, AttributeError, TypeError): + # This happens when the link or power parameter of TweedieRegressor is + # invalid. We fallback on the default tags in that case. + return {} + + def _get_loss(self): + """This is only necessary because of the link and power arguments of the + TweedieRegressor. + + Note that we do not need to pass sample_weight to the loss class as this is + only needed to set loss.constant_hessian on which GLMs do not rely. + """ + return HalfSquaredError() + + +class PoissonRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Poisson distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). 
+ Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Actual number of iterations used in the solver. + + See Also + -------- + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.PoissonRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [12, 17, 22, 21] + >>> clf.fit(X, y) + PoissonRegressor() + >>> clf.score(X, y) + 0.990... + >>> clf.coef_ + array([0.121..., 0.158...]) + >>> clf.intercept_ + 2.088... + >>> clf.predict([[1, 1], [3, 4]]) + array([10.676..., 21.875...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfPoissonLoss() + + +class GammaRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Gamma distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. 
``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor `X @ coef_ + intercept_`. + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for `coef_` and `intercept_`. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_iter_ : int + Actual number of iterations used in the solver. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.GammaRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [19, 26, 33, 30] + >>> clf.fit(X, y) + GammaRegressor() + >>> clf.score(X, y) + 0.773... + >>> clf.coef_ + array([0.072..., 0.066...]) + >>> clf.intercept_ + 2.896... + >>> clf.predict([[1, 0], [2, 8]]) + array([19.483..., 35.795...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfGammaLoss() + + +class TweedieRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Tweedie distribution. 
+ + This estimator can be used to model different GLMs depending on the + ``power`` parameter, which determines the underlying distribution. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + power : float, default=0 + The power determines the underlying target distribution according + to the following table: + + +-------+------------------------+ + | Power | Distribution | + +=======+========================+ + | 0 | Normal | + +-------+------------------------+ + | 1 | Poisson | + +-------+------------------------+ + | (1,2) | Compound Poisson Gamma | + +-------+------------------------+ + | 2 | Gamma | + +-------+------------------------+ + | 3 | Inverse Gaussian | + +-------+------------------------+ + + For ``0 < power < 1``, no distribution exists. + + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). + + link : {'auto', 'identity', 'log'}, default='auto' + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets + the link depending on the chosen `power` parameter as follows: + + - 'identity' for ``power <= 0``, e.g. for the Normal distribution + - 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian + distributions + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + GammaRegressor : Generalized Linear Model with a Gamma distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.TweedieRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [2, 3.5, 5, 5.5] + >>> clf.fit(X, y) + TweedieRegressor() + >>> clf.score(X, y) + 0.839... + >>> clf.coef_ + array([0.599..., 0.299...]) + >>> clf.intercept_ + 1.600... + >>> clf.predict([[1, 1], [3, 4]]) + array([2.500..., 4.599...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints, + "power": [Interval(Real, None, None, closed="neither")], + "link": [StrOptions({"auto", "identity", "log"})], + } + + def __init__( + self, + *, + power=0.0, + alpha=1.0, + fit_intercept=True, + link="auto", + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + self.link = link + self.power = power + + def _get_loss(self): + if self.link == "auto": + if self.power <= 0: + # identity link + return HalfTweedieLossIdentity(power=self.power) + else: + # log link + return HalfTweedieLoss(power=self.power) + + if self.link == "log": + return HalfTweedieLoss(power=self.power) + + if self.link == "identity": + return HalfTweedieLossIdentity(power=self.power) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..588cf7e93eef01b82eaf24c87c36df22ea21dade --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py @@ -0,0 +1 @@ +# License: BSD 3 clause diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f028025b9af0cac6d6ce8b7d57ced3690171927 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..675be2a9847b8d87fcb9abff769fc944c47419e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py new file mode 100644 index 0000000000000000000000000000000000000000..5256a5f37027294bf0e3545d5a42bd77715e4177 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py @@ -0,0 +1,1112 @@ +# Authors: Christian Lorentzen +# +# License: BSD 3 clause + 
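# Illustrative sketch (not part of the patched files): how TweedieRegressor's
# `power` and `link='auto'` settings documented above resolve after fitting.
# The two-point dataset mirrors the link tests further below; `_base_loss` is a
# private attribute and is only inspected here for illustration.
import numpy as np
from sklearn._loss.link import IdentityLink, LogLink
from sklearn.linear_model import TweedieRegressor

X = np.array([[1.0], [2.0]])
y = np.array([0.1, 0.5])  # strictly positive, valid for all powers used below

normal_glm = TweedieRegressor(power=0, link="auto").fit(X, y)   # Normal deviance
poisson_glm = TweedieRegressor(power=1, link="auto").fit(X, y)  # Poisson deviance
gamma_glm = TweedieRegressor(power=2, link="auto").fit(X, y)    # Gamma deviance

assert isinstance(normal_glm._base_loss.link, IdentityLink)  # power <= 0 -> identity link
assert isinstance(poisson_glm._base_loss.link, LogLink)      # power > 0  -> log link
assert isinstance(gamma_glm._base_loss.link, LogLink)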
+import itertools +import warnings +from functools import partial + +import numpy as np +import pytest +import scipy +from numpy.testing import assert_allclose +from scipy import linalg +from scipy.optimize import minimize, root + +from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss +from sklearn._loss.link import IdentityLink, LogLink +from sklearn.base import clone +from sklearn.datasets import make_low_rank_matrix, make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + GammaRegressor, + PoissonRegressor, + Ridge, + TweedieRegressor, +) +from sklearn.linear_model._glm import _GeneralizedLinearRegressor +from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver +from sklearn.linear_model._linear_loss import LinearModelLoss +from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance +from sklearn.model_selection import train_test_split + +SOLVERS = ["lbfgs", "newton-cholesky"] + + +class BinomialRegressor(_GeneralizedLinearRegressor): + def _get_loss(self): + return HalfBinomialLoss() + + +def _special_minimize(fun, grad, x, tol_NM, tol): + # Find good starting point by Nelder-Mead + res_NM = minimize( + fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM} + ) + # Now refine via root finding on the gradient of the function, which is + # more precise than minimizing the function itself. + res = root( + grad, + res_NM.x, + method="lm", + options={"ftol": tol, "xtol": tol, "gtol": tol}, + ) + return res.x + + +@pytest.fixture(scope="module") +def regression_data(): + X, y = make_regression( + n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2 + ) + return X, y + + +@pytest.fixture( + params=itertools.product( + ["long", "wide"], + [ + BinomialRegressor(), + PoissonRegressor(), + GammaRegressor(), + # TweedieRegressor(power=3.0), # too difficult + # TweedieRegressor(power=0, link="log"), # too difficult + TweedieRegressor(power=1.5), + ], + ), + ids=lambda param: f"{param[0]}-{param[1]}", +) +def glm_dataset(global_random_seed, request): + """Dataset with GLM solutions, well conditioned X. + + This is inspired by ols_ridge_dataset in test_ridge.py. + + The construction is based on the SVD decomposition of X = U S V'. + + Parameters + ---------- + type : {"long", "wide"} + If "long", then n_samples > n_features. + If "wide", then n_features > n_samples. + model : a GLM model + + For "wide", we return the minimum norm solution: + + min ||w||_2 subject to w = argmin deviance(X, y, w) + + Note that the deviance is always minimized if y = inverse_link(X w) is possible to + achieve, which it is in the wide data case. Therefore, we can construct the + solution with minimum norm like (wide) OLS: + + min ||w||_2 subject to link(y) = raw_prediction = X w + + Returns + ------- + model : GLM model + X : ndarray + Last column of 1, i.e. intercept. + y : ndarray + coef_unpenalized : ndarray + Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in + case of ambiguity) + Last coefficient is intercept. + coef_penalized : ndarray + GLM solution with alpha=l2_reg_strength=1, i.e. + min 1/n * sum(loss) + ||w[:-1]||_2^2. + Last coefficient is intercept. + l2_reg_strength : float + Always equal 1. + """ + data_type, model = request.param + # Make larger dim more than double as big as the smaller one. + # This helps when constructing singular matrices like (X, X). 
+ if data_type == "long": + n_samples, n_features = 12, 4 + else: + n_samples, n_features = 4, 12 + k = min(n_samples, n_features) + rng = np.random.RandomState(global_random_seed) + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features, + effective_rank=k, + tail_strength=0.1, + random_state=rng, + ) + X[:, -1] = 1 # last columns acts as intercept + U, s, Vt = linalg.svd(X, full_matrices=False) + assert np.all(s > 1e-3) # to be sure + assert np.max(s) / np.min(s) < 100 # condition number of X + + if data_type == "long": + coef_unpenalized = rng.uniform(low=1, high=3, size=n_features) + coef_unpenalized *= rng.choice([-1, 1], size=n_features) + raw_prediction = X @ coef_unpenalized + else: + raw_prediction = rng.uniform(low=-3, high=3, size=n_samples) + # minimum norm solution min ||w||_2 such that raw_prediction = X w: + # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction + coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True) + sw = np.full(shape=n_samples, fill_value=1 / n_samples) + y = linear_loss.base_loss.link.inverse(raw_prediction) + + # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with + # optimizer. Note that the problem is well conditioned such that we get accurate + # results. + l2_reg_strength = 1 + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_with_intercept = _special_minimize( + fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14 + ) + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False) + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_without_intercept = _special_minimize( + fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14 + ) + + # To be sure + assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm( + coef_unpenalized + ) + + return ( + model, + X, + y, + coef_unpenalized, + coef_penalized_with_intercept, + coef_penalized_without_intercept, + l2_reg_strength, + ) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_regression(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + model.fit(X, y) + + rtol = 5e-5 if solver == "lbfgs" else 1e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + # Same with sample_weight. 
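# Illustrative sketch (not part of the test module): the minimum-norm
# construction used by the glm_dataset fixture above. For a wide, full-row-rank
# X, w = V diag(1/s) U' r (i.e. the pseudoinverse applied to r) is the smallest
# L2-norm solution of X w = r. The shapes below mirror the "wide" case.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.normal(size=(4, 12))        # n_samples=4 < n_features=12
raw_prediction = rng.normal(size=4)

U, s, Vt = linalg.svd(X, full_matrices=False)
coef_min_norm = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction

assert np.allclose(X @ coef_min_norm, raw_prediction)                  # solves X w = r
assert np.allclose(coef_min_norm, np.linalg.pinv(X) @ raw_prediction)  # equals pinv(X) r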
+ model = ( + clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) + ) + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on hstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2. + For long X, [X, X] is still a long but singular matrix. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha / 2, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1) + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + with warnings.catch_warnings(): + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.simplefilter("ignore", ConvergenceWarning) + model.fit(X, y) + + rtol = 2e-4 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on vstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X], [y] + [X], [y] with 1 * alpha. + It is the same alpha as the average loss stays the same. + For wide X, [X', X'] is a singular matrix. + """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + model.fit(X, y) + + rtol = 3e-5 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + Note: This checks the minimum norm solution for wide X, i.e. 
+ n_samples < n_features: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails + # for the wide/fat case with n_features > n_samples. Most current GLM solvers do + # NOT return the minimum norm solution with fit_intercept=True. + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-7 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 5e-5 + if solver == "newton-cholesky": + rtol = 5e-4 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if norm_model < (1 + 1e-12) * norm_solution: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + elif solver == "lbfgs" and fit_intercept: + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + assert norm_model > (1 + 1e-12) * norm_solution + + # See https://github.com/scikit-learn/scikit-learn/issues/23670. + # Note: Even adding a tiny penalty does not give the minimal norm solution. + # XXX: We could have naively expected LBFGS to find the minimal norm + # solution by adding a very small penalty. Even that fails for a reason we + # do not properly understand at this point. + else: + # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm + # solution on this problem. + # XXX: Do we have any theoretical guarantees why this should be the case? + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X, X]/2. 
+ For long X, [X, X] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + intercept = coef[-1] + coef = coef[:-1] + if n_samples > n_features: + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + else: + # To know the minimum norm solution, we keep one intercept column and do + # not divide by 2. Later on, we must take special care. + X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] + else: + intercept = 0 + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + + with warnings.catch_warnings(): + if solver.startswith("newton"): + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if fit_intercept and n_samples < n_features: + # Here we take special care. + model_intercept = 2 * model.intercept_ + model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + model_intercept = model.intercept_ + model_coef = model.coef_ + + if n_samples > n_features: + assert model_intercept == pytest.approx(intercept) + rtol = 1e-4 + assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky": + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + norm_solution = np.linalg.norm( + 0.5 * np.r_[intercept, intercept, coef, coef] + ) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + assert norm_model > (1 + 1e-12) * norm_solution + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + assert model_intercept == pytest.approx(intercept, rel=5e-6) + assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X], [y] + [X], [y]. 
+ For wide X, [X', X'] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-6 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if not (norm_model > (1 + 1e-12) * norm_solution): + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=1e-4) + elif solver == "lbfgs" and fit_intercept: + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + assert norm_model > (1 + 1e-12) * norm_solution + else: + rtol = 1e-5 if solver == "newton-cholesky" else 1e-4 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +def test_sample_weights_validation(): + """Test the raised errors in the validation of sample_weight.""" + # scalar value but not positive + X = [[1]] + y = [1] + weights = 0 + glm = _GeneralizedLinearRegressor() + + # Positive weights are accepted + glm.fit(X, y, sample_weight=1) + + # 2d array + weights = [[0]] + with pytest.raises(ValueError, match="must be 1D array or scalar"): + glm.fit(X, y, weights) + + # 1d but wrong length + weights = [1, 0] + msg = r"sample_weight.shape == \(2,\), expected \(1,\)!" 
+ with pytest.raises(ValueError, match=msg): + glm.fit(X, y, weights) + + +@pytest.mark.parametrize( + "glm", + [ + TweedieRegressor(power=3), + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=1.5), + ], +) +def test_glm_wrong_y_range(glm): + y = np.array([-1, 2]) + X = np.array([[1], [1]]) + msg = r"Some value\(s\) of y are out of the valid range of the loss" + with pytest.raises(ValueError, match=msg): + glm.fit(X, y) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_identity_regression(fit_intercept): + """Test GLM regression with identity link on a simple dataset.""" + coef = [1.0, 2.0] + X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T + y = np.dot(X, coef) + glm = _GeneralizedLinearRegressor( + alpha=0, + fit_intercept=fit_intercept, + tol=1e-12, + ) + if fit_intercept: + glm.fit(X[:, 1:], y) + assert_allclose(glm.coef_, coef[1:], rtol=1e-10) + assert_allclose(glm.intercept_, coef[0], rtol=1e-10) + else: + glm.fit(X, y) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("alpha", [0.0, 1.0]) +@pytest.mark.parametrize( + "GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor] +) +def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator): + """Test that the impact of sample_weight is consistent""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + glm_params = dict(alpha=alpha, fit_intercept=fit_intercept) + + glm = GLMEstimator(**glm_params).fit(X, y) + coef = glm.coef_.copy() + + # sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # sample_weight are normalized to 1 so, scaling them has no effect + sample_weight = 2 * np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # setting one element of sample_weight to 0 is equivalent to removing + # the corresponding sample + sample_weight = np.ones(y.shape) + sample_weight[-1] = 0 + glm.fit(X, y, sample_weight=sample_weight) + coef1 = glm.coef_.copy() + glm.fit(X[:-1], y[:-1]) + assert_allclose(glm.coef_, coef1, rtol=1e-12) + + # check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = np.ones(len(y)) + sample_weight_1[: n_samples // 2] = 2 + + glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1) + + glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None) + assert_allclose(glm1.coef_, glm2.coef_) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize( + "estimator", + [ + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=3.0), + TweedieRegressor(power=0, link="log"), + TweedieRegressor(power=1.5), + TweedieRegressor(power=4.5), + ], +) +def test_glm_log_regression(solver, fit_intercept, estimator): + """Test GLM regression with log link on a simple dataset.""" + coef = [0.2, -0.1] + X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T + y = np.exp(np.dot(X, coef)) + glm = clone(estimator).set_params( + alpha=0, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-8, + ) + if fit_intercept: + res = 
glm.fit(X[:, :-1], y) + assert_allclose(res.coef_, coef[:-1], rtol=1e-6) + assert_allclose(res.intercept_, coef[-1], rtol=1e-6) + else: + res = glm.fit(X, y) + assert_allclose(res.coef_, coef, rtol=2e-6) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_warm_start(solver, fit_intercept, global_random_seed): + n_samples, n_features = 100, 10 + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features - 2, + bias=fit_intercept * 1.0, + noise=1.0, + random_state=global_random_seed, + ) + y = np.abs(y) # Poisson requires non-negative targets. + alpha = 1 + params = { + "solver": solver, + "fit_intercept": fit_intercept, + "tol": 1e-10, + } + + glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params) + glm1.fit(X, y) + + glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params) + # As we intentionally set max_iter=1 such that the solver should raise a + # ConvergenceWarning. + with pytest.warns(ConvergenceWarning): + glm2.fit(X, y) + + linear_loss = LinearModelLoss( + base_loss=glm1._get_loss(), + fit_intercept=fit_intercept, + ) + sw = np.full_like(y, fill_value=1 / n_samples) + + objective_glm1 = linear_loss.loss( + coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_, + X=X, + y=y, + sample_weight=sw, + l2_reg_strength=alpha, + ) + objective_glm2 = linear_loss.loss( + coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_, + X=X, + y=y, + sample_weight=sw, + l2_reg_strength=alpha, + ) + assert objective_glm1 < objective_glm2 + + glm2.set_params(max_iter=1000) + glm2.fit(X, y) + # The two models are not exactly identical since the lbfgs solver + # computes the approximate hessian from previous iterations, which + # will not be strictly identical in the case of a warm start. 
+ rtol = 2e-4 if solver == "lbfgs" else 1e-9 + assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol) + assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5) + + +@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sample_weight", [None, True]) +def test_normal_ridge_comparison( + n_samples, n_features, fit_intercept, sample_weight, request +): + """Compare with Ridge regression for Normal distributions.""" + test_size = 10 + X, y = make_regression( + n_samples=n_samples + test_size, + n_features=n_features, + n_informative=n_features - 2, + noise=0.5, + random_state=42, + ) + + if n_samples > n_features: + ridge_params = {"solver": "svd"} + else: + ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7} + + ( + X_train, + X_test, + y_train, + y_test, + ) = train_test_split(X, y, test_size=test_size, random_state=0) + + alpha = 1.0 + if sample_weight is None: + sw_train = None + alpha_ridge = alpha * n_samples + else: + sw_train = np.random.RandomState(0).rand(len(y_train)) + alpha_ridge = alpha * sw_train.sum() + + # GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2 + ridge = Ridge( + alpha=alpha_ridge, + random_state=42, + fit_intercept=fit_intercept, + **ridge_params, + ) + ridge.fit(X_train, y_train, sample_weight=sw_train) + + glm = _GeneralizedLinearRegressor( + alpha=alpha, + fit_intercept=fit_intercept, + max_iter=300, + tol=1e-5, + ) + glm.fit(X_train, y_train, sample_weight=sw_train) + assert glm.coef_.shape == (X.shape[1],) + assert_allclose(glm.coef_, ridge.coef_, atol=5e-5) + assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5) + assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4) + assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4) + + +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"]) +def test_poisson_glmnet(solver): + """Compare Poisson regression with L2 regularization and LogLink to glmnet""" + # library("glmnet") + # options(digits=10) + # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2)) + # x <- data.matrix(df[,c("a", "b")]) + # y <- df$y + # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson", + # standardize=F, thresh=1e-10, nlambda=10000) + # coef(fit, s=1) + # (Intercept) -0.12889386979 + # a 0.29019207995 + # b 0.03741173122 + X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T + y = np.array([0, 1, 1, 2]) + glm = PoissonRegressor( + alpha=1, + fit_intercept=True, + tol=1e-7, + max_iter=300, + solver=solver, + ) + glm.fit(X, y) + assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5) + assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5) + + +def test_convergence_warning(regression_data): + X, y = regression_data + + est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20) + with pytest.warns(ConvergenceWarning): + est.fit(X, y) + + +@pytest.mark.parametrize( + "name, link_class", [("identity", IdentityLink), ("log", LogLink)] +) +def test_tweedie_link_argument(name, link_class): + """Test GLM link argument set as string.""" + y = np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(power=1, link=name).fit(X, y) + assert isinstance(glm._base_loss.link, link_class) + + +@pytest.mark.parametrize( + "power, expected_link_class", + [ + (0, IdentityLink), # normal + (1, LogLink), # poisson + (2, LogLink), # gamma + (3, LogLink), # inverse-gaussian + ], +) +def test_tweedie_link_auto(power, 
expected_link_class): + """Test that link='auto' delivers the expected link function""" + y = np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(link="auto", power=power).fit(X, y) + assert isinstance(glm._base_loss.link, expected_link_class) + + +@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3]) +@pytest.mark.parametrize("link", ["log", "identity"]) +def test_tweedie_score(regression_data, power, link): + """Test that GLM score equals d2_tweedie_score for Tweedie losses.""" + X, y = regression_data + # make y positive + y = np.abs(y) + 1.0 + glm = TweedieRegressor(power=power, link=link).fit(X, y) + assert glm.score(X, y) == pytest.approx( + d2_tweedie_score(y, glm.predict(X), power=power) + ) + + +@pytest.mark.parametrize( + "estimator, value", + [ + (PoissonRegressor(), True), + (GammaRegressor(), True), + (TweedieRegressor(power=1.5), True), + (TweedieRegressor(power=0), False), + ], +) +def test_tags(estimator, value): + assert estimator._get_tags()["requires_positive_y"] is value + + +def test_linalg_warning_with_newton_solver(global_random_seed): + newton_solver = "newton-cholesky" + rng = np.random.RandomState(global_random_seed) + # Use at least 20 samples to reduce the likelihood of getting a degenerate + # dataset for any global_random_seed. + X_orig = rng.normal(size=(20, 3)) + y = rng.poisson( + np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0] + ).astype(np.float64) + + # Collinear variation of the same input features. + X_collinear = np.hstack([X_orig] * 10) + + # Let's consider the deviance of a constant baseline on this problem. + baseline_pred = np.full_like(y, y.mean()) + constant_model_deviance = mean_poisson_deviance(y, baseline_pred) + assert constant_model_deviance > 1.0 + + # No warning raised on well-conditioned design, even without regularization. + tol = 1e-10 + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y) + original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig)) + + # On this dataset, we should have enough data points to not make it + # possible to get a near zero deviance (for the any of the admissible + # random seeds). This will make it easier to interpret meaning of rtol in + # the subsequent assertions: + assert original_newton_deviance > 0.2 + + # We check that the model could successfully fit information in X_orig to + # improve upon the constant baseline by a large margin (when evaluated on + # the traing set). + assert constant_model_deviance - original_newton_deviance > 0.1 + + # LBFGS is robust to a collinear design because its approximation of the + # Hessian is Symmeric Positive Definite by construction. Let's record its + # solution + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y) + collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + + # The LBFGS solution on the collinear is expected to reach a comparable + # solution to the Newton solution on the original data. + rtol = 1e-6 + assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol) + + # Fitting a Newton solver on the collinear version of the training data + # without regularization should raise an informative warning and fallback + # to the LBFGS solver. 
+ msg = ( + "The inner solver of .*Newton.*Solver stumbled upon a singular or very " + "ill-conditioned Hessian matrix" + ) + with pytest.warns(scipy.linalg.LinAlgWarning, match=msg): + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit( + X_collinear, y + ) + # As a result we should still automatically converge to a good solution. + collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + assert collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + # Increasing the regularization slightly should make the problem go away: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y) + + # The slightly penalized model on the collinear data should be close enough + # to the unpenalized model on the original data. + penalized_collinear_newton_deviance = mean_poisson_deviance( + y, reg.predict(X_collinear) + ) + assert penalized_collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + +@pytest.mark.parametrize("verbose", [0, 1, 2]) +def test_newton_solver_verbosity(capsys, verbose): + """Test the std output of verbose newton solvers.""" + y = np.array([1, 2], dtype=float) + X = np.array([[1.0, 0], [0, 1]], dtype=float) + linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.solve(X, y, None) # returns array([0., 0.69314758]) + captured = capsys.readouterr() + + if verbose == 0: + assert captured.out == "" + else: + msg = [ + "Newton iter=1", + "Check Convergence", + "1. max |gradient|", + "2. Newton decrement", + "Solver did converge at loss = ", + ] + for m in msg: + assert m in captured.out + + if verbose >= 2: + msg = ["Backtracking Line Search", "line search iteration="] + for m in msg: + assert m in captured.out + + # Set the Newton solver to a state with a completely wrong Newton step. + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1.0, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "Line search did not converge and resorts to lbfgs instead." in captured.out + ) + + # Set the Newton solver to a state with bad Newton step such that the loss + # improvement in line search is tiny. 
+ sol = NewtonCholeskySolver( + coef=np.array([1e-12, 0.69314758]), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1e-6, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 2: + msg = [ + "line search iteration=", + "check loss improvement <= armijo term:", + "check loss |improvement| <= eps * |loss_old|:", + "check sum(|gradient|) < sum(|gradient_old|):", + ] + for m in msg: + assert m in captured.out + + # Test for a case with negative hessian. We badly initialize coef for a Tweedie + # loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link. + linear_loss = LinearModelLoss( + base_loss=HalfTweedieLoss(power=3), fit_intercept=False + ) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X) + 1, + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.solve(X, y, None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "The inner solver detected a pointwise Hessian with many negative values" + " and resorts to lbfgs instead." + in captured.out + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1fa5bc1329d83f867fc4f395eedfce4ea1dd1f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d542ecd0168074ba6660d4d508b4331b9d238951 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e939674c3959498823f010800cdae6c0378e940 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71d56d7a76457404b67a4e03dad833ef88b7f4ce Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccf1379b66f40f8fbb917c5045873fe614ddd9a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26b2ab95fbb7ec5ba6b366f0a83031070392aa3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66cb33319f0d2777b384832edbba668096d7119e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e331e5f4b2141625a5c92549e831de04c9082f1d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beeb120195a268a99c2752b11089f985571459fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..202f0ea28cc5801b3aa1d73774c614f960fe375d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..200da2ddbd8da3e043562575e54b3b59a1fb8164 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..992b5e34d751f9254f0a1bc98315e523aa9a0647 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_quantile.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_quantile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7563204427c6079ef2b7c84817efb3566aeee536 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_quantile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ce1bd1672610ff7ca0183a63288faaf4c5aa72f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b7a18175a8d52c70bfcce5591ae411043f049a3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..603630d73192f86e5eb7e628ff1f8518d9df19b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b77595a686226fc54b18c61461d8e0bfa6a97ea1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86e04d71c9c3c106a7ea8eff979e96c08167fc33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_theil_sen.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_theil_sen.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8eed903a93847f91e4efc90252d8cc1693ec51c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_theil_sen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7c9f734dcf5b587c72e6549ca2c437e8b2c0bab2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_base.py @@ -0,0 +1,789 @@ +# Author: Alexandre Gramfort +# Fabian Pedregosa +# Maria Telenczuk +# +# License: BSD 3 clause + +import warnings + +import numpy as np +import pytest +from scipy import linalg, sparse + +from sklearn.datasets import load_iris, make_regression, make_sparse_uncorrelated +from sklearn.linear_model import LinearRegression +from sklearn.linear_model._base import ( + _preprocess_data, + _rescale_data, + make_dataset, +) +from sklearn.preprocessing import add_dummy_feature +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + LIL_CONTAINERS, +) + +rtol = 1e-6 + + +def test_linear_regression(): + # Test LinearRegression on a simple dataset. + # a simple dataset + X = [[1], [2]] + Y = [1, 2] + + reg = LinearRegression() + reg.fit(X, Y) + + assert_array_almost_equal(reg.coef_, [1]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [1, 2]) + + # test it also for degenerate input + X = [[1]] + Y = [0] + + reg = LinearRegression() + reg.fit(X, Y) + assert_array_almost_equal(reg.coef_, [0]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [0]) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_linear_regression_sample_weights( + sparse_container, fit_intercept, global_random_seed +): + rng = np.random.RandomState(global_random_seed) + + # It would not work with under-determined systems + n_samples, n_features = 6, 5 + + X = rng.normal(size=(n_samples, n_features)) + if sparse_container is not None: + X = sparse_container(X) + y = rng.normal(size=n_samples) + + sample_weight = 1.0 + rng.uniform(size=n_samples) + + # LinearRegression with explicit sample_weight + reg = LinearRegression(fit_intercept=fit_intercept) + reg.fit(X, y, sample_weight=sample_weight) + coefs1 = reg.coef_ + inter1 = reg.intercept_ + + assert reg.coef_.shape == (X.shape[1],) # sanity checks + + # Closed form of the weighted least square + # theta = (X^T W X)^(-1) @ X^T W y + W = np.diag(sample_weight) + X_aug = X if not fit_intercept else add_dummy_feature(X) + + Xw = X_aug.T @ W @ X_aug + yw = X_aug.T @ W @ y + coefs2 = linalg.solve(Xw, yw) + + if not fit_intercept: + assert_allclose(coefs1, coefs2) + else: + assert_allclose(coefs1, coefs2[1:]) + assert_allclose(inter1, coefs2[0]) + + +def test_raises_value_error_if_positive_and_sparse(): + error_msg = "Sparse data was passed for X, but dense data is required." 
+ # X must not be sparse if positive == True + X = sparse.eye(10) + y = np.ones(10) + + reg = LinearRegression(positive=True) + + with pytest.raises(TypeError, match=error_msg): + reg.fit(X, y) + + +@pytest.mark.parametrize("n_samples, n_features", [(2, 3), (3, 2)]) +def test_raises_value_error_if_sample_weights_greater_than_1d(n_samples, n_features): + # Sample weights must be either scalar or 1D + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + sample_weights_OK = rng.randn(n_samples) ** 2 + 1 + sample_weights_OK_1 = 1.0 + sample_weights_OK_2 = 2.0 + + reg = LinearRegression() + + # make sure the "OK" sample weights actually work + reg.fit(X, y, sample_weights_OK) + reg.fit(X, y, sample_weights_OK_1) + reg.fit(X, y, sample_weights_OK_2) + + +def test_fit_intercept(): + # Test assertions on betas shape. + X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]]) + X3 = np.array( + [[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]] + ) + y = np.array([1, 1]) + + lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y) + lr2_with_intercept = LinearRegression().fit(X2, y) + + lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y) + lr3_with_intercept = LinearRegression().fit(X3, y) + + assert lr2_with_intercept.coef_.shape == lr2_without_intercept.coef_.shape + assert lr3_with_intercept.coef_.shape == lr3_without_intercept.coef_.shape + assert lr2_without_intercept.coef_.ndim == lr3_without_intercept.coef_.ndim + + +def test_linear_regression_sparse(global_random_seed): + # Test that linear regression also works with sparse data + rng = np.random.RandomState(global_random_seed) + n = 100 + X = sparse.eye(n, n) + beta = rng.rand(n) + y = X @ beta + + ols = LinearRegression() + ols.fit(X, y.ravel()) + assert_array_almost_equal(beta, ols.coef_ + ols.intercept_) + + assert_array_almost_equal(ols.predict(X) - y.ravel(), 0) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_linear_regression_sparse_equal_dense(fit_intercept, csr_container): + # Test that linear regression agrees between sparse and dense + rng = np.random.RandomState(0) + n_samples = 200 + n_features = 2 + X = rng.randn(n_samples, n_features) + X[X < 0.1] = 0.0 + Xcsr = csr_container(X) + y = rng.rand(n_samples) + params = dict(fit_intercept=fit_intercept) + clf_dense = LinearRegression(**params) + clf_sparse = LinearRegression(**params) + clf_dense.fit(X, y) + clf_sparse.fit(Xcsr, y) + assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_) + assert_allclose(clf_dense.coef_, clf_sparse.coef_) + + +def test_linear_regression_multiple_outcome(): + # Test multiple-outcome linear regressions + rng = np.random.RandomState(0) + X, y = make_regression(random_state=rng) + + Y = np.vstack((y, y)).T + n_features = X.shape[1] + + reg = LinearRegression() + reg.fit((X), Y) + assert reg.coef_.shape == (2, n_features) + Y_pred = reg.predict(X) + reg.fit(X, y) + y_pred = reg.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_linear_regression_sparse_multiple_outcome(global_random_seed, coo_container): + # Test multiple-outcome linear regressions with sparse data + rng = np.random.RandomState(global_random_seed) + X, y = make_sparse_uncorrelated(random_state=rng) + X = coo_container(X) + Y = np.vstack((y, y)).T + n_features = 
X.shape[1] + + ols = LinearRegression() + ols.fit(X, Y) + assert ols.coef_.shape == (2, n_features) + Y_pred = ols.predict(X) + ols.fit(X, y.ravel()) + y_pred = ols.predict(X) + assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) + + +def test_linear_regression_positive(): + # Test nonnegative LinearRegression on a simple dataset. + X = [[1], [2]] + y = [1, 2] + + reg = LinearRegression(positive=True) + reg.fit(X, y) + + assert_array_almost_equal(reg.coef_, [1]) + assert_array_almost_equal(reg.intercept_, [0]) + assert_array_almost_equal(reg.predict(X), [1, 2]) + + # test it also for degenerate input + X = [[1]] + y = [0] + + reg = LinearRegression(positive=True) + reg.fit(X, y) + assert_allclose(reg.coef_, [0]) + assert_allclose(reg.intercept_, [0]) + assert_allclose(reg.predict(X), [0]) + + +def test_linear_regression_positive_multiple_outcome(global_random_seed): + # Test multiple-outcome nonnegative linear regressions + rng = np.random.RandomState(global_random_seed) + X, y = make_sparse_uncorrelated(random_state=rng) + Y = np.vstack((y, y)).T + n_features = X.shape[1] + + ols = LinearRegression(positive=True) + ols.fit(X, Y) + assert ols.coef_.shape == (2, n_features) + assert np.all(ols.coef_ >= 0.0) + Y_pred = ols.predict(X) + ols.fit(X, y.ravel()) + y_pred = ols.predict(X) + assert_allclose(np.vstack((y_pred, y_pred)).T, Y_pred) + + +def test_linear_regression_positive_vs_nonpositive(global_random_seed): + # Test differences with LinearRegression when positive=False. + rng = np.random.RandomState(global_random_seed) + X, y = make_sparse_uncorrelated(random_state=rng) + + reg = LinearRegression(positive=True) + reg.fit(X, y) + regn = LinearRegression(positive=False) + regn.fit(X, y) + + assert np.mean((reg.coef_ - regn.coef_) ** 2) > 1e-3 + + +def test_linear_regression_positive_vs_nonpositive_when_positive(global_random_seed): + # Test LinearRegression fitted coefficients + # when the problem is positive. + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 4 + X = rng.rand(n_samples, n_features) + y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 1.5 * X[:, 3] + + reg = LinearRegression(positive=True) + reg.fit(X, y) + regn = LinearRegression(positive=False) + regn.fit(X, y) + + assert np.mean((reg.coef_ - regn.coef_) ** 2) < 1e-6 + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("use_sw", [True, False]) +def test_inplace_data_preprocessing(sparse_container, use_sw, global_random_seed): + # Check that the data is not modified inplace by the linear regression + # estimator. + rng = np.random.RandomState(global_random_seed) + original_X_data = rng.randn(10, 12) + original_y_data = rng.randn(10, 2) + orginal_sw_data = rng.rand(10) + + if sparse_container is not None: + X = sparse_container(original_X_data) + else: + X = original_X_data.copy() + y = original_y_data.copy() + # XXX: Note hat y_sparse is not supported (broken?) in the current + # implementation of LinearRegression. 
+ + if use_sw: + sample_weight = orginal_sw_data.copy() + else: + sample_weight = None + + # Do not allow inplace preprocessing of X and y: + reg = LinearRegression() + reg.fit(X, y, sample_weight=sample_weight) + if sparse_container is not None: + assert_allclose(X.toarray(), original_X_data) + else: + assert_allclose(X, original_X_data) + assert_allclose(y, original_y_data) + + if use_sw: + assert_allclose(sample_weight, orginal_sw_data) + + # Allow inplace preprocessing of X and y + reg = LinearRegression(copy_X=False) + reg.fit(X, y, sample_weight=sample_weight) + if sparse_container is not None: + # No optimization relying on the inplace modification of sparse input + # data has been implemented at this time. + assert_allclose(X.toarray(), original_X_data) + else: + # X has been offset (and optionally rescaled by sample weights) + # inplace. The 0.42 threshold is arbitrary and has been found to be + # robust to any random seed in the admissible range. + assert np.linalg.norm(X - original_X_data) > 0.42 + + # y should not have been modified inplace by LinearRegression.fit. + assert_allclose(y, original_y_data) + + if use_sw: + # Sample weights have no reason to ever be modified inplace. + assert_allclose(sample_weight, orginal_sw_data) + + +def test_linear_regression_pd_sparse_dataframe_warning(): + pd = pytest.importorskip("pandas") + + # Warning is raised only when some of the columns is sparse + df = pd.DataFrame({"0": np.random.randn(10)}) + for col in range(1, 4): + arr = np.random.randn(10) + arr[:8] = 0 + # all columns but the first column is sparse + if col != 0: + arr = pd.arrays.SparseArray(arr, fill_value=0) + df[str(col)] = arr + + msg = "pandas.DataFrame with sparse columns found." + + reg = LinearRegression() + with pytest.warns(UserWarning, match=msg): + reg.fit(df.iloc[:, 0:2], df.iloc[:, 3]) + + # does not warn when the whole dataframe is sparse + df["0"] = pd.arrays.SparseArray(df["0"], fill_value=0) + assert hasattr(df, "sparse") + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + reg.fit(df.iloc[:, 0:2], df.iloc[:, 3]) + + +def test_preprocess_data(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + expected_X_mean = np.mean(X, axis=0) + expected_y_mean = np.mean(y, axis=0) + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=False) + assert_array_almost_equal(X_mean, np.zeros(n_features)) + assert_array_almost_equal(y_mean, 0) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt, X) + assert_array_almost_equal(yt, y) + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=True) + assert_array_almost_equal(X_mean, expected_X_mean) + assert_array_almost_equal(y_mean, expected_y_mean) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt, X - expected_X_mean) + assert_array_almost_equal(yt, y - expected_y_mean) + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_preprocess_data_multioutput(global_random_seed, sparse_container): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 3 + n_outputs = 2 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples, n_outputs) + expected_y_mean = np.mean(y, axis=0) + + if sparse_container is not None: + X = sparse_container(X) + + _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False) + 
assert_array_almost_equal(y_mean, np.zeros(n_outputs)) + assert_array_almost_equal(yt, y) + + _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True) + assert_array_almost_equal(y_mean, expected_y_mean) + assert_array_almost_equal(yt, y - y_mean) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_preprocess_data_weighted(sparse_container, global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 4 + # Generate random data with 50% of zero values to make sure + # that the sparse variant of this test is actually sparse. This also + # shifts the mean value for each columns in X further away from + # zero. + X = rng.rand(n_samples, n_features) + X[X < 0.5] = 0.0 + + # Scale the first feature of X to be 10 larger than the other to + # better check the impact of feature scaling. + X[:, 0] *= 10 + + # Constant non-zero feature. + X[:, 2] = 1.0 + + # Constant zero feature (non-materialized in the sparse case) + X[:, 3] = 0.0 + y = rng.rand(n_samples) + + sample_weight = rng.rand(n_samples) + expected_X_mean = np.average(X, axis=0, weights=sample_weight) + expected_y_mean = np.average(y, axis=0, weights=sample_weight) + + X_sample_weight_avg = np.average(X, weights=sample_weight, axis=0) + X_sample_weight_var = np.average( + (X - X_sample_weight_avg) ** 2, weights=sample_weight, axis=0 + ) + constant_mask = X_sample_weight_var < 10 * np.finfo(X.dtype).eps + assert_array_equal(constant_mask, [0, 0, 1, 1]) + expected_X_scale = np.sqrt(X_sample_weight_var) * np.sqrt(sample_weight.sum()) + + # near constant features should not be scaled + expected_X_scale[constant_mask] = 1 + + if sparse_container is not None: + X = sparse_container(X) + + # normalize is False + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data( + X, + y, + fit_intercept=True, + sample_weight=sample_weight, + ) + assert_array_almost_equal(X_mean, expected_X_mean) + assert_array_almost_equal(y_mean, expected_y_mean) + assert_array_almost_equal(X_scale, np.ones(n_features)) + if sparse_container is not None: + assert_array_almost_equal(Xt.toarray(), X.toarray()) + else: + assert_array_almost_equal(Xt, X - expected_X_mean) + assert_array_almost_equal(yt, y - expected_y_mean) + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_sparse_preprocess_data_offsets(global_random_seed, lil_container): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = sparse.rand(n_samples, n_features, density=0.5, random_state=rng) + X = lil_container(X) + y = rng.rand(n_samples) + XA = X.toarray() + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=False) + assert_array_almost_equal(X_mean, np.zeros(n_features)) + assert_array_almost_equal(y_mean, 0) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt.toarray(), XA) + assert_array_almost_equal(yt, y) + + Xt, yt, X_mean, y_mean, X_scale = _preprocess_data(X, y, fit_intercept=True) + assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) + assert_array_almost_equal(y_mean, np.mean(y, axis=0)) + assert_array_almost_equal(X_scale, np.ones(n_features)) + assert_array_almost_equal(Xt.toarray(), XA) + assert_array_almost_equal(yt, y - np.mean(y, axis=0)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_preprocess_data(csr_container): + # Test output format of _preprocess_data, when input is csr + X, y = make_regression() + X[X < 2.5] = 0.0 + csr = csr_container(X) + csr_, y, _, _, _ = 
_preprocess_data(csr, y, fit_intercept=True) + assert csr_.format == "csr" + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("to_copy", (True, False)) +def test_preprocess_copy_data_no_checks(sparse_container, to_copy): + X, y = make_regression() + X[X < 2.5] = 0.0 + + if sparse_container is not None: + X = sparse_container(X) + + X_, y_, _, _, _ = _preprocess_data( + X, y, fit_intercept=True, copy=to_copy, check_input=False + ) + + if to_copy and sparse_container is not None: + assert not np.may_share_memory(X_.data, X.data) + elif to_copy: + assert not np.may_share_memory(X_, X) + elif sparse_container is not None: + assert np.may_share_memory(X_.data, X.data) + else: + assert np.may_share_memory(X_, X) + + +def test_dtype_preprocess_data(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + X_32 = np.asarray(X, dtype=np.float32) + y_32 = np.asarray(y, dtype=np.float32) + X_64 = np.asarray(X, dtype=np.float64) + y_64 = np.asarray(y, dtype=np.float64) + + for fit_intercept in [True, False]: + Xt_32, yt_32, X_mean_32, y_mean_32, X_scale_32 = _preprocess_data( + X_32, + y_32, + fit_intercept=fit_intercept, + ) + + Xt_64, yt_64, X_mean_64, y_mean_64, X_scale_64 = _preprocess_data( + X_64, + y_64, + fit_intercept=fit_intercept, + ) + + Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_scale_3264 = _preprocess_data( + X_32, + y_64, + fit_intercept=fit_intercept, + ) + + Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_scale_6432 = _preprocess_data( + X_64, + y_32, + fit_intercept=fit_intercept, + ) + + assert Xt_32.dtype == np.float32 + assert yt_32.dtype == np.float32 + assert X_mean_32.dtype == np.float32 + assert y_mean_32.dtype == np.float32 + assert X_scale_32.dtype == np.float32 + + assert Xt_64.dtype == np.float64 + assert yt_64.dtype == np.float64 + assert X_mean_64.dtype == np.float64 + assert y_mean_64.dtype == np.float64 + assert X_scale_64.dtype == np.float64 + + assert Xt_3264.dtype == np.float32 + assert yt_3264.dtype == np.float32 + assert X_mean_3264.dtype == np.float32 + assert y_mean_3264.dtype == np.float32 + assert X_scale_3264.dtype == np.float32 + + assert Xt_6432.dtype == np.float64 + assert yt_6432.dtype == np.float64 + assert X_mean_6432.dtype == np.float64 + assert y_mean_6432.dtype == np.float64 + assert X_scale_6432.dtype == np.float64 + + assert X_32.dtype == np.float32 + assert y_32.dtype == np.float32 + assert X_64.dtype == np.float64 + assert y_64.dtype == np.float64 + + assert_array_almost_equal(Xt_32, Xt_64) + assert_array_almost_equal(yt_32, yt_64) + assert_array_almost_equal(X_mean_32, X_mean_64) + assert_array_almost_equal(y_mean_32, y_mean_64) + assert_array_almost_equal(X_scale_32, X_scale_64) + + +@pytest.mark.parametrize("n_targets", [None, 2]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_rescale_data(n_targets, sparse_container, global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 200 + n_features = 2 + + sample_weight = 1.0 + rng.rand(n_samples) + X = rng.rand(n_samples, n_features) + if n_targets is None: + y = rng.rand(n_samples) + else: + y = rng.rand(n_samples, n_targets) + + expected_sqrt_sw = np.sqrt(sample_weight) + expected_rescaled_X = X * expected_sqrt_sw[:, np.newaxis] + + if n_targets is None: + expected_rescaled_y = y * expected_sqrt_sw + else: + expected_rescaled_y = y * expected_sqrt_sw[:, np.newaxis] + + if 
sparse_container is not None: + X = sparse_container(X) + if n_targets is None: + y = sparse_container(y.reshape(-1, 1)) + else: + y = sparse_container(y) + + rescaled_X, rescaled_y, sqrt_sw = _rescale_data(X, y, sample_weight) + + assert_allclose(sqrt_sw, expected_sqrt_sw) + + if sparse_container is not None: + rescaled_X = rescaled_X.toarray() + rescaled_y = rescaled_y.toarray() + if n_targets is None: + rescaled_y = rescaled_y.ravel() + + assert_allclose(rescaled_X, expected_rescaled_X) + assert_allclose(rescaled_y, expected_rescaled_y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_fused_types_make_dataset(csr_container): + iris = load_iris() + + X_32 = iris.data.astype(np.float32) + y_32 = iris.target.astype(np.float32) + X_csr_32 = csr_container(X_32) + sample_weight_32 = np.arange(y_32.size, dtype=np.float32) + + X_64 = iris.data.astype(np.float64) + y_64 = iris.target.astype(np.float64) + X_csr_64 = csr_container(X_64) + sample_weight_64 = np.arange(y_64.size, dtype=np.float64) + + # array + dataset_32, _ = make_dataset(X_32, y_32, sample_weight_32) + dataset_64, _ = make_dataset(X_64, y_64, sample_weight_64) + xi_32, yi_32, _, _ = dataset_32._next_py() + xi_64, yi_64, _, _ = dataset_64._next_py() + xi_data_32, _, _ = xi_32 + xi_data_64, _, _ = xi_64 + + assert xi_data_32.dtype == np.float32 + assert xi_data_64.dtype == np.float64 + assert_allclose(yi_64, yi_32, rtol=rtol) + + # csr + datasetcsr_32, _ = make_dataset(X_csr_32, y_32, sample_weight_32) + datasetcsr_64, _ = make_dataset(X_csr_64, y_64, sample_weight_64) + xicsr_32, yicsr_32, _, _ = datasetcsr_32._next_py() + xicsr_64, yicsr_64, _, _ = datasetcsr_64._next_py() + xicsr_data_32, _, _ = xicsr_32 + xicsr_data_64, _, _ = xicsr_64 + + assert xicsr_data_32.dtype == np.float32 + assert xicsr_data_64.dtype == np.float64 + + assert_allclose(xicsr_data_64, xicsr_data_32, rtol=rtol) + assert_allclose(yicsr_64, yicsr_32, rtol=rtol) + + assert_array_equal(xi_data_32, xicsr_data_32) + assert_array_equal(xi_data_64, xicsr_data_64) + assert_array_equal(yi_32, yicsr_32) + assert_array_equal(yi_64, yicsr_64) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_linear_regression_sample_weight_consistency( + sparse_container, fit_intercept, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weights_invariance alone and also tests sparse X. + It is very similar to test_enet_sample_weight_consistency. + """ + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict(fit_intercept=fit_intercept) + + reg = LinearRegression(**params).fit(X, y, sample_weight=None) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # 1) sample_weight=np.ones(..) must be equivalent to sample_weight=None + # same check as check_sample_weights_invariance(name, reg, kind="ones"), but we also + # test with sparse input. 
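+    # Sketch of the reasoning behind check 1): with unit weights W = I, the
+    # weighted normal equations X^T W X w = X^T W y reduce to the unweighted
+    # ones, so coef_ and intercept_ should agree up to numerical tolerance.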
+ sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect, cf. np.average() + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + reg = reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + reg.fit(X, y, sample_weight=np.pi * sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6 if sparse_container is None else 1e-5) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 4) setting elements of sample_weight to 0 is equivalent to removing these samples + sample_weight_0 = sample_weight.copy() + sample_weight_0[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight_0) + coef_0 = reg.coef_.copy() + if fit_intercept: + intercept_0 = reg.intercept_ + reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5]) + if fit_intercept and sparse_container is None: + # FIXME: https://github.com/scikit-learn/scikit-learn/issues/26164 + # This often fails, e.g. when calling + # SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all" pytest \ + # sklearn/linear_model/tests/test_base.py\ + # ::test_linear_regression_sample_weight_consistency + pass + else: + assert_allclose(reg.coef_, coef_0, rtol=1e-5) + if fit_intercept: + assert_allclose(reg.intercept_, intercept_0) + + # 5) check that multiplying sample_weight by 2 is equivalent to repeating + # corresponding samples twice + if sparse_container is not None: + X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc") + else: + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + + reg1 = LinearRegression(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = LinearRegression(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6) + if fit_intercept: + assert_allclose(reg1.intercept_, reg2.intercept_) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_common.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..ff9d7aad146f3db6d838203ec56a3f4a0bbb6eb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_common.py @@ -0,0 +1,147 @@ +# License: BSD 3 clause + +import inspect + +import numpy as np +import pytest + +from sklearn.base import is_classifier +from sklearn.datasets import make_low_rank_matrix +from sklearn.linear_model import ( + ARDRegression, + BayesianRidge, + ElasticNet, + ElasticNetCV, + Lars, + LarsCV, + Lasso, + LassoCV, + LassoLarsCV, + LassoLarsIC, + LinearRegression, + LogisticRegression, + LogisticRegressionCV, + MultiTaskElasticNet, + MultiTaskElasticNetCV, + MultiTaskLasso, + MultiTaskLassoCV, + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + PoissonRegressor, + Ridge, + RidgeCV, + SGDRegressor, + 
TweedieRegressor, +) + + +# Note: GammaRegressor() and TweedieRegressor(power != 1) have a non-canonical link. +@pytest.mark.parametrize( + "model", + [ + ARDRegression(), + BayesianRidge(), + ElasticNet(), + ElasticNetCV(), + Lars(), + LarsCV(), + Lasso(), + LassoCV(), + LassoLarsCV(), + LassoLarsIC(), + LinearRegression(), + # TODO: FIx SAGA which fails badly with sample_weights. + # This is a known limitation, see: + # https://github.com/scikit-learn/scikit-learn/issues/21305 + pytest.param( + LogisticRegression( + penalty="elasticnet", solver="saga", l1_ratio=0.5, tol=1e-15 + ), + marks=pytest.mark.xfail(reason="Missing importance sampling scheme"), + ), + LogisticRegressionCV(tol=1e-6), + MultiTaskElasticNet(), + MultiTaskElasticNetCV(), + MultiTaskLasso(), + MultiTaskLassoCV(), + OrthogonalMatchingPursuit(), + OrthogonalMatchingPursuitCV(), + PoissonRegressor(), + Ridge(), + RidgeCV(), + pytest.param( + SGDRegressor(tol=1e-15), + marks=pytest.mark.xfail(reason="Insufficient precision."), + ), + SGDRegressor(penalty="elasticnet", max_iter=10_000), + TweedieRegressor(power=0), # same as Ridge + ], + ids=lambda x: x.__class__.__name__, +) +@pytest.mark.parametrize("with_sample_weight", [False, True]) +def test_balance_property(model, with_sample_weight, global_random_seed): + # Test that sum(y_predicted) == sum(y_observed) on the training set. + # This must hold for all linear models with deviance of an exponential disperson + # family as loss and the corresponding canonical link if fit_intercept=True. + # Examples: + # - squared error and identity link (most linear models) + # - Poisson deviance with log link + # - log loss with logit link + # This is known as balance property or unconditional calibration/unbiasedness. + # For reference, see Corollary 3.18, 3.20 and Chapter 5.1.5 of + # M.V. Wuthrich and M. Merz, "Statistical Foundations of Actuarial Learning and its + # Applications" (June 3, 2022). http://doi.org/10.2139/ssrn.3822407 + + if ( + with_sample_weight + and "sample_weight" not in inspect.signature(model.fit).parameters.keys() + ): + pytest.skip("Estimator does not support sample_weight.") + + rel = 2e-4 # test precision + if isinstance(model, SGDRegressor): + rel = 1e-1 + elif hasattr(model, "solver") and model.solver == "saga": + rel = 1e-2 + + rng = np.random.RandomState(global_random_seed) + n_train, n_features, n_targets = 100, 10, None + if isinstance( + model, + (MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLasso, MultiTaskLassoCV), + ): + n_targets = 3 + X = make_low_rank_matrix(n_samples=n_train, n_features=n_features, random_state=rng) + if n_targets: + coef = ( + rng.uniform(low=-2, high=2, size=(n_features, n_targets)) + / np.max(X, axis=0)[:, None] + ) + else: + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + + expectation = np.exp(X @ coef + 0.5) + y = rng.poisson(lam=expectation) + 1 # strict positive, i.e. y > 0 + if is_classifier(model): + y = (y > expectation + 1).astype(np.float64) + + if with_sample_weight: + sw = rng.uniform(low=1, high=10, size=y.shape[0]) + else: + sw = None + + model.set_params(fit_intercept=True) # to be sure + if with_sample_weight: + model.fit(X, y, sample_weight=sw) + else: + model.fit(X, y) + + # Assert balance property. 
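+    # For intuition: with the canonical link and a free intercept, the score
+    # equation for the intercept reads sum_i s_i * (y_pred_i - y_i) = 0, i.e.
+    # the (sample-weighted) mean prediction equals the (sample-weighted) mean
+    # target, which is exactly what the assertion below checks.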
+ if is_classifier(model): + assert np.average(model.predict_proba(X)[:, 1], weights=sw) == pytest.approx( + np.average(y, weights=sw), rel=rel + ) + else: + assert np.average(model.predict(X), weights=sw, axis=0) == pytest.approx( + np.average(y, weights=sw, axis=0), rel=rel + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..fe5f17ca75d00d033222cea3ff0d875b1df039c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py @@ -0,0 +1,1633 @@ +# Authors: Olivier Grisel +# Alexandre Gramfort +# License: BSD 3 clause + +import warnings +from copy import deepcopy + +import joblib +import numpy as np +import pytest +from scipy import interpolate, sparse + +from sklearn.base import clone, is_classifier +from sklearn.datasets import load_diabetes, make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + LassoLars, + LassoLarsCV, + LinearRegression, + MultiTaskElasticNet, + MultiTaskElasticNetCV, + MultiTaskLasso, + MultiTaskLassoCV, + Ridge, + RidgeClassifier, + RidgeClassifierCV, + RidgeCV, + enet_path, + lars_path, + lasso_path, +) +from sklearn.linear_model._coordinate_descent import _set_order +from sklearn.model_selection import ( + BaseCrossValidator, + GridSearchCV, + LeaveOneGroupOut, +) +from sklearn.model_selection._split import GroupsConsumerMixin +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils import check_array +from sklearn.utils._testing import ( + TempMemmap, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + + +@pytest.mark.parametrize("order", ["C", "F"]) +@pytest.mark.parametrize("input_order", ["C", "F"]) +def test_set_order_dense(order, input_order): + """Check that _set_order returns arrays with promised order.""" + X = np.array([[0], [0], [0]], order=input_order) + y = np.array([0, 0, 0], order=input_order) + X2, y2 = _set_order(X, y, order=order) + if order == "C": + assert X2.flags["C_CONTIGUOUS"] + assert y2.flags["C_CONTIGUOUS"] + elif order == "F": + assert X2.flags["F_CONTIGUOUS"] + assert y2.flags["F_CONTIGUOUS"] + + if order == input_order: + assert X is X2 + assert y is y2 + + +@pytest.mark.parametrize("order", ["C", "F"]) +@pytest.mark.parametrize("input_order", ["C", "F"]) +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_set_order_sparse(order, input_order, coo_container): + """Check that _set_order returns sparse matrices in promised format.""" + X = coo_container(np.array([[0], [0], [0]])) + y = coo_container(np.array([0, 0, 0])) + sparse_format = "csc" if input_order == "F" else "csr" + X = X.asformat(sparse_format) + y = X.asformat(sparse_format) + X2, y2 = _set_order(X, y, order=order) + + format = "csc" if order == "F" else "csr" + assert sparse.issparse(X2) and X2.format == format + assert sparse.issparse(y2) and y2.format == format + + +def test_lasso_zero(): + # Check that the lasso can handle zero data without crashing + X = [[0], [0], [0]] + y = [0, 0, 0] + clf = Lasso(alpha=0.1).fit(X, y) + pred = clf.predict([[1], [2], [3]]) + assert_array_almost_equal(clf.coef_, [0]) 
+ assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_enet_nonfinite_params(): + # Check ElasticNet throws ValueError when dealing with non-finite parameter + # values + rng = np.random.RandomState(0) + n_samples = 10 + fmax = np.finfo(np.float64).max + X = fmax * rng.uniform(size=(n_samples, 2)) + y = rng.randint(0, 2, size=n_samples) + + clf = ElasticNet(alpha=0.1) + msg = "Coordinate descent iterations resulted in non-finite parameter values" + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_lasso_toy(): + # Test Lasso on a toy example for various values of alpha. + # When validating this against glmnet notice that glmnet divides it + # against nobs. + + X = [[-1], [0], [1]] + Y = [-1, 0, 1] # just a straight line + T = [[2], [3], [4]] # test sample + + clf = Lasso(alpha=1e-8) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=0.1) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.85]) + assert_array_almost_equal(pred, [1.7, 2.55, 3.4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.25]) + assert_array_almost_equal(pred, [0.5, 0.75, 1.0]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=1) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.0]) + assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_enet_toy(): + # Test ElasticNet for various parameters of alpha and l1_ratio. + # Actually, the parameters alpha = 0 should not be allowed. However, + # we test it as a border case. 
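+    # For reference, ElasticNet minimizes
+    #     1 / (2 * n_samples) * ||y - X w||^2_2
+    #     + alpha * l1_ratio * ||w||_1
+    #     + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2,
+    # so alpha -> 0 approaches plain least squares and l1_ratio interpolates
+    # between a lasso-type (1.0) and a ridge-type (0.0) penalty.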
+ # ElasticNet is tested with and without precomputed Gram matrix + + X = np.array([[-1.0], [0.0], [1.0]]) + Y = [-1, 0, 1] # just a straight line + T = [[2.0], [3.0], [4.0]] # test sample + + # this should be the same as lasso + clf = ElasticNet(alpha=1e-8, l1_ratio=1.0) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100, precompute=False) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf.set_params(max_iter=100, precompute=True) + clf.fit(X, Y) # with Gram + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf.set_params(max_iter=100, precompute=np.dot(X.T, X)) + clf.fit(X, Y) # with Gram + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.45454], 3) + assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_lasso_dual_gap(): + """ + Check that Lasso.dual_gap_ matches its objective formulation, with the + datafit normalized by n_samples + """ + X, y, _, _ = build_dataset(n_samples=10, n_features=30) + n_samples = len(y) + alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples + clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y) + w = clf.coef_ + R = y - X @ w + primal = 0.5 * np.mean(R**2) + clf.alpha * np.sum(np.abs(w)) + # dual pt: R / n_samples, dual constraint: norm(X.T @ theta, inf) <= alpha + R /= np.max(np.abs(X.T @ R) / (n_samples * alpha)) + dual = 0.5 * (np.mean(y**2) - np.mean((y - R) ** 2)) + assert_allclose(clf.dual_gap_, primal - dual) + + +def build_dataset(n_samples=50, n_features=200, n_informative_features=10, n_targets=1): + """ + build an ill-posed linear regression problem with many noisy features and + comparatively few samples + """ + random_state = np.random.RandomState(0) + if n_targets > 1: + w = random_state.randn(n_features, n_targets) + else: + w = random_state.randn(n_features) + w[n_informative_features:] = 0.0 + X = random_state.randn(n_samples, n_features) + y = np.dot(X, w) + X_test = random_state.randn(n_samples, n_features) + y_test = np.dot(X_test, w) + return X, y, X_test, y_test + + +def test_lasso_cv(): + X, y, X_test, y_test = build_dataset() + max_iter = 150 + clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.056, 2) + + clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True, cv=3) + clf.fit(X, y) + assert_almost_equal(clf.alpha_, 0.056, 2) + + # Check that the lars and the coordinate descent implementation + # select a similar alpha + lars = LassoLarsCV(max_iter=30, cv=3).fit(X, y) + # for this we check that they don't fall in the grid of + # clf.alphas further than 1 + assert ( + np.abs( + np.searchsorted(clf.alphas_[::-1], lars.alpha_) + - np.searchsorted(clf.alphas_[::-1], clf.alpha_) + ) + <= 1 + ) + # check that they also give a similar MSE + mse_lars = 
interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T) + np.testing.assert_approx_equal( + mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), significant=2 + ) + + # test set + assert clf.score(X_test, y_test) > 0.99 + + +def test_lasso_cv_with_some_model_selection(): + from sklearn import datasets + from sklearn.model_selection import ShuffleSplit + + diabetes = datasets.load_diabetes() + X = diabetes.data + y = diabetes.target + + pipe = make_pipeline(StandardScaler(), LassoCV(cv=ShuffleSplit(random_state=0))) + pipe.fit(X, y) + + +def test_lasso_cv_positive_constraint(): + X, y, X_test, y_test = build_dataset() + max_iter = 500 + + # Ensure the unconstrained fit has a negative coefficient + clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) + clf_unconstrained.fit(X, y) + assert min(clf_unconstrained.coef_) < 0 + + # On same data, constrained fit has non-negative coefficients + clf_constrained = LassoCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, positive=True, cv=2, n_jobs=1 + ) + clf_constrained.fit(X, y) + assert min(clf_constrained.coef_) >= 0 + + +@pytest.mark.parametrize( + "alphas, err_type, err_msg", + [ + ((1, -1, -100), ValueError, r"alphas\[1\] == -1, must be >= 0.0."), + ( + (-0.1, -1.0, -10.0), + ValueError, + r"alphas\[0\] == -0.1, must be >= 0.0.", + ), + ( + (1, 1.0, "1"), + TypeError, + r"alphas\[2\] must be an instance of float, not str", + ), + ], +) +def test_lassocv_alphas_validation(alphas, err_type, err_msg): + """Check the `alphas` validation in LassoCV.""" + + n_samples, n_features = 5, 5 + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + y = rng.randint(0, 2, n_samples) + lassocv = LassoCV(alphas=alphas) + with pytest.raises(err_type, match=err_msg): + lassocv.fit(X, y) + + +def _scale_alpha_inplace(estimator, n_samples): + """Rescale the parameter alpha from when the estimator is evoked with + normalize set to True as if it were evoked in a Pipeline with normalize set + to False and with a StandardScaler. + """ + if ("alpha" not in estimator.get_params()) and ( + "alphas" not in estimator.get_params() + ): + return + + if isinstance(estimator, (RidgeCV, RidgeClassifierCV)): + # alphas is not validated at this point and can be a list. + # We convert it to a np.ndarray to make sure broadcasting + # is used. 
+ alphas = np.asarray(estimator.alphas) * n_samples + return estimator.set_params(alphas=alphas) + if isinstance(estimator, (Lasso, LassoLars, MultiTaskLasso)): + alpha = estimator.alpha * np.sqrt(n_samples) + if isinstance(estimator, (Ridge, RidgeClassifier)): + alpha = estimator.alpha * n_samples + if isinstance(estimator, (ElasticNet, MultiTaskElasticNet)): + if estimator.l1_ratio == 1: + alpha = estimator.alpha * np.sqrt(n_samples) + elif estimator.l1_ratio == 0: + alpha = estimator.alpha * n_samples + else: + # To avoid silent errors in case of refactoring + raise NotImplementedError + + estimator.set_params(alpha=alpha) + + +@pytest.mark.parametrize( + "LinearModel, params", + [ + (Lasso, {"tol": 1e-16, "alpha": 0.1}), + (LassoCV, {"tol": 1e-16}), + (ElasticNetCV, {}), + (RidgeClassifier, {"solver": "sparse_cg", "alpha": 0.1}), + (ElasticNet, {"tol": 1e-16, "l1_ratio": 1, "alpha": 0.01}), + (ElasticNet, {"tol": 1e-16, "l1_ratio": 0, "alpha": 0.01}), + (Ridge, {"solver": "sparse_cg", "tol": 1e-12, "alpha": 0.1}), + (LinearRegression, {}), + (RidgeCV, {}), + (RidgeClassifierCV, {}), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_model_pipeline_same_dense_and_sparse(LinearModel, params, csr_container): + # Test that linear model preceded by StandardScaler in the pipeline and + # with normalize set to False gives the same y_pred and the same .coef_ + # given X sparse or dense + + model_dense = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params)) + + model_sparse = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params)) + + # prepare the data + rng = np.random.RandomState(0) + n_samples = 200 + n_features = 2 + X = rng.randn(n_samples, n_features) + X[X < 0.1] = 0.0 + + X_sparse = csr_container(X) + y = rng.rand(n_samples) + + if is_classifier(model_dense): + y = np.sign(y) + + model_dense.fit(X, y) + model_sparse.fit(X_sparse, y) + + assert_allclose(model_sparse[1].coef_, model_dense[1].coef_) + y_pred_dense = model_dense.predict(X) + y_pred_sparse = model_sparse.predict(X_sparse) + assert_allclose(y_pred_dense, y_pred_sparse) + + assert_allclose(model_dense[1].intercept_, model_sparse[1].intercept_) + + +def test_lasso_path_return_models_vs_new_return_gives_same_coefficients(): + # Test that lasso_path with lars_path style output gives the + # same result + + # Some toy data + X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T + y = np.array([1, 2, 3.1]) + alphas = [5.0, 1.0, 0.5] + + # Use lars_path and lasso_path(new output) with 1D linear interpolation + # to compute the same path + alphas_lars, _, coef_path_lars = lars_path(X, y, method="lasso") + coef_path_cont_lars = interpolate.interp1d( + alphas_lars[::-1], coef_path_lars[:, ::-1] + ) + alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas) + coef_path_cont_lasso = interpolate.interp1d( + alphas_lasso2[::-1], coef_path_lasso2[:, ::-1] + ) + + assert_array_almost_equal( + coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas), decimal=1 + ) + + +def test_enet_path(): + # We use a large number of samples and of informative features so that + # the l1_ratio selected is more toward ridge than lasso + X, y, X_test, y_test = build_dataset( + n_samples=200, n_features=100, n_informative_features=100 + ) + max_iter = 150 + + # Here we have a small number of iterations, and thus the + # ElasticNet might not converge. 
This is to speed up tests + clf = ElasticNetCV( + alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter + ) + ignore_warnings(clf.fit)(X, y) + # Well-conditioned settings, we should have selected our + # smallest penalty + assert_almost_equal(clf.alpha_, min(clf.alphas_)) + # Non-sparse ground truth: we should have selected an elastic-net + # that is closer to ridge than to lasso + assert clf.l1_ratio_ == min(clf.l1_ratio) + + clf = ElasticNetCV( + alphas=[0.01, 0.05, 0.1], + eps=2e-3, + l1_ratio=[0.5, 0.7], + cv=3, + max_iter=max_iter, + precompute=True, + ) + ignore_warnings(clf.fit)(X, y) + + # Well-conditioned settings, we should have selected our + # smallest penalty + assert_almost_equal(clf.alpha_, min(clf.alphas_)) + # Non-sparse ground truth: we should have selected an elastic-net + # that is closer to ridge than to lasso + assert clf.l1_ratio_ == min(clf.l1_ratio) + + # We are in well-conditioned settings with low noise: we should + # have a good test-set performance + assert clf.score(X_test, y_test) > 0.99 + + # Multi-output/target case + X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3) + clf = MultiTaskElasticNetCV( + n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter + ) + ignore_warnings(clf.fit)(X, y) + # We are in well-conditioned settings with low noise: we should + # have a good test-set performance + assert clf.score(X_test, y_test) > 0.99 + assert clf.coef_.shape == (3, 10) + + # Mono-output should have same cross-validated alpha_ and l1_ratio_ + # in both cases. + X, y, _, _ = build_dataset(n_features=10) + clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf1.fit(X, y) + clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf2.fit(X, y[:, np.newaxis]) + assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_) + assert_almost_equal(clf1.alpha_, clf2.alpha_) + + +def test_path_parameters(): + X, y, _, _ = build_dataset() + max_iter = 100 + + clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter, l1_ratio=0.5, tol=1e-3) + clf.fit(X, y) # new params + assert_almost_equal(0.5, clf.l1_ratio) + assert 50 == clf.n_alphas + assert 50 == len(clf.alphas_) + + +def test_warm_start(): + X, y, _, _ = build_dataset() + clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True) + ignore_warnings(clf.fit)(X, y) + ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations + + clf2 = ElasticNet(alpha=0.1, max_iter=10) + ignore_warnings(clf2.fit)(X, y) + assert_array_almost_equal(clf2.coef_, clf.coef_) + + +def test_lasso_alpha_warning(): + X = [[-1], [0], [1]] + Y = [-1, 0, 1] # just a straight line + + clf = Lasso(alpha=0) + warning_message = ( + "With alpha=0, this algorithm does not " + "converge well. 
You are advised to use the " + "LinearRegression estimator" + ) + with pytest.warns(UserWarning, match=warning_message): + clf.fit(X, Y) + + +def test_lasso_positive_constraint(): + X = [[-1], [0], [1]] + y = [1, 0, -1] # just a straight line with negative slope + + lasso = Lasso(alpha=0.1, positive=True) + lasso.fit(X, y) + assert min(lasso.coef_) >= 0 + + lasso = Lasso(alpha=0.1, precompute=True, positive=True) + lasso.fit(X, y) + assert min(lasso.coef_) >= 0 + + +def test_enet_positive_constraint(): + X = [[-1], [0], [1]] + y = [1, 0, -1] # just a straight line with negative slope + + enet = ElasticNet(alpha=0.1, positive=True) + enet.fit(X, y) + assert min(enet.coef_) >= 0 + + +def test_enet_cv_positive_constraint(): + X, y, X_test, y_test = build_dataset() + max_iter = 500 + + # Ensure the unconstrained fit has a negative coefficient + enetcv_unconstrained = ElasticNetCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1 + ) + enetcv_unconstrained.fit(X, y) + assert min(enetcv_unconstrained.coef_) < 0 + + # On same data, constrained fit has non-negative coefficients + enetcv_constrained = ElasticNetCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1 + ) + enetcv_constrained.fit(X, y) + assert min(enetcv_constrained.coef_) >= 0 + + +def test_uniform_targets(): + enet = ElasticNetCV(n_alphas=3) + m_enet = MultiTaskElasticNetCV(n_alphas=3) + lasso = LassoCV(n_alphas=3) + m_lasso = MultiTaskLassoCV(n_alphas=3) + + models_single_task = (enet, lasso) + models_multi_task = (m_enet, m_lasso) + + rng = np.random.RandomState(0) + + X_train = rng.random_sample(size=(10, 3)) + X_test = rng.random_sample(size=(10, 3)) + + y1 = np.empty(10) + y2 = np.empty((10, 2)) + + for model in models_single_task: + for y_values in (0, 5): + y1.fill(y_values) + assert_array_equal(model.fit(X_train, y1).predict(X_test), y1) + assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3) + + for model in models_multi_task: + for y_values in (0, 5): + y2[:, 0].fill(y_values) + y2[:, 1].fill(2 * y_values) + assert_array_equal(model.fit(X_train, y2).predict(X_test), y2) + assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3) + + +def test_multi_task_lasso_and_enet(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + # Y_test = np.c_[y_test, y_test] + clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1) + warning_message = ( + "Objective did not converge. You might want to " + "increase the number of iterations." 
+ ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, Y) + + +def test_lasso_readonly_data(): + X = np.array([[-1], [0], [1]]) + Y = np.array([-1, 0, 1]) # just a straight line + T = np.array([[2], [3], [4]]) # test sample + with TempMemmap((X, Y)) as (X, Y): + clf = Lasso(alpha=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.25]) + assert_array_almost_equal(pred, [0.5, 0.75, 1.0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_multi_task_lasso_readonly_data(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + with TempMemmap((X, Y)) as (X, Y): + Y = np.c_[y, y] + clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + +def test_enet_multitarget(): + n_targets = 3 + X, y, _, _ = build_dataset( + n_samples=10, n_features=8, n_informative_features=10, n_targets=n_targets + ) + estimator = ElasticNet(alpha=0.01) + estimator.fit(X, y) + coef, intercept, dual_gap = ( + estimator.coef_, + estimator.intercept_, + estimator.dual_gap_, + ) + + for k in range(n_targets): + estimator.fit(X, y[:, k]) + assert_array_almost_equal(coef[k, :], estimator.coef_) + assert_array_almost_equal(intercept[k], estimator.intercept_) + assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) + + +def test_multioutput_enetcv_error(): + rng = np.random.RandomState(0) + X = rng.randn(10, 2) + y = rng.randn(10, 2) + clf = ElasticNetCV() + with pytest.raises(ValueError): + clf.fit(X, y) + + +def test_multitask_enet_and_lasso_cv(): + X, y, _, _ = build_dataset(n_features=50, n_targets=3) + clf = MultiTaskElasticNetCV(cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.00556, 3) + clf = MultiTaskLassoCV(cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.00278, 3) + + X, y, _, _ = build_dataset(n_targets=3) + clf = MultiTaskElasticNetCV( + n_alphas=10, eps=1e-3, max_iter=100, l1_ratio=[0.3, 0.5], tol=1e-3, cv=3 + ) + clf.fit(X, y) + assert 0.5 == clf.l1_ratio_ + assert (3, X.shape[1]) == clf.coef_.shape + assert (3,) == clf.intercept_.shape + assert (2, 10, 3) == clf.mse_path_.shape + assert (2, 10) == clf.alphas_.shape + + X, y, _, _ = build_dataset(n_targets=3) + clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3, cv=3) + clf.fit(X, y) + assert (3, X.shape[1]) == clf.coef_.shape + assert (3,) == clf.intercept_.shape + assert (10, 3) == clf.mse_path_.shape + assert 10 == len(clf.alphas_) + + +def test_1d_multioutput_enet_and_multitask_enet_cv(): + X, y, _, _ = build_dataset(n_features=10) + y = y[:, np.newaxis] + clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf.fit(X, y[:, 0]) + clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf1.fit(X, y) + assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_) + assert_almost_equal(clf.alpha_, clf1.alpha_) + assert_almost_equal(clf.coef_, clf1.coef_[0]) + assert_almost_equal(clf.intercept_, clf1.intercept_[0]) + + +def test_1d_multioutput_lasso_and_multitask_lasso_cv(): + X, y, _, _ = build_dataset(n_features=10) + y = y[:, np.newaxis] + clf = LassoCV(n_alphas=5, eps=2e-3) + clf.fit(X, y[:, 0]) + clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3) + clf1.fit(X, y) + assert_almost_equal(clf.alpha_, clf1.alpha_) + assert_almost_equal(clf.coef_, clf1.coef_[0]) + assert_almost_equal(clf.intercept_, clf1.intercept_[0]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_dtype_enet_and_lassocv(csr_container): + X, y, _, _ = 
build_dataset(n_features=10) + clf = ElasticNetCV(n_alphas=5) + clf.fit(csr_container(X), y) + clf1 = ElasticNetCV(n_alphas=5) + clf1.fit(csr_container(X, dtype=np.float32), y) + assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) + assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) + + clf = LassoCV(n_alphas=5) + clf.fit(csr_container(X), y) + clf1 = LassoCV(n_alphas=5) + clf1.fit(csr_container(X, dtype=np.float32), y) + assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) + assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) + + +def test_elasticnet_precompute_incorrect_gram(): + # check that passing an invalid precomputed Gram matrix will raise an + # error. + X, y, _, _ = build_dataset() + + rng = np.random.RandomState(0) + + X_centered = X - np.average(X, axis=0) + garbage = rng.standard_normal(X.shape) + precompute = np.dot(garbage.T, garbage) + + clf = ElasticNet(alpha=0.01, precompute=precompute) + msg = "Gram matrix.*did not pass validation.*" + with pytest.raises(ValueError, match=msg): + clf.fit(X_centered, y) + + +def test_elasticnet_precompute_gram_weighted_samples(): + # check the equivalence between passing a precomputed Gram matrix and + # internal computation using sample weights. + X, y, _, _ = build_dataset() + + rng = np.random.RandomState(0) + sample_weight = rng.lognormal(size=y.shape) + + w_norm = sample_weight * (y.shape / np.sum(sample_weight)) + X_c = X - np.average(X, axis=0, weights=w_norm) + X_r = X_c * np.sqrt(w_norm)[:, np.newaxis] + gram = np.dot(X_r.T, X_r) + + clf1 = ElasticNet(alpha=0.01, precompute=gram) + clf1.fit(X_c, y, sample_weight=sample_weight) + + clf2 = ElasticNet(alpha=0.01, precompute=False) + clf2.fit(X, y, sample_weight=sample_weight) + + assert_allclose(clf1.coef_, clf2.coef_) + + +def test_elasticnet_precompute_gram(): + # Check the dtype-aware check for a precomputed Gram matrix + # (see https://github.com/scikit-learn/scikit-learn/pull/22059 + # and https://github.com/scikit-learn/scikit-learn/issues/21997). + # Here: (X_c.T, X_c)[2, 3] is not equal to np.dot(X_c[:, 2], X_c[:, 3]) + # but within tolerance for np.float32 + + rng = np.random.RandomState(58) + X = rng.binomial(1, 0.25, (1000, 4)).astype(np.float32) + y = rng.rand(1000).astype(np.float32) + + X_c = X - np.average(X, axis=0) + gram = np.dot(X_c.T, X_c) + + clf1 = ElasticNet(alpha=0.01, precompute=gram) + clf1.fit(X_c, y) + + clf2 = ElasticNet(alpha=0.01, precompute=False) + clf2.fit(X, y) + + assert_allclose(clf1.coef_, clf2.coef_) + + +def test_warm_start_convergence(): + X, y, _, _ = build_dataset() + model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y) + n_iter_reference = model.n_iter_ + + # This dataset is not trivial enough for the model to converge in one pass. + assert n_iter_reference > 2 + + # Check that n_iter_ is invariant to multiple calls to fit + # when warm_start=False, all else being equal. 
+ model.fit(X, y) + n_iter_cold_start = model.n_iter_ + assert n_iter_cold_start == n_iter_reference + + # Fit the same model again, using a warm start: the optimizer just performs + # a single pass before checking that it has already converged + model.set_params(warm_start=True) + model.fit(X, y) + n_iter_warm_start = model.n_iter_ + assert n_iter_warm_start == 1 + + +def test_warm_start_convergence_with_regularizer_decrement(): + X, y = load_diabetes(return_X_y=True) + + # Train a model to converge on a lightly regularized problem + final_alpha = 1e-5 + low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y) + + # Fitting a new model on a more regularized version of the same problem. + # Fitting with high regularization is easier it should converge faster + # in general. + high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y) + assert low_reg_model.n_iter_ > high_reg_model.n_iter_ + + # Fit the solution to the original, less regularized version of the + # problem but from the solution of the highly regularized variant of + # the problem as a better starting point. This should also converge + # faster than the original model that starts from zero. + warm_low_reg_model = deepcopy(high_reg_model) + warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha) + warm_low_reg_model.fit(X, y) + assert low_reg_model.n_iter_ > warm_low_reg_model.n_iter_ + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_random_descent(csr_container): + # Test that both random and cyclic selection give the same results. + # Ensure that the test models fully converge and check a wide + # range of conditions. + + # This uses the coordinate descent algo using the gram trick. + X, y, _, _ = build_dataset(n_samples=50, n_features=20) + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X, y) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X, y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # This uses the descent algo without the gram trick + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X.T, y[:20]) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X.T, y[:20]) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # Sparse Case + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(csr_container(X), y) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(csr_container(X), y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # Multioutput case. 
+ new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) + clf_cyclic = MultiTaskElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X, new_y) + clf_random = MultiTaskElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X, new_y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + +def test_enet_path_positive(): + # Test positive parameter + + X, Y, _, _ = build_dataset(n_samples=50, n_features=50, n_targets=2) + + # For mono output + # Test that the coefs returned by positive=True in enet_path are positive + for path in [enet_path, lasso_path]: + pos_path_coef = path(X, Y[:, 0], positive=True)[1] + assert np.all(pos_path_coef >= 0) + + # For multi output, positive parameter is not allowed + # Test that an error is raised + for path in [enet_path, lasso_path]: + with pytest.raises(ValueError): + path(X, Y, positive=True) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_dense_descent_paths(csr_container): + # Test that dense and sparse input give the same input for descent paths. + X, y, _, _ = build_dataset(n_samples=50, n_features=20) + csr = csr_container(X) + for path in [enet_path, lasso_path]: + _, coefs, _ = path(X, y) + _, sparse_coefs, _ = path(csr, y) + assert_array_almost_equal(coefs, sparse_coefs) + + +@pytest.mark.parametrize("path_func", [enet_path, lasso_path]) +def test_path_unknown_parameter(path_func): + """Check that passing parameter not used by the coordinate descent solver + will raise an error.""" + X, y, _, _ = build_dataset(n_samples=50, n_features=20) + err_msg = "Unexpected parameters in params" + with pytest.raises(ValueError, match=err_msg): + path_func(X, y, normalize=True, fit_intercept=True) + + +def test_check_input_false(): + X, y, _, _ = build_dataset(n_samples=20, n_features=10) + X = check_array(X, order="F", dtype="float64") + y = check_array(X, order="F", dtype="float64") + clf = ElasticNet(selection="cyclic", tol=1e-8) + # Check that no error is raised if data is provided in the right format + clf.fit(X, y, check_input=False) + # With check_input=False, an exhaustive check is not made on y but its + # dtype is still cast in _preprocess_data to X's dtype. So the test should + # pass anyway + X = check_array(X, order="F", dtype="float32") + clf.fit(X, y, check_input=False) + # With no input checking, providing X in C order should result in false + # computation + X = check_array(X, order="C", dtype="float64") + with pytest.raises(ValueError): + clf.fit(X, y, check_input=False) + + +@pytest.mark.parametrize("check_input", [True, False]) +def test_enet_copy_X_True(check_input): + X, y, _, _ = build_dataset() + X = X.copy(order="F") + + original_X = X.copy() + enet = ElasticNet(copy_X=True) + enet.fit(X, y, check_input=check_input) + + assert_array_equal(original_X, X) + + +def test_enet_copy_X_False_check_input_False(): + X, y, _, _ = build_dataset() + X = X.copy(order="F") + + original_X = X.copy() + enet = ElasticNet(copy_X=False) + enet.fit(X, y, check_input=False) + + # No copying, X is overwritten + assert np.any(np.not_equal(original_X, X)) + + +def test_overrided_gram_matrix(): + X, y, _, _ = build_dataset(n_samples=20, n_features=10) + Gram = X.T.dot(X) + clf = ElasticNet(selection="cyclic", tol=1e-8, precompute=Gram) + warning_message = ( + "Gram matrix was provided but X was centered" + " to fit intercept: recomputing Gram matrix." 
+ ) + with pytest.warns(UserWarning, match=warning_message): + clf.fit(X, y) + + +@pytest.mark.parametrize("model", [ElasticNet, Lasso]) +def test_lasso_non_float_y(model): + X = [[0, 0], [1, 1], [-1, -1]] + y = [0, 1, 2] + y_float = [0.0, 1.0, 2.0] + + clf = model(fit_intercept=False) + clf.fit(X, y) + clf_float = model(fit_intercept=False) + clf_float.fit(X, y_float) + assert_array_equal(clf.coef_, clf_float.coef_) + + +def test_enet_float_precision(): + # Generate dataset + X, y, X_test, y_test = build_dataset(n_samples=20, n_features=10) + # Here we have a small number of iterations, and thus the + # ElasticNet might not converge. This is to speed up tests + + for fit_intercept in [True, False]: + coef = {} + intercept = {} + for dtype in [np.float64, np.float32]: + clf = ElasticNet( + alpha=0.5, + max_iter=100, + precompute=False, + fit_intercept=fit_intercept, + ) + + X = dtype(X) + y = dtype(y) + ignore_warnings(clf.fit)(X, y) + + coef[("simple", dtype)] = clf.coef_ + intercept[("simple", dtype)] = clf.intercept_ + + assert clf.coef_.dtype == dtype + + # test precompute Gram array + Gram = X.T.dot(X) + clf_precompute = ElasticNet( + alpha=0.5, + max_iter=100, + precompute=Gram, + fit_intercept=fit_intercept, + ) + ignore_warnings(clf_precompute.fit)(X, y) + assert_array_almost_equal(clf.coef_, clf_precompute.coef_) + assert_array_almost_equal(clf.intercept_, clf_precompute.intercept_) + + # test multi task enet + multi_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) + clf_multioutput = MultiTaskElasticNet( + alpha=0.5, + max_iter=100, + fit_intercept=fit_intercept, + ) + clf_multioutput.fit(X, multi_y) + coef[("multi", dtype)] = clf_multioutput.coef_ + intercept[("multi", dtype)] = clf_multioutput.intercept_ + assert clf.coef_.dtype == dtype + + for v in ["simple", "multi"]: + assert_array_almost_equal( + coef[(v, np.float32)], coef[(v, np.float64)], decimal=4 + ) + assert_array_almost_equal( + intercept[(v, np.float32)], intercept[(v, np.float64)], decimal=4 + ) + + +def test_enet_l1_ratio(): + # Test that an error message is raised if an estimator that + # uses _alpha_grid is called with l1_ratio=0 + msg = ( + "Automatic alpha grid generation is not supported for l1_ratio=0. " + "Please supply a grid by providing your estimator with the " + "appropriate `alphas=` argument." + ) + X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T + y = np.array([12, 10, 11, 21, 5]) + + with pytest.raises(ValueError, match=msg): + ElasticNetCV(l1_ratio=0, random_state=42).fit(X, y) + + with pytest.raises(ValueError, match=msg): + MultiTaskElasticNetCV(l1_ratio=0, random_state=42).fit(X, y[:, None]) + + # Test that l1_ratio=0 with alpha>0 produces user warning + warning_message = ( + "Coordinate descent without L1 regularization may " + "lead to unexpected results and is discouraged. " + "Set l1_ratio > 0 to add L1 regularization." 
+ ) + est = ElasticNetCV(l1_ratio=[0], alphas=[1]) + with pytest.warns(UserWarning, match=warning_message): + est.fit(X, y) + + # Test that l1_ratio=0 is allowed if we supply a grid manually + alphas = [0.1, 10] + estkwds = {"alphas": alphas, "random_state": 42} + est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds) + est = ElasticNetCV(l1_ratio=0, **estkwds) + with ignore_warnings(): + est_desired.fit(X, y) + est.fit(X, y) + assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5) + + est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds) + est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds) + with ignore_warnings(): + est.fit(X, y[:, None]) + est_desired.fit(X, y[:, None]) + assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5) + + +def test_coef_shape_not_zero(): + est_no_intercept = Lasso(fit_intercept=False) + est_no_intercept.fit(np.c_[np.ones(3)], np.ones(3)) + assert est_no_intercept.coef_.shape == (1,) + + +def test_warm_start_multitask_lasso(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + clf = MultiTaskLasso(alpha=0.1, max_iter=5, warm_start=True) + ignore_warnings(clf.fit)(X, Y) + ignore_warnings(clf.fit)(X, Y) # do a second round with 5 iterations + + clf2 = MultiTaskLasso(alpha=0.1, max_iter=10) + ignore_warnings(clf2.fit)(X, Y) + assert_array_almost_equal(clf2.coef_, clf.coef_) + + +@pytest.mark.parametrize( + "klass, n_classes, kwargs", + [ + (Lasso, 1, dict(precompute=True)), + (Lasso, 1, dict(precompute=False)), + (MultiTaskLasso, 2, dict()), + (MultiTaskLasso, 2, dict()), + ], +) +def test_enet_coordinate_descent(klass, n_classes, kwargs): + """Test that a warning is issued if model does not converge""" + clf = klass(max_iter=2, **kwargs) + n_samples = 5 + n_features = 2 + X = np.ones((n_samples, n_features)) * 1e50 + y = np.ones((n_samples, n_classes)) + if klass == Lasso: + y = y.ravel() + warning_message = ( + "Objective did not converge. You might want to" + " increase the number of iterations." 
+ ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, y) + + +def test_convergence_warnings(): + random_state = np.random.RandomState(0) + X = random_state.standard_normal((1000, 500)) + y = random_state.standard_normal((1000, 3)) + + # check that the model converges w/o convergence warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + MultiTaskElasticNet().fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_convergence_warning(csr_container): + X, y, _, _ = build_dataset(n_samples=1000, n_features=500) + + with pytest.warns(ConvergenceWarning): + ElasticNet(max_iter=1, tol=0).fit(csr_container(X, dtype=np.float32), y) + + # check that the model converges w/o convergence warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + Lasso().fit(csr_container(X, dtype=np.float32), y) + + +@pytest.mark.parametrize( + "precompute, inner_precompute", + [ + (True, True), + ("auto", False), + (False, False), + ], +) +def test_lassoCV_does_not_set_precompute(monkeypatch, precompute, inner_precompute): + X, y, _, _ = build_dataset() + calls = 0 + + class LassoMock(Lasso): + def fit(self, X, y): + super().fit(X, y) + nonlocal calls + calls += 1 + assert self.precompute == inner_precompute + + monkeypatch.setattr("sklearn.linear_model._coordinate_descent.Lasso", LassoMock) + clf = LassoCV(precompute=precompute) + clf.fit(X, y) + assert calls > 0 + + +def test_multi_task_lasso_cv_dtype(): + n_samples, n_features = 10, 3 + rng = np.random.RandomState(42) + X = rng.binomial(1, 0.5, size=(n_samples, n_features)) + X = X.astype(int) # make it explicit that X is int + y = X[:, [0, 0]].copy() + est = MultiTaskLassoCV(n_alphas=5, fit_intercept=True).fit(X, y) + assert_array_almost_equal(est.coef_, [[1, 0, 0]] * 2, decimal=3) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("alpha", [0.01]) +@pytest.mark.parametrize("precompute", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_enet_sample_weight_consistency( + fit_intercept, alpha, precompute, sparse_container, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weights_invariance alone and also tests sparse X. + """ + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + precompute=precompute, + tol=1e-6, + l1_ratio=0.5, + ) + + reg = ElasticNet(**params).fit(X, y) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # 1) sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect, cf. 
np.average() + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + reg = reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + reg.fit(X, y, sample_weight=np.pi * sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 4) setting elements of sample_weight to 0 is equivalent to removing these samples + sample_weight_0 = sample_weight.copy() + sample_weight_0[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight_0) + coef_0 = reg.coef_.copy() + if fit_intercept: + intercept_0 = reg.intercept_ + reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5]) + assert_allclose(reg.coef_, coef_0, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept_0) + + # 5) check that multiplying sample_weight by 2 is equivalent to repeating + # corresponding samples twice + if sparse_container is not None: + X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc") + else: + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + + reg1 = ElasticNet(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = ElasticNet(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_enet_cv_sample_weight_correctness(fit_intercept, sparse_container): + """Test that ElasticNetCV with sample weights gives correct results.""" + rng = np.random.RandomState(42) + n_splits, n_samples, n_features = 3, 10, 5 + X = rng.rand(n_splits * n_samples, n_features) + beta = rng.rand(n_features) + beta[0:2] = 0 + y = X @ beta + rng.rand(n_splits * n_samples) + sw = np.ones_like(y) + if sparse_container is not None: + X = sparse_container(X) + params = dict(tol=1e-6) + + # Set alphas, otherwise the two cv models might use different ones. + if fit_intercept: + alphas = np.linspace(0.001, 0.01, num=91) + else: + alphas = np.linspace(0.01, 0.1, num=91) + + # We weight the first fold 2 times more. + sw[:n_samples] = 2 + groups_sw = np.r_[ + np.full(n_samples, 0), np.full(n_samples, 1), np.full(n_samples, 2) + ] + splits_sw = list(LeaveOneGroupOut().split(X, groups=groups_sw)) + reg_sw = ElasticNetCV( + alphas=alphas, cv=splits_sw, fit_intercept=fit_intercept, **params + ) + reg_sw.fit(X, y, sample_weight=sw) + + # We repeat the first fold 2 times and provide splits ourselves + if sparse_container is not None: + X = X.toarray() + X = np.r_[X[:n_samples], X] + if sparse_container is not None: + X = sparse_container(X) + y = np.r_[y[:n_samples], y] + groups = np.r_[ + np.full(2 * n_samples, 0), np.full(n_samples, 1), np.full(n_samples, 2) + ] + splits = list(LeaveOneGroupOut().split(X, groups=groups)) + reg = ElasticNetCV(alphas=alphas, cv=splits, fit_intercept=fit_intercept, **params) + reg.fit(X, y) + + # ensure that we chose meaningful alphas, i.e. 
not boundaries + assert alphas[0] < reg.alpha_ < alphas[-1] + assert reg_sw.alpha_ == reg.alpha_ + assert_allclose(reg_sw.coef_, reg.coef_) + assert reg_sw.intercept_ == pytest.approx(reg.intercept_) + + +@pytest.mark.parametrize("sample_weight", [False, True]) +def test_enet_cv_grid_search(sample_weight): + """Test that ElasticNetCV gives same result as GridSearchCV.""" + n_samples, n_features = 200, 10 + cv = 5 + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + effective_rank=10, + n_informative=n_features - 4, + noise=10, + random_state=0, + ) + if sample_weight: + sample_weight = np.linspace(1, 5, num=n_samples) + else: + sample_weight = None + + alphas = np.logspace(np.log10(1e-5), np.log10(1), num=10) + l1_ratios = [0.1, 0.5, 0.9] + reg = ElasticNetCV(cv=cv, alphas=alphas, l1_ratio=l1_ratios) + reg.fit(X, y, sample_weight=sample_weight) + + param = {"alpha": alphas, "l1_ratio": l1_ratios} + gs = GridSearchCV( + estimator=ElasticNet(), + param_grid=param, + cv=cv, + scoring="neg_mean_squared_error", + ).fit(X, y, sample_weight=sample_weight) + + assert reg.l1_ratio_ == pytest.approx(gs.best_params_["l1_ratio"]) + assert reg.alpha_ == pytest.approx(gs.best_params_["alpha"]) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("l1_ratio", [0, 0.5, 1]) +@pytest.mark.parametrize("precompute", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_enet_cv_sample_weight_consistency( + fit_intercept, l1_ratio, precompute, sparse_container +): + """Test that the impact of sample_weight is consistent.""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = X.sum(axis=1) + rng.rand(n_samples) + params = dict( + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + precompute=precompute, + tol=1e-6, + cv=3, + ) + if sparse_container is not None: + X = sparse_container(X) + + if l1_ratio == 0: + params.pop("l1_ratio", None) + reg = LassoCV(**params).fit(X, y) + else: + reg = ElasticNetCV(**params).fit(X, y) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # scaling of sample_weight should have no effect, cf. np.average() + sample_weight = 2 * np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + +@pytest.mark.parametrize("estimator", [ElasticNetCV, LassoCV]) +def test_linear_models_cv_fit_with_loky(estimator): + # LinearModelsCV.fit performs inplace operations on fancy-indexed memmapped + # data when using the loky backend, causing an error due to unexpected + # behavior of fancy indexing of read-only memmaps (cf. numpy#14132). + + # Create a problem sufficiently large to cause memmapping (1MB). + # Unfortunately the scikit-learn and joblib APIs do not make it possible to + # change the max_nbyte of the inner Parallel call. 
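+    # joblib's loky backend memory-maps arguments larger than max_nbytes
+    # (1M by default) and opens them read-only, which is what this test
+    # relies on to reproduce the original failure.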
+ X, y = make_regression(int(1e6) // 8 + 1, 1) + assert X.nbytes > 1e6 # 1 MB + with joblib.parallel_backend("loky"): + estimator(n_jobs=2, cv=3).fit(X, y) + + +@pytest.mark.parametrize("check_input", [True, False]) +def test_enet_sample_weight_does_not_overwrite_sample_weight(check_input): + """Check that ElasticNet does not overwrite sample_weights.""" + + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + sample_weight_1_25 = 1.25 * np.ones_like(y) + sample_weight = sample_weight_1_25.copy() + + reg = ElasticNet() + reg.fit(X, y, sample_weight=sample_weight, check_input=check_input) + + assert_array_equal(sample_weight, sample_weight_1_25) + + +@pytest.mark.parametrize("ridge_alpha", [1e-1, 1.0, 1e6]) +def test_enet_ridge_consistency(ridge_alpha): + # Check that ElasticNet(l1_ratio=0) converges to the same solution as Ridge + # provided that the value of alpha is adapted. + # + # XXX: this test does not pass for weaker regularization (lower values of + # ridge_alpha): it could be either a problem of ElasticNet or Ridge (less + # likely) and depends on the dataset statistics: lower values for + # effective_rank are more problematic in particular. + + rng = np.random.RandomState(42) + n_samples = 300 + X, y = make_regression( + n_samples=n_samples, + n_features=100, + effective_rank=10, + n_informative=50, + random_state=rng, + ) + sw = rng.uniform(low=0.01, high=10, size=X.shape[0]) + alpha = 1.0 + common_params = dict( + tol=1e-12, + ) + ridge = Ridge(alpha=alpha, **common_params).fit(X, y, sample_weight=sw) + + alpha_enet = alpha / sw.sum() + enet = ElasticNet(alpha=alpha_enet, l1_ratio=0, **common_params).fit( + X, y, sample_weight=sw + ) + assert_allclose(ridge.coef_, enet.coef_) + assert_allclose(ridge.intercept_, enet.intercept_) + + +@pytest.mark.parametrize( + "estimator", + [ + Lasso(alpha=1.0), + ElasticNet(alpha=1.0, l1_ratio=0.1), + ], +) +def test_sample_weight_invariance(estimator): + rng = np.random.RandomState(42) + X, y = make_regression( + n_samples=100, + n_features=300, + effective_rank=10, + n_informative=50, + random_state=rng, + ) + sw = rng.uniform(low=0.01, high=2, size=X.shape[0]) + params = dict(tol=1e-12) + + # Check that setting some weights to 0 is equivalent to trimming the + # samples: + cutoff = X.shape[0] // 3 + sw_with_null = sw.copy() + sw_with_null[:cutoff] = 0.0 + X_trimmed, y_trimmed = X[cutoff:, :], y[cutoff:] + sw_trimmed = sw[cutoff:] + + reg_trimmed = ( + clone(estimator) + .set_params(**params) + .fit(X_trimmed, y_trimmed, sample_weight=sw_trimmed) + ) + reg_null_weighted = ( + clone(estimator).set_params(**params).fit(X, y, sample_weight=sw_with_null) + ) + assert_allclose(reg_null_weighted.coef_, reg_trimmed.coef_) + assert_allclose(reg_null_weighted.intercept_, reg_trimmed.intercept_) + + # Check that duplicating the training dataset is equivalent to multiplying + # the weights by 2: + X_dup = np.concatenate([X, X], axis=0) + y_dup = np.concatenate([y, y], axis=0) + sw_dup = np.concatenate([sw, sw], axis=0) + + reg_2sw = clone(estimator).set_params(**params).fit(X, y, sample_weight=2 * sw) + reg_dup = ( + clone(estimator).set_params(**params).fit(X_dup, y_dup, sample_weight=sw_dup) + ) + + assert_allclose(reg_2sw.coef_, reg_dup.coef_) + assert_allclose(reg_2sw.intercept_, reg_dup.intercept_) + + +def test_read_only_buffer(): + """Test that sparse coordinate descent works for read-only buffers""" + + rng = np.random.RandomState(0) + clf = ElasticNet(alpha=0.1, 
copy_X=True, random_state=rng) + X = np.asfortranarray(rng.uniform(size=(100, 10))) + X.setflags(write=False) + + y = rng.rand(100) + clf.fit(X, y) + + +@pytest.mark.parametrize( + "EstimatorCV", + [ElasticNetCV, LassoCV, MultiTaskElasticNetCV, MultiTaskLassoCV], +) +def test_cv_estimators_reject_params_with_no_routing_enabled(EstimatorCV): + """Check that the models inheriting from class:`LinearModelCV` raise an + error when any `params` are passed when routing is not enabled. + """ + X, y = make_regression(random_state=42) + groups = np.array([0, 1] * (len(y) // 2)) + estimator = EstimatorCV() + msg = "is only supported if enable_metadata_routing=True" + with pytest.raises(ValueError, match=msg): + estimator.fit(X, y, groups=groups) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "MultiTaskEstimatorCV", + [MultiTaskElasticNetCV, MultiTaskLassoCV], +) +def test_multitask_cv_estimators_with_sample_weight(MultiTaskEstimatorCV): + """Check that for :class:`MultiTaskElasticNetCV` and + class:`MultiTaskLassoCV` if `sample_weight` is passed and the + CV splitter does not support `sample_weight` an error is raised. + On the other hand if the splitter does support `sample_weight` + while `sample_weight` is passed there is no error and process + completes smoothly as before. + """ + + class CVSplitter(BaseCrossValidator, GroupsConsumerMixin): + def get_n_splits(self, X=None, y=None, groups=None, metadata=None): + pass # pragma: nocover + + class CVSplitterSampleWeight(CVSplitter): + def split(self, X, y=None, groups=None, sample_weight=None): + split_index = len(X) // 2 + train_indices = list(range(0, split_index)) + test_indices = list(range(split_index, len(X))) + yield test_indices, train_indices + yield train_indices, test_indices + + X, y = make_regression(random_state=42, n_targets=2) + sample_weight = np.ones(X.shape[0]) + + # If CV splitter does not support sample_weight an error is raised + splitter = CVSplitter().set_split_request(groups=True) + estimator = MultiTaskEstimatorCV(cv=splitter) + msg = "do not support sample weights" + with pytest.raises(ValueError, match=msg): + estimator.fit(X, y, sample_weight=sample_weight) + + # If CV splitter does support sample_weight no error is raised + splitter = CVSplitterSampleWeight().set_split_request( + groups=True, sample_weight=True + ) + estimator = MultiTaskEstimatorCV(cv=splitter) + estimator.fit(X, y, sample_weight=sample_weight) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py new file mode 100644 index 0000000000000000000000000000000000000000..3856d74464f0b31851095d5298c91b8cf79fd9fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py @@ -0,0 +1,216 @@ +# Authors: Manoj Kumar mks542@nyu.edu +# License: BSD 3 clause + +import numpy as np +import pytest +from scipy import optimize + +from sklearn.datasets import make_regression +from sklearn.linear_model import HuberRegressor, LinearRegression, Ridge, SGDRegressor +from sklearn.linear_model._huber import _huber_loss_and_gradient +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def make_regression_with_outliers(n_samples=50, n_features=20): + rng = np.random.RandomState(0) + # Generate data with outliers by replacing 10% of the samples with noise. 
+    X, y = make_regression(
+        n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05
+    )
+
+    # Replace 10% of the samples with noise.
+    num_noise = int(0.1 * n_samples)
+    random_samples = rng.randint(0, n_samples, num_noise)
+    X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
+    return X, y
+
+
+def test_huber_equals_lr_for_high_epsilon():
+    # Test that HuberRegressor matches LinearRegression for large epsilon
+    X, y = make_regression_with_outliers()
+    lr = LinearRegression()
+    lr.fit(X, y)
+    huber = HuberRegressor(epsilon=1e3, alpha=0.0)
+    huber.fit(X, y)
+    assert_almost_equal(huber.coef_, lr.coef_, 3)
+    assert_almost_equal(huber.intercept_, lr.intercept_, 2)
+
+
+def test_huber_max_iter():
+    X, y = make_regression_with_outliers()
+    huber = HuberRegressor(max_iter=1)
+    huber.fit(X, y)
+    assert huber.n_iter_ == huber.max_iter
+
+
+def test_huber_gradient():
+    # Test that the gradient calculated by _huber_loss_and_gradient is correct
+    rng = np.random.RandomState(1)
+    X, y = make_regression_with_outliers()
+    sample_weight = rng.randint(1, 3, (y.shape[0]))
+
+    def loss_func(x, *args):
+        return _huber_loss_and_gradient(x, *args)[0]
+
+    def grad_func(x, *args):
+        return _huber_loss_and_gradient(x, *args)[1]
+
+    # Check with optimize.check_grad that the analytical gradient matches the
+    # numerical one.
+    for _ in range(5):
+        # Check both with and without an intercept term.
+        for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
+            w = rng.randn(n_features)
+            w[-1] = np.abs(w[-1])
+            grad_same = optimize.check_grad(
+                loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight
+            )
+            assert_almost_equal(grad_same, 1e-6, 4)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_huber_sample_weights(csr_container):
+    # Test the sample_weight implementation in HuberRegressor
+
+    X, y = make_regression_with_outliers()
+    huber = HuberRegressor()
+    huber.fit(X, y)
+    huber_coef = huber.coef_
+    huber_intercept = huber.intercept_
+
+    # Rescale coefs before comparing with assert_array_almost_equal to make
+    # sure that the number of decimal places used is somewhat insensitive to
+    # the amplitude of the coefficients and therefore to the scale of the
+    # data and the regularization parameter
+    scale = max(np.mean(np.abs(huber.coef_)), np.mean(np.abs(huber.intercept_)))
+
+    huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
+    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
+    assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
+
+    X, y = make_regression_with_outliers(n_samples=5, n_features=20)
+    X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
+    y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
+    huber.fit(X_new, y_new)
+    huber_coef = huber.coef_
+    huber_intercept = huber.intercept_
+    sample_weight = np.ones(X.shape[0])
+    sample_weight[1] = 3
+    sample_weight[3] = 2
+    huber.fit(X, y, sample_weight=sample_weight)
+
+    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
+    assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
+
+    # Test the sparse implementation with sample weights.
+ X_csr = csr_container(X) + huber_sparse = HuberRegressor() + huber_sparse.fit(X_csr, y, sample_weight=sample_weight) + assert_array_almost_equal(huber_sparse.coef_ / scale, huber_coef / scale) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_huber_sparse(csr_container): + X, y = make_regression_with_outliers() + huber = HuberRegressor(alpha=0.1) + huber.fit(X, y) + + X_csr = csr_container(X) + huber_sparse = HuberRegressor(alpha=0.1) + huber_sparse.fit(X_csr, y) + assert_array_almost_equal(huber_sparse.coef_, huber.coef_) + assert_array_equal(huber.outliers_, huber_sparse.outliers_) + + +def test_huber_scaling_invariant(): + # Test that outliers filtering is scaling independent. + X, y = make_regression_with_outliers() + huber = HuberRegressor(fit_intercept=False, alpha=0.0) + huber.fit(X, y) + n_outliers_mask_1 = huber.outliers_ + assert not np.all(n_outliers_mask_1) + + huber.fit(X, 2.0 * y) + n_outliers_mask_2 = huber.outliers_ + assert_array_equal(n_outliers_mask_2, n_outliers_mask_1) + + huber.fit(2.0 * X, 2.0 * y) + n_outliers_mask_3 = huber.outliers_ + assert_array_equal(n_outliers_mask_3, n_outliers_mask_1) + + +def test_huber_and_sgd_same_results(): + # Test they should converge to same coefficients for same parameters + + X, y = make_regression_with_outliers(n_samples=10, n_features=2) + + # Fit once to find out the scale parameter. Scale down X and y by scale + # so that the scale parameter is optimized to 1.0 + huber = HuberRegressor(fit_intercept=False, alpha=0.0, epsilon=1.35) + huber.fit(X, y) + X_scale = X / huber.scale_ + y_scale = y / huber.scale_ + huber.fit(X_scale, y_scale) + assert_almost_equal(huber.scale_, 1.0, 3) + + sgdreg = SGDRegressor( + alpha=0.0, + loss="huber", + shuffle=True, + random_state=0, + max_iter=10000, + fit_intercept=False, + epsilon=1.35, + tol=None, + ) + sgdreg.fit(X_scale, y_scale) + assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1) + + +def test_huber_warm_start(): + X, y = make_regression_with_outliers() + huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1) + + huber_warm.fit(X, y) + huber_warm_coef = huber_warm.coef_.copy() + huber_warm.fit(X, y) + + # SciPy performs the tol check after doing the coef updates, so + # these would be almost same but not equal. + assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1) + + assert huber_warm.n_iter_ == 0 + + +def test_huber_better_r2_score(): + # Test that huber returns a better r2 score than non-outliers""" + X, y = make_regression_with_outliers() + huber = HuberRegressor(alpha=0.01) + huber.fit(X, y) + linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y + mask = np.abs(linear_loss) < huber.epsilon * huber.scale_ + huber_score = huber.score(X[mask], y[mask]) + huber_outlier_score = huber.score(X[~mask], y[~mask]) + + # The Ridge regressor should be influenced by the outliers and hence + # give a worse score on the non-outliers as compared to the huber + # regressor. + ridge = Ridge(alpha=0.01) + ridge.fit(X, y) + ridge_score = ridge.score(X[mask], y[mask]) + ridge_outlier_score = ridge.score(X[~mask], y[~mask]) + assert huber_score > ridge_score + + # The huber model should also fit poorly on the outliers. 
+ assert ridge_outlier_score > huber_outlier_score + + +def test_huber_bool(): + # Test that it does not crash with bool data + X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0) + X_bool = X > 0 + HuberRegressor().fit(X_bool, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py new file mode 100644 index 0000000000000000000000000000000000000000..9974090135ac501da0935ee3048a112f305eebcf --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py @@ -0,0 +1,2194 @@ +import itertools +import os +import warnings +from functools import partial + +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from scipy import sparse + +from sklearn import config_context +from sklearn.base import clone +from sklearn.datasets import load_iris, make_classification +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import SGDClassifier +from sklearn.linear_model._logistic import ( + LogisticRegression as LogisticRegressionDefault, +) +from sklearn.linear_model._logistic import ( + LogisticRegressionCV as LogisticRegressionCVDefault, +) +from sklearn.linear_model._logistic import ( + _log_reg_scoring_path, + _logistic_regression_path, +) +from sklearn.metrics import get_scorer, log_loss +from sklearn.model_selection import ( + GridSearchCV, + StratifiedKFold, + cross_val_score, + train_test_split, +) +from sklearn.preprocessing import LabelEncoder, StandardScaler, scale +from sklearn.svm import l1_min_c +from sklearn.utils import _IS_32BIT, compute_class_weight, shuffle +from sklearn.utils._testing import ignore_warnings, skip_if_no_parallel +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +pytestmark = pytest.mark.filterwarnings( + "error::sklearn.exceptions.ConvergenceWarning:sklearn.*" +) +# Fixing random_state helps prevent ConvergenceWarnings +LogisticRegression = partial(LogisticRegressionDefault, random_state=0) +LogisticRegressionCV = partial(LogisticRegressionCVDefault, random_state=0) + + +SOLVERS = ("lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga") +X = [[-1, 0], [0, 1], [1, 1]] +Y1 = [0, 1, 1] +Y2 = [2, 1, 0] +iris = load_iris() + + +def check_predictions(clf, X, y): + """Check that the model is able to fit the classification data""" + n_samples = len(y) + classes = np.unique(y) + n_classes = classes.shape[0] + + predicted = clf.fit(X, y).predict(X) + assert_array_equal(clf.classes_, classes) + + assert predicted.shape == (n_samples,) + assert_array_equal(predicted, y) + + probabilities = clf.predict_proba(X) + assert probabilities.shape == (n_samples, n_classes) + assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) + assert_array_equal(probabilities.argmax(axis=1), y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_2_classes(csr_container): + # Simple sanity check on a 2 classes dataset + # Make sure it predicts the correct result on simple datasets. 
+ check_predictions(LogisticRegression(random_state=0), X, Y1) + check_predictions(LogisticRegression(random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(C=100, random_state=0), X, Y1) + check_predictions(LogisticRegression(C=100, random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1) + check_predictions( + LogisticRegression(fit_intercept=False, random_state=0), csr_container(X), Y1 + ) + + +def test_logistic_cv_mock_scorer(): + class MockScorer: + def __init__(self): + self.calls = 0 + self.scores = [0.1, 0.4, 0.8, 0.5] + + def __call__(self, model, X, y, sample_weight=None): + score = self.scores[self.calls % len(self.scores)] + self.calls += 1 + return score + + mock_scorer = MockScorer() + Cs = [1, 2, 3, 4] + cv = 2 + + lr = LogisticRegressionCV(Cs=Cs, scoring=mock_scorer, cv=cv) + X, y = make_classification(random_state=0) + lr.fit(X, y) + + # Cs[2] has the highest score (0.8) from MockScorer + assert lr.C_[0] == Cs[2] + + # scorer called 8 times (cv*len(Cs)) + assert mock_scorer.calls == cv * len(Cs) + + # reset mock_scorer + mock_scorer.calls = 0 + custom_score = lr.score(X, lr.predict(X)) + + assert custom_score == mock_scorer.scores[0] + assert mock_scorer.calls == 1 + + +@skip_if_no_parallel +def test_lr_liblinear_warning(): + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + lr = LogisticRegression(solver="liblinear", n_jobs=2) + warning_message = ( + "'n_jobs' > 1 does not have any effect when" + " 'solver' is set to 'liblinear'. Got 'n_jobs'" + " = 2." + ) + with pytest.warns(UserWarning, match=warning_message): + lr.fit(iris.data, target) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_3_classes(csr_container): + check_predictions(LogisticRegression(C=10), X, Y2) + check_predictions(LogisticRegression(C=10), csr_container(X), Y2) + + +@pytest.mark.parametrize( + "clf", + [ + LogisticRegression(C=len(iris.data), solver="liblinear", multi_class="ovr"), + LogisticRegression(C=len(iris.data), solver="lbfgs", multi_class="multinomial"), + LogisticRegression( + C=len(iris.data), solver="newton-cg", multi_class="multinomial" + ), + LogisticRegression( + C=len(iris.data), solver="sag", tol=1e-2, multi_class="ovr", random_state=42 + ), + LogisticRegression( + C=len(iris.data), + solver="saga", + tol=1e-2, + multi_class="ovr", + random_state=42, + ), + LogisticRegression( + C=len(iris.data), solver="newton-cholesky", multi_class="ovr" + ), + ], +) +def test_predict_iris(clf): + """Test logistic regression with the iris dataset. + + Test that both multinomial and OvR solvers handle multiclass data correctly and + give good accuracy score (>0.95) for the training data. 
+ """ + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + if clf.solver == "lbfgs": + # lbfgs has convergence issues on the iris data with its default max_iter=100 + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + clf.fit(iris.data, target) + else: + clf.fit(iris.data, target) + assert_array_equal(np.unique(target), clf.classes_) + + pred = clf.predict(iris.data) + assert np.mean(pred == target) > 0.95 + + probabilities = clf.predict_proba(iris.data) + assert_allclose(probabilities.sum(axis=1), np.ones(n_samples)) + + pred = iris.target_names[probabilities.argmax(axis=1)] + assert np.mean(pred == target) > 0.95 + + +@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV]) +def test_check_solver_option(LR): + X, y = iris.data, iris.target + + # only 'liblinear' and 'newton-cholesky' solver + for solver in ["liblinear", "newton-cholesky"]: + msg = f"Solver {solver} does not support a multinomial backend." + lr = LR(solver=solver, multi_class="multinomial") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # all solvers except 'liblinear' and 'saga' + for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag"]: + msg = "Solver %s supports only 'l2' or None penalties," % solver + lr = LR(solver=solver, penalty="l1", multi_class="ovr") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag", "saga"]: + msg = "Solver %s supports only dual=False, got dual=True" % solver + lr = LR(solver=solver, dual=True, multi_class="ovr") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # only saga supports elasticnet. We only test for liblinear because the + # error is raised before for the other solvers (solver %s supports only l2 + # penalties) + for solver in ["liblinear"]: + msg = f"Only 'saga' solver supports elasticnet penalty, got solver={solver}." + lr = LR(solver=solver, penalty="elasticnet") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # liblinear does not support penalty='none' + # (LogisticRegressionCV does not supports penalty='none' at all) + if LR is LogisticRegression: + msg = "penalty=None is not supported for the liblinear solver" + lr = LR(penalty=None, solver="liblinear") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + +@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV]) +def test_elasticnet_l1_ratio_err_helpful(LR): + # Check that an informative error message is raised when penalty="elasticnet" + # but l1_ratio is not specified. + model = LR(penalty="elasticnet", solver="saga") + with pytest.raises(ValueError, match=r".*l1_ratio.*"): + model.fit(np.array([[1, 2], [3, 4]]), np.array([0, 1])) + + +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"]) +def test_multinomial_binary(solver): + # Test multinomial LR on a binary problem. 
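+    # In the binary case the multinomial parameterization collapses to a single
+    # coefficient row (symmetric +/- decision values), hence the (1, n_features)
+    # and (1,) shape assertions below.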
+ target = (iris.target > 0).astype(np.intp) + target = np.array(["setosa", "not-setosa"])[target] + + clf = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, max_iter=2000 + ) + clf.fit(iris.data, target) + + assert clf.coef_.shape == (1, iris.data.shape[1]) + assert clf.intercept_.shape == (1,) + assert_array_equal(clf.predict(iris.data), target) + + mlr = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, fit_intercept=False + ) + mlr.fit(iris.data, target) + pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)] + assert np.mean(pred == target) > 0.9 + + +def test_multinomial_binary_probabilities(global_random_seed): + # Test multinomial LR gives expected probabilities based on the + # decision function, for a binary problem. + X, y = make_classification(random_state=global_random_seed) + clf = LogisticRegression( + multi_class="multinomial", + solver="saga", + tol=1e-3, + random_state=global_random_seed, + ) + clf.fit(X, y) + + decision = clf.decision_function(X) + proba = clf.predict_proba(X) + + expected_proba_class_1 = np.exp(decision) / (np.exp(decision) + np.exp(-decision)) + expected_proba = np.c_[1 - expected_proba_class_1, expected_proba_class_1] + + assert_almost_equal(proba, expected_proba) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_sparsify(coo_container): + # Test sparsify and densify members. + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + X = scale(iris.data) + clf = LogisticRegression(random_state=0).fit(X, target) + + pred_d_d = clf.decision_function(X) + + clf.sparsify() + assert sparse.issparse(clf.coef_) + pred_s_d = clf.decision_function(X) + + sp_data = coo_container(X) + pred_s_s = clf.decision_function(sp_data) + + clf.densify() + pred_d_s = clf.decision_function(sp_data) + + assert_array_almost_equal(pred_d_d, pred_s_d) + assert_array_almost_equal(pred_d_d, pred_s_s) + assert_array_almost_equal(pred_d_d, pred_d_s) + + +def test_inconsistent_input(): + # Test that an exception is raised on inconsistent input + rng = np.random.RandomState(0) + X_ = rng.random_sample((5, 10)) + y_ = np.ones(X_.shape[0]) + y_[0] = 0 + + clf = LogisticRegression(random_state=0) + + # Wrong dimensions for training data + y_wrong = y_[:-1] + + with pytest.raises(ValueError): + clf.fit(X, y_wrong) + + # Wrong dimensions for test data + with pytest.raises(ValueError): + clf.fit(X_, y_).predict(rng.random_sample((3, 12))) + + +def test_write_parameters(): + # Test that we can write to coef_ and intercept_ + clf = LogisticRegression(random_state=0) + clf.fit(X, Y1) + clf.coef_[:] = 0 + clf.intercept_[:] = 0 + assert_array_almost_equal(clf.decision_function(X), 0) + + +def test_nan(): + # Test proper NaN handling. + # Regression test for Issue #252: fit used to go into an infinite loop. 
+ Xnan = np.array(X, dtype=np.float64) + Xnan[0, 1] = np.nan + logistic = LogisticRegression(random_state=0) + + with pytest.raises(ValueError): + logistic.fit(Xnan, Y1) + + +def test_consistency_path(): + # Test that the path algorithm is consistent + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = np.logspace(0, 4, 10) + + f = ignore_warnings + # can't test with fit_intercept=True since LIBLINEAR + # penalizes the intercept + for solver in ["sag", "saga"]: + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + fit_intercept=False, + tol=1e-5, + solver=solver, + max_iter=1000, + multi_class="ovr", + random_state=0, + ) + for i, C in enumerate(Cs): + lr = LogisticRegression( + C=C, + fit_intercept=False, + tol=1e-5, + solver=solver, + multi_class="ovr", + random_state=0, + max_iter=1000, + ) + lr.fit(X, y) + lr_coef = lr.coef_.ravel() + assert_array_almost_equal( + lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver + ) + + # test for fit_intercept=True + for solver in ("lbfgs", "newton-cg", "newton-cholesky", "liblinear", "sag", "saga"): + Cs = [1e3] + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + tol=1e-6, + solver=solver, + intercept_scaling=10000.0, + random_state=0, + multi_class="ovr", + ) + lr = LogisticRegression( + C=Cs[0], + tol=1e-6, + intercept_scaling=10000.0, + random_state=0, + multi_class="ovr", + solver=solver, + ) + lr.fit(X, y) + lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_]) + assert_array_almost_equal( + lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver + ) + + +def test_logistic_regression_path_convergence_fail(): + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = [1e3] + + # Check that the convergence message points to both a model agnostic + # advice (scaling the data) and to the logistic regression specific + # documentation that includes hints on the solver configuration. 
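+    # max_iter=1 together with tol=0.0 guarantees that lbfgs stops before
+    # convergence, so exactly one warning is expected for the single C value.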
+ with pytest.warns(ConvergenceWarning) as record: + _logistic_regression_path( + X, y, Cs=Cs, tol=0.0, max_iter=1, random_state=0, verbose=0 + ) + + assert len(record) == 1 + warn_msg = record[0].message.args[0] + assert "lbfgs failed to converge" in warn_msg + assert "Increase the number of iterations" in warn_msg + assert "scale the data" in warn_msg + assert "linear_model.html#logistic-regression" in warn_msg + + +def test_liblinear_dual_random_state(): + # random_state is relevant for liblinear solver only if dual=True + X, y = make_classification(n_samples=20, random_state=0) + lr1 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr1.fit(X, y) + lr2 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr2.fit(X, y) + lr3 = LogisticRegression( + random_state=8, + dual=True, + tol=1e-3, + solver="liblinear", + multi_class="ovr", + ) + lr3.fit(X, y) + + # same result for same random state + assert_array_almost_equal(lr1.coef_, lr2.coef_) + # different results for different random states + msg = "Arrays are not almost equal to 6 decimals" + with pytest.raises(AssertionError, match=msg): + assert_array_almost_equal(lr1.coef_, lr3.coef_) + + +def test_logistic_cv(): + # test for LogisticRegressionCV object + n_samples, n_features = 50, 5 + rng = np.random.RandomState(0) + X_ref = rng.randn(n_samples, n_features) + y = np.sign(X_ref.dot(5 * rng.randn(n_features))) + X_ref -= X_ref.mean() + X_ref /= X_ref.std() + lr_cv = LogisticRegressionCV( + Cs=[1.0], fit_intercept=False, solver="liblinear", multi_class="ovr", cv=3 + ) + lr_cv.fit(X_ref, y) + lr = LogisticRegression( + C=1.0, fit_intercept=False, solver="liblinear", multi_class="ovr" + ) + lr.fit(X_ref, y) + assert_array_almost_equal(lr.coef_, lr_cv.coef_) + + assert_array_equal(lr_cv.coef_.shape, (1, n_features)) + assert_array_equal(lr_cv.classes_, [-1, 1]) + assert len(lr_cv.classes_) == 2 + + coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values())) + assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features)) + assert_array_equal(lr_cv.Cs_.shape, (1,)) + scores = np.asarray(list(lr_cv.scores_.values())) + assert_array_equal(scores.shape, (1, 3, 1)) + + +@pytest.mark.parametrize( + "scoring, multiclass_agg_list", + [ + ("accuracy", [""]), + ("precision", ["_macro", "_weighted"]), + # no need to test for micro averaging because it + # is the same as accuracy for f1, precision, + # and recall (see https://github.com/ + # scikit-learn/scikit-learn/pull/ + # 11578#discussion_r203250062) + ("f1", ["_macro", "_weighted"]), + ("neg_log_loss", [""]), + ("recall", ["_macro", "_weighted"]), + ], +) +def test_logistic_cv_multinomial_score(scoring, multiclass_agg_list): + # test that LogisticRegressionCV uses the right score to compute its + # cross-validation scores when using a multinomial scoring + # see https://github.com/scikit-learn/scikit-learn/issues/8720 + X, y = make_classification( + n_samples=100, random_state=0, n_classes=3, n_informative=6 + ) + train, test = np.arange(80), np.arange(80, 100) + lr = LogisticRegression(C=1.0, multi_class="multinomial") + # we use lbfgs to support multinomial + params = lr.get_params() + # we store the params to set them further in _log_reg_scoring_path + for key in ["C", "n_jobs", "warm_start"]: + del params[key] + lr.fit(X[train], y[train]) + for averaging in multiclass_agg_list: + scorer = get_scorer(scoring + averaging) + assert_array_almost_equal( + 
_log_reg_scoring_path( + X, + y, + train, + test, + Cs=[1.0], + scoring=scorer, + pos_class=None, + max_squared_sum=None, + sample_weight=None, + score_params=None, + **params, + )[2][0], + scorer(lr, X[test], y[test]), + ) + + +def test_multinomial_logistic_regression_string_inputs(): + # Test with string labels for LogisticRegression(CV) + n_samples, n_features, n_classes = 50, 5, 3 + X_ref, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_classes=n_classes, + n_informative=3, + random_state=0, + ) + y_str = LabelEncoder().fit(["bar", "baz", "foo"]).inverse_transform(y) + # For numerical labels, let y values be taken from set (-1, 0, 1) + y = np.array(y) - 1 + # Test for string labels + lr = LogisticRegression(multi_class="multinomial") + lr_cv = LogisticRegressionCV(multi_class="multinomial", Cs=3) + lr_str = LogisticRegression(multi_class="multinomial") + lr_cv_str = LogisticRegressionCV(multi_class="multinomial", Cs=3) + + lr.fit(X_ref, y) + lr_cv.fit(X_ref, y) + lr_str.fit(X_ref, y_str) + lr_cv_str.fit(X_ref, y_str) + + assert_array_almost_equal(lr.coef_, lr_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert sorted(lr_cv_str.classes_) == ["bar", "baz", "foo"] + + # The predictions should be in original labels + assert sorted(np.unique(lr_str.predict(X_ref))) == ["bar", "baz", "foo"] + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz", "foo"] + + # Make sure class weights can be given with string labels + lr_cv_str = LogisticRegression( + class_weight={"bar": 1, "baz": 2, "foo": 0}, multi_class="multinomial" + ).fit(X_ref, y_str) + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz"] + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logistic_cv_sparse(csr_container): + X, y = make_classification(n_samples=50, n_features=5, random_state=0) + X[X < 1.0] = 0.0 + csr = csr_container(X) + + clf = LogisticRegressionCV() + clf.fit(X, y) + clfs = LogisticRegressionCV() + clfs.fit(csr, y) + assert_array_almost_equal(clfs.coef_, clf.coef_) + assert_array_almost_equal(clfs.intercept_, clf.intercept_) + assert clfs.C_ == clf.C_ + + +def test_ovr_multinomial_iris(): + # Test that OvR and multinomial are correct using the iris dataset. + train, target = iris.data, iris.target + n_samples, n_features = train.shape + + # The cv indices from stratified kfold (where stratification is done based + # on the fine-grained iris classes, i.e, before the classes 0 and 1 are + # conflated) is used for both clf and clf1 + n_cv = 2 + cv = StratifiedKFold(n_cv) + precomputed_folds = list(cv.split(train, target)) + + # Train clf on the original dataset where classes 0 and 1 are separated + clf = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + clf.fit(train, target) + + # Conflate classes 0 and 1 and train clf1 on this modified dataset + clf1 = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + target_copy = target.copy() + target_copy[target_copy == 0] = 1 + clf1.fit(train, target_copy) + + # Ensure that what OvR learns for class2 is same regardless of whether + # classes 0 and 1 are separated or not + assert_allclose(clf.scores_[2], clf1.scores_[2]) + assert_allclose(clf.intercept_[2:], clf1.intercept_) + assert_allclose(clf.coef_[2][np.newaxis, :], clf1.coef_) + + # Test the shape of various attributes. 
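+    # coefs_paths_ is a dict keyed by class; each entry has shape
+    # (n_folds, n_Cs, n_features + 1), the extra column being the intercept.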
+ assert clf.coef_.shape == (3, n_features) + assert_array_equal(clf.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf.Cs_.shape == (10,) + scores = np.asarray(list(clf.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + # Test that for the iris data multinomial gives a better accuracy than OvR + for solver in ["lbfgs", "newton-cg", "sag", "saga"]: + max_iter = 500 if solver in ["sag", "saga"] else 30 + clf_multi = LogisticRegressionCV( + solver=solver, + multi_class="multinomial", + max_iter=max_iter, + random_state=42, + tol=1e-3 if solver in ["sag", "saga"] else 1e-2, + cv=2, + ) + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + train = scale(train) + + clf_multi.fit(train, target) + multi_score = clf_multi.score(train, target) + ovr_score = clf.score(train, target) + assert multi_score > ovr_score + + # Test attributes of LogisticRegressionCV + assert clf.coef_.shape == clf_multi.coef_.shape + assert_array_equal(clf_multi.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf_multi.Cs_.shape == (10,) + scores = np.asarray(list(clf_multi.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + +def test_logistic_regression_solvers(): + """Test solvers converge to the same result.""" + X, y = make_classification(n_features=10, n_informative=5, random_state=0) + + params = dict(fit_intercept=False, random_state=42, multi_class="ovr") + + regressors = { + solver: LogisticRegression(solver=solver, **params).fit(X, y) + for solver in SOLVERS + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_array_almost_equal( + regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=3 + ) + + +def test_logistic_regression_solvers_multiclass(): + """Test solvers converge to the same result for multiclass problems.""" + X, y = make_classification( + n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0 + ) + tol = 1e-7 + params = dict(fit_intercept=False, tol=tol, random_state=42, multi_class="ovr") + + # Override max iteration count for specific solvers to allow for + # proper convergence. 
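+    # sag and saga are incremental (stochastic average) gradient solvers; on this
+    # tiny dataset with a tight tol they need many more epochs than the
+    # full-batch solvers, hence the larger max_iter values below.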
+ solver_max_iter = {"sag": 1000, "saga": 10000} + + regressors = { + solver: LogisticRegression( + solver=solver, max_iter=solver_max_iter.get(solver, 100), **params + ).fit(X, y) + for solver in SOLVERS + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_allclose( + regressors[solver_1].coef_, + regressors[solver_2].coef_, + rtol=5e-3 if solver_2 == "saga" else 1e-3, + err_msg=f"{solver_1} vs {solver_2}", + ) + + +@pytest.mark.parametrize("weight", [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]) +@pytest.mark.parametrize("class_weight", ["weight", "balanced"]) +def test_logistic_regressioncv_class_weights(weight, class_weight, global_random_seed): + """Test class_weight for LogisticRegressionCV.""" + n_classes = len(weight) + if class_weight == "weight": + class_weight = weight + + X, y = make_classification( + n_samples=30, + n_features=3, + n_repeated=0, + n_informative=3, + n_redundant=0, + n_classes=n_classes, + random_state=global_random_seed, + ) + params = dict( + Cs=1, + fit_intercept=False, + multi_class="ovr", + class_weight=class_weight, + tol=1e-8, + ) + clf_lbfgs = LogisticRegressionCV(solver="lbfgs", **params) + + # XXX: lbfgs' line search can fail and cause a ConvergenceWarning for some + # 10% of the random seeds, but only on specific platforms (in particular + # when using Atlas BLAS/LAPACK implementation). Doubling the maxls internal + # parameter of the solver does not help. However this lack of proper + # convergence does not seem to prevent the assertion to pass, so we ignore + # the warning for now. + # See: https://github.com/scikit-learn/scikit-learn/pull/27649 + with ignore_warnings(category=ConvergenceWarning): + clf_lbfgs.fit(X, y) + + for solver in set(SOLVERS) - set(["lbfgs"]): + clf = LogisticRegressionCV(solver=solver, **params) + if solver in ("sag", "saga"): + clf.set_params( + tol=1e-18, max_iter=10000, random_state=global_random_seed + 1 + ) + clf.fit(X, y) + + assert_allclose( + clf.coef_, clf_lbfgs.coef_, rtol=1e-3, err_msg=f"{solver} vs lbfgs" + ) + + +def test_logistic_regression_sample_weights(): + X, y = make_classification( + n_samples=20, n_features=5, n_informative=3, n_classes=2, random_state=0 + ) + sample_weight = y + 1 + + for LR in [LogisticRegression, LogisticRegressionCV]: + kw = {"random_state": 42, "fit_intercept": False, "multi_class": "ovr"} + if LR is LogisticRegressionCV: + kw.update({"Cs": 3, "cv": 3}) + + # Test that passing sample_weight as ones is the same as + # not passing them at all (default None) + for solver in ["lbfgs", "liblinear"]: + clf_sw_none = LR(solver=solver, **kw) + clf_sw_ones = LR(solver=solver, **kw) + clf_sw_none.fit(X, y) + clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0])) + assert_allclose(clf_sw_none.coef_, clf_sw_ones.coef_, rtol=1e-4) + + # Test that sample weights work the same with the lbfgs, + # newton-cg, newton-cholesky and 'sag' solvers + clf_sw_lbfgs = LR(**kw, tol=1e-5) + clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight) + for solver in set(SOLVERS) - set(("lbfgs", "saga")): + clf_sw = LR(solver=solver, tol=1e-10 if solver == "sag" else 1e-5, **kw) + # ignore convergence warning due to small dataset with sag + with ignore_warnings(): + clf_sw.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf_sw_lbfgs.coef_, clf_sw.coef_, rtol=1e-4) + + # Test that passing class_weight as [1,2] is the same as + # passing class weight = [1,1] but adjusting sample weights + # to be 2 for all instances of class 2 + for solver in ["lbfgs", "liblinear"]: + clf_cw_12 = 
LR(solver=solver, class_weight={0: 1, 1: 2}, **kw) + clf_cw_12.fit(X, y) + clf_sw_12 = LR(solver=solver, **kw) + clf_sw_12.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf_cw_12.coef_, clf_sw_12.coef_, rtol=1e-4) + + # Test the above for l1 penalty and l2 penalty with dual=True. + # since the patched liblinear code is different. + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l1", + tol=1e-5, + random_state=42, + multi_class="ovr", + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l1", + tol=1e-5, + random_state=42, + multi_class="ovr", + ) + clf_sw.fit(X, y, sample_weight) + assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) + + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l2", + dual=True, + random_state=42, + multi_class="ovr", + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l2", + dual=True, + random_state=42, + multi_class="ovr", + ) + clf_sw.fit(X, y, sample_weight) + assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4) + + +def _compute_class_weight_dictionary(y): + # helper for returning a dictionary instead of an array + classes = np.unique(y) + class_weight = compute_class_weight("balanced", classes=classes, y=y) + class_weight_dict = dict(zip(classes, class_weight)) + return class_weight_dict + + +def test_logistic_regression_class_weights(): + # Scale data to avoid convergence warnings with the lbfgs solver + X_iris = scale(iris.data) + # Multinomial case: remove 90% of class 0 + X = X_iris[45:, :] + y = iris.target[45:] + solvers = ("lbfgs", "newton-cg") + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in solvers: + clf1 = LogisticRegression( + solver=solver, multi_class="multinomial", class_weight="balanced" + ) + clf2 = LogisticRegression( + solver=solver, multi_class="multinomial", class_weight=class_weight_dict + ) + clf1.fit(X, y) + clf2.fit(X, y) + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4) + + # Binary case: remove 90% of class 0 and 100% of class 2 + X = X_iris[45:100, :] + y = iris.target[45:100] + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in set(SOLVERS) - set(("sag", "saga")): + clf1 = LogisticRegression( + solver=solver, multi_class="ovr", class_weight="balanced" + ) + clf2 = LogisticRegression( + solver=solver, multi_class="ovr", class_weight=class_weight_dict + ) + clf1.fit(X, y) + clf2.fit(X, y) + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6) + + +def test_logistic_regression_multinomial(): + # Tests for the multinomial option in logistic regression + + # Some basic attributes of Logistic Regression + n_samples, n_features, n_classes = 50, 20, 3 + X, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_informative=10, + n_classes=n_classes, + random_state=0, + ) + + X = StandardScaler(with_mean=False).fit_transform(X) + + # 'lbfgs' is used as a referenced + solver = "lbfgs" + ref_i = LogisticRegression(solver=solver, multi_class="multinomial", tol=1e-6) + ref_w = LogisticRegression( + solver=solver, multi_class="multinomial", fit_intercept=False, tol=1e-6 + ) + ref_i.fit(X, y) + ref_w.fit(X, y) + assert ref_i.coef_.shape == (n_classes, n_features) + assert ref_w.coef_.shape == (n_classes, n_features) + for solver in ["sag", "saga", "newton-cg"]: + clf_i = 
LogisticRegression( + solver=solver, + multi_class="multinomial", + random_state=42, + max_iter=2000, + tol=1e-7, + ) + clf_w = LogisticRegression( + solver=solver, + multi_class="multinomial", + random_state=42, + max_iter=2000, + tol=1e-7, + fit_intercept=False, + ) + clf_i.fit(X, y) + clf_w.fit(X, y) + assert clf_i.coef_.shape == (n_classes, n_features) + assert clf_w.coef_.shape == (n_classes, n_features) + + # Compare solutions between lbfgs and the other solvers + assert_allclose(ref_i.coef_, clf_i.coef_, rtol=1e-3) + assert_allclose(ref_w.coef_, clf_w.coef_, rtol=1e-2) + assert_allclose(ref_i.intercept_, clf_i.intercept_, rtol=1e-3) + + # Test that the path give almost the same results. However since in this + # case we take the average of the coefs after fitting across all the + # folds, it need not be exactly the same. + for solver in ["lbfgs", "newton-cg", "sag", "saga"]: + clf_path = LogisticRegressionCV( + solver=solver, max_iter=2000, tol=1e-6, multi_class="multinomial", Cs=[1.0] + ) + clf_path.fit(X, y) + assert_allclose(clf_path.coef_, ref_i.coef_, rtol=1e-2) + assert_allclose(clf_path.intercept_, ref_i.intercept_, rtol=1e-2) + + +def test_liblinear_decision_function_zero(): + # Test negative prediction when decision_function values are zero. + # Liblinear predicts the positive class when decision_function values + # are zero. This is a test to verify that we do not do the same. + # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600 + # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623 + X, y = make_classification(n_samples=5, n_features=5, random_state=0) + clf = LogisticRegression(fit_intercept=False, solver="liblinear", multi_class="ovr") + clf.fit(X, y) + + # Dummy data such that the decision function becomes zero. + X = np.zeros((5, 5)) + assert_array_equal(clf.predict(X), np.zeros(5)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_liblinear_logregcv_sparse(csr_container): + # Test LogRegCV with solver='liblinear' works for sparse matrices + + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + clf = LogisticRegressionCV(solver="liblinear", multi_class="ovr") + clf.fit(csr_container(X), y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_saga_sparse(csr_container): + # Test LogRegCV with solver='liblinear' works for sparse matrices + + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + clf = LogisticRegressionCV(solver="saga", tol=1e-2) + clf.fit(csr_container(X), y) + + +def test_logreg_intercept_scaling_zero(): + # Test that intercept_scaling is ignored when fit_intercept is False + + clf = LogisticRegression(fit_intercept=False) + clf.fit(X, Y1) + assert clf.intercept_ == 0.0 + + +def test_logreg_l1(): + # Because liblinear penalizes the intercept and saga does not, we do not + # fit the intercept to make it possible to compare the coefficients of + # the two models at convergence. 
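+ # Append a few Gaussian noise features and two constant columns so that the
+ # l1 penalty has uninformative coefficients to shrink exactly to zero.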
+ rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(size=(n_samples, 3)) + X_constant = np.ones(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + multi_class="ovr", + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logreg_l1_sparse_data(csr_container): + # Because liblinear penalizes the intercept and saga does not, we do not + # fit the intercept to make it possible to compare the coefficients of + # the two models at convergence. + rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(scale=0.1, size=(n_samples, 3)) + X_constant = np.zeros(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + X[X < 1] = 0 + X = csr_container(X) + + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + multi_class="ovr", + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + # Check that solving on the sparse and dense data yield the same results + lr_saga_dense = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + multi_class="ovr", + max_iter=1000, + tol=1e-10, + ) + lr_saga_dense.fit(X.toarray(), y) + assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_) + + +@pytest.mark.parametrize("random_seed", [42]) +@pytest.mark.parametrize("penalty", ["l1", "l2"]) +def test_logistic_regression_cv_refit(random_seed, penalty): + # Test that when refit=True, logistic regression cv with the saga solver + # converges to the same solution as logistic regression with a fixed + # regularization parameter. + # Internally the LogisticRegressionCV model uses a warm start to refit on + # the full data model with the optimal C found by CV. As the penalized + # logistic regression loss is convex, we should still recover exactly + # the same solution as long as the stopping criterion is strict enough (and + # that there are no exactly duplicated features when penalty='l1'). 
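+ # With a single candidate C, the refitted LogisticRegressionCV and a plain
+ # LogisticRegression with the same C minimize the same objective, so their
+ # coefficients should agree closely.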
+ X, y = make_classification(n_samples=100, n_features=20, random_state=random_seed) + common_params = dict( + solver="saga", + penalty=penalty, + random_state=random_seed, + max_iter=1000, + tol=1e-12, + ) + lr_cv = LogisticRegressionCV(Cs=[1.0], refit=True, **common_params) + lr_cv.fit(X, y) + lr = LogisticRegression(C=1.0, **common_params) + lr.fit(X, y) + assert_array_almost_equal(lr_cv.coef_, lr.coef_) + + +def test_logreg_predict_proba_multinomial(): + X, y = make_classification( + n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10 + ) + + # Predicted probabilities using the true-entropy loss should give a + # smaller loss than those using the ovr method. + clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs") + clf_multi.fit(X, y) + clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs") + clf_ovr.fit(X, y) + clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X)) + assert clf_ovr_loss > clf_multi_loss + + # Predicted probabilities using the soft-max function should give a + # smaller loss than those using the logistic function. + clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X)) + assert clf_wrong_loss > clf_multi_loss + + +@pytest.mark.parametrize("max_iter", np.arange(1, 5)) +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver, message", + [ + ( + "newton-cg", + "newton-cg failed to converge. Increase the number of iterations.", + ), + ( + "liblinear", + "Liblinear failed to converge, increase the number of iterations.", + ), + ("sag", "The max_iter was reached which means the coef_ did not converge"), + ("saga", "The max_iter was reached which means the coef_ did not converge"), + ("lbfgs", "lbfgs failed to converge"), + ("newton-cholesky", "Newton solver did not converge after [0-9]* iterations"), + ], +) +def test_max_iter(max_iter, multi_class, solver, message): + # Test that the maximum number of iteration is reached + X, y_bin = iris.data, iris.target.copy() + y_bin[y_bin == 2] = 0 + + if solver in ("liblinear", "newton-cholesky") and multi_class == "multinomial": + pytest.skip("'multinomial' is not supported by liblinear and newton-cholesky") + if solver == "newton-cholesky" and max_iter > 1: + pytest.skip("solver newton-cholesky might converge very fast") + + lr = LogisticRegression( + max_iter=max_iter, + tol=1e-15, + multi_class=multi_class, + random_state=0, + solver=solver, + ) + with pytest.warns(ConvergenceWarning, match=message): + lr.fit(X, y_bin) + + assert lr.n_iter_[0] == max_iter + + +@pytest.mark.parametrize("solver", SOLVERS) +def test_n_iter(solver): + # Test that self.n_iter_ has the correct format. + X, y = iris.data, iris.target + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + X = scale(X) + + n_classes = np.unique(y).shape[0] + assert n_classes == 3 + + # Also generate a binary classification sub-problem. 
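+ # Merge class 2 into class 0 so that y_bin contains exactly two classes.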
+ y_bin = y.copy() + y_bin[y_bin == 2] = 0 + + n_Cs = 4 + n_cv_fold = 2 + + # Binary classification case + clf = LogisticRegression(tol=1e-2, C=1.0, solver=solver, random_state=42) + clf.fit(X, y_bin) + assert clf.n_iter_.shape == (1,) + + clf_cv = LogisticRegressionCV( + tol=1e-2, solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42 + ) + clf_cv.fit(X, y_bin) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + # OvR case + clf.set_params(multi_class="ovr").fit(X, y) + assert clf.n_iter_.shape == (n_classes,) + + clf_cv.set_params(multi_class="ovr").fit(X, y) + assert clf_cv.n_iter_.shape == (n_classes, n_cv_fold, n_Cs) + + # multinomial case + if solver in ("liblinear", "newton-cholesky"): + # This solver only supports one-vs-rest multiclass classification. + return + + # When using the multinomial objective function, there is a single + # optimization problem to solve for all classes at once: + clf.set_params(multi_class="multinomial").fit(X, y) + assert clf.n_iter_.shape == (1,) + + clf_cv.set_params(multi_class="multinomial").fit(X, y) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + +@pytest.mark.parametrize("solver", sorted(set(SOLVERS) - set(["liblinear"]))) +@pytest.mark.parametrize("warm_start", (True, False)) +@pytest.mark.parametrize("fit_intercept", (True, False)) +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +def test_warm_start(solver, warm_start, fit_intercept, multi_class): + # A 1-iteration second fit on same data should give almost same result + # with warm starting, and quite different result without warm starting. + # Warm starting does not work with liblinear solver. + X, y = iris.data, iris.target + + if solver == "newton-cholesky" and multi_class == "multinomial": + # solver does only support OvR + return + + clf = LogisticRegression( + tol=1e-4, + multi_class=multi_class, + warm_start=warm_start, + solver=solver, + random_state=42, + fit_intercept=fit_intercept, + ) + with ignore_warnings(category=ConvergenceWarning): + clf.fit(X, y) + coef_1 = clf.coef_ + + clf.max_iter = 1 + clf.fit(X, y) + cum_diff = np.sum(np.abs(coef_1 - clf.coef_)) + msg = ( + "Warm starting issue with %s solver in %s mode " + "with fit_intercept=%s and warm_start=%s" + % (solver, multi_class, str(fit_intercept), str(warm_start)) + ) + if warm_start: + assert 2.0 > cum_diff, msg + else: + assert cum_diff > 2.0, msg + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_saga_vs_liblinear(csr_container): + iris = load_iris() + X, y = iris.data, iris.target + X = np.concatenate([X] * 3) + y = np.concatenate([y] * 3) + + X_bin = X[y <= 1] + y_bin = y[y <= 1] * 2 - 1 + + X_sparse, y_sparse = make_classification( + n_samples=50, n_features=20, random_state=0 + ) + X_sparse = csr_container(X_sparse) + + for X, y in ((X_bin, y_bin), (X_sparse, y_sparse)): + for penalty in ["l1", "l2"]: + n_samples = X.shape[0] + # alpha=1e-3 is time consuming + for alpha in np.logspace(-1, 1, 3): + saga = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="saga", + multi_class="ovr", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + liblinear = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="liblinear", + multi_class="ovr", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + saga.fit(X, y) + liblinear.fit(X, y) + # Convergence for alpha=1e-3 is very slow + assert_array_almost_equal(saga.coef_, liblinear.coef_, 3) + + 
+@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver", ["liblinear", "newton-cg", "newton-cholesky", "saga"] +) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dtype_match(solver, multi_class, fit_intercept, csr_container): + # Test that np.float32 input data is not cast to np.float64 when possible + # and that the output is approximately the same no matter the input format. + + if solver in ("liblinear", "newton-cholesky") and multi_class == "multinomial": + pytest.skip(f"Solver={solver} does not support multinomial logistic.") + + out32_type = np.float64 if solver == "liblinear" else np.float32 + + X_32 = np.array(X).astype(np.float32) + y_32 = np.array(Y1).astype(np.float32) + X_64 = np.array(X).astype(np.float64) + y_64 = np.array(Y1).astype(np.float64) + X_sparse_32 = csr_container(X, dtype=np.float32) + X_sparse_64 = csr_container(X, dtype=np.float64) + solver_tol = 5e-4 + + lr_templ = LogisticRegression( + solver=solver, + multi_class=multi_class, + random_state=42, + tol=solver_tol, + fit_intercept=fit_intercept, + ) + + # Check 32-bit type consistency + lr_32 = clone(lr_templ) + lr_32.fit(X_32, y_32) + assert lr_32.coef_.dtype == out32_type + + # Check 32-bit type consistency with sparsity + lr_32_sparse = clone(lr_templ) + lr_32_sparse.fit(X_sparse_32, y_32) + assert lr_32_sparse.coef_.dtype == out32_type + + # Check 64-bit type consistency + lr_64 = clone(lr_templ) + lr_64.fit(X_64, y_64) + assert lr_64.coef_.dtype == np.float64 + + # Check 64-bit type consistency with sparsity + lr_64_sparse = clone(lr_templ) + lr_64_sparse.fit(X_sparse_64, y_64) + assert lr_64_sparse.coef_.dtype == np.float64 + + # solver_tol bounds the norm of the loss gradient + # dw ~= inv(H)*grad ==> |dw| ~= |inv(H)| * solver_tol, where H - hessian + # + # See https://github.com/scikit-learn/scikit-learn/pull/13645 + # + # with Z = np.hstack((np.ones((3,1)), np.array(X))) + # In [8]: np.linalg.norm(np.diag([0,2,2]) + np.linalg.inv((Z.T @ Z)/4)) + # Out[8]: 1.7193336918135917 + + # factor of 2 to get the ball diameter + atol = 2 * 1.72 * solver_tol + if os.name == "nt" and _IS_32BIT: + # FIXME + atol = 1e-2 + + # Check accuracy consistency + assert_allclose(lr_32.coef_, lr_64.coef_.astype(np.float32), atol=atol) + + if solver == "saga" and fit_intercept: + # FIXME: SAGA on sparse data fits the intercept inaccurately with the + # default tol and max_iter parameters. + atol = 1e-1 + + assert_allclose(lr_32.coef_, lr_32_sparse.coef_, atol=atol) + assert_allclose(lr_64.coef_, lr_64_sparse.coef_, atol=atol) + + +def test_warm_start_converge_LR(): + # Test to see that the logistic regression converges on warm start, + # with multi_class='multinomial'. 
Non-regressive test for #10836 + + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = np.array([1] * 100 + [-1] * 100) + lr_no_ws = LogisticRegression( + multi_class="multinomial", solver="sag", warm_start=False, random_state=0 + ) + lr_ws = LogisticRegression( + multi_class="multinomial", solver="sag", warm_start=True, random_state=0 + ) + + lr_no_ws_loss = log_loss(y, lr_no_ws.fit(X, y).predict_proba(X)) + for i in range(5): + lr_ws.fit(X, y) + lr_ws_loss = log_loss(y, lr_ws.predict_proba(X)) + assert_allclose(lr_no_ws_loss, lr_ws_loss, rtol=1e-5) + + +def test_elastic_net_coeffs(): + # make sure elasticnet penalty gives different coefficients from l1 and l2 + # with saga solver (l1_ratio different from 0 or 1) + X, y = make_classification(random_state=0) + + C = 2.0 + l1_ratio = 0.5 + coeffs = list() + for penalty, ratio in (("elasticnet", l1_ratio), ("l1", None), ("l2", None)): + lr = LogisticRegression( + penalty=penalty, + C=C, + solver="saga", + random_state=0, + l1_ratio=ratio, + tol=1e-3, + max_iter=200, + ) + lr.fit(X, y) + coeffs.append(lr.coef_) + + elastic_net_coeffs, l1_coeffs, l2_coeffs = coeffs + # make sure coeffs differ by at least .1 + assert not np.allclose(elastic_net_coeffs, l1_coeffs, rtol=0, atol=0.1) + assert not np.allclose(elastic_net_coeffs, l2_coeffs, rtol=0, atol=0.1) + assert not np.allclose(l2_coeffs, l1_coeffs, rtol=0, atol=0.1) + + +@pytest.mark.parametrize("C", [0.001, 0.1, 1, 10, 100, 1000, 1e6]) +@pytest.mark.parametrize("penalty, l1_ratio", [("l1", 1), ("l2", 0)]) +def test_elastic_net_l1_l2_equivalence(C, penalty, l1_ratio): + # Make sure elasticnet is equivalent to l1 when l1_ratio=1 and to l2 when + # l1_ratio=0. + X, y = make_classification(random_state=0) + + lr_enet = LogisticRegression( + penalty="elasticnet", + C=C, + l1_ratio=l1_ratio, + solver="saga", + random_state=0, + tol=1e-2, + ) + lr_expected = LogisticRegression( + penalty=penalty, C=C, solver="saga", random_state=0, tol=1e-2 + ) + lr_enet.fit(X, y) + lr_expected.fit(X, y) + + assert_array_almost_equal(lr_enet.coef_, lr_expected.coef_) + + +@pytest.mark.parametrize("C", [0.001, 1, 100, 1e6]) +def test_elastic_net_vs_l1_l2(C): + # Make sure that elasticnet with grid search on l1_ratio gives same or + # better results than just l1 or just l2. + + X, y = make_classification(500, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + param_grid = {"l1_ratio": np.linspace(0, 1, 5)} + + enet_clf = LogisticRegression( + penalty="elasticnet", C=C, solver="saga", random_state=0, tol=1e-2 + ) + gs = GridSearchCV(enet_clf, param_grid, refit=True) + + l1_clf = LogisticRegression( + penalty="l1", C=C, solver="saga", random_state=0, tol=1e-2 + ) + l2_clf = LogisticRegression( + penalty="l2", C=C, solver="saga", random_state=0, tol=1e-2 + ) + + for clf in (gs, l1_clf, l2_clf): + clf.fit(X_train, y_train) + + assert gs.score(X_test, y_test) >= l1_clf.score(X_test, y_test) + assert gs.score(X_test, y_test) >= l2_clf.score(X_test, y_test) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_LogisticRegression_elastic_net_objective(C, l1_ratio): + # Check that training with a penalty matching the objective leads + # to a lower objective. + # Here we train a logistic regression with l2 (a) and elasticnet (b) + # penalties, and compute the elasticnet objective. That of a should be + # greater than that of b (both objectives are convex). 
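+ # The objective evaluated below is
+ # C * log_loss(y, p) + l1_ratio * ||coef||_1 + (1 - l1_ratio) / 2 * ||coef||_2^2.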
+ X, y = make_classification( + n_samples=1000, + n_classes=2, + n_features=20, + n_informative=10, + n_redundant=0, + n_repeated=0, + random_state=0, + ) + X = scale(X) + + lr_enet = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + C=C, + l1_ratio=l1_ratio, + fit_intercept=False, + ) + lr_l2 = LogisticRegression( + penalty="l2", solver="saga", random_state=0, C=C, fit_intercept=False + ) + lr_enet.fit(X, y) + lr_l2.fit(X, y) + + def enet_objective(lr): + coef = lr.coef_.ravel() + obj = C * log_loss(y, lr.predict_proba(X)) + obj += l1_ratio * np.sum(np.abs(coef)) + obj += (1.0 - l1_ratio) * 0.5 * np.dot(coef, coef) + return obj + + assert enet_objective(lr_enet) < enet_objective(lr_l2) + + +@pytest.mark.parametrize("multi_class", ("ovr", "multinomial")) +def test_LogisticRegressionCV_GridSearchCV_elastic_net(multi_class): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet + + if multi_class == "ovr": + # This is actually binary classification, ovr multiclass is treated in + # test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr + X, y = make_classification(random_state=0) + else: + X, y = make_classification( + n_samples=100, n_classes=3, n_informative=3, random_state=0 + ) + + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + multi_class=multi_class, + tol=1e-2, + ) + lrcv.fit(X, y) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + multi_class=multi_class, + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X, y) + + assert gs.best_params_["l1_ratio"] == lrcv.l1_ratio_[0] + assert gs.best_params_["C"] == lrcv.C_[0] + + +def test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr(): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet and multiclass is ovr. We can't + # compare best_params like in the previous test because + # LogisticRegressionCV with multi_class='ovr' will have one C and one + # l1_param for each class, while LogisticRegression will share the + # parameters over the *n_classes* classifiers. 
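+ # Instead, check below that the two models agree on at least 80% of the
+ # predictions on both the training and the test set.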
+ + X, y = make_classification( + n_samples=100, n_classes=3, n_informative=3, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + lrcv.fit(X_train, y_train) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X_train, y_train) + + # Check that predictions are 80% the same + assert (lrcv.predict(X_train) == gs.predict(X_train)).mean() >= 0.8 + assert (lrcv.predict(X_test) == gs.predict(X_test)).mean() >= 0.8 + + +@pytest.mark.parametrize("penalty", ("l2", "elasticnet")) +@pytest.mark.parametrize("multi_class", ("ovr", "multinomial", "auto")) +def test_LogisticRegressionCV_no_refit(penalty, multi_class): + # Test LogisticRegressionCV attribute shapes when refit is False + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + if penalty == "elasticnet": + l1_ratios = np.linspace(0, 1, 2) + else: + l1_ratios = None + + lrcv = LogisticRegressionCV( + penalty=penalty, + Cs=Cs, + solver="saga", + l1_ratios=l1_ratios, + random_state=0, + multi_class=multi_class, + tol=1e-2, + refit=False, + ) + lrcv.fit(X, y) + assert lrcv.C_.shape == (n_classes,) + assert lrcv.l1_ratio_.shape == (n_classes,) + assert lrcv.coef_.shape == (n_classes, n_features) + + +def test_LogisticRegressionCV_elasticnet_attribute_shapes(): + # Make sure the shapes of scores_ and coefs_paths_ attributes are correct + # when using elasticnet (added one dimension for l1_ratios) + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + l1_ratios = np.linspace(0, 1, 2) + + n_folds = 2 + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=n_folds, + l1_ratios=l1_ratios, + multi_class="ovr", + random_state=0, + tol=1e-2, + ) + lrcv.fit(X, y) + coefs_paths = np.asarray(list(lrcv.coefs_paths_.values())) + assert coefs_paths.shape == ( + n_classes, + n_folds, + Cs.size, + l1_ratios.size, + n_features + 1, + ) + scores = np.asarray(list(lrcv.scores_.values())) + assert scores.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + assert lrcv.n_iter_.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + +def test_l1_ratio_non_elasticnet(): + msg = ( + r"l1_ratio parameter is only used when penalty is" + r" 'elasticnet'\. 
Got \(penalty=l1\)" + ) + with pytest.warns(UserWarning, match=msg): + LogisticRegression(penalty="l1", solver="saga", l1_ratio=0.5).fit(X, Y1) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_elastic_net_versus_sgd(C, l1_ratio): + # Compare elasticnet penalty in LogisticRegression() and SGD(loss='log') + n_samples = 500 + X, y = make_classification( + n_samples=n_samples, + n_classes=2, + n_features=5, + n_informative=5, + n_redundant=0, + n_repeated=0, + random_state=1, + ) + X = scale(X) + + sgd = SGDClassifier( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=None, + max_iter=2000, + l1_ratio=l1_ratio, + alpha=1.0 / C / n_samples, + loss="log_loss", + ) + log = LogisticRegression( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=1e-5, + max_iter=1000, + l1_ratio=l1_ratio, + C=C, + solver="saga", + ) + + sgd.fit(X, y) + log.fit(X, y) + assert_array_almost_equal(sgd.coef_, log.coef_, decimal=1) + + +def test_logistic_regression_path_coefs_multinomial(): + # Make sure that the returned coefs by logistic_regression_path when + # multi_class='multinomial' don't override each other (used to be a + # bug). + X, y = make_classification( + n_samples=200, + n_classes=3, + n_informative=2, + n_redundant=0, + n_clusters_per_class=1, + random_state=0, + n_features=2, + ) + Cs = [0.00001, 1, 10000] + coefs, _, _ = _logistic_regression_path( + X, + y, + penalty="l1", + Cs=Cs, + solver="saga", + random_state=0, + multi_class="multinomial", + ) + + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[1], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[2], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[1], coefs[2], decimal=1) + + +@pytest.mark.parametrize( + "est", + [ + LogisticRegression(random_state=0, max_iter=500), + LogisticRegressionCV(random_state=0, cv=3, Cs=3, tol=1e-3, max_iter=500), + ], + ids=lambda x: x.__class__.__name__, +) +@pytest.mark.parametrize("solver", SOLVERS) +def test_logistic_regression_multi_class_auto(est, solver): + # check multi_class='auto' => multi_class='ovr' + # iff binary y or liblinear or newton-cholesky + + def fit(X, y, **kw): + return clone(est).set_params(**kw).fit(X, y) + + scaled_data = scale(iris.data) + X = scaled_data[::10] + X2 = scaled_data[1::10] + y_multi = iris.target[::10] + y_bin = y_multi == 0 + est_auto_bin = fit(X, y_bin, multi_class="auto", solver=solver) + est_ovr_bin = fit(X, y_bin, multi_class="ovr", solver=solver) + assert_allclose(est_auto_bin.coef_, est_ovr_bin.coef_) + assert_allclose(est_auto_bin.predict_proba(X2), est_ovr_bin.predict_proba(X2)) + + est_auto_multi = fit(X, y_multi, multi_class="auto", solver=solver) + if solver in ("liblinear", "newton-cholesky"): + est_ovr_multi = fit(X, y_multi, multi_class="ovr", solver=solver) + assert_allclose(est_auto_multi.coef_, est_ovr_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_ovr_multi.predict_proba(X2) + ) + else: + est_multi_multi = fit(X, y_multi, multi_class="multinomial", solver=solver) + assert_allclose(est_auto_multi.coef_, est_multi_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_multi_multi.predict_proba(X2) + ) + + # Make sure multi_class='ovr' is distinct from ='multinomial' + assert not np.allclose( + est_auto_bin.coef_, + fit(X, y_bin, multi_class="multinomial", solver=solver).coef_, + ) + assert not 
np.allclose( + est_auto_bin.coef_, + fit(X, y_multi, multi_class="multinomial", solver=solver).coef_, + ) + + +@pytest.mark.parametrize("solver", sorted(set(SOLVERS) - set(["liblinear"]))) +def test_penalty_none(solver): + # - Make sure warning is raised if penalty=None and C is set to a + # non-default value. + # - Make sure setting penalty=None is equivalent to setting C=np.inf with + # l2 penalty. + X, y = make_classification(n_samples=1000, n_redundant=0, random_state=0) + + msg = "Setting penalty=None will ignore the C" + lr = LogisticRegression(penalty=None, solver=solver, C=4) + with pytest.warns(UserWarning, match=msg): + lr.fit(X, y) + + lr_none = LogisticRegression(penalty=None, solver=solver, random_state=0) + lr_l2_C_inf = LogisticRegression( + penalty="l2", C=np.inf, solver=solver, random_state=0 + ) + pred_none = lr_none.fit(X, y).predict(X) + pred_l2_C_inf = lr_l2_C_inf.fit(X, y).predict(X) + assert_array_equal(pred_none, pred_l2_C_inf) + + +@pytest.mark.parametrize( + "params", + [ + {"penalty": "l1", "dual": False, "tol": 1e-6, "max_iter": 1000}, + {"penalty": "l2", "dual": True, "tol": 1e-12, "max_iter": 1000}, + {"penalty": "l2", "dual": False, "tol": 1e-12, "max_iter": 1000}, + ], +) +def test_logisticregression_liblinear_sample_weight(params): + # check that we support sample_weight with liblinear in all possible cases: + # l1-primal, l2-primal, l2-dual + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.dtype("float"), + ) + y = np.array( + [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int") + ) + + X2 = np.vstack([X, X]) + y2 = np.hstack([y, 3 - y]) + sample_weight = np.ones(shape=len(y) * 2) + sample_weight[len(y) :] = 0 + X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0) + + base_clf = LogisticRegression(solver="liblinear", random_state=42) + base_clf.set_params(**params) + clf_no_weight = clone(base_clf).fit(X, y) + clf_with_weight = clone(base_clf).fit(X2, y2, sample_weight=sample_weight) + + for method in ("predict", "predict_proba", "decision_function"): + X_clf_no_weight = getattr(clf_no_weight, method)(X) + X_clf_with_weight = getattr(clf_with_weight, method)(X) + assert_allclose(X_clf_no_weight, X_clf_with_weight) + + +def test_scores_attribute_layout_elasticnet(): + # Non regression test for issue #14955. + # when penalty is elastic net the scores_ attribute has shape + # (n_classes, n_Cs, n_l1_ratios) + # We here make sure that the second dimension indeed corresponds to Cs and + # the third dimension corresponds to l1_ratios. 
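+ # After averaging scores_[1] over folds, entry [i, j] should match the
+ # cross-validated score of a LogisticRegression fitted with C=Cs[i] and
+ # l1_ratio=l1_ratios[j].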
+ + X, y = make_classification(n_samples=1000, random_state=0) + cv = StratifiedKFold(n_splits=5) + + l1_ratios = [0.1, 0.9] + Cs = [0.1, 1, 10] + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + solver="saga", + l1_ratios=l1_ratios, + Cs=Cs, + cv=cv, + random_state=0, + max_iter=250, + tol=1e-3, + ) + lrcv.fit(X, y) + + avg_scores_lrcv = lrcv.scores_[1].mean(axis=0) # average over folds + + for i, C in enumerate(Cs): + for j, l1_ratio in enumerate(l1_ratios): + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + C=C, + l1_ratio=l1_ratio, + random_state=0, + max_iter=250, + tol=1e-3, + ) + + avg_score_lr = cross_val_score(lr, X, y, cv=cv).mean() + assert avg_scores_lrcv[i, j] == pytest.approx(avg_score_lr) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_multinomial_identifiability_on_iris(fit_intercept): + """Test that the multinomial classification is identifiable. + + A multinomial with c classes can be modeled with + probability_k = exp(X@coef_k) / sum(exp(X@coef_l), l=1..c) for k=1..c. + This is not identifiable, unless one chooses a further constraint. + According to [1], the maximum of the L2 penalized likelihood automatically + satisfies the symmetric constraint: + sum(coef_k, k=1..c) = 0 + + Further details can be found in [2]. + + Reference + --------- + .. [1] :doi:`Zhu, Ji and Trevor J. Hastie. "Classification of gene microarrays by + penalized logistic regression". Biostatistics 5 3 (2004): 427-43. + <10.1093/biostatistics/kxg046>` + + .. [2] :arxiv:`Noah Simon and Jerome Friedman and Trevor Hastie. (2013) + "A Blockwise Descent Algorithm for Group-penalized Multiresponse and + Multinomial Regression". <1311.6529>` + """ + # Test logistic regression with the iris dataset + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + clf = LogisticRegression( + C=len(iris.data), + solver="lbfgs", + multi_class="multinomial", + fit_intercept=fit_intercept, + ) + # Scaling X to ease convergence. + X_scaled = scale(iris.data) + clf.fit(X_scaled, target) + + # axis=0 is sum over classes + assert_allclose(clf.coef_.sum(axis=0), 0, atol=1e-10) + if fit_intercept: + clf.intercept_.sum(axis=0) == pytest.approx(0, abs=1e-15) + + +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial", "auto"]) +@pytest.mark.parametrize("class_weight", [{0: 1.0, 1: 10.0, 2: 1.0}, "balanced"]) +def test_sample_weight_not_modified(multi_class, class_weight): + X, y = load_iris(return_X_y=True) + n_features = len(X) + W = np.ones(n_features) + W[: n_features // 2] = 2 + + expected = W.copy() + + clf = LogisticRegression( + random_state=0, class_weight=class_weight, max_iter=200, multi_class=multi_class + ) + clf.fit(X, y, sample_weight=W) + assert_allclose(expected, W) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_large_sparse_matrix(solver, global_random_seed, csr_container): + # Solvers either accept large sparse matrices, or raise helpful error. + # Non-regression test for pull-request #21093. 
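+ # liblinear, sag and saga only support 32-bit sparse indices and should raise
+ # a ValueError; the remaining solvers should fit the int64-indexed matrix.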
+ + # generate sparse matrix with int64 indices + X = csr_container(sparse.rand(20, 10, random_state=global_random_seed)) + for attr in ["indices", "indptr"]: + setattr(X, attr, getattr(X, attr).astype("int64")) + rng = np.random.RandomState(global_random_seed) + y = rng.randint(2, size=X.shape[0]) + + if solver in ["liblinear", "sag", "saga"]: + msg = "Only sparse matrices with 32-bit integer indices" + with pytest.raises(ValueError, match=msg): + LogisticRegression(solver=solver).fit(X, y) + else: + LogisticRegression(solver=solver).fit(X, y) + + +def test_single_feature_newton_cg(): + # Test that Newton-CG works with a single feature and intercept. + # Non-regression test for issue #23605. + + X = np.array([[0.5, 0.65, 1.1, 1.25, 0.8, 0.54, 0.95, 0.7]]).T + y = np.array([1, 1, 0, 0, 1, 1, 0, 1]) + assert X.shape[1] == 1 + LogisticRegression(solver="newton-cg", fit_intercept=True).fit(X, y) + + +def test_liblinear_not_stuck(): + # Non-regression https://github.com/scikit-learn/scikit-learn/issues/18264 + X = iris.data.copy() + y = iris.target.copy() + X = X[y != 2] + y = y[y != 2] + X_prep = StandardScaler().fit_transform(X) + + C = l1_min_c(X, y, loss="log") * 10 ** (10 / 29) + clf = LogisticRegression( + penalty="l1", + solver="liblinear", + tol=1e-6, + max_iter=100, + intercept_scaling=10000.0, + random_state=0, + C=C, + ) + + # test that the fit does not raise a ConvergenceWarning + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + clf.fit(X_prep, y) + + +@pytest.mark.usefixtures("enable_slep006") +def test_lr_cv_scores_differ_when_sample_weight_is_requested(): + """Test that `sample_weight` is correctly passed to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` by + checking the difference in scores with the case when `sample_weight` + is not requested. 
+ """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + + assert not np.allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert not np.allclose(score_1, score_2) + + +def test_lr_cv_scores_without_enabling_metadata_routing(): + """Test that `sample_weight` is passed correctly to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` even + when `enable_metadata_routing=False` + """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + with config_context(enable_metadata_routing=False): + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + + with config_context(enable_metadata_routing=True): + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert_allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + assert_allclose(score_1, score_2) + + +@pytest.mark.parametrize("solver", SOLVERS) +def test_zero_max_iter(solver): + # Make sure we can inspect the state of LogisticRegression right after + # initialization (before the first weight update). + X, y = load_iris(return_X_y=True) + y = y == 2 + with ignore_warnings(category=ConvergenceWarning): + clf = LogisticRegression(solver=solver, max_iter=0).fit(X, y) + if solver not in ["saga", "sag"]: + # XXX: sag and saga have n_iter_ = [1]... + assert clf.n_iter_ == 0 + + if solver != "lbfgs": + # XXX: lbfgs has already started to update the coefficients... 
+ assert_allclose(clf.coef_, np.zeros_like(clf.coef_)) + assert_allclose( + clf.decision_function(X), + np.full(shape=X.shape[0], fill_value=clf.intercept_), + ) + assert_allclose( + clf.predict_proba(X), + np.full(shape=(X.shape[0], 2), fill_value=0.5), + ) + assert clf.score(X, y) < 0.7 + + +def test_passing_params_without_enabling_metadata_routing(): + """Test that the right error message is raised when metadata params + are passed while not supported when `enable_metadata_routing=False`.""" + X, y = make_classification(n_samples=10, random_state=0) + lr_cv = LogisticRegressionCV() + msg = "is only supported if enable_metadata_routing=True" + + with config_context(enable_metadata_routing=False): + params = {"extra_param": 1.0} + + with pytest.raises(ValueError, match=msg): + lr_cv.fit(X, y, **params) + + with pytest.raises(ValueError, match=msg): + lr_cv.score(X, y, **params) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py new file mode 100644 index 0000000000000000000000000000000000000000..7f4354fc803d24c2396f5105a5a4ce52c0a3e9fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py @@ -0,0 +1,262 @@ +# Author: Vlad Niculae +# License: BSD 3 clause + + +import numpy as np +import pytest + +from sklearn.datasets import make_sparse_coded_signal +from sklearn.linear_model import ( + LinearRegression, + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + orthogonal_mp, + orthogonal_mp_gram, +) +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) + +n_samples, n_features, n_nonzero_coefs, n_targets = 25, 35, 5, 3 +y, X, gamma = make_sparse_coded_signal( + n_samples=n_targets, + n_components=n_features, + n_features=n_samples, + n_nonzero_coefs=n_nonzero_coefs, + random_state=0, +) +y, X, gamma = y.T, X.T, gamma.T +# Make X not of norm 1 for testing +X *= 10 +y *= 10 +G, Xy = np.dot(X.T, X), np.dot(X.T, y) +# this makes X (n_samples, n_features) +# and y (n_samples, 3) + + +def test_correct_shapes(): + assert orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp(X, y, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_correct_shapes_gram(): + assert orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_n_nonzero_coefs(): + assert np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)) <= 5 + assert ( + np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5, precompute=True)) + <= 5 + ) + + +def test_tol(): + tol = 0.5 + gamma = orthogonal_mp(X, y[:, 0], tol=tol) + gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True) + assert np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol + assert np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol + + +def test_with_without_gram(): + assert_array_almost_equal( + orthogonal_mp(X, y, n_nonzero_coefs=5), + orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True), + ) + + +def test_with_without_gram_tol(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=1.0), orthogonal_mp(X, y, tol=1.0, precompute=True) + ) + + +def test_unreachable_accuracy(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0), orthogonal_mp(X, y, n_nonzero_coefs=n_features) + ) + warning_message = ( + "Orthogonal 
matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0, precompute=True), + orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_features), + ) + + +@pytest.mark.parametrize("positional_params", [(X, y), (G, Xy)]) +@pytest.mark.parametrize( + "keyword_params", + [{"n_nonzero_coefs": n_features + 1}], +) +def test_bad_input(positional_params, keyword_params): + with pytest.raises(ValueError): + orthogonal_mp(*positional_params, **keyword_params) + + +def test_perfect_signal_recovery(): + (idx,) = gamma[:, 0].nonzero() + gamma_rec = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5) + gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5) + assert_array_equal(idx, np.flatnonzero(gamma_rec)) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_orthogonal_mp_gram_readonly(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/5956 + (idx,) = gamma[:, 0].nonzero() + G_readonly = G.copy() + G_readonly.setflags(write=False) + Xy_readonly = Xy.copy() + Xy_readonly.setflags(write=False) + gamma_gram = orthogonal_mp_gram( + G_readonly, Xy_readonly[:, 0], n_nonzero_coefs=5, copy_Gram=False, copy_Xy=False + ) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_estimator(): + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs) + omp.fit(X, y[:, 0]) + assert omp.coef_.shape == (n_features,) + assert omp.intercept_.shape == () + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_.shape == (n_targets,) + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + coef_normalized = omp.coef_[0].copy() + omp.set_params(fit_intercept=True) + omp.fit(X, y[:, 0]) + assert_array_almost_equal(coef_normalized, omp.coef_) + + omp.set_params(fit_intercept=False) + omp.fit(X, y[:, 0]) + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + assert omp.coef_.shape == (n_features,) + assert omp.intercept_ == 0 + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_ == 0 + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + +def test_identical_regressors(): + newX = X.copy() + newX[:, 1] = newX[:, 0] + gamma = np.zeros(n_features) + gamma[0] = gamma[1] = 1.0 + newy = np.dot(newX, gamma) + warning_message = ( + "Orthogonal matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." 
+ ) + with pytest.warns(RuntimeWarning, match=warning_message): + orthogonal_mp(newX, newy, n_nonzero_coefs=2) + + +def test_swapped_regressors(): + gamma = np.zeros(n_features) + # X[:, 21] should be selected first, then X[:, 0] selected second, + # which will take X[:, 21]'s place in case the algorithm does + # column swapping for optimization (which is the case at the moment) + gamma[21] = 1.0 + gamma[0] = 0.5 + new_y = np.dot(X, gamma) + new_Xy = np.dot(X.T, new_y) + gamma_hat = orthogonal_mp(X, new_y, n_nonzero_coefs=2) + gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, n_nonzero_coefs=2) + assert_array_equal(np.flatnonzero(gamma_hat), [0, 21]) + assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21]) + + +def test_no_atoms(): + y_empty = np.zeros_like(y) + Xy_empty = np.dot(X.T, y_empty) + gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, n_nonzero_coefs=1) + gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, n_nonzero_coefs=1) + assert np.all(gamma_empty == 0) + assert np.all(gamma_empty_gram == 0) + + +def test_omp_path(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_return_path_prop_with_gram(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True, precompute=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False, precompute=True) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_cv(): + y_ = y[:, 0] + gamma_ = gamma[:, 0] + ompcv = OrthogonalMatchingPursuitCV(fit_intercept=False, max_iter=10) + ompcv.fit(X, y_) + assert ompcv.n_nonzero_coefs_ == n_nonzero_coefs + assert_array_almost_equal(ompcv.coef_, gamma_) + omp = OrthogonalMatchingPursuit( + fit_intercept=False, n_nonzero_coefs=ompcv.n_nonzero_coefs_ + ) + omp.fit(X, y_) + assert_array_almost_equal(ompcv.coef_, omp.coef_) + + +def test_omp_reaches_least_squares(): + # Use small simple data; it's a sanity check but OMP can stop early + rng = check_random_state(0) + n_samples, n_features = (10, 8) + n_targets = 3 + X = rng.randn(n_samples, n_features) + Y = rng.randn(n_samples, n_targets) + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features) + lstsq = LinearRegression() + omp.fit(X, Y) + lstsq.fit(X, Y) + assert_array_almost_equal(omp.coef_, lstsq.coef_) + + +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +def test_omp_gram_dtype_match(data_type): + # verify matching input data type and output data type + coef = orthogonal_mp_gram( + G.astype(data_type), Xy.astype(data_type), n_nonzero_coefs=5 + ) + assert coef.dtype == data_type + + +def test_omp_gram_numerical_consistency(): + # verify numericaly consistency among np.float32 and np.float64 + coef_32 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float32), n_nonzero_coefs=5 + ) + coef_64 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float64), n_nonzero_coefs=5 + ) + assert_allclose(coef_32, coef_64) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py 
b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py new file mode 100644 index 0000000000000000000000000000000000000000..bcfd58b1eab2b51ecd8cc1097bd48577e2babe0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py @@ -0,0 +1,268 @@ +import numpy as np +import pytest + +from sklearn.base import ClassifierMixin +from sklearn.datasets import load_iris +from sklearn.linear_model import PassiveAggressiveClassifier, PassiveAggressiveRegressor +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() +random_state = check_random_state(12) +indices = np.arange(iris.data.shape[0]) +random_state.shuffle(indices) +X = iris.data[indices] +y = iris.target[indices] + + +class MyPassiveAggressive(ClassifierMixin): + def __init__( + self, + C=1.0, + epsilon=0.01, + loss="hinge", + fit_intercept=True, + n_iter=1, + random_state=None, + ): + self.C = C + self.epsilon = epsilon + self.loss = loss + self.fit_intercept = fit_intercept + self.n_iter = n_iter + + def fit(self, X, y): + n_samples, n_features = X.shape + self.w = np.zeros(n_features, dtype=np.float64) + self.b = 0.0 + + for t in range(self.n_iter): + for i in range(n_samples): + p = self.project(X[i]) + if self.loss in ("hinge", "squared_hinge"): + loss = max(1 - y[i] * p, 0) + else: + loss = max(np.abs(p - y[i]) - self.epsilon, 0) + + sqnorm = np.dot(X[i], X[i]) + + if self.loss in ("hinge", "epsilon_insensitive"): + step = min(self.C, loss / sqnorm) + elif self.loss in ("squared_hinge", "squared_epsilon_insensitive"): + step = loss / (sqnorm + 1.0 / (2 * self.C)) + + if self.loss in ("hinge", "squared_hinge"): + step *= y[i] + else: + step *= np.sign(y[i] - p) + + self.w += step * X[i] + if self.fit_intercept: + self.b += step + + def project(self, X): + return np.dot(X, self.w) + self.b + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_classifier_accuracy(csr_container, fit_intercept, average): + data = csr_container(X) if csr_container is not None else X + clf = PassiveAggressiveClassifier( + C=1.0, + max_iter=30, + fit_intercept=fit_intercept, + random_state=1, + average=average, + tol=None, + ) + clf.fit(data, y) + score = clf.score(data, y) + assert score > 0.79 + if average: + assert hasattr(clf, "_average_coef") + assert hasattr(clf, "_average_intercept") + assert hasattr(clf, "_standard_intercept") + assert hasattr(clf, "_standard_coef") + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_classifier_partial_fit(csr_container, average): + classes = np.unique(y) + data = csr_container(X) if csr_container is not None else X + clf = PassiveAggressiveClassifier(random_state=0, average=average, max_iter=5) + for t in range(30): + clf.partial_fit(data, y, classes) + score = clf.score(data, y) + assert score > 0.79 + if average: + assert hasattr(clf, "_average_coef") + assert hasattr(clf, "_average_intercept") + assert hasattr(clf, "_standard_intercept") + assert hasattr(clf, "_standard_coef") + + +def test_classifier_refit(): + # Classifier can be retrained on different labels and features. 
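+ # The second fit drops one feature and uses string labels; classes_ must be
+ # updated to the new label set.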
+ clf = PassiveAggressiveClassifier(max_iter=5).fit(X, y) + assert_array_equal(clf.classes_, np.unique(y)) + + clf.fit(X[:, :-1], iris.target_names[y]) + assert_array_equal(clf.classes_, iris.target_names) + + +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize("loss", ("hinge", "squared_hinge")) +def test_classifier_correctness(loss, csr_container): + y_bin = y.copy() + y_bin[y != 1] = -1 + + clf1 = MyPassiveAggressive(loss=loss, n_iter=2) + clf1.fit(X, y_bin) + + data = csr_container(X) if csr_container is not None else X + clf2 = PassiveAggressiveClassifier(loss=loss, max_iter=2, shuffle=False, tol=None) + clf2.fit(data, y_bin) + + assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2) + + +@pytest.mark.parametrize( + "response_method", ["predict_proba", "predict_log_proba", "transform"] +) +def test_classifier_undefined_methods(response_method): + clf = PassiveAggressiveClassifier(max_iter=100) + with pytest.raises(AttributeError): + getattr(clf, response_method) + + +def test_class_weights(): + # Test class weights. + X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y2 = [1, 1, 1, -1, -1] + + clf = PassiveAggressiveClassifier( + C=0.1, max_iter=100, class_weight=None, random_state=100 + ) + clf.fit(X2, y2) + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) + + # we give a small weights to class 1 + clf = PassiveAggressiveClassifier( + C=0.1, max_iter=100, class_weight={1: 0.001}, random_state=100 + ) + clf.fit(X2, y2) + + # now the hyperplane should rotate clock-wise and + # the prediction on this point should shift + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) + + +def test_partial_fit_weight_class_balanced(): + # partial_fit with class_weight='balanced' not supported + clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100) + with pytest.raises(ValueError): + clf.partial_fit(X, y, classes=np.unique(y)) + + +def test_equal_class_weight(): + X2 = [[1, 0], [1, 0], [0, 1], [0, 1]] + y2 = [0, 0, 1, 1] + clf = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight=None) + clf.fit(X2, y2) + + # Already balanced, so "balanced" weights should have no effect + clf_balanced = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight="balanced") + clf_balanced.fit(X2, y2) + + clf_weighted = PassiveAggressiveClassifier( + C=0.1, tol=None, class_weight={0: 0.5, 1: 0.5} + ) + clf_weighted.fit(X2, y2) + + # should be similar up to some epsilon due to learning rate schedule + assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) + assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2) + + +def test_wrong_class_weight_label(): + # ValueError due to wrong class_weight label. 
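+ # class_weight refers to label 0, which is not present in y2 (labels are 1
+ # and -1), so fit is expected to raise a ValueError.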
+ X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y2 = [1, 1, 1, -1, -1] + + clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100) + with pytest.raises(ValueError): + clf.fit(X2, y2) + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_regressor_mse(csr_container, fit_intercept, average): + y_bin = y.copy() + y_bin[y != 1] = -1 + + data = csr_container(X) if csr_container is not None else X + reg = PassiveAggressiveRegressor( + C=1.0, + fit_intercept=fit_intercept, + random_state=0, + average=average, + max_iter=5, + ) + reg.fit(data, y_bin) + pred = reg.predict(data) + assert np.mean((pred - y_bin) ** 2) < 1.7 + if average: + assert hasattr(reg, "_average_coef") + assert hasattr(reg, "_average_intercept") + assert hasattr(reg, "_standard_intercept") + assert hasattr(reg, "_standard_coef") + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_regressor_partial_fit(csr_container, average): + y_bin = y.copy() + y_bin[y != 1] = -1 + + data = csr_container(X) if csr_container is not None else X + reg = PassiveAggressiveRegressor(random_state=0, average=average, max_iter=100) + for t in range(50): + reg.partial_fit(data, y_bin) + pred = reg.predict(data) + assert np.mean((pred - y_bin) ** 2) < 1.7 + if average: + assert hasattr(reg, "_average_coef") + assert hasattr(reg, "_average_intercept") + assert hasattr(reg, "_standard_intercept") + assert hasattr(reg, "_standard_coef") + + +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize("loss", ("epsilon_insensitive", "squared_epsilon_insensitive")) +def test_regressor_correctness(loss, csr_container): + y_bin = y.copy() + y_bin[y != 1] = -1 + + reg1 = MyPassiveAggressive(loss=loss, n_iter=2) + reg1.fit(X, y_bin) + + data = csr_container(X) if csr_container is not None else X + reg2 = PassiveAggressiveRegressor(tol=None, loss=loss, max_iter=2, shuffle=False) + reg2.fit(data, y_bin) + + assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2) + + +def test_regressor_undefined_methods(): + reg = PassiveAggressiveRegressor(max_iter=100) + with pytest.raises(AttributeError): + reg.transform(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_perceptron.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..71456ae72132ccebc76da96aea9213fd55f47c9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_perceptron.py @@ -0,0 +1,88 @@ +import numpy as np +import pytest + +from sklearn.datasets import load_iris +from sklearn.linear_model import Perceptron +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose, assert_array_almost_equal +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() +random_state = check_random_state(12) +indices = np.arange(iris.data.shape[0]) +random_state.shuffle(indices) +X = iris.data[indices] +y = iris.target[indices] + + +class MyPerceptron: + def __init__(self, n_iter=1): + self.n_iter = n_iter + + def fit(self, X, y): + n_samples, n_features = X.shape + self.w = np.zeros(n_features, dtype=np.float64) + self.b = 0.0 + + for t in range(self.n_iter): + for i in range(n_samples): + if 
self.predict(X[i])[0] != y[i]: + self.w += y[i] * X[i] + self.b += y[i] + + def project(self, X): + return np.dot(X, self.w) + self.b + + def predict(self, X): + X = np.atleast_2d(X) + return np.sign(self.project(X)) + + +@pytest.mark.parametrize("container", CSR_CONTAINERS + [np.array]) +def test_perceptron_accuracy(container): + data = container(X) + clf = Perceptron(max_iter=100, tol=None, shuffle=False) + clf.fit(data, y) + score = clf.score(data, y) + assert score > 0.7 + + +def test_perceptron_correctness(): + y_bin = y.copy() + y_bin[y != 1] = -1 + + clf1 = MyPerceptron(n_iter=2) + clf1.fit(X, y_bin) + + clf2 = Perceptron(max_iter=2, shuffle=False, tol=None) + clf2.fit(X, y_bin) + + assert_array_almost_equal(clf1.w, clf2.coef_.ravel()) + + +def test_undefined_methods(): + clf = Perceptron(max_iter=100) + for meth in ("predict_proba", "predict_log_proba"): + with pytest.raises(AttributeError): + getattr(clf, meth) + + +def test_perceptron_l1_ratio(): + """Check that `l1_ratio` has an impact when `penalty='elasticnet'`""" + clf1 = Perceptron(l1_ratio=0, penalty="elasticnet") + clf1.fit(X, y) + + clf2 = Perceptron(l1_ratio=0.15, penalty="elasticnet") + clf2.fit(X, y) + + assert clf1.score(X, y) != clf2.score(X, y) + + # check that the bounds of elastic net which should correspond to an l1 or + # l2 penalty depending of `l1_ratio` value. + clf_l1 = Perceptron(penalty="l1").fit(X, y) + clf_elasticnet = Perceptron(l1_ratio=1, penalty="elasticnet").fit(X, y) + assert_allclose(clf_l1.coef_, clf_elasticnet.coef_) + + clf_l2 = Perceptron(penalty="l2").fit(X, y) + clf_elasticnet = Perceptron(l1_ratio=0, penalty="elasticnet").fit(X, y) + assert_allclose(clf_l2.coef_, clf_elasticnet.coef_) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py new file mode 100644 index 0000000000000000000000000000000000000000..b442f6b207e708c7f7b2b989afd0c34ff492eddf --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_ransac.py @@ -0,0 +1,545 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal + +from sklearn.datasets import make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + LinearRegression, + OrthogonalMatchingPursuit, + RANSACRegressor, + Ridge, +) +from sklearn.linear_model._ransac import _dynamic_max_trials +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +# Generate coordinates of line +X = np.arange(-200, 200) +y = 0.2 * X + 20 +data = np.column_stack([X, y]) + +# Add some faulty data +rng = np.random.RandomState(1000) +outliers = np.unique(rng.randint(len(X), size=200)) +data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10 + +X = data[:, 0][:, np.newaxis] +y = data[:, 1] + + +def test_ransac_inliers_outliers(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, y) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_is_data_valid(): + def is_data_valid(X, y): + 
assert X.shape[0] == 2 + assert y.shape[0] == 2 + return False + + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + y = rng.rand(10, 1) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + is_data_valid=is_data_valid, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + +def test_ransac_is_model_valid(): + def is_model_valid(estimator, X, y): + assert X.shape[0] == 2 + assert y.shape[0] == 2 + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + is_model_valid=is_model_valid, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + +def test_ransac_max_trials(): + estimator = LinearRegression() + + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + max_trials=0, + random_state=0, + ) + with pytest.raises(ValueError): + ransac_estimator.fit(X, y) + + # there is a 1e-9 chance it will take these many trials. No good reason + # 1e-2 isn't enough, can still happen + # 2 is the what ransac defines as min_samples = X.shape[1] + 1 + max_trials = _dynamic_max_trials(len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9) + ransac_estimator = RANSACRegressor(estimator, min_samples=2) + for i in range(50): + ransac_estimator.set_params(min_samples=2, random_state=i) + ransac_estimator.fit(X, y) + assert ransac_estimator.n_trials_ < max_trials + 1 + + +def test_ransac_stop_n_inliers(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + stop_n_inliers=2, + random_state=0, + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.n_trials_ == 1 + + +def test_ransac_stop_score(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + stop_score=0, + random_state=0, + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.n_trials_ == 1 + + +def test_ransac_score(): + X = np.arange(100)[:, None] + y = np.zeros((100,)) + y[0] = 1 + y[1] = 100 + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=0.5, random_state=0 + ) + ransac_estimator.fit(X, y) + + assert ransac_estimator.score(X[2:], y[2:]) == 1 + assert ransac_estimator.score(X[:2], y[:2]) < 1 + + +def test_ransac_predict(): + X = np.arange(100)[:, None] + y = np.zeros((100,)) + y[0] = 1 + y[1] = 100 + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=0.5, random_state=0 + ) + ransac_estimator.fit(X, y) + + assert_array_equal(ransac_estimator.predict(X), np.zeros(100)) + + +def test_ransac_no_valid_data(): + def is_data_valid(X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_trials=5 + ) + + msg = "RANSAC could not find a valid consensus set" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 5 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +def test_ransac_no_valid_model(): + def is_model_valid(estimator, X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_model_valid=is_model_valid, max_trials=5 + ) + + msg = "RANSAC 
could not find a valid consensus set" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 0 + assert ransac_estimator.n_skips_invalid_model_ == 5 + + +def test_ransac_exceed_max_skips(): + def is_data_valid(X, y): + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_trials=5, max_skips=3 + ) + + msg = "RANSAC skipped more iterations than `max_skips`" + with pytest.raises(ValueError, match=msg): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 4 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +def test_ransac_warn_exceed_max_skips(): + global cause_skip + cause_skip = False + + def is_data_valid(X, y): + global cause_skip + if not cause_skip: + cause_skip = True + return True + else: + return False + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, is_data_valid=is_data_valid, max_skips=3, max_trials=5 + ) + warning_message = ( + "RANSAC found a valid consensus set but exited " + "early due to skipping more iterations than " + "`max_skips`. See estimator attributes for " + "diagnostics." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + ransac_estimator.fit(X, y) + assert ransac_estimator.n_skips_no_inliers_ == 0 + assert ransac_estimator.n_skips_invalid_data_ == 4 + assert ransac_estimator.n_skips_invalid_model_ == 0 + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSR_CONTAINERS + CSC_CONTAINERS +) +def test_ransac_sparse(sparse_container): + X_sparse = sparse_container(X) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator.fit(X_sparse, y) + + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_none_estimator(): + estimator = LinearRegression() + + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_none_estimator = RANSACRegressor( + None, min_samples=2, residual_threshold=5, random_state=0 + ) + + ransac_estimator.fit(X, y) + ransac_none_estimator.fit(X, y) + + assert_array_almost_equal( + ransac_estimator.predict(X), ransac_none_estimator.predict(X) + ) + + +def test_ransac_min_n_samples(): + estimator = LinearRegression() + ransac_estimator1 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator2 = RANSACRegressor( + estimator, + min_samples=2.0 / X.shape[0], + residual_threshold=5, + random_state=0, + ) + ransac_estimator5 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator6 = RANSACRegressor(estimator, residual_threshold=5, random_state=0) + ransac_estimator7 = RANSACRegressor( + estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0 + ) + # GH #19390 + ransac_estimator8 = RANSACRegressor( + Ridge(), min_samples=None, residual_threshold=5, random_state=0 + ) + + ransac_estimator1.fit(X, y) + ransac_estimator2.fit(X, y) + ransac_estimator5.fit(X, y) + ransac_estimator6.fit(X, y) + + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator2.predict(X) + 
) + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator5.predict(X) + ) + assert_array_almost_equal( + ransac_estimator1.predict(X), ransac_estimator6.predict(X) + ) + + with pytest.raises(ValueError): + ransac_estimator7.fit(X, y) + + err_msg = "`min_samples` needs to be explicitly set" + with pytest.raises(ValueError, match=err_msg): + ransac_estimator8.fit(X, y) + + +def test_ransac_multi_dimensional_targets(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + + # 3-D target values + yyy = np.column_stack([y, y, y]) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, yyy) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_residual_loss(): + def loss_multi1(y_true, y_pred): + return np.sum(np.abs(y_true - y_pred), axis=1) + + def loss_multi2(y_true, y_pred): + return np.sum((y_true - y_pred) ** 2, axis=1) + + def loss_mono(y_true, y_pred): + return np.abs(y_true - y_pred) + + yyy = np.column_stack([y, y, y]) + + estimator = LinearRegression() + ransac_estimator0 = RANSACRegressor( + estimator, min_samples=2, residual_threshold=5, random_state=0 + ) + ransac_estimator1 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss=loss_multi1, + ) + ransac_estimator2 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss=loss_multi2, + ) + + # multi-dimensional + ransac_estimator0.fit(X, yyy) + ransac_estimator1.fit(X, yyy) + ransac_estimator2.fit(X, yyy) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator1.predict(X) + ) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + + # one-dimensional + ransac_estimator0.fit(X, y) + ransac_estimator2.loss = loss_mono + ransac_estimator2.fit(X, y) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + ransac_estimator3 = RANSACRegressor( + estimator, + min_samples=2, + residual_threshold=5, + random_state=0, + loss="squared_error", + ) + ransac_estimator3.fit(X, y) + assert_array_almost_equal( + ransac_estimator0.predict(X), ransac_estimator2.predict(X) + ) + + +def test_ransac_default_residual_threshold(): + estimator = LinearRegression() + ransac_estimator = RANSACRegressor(estimator, min_samples=2, random_state=0) + + # Estimate parameters of corrupted data + ransac_estimator.fit(X, y) + + # Ground truth / reference inlier mask + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + +def test_ransac_dynamic_max_trials(): + # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in + # Hartley, R.~I. 
and Zisserman, A., 2004, + # Multiple View Geometry in Computer Vision, Second Edition, + # Cambridge University Press, ISBN: 0521540518 + + # e = 0%, min_samples = X + assert _dynamic_max_trials(100, 100, 2, 0.99) == 1 + + # e = 5%, min_samples = 2 + assert _dynamic_max_trials(95, 100, 2, 0.99) == 2 + # e = 10%, min_samples = 2 + assert _dynamic_max_trials(90, 100, 2, 0.99) == 3 + # e = 30%, min_samples = 2 + assert _dynamic_max_trials(70, 100, 2, 0.99) == 7 + # e = 50%, min_samples = 2 + assert _dynamic_max_trials(50, 100, 2, 0.99) == 17 + + # e = 5%, min_samples = 8 + assert _dynamic_max_trials(95, 100, 8, 0.99) == 5 + # e = 10%, min_samples = 8 + assert _dynamic_max_trials(90, 100, 8, 0.99) == 9 + # e = 30%, min_samples = 8 + assert _dynamic_max_trials(70, 100, 8, 0.99) == 78 + # e = 50%, min_samples = 8 + assert _dynamic_max_trials(50, 100, 8, 0.99) == 1177 + + # e = 0%, min_samples = 10 + assert _dynamic_max_trials(1, 100, 10, 0) == 0 + assert _dynamic_max_trials(1, 100, 10, 1) == float("inf") + + +def test_ransac_fit_sample_weight(): + ransac_estimator = RANSACRegressor(random_state=0) + n_samples = y.shape[0] + weights = np.ones(n_samples) + ransac_estimator.fit(X, y, weights) + # sanity check + assert ransac_estimator.inlier_mask_.shape[0] == n_samples + + ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_) + ref_inlier_mask[outliers] = False + # check that mask is correct + assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) + + # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where + # X = X1 repeated n1 times, X2 repeated n2 times and so forth + random_state = check_random_state(0) + X_ = random_state.randint(0, 200, [10, 1]) + y_ = np.ndarray.flatten(0.2 * X_ + 2) + sample_weight = random_state.randint(0, 10, 10) + outlier_X = random_state.randint(0, 1000, [1, 1]) + outlier_weight = random_state.randint(0, 10, 1) + outlier_y = random_state.randint(-1000, 0, 1) + + X_flat = np.append( + np.repeat(X_, sample_weight, axis=0), + np.repeat(outlier_X, outlier_weight, axis=0), + axis=0, + ) + y_flat = np.ndarray.flatten( + np.append( + np.repeat(y_, sample_weight, axis=0), + np.repeat(outlier_y, outlier_weight, axis=0), + axis=0, + ) + ) + ransac_estimator.fit(X_flat, y_flat) + ref_coef_ = ransac_estimator.estimator_.coef_ + + sample_weight = np.append(sample_weight, outlier_weight) + X_ = np.append(X_, outlier_X, axis=0) + y_ = np.append(y_, outlier_y) + ransac_estimator.fit(X_, y_, sample_weight) + + assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_) + + # check that if estimator.fit doesn't support + # sample_weight, raises error + estimator = OrthogonalMatchingPursuit() + ransac_estimator = RANSACRegressor(estimator, min_samples=10) + + err_msg = f"{estimator.__class__.__name__} does not support sample_weight." 
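+    # RANSACRegressor checks whether the wrapped estimator's fit() accepts a
+    # sample_weight argument and raises ValueError when weights are passed to
+    # an estimator (here OrthogonalMatchingPursuit) that cannot take them.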
+ with pytest.raises(ValueError, match=err_msg): + ransac_estimator.fit(X, y, weights) + + +def test_ransac_final_model_fit_sample_weight(): + X, y = make_regression(n_samples=1000, random_state=10) + rng = check_random_state(42) + sample_weight = rng.randint(1, 4, size=y.shape[0]) + sample_weight = sample_weight / sample_weight.sum() + ransac = RANSACRegressor(estimator=LinearRegression(), random_state=0) + ransac.fit(X, y, sample_weight=sample_weight) + + final_model = LinearRegression() + mask_samples = ransac.inlier_mask_ + final_model.fit( + X[mask_samples], y[mask_samples], sample_weight=sample_weight[mask_samples] + ) + + assert_allclose(ransac.estimator_.coef_, final_model.coef_, atol=1e-12) + + +def test_perfect_horizontal_line(): + """Check that we can fit a line where all samples are inliers. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19497 + """ + X = np.arange(100)[:, None] + y = np.zeros((100,)) + + estimator = LinearRegression() + ransac_estimator = RANSACRegressor(estimator, random_state=0) + ransac_estimator.fit(X, y) + + assert_allclose(ransac_estimator.estimator_.coef_, 0.0) + assert_allclose(ransac_estimator.estimator_.intercept_, 0.0) diff --git a/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py new file mode 100644 index 0000000000000000000000000000000000000000..c8415d02be80aea775334c09c3f845ee4c040886 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py @@ -0,0 +1,294 @@ +""" +Testing for Theil-Sen module (sklearn.linear_model.theil_sen) +""" + +# Author: Florian Wilhelm +# License: BSD 3 clause +import os +import re +import sys +from contextlib import contextmanager + +import numpy as np +import pytest +from numpy.testing import ( + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) +from scipy.linalg import norm +from scipy.optimize import fmin_bfgs + +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import LinearRegression, TheilSenRegressor +from sklearn.linear_model._theil_sen import ( + _breakdown_point, + _modified_weiszfeld_step, + _spatial_median, +) +from sklearn.utils._testing import assert_almost_equal + + +@contextmanager +def no_stdout_stderr(): + old_stdout = sys.stdout + old_stderr = sys.stderr + with open(os.devnull, "w") as devnull: + sys.stdout = devnull + sys.stderr = devnull + yield + devnull.flush() + sys.stdout = old_stdout + sys.stderr = old_stderr + + +def gen_toy_problem_1d(intercept=True): + random_state = np.random.RandomState(0) + # Linear model y = 3*x + N(2, 0.1**2) + w = 3.0 + if intercept: + c = 2.0 + n_samples = 50 + else: + c = 0.1 + n_samples = 100 + x = random_state.normal(size=n_samples) + noise = 0.1 * random_state.normal(size=n_samples) + y = w * x + c + noise + # Add some outliers + if intercept: + x[42], y[42] = (-2, 4) + x[43], y[43] = (-2.5, 8) + x[33], y[33] = (2.5, 1) + x[49], y[49] = (2.1, 2) + else: + x[42], y[42] = (-2, 4) + x[43], y[43] = (-2.5, 8) + x[53], y[53] = (2.5, 1) + x[60], y[60] = (2.1, 2) + x[72], y[72] = (1.8, -7) + return x[:, np.newaxis], y, w, c + + +def gen_toy_problem_2d(): + random_state = np.random.RandomState(0) + n_samples = 100 + # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2) + X = random_state.normal(size=(n_samples, 2)) + w = np.array([5.0, 10.0]) + c = 1.0 + noise = 0.1 * random_state.normal(size=n_samples) + y = np.dot(X, w) + c + noise + # Add some 
outliers + n_outliers = n_samples // 10 + ix = random_state.randint(0, n_samples, size=n_outliers) + y[ix] = 50 * random_state.normal(size=n_outliers) + return X, y, w, c + + +def gen_toy_problem_4d(): + random_state = np.random.RandomState(0) + n_samples = 10000 + # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2) + X = random_state.normal(size=(n_samples, 4)) + w = np.array([5.0, 10.0, 42.0, 7.0]) + c = 1.0 + noise = 0.1 * random_state.normal(size=n_samples) + y = np.dot(X, w) + c + noise + # Add some outliers + n_outliers = n_samples // 10 + ix = random_state.randint(0, n_samples, size=n_outliers) + y[ix] = 50 * random_state.normal(size=n_outliers) + return X, y, w, c + + +def test_modweiszfeld_step_1d(): + X = np.array([1.0, 2.0, 3.0]).reshape(3, 1) + # Check startvalue is element of X and solution + median = 2.0 + new_y = _modified_weiszfeld_step(X, median) + assert_array_almost_equal(new_y, median) + # Check startvalue is not the solution + y = 2.5 + new_y = _modified_weiszfeld_step(X, y) + assert_array_less(median, new_y) + assert_array_less(new_y, y) + # Check startvalue is not the solution but element of X + y = 3.0 + new_y = _modified_weiszfeld_step(X, y) + assert_array_less(median, new_y) + assert_array_less(new_y, y) + # Check that a single vector is identity + X = np.array([1.0, 2.0, 3.0]).reshape(1, 3) + y = X[0] + new_y = _modified_weiszfeld_step(X, y) + assert_array_equal(y, new_y) + + +def test_modweiszfeld_step_2d(): + X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2) + y = np.array([0.5, 0.5]) + # Check first two iterations + new_y = _modified_weiszfeld_step(X, y) + assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3])) + new_y = _modified_weiszfeld_step(X, new_y) + assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592])) + # Check fix point + y = np.array([0.21132505, 0.78867497]) + new_y = _modified_weiszfeld_step(X, y) + assert_array_almost_equal(new_y, y) + + +def test_spatial_median_1d(): + X = np.array([1.0, 2.0, 3.0]).reshape(3, 1) + true_median = 2.0 + _, median = _spatial_median(X) + assert_array_almost_equal(median, true_median) + # Test larger problem and for exact solution in 1d case + random_state = np.random.RandomState(0) + X = random_state.randint(100, size=(1000, 1)) + true_median = np.median(X.ravel()) + _, median = _spatial_median(X) + assert_array_equal(median, true_median) + + +def test_spatial_median_2d(): + X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2) + _, median = _spatial_median(X, max_iter=100, tol=1.0e-6) + + def cost_func(y): + dists = np.array([norm(x - y) for x in X]) + return np.sum(dists) + + # Check if median is solution of the Fermat-Weber location problem + fermat_weber = fmin_bfgs(cost_func, median, disp=False) + assert_array_almost_equal(median, fermat_weber) + # Check when maximum iteration is exceeded a warning is emitted + warning_message = "Maximum number of iterations 30 reached in spatial median." 
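+    # With tol=0.0 the stopping criterion of the modified Weiszfeld iteration is
+    # effectively unreachable, so the max_iter=30 cap is hit and the
+    # ConvergenceWarning checked below must be emitted.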
+ with pytest.warns(ConvergenceWarning, match=warning_message): + _spatial_median(X, max_iter=30, tol=0.0) + + +def test_theil_sen_1d(): + X, y, w, c = gen_toy_problem_1d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert np.abs(lstq.coef_ - w) > 0.9 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_theil_sen_1d_no_intercept(): + X, y, w, c = gen_toy_problem_1d(intercept=False) + # Check that Least Squares fails + lstq = LinearRegression(fit_intercept=False).fit(X, y) + assert np.abs(lstq.coef_ - w - c) > 0.5 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w + c, 1) + assert_almost_equal(theil_sen.intercept_, 0.0) + + # non-regression test for #18104 + theil_sen.score(X, y) + + +def test_theil_sen_2d(): + X, y, w, c = gen_toy_problem_2d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert norm(lstq.coef_ - w) > 1.0 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_calc_breakdown_point(): + bp = _breakdown_point(1e10, 2) + assert np.abs(bp - 1 + 1 / (np.sqrt(2))) < 1.0e-6 + + +@pytest.mark.parametrize( + "param, ExceptionCls, match", + [ + ( + {"n_subsamples": 1}, + ValueError, + re.escape("Invalid parameter since n_features+1 > n_subsamples (2 > 1)"), + ), + ( + {"n_subsamples": 101}, + ValueError, + re.escape("Invalid parameter since n_subsamples > n_samples (101 > 50)"), + ), + ], +) +def test_checksubparams_invalid_input(param, ExceptionCls, match): + X, y, w, c = gen_toy_problem_1d() + theil_sen = TheilSenRegressor(**param, random_state=0) + with pytest.raises(ExceptionCls, match=match): + theil_sen.fit(X, y) + + +def test_checksubparams_n_subsamples_if_less_samples_than_features(): + random_state = np.random.RandomState(0) + n_samples, n_features = 10, 20 + X = random_state.normal(size=(n_samples, n_features)) + y = random_state.normal(size=n_samples) + theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0) + with pytest.raises(ValueError): + theil_sen.fit(X, y) + + +def test_subpopulation(): + X, y, w, c = gen_toy_problem_4d() + theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_subsamples(): + X, y, w, c = gen_toy_problem_4d() + theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) + lstq = LinearRegression().fit(X, y) + # Check for exact the same results as Least Squares + assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9) + + +def test_verbosity(): + X, y, w, c = gen_toy_problem_1d() + # Check that Theil-Sen can be verbose + with no_stdout_stderr(): + TheilSenRegressor(verbose=True, random_state=0).fit(X, y) + TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y) + + +def test_theil_sen_parallel(): + X, y, w, c = gen_toy_problem_2d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert norm(lstq.coef_ - w) > 1.0 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(n_jobs=2, random_state=0, max_subpopulation=2e3).fit( + X, y + 
) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_less_samples_than_features(): + random_state = np.random.RandomState(0) + n_samples, n_features = 10, 20 + X = random_state.normal(size=(n_samples, n_features)) + y = random_state.normal(size=n_samples) + # Check that Theil-Sen falls back to Least Squares if fit_intercept=False + theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) + lstq = LinearRegression(fit_intercept=False).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12) + # Check fit_intercept=True case. This will not be equal to the Least + # Squares solution since the intercept is calculated differently. + theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y) + y_pred = theil_sen.predict(X) + assert_array_almost_equal(y_pred, y, 12)
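
A minimal illustrative sketch of the trial-count bound behind the hand-checked
values in test_ransac_dynamic_max_trials: with inlier ratio w, subset size s and
requested confidence p, the number of random trials needed to draw at least one
all-inlier subset is N = ceil(log(1 - p) / log(1 - w**s)). The helper name
expected_ransac_trials is hypothetical; it is not the _dynamic_max_trials
implementation exercised above, only the standard formula its asserted values
are expected to follow.

import math


def expected_ransac_trials(n_inliers, n_samples, min_samples, probability):
    """Trials needed so that, with the given probability, at least one random
    subset of size min_samples consists only of inliers."""
    inlier_ratio = n_inliers / float(n_samples)
    nom = 1 - probability
    if nom == 0:
        # probability == 1 can never be guaranteed by random sampling
        return float("inf")
    denom = 1 - inlier_ratio ** min_samples
    if denom == 0:
        # every sample is an inlier, so a single trial suffices
        return 1
    return int(math.ceil(math.log(nom) / math.log(denom)))


# Reproduces the hand-calculated table values asserted in the test, e.g.
# 7 trials for 30% outliers with pairs, 1177 trials for 50% outliers with
# 8-point subsets:
assert expected_ransac_trials(70, 100, 2, 0.99) == 7
assert expected_ransac_trials(50, 100, 8, 0.99) == 1177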