diff --git a/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..c08c94f7d5b337d41514be3b99c21211489fe783 --- /dev/null +++ b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfc48c3605ebf51ccaa680d945c26d945ff04748a7f8314b1eac44ffb0122c0e +size 33555627 diff --git a/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..c0eccb40619e40fa08858afa1fd1fe6b23c41be9 --- /dev/null +++ b/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fa8aea791a8fa19fe4c81803aa90b8a0b9931f3154aaaa230fe73b237bed222 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..7b8628c37c271082b60ac68c5c7f2d1271b81380 --- /dev/null +++ b/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0eef265b39db5f3ad5f42b032f58f2480311e771ddb6083c2857d448b6c3f55 +size 33555627 diff --git a/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..ace44f7537c05cdd6971abedf6783b779c790c32 --- /dev/null +++ 
b/ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63925f8ed3bf866c5203925dcfadb544297213c3a0b2a6ec9bfed86c046e9af7 +size 33555533 diff --git a/ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..192198d072b3b21a60119b4b616f70a71bbe05df --- /dev/null +++ b/ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:120466dfe5c6ca6778d6ffd4b8e8f4c81a7df365a3a26d1c289c48a640b1b181 +size 9387 diff --git a/ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..1022c90fe7f4a93afee8c4793170a0f8f92fa855 --- /dev/null +++ b/ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53ef0f9fd80e3e2457de9647681c3729516278db8c98967c7188423b479fec25 +size 9293 diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f802ff55133ca777c09de4abfd931cebdd0dd1ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0317bfacf5a6665b3bb1623b616452a7ef2f09ba Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de5304e28764ffbf858e06919bd0f6876e62c0fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c902f70c027663c7bc193cf103f61822889f5b1a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc45e47005c3d1e532f359f30d6ddde5623cf581 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75e41f1c2cb8448a53296ce7faf40c1ac3de340b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..71652d8cb085ffa86e375f49abb77d54c68907f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37c0c3b4261598f09dfde15d7196c36b03d63609 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b28be3e06c51d74d5067a739f3f97749de31b6d5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..373b4dcfe40818d6907c169a7f1e4a399c9dd7fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dd57efff8aec3d2c090f5c6803496df99da8b7d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f550f00d8290c2a500054bcda612b41bfb7101dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4975ea7d684f1bfdf691bfcb39e5aeb96d72735d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03fa1cb0dcb188dbd18e19b967c7d76726f8d821 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..babe10773b35674c4374736b14c95f25b4433910 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abff9c007b3cc6bf50a21f22dc16ee9a577ff7ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffa98d325d4a9b9117562a726a6e632b4963e8d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45a180b84fa289956ff2cf24e1767543a791f3f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_loss/__init__.py b/venv/lib/python3.10/site-packages/sklearn/_loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee15e693c16f6aa996928f2862da30119b241f6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_loss/__init__.py @@ -0,0 +1,30 @@ +""" +The :mod:`sklearn._loss` module includes loss function classes suitable for +fitting classification and regression tasks. 
+""" + +from .loss import ( + AbsoluteError, + HalfBinomialLoss, + HalfGammaLoss, + HalfMultinomialLoss, + HalfPoissonLoss, + HalfSquaredError, + HalfTweedieLoss, + HalfTweedieLossIdentity, + HuberLoss, + PinballLoss, +) + +__all__ = [ + "HalfSquaredError", + "AbsoluteError", + "PinballLoss", + "HuberLoss", + "HalfPoissonLoss", + "HalfGammaLoss", + "HalfTweedieLoss", + "HalfTweedieLossIdentity", + "HalfBinomialLoss", + "HalfMultinomialLoss", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e66fe1b0c02e1e565a92e46d78b3c358a597caa Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92607b9211bbd7b0ae0e5262aac60af84eacca6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae5d9ab14f99009ab1988ddf5328a46ffa49bc53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd b/venv/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f38cbe0badc96040426b61aef5e34e348cf7cfd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd @@ -0,0 
+1,91 @@ +# Fused types for input like y_true, raw_prediction, sample_weights. +ctypedef fused floating_in: + double + float + + +# Fused types for output like gradient and hessian +# We use a different fused types for input (floating_in) and output (floating_out), such +# that input and output can have different dtypes in the same function call. A single +# fused type can only take on one single value (type) for all arguments in one function +# call. +ctypedef fused floating_out: + double + float + + +# Struct to return 2 doubles +ctypedef struct double_pair: + double val1 + double val2 + + +# C base class for loss functions +cdef class CyLossFunction: + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfSquaredError(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyAbsoluteError(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyPinballLoss(CyLossFunction): + cdef readonly double quantile # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHuberLoss(CyLossFunction): + cdef public double delta # public makes 
it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfPoissonLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfGammaLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfTweedieLoss(CyLossFunction): + cdef readonly double power # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfTweedieLossIdentity(CyLossFunction): + cdef readonly double power # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfBinomialLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class 
CyExponentialLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/_loss/loss.py b/venv/lib/python3.10/site-packages/sklearn/_loss/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..a3b205ed10687ca9ad28ed310318ffa3c1275980 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_loss/loss.py @@ -0,0 +1,1177 @@ +""" +This module contains loss classes suitable for fitting. + +It is not part of the public API. +Specific losses are used for regression, binary classification or multiclass +classification. +""" +# Goals: +# - Provide a common private module for loss functions/classes. +# - To be used in: +# - LogisticRegression +# - PoissonRegressor, GammaRegressor, TweedieRegressor +# - HistGradientBoostingRegressor, HistGradientBoostingClassifier +# - GradientBoostingRegressor, GradientBoostingClassifier +# - SGDRegressor, SGDClassifier +# - Replace link module of GLMs. 
+ +import numbers + +import numpy as np +from scipy.special import xlogy + +from ..utils import check_scalar +from ..utils.stats import _weighted_percentile +from ._loss import ( + CyAbsoluteError, + CyExponentialLoss, + CyHalfBinomialLoss, + CyHalfGammaLoss, + CyHalfMultinomialLoss, + CyHalfPoissonLoss, + CyHalfSquaredError, + CyHalfTweedieLoss, + CyHalfTweedieLossIdentity, + CyHuberLoss, + CyPinballLoss, +) +from .link import ( + HalfLogitLink, + IdentityLink, + Interval, + LogitLink, + LogLink, + MultinomialLogit, +) + + +# Note: The shape of raw_prediction for multiclass classifications are +# - GradientBoostingClassifier: (n_samples, n_classes) +# - HistGradientBoostingClassifier: (n_classes, n_samples) +# +# Note: Instead of inheritance like +# +# class BaseLoss(BaseLink, CyLossFunction): +# ... +# +# # Note: Naturally, we would inherit in the following order +# # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss) +# # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as +# # the last one. This, of course, changes the MRO. +# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss): +# +# we use composition. This way we improve maintainability by avoiding the above +# mentioned Cython edge case and have easier to understand code (which method calls +# which code). +class BaseLoss: + """Base class for a loss function of 1-dimensional targets. + + Conventions: + + - y_true.shape = sample_weight.shape = (n_samples,) + - y_pred.shape = raw_prediction.shape = (n_samples,) + - If is_multiclass is true (multiclass classification), then + y_pred.shape = raw_prediction.shape = (n_samples, n_classes) + Note that this corresponds to the return value of decision_function. + + y_true, y_pred, sample_weight and raw_prediction must either be all float64 + or all float32. + gradient and hessian must be either both float64 or both float32. + + Note that y_pred = link.inverse(raw_prediction). 
+ + Specific loss classes can inherit specific link classes to satisfy + BaseLink's abstractmethods. + + Parameters + ---------- + sample_weight : {None, ndarray} + If sample_weight is None, the hessian might be constant. + n_classes : {None, int} + The number of classes for classification, else None. + + Attributes + ---------- + closs: CyLossFunction + link : BaseLink + interval_y_true : Interval + Valid interval for y_true + interval_y_pred : Interval + Valid Interval for y_pred + differentiable : bool + Indicates whether or not loss function is differentiable in + raw_prediction everywhere. + need_update_leaves_values : bool + Indicates whether decision trees in gradient boosting need to uptade + leave values after having been fit to the (negative) gradients. + approx_hessian : bool + Indicates whether the hessian is approximated or exact. If, + approximated, it should be larger or equal to the exact one. + constant_hessian : bool + Indicates whether the hessian is one for this loss. + is_multiclass : bool + Indicates whether n_classes > 2 is allowed. + """ + + # For gradient boosted decision trees: + # This variable indicates whether the loss requires the leaves values to + # be updated once the tree has been trained. The trees are trained to + # predict a Newton-Raphson step (see grower._finalize_leaf()). But for + # some losses (e.g. least absolute deviation) we need to adjust the tree + # values to account for the "line search" of the gradient descent + # procedure. See the original paper Greedy Function Approximation: A + # Gradient Boosting Machine by Friedman + # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory. 
+ differentiable = True + need_update_leaves_values = False + is_multiclass = False + + def __init__(self, closs, link, n_classes=None): + self.closs = closs + self.link = link + self.approx_hessian = False + self.constant_hessian = False + self.n_classes = n_classes + self.interval_y_true = Interval(-np.inf, np.inf, False, False) + self.interval_y_pred = self.link.interval_y_pred + + def in_y_true_range(self, y): + """Return True if y is in the valid range of y_true. + + Parameters + ---------- + y : ndarray + """ + return self.interval_y_true.includes(y) + + def in_y_pred_range(self, y): + """Return True if y is in the valid range of y_pred. + + Parameters + ---------- + y : ndarray + """ + return self.interval_y_pred.includes(y) + + def loss( + self, + y_true, + raw_prediction, + sample_weight=None, + loss_out=None, + n_threads=1, + ): + """Compute the pointwise loss value for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + loss_out : None or C-contiguous array of shape (n_samples,) + A location into which the result is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : array of shape (n_samples,) + Element-wise loss function. 
+ """ + if loss_out is None: + loss_out = np.empty_like(y_true) + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + + self.closs.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=loss_out, + n_threads=n_threads, + ) + return loss_out + + def loss_gradient( + self, + y_true, + raw_prediction, + sample_weight=None, + loss_out=None, + gradient_out=None, + n_threads=1, + ): + """Compute loss and gradient w.r.t. raw_prediction for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + loss_out : None or C-contiguous array of shape (n_samples,) + A location into which the loss is stored. If None, a new array + might be created. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the gradient is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : array of shape (n_samples,) + Element-wise loss function. + + gradient : array of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. 
+ """ + if loss_out is None: + if gradient_out is None: + loss_out = np.empty_like(y_true) + gradient_out = np.empty_like(raw_prediction) + else: + loss_out = np.empty_like(y_true, dtype=gradient_out.dtype) + elif gradient_out is None: + gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + + self.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=loss_out, + gradient_out=gradient_out, + n_threads=n_threads, + ) + return loss_out, gradient_out + + def gradient( + self, + y_true, + raw_prediction, + sample_weight=None, + gradient_out=None, + n_threads=1, + ): + """Compute gradient of loss w.r.t raw_prediction for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the result is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + gradient : array of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. 
+ """ + if gradient_out is None: + gradient_out = np.empty_like(raw_prediction) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + + self.closs.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + n_threads=n_threads, + ) + return gradient_out + + def gradient_hessian( + self, + y_true, + raw_prediction, + sample_weight=None, + gradient_out=None, + hessian_out=None, + n_threads=1, + ): + """Compute gradient and hessian of loss w.r.t raw_prediction. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the gradient is stored. If None, a new array + might be created. + hessian_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the hessian is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + gradient : arrays of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. + + hessian : arrays of shape (n_samples,) or (n_samples, n_classes) + Element-wise hessians. 
+ """ + if gradient_out is None: + if hessian_out is None: + gradient_out = np.empty_like(raw_prediction) + hessian_out = np.empty_like(raw_prediction) + else: + gradient_out = np.empty_like(hessian_out) + elif hessian_out is None: + hessian_out = np.empty_like(gradient_out) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + if hessian_out.ndim == 2 and hessian_out.shape[1] == 1: + hessian_out = hessian_out.squeeze(1) + + self.closs.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + hessian_out=hessian_out, + n_threads=n_threads, + ) + return gradient_out, hessian_out + + def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1): + """Compute the weighted average loss. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : float + Mean or averaged loss function. + """ + return np.average( + self.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + loss_out=None, + n_threads=n_threads, + ), + weights=sample_weight, + ) + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This can be used as initial estimates of predictions, i.e. before the + first iteration in fit. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Observed, true target values. 
+ sample_weight : None or array of shape (n_samples,) + Sample weights. + + Returns + ------- + raw_prediction : numpy scalar or array of shape (n_classes,) + Raw predictions of an intercept-only model. + """ + # As default, take weighted average of the target over the samples + # axis=0 and then transform into link-scale (raw_prediction). + y_pred = np.average(y_true, weights=sample_weight, axis=0) + eps = 10 * np.finfo(y_pred.dtype).eps + + if self.interval_y_pred.low == -np.inf: + a_min = None + elif self.interval_y_pred.low_inclusive: + a_min = self.interval_y_pred.low + else: + a_min = self.interval_y_pred.low + eps + + if self.interval_y_pred.high == np.inf: + a_max = None + elif self.interval_y_pred.high_inclusive: + a_max = self.interval_y_pred.high + else: + a_max = self.interval_y_pred.high - eps + + if a_min is None and a_max is None: + return self.link.link(y_pred) + else: + return self.link.link(np.clip(y_pred, a_min, a_max)) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + """Calculate term dropped in loss. + + With this term added, the loss of perfect predictions is zero. + """ + return np.zeros_like(y_true) + + def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order="F"): + """Initialize arrays for gradients and hessians. + + Unless hessians are constant, arrays are initialized with undefined values. + + Parameters + ---------- + n_samples : int + The number of samples, usually passed to `fit()`. + dtype : {np.float64, np.float32}, default=np.float64 + The dtype of the arrays gradient and hessian. + order : {'C', 'F'}, default='F' + Order of the arrays gradient and hessian. The default 'F' makes the arrays + contiguous along samples. + + Returns + ------- + gradient : C-contiguous array of shape (n_samples,) or array of shape \ + (n_samples, n_classes) + Empty array (allocated but not initialized) to be used as argument + gradient_out. 
# Note: Naturally, we would inherit in the following order
# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
# But because of https://github.com/cython/cython/issues/4350 we
# set BaseLoss as the last one. This, of course, changes the MRO.
class HalfSquaredError(BaseLoss):
    """Half squared error with identity link, for regression.

    Domain:
    y_true and y_pred all real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i::

        loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2

    The 0.5 factor keeps gradients simple and makes the hessian exactly one
    (matching LightGBM's convention); it is also half the Normal deviance.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfSquaredError(), link=IdentityLink())
        # Unit hessians stay constant only when no sample weights rescale them.
        self.constant_hessian = sample_weight is None
class AbsoluteError(BaseLoss):
    """Absolute error with identity link, for regression.

    Domain:
    y_true and y_pred all real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i::

        loss(x_i) = |y_true_i - raw_prediction_i|

    The exact hessian is 0 almost everywhere (hence differentiable = False),
    but optimizers such as HGBT need hessian > 0, so we report 1 instead.
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyAbsoluteError(), link=IdentityLink())
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the (weighted) median of the target over the sample axis.
        """
        if sample_weight is None:
            return np.median(y_true, axis=0)
        return _weighted_percentile(y_true, sample_weight, 50)


class PinballLoss(BaseLoss):
    """Quantile loss aka pinball loss, for regression.

    Domain:
    y_true and y_pred all real numbers
    quantile in (0, 1)

    Link:
    y_pred = raw_prediction

    For a given sample x_i::

        loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i)

        rho_{quantile}(u) = u * (quantile - 1_{u<0})
                          = -u * (1 - quantile)  if u < 0
                             u * quantile       if u >= 0

    Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError().

    The exact hessian is 0 almost everywhere (hence differentiable = False),
    but optimizers such as HGBT need hessian > 0, so we report 1 instead.

    Additional Attributes
    ---------------------
    quantile : float
        The quantile level of the quantile to be estimated. Must be in range
        (0, 1).
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None, quantile=0.5):
        check_scalar(
            quantile,
            "quantile",
            target_type=numbers.Real,
            min_val=0,
            max_val=1,
            include_boundaries="neither",
        )
        super().__init__(
            closs=CyPinballLoss(quantile=float(quantile)),
            link=IdentityLink(),
        )
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the (weighted) quantile of the target over the sample axis.
        """
        level = 100 * self.closs.quantile
        if sample_weight is None:
            return np.percentile(y_true, level, axis=0)
        return _weighted_percentile(y_true, sample_weight, level)
+ """ + + differentiable = False + need_update_leaves_values = True + + def __init__(self, sample_weight=None, quantile=0.9, delta=0.5): + check_scalar( + quantile, + "quantile", + target_type=numbers.Real, + min_val=0, + max_val=1, + include_boundaries="neither", + ) + self.quantile = quantile # This is better stored outside of Cython. + super().__init__( + closs=CyHuberLoss(delta=float(delta)), + link=IdentityLink(), + ) + self.approx_hessian = True + self.constant_hessian = False + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This is the weighted median of the target, i.e. over the samples + axis=0. + """ + # See formula before algo 4 in Friedman (2001), but we apply it to y_true, + # not to the residual y_true - raw_prediction. An estimator like + # HistGradientBoostingRegressor might then call it on the residual, e.g. + # fit_intercept_only(y_true - raw_prediction). + if sample_weight is None: + median = np.percentile(y_true, 50, axis=0) + else: + median = _weighted_percentile(y_true, sample_weight, 50) + diff = y_true - median + term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff)) + return median + np.average(term, weights=sample_weight) + + +class HalfPoissonLoss(BaseLoss): + """Half Poisson deviance loss with log-link, for regression. + + Domain: + y_true in non-negative real numbers + y_pred in positive real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half the Poisson deviance is defined as:: + + loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i)) + - y_true_i + exp(raw_prediction_i) + + Half the Poisson deviance is actually the negative log-likelihood up to + constant terms (not involving raw_prediction) and simplifies the + computation of the gradients. + We also skip the constant term `y_true_i * log(y_true_i) - y_true_i`. 
+ """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfPoissonLoss(), link=LogLink()) + self.interval_y_true = Interval(0, np.inf, True, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + term = xlogy(y_true, y_true) - y_true + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfGammaLoss(BaseLoss): + """Half Gamma deviance loss with log-link, for regression. + + Domain: + y_true and y_pred in positive real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half Gamma deviance loss is defined as:: + + loss(x_i) = log(exp(raw_prediction_i)/y_true_i) + + y_true/exp(raw_prediction_i) - 1 + + Half the Gamma deviance is actually proportional to the negative log- + likelihood up to constant terms (not involving raw_prediction) and + simplifies the computation of the gradients. + We also skip the constant term `-log(y_true_i) - 1`. + """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfGammaLoss(), link=LogLink()) + self.interval_y_true = Interval(0, np.inf, False, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + term = -np.log(y_true) - 1 + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfTweedieLoss(BaseLoss): + """Half Tweedie deviance loss with log-link, for regression. + + Domain: + y_true in real numbers for power <= 0 + y_true in non-negative real numbers for 0 < power < 2 + y_true in positive real numbers for 2 <= power + y_pred in positive real numbers + power in real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half Tweedie deviance loss with p=power is defined + as:: + + loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p) + - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p) + + exp(raw_prediction_i)**(2-p) / (2-p) + + Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link, + HalfPoissonLoss and HalfGammaLoss. 
class HalfTweedieLoss(BaseLoss):
    """Half Tweedie deviance loss with log-link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers
    power in real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, with p=power::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p)
                    + exp(raw_prediction_i)**(2-p) / (2-p)

    The limits p=0, 1, 2 give HalfSquaredError with a log link,
    HalfPoissonLoss and HalfGammaLoss. The skipped constant terms differ for
    those special powers, so the loss is not continuous in `power`. Although
    no Tweedie distribution exists for 0 < power < 1, the loss remains a
    strictly consistent scoring function for the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLoss(power=float(power)),
            link=LogLink(),
        )
        p = self.closs.power
        if p <= 0:
            # Extreme stable: any real y_true is allowed.
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif p < 2:
            # Compound Poisson: mass at zero is allowed.
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # The dropped constants differ at the special powers 0, 1, 2;
        # delegate to the corresponding limiting losses there.
        p = self.closs.power
        if p == 0:
            return HalfSquaredError().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        if p == 1:
            return HalfPoissonLoss().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        if p == 2:
            return HalfGammaLoss().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        dropped = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p)
        if sample_weight is not None:
            dropped *= sample_weight
        return dropped


class HalfTweedieLossIdentity(BaseLoss):
    """Half Tweedie deviance loss with identity link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers for power != 0
    y_pred in real numbers for power = 0
    power in real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, with p=power::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * raw_prediction_i**(1-p) / (1-p)
                    + raw_prediction_i**(2-p) / (2-p)

    The minimum value of this loss is 0. Although no Tweedie distribution
    exists for 0 < power < 1, the loss remains a strictly consistent scoring
    function for the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLossIdentity(power=float(power)),
            link=IdentityLink(),
        )
        p = self.closs.power
        if p <= 0:
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif p < 2:
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

        # Only the Gaussian limit (p=0) allows arbitrary real predictions.
        if p == 0:
            self.interval_y_pred = Interval(-np.inf, np.inf, False, False)
        else:
            self.interval_y_pred = Interval(0, np.inf, False, False)
class HalfBinomialLoss(BaseLoss):
    """Half Binomial deviance loss with logit link, for binary classification.

    This is also known as binary cross entropy, log-loss and logistic loss.

    Domain:
    y_true in [0, 1], i.e. regression on the unit interval
    y_pred in (0, 1), i.e. boundaries excluded

    Link:
    y_pred = expit(raw_prediction)

    For a given sample x_i, half Binomial deviance is the negative
    log-likelihood of the Binomial/Bernoulli distribution::

        loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i

    See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman,
    section 4.4.1 (about logistic regression). The formulation covers
    classification, y = {0, 1}, as well as logistic regression, y = [0, 1];
    adding `constant_to_optimal_zero` yields half the Bernoulli/binomial
    deviance. With y_pred = expit(raw_prediction) inserted, this is the
    familiar::

        loss(x_i) = - y_true_i * log(y_pred_i) - (1 - y_true_i) * log(1 - y_pred_i)
    """

    def __init__(self, sample_weight=None):
        super().__init__(
            closs=CyHalfBinomialLoss(),
            link=LogitLink(),
            n_classes=2,
        )
        self.interval_y_true = Interval(0, 1, True, True)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # Nonzero only for y_true strictly inside (0, 1); xlogy handles 0.
        entropy = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true)
        if sample_weight is not None:
            entropy *= sample_weight
        return entropy

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples,) or (n_samples, 1)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, 2)
            Element-wise class probabilities.
        """
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        p_positive = self.link.inverse(raw_prediction)
        proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
        proba[:, 1] = p_positive
        proba[:, 0] = 1 - p_positive
        return proba
class HalfMultinomialLoss(BaseLoss):
    """Categorical cross-entropy loss, for multiclass classification.

    Domain:
    y_true in {0, 1, 2, 3, .., n_classes - 1}
    y_pred has n_classes elements, each element in (0, 1)

    Link:
    y_pred = softmax(raw_prediction)

    Note: y_true is assumed to be label encoded already. The inverse link is
    softmax, while the full link is the symmetric multinomial logit.

    For a given sample x_i, the loss is the negative log-likelihood of the
    multinomial distribution, generalizing binary cross-entropy::

        loss_i = log(sum(exp(raw_pred_{i, k}), k=0..n_classes-1))
                 - sum(y_true_{i, k} * raw_pred_{i, k}, k=0..n_classes-1)

    See [1]. For the hessian only the class-diagonal part H_i_k_k is
    computed.

    Reference
    ---------
    .. [1] :arxiv:`Simon, Noah, J. Friedman and T. Hastie.
      "A Blockwise Descent Algorithm for Group-penalized Multiresponse and
      Multinomial Regression".
      <1311.6529>`
    """

    is_multiclass = True

    def __init__(self, sample_weight=None, n_classes=3):
        super().__init__(
            closs=CyHalfMultinomialLoss(),
            link=MultinomialLogit(),
            n_classes=n_classes,
        )
        self.interval_y_true = Interval(0, np.inf, True, False)
        self.interval_y_pred = Interval(0, 1, False, False)

    def in_y_true_range(self, y):
        """Return True if y is in the valid range of y_true.

        Parameters
        ----------
        y : ndarray
        """
        # Labels must additionally be (non-negative) integers.
        return self.interval_y_true.includes(y) and np.all(y.astype(int) == y)

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        Symmetric multinomial logit of the (weighted) class frequencies.
        """
        eps = np.finfo(y_true.dtype).eps
        frequencies = np.zeros(self.n_classes, dtype=y_true.dtype)
        for klass in range(self.n_classes):
            share = np.average(y_true == klass, weights=sample_weight, axis=0)
            # Keep strictly inside (0, 1) so the link stays finite.
            frequencies[klass] = np.clip(share, eps, 1 - eps)
        return self.link.link(frequencies[None, :]).reshape(-1)

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        return self.link.inverse(raw_prediction)

    def gradient_proba(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        proba_out=None,
        n_threads=1,
    ):
        """Compute gradient and class probabilities fow raw_prediction.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or array of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new
            array might be created.
        proba_out : None or array of shape (n_samples, n_classes)
            A location into which the class probabilities are stored. If
            None, a new array might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : array of shape (n_samples, n_classes)
            Element-wise gradients.
        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        # Allocate whichever output buffers the caller did not supply.
        if gradient_out is None:
            if proba_out is None:
                gradient_out = np.empty_like(raw_prediction)
                proba_out = np.empty_like(raw_prediction)
            else:
                gradient_out = np.empty_like(proba_out)
        elif proba_out is None:
            proba_out = np.empty_like(gradient_out)

        self.closs.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            proba_out=proba_out,
            n_threads=n_threads,
        )
        return gradient_out, proba_out
class ExponentialLoss(BaseLoss):
    """Exponential loss with (half) logit link, for binary classification.

    This is also known as boosting loss.

    Domain:
    y_true in [0, 1], i.e. regression on the unit interval
    y_pred in (0, 1), i.e. boundaries excluded

    Link:
    y_pred = expit(2 * raw_prediction)

    For a given sample x_i::

        loss(x_i) = y_true_i * exp(-raw_pred_i) + (1 - y_true_i) * exp(raw_pred_i)

    See:
    - J. Friedman, T. Hastie, R. Tibshirani.
      "Additive logistic regression: a statistical view of boosting (With
      discussion and a rejoinder by the authors)." Ann. Statist. 28 (2)
      337 - 407, April 2000. https://doi.org/10.1214/aos/1016218223
    - A. Buja, W. Stuetzle, Y. Shen. (2005).
      "Loss Functions for Binary Class Probability Estimation and
      Classification: Structure and Applications."

    The formulation covers classification, y = {0, 1}, as well as
    "exponential logistic" regression, y = [0, 1]. This is a proper scoring
    rule, but without its canonical link. With
    y_pred = expit(2 * raw_prediction) inserted::

        loss(x_i) = y_true_i * sqrt((1 - y_pred_i) / y_pred_i)
                    + (1 - y_true_i) * sqrt(y_pred_i / (1 - y_pred_i))
    """

    def __init__(self, sample_weight=None):
        super().__init__(
            closs=CyExponentialLoss(),
            link=HalfLogitLink(),
            n_classes=2,
        )
        self.interval_y_true = Interval(0, 1, True, True)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # Nonzero only for y_true strictly inside (0, 1).
        dropped = -2 * np.sqrt(y_true * (1 - y_true))
        if sample_weight is not None:
            dropped *= sample_weight
        return dropped

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples,) or (n_samples, 1)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, 2)
            Element-wise class probabilities.
        """
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        p_positive = self.link.inverse(raw_prediction)
        proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
        proba[:, 1] = p_positive
        proba[:, 0] = 1 - p_positive
        return proba


# Public registry mapping string names to loss classes. Insertion order is
# preserved deliberately (callers may iterate over it).
_LOSSES = {
    "squared_error": HalfSquaredError,
    "absolute_error": AbsoluteError,
    "pinball_loss": PinballLoss,
    "huber_loss": HuberLoss,
    "poisson_loss": HalfPoissonLoss,
    "gamma_loss": HalfGammaLoss,
    "tweedie_loss": HalfTweedieLoss,
    "binomial_loss": HalfBinomialLoss,
    "multinomial_loss": HalfMultinomialLoss,
    "exponential_loss": ExponentialLoss,
}
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal

from sklearn._loss.link import (
    _LINKS,
    HalfLogitLink,
    Interval,
    MultinomialLogit,
    _inclusive_low_high,
)

# Every concrete link class under test.
LINK_FUNCTIONS = list(_LINKS.values())


def test_interval_raises():
    """Test that interval with low > high raises ValueError."""
    with pytest.raises(
        ValueError, match="One must have low <= high; got low=1, high=0."
    ):
        Interval(1, 0, False, False)


@pytest.mark.parametrize(
    "interval",
    [
        Interval(0, 1, False, False),
        Interval(0, 1, False, True),
        Interval(0, 1, True, False),
        Interval(0, 1, True, True),
        Interval(-np.inf, np.inf, False, False),
        Interval(-np.inf, np.inf, False, True),
        Interval(-np.inf, np.inf, True, False),
        Interval(-np.inf, np.inf, True, True),
        Interval(-10, -1, False, False),
        Interval(-10, -1, False, True),
        Interval(-10, -1, True, False),
        Interval(-10, -1, True, True),
    ],
)
def test_is_in_range(interval):
    """Interval.includes must honour open/closed boundaries."""
    # _inclusive_low_high returns finite bounds strictly inside the interval,
    # suitable for linspace.
    low, high = _inclusive_low_high(interval)
    grid = np.linspace(low, high, num=10)
    assert interval.includes(grid)

    # Appending the lower bound is accepted iff the bound is closed.
    assert interval.includes(np.r_[grid, interval.low]) == interval.low_inclusive

    # Same for the upper bound.
    assert interval.includes(np.r_[grid, interval.high]) == interval.high_inclusive

    # Both bounds together require both to be closed.
    assert interval.includes(np.r_[grid, interval.low, interval.high]) == (
        interval.low_inclusive and interval.high_inclusive
    )
@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_inverse_identity(link, global_random_seed):
    """link(inverse(x)) and inverse(link(y)) must round-trip."""
    rng = np.random.RandomState(global_random_seed)
    link = link()
    n_samples, n_classes = 100, None
    # raw_prediction is restricted to [-20, 20]: LogitLink's expit(x) comes
    # very close to 1 for large positive x and loses precision.
    if link.is_multiclass:
        n_classes = 10
        raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples, n_classes))
        if isinstance(link, MultinomialLogit):
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    elif isinstance(link, HalfLogitLink):
        raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples))
    else:
        raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples))

    assert_allclose(link.link(link.inverse(raw_prediction)), raw_prediction)
    y_pred = link.inverse(raw_prediction)
    assert_allclose(link.inverse(link.link(y_pred)), y_pred)


@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_out_argument(link):
    """The out argument must receive the result and be returned."""
    rng = np.random.RandomState(42)
    link = link()
    n_samples, n_classes = 100, None
    if link.is_multiclass:
        n_classes = 10
        raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples, n_classes))
        if isinstance(link, MultinomialLogit):
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    else:
        # So far, the valid interval of raw_prediction is (-inf, inf) and
        # we do not need to distinguish.
        raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples))

    y_pred = link.inverse(raw_prediction, out=None)
    buffer = np.empty_like(raw_prediction)
    y_pred_2 = link.inverse(raw_prediction, out=buffer)
    assert_allclose(y_pred, buffer)
    assert_array_equal(buffer, y_pred_2)
    # The returned array must be the very buffer we passed in.
    assert np.shares_memory(buffer, y_pred_2)

    buffer = np.empty_like(y_pred)
    raw_prediction_2 = link.link(y_pred, out=buffer)
    assert_allclose(raw_prediction, buffer)
    assert_array_equal(buffer, raw_prediction_2)
    assert np.shares_memory(buffer, raw_prediction_2)
def loss_instance_name(param):
    """Build a readable pytest id for a loss instance.

    Non-loss parameters are passed through as their string representation.

    Fix: the HuberLoss branch was missing the closing parenthesis, producing
    ids like "HuberLoss(quantile=0.75" instead of "HuberLoss(quantile=0.75)".
    """
    if isinstance(param, BaseLoss):
        loss = param
        name = loss.__class__.__name__
        if isinstance(loss, PinballLoss):
            name += f"(quantile={loss.closs.quantile})"
        elif isinstance(loss, HuberLoss):
            # HuberLoss stores quantile on the Python object, not on closs.
            name += f"(quantile={loss.quantile})"
        elif hasattr(loss, "closs") and hasattr(loss.closs, "power"):
            name += f"(power={loss.closs.power})"
        return name
    else:
        return str(param)


def random_y_true_raw_prediction(
    loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42
):
    """Random generate y_true and raw_prediction in valid range.

    Parameters
    ----------
    loss : BaseLoss
        Loss whose y_true/y_pred intervals constrain the generated values.
    n_samples : int
        Number of samples to draw.
    y_bound, raw_bound : tuple of (low, high)
        Additional clamps applied on top of the loss intervals.
    seed : int
        Seed for the local RandomState.

    Returns
    -------
    y_true, raw_prediction : ndarray
    """
    rng = np.random.RandomState(seed)
    if loss.is_multiclass:
        raw_prediction = np.empty((n_samples, loss.n_classes))
        raw_prediction.flat[:] = rng.uniform(
            low=raw_bound[0],
            high=raw_bound[1],
            size=n_samples * loss.n_classes,
        )
        # Deterministic, label-encoded targets cycling through all classes.
        y_true = np.arange(n_samples).astype(float) % loss.n_classes
    else:
        # If link is identity, we must respect the interval of y_pred:
        if isinstance(loss.link, IdentityLink):
            low, high = _inclusive_low_high(loss.interval_y_pred)
            low = np.amax([low, raw_bound[0]])
            high = np.amin([high, raw_bound[1]])
            raw_bound = (low, high)
        raw_prediction = rng.uniform(
            low=raw_bound[0], high=raw_bound[1], size=n_samples
        )
        # generate a y_true in valid range
        low, high = _inclusive_low_high(loss.interval_y_true)
        low = max(low, y_bound[0])
        high = min(high, y_bound[1])
        y_true = rng.uniform(low, high, size=n_samples)
        # set some values at special boundaries
        if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive:
            y_true[:: (n_samples // 3)] = 0
        if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive:
            y_true[1 :: (n_samples // 3)] = 1

    return y_true, raw_prediction


def numerical_derivative(func, x, eps):
    """Helper function for numerical (first) derivatives.

    Uses central finite differences of accuracy order 4, see
    https://en.wikipedia.org/wiki/Numerical_differentiation and
    https://en.wikipedia.org/wiki/Finite_difference_coefficient
    """
    h = np.full_like(x, fill_value=eps)
    f_minus_2h = func(x - 2 * h)
    f_minus_1h = func(x - h)
    f_plus_1h = func(x + h)
    f_plus_2h = func(x + 2 * h)
    return (-f_plus_2h + 8 * f_plus_1h - 8 * f_minus_1h + f_minus_2h) / (12.0 * eps)
derivatives.""" + # For numerical derivatives, see + # https://en.wikipedia.org/wiki/Numerical_differentiation + # https://en.wikipedia.org/wiki/Finite_difference_coefficient + # We use central finite differences of accuracy 4. + h = np.full_like(x, fill_value=eps) + f_minus_2h = func(x - 2 * h) + f_minus_1h = func(x - h) + f_plus_1h = func(x + h) + f_plus_2h = func(x + 2 * h) + return (-f_plus_2h + 8 * f_plus_1h - 8 * f_minus_1h + f_minus_2h) / (12.0 * eps) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +def test_loss_boundary(loss): + """Test interval ranges of y_true and y_pred in losses.""" + # make sure low and high are always within the interval, used for linspace + if loss.is_multiclass: + y_true = np.linspace(0, 9, num=10) + else: + low, high = _inclusive_low_high(loss.interval_y_true) + y_true = np.linspace(low, high, num=10) + + # add boundaries if they are included + if loss.interval_y_true.low_inclusive: + y_true = np.r_[y_true, loss.interval_y_true.low] + if loss.interval_y_true.high_inclusive: + y_true = np.r_[y_true, loss.interval_y_true.high] + + assert loss.in_y_true_range(y_true) + + n = y_true.shape[0] + low, high = _inclusive_low_high(loss.interval_y_pred) + if loss.is_multiclass: + y_pred = np.empty((n, 3)) + y_pred[:, 0] = np.linspace(low, high, num=n) + y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0]) + y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0]) + else: + y_pred = np.linspace(low, high, num=n) + + assert loss.in_y_pred_range(y_pred) + + # calculating losses should not fail + raw_prediction = loss.link.link(y_pred) + loss.loss(y_true=y_true, raw_prediction=raw_prediction) + + +# Fixture to test valid value ranges. 
+Y_COMMON_PARAMS = [ + # (loss, [y success], [y fail]) + (HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (HuberLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]), + (HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]), + (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]), + (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]), + (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]), +] +# y_pred and y_true do not always have the same domain (valid value range). +# Hence, we define extra sets of parameters for each of them. 
+Y_TRUE_PARAMS = [ # type: ignore + # (loss, [y success], [y fail]) + (HalfPoissonLoss(), [0], []), + (HuberLoss(), [0], []), + (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []), + (HalfTweedieLoss(power=0), [-100, 0], []), + (HalfTweedieLoss(power=1.5), [0], []), + (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []), + (HalfTweedieLossIdentity(power=0), [-100, 0], []), + (HalfTweedieLossIdentity(power=1.5), [0], []), + (HalfBinomialLoss(), [0, 1], []), + (HalfMultinomialLoss(), [0.0, 1.0, 2], []), +] +Y_PRED_PARAMS = [ + # (loss, [y success], [y fail]) + (HalfPoissonLoss(), [], [0]), + (HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]), + (HalfTweedieLoss(power=0), [], [-3, -0.1, 0]), + (HalfTweedieLoss(power=1.5), [], [0]), + (HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]), + (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []), + (HalfTweedieLossIdentity(power=1.5), [], [0]), + (HalfBinomialLoss(), [], [0, 1]), + (HalfMultinomialLoss(), [0.1, 0.5], [0, 1]), +] + + +@pytest.mark.parametrize( + "loss, y_true_success, y_true_fail", Y_COMMON_PARAMS + Y_TRUE_PARAMS +) +def test_loss_boundary_y_true(loss, y_true_success, y_true_fail): + """Test boundaries of y_true for loss functions.""" + for y in y_true_success: + assert loss.in_y_true_range(np.array([y])) + for y in y_true_fail: + assert not loss.in_y_true_range(np.array([y])) + + +@pytest.mark.parametrize( + "loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS # type: ignore +) +def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail): + """Test boundaries of y_pred for loss functions.""" + for y in y_pred_success: + assert loss.in_y_pred_range(np.array([y])) + for y in y_pred_fail: + assert not loss.in_y_pred_range(np.array([y])) + + +@pytest.mark.parametrize( + "loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true", + [ + (HalfSquaredError(), 1.0, 5.0, 8, 4, 1), + (AbsoluteError(), 1.0, 5.0, 4.0, 1.0, None), + (PinballLoss(quantile=0.5), 1.0, 5.0, 2, 0.5, None), + 
(PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25), 1 - 0.25, None), + (PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25, -0.25, None), + (HuberLoss(quantile=0.5, delta=3), 1.0, 5.0, 3 * (4 - 3 / 2), None, None), + (HuberLoss(quantile=0.5, delta=3), 1.0, 3.0, 0.5 * 2**2, None, None), + (HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4), 4 - 2, 4), + (HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4, 1 - 2 / 4, 2 / 4), + (HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2, None, None), + (HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2), None, None), + (HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2, None, None), + ( + HalfTweedieLossIdentity(power=3), + 2.0, + 4.0, + -1 / 4 + 1 / 4**2 + 1 / 2 / 2, + None, + None, + ), + ( + HalfBinomialLoss(), + 0.25, + np.log(4), + np.log1p(4) - 0.25 * np.log(4), + None, + None, + ), + # Extreme log loss cases, checked with mpmath: + # import mpmath as mp + # + # # Stolen from scipy + # def mpf2float(x): + # return float(mp.nstr(x, 17, min_fixed=0, max_fixed=0)) + # + # def mp_logloss(y_true, raw): + # with mp.workdps(100): + # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw)) + # out = mp.log1p(mp.exp(raw)) - y_true * raw + # return mpf2float(out) + # + # def mp_gradient(y_true, raw): + # with mp.workdps(100): + # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw)) + # out = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw)) - y_true + # return mpf2float(out) + # + # def mp_hessian(y_true, raw): + # with mp.workdps(100): + # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw)) + # p = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw)) + # out = p * (mp.mpf(1) - p) + # return mpf2float(out) + # + # y, raw = 0.0, 37. 
+ # mp_logloss(y, raw), mp_gradient(y, raw), mp_hessian(y, raw) + (HalfBinomialLoss(), 0.0, -1e20, 0, 0, 0), + (HalfBinomialLoss(), 1.0, -1e20, 1e20, -1, 0), + (HalfBinomialLoss(), 0.0, -1e3, 0, 0, 0), + (HalfBinomialLoss(), 1.0, -1e3, 1e3, -1, 0), + (HalfBinomialLoss(), 1.0, -37.5, 37.5, -1, 0), + (HalfBinomialLoss(), 1.0, -37.0, 37, 1e-16 - 1, 8.533047625744065e-17), + (HalfBinomialLoss(), 0.0, -37.0, *[8.533047625744065e-17] * 3), + (HalfBinomialLoss(), 1.0, -36.9, 36.9, 1e-16 - 1, 9.430476078526806e-17), + (HalfBinomialLoss(), 0.0, -36.9, *[9.430476078526806e-17] * 3), + (HalfBinomialLoss(), 0.0, 37.0, 37, 1 - 1e-16, 8.533047625744065e-17), + (HalfBinomialLoss(), 1.0, 37.0, *[8.533047625744066e-17] * 3), + (HalfBinomialLoss(), 0.0, 37.5, 37.5, 1, 5.175555005801868e-17), + (HalfBinomialLoss(), 0.0, 232.8, 232.8, 1, 1.4287342391028437e-101), + (HalfBinomialLoss(), 1.0, 1e20, 0, 0, 0), + (HalfBinomialLoss(), 0.0, 1e20, 1e20, 1, 0), + ( + HalfBinomialLoss(), + 1.0, + 232.8, + 0, + -1.4287342391028437e-101, + 1.4287342391028437e-101, + ), + (HalfBinomialLoss(), 1.0, 232.9, 0, 0, 0), + (HalfBinomialLoss(), 1.0, 1e3, 0, 0, 0), + (HalfBinomialLoss(), 0.0, 1e3, 1e3, 1, 0), + ( + HalfMultinomialLoss(n_classes=3), + 0.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.2, + None, + None, + ), + ( + HalfMultinomialLoss(n_classes=3), + 1.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.5, + None, + None, + ), + ( + HalfMultinomialLoss(n_classes=3), + 2.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.3, + None, + None, + ), + ( + HalfMultinomialLoss(n_classes=3), + 2.0, + [1e4, 0, 7e-7], + logsumexp([1e4, 0, 7e-7]) - (7e-7), + None, + None, + ), + ], + ids=loss_instance_name, +) +def test_loss_on_specific_values( + loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true +): + """Test losses, gradients and hessians at specific values.""" + loss1 = loss(y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])) + grad1 = loss.gradient( + 
y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]) + ) + loss2, grad2 = loss.loss_gradient( + y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]) + ) + grad3, hess = loss.gradient_hessian( + y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]) + ) + + assert loss1 == approx(loss_true, rel=1e-15, abs=1e-15) + assert loss2 == approx(loss_true, rel=1e-15, abs=1e-15) + + if gradient_true is not None: + assert grad1 == approx(gradient_true, rel=1e-15, abs=1e-15) + assert grad2 == approx(gradient_true, rel=1e-15, abs=1e-15) + assert grad3 == approx(gradient_true, rel=1e-15, abs=1e-15) + + if hessian_true is not None: + assert hess == approx(hessian_true, rel=1e-15, abs=1e-15) + + +@pytest.mark.parametrize("loss", ALL_LOSSES) +@pytest.mark.parametrize("readonly_memmap", [False, True]) +@pytest.mark.parametrize("dtype_in", [np.float32, np.float64]) +@pytest.mark.parametrize("dtype_out", [np.float32, np.float64]) +@pytest.mark.parametrize("sample_weight", [None, 1]) +@pytest.mark.parametrize("out1", [None, 1]) +@pytest.mark.parametrize("out2", [None, 1]) +@pytest.mark.parametrize("n_threads", [1, 2]) +def test_loss_dtype( + loss, readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads +): + """Test acceptance of dtypes, readonly and writeable arrays in loss functions. + + Check that loss accepts if all input arrays are either all float32 or all + float64, and all output arrays are either all float32 or all float64. + + Also check that input arrays can be readonly, e.g. memory mapped. 
+ """ + if _IS_WASM and readonly_memmap: # pragma: nocover + pytest.xfail(reason="memmap not fully supported") + + loss = loss() + # generate a y_true and raw_prediction in valid range + n_samples = 5 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + y_true = y_true.astype(dtype_in) + raw_prediction = raw_prediction.astype(dtype_in) + + if sample_weight is not None: + sample_weight = np.array([2.0] * n_samples, dtype=dtype_in) + if out1 is not None: + out1 = np.empty_like(y_true, dtype=dtype_out) + if out2 is not None: + out2 = np.empty_like(raw_prediction, dtype=dtype_out) + + if readonly_memmap: + y_true = create_memmap_backed_data(y_true) + raw_prediction = create_memmap_backed_data(raw_prediction) + if sample_weight is not None: + sample_weight = create_memmap_backed_data(sample_weight) + + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out1, + n_threads=n_threads, + ) + loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out2, + n_threads=n_threads, + ) + loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out1, + gradient_out=out2, + n_threads=n_threads, + ) + if out1 is not None and loss.is_multiclass: + out1 = np.empty_like(raw_prediction, dtype=dtype_out) + loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out1, + hessian_out=out2, + n_threads=n_threads, + ) + loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight) + loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight) + loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight) + if hasattr(loss, "predict_proba"): + loss.predict_proba(raw_prediction=raw_prediction) + if hasattr(loss, "gradient_proba"): + 
loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out1, + proba_out=out2, + n_threads=n_threads, + ) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_same_as_C_functions(loss, sample_weight): + """Test that Python and Cython functions return same results.""" + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + out_l1 = np.empty_like(y_true) + out_l2 = np.empty_like(y_true) + out_g1 = np.empty_like(raw_prediction) + out_g2 = np.empty_like(raw_prediction) + out_h1 = np.empty_like(raw_prediction) + out_h2 = np.empty_like(raw_prediction) + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + ) + loss.closs.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + ), + assert_allclose(out_l1, out_l2) + loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + ) + loss.closs.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g2, + ) + assert_allclose(out_g1, out_g2) + loss.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + gradient_out=out_g1, + ) + loss.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + gradient_out=out_g2, + ) + assert_allclose(out_l1, out_l2) + assert_allclose(out_g1, out_g2) + loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + 
hessian_out=out_h1, + ) + loss.closs.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g2, + hessian_out=out_h2, + ) + assert_allclose(out_g1, out_g2) + assert_allclose(out_h1, out_h2) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed): + """Test that loss and gradient are the same across different functions. + + Also test that output arguments contain correct results. + """ + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=global_random_seed, + ) + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + out_l1 = np.empty_like(y_true) + out_l2 = np.empty_like(y_true) + out_g1 = np.empty_like(raw_prediction) + out_g2 = np.empty_like(raw_prediction) + out_g3 = np.empty_like(raw_prediction) + out_h3 = np.empty_like(raw_prediction) + + l1 = loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + ) + g1 = loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + ) + l2, g2 = loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + gradient_out=out_g2, + ) + g3, h3 = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g3, + hessian_out=out_h3, + ) + assert_allclose(l1, l2) + assert_array_equal(l1, out_l1) + assert np.shares_memory(l1, out_l1) + assert_array_equal(l2, out_l2) + assert np.shares_memory(l2, out_l2) + assert_allclose(g1, g2) + assert_allclose(g1, g3) + assert_array_equal(g1, out_g1) + assert np.shares_memory(g1, out_g1) + 
assert_array_equal(g2, out_g2) + assert np.shares_memory(g2, out_g2) + assert_array_equal(g3, out_g3) + assert np.shares_memory(g3, out_g3) + + if hasattr(loss, "gradient_proba"): + assert loss.is_multiclass # only for HalfMultinomialLoss + out_g4 = np.empty_like(raw_prediction) + out_proba = np.empty_like(raw_prediction) + g4, proba = loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g4, + proba_out=out_proba, + ) + assert_allclose(g1, out_g4) + assert_allclose(g1, g4) + assert_allclose(proba, out_proba) + assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", ["ones", "random"]) +def test_sample_weight_multiplies(loss, sample_weight, global_random_seed): + """Test sample weights in loss, gradients and hessians. + + Make sure that passing sample weights to loss, gradient and hessian + computation methods is equivalent to multiplying by the weights. 
+ """ + n_samples = 100 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=global_random_seed, + ) + + if sample_weight == "ones": + sample_weight = np.ones(shape=n_samples, dtype=np.float64) + else: + rng = np.random.RandomState(global_random_seed) + sample_weight = rng.normal(size=n_samples).astype(np.float64) + + assert_allclose( + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ), + sample_weight + * loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + ), + ) + + losses, gradient = loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + ) + losses_sw, gradient_sw = loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + assert_allclose(losses * sample_weight, losses_sw) + if not loss.is_multiclass: + assert_allclose(gradient * sample_weight, gradient_sw) + else: + assert_allclose(gradient * sample_weight[:, None], gradient_sw) + + gradient, hessian = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + ) + gradient_sw, hessian_sw = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + if not loss.is_multiclass: + assert_allclose(gradient * sample_weight, gradient_sw) + assert_allclose(hessian * sample_weight, hessian_sw) + else: + assert_allclose(gradient * sample_weight[:, None], gradient_sw) + assert_allclose(hessian * sample_weight[:, None], hessian_sw) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +def test_graceful_squeezing(loss): + """Test that reshaped raw_prediction gives same results.""" + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + + if raw_prediction.ndim == 1: + 
raw_prediction_2d = raw_prediction[:, None] + assert_allclose( + loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d), + loss.loss(y_true=y_true, raw_prediction=raw_prediction), + ) + assert_allclose( + loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d), + loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction), + ) + assert_allclose( + loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d), + loss.gradient(y_true=y_true, raw_prediction=raw_prediction), + ) + assert_allclose( + loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d), + loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction), + ) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_of_perfect_prediction(loss, sample_weight): + """Test value of perfect predictions. + + Loss of y_pred = y_true plus constant_to_optimal_zero should sums up to + zero. + """ + if not loss.is_multiclass: + # Use small values such that exp(value) is not nan. 
+ raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10]) + # If link is identity, we must respect the interval of y_pred: + if isinstance(loss.link, IdentityLink): + eps = 1e-10 + low = loss.interval_y_pred.low + if not loss.interval_y_pred.low_inclusive: + low = low + eps + high = loss.interval_y_pred.high + if not loss.interval_y_pred.high_inclusive: + high = high - eps + raw_prediction = np.clip(raw_prediction, low, high) + y_true = loss.link.inverse(raw_prediction) + else: + # HalfMultinomialLoss + y_true = np.arange(loss.n_classes).astype(float) + # raw_prediction with entries -exp(10), but +exp(10) on the diagonal + # this is close enough to np.inf which would produce nan + raw_prediction = np.full( + shape=(loss.n_classes, loss.n_classes), + fill_value=-np.exp(10), + dtype=float, + ) + raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10) + + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + loss_value = loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + constant_term = loss.constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + # Comparing loss_value + constant_term to zero would result in large + # round-off errors. + assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed): + """Test gradients and hessians with numerical derivatives. + + Gradient should equal the numerical derivatives of the loss function. + Hessians should equal the numerical derivatives of gradients. 
+ """ + n_samples = 20 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=global_random_seed, + ) + + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + g, h = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + + assert g.shape == raw_prediction.shape + assert h.shape == raw_prediction.shape + + if not loss.is_multiclass: + + def loss_func(x): + return loss.loss( + y_true=y_true, + raw_prediction=x, + sample_weight=sample_weight, + ) + + g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6) + assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10) + + def grad_func(x): + return loss.gradient( + y_true=y_true, + raw_prediction=x, + sample_weight=sample_weight, + ) + + h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6) + if loss.approx_hessian: + # TODO: What could we test if loss.approx_hessian? + pass + else: + assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10) + else: + # For multiclass loss, we should only change the predictions of the + # class for which the derivative is taken for, e.g. offset[:, k] = eps + # for class k. + # As a softmax is computed, offsetting the whole array by a constant + # would have no effect on the probabilities, and thus on the loss. 
+ for k in range(loss.n_classes): + + def loss_func(x): + raw = raw_prediction.copy() + raw[:, k] = x + return loss.loss( + y_true=y_true, + raw_prediction=raw, + sample_weight=sample_weight, + ) + + g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5) + assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10) + + def grad_func(x): + raw = raw_prediction.copy() + raw[:, k] = x + return loss.gradient( + y_true=y_true, + raw_prediction=raw, + sample_weight=sample_weight, + )[:, k] + + h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6) + if loss.approx_hessian: + # TODO: What could we test if loss.approx_hessian? + pass + else: + assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10) + + +@pytest.mark.parametrize( + "loss, x0, y_true", + [ + ("squared_error", -2.0, 42), + ("squared_error", 117.0, 1.05), + ("squared_error", 0.0, 0.0), + # The argmin of binomial_loss for y_true=0 and y_true=1 is resp. + # -inf and +inf due to logit, cf. "complete separation". Therefore, we + # use 0 < y_true < 1. + ("binomial_loss", 0.3, 0.1), + ("binomial_loss", -12, 0.2), + ("binomial_loss", 30, 0.9), + ("poisson_loss", 12.0, 1.0), + ("poisson_loss", 0.0, 2.0), + ("poisson_loss", -22.0, 10.0), + ], +) +@skip_if_32bit +def test_derivatives(loss, x0, y_true): + """Test that gradients are zero at the minimum of the loss. + + We check this on a single value/sample using Halley's method with the + first and second order derivatives computed by the Loss instance. + Note that methods of Loss instances operate on arrays while the newton + root finder expects a scalar or a one-element array for this purpose. + """ + loss = _LOSSES[loss](sample_weight=None) + y_true = np.array([y_true], dtype=np.float64) + x0 = np.array([x0], dtype=np.float64) + + def func(x: np.ndarray) -> np.ndarray: + """Compute loss plus constant term. + + The constant term is such that the minimum function value is zero, + which is required by the Newton method. 
+ """ + return loss.loss( + y_true=y_true, raw_prediction=x + ) + loss.constant_to_optimal_zero(y_true=y_true) + + def fprime(x: np.ndarray) -> np.ndarray: + return loss.gradient(y_true=y_true, raw_prediction=x) + + def fprime2(x: np.ndarray) -> np.ndarray: + return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1] + + optimum = newton( + func, + x0=x0, + fprime=fprime, + fprime2=fprime2, + maxiter=100, + tol=5e-8, + ) + + # Need to ravel arrays because assert_allclose requires matching + # dimensions. + y_true = y_true.ravel() + optimum = optimum.ravel() + assert_allclose(loss.link.inverse(optimum), y_true) + assert_allclose(func(optimum), 0, atol=1e-14) + assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_intercept_only(loss, sample_weight): + """Test that fit_intercept_only returns the argmin of the loss. + + Also test that the gradient is zero at the minimum. 
+ """ + n_samples = 50 + if not loss.is_multiclass: + y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples)) + else: + y_true = np.arange(n_samples).astype(np.float64) % loss.n_classes + y_true[::5] = 0 # exceedance of class 0 + + if sample_weight == "range": + sample_weight = np.linspace(0.1, 2, num=n_samples) + + a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight) + + # find minimum by optimization + def fun(x): + if not loss.is_multiclass: + raw_prediction = np.full(shape=(n_samples), fill_value=x) + else: + raw_prediction = np.ascontiguousarray( + np.broadcast_to(x, shape=(n_samples, loss.n_classes)) + ) + return loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + + if not loss.is_multiclass: + opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100}) + grad = loss.gradient( + y_true=y_true, + raw_prediction=np.full_like(y_true, a), + sample_weight=sample_weight, + ) + assert a.shape == tuple() # scalar + assert a.dtype == y_true.dtype + assert_all_finite(a) + a == approx(opt.x, rel=1e-7) + grad.sum() == approx(0, abs=1e-12) + else: + # The constraint corresponds to sum(raw_prediction) = 0. Without it, we would + # need to apply loss.symmetrize_raw_prediction to opt.x before comparing. 
+ opt = minimize( + fun, + np.zeros((loss.n_classes)), + tol=1e-13, + options={"maxiter": 100}, + method="SLSQP", + constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0), + ) + grad = loss.gradient( + y_true=y_true, + raw_prediction=np.tile(a, (n_samples, 1)), + sample_weight=sample_weight, + ) + assert a.dtype == y_true.dtype + assert_all_finite(a) + assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12) + assert_allclose(grad.sum(axis=0), 0, atol=1e-12) + + +@pytest.mark.parametrize( + "loss, func, random_dist", + [ + (HalfSquaredError(), np.mean, "normal"), + (AbsoluteError(), np.median, "normal"), + (PinballLoss(quantile=0.25), lambda x: np.percentile(x, q=25), "normal"), + (HalfPoissonLoss(), np.mean, "poisson"), + (HalfGammaLoss(), np.mean, "exponential"), + (HalfTweedieLoss(), np.mean, "exponential"), + (HalfBinomialLoss(), np.mean, "binomial"), + ], +) +def test_specific_fit_intercept_only(loss, func, random_dist, global_random_seed): + """Test that fit_intercept_only returns the correct functional. + + We test the functional for specific, meaningful distributions, e.g. + squared error estimates the expectation of a probability distribution. + """ + rng = np.random.RandomState(global_random_seed) + if random_dist == "binomial": + y_train = rng.binomial(1, 0.5, size=100) + else: + y_train = getattr(rng, random_dist)(size=100) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + # Make sure baseline prediction is the expected functional=func, e.g. mean + # or median. 
+ assert_all_finite(baseline_prediction) + assert baseline_prediction == approx(loss.link.link(func(y_train))) + assert loss.link.inverse(baseline_prediction) == approx(func(y_train)) + if isinstance(loss, IdentityLink): + assert_allclose(loss.link.inverse(baseline_prediction), baseline_prediction) + + # Test baseline at boundary + if loss.interval_y_true.low_inclusive: + y_train.fill(loss.interval_y_true.low) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert_all_finite(baseline_prediction) + if loss.interval_y_true.high_inclusive: + y_train.fill(loss.interval_y_true.high) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert_all_finite(baseline_prediction) + + +def test_multinomial_loss_fit_intercept_only(): + """Test that fit_intercept_only returns the mean functional for CCE.""" + rng = np.random.RandomState(0) + n_classes = 4 + loss = HalfMultinomialLoss(n_classes=n_classes) + # Same logic as test_specific_fit_intercept_only. Here inverse link + # function = softmax and link function = log - symmetry term. 
+ y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert baseline_prediction.shape == (n_classes,) + p = np.zeros(n_classes, dtype=y_train.dtype) + for k in range(n_classes): + p[k] = (y_train == k).mean() + assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p))) + assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :])) + + for y_train in (np.zeros(shape=10), np.ones(shape=10)): + y_train = y_train.astype(np.float64) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert baseline_prediction.dtype == y_train.dtype + assert_all_finite(baseline_prediction) + + +def test_binomial_and_multinomial_loss(global_random_seed): + """Test that multinomial loss with n_classes = 2 is the same as binomial loss.""" + rng = np.random.RandomState(global_random_seed) + n_samples = 20 + binom = HalfBinomialLoss() + multinom = HalfMultinomialLoss(n_classes=2) + y_train = rng.randint(0, 2, size=n_samples).astype(np.float64) + raw_prediction = rng.normal(size=n_samples) + raw_multinom = np.empty((n_samples, 2)) + raw_multinom[:, 0] = -0.5 * raw_prediction + raw_multinom[:, 1] = 0.5 * raw_prediction + assert_allclose( + binom.loss(y_true=y_train, raw_prediction=raw_prediction), + multinom.loss(y_true=y_train, raw_prediction=raw_multinom), + ) + + +@pytest.mark.parametrize("y_true", (np.array([0.0, 0, 0]), np.array([1.0, 1, 1]))) +@pytest.mark.parametrize("y_pred", (np.array([-5.0, -5, -5]), np.array([3.0, 3, 3]))) +def test_binomial_vs_alternative_formulation(y_true, y_pred, global_dtype): + """Test that both formulations of the binomial deviance agree. + + Often, the binomial deviance or log loss is written in terms of a variable + z in {-1, +1}, but we use y in {0, 1}, hence z = 2 * y - 1. + ESL II Eq. (10.18): + + -loglike(z, f) = log(1 + exp(-2 * z * f)) + + Note: + - ESL 2*f = raw_prediction, hence the factor 2 of ESL disappears. 
+ - Deviance = -2*loglike + .., but HalfBinomialLoss is half of the + deviance, hence the factor of 2 cancels in the comparison. + """ + + def alt_loss(y, raw_pred): + z = 2 * y - 1 + return np.mean(np.log(1 + np.exp(-z * raw_pred))) + + def alt_gradient(y, raw_pred): + # alternative gradient formula according to ESL + z = 2 * y - 1 + return -z / (1 + np.exp(z * raw_pred)) + + bin_loss = HalfBinomialLoss() + + y_true = y_true.astype(global_dtype) + y_pred = y_pred.astype(global_dtype) + datum = (y_true, y_pred) + + assert bin_loss(*datum) == approx(alt_loss(*datum)) + assert_allclose(bin_loss.gradient(*datum), alt_gradient(*datum)) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +def test_predict_proba(loss, global_random_seed): + """Test that predict_proba and gradient_proba work as expected.""" + n_samples = 20 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=global_random_seed, + ) + + if hasattr(loss, "predict_proba"): + proba = loss.predict_proba(raw_prediction) + assert proba.shape == (n_samples, loss.n_classes) + assert np.sum(proba, axis=1) == approx(1, rel=1e-11) + + if hasattr(loss, "gradient_proba"): + for grad, proba in ( + (None, None), + (None, np.empty_like(raw_prediction)), + (np.empty_like(raw_prediction), None), + (np.empty_like(raw_prediction), np.empty_like(raw_prediction)), + ): + grad, proba = loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + gradient_out=grad, + proba_out=proba, + ) + assert proba.shape == (n_samples, loss.n_classes) + assert np.sum(proba, axis=1) == approx(1, rel=1e-11) + assert_allclose( + grad, + loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + gradient_out=None, + ), + ) + + +@pytest.mark.parametrize("loss", ALL_LOSSES) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +@pytest.mark.parametrize("dtype", 
@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
@pytest.mark.parametrize("order", ("C", "F"))
def test_init_gradient_and_hessians(loss, sample_weight, dtype, order):
    """Test that init_gradient_and_hessian works as expected.

    passing sample_weight to a loss correctly influences the constant_hessian
    attribute, and consequently the shape of the hessian array.
    """
    n_samples = 5
    if sample_weight == "range":
        sample_weight = np.ones(n_samples)
    loss = loss(sample_weight=sample_weight)
    gradient, hessian = loss.init_gradient_and_hessian(
        n_samples=n_samples,
        dtype=dtype,
        order=order,
    )
    if loss.constant_hessian:
        assert gradient.shape == (n_samples,)
        assert hessian.shape == (1,)
    elif loss.is_multiclass:
        assert gradient.shape == (n_samples, loss.n_classes)
        assert hessian.shape == (n_samples, loss.n_classes)
    else:
        # BUGFIX: the gradient shape was never checked in this branch; the
        # hessian assertion was duplicated instead.
        assert gradient.shape == (n_samples,)
        assert hessian.shape == (n_samples,)

    assert gradient.dtype == dtype
    assert hessian.dtype == dtype

    if order == "C":
        assert gradient.flags.c_contiguous
        assert hessian.flags.c_contiguous
    else:
        assert gradient.flags.f_contiguous
        assert hessian.flags.f_contiguous


@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"dtype": np.int64},
            f"Valid options for 'dtype' are .* Got dtype={np.int64} instead.",
        ),
    ],
)
def test_init_gradient_and_hessian_raises(loss, params, err_msg):
    """Test that init_gradient_and_hessian raises errors for invalid input."""
    loss = loss()
    with pytest.raises((ValueError, TypeError), match=err_msg):
        gradient, hessian = loss.init_gradient_and_hessian(n_samples=5, **params)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_loss_pickle(loss):
    """Test that losses can be pickled."""
    n_samples = 20
    y, raw = random_y_true_raw_prediction(
        loss=loss,
        n_samples=n_samples,
        y_bound=(-100, 100),
        raw_bound=(-5, 5),
        seed=42,
    )
    # A pickle round-trip must produce a loss with identical behavior.
    roundtripped = pickle.loads(pickle.dumps(loss))
    assert loss(y_true=y, raw_prediction=raw) == approx(
        roundtripped(y_true=y, raw_prediction=raw)
    )
+ loss_log = half_tweedie_log.loss( + y_true=y_true, raw_prediction=raw_prediction + ) + half_tweedie_log.constant_to_optimal_zero(y_true) + loss_identity = half_tweedie_identity.loss( + y_true=y_true, raw_prediction=y_pred + ) + half_tweedie_identity.constant_to_optimal_zero(y_true) + # Note that HalfTweedieLoss ignores different constant terms than + # HalfTweedieLossIdentity. Constant terms means terms not depending on + # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses + # give the same values. + assert_allclose(loss_log, loss_identity) + + # For gradients and hessians, the constant terms do not matter. We have, however, + # to account for the chain rule, i.e. with x=raw_prediction + # gradient_log(x) = d/dx loss_log(x) + # = d/dx loss_identity(exp(x)) + # = exp(x) * gradient_identity(exp(x)) + # Similarly, + # hessian_log(x) = exp(x) * gradient_identity(exp(x)) + # + exp(x)**2 * hessian_identity(x) + gradient_log, hessian_log = half_tweedie_log.gradient_hessian( + y_true=y_true, raw_prediction=raw_prediction + ) + gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian( + y_true=y_true, raw_prediction=y_pred + ) + assert_allclose(gradient_log, y_pred * gradient_identity) + assert_allclose( + hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4fbc631155078f6663beb3c5fcfa7de7fc5878f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py @@ -0,0 +1,47 @@ +""" +The :mod:`sklearn.feature_selection` module implements feature selection +algorithms. It currently includes univariate filter selection methods and the +recursive feature elimination algorithm. 
# Public re-exports: each selector lives in a private submodule and is exposed
# here under the stable ``sklearn.feature_selection`` namespace.
from ._base import SelectorMixin
from ._from_model import SelectFromModel
from ._mutual_info import mutual_info_classif, mutual_info_regression
from ._rfe import RFE, RFECV
from ._sequential import SequentialFeatureSelector
from ._univariate_selection import (
    GenericUnivariateSelect,
    SelectFdr,
    SelectFpr,
    SelectFwe,
    SelectKBest,
    SelectPercentile,
    chi2,
    f_classif,
    f_oneway,
    f_regression,
    r_regression,
)
from ._variance_threshold import VarianceThreshold

# Names exported by ``from sklearn.feature_selection import *``.
__all__ = [
    "GenericUnivariateSelect",
    "SequentialFeatureSelector",
    "RFE",
    "RFECV",
    "SelectFdr",
    "SelectFpr",
    "SelectFwe",
    "SelectKBest",
    "SelectFromModel",
    "SelectPercentile",
    "VarianceThreshold",
    "chi2",
    "f_classif",
    "f_oneway",
    "f_regression",
    "r_regression",
    "mutual_info_classif",
    "mutual_info_regression",
    "SelectorMixin",
]
index 0000000000000000000000000000000000000000..3c307d9c324c0d406350db82bc00cc7583982d08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0fb20a161c3db47d0afee67696a47d916975fd6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba92faeb3dd65493ad622caab7b497c3aa8442b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16cafada7fc68d56585e88112d4baa9fe65842e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d8d310855eb96e0425733860b416561069f82be Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9865070112a34c9b4f7726b5cd3cf748c7ba64f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/_base.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..69e40ce08aed005186416588531f644eb566f150 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_base.py @@ -0,0 +1,266 @@ +"""Generic feature selection mixin""" + +# Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman +# License: BSD 3 clause + +import warnings +from abc import ABCMeta, abstractmethod +from operator import attrgetter + +import numpy as np +from scipy.sparse import csc_matrix, issparse + +from ..base import TransformerMixin +from ..utils import ( + _is_pandas_df, + _safe_indexing, + check_array, + safe_sqr, +) +from ..utils._set_output import _get_output_config +from ..utils._tags import _safe_tags +from ..utils.validation import _check_feature_names_in, check_is_fitted + + +class SelectorMixin(TransformerMixin, metaclass=ABCMeta): + """ + Transformer mixin that performs feature selection given a support mask + + This mixin provides a feature selector implementation with `transform` and + `inverse_transform` functionality given an implementation of + `_get_support_mask`. 
def get_support(self, indices=False):
    """
    Get a mask, or integer index, of the features selected.

    Parameters
    ----------
    indices : bool, default=False
        If True, the return value will be an array of integers, rather
        than a boolean mask.

    Returns
    -------
    support : array
        An index that selects the retained features from a feature vector.
        If `indices` is False, this is a boolean array of shape
        [# input features], in which an element is True iff its
        corresponding feature is selected for retention. If `indices` is
        True, this is an integer array of shape [# output features] whose
        values are indices into the input feature vector.
    """
    mask = self._get_support_mask()
    if indices:
        return np.where(mask)[0]
    return mask
def _transform(self, X):
    """Reduce X to the selected features."""
    mask = self.get_support()
    if mask.any():
        return _safe_indexing(X, mask, axis=1)
    # Nothing selected: warn and return an empty (n_samples, 0) container of
    # the same kind as X (dataframe stays a dataframe, array stays an array).
    warnings.warn(
        "No features were selected: either the data is"
        " too noisy or the selection test too strict.",
        UserWarning,
    )
    if hasattr(X, "iloc"):
        return X.iloc[:, :0]
    return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0))
def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1):
    """
    Retrieve and aggregate (ndim > 1) the feature importances
    from an estimator. Also optionally applies transformation.

    Parameters
    ----------
    estimator : estimator
        A scikit-learn estimator from which we want to get the feature
        importances.

    getter : "auto", str or callable
        An attribute or a callable to get the feature importance. If `"auto"`,
        `estimator` is expected to expose `coef_` or `feature_importances`.

    transform_func : {"norm", "square"}, default=None
        The transform to apply to the feature importances. By default (`None`)
        no transformation is applied.

    norm_order : int, default=1
        The norm order to apply when `transform_func="norm"`. Only applied
        when `importances.ndim > 1`.

    Returns
    -------
    importances : ndarray of shape (n_features,)
        The features importances, optionally transformed.
    """
    # Resolve the getter to a callable.
    if isinstance(getter, str):
        if getter != "auto":
            getter = attrgetter(getter)
        elif hasattr(estimator, "coef_"):
            getter = attrgetter("coef_")
        elif hasattr(estimator, "feature_importances_"):
            getter = attrgetter("feature_importances_")
        else:
            raise ValueError(
                "when `importance_getter=='auto'`, the underlying "
                f"estimator {estimator.__class__.__name__} should have "
                "`coef_` or `feature_importances_` attribute. Either "
                "pass a fitted estimator to feature selector or call fit "
                "before calling transform."
            )
    elif not callable(getter):
        raise ValueError("`importance_getter` has to be a string or `callable`")

    importances = getter(estimator)

    if transform_func is None:
        return importances

    if transform_func == "norm":
        if importances.ndim == 1:
            return np.abs(importances)
        return np.linalg.norm(importances, axis=0, ord=norm_order)

    if transform_func == "square":
        if importances.ndim == 1:
            return safe_sqr(importances)
        return safe_sqr(importances).sum(axis=0)

    raise ValueError(
        "Valid values for `transform_func` are "
        + "None, 'norm' and 'square'. Those two "
        + "transformation are only supported now"
    )
def _estimator_has(attr):
    """Check if we can delegate a method to the underlying estimator.

    First, we check the fitted `estimator_` if available, otherwise we check the
    unfitted `estimator`. We raise the original `AttributeError` if `attr` does
    not exist. This function is used together with `available_if`.
    """

    def check(self):
        target = self.estimator_ if hasattr(self, "estimator_") else self.estimator
        # Attribute access raises AttributeError when `attr` is missing.
        getattr(target, attr)
        return True

    return check
A scaling + factor (e.g., "1.25*mean") may also be used. If None and if the + estimator has a parameter penalty set to l1, either explicitly + or implicitly (e.g, Lasso), the threshold used is 1e-5. + Otherwise, "mean" is used by default. + + prefit : bool, default=False + Whether a prefit model is expected to be passed into the constructor + directly or not. + If `True`, `estimator` must be a fitted estimator. + If `False`, `estimator` is fitted and updated by calling + `fit` and `partial_fit`, respectively. + + norm_order : non-zero int, inf, -inf, default=1 + Order of the norm used to filter the vectors of coefficients below + ``threshold`` in the case where the ``coef_`` attribute of the + estimator is of dimension 2. + + max_features : int, callable, default=None + The maximum number of features to select. + + - If an integer, then it specifies the maximum number of features to + allow. + - If a callable, then it specifies how to calculate the maximum number of + features allowed by using the output of `max_features(X)`. + - If `None`, then all features are kept. + + To only select based on ``max_features``, set ``threshold=-np.inf``. + + .. versionadded:: 0.20 + .. versionchanged:: 1.1 + `max_features` accepts a callable. + + importance_getter : str or callable, default='auto' + If 'auto', uses the feature importance either through a ``coef_`` + attribute or ``feature_importances_`` attribute of estimator. + + Also accepts a string that specifies an attribute name/path + for extracting feature importance (implemented with `attrgetter`). + For example, give `regressor_.coef_` in case of + :class:`~sklearn.compose.TransformedTargetRegressor` or + `named_steps.clf.feature_importances_` in case of + :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. + + If `callable`, overrides the default feature importance getter. + The callable is passed with the fitted estimator and it should + return importance for each feature. + + .. 
versionadded:: 0.24 + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the transformer is built. This attribute + exist only when `fit` has been called. + + - If `prefit=True`, it is a deep copy of `estimator`. + - If `prefit=False`, it is a clone of `estimator` and fit on the data + passed to `fit` or `partial_fit`. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + max_features_ : int + Maximum number of features calculated during :term:`fit`. Only defined + if the ``max_features`` is not `None`. + + - If `max_features` is an `int`, then `max_features_ = max_features`. + - If `max_features` is a callable, then `max_features_ = max_features(X)`. + + .. versionadded:: 1.1 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + threshold_ : float + The threshold value used for feature selection. + + See Also + -------- + RFE : Recursive feature elimination based on importance weights. + RFECV : Recursive feature elimination with built-in cross-validated + selection of the best number of features. + SequentialFeatureSelector : Sequential cross-validation based feature + selection. Does not rely on importance weights. + + Notes + ----- + Allows NaN/Inf in the input if the underlying estimator does as well. + + Examples + -------- + >>> from sklearn.feature_selection import SelectFromModel + >>> from sklearn.linear_model import LogisticRegression + >>> X = [[ 0.87, -1.34, 0.31 ], + ... [-2.79, -0.02, -0.85 ], + ... [-1.34, -0.48, -2.55 ], + ... 
def __init__(
    self,
    estimator,
    *,
    threshold=None,
    prefit=False,
    norm_order=1,
    max_features=None,
    importance_getter="auto",
):
    # Store hyper-parameters untouched; scikit-learn convention is that all
    # validation happens in `fit`, never in `__init__`.
    self.estimator = estimator
    self.threshold = threshold
    self.prefit = prefit
    self.norm_order = norm_order
    self.max_features = max_features
    self.importance_getter = importance_getter
def _check_max_features(self, X):
    """Validate `max_features` against X and set `max_features_` when given."""
    if self.max_features is None:
        return
    n_features = _num_features(X)

    # A callable computes the cap from the data; an int is used as-is.
    max_features = (
        self.max_features(X) if callable(self.max_features) else self.max_features
    )

    check_scalar(
        max_features,
        "max_features",
        Integral,
        min_val=0,
        max_val=n_features,
    )
    self.max_features_ = max_features
@available_if(_estimator_has("partial_fit"))
@_fit_context(
    # SelectFromModel.estimator is not validated yet
    prefer_skip_nested_validation=False
)
def partial_fit(self, X, y=None, **partial_fit_params):
    """Fit the SelectFromModel meta-transformer only once.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The training input samples.

    y : array-like of shape (n_samples,), default=None
        The target values (integers that correspond to classes in
        classification, real numbers in regression).

    **partial_fit_params : dict
        - If `enable_metadata_routing=False` (default):

          Parameters directly passed to the `partial_fit` method of the
          sub-estimator.

        - If `enable_metadata_routing=True`:

          Parameters passed to the `partial_fit` method of the
          sub-estimator. They are ignored if `prefit=True`.

        .. versionchanged:: 1.4
            `**partial_fit_params` are routed to the sub-estimator, if
            `enable_metadata_routing=True` is set via
            :func:`~sklearn.set_config`, which allows for aliasing.

            See :ref:`Metadata Routing User Guide <metadata_routing>` for
            more details.

    Returns
    -------
    self : object
        Fitted estimator.
    """
    first_call = not hasattr(self, "estimator_")

    if first_call:
        self._check_max_features(X)

    if self.prefit:
        if first_call:
            try:
                check_is_fitted(self.estimator)
            except NotFittedError as exc:
                raise NotFittedError(
                    "When `prefit=True`, `estimator` is expected to be a fitted "
                    "estimator."
                ) from exc
            self.estimator_ = deepcopy(self.estimator)
        return self

    # Clone only on the very first call; subsequent calls must keep training
    # the same sub-estimator.
    if first_call:
        self.estimator_ = clone(self.estimator)
    if _routing_enabled():
        routed_params = process_routing(self, "partial_fit", **partial_fit_params)
        # BUGFIX: do not re-clone `estimator_` here. Cloning on every call
        # discarded everything learned by previous `partial_fit` calls
        # whenever metadata routing was enabled.
        self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit)
    else:
        # TODO(SLEP6): remove when metadata routing cannot be disabled.
        self.estimator_.partial_fit(X, y, **partial_fit_params)

    if hasattr(self.estimator_, "feature_names_in_"):
        self.feature_names_in_ = self.estimator_.feature_names_in_
    else:
        self._check_feature_names(X, reset=first_call)

    return self
+ """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(callee="partial_fit", caller="partial_fit") + .add(callee="fit", caller="fit"), + ) + return router + + def _more_tags(self): + return {"allow_nan": _safe_tags(self.estimator, key="allow_nan")} diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py new file mode 100644 index 0000000000000000000000000000000000000000..821ef889e7ed90936d6c9898b4c41744d105cb6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py @@ -0,0 +1,514 @@ +# Author: Nikolay Mayorov +# License: 3-clause BSD + +from numbers import Integral + +import numpy as np +from scipy.sparse import issparse +from scipy.special import digamma + +from ..metrics.cluster import mutual_info_score +from ..neighbors import KDTree, NearestNeighbors +from ..preprocessing import scale +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.multiclass import check_classification_targets +from ..utils.validation import check_array, check_X_y + + +def _compute_mi_cc(x, y, n_neighbors): + """Compute mutual information between two continuous variables. + + Parameters + ---------- + x, y : ndarray, shape (n_samples,) + Samples of two continuous random variables, must have an identical + shape. + + n_neighbors : int + Number of nearest neighbors to search for each point, see [1]_. + + Returns + ------- + mi : float + Estimated mutual information in nat units. If it turned out to be + negative it is replaced by 0. + + Notes + ----- + True mutual information can't be negative. If its estimate by a numerical + method is negative, it means (providing the method is adequate) that the + mutual information is close to 0 and replacing it by 0 is a reasonable + strategy. 
def _compute_mi_cc(x, y, n_neighbors):
    """Estimate MI between two continuous variables (Kraskov estimator).

    Parameters
    ----------
    x, y : ndarray, shape (n_samples,)
        Samples of two continuous random variables of identical shape.

    n_neighbors : int
        Number of nearest neighbors searched for each point, see [1]_.

    Returns
    -------
    mi : float
        Estimated mutual information in nat units, clipped at 0 (a negative
        numerical estimate means the true MI is close to 0).

    References
    ----------
    .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
           information". Phys. Rev. E 69, 2004.
    """
    n_samples = x.size

    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))
    joint = np.hstack((x, y))

    # NearestNeighbors picks the fastest algorithm for us.
    nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors)
    nn.fit(joint)
    dist = nn.kneighbors()[0]
    # Shrink just below the k-th neighbor distance so the radius query below
    # counts points *strictly* inside the k-th neighbor ball.
    radius = np.nextafter(dist[:, -1], 0)

    # KDTree is fit explicitly so we can count neighbors within a per-point
    # radius in each marginal space.
    tree_x = KDTree(x, metric="chebyshev")
    nx = tree_x.query_radius(x, radius, count_only=True, return_distance=False)
    nx = np.array(nx) - 1.0

    tree_y = KDTree(y, metric="chebyshev")
    ny = tree_y.query_radius(y, radius, count_only=True, return_distance=False)
    ny = np.array(ny) - 1.0

    mi = (
        digamma(n_samples)
        + digamma(n_neighbors)
        - np.mean(digamma(nx + 1))
        - np.mean(digamma(ny + 1))
    )

    return max(0, mi)


def _compute_mi_cd(c, d, n_neighbors):
    """Estimate MI between a continuous and a discrete variable (Ross).

    Parameters
    ----------
    c : ndarray, shape (n_samples,)
        Samples of a continuous random variable.

    d : ndarray, shape (n_samples,)
        Samples of a discrete random variable.

    n_neighbors : int
        Number of nearest neighbors searched for each point, see [1]_.

    Returns
    -------
    mi : float
        Estimated mutual information in nat units, clipped at 0 (a negative
        numerical estimate means the true MI is close to 0).

    References
    ----------
    .. [1] B. C. Ross "Mutual Information between Discrete and Continuous
           Data Sets". PLoS ONE 9(2), 2014.
    """
    n_samples = c.shape[0]
    c = c.reshape((-1, 1))

    radius = np.empty(n_samples)
    label_counts = np.empty(n_samples)
    k_all = np.empty(n_samples)
    nn = NearestNeighbors()
    for label in np.unique(d):
        in_class = d == label
        n_in_class = np.sum(in_class)
        if n_in_class > 1:
            # Within each class, k is capped by the class size minus one.
            k = min(n_neighbors, n_in_class - 1)
            nn.set_params(n_neighbors=k)
            nn.fit(c[in_class])
            dist = nn.kneighbors()[0]
            radius[in_class] = np.nextafter(dist[:, -1], 0)
            k_all[in_class] = k
        label_counts[in_class] = n_in_class

    # Points whose label occurs only once carry no usable information.
    keep = label_counts > 1
    n_samples = np.sum(keep)
    label_counts = label_counts[keep]
    k_all = k_all[keep]
    c = c[keep]
    radius = radius[keep]

    tree = KDTree(c)
    m_all = tree.query_radius(c, radius, count_only=True, return_distance=False)
    m_all = np.array(m_all)

    mi = (
        digamma(n_samples)
        + np.mean(digamma(k_all))
        - np.mean(digamma(label_counts))
        - np.mean(digamma(m_all))
    )

    return max(0, mi)


def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
    """Dispatch MI estimation to the right helper.

    Chooses between the discrete-discrete, continuous-discrete and
    continuous-continuous estimators based on the two flags.
    """
    if x_discrete and y_discrete:
        return mutual_info_score(x, y)
    if x_discrete:
        return _compute_mi_cd(y, x, n_neighbors)
    if y_discrete:
        return _compute_mi_cd(x, y, n_neighbors)
    return _compute_mi_cc(x, y, n_neighbors)
+ """ + if columns is None: + columns = range(X.shape[1]) + + if issparse(X): + for i in columns: + x = np.zeros(X.shape[0]) + start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1] + x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr] + yield x + else: + for i in columns: + yield X[:, i] + + +def _estimate_mi( + X, + y, + discrete_features="auto", + discrete_target=False, + n_neighbors=3, + copy=True, + random_state=None, +): + """Estimate mutual information between the features and the target. + + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : {'auto', bool, array-like}, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. If array, then it should be either a boolean mask + with shape (n_features,) or array with indices of discrete features. + If 'auto', it is assigned to False for dense `X` and to True for + sparse `X`. + + discrete_target : bool, default=False + Whether to consider `y` as a discrete variable. + + n_neighbors : int, default=3 + Number of neighbors to use for MI estimation for continuous variables, + see [1]_ and [2]_. Higher values reduce variance of the estimation, but + could introduce a bias. + + copy : bool, default=True + Whether to make a copy of the given data. If set to False, the initial + data will be overwritten. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for adding small noise to + continuous variables in order to remove repeated values. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + mi : ndarray, shape (n_features,) + Estimated mutual information between each feature and the target in + nat units. A negative value will be replaced by 0. + + References + ---------- + .. [1] A. Kraskov, H. Stogbauer and P. 
Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + .. [2] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + """ + X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target) + n_samples, n_features = X.shape + + if isinstance(discrete_features, (str, bool)): + if isinstance(discrete_features, str): + if discrete_features == "auto": + discrete_features = issparse(X) + else: + raise ValueError("Invalid string value for discrete_features.") + discrete_mask = np.empty(n_features, dtype=bool) + discrete_mask.fill(discrete_features) + else: + discrete_features = check_array(discrete_features, ensure_2d=False) + if discrete_features.dtype != "bool": + discrete_mask = np.zeros(n_features, dtype=bool) + discrete_mask[discrete_features] = True + else: + discrete_mask = discrete_features + + continuous_mask = ~discrete_mask + if np.any(continuous_mask) and issparse(X): + raise ValueError("Sparse matrix `X` can't have continuous features.") + + rng = check_random_state(random_state) + if np.any(continuous_mask): + X = X.astype(np.float64, copy=copy) + X[:, continuous_mask] = scale( + X[:, continuous_mask], with_mean=False, copy=False + ) + + # Add small noise to continuous features as advised in Kraskov et. al. 
+ means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0)) + X[:, continuous_mask] += ( + 1e-10 + * means + * rng.standard_normal(size=(n_samples, np.sum(continuous_mask))) + ) + + if not discrete_target: + y = scale(y, with_mean=False) + y += ( + 1e-10 + * np.maximum(1, np.mean(np.abs(y))) + * rng.standard_normal(size=n_samples) + ) + + mi = [ + _compute_mi(x, y, discrete_feature, discrete_target, n_neighbors) + for x, discrete_feature in zip(_iterate_columns(X), discrete_mask) + ] + + return np.array(mi) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "copy": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def mutual_info_regression( + X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None +): + """Estimate mutual information for a continuous target variable. + + Mutual information (MI) [1]_ between two random variables is a non-negative + value, which measures the dependency between the variables. It is equal + to zero if and only if two random variables are independent, and higher + values mean higher dependency. + + The function relies on nonparametric methods based on entropy estimation + from k-nearest neighbors distances as described in [2]_ and [3]_. Both + methods are based on the idea originally proposed in [4]_. + + It can be used for univariate features selection, read more in the + :ref:`User Guide `. + + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : {'auto', bool, array-like}, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. 
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like"],
        "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
        "n_neighbors": [Interval(Integral, 1, None, closed="left")],
        "copy": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def mutual_info_regression(
    X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
):
    """Estimate mutual information for a continuous target variable.

    Mutual information (MI) [1]_ between two random variables is a non-negative
    value, which measures the dependency between the variables. It is equal
    to zero if and only if two random variables are independent, and higher
    values mean higher dependency.

    The function relies on nonparametric methods based on entropy estimation
    from k-nearest neighbors distances as described in [2]_ and [3]_. Both
    methods are based on the idea originally proposed in [4]_.

    It can be used for univariate features selection, read more in the
    :ref:`User Guide <mutual_info>`.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Feature matrix.

    y : array-like of shape (n_samples,)
        Target vector.

    discrete_features : {'auto', bool, array-like}, default='auto'
        If bool, then determines whether to consider all features discrete
        or continuous. If array, then it should be either a boolean mask
        with shape (n_features,) or array with indices of discrete features.
        If 'auto', it is assigned to False for dense `X` and to True for
        sparse `X`.

    n_neighbors : int, default=3
        Number of neighbors to use for MI estimation for continuous variables,
        see [2]_ and [3]_. Higher values reduce variance of the estimation, but
        could introduce a bias.

    copy : bool, default=True
        Whether to make a copy of the given data. If set to False, the initial
        data will be overwritten.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for adding small noise to
        continuous variables in order to remove repeated values.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    mi : ndarray, shape (n_features,)
        Estimated mutual information between each feature and the target in
        nat units.

    Notes
    -----
    1. The term "discrete features" is used instead of naming them
       "categorical", because it describes the essence more accurately.
       For example, pixel intensities of an image are discrete features
       (but hardly categorical) and you will get better results if mark them
       as such. Also note, that treating a continuous variable as discrete and
       vice versa will usually give incorrect results, so be attentive about
       that.
    2. True mutual information can't be negative. If its estimate turns out
       to be negative, it is replaced by zero.

    References
    ----------
    .. [1] `Mutual Information
           <https://en.wikipedia.org/wiki/Mutual_information>`_
           on Wikipedia.
    .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
           information". Phys. Rev. E 69, 2004.
    .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
           Data Sets". PLoS ONE 9(2), 2014.
    .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
           of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.feature_selection import mutual_info_regression
    >>> X, y = make_regression(
    ...     n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
    ... )
    >>> mutual_info_regression(X, y)
    array([0.1..., 2.6... , 0.0...])
    """
    # Target is continuous: discrete_target=False.
    return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state)
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like"],
        "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
        "n_neighbors": [Interval(Integral, 1, None, closed="left")],
        "copy": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def mutual_info_classif(
    X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
):
    """Estimate mutual information for a discrete target variable.

    Mutual information (MI) [1]_ between two random variables is a non-negative
    value, which measures the dependency between the variables. It is equal
    to zero if and only if two random variables are independent, and higher
    values mean higher dependency.

    The function relies on nonparametric methods based on entropy estimation
    from k-nearest neighbors distances as described in [2]_ and [3]_. Both
    methods are based on the idea originally proposed in [4]_.

    It can be used for univariate features selection, read more in the
    :ref:`User Guide <mutual_info>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Feature matrix.

    y : array-like of shape (n_samples,)
        Target vector.

    discrete_features : 'auto', bool or array-like, default='auto'
        If bool, then determines whether to consider all features discrete
        or continuous. If array, then it should be either a boolean mask
        with shape (n_features,) or array with indices of discrete features.
        If 'auto', it is assigned to False for dense `X` and to True for
        sparse `X`.

    n_neighbors : int, default=3
        Number of neighbors to use for MI estimation for continuous variables,
        see [2]_ and [3]_. Higher values reduce variance of the estimation, but
        could introduce a bias.

    copy : bool, default=True
        Whether to make a copy of the given data. If set to False, the initial
        data will be overwritten.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for adding small noise to
        continuous variables in order to remove repeated values.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    mi : ndarray, shape (n_features,)
        Estimated mutual information between each feature and the target in
        nat units.

    Notes
    -----
    1. The term "discrete features" is used instead of naming them
       "categorical", because it describes the essence more accurately.
       For example, pixel intensities of an image are discrete features
       (but hardly categorical) and you will get better results if mark them
       as such. Also note, that treating a continuous variable as discrete and
       vice versa will usually give incorrect results, so be attentive about
       that.
    2. True mutual information can't be negative. If its estimate turns out
       to be negative, it is replaced by zero.

    References
    ----------
    .. [1] `Mutual Information
           <https://en.wikipedia.org/wiki/Mutual_information>`_
           on Wikipedia.
    .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
           information". Phys. Rev. E 69, 2004.
    .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
           Data Sets". PLoS ONE 9(2), 2014.
    .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
           of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.feature_selection import mutual_info_classif
    >>> X, y = make_classification(
    ...     n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
    ...     shuffle=False, random_state=42
    ... )
    >>> mutual_info_classif(X, y)
    array([0.58..., 0.10..., 0.19..., 0.09... , 0.        ,
           0.        , 0.        , 0.        , 0.        , 0.        ])
    """
    # Target must be a valid classification target; then discrete_target=True.
    check_classification_targets(y)
    return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy, random_state)
+ """ + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + return rfe._fit( + X_train, + y_train, + lambda estimator, features: _score( + # TODO(SLEP6): pass score_params here + estimator, + X_test[:, features], + y_test, + scorer, + score_params=None, + ), + ).scores_ + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `estimator_` if available, otherwise we check the + unfitted `estimator`. We raise the original `AttributeError` if `attr` does + not exist. This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "estimator_"): + getattr(self.estimator_, attr) + else: + getattr(self.estimator, attr) + + return True + + return check + + +class RFE(_RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator): + """Feature ranking with recursive feature elimination. + + Given an external estimator that assigns weights to features (e.g., the + coefficients of a linear model), the goal of recursive feature elimination + (RFE) is to select features by recursively considering smaller and smaller + sets of features. First, the estimator is trained on the initial set of + features and the importance of each feature is obtained either through + any specific attribute or callable. + Then, the least important features are pruned from current set of features. + That procedure is recursively repeated on the pruned set until the desired + number of features to select is eventually reached. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : ``Estimator`` instance + A supervised learning estimator with a ``fit`` method that provides + information about feature importance + (e.g. `coef_`, `feature_importances_`). + + n_features_to_select : int or float, default=None + The number of features to select. If `None`, half of the features are + selected. 
class RFE(_RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator):
    """Feature ranking with recursive feature elimination.

    Given an external estimator that assigns weights to features (e.g., the
    coefficients of a linear model), RFE selects features by recursively
    considering smaller and smaller feature sets: the estimator is trained,
    per-feature importances are extracted, the least important features are
    pruned, and the procedure repeats until ``n_features_to_select`` remain.

    Read more in the :ref:`User Guide <rfe>`.

    Parameters
    ----------
    estimator : ``Estimator`` instance
        A supervised learning estimator with a ``fit`` method that provides
        information about feature importance
        (e.g. `coef_`, `feature_importances_`).

    n_features_to_select : int or float, default=None
        The number of features to select. If `None`, half of the features
        are selected. If integer, the absolute number of features to select.
        If float between 0 and 1, the fraction of features to select.

        .. versionchanged:: 0.24
            Added float values for fractions.

    step : int or float, default=1
        If greater than or equal to 1, the (integer) number of features to
        remove at each iteration. If within (0.0, 1.0), the percentage
        (rounded down) of features to remove at each iteration.

    verbose : int, default=0
        Controls verbosity of output.

    importance_getter : str or callable, default='auto'
        If 'auto', uses the feature importance either through a `coef_`
        or `feature_importances_` attributes of estimator.

        Also accepts a string that specifies an attribute name/path
        for extracting feature importance (implemented with `attrgetter`),
        e.g. `regressor_.coef_` for
        :class:`~sklearn.compose.TransformedTargetRegressor` or
        `named_steps.clf.feature_importances_` for a
        :class:`~sklearn.pipeline.Pipeline` whose last step is named `clf`.

        If `callable`, overrides the default feature importance getter.
        The callable is passed the fitted estimator and should return the
        importance for each feature.

        .. versionadded:: 0.24

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The classes labels. Only available when `estimator` is a classifier.

    estimator_ : ``Estimator`` instance
        The fitted estimator used to select features.

    n_features_ : int
        The number of selected features.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    ranking_ : ndarray of shape (n_features,)
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.

    support_ : ndarray of shape (n_features,)
        The mask of selected features.

    See Also
    --------
    RFECV : Recursive feature elimination with built-in cross-validated
        selection of the best number of features.
    SelectFromModel : Feature selection based on thresholds of importance
        weights.
    SequentialFeatureSelector : Sequential cross-validation based feature
        selection. Does not rely on importance weights.

    Notes
    -----
    Allows NaN/Inf in the input if the underlying estimator does as well.

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.

    Examples
    --------
    The following example shows how to retrieve the 5 most informative
    features in the Friedman #1 dataset.

    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFE
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFE(estimator, n_features_to_select=5, step=1)
    >>> selector = selector.fit(X, y)
    >>> selector.support_
    array([ True,  True,  True,  True,  True, False, False, False, False,
           False])
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
    """

    _parameter_constraints: dict = {
        "estimator": [HasMethods(["fit"])],
        "n_features_to_select": [
            None,
            Interval(RealNotInt, 0, 1, closed="right"),
            Interval(Integral, 0, None, closed="neither"),
        ],
        "step": [
            Interval(Integral, 0, None, closed="neither"),
            Interval(RealNotInt, 0, 1, closed="neither"),
        ],
        "verbose": ["verbose"],
        "importance_getter": [str, callable],
    }

    def __init__(
        self,
        estimator,
        *,
        n_features_to_select=None,
        step=1,
        verbose=0,
        importance_getter="auto",
    ):
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        self.importance_getter = importance_getter
        self.verbose = verbose

    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator (classifier / regressor).
        return self.estimator._estimator_type

    @property
    def classes_(self):
        """Classes labels available when `estimator` is a classifier.

        Returns
        -------
        ndarray of shape (n_classes,)
        """
        return self.estimator_.classes_

    @_fit_context(
        # RFE.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, **fit_params):
        """Fit the RFE model and then the underlying estimator on the selected features.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

        y : array-like of shape (n_samples,)
            The target values.

        **fit_params : dict
            Additional parameters passed to the `fit` method of the underlying
            estimator.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        _raise_for_unsupported_routing(self, "fit", **fit_params)
        return self._fit(X, y, **fit_params)

    def _fit(self, X, y, step_score=None, **fit_params):
        # `step_score` is a private hook used by RFECV: when provided, the
        # score of each elimination step is recorded in `self.scores_`.
        # Plain `fit` passes None, so `self.scores_` is not computed then.
        X, y = self._validate_data(
            X,
            y,
            accept_sparse="csc",
            ensure_min_features=2,
            force_all_finite=False,
            multi_output=True,
        )

        # Resolve the target number of features.
        n_features = X.shape[1]
        if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
        elif isinstance(self.n_features_to_select, Integral):  # int
            n_features_to_select = self.n_features_to_select
        else:  # float
            n_features_to_select = int(n_features * self.n_features_to_select)

        # Resolve the per-iteration elimination count.
        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)

        support_mask = np.ones(n_features, dtype=bool)
        ranking = np.ones(n_features, dtype=int)

        if step_score:
            self.scores_ = []

        # Recursive elimination loop.
        while np.sum(support_mask) > n_features_to_select:
            remaining = np.arange(n_features)[support_mask]

            estimator = clone(self.estimator)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_mask))

            estimator.fit(X[:, remaining], y, **fit_params)

            importances = _get_feature_importances(
                estimator,
                self.importance_getter,
                transform_func="square",
            )
            # For sparse coefficients argsort may return a matrix; flatten.
            ranks = np.ravel(np.argsort(importances))

            # Never remove more features than needed to reach the target.
            n_to_drop = min(step, np.sum(support_mask) - n_features_to_select)

            # Score before elimination, so the scored estimator used exactly
            # the features that had not yet been eliminated.
            if step_score:
                self.scores_.append(step_score(estimator, remaining))
            support_mask[remaining[ranks][:n_to_drop]] = False
            ranking[np.logical_not(support_mask)] += 1

        # Final refit on the selected features.
        remaining = np.arange(n_features)[support_mask]
        self.estimator_ = clone(self.estimator)
        self.estimator_.fit(X[:, remaining], y, **fit_params)

        if step_score:
            self.scores_.append(step_score(self.estimator_, remaining))
        self.n_features_ = support_mask.sum()
        self.support_ = support_mask
        self.ranking_ = ranking

        return self

    @available_if(_estimator_has("predict"))
    def predict(self, X):
        """Reduce X to the selected features and predict using the estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape [n_samples]
            The predicted target values.
        """
        check_is_fitted(self)
        return self.estimator_.predict(self.transform(X))

    @available_if(_estimator_has("score"))
    def score(self, X, y, **fit_params):
        """Reduce X to the selected features and return the score of the estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        y : array of shape [n_samples]
            The target values.

        **fit_params : dict
            Parameters to pass to the `score` method of the underlying
            estimator.

            .. versionadded:: 1.0

        Returns
        -------
        score : float
            Score of the underlying base estimator computed with the selected
            features returned by `rfe.transform(X)` and `y`.
        """
        check_is_fitted(self)
        return self.estimator_.score(self.transform(X), y, **fit_params)

    def _get_support_mask(self):
        check_is_fitted(self)
        return self.support_

    @available_if(_estimator_has("decision_function"))
    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like or sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        score : array, shape = [n_samples, n_classes] or [n_samples]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
            Regression and binary classification produce an array of shape
            [n_samples].
        """
        check_is_fitted(self)
        return self.estimator_.decision_function(self.transform(X))

    @available_if(_estimator_has("predict_proba"))
    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : {array-like or sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape (n_samples, n_classes)
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        return self.estimator_.predict_proba(self.transform(X))

    @available_if(_estimator_has("predict_log_proba"))
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape (n_samples, n_classes)
            The class log-probabilities of the input samples. The order of
            the classes corresponds to that in the attribute
            :term:`classes_`.
        """
        check_is_fitted(self)
        return self.estimator_.predict_log_proba(self.transform(X))

    def _more_tags(self):
        tags = {
            "poor_score": True,
            "requires_y": True,
            "allow_nan": True,
        }

        # Adjust allow_nan if estimator explicitly defines `allow_nan`.
        if hasattr(self.estimator, "_get_tags"):
            tags["allow_nan"] = self.estimator._get_tags()["allow_nan"]

        return tags
+ + Returns + ------- + p : array of shape (n_samples, n_classes) + The class log-probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.estimator_.predict_log_proba(self.transform(X)) + + def _more_tags(self): + tags = { + "poor_score": True, + "requires_y": True, + "allow_nan": True, + } + + # Adjust allow_nan if estimator explicitly defines `allow_nan`. + if hasattr(self.estimator, "_get_tags"): + tags["allow_nan"] = self.estimator._get_tags()["allow_nan"] + + return tags + + +class RFECV(RFE): + """Recursive feature elimination with cross-validation to select features. + + The number of features selected is tuned automatically by fitting an :class:`RFE` + selector on the different cross-validation splits (provided by the `cv` parameter). + The performance of the :class:`RFE` selector are evaluated using `scorer` for + different number of selected features and aggregated together. Finally, the scores + are averaged across folds and the number of features selected is set to the number + of features that maximize the cross-validation score. + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : ``Estimator`` instance + A supervised learning estimator with a ``fit`` method that provides + information about feature importance either through a ``coef_`` + attribute or through a ``feature_importances_`` attribute. + + step : int or float, default=1 + If greater than or equal to 1, then ``step`` corresponds to the + (integer) number of features to remove at each iteration. + If within (0.0, 1.0), then ``step`` corresponds to the percentage + (rounded down) of features to remove at each iteration. + Note that the last iteration may remove fewer than ``step`` features in + order to reach ``min_features_to_select``. 
+ + min_features_to_select : int, default=1 + The minimum number of features to be selected. This number of features + will always be scored, even if the difference between the original + feature count and ``min_features_to_select`` isn't divisible by + ``step``. + + .. versionadded:: 0.20 + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. If the + estimator is a classifier or if ``y`` is neither binary nor multiclass, + :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value of None changed from 3-fold to 5-fold. + + scoring : str, callable or None, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + verbose : int, default=0 + Controls verbosity of output. + + n_jobs : int or None, default=None + Number of cores to run in parallel while fitting across folds. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.18 + + importance_getter : str or callable, default='auto' + If 'auto', uses the feature importance either through a `coef_` + or `feature_importances_` attributes of estimator. + + Also accepts a string that specifies an attribute name/path + for extracting feature importance. 
+ For example, give `regressor_.coef_` in case of + :class:`~sklearn.compose.TransformedTargetRegressor` or + `named_steps.clf.feature_importances_` in case of + :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. + + If `callable`, overrides the default feature importance getter. + The callable is passed with the fitted estimator and it should + return importance for each feature. + + .. versionadded:: 0.24 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The classes labels. Only available when `estimator` is a classifier. + + estimator_ : ``Estimator`` instance + The fitted estimator used to select features. + + cv_results_ : dict of ndarrays + A dict with keys: + + split(k)_test_score : ndarray of shape (n_subsets_of_features,) + The cross-validation scores across (k)th fold. + + mean_test_score : ndarray of shape (n_subsets_of_features,) + Mean of scores over the folds. + + std_test_score : ndarray of shape (n_subsets_of_features,) + Standard deviation of scores over the folds. + + .. versionadded:: 1.0 + + n_features_ : int + The number of selected features with cross-validation. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + ranking_ : narray of shape (n_features,) + The feature ranking, such that `ranking_[i]` + corresponds to the ranking + position of the i-th feature. + Selected (i.e., estimated best) + features are assigned rank 1. + + support_ : ndarray of shape (n_features,) + The mask of selected features. + + See Also + -------- + RFE : Recursive feature elimination. 
+ + Notes + ----- + The size of all values in ``cv_results_`` is equal to + ``ceil((n_features - min_features_to_select) / step) + 1``, + where step is the number of features removed at each iteration. + + Allows NaN/Inf in the input if the underlying estimator does as well. + + References + ---------- + + .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection + for cancer classification using support vector machines", + Mach. Learn., 46(1-3), 389--422, 2002. + + Examples + -------- + The following example shows how to retrieve the a-priori not known 5 + informative features in the Friedman #1 dataset. + + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.feature_selection import RFECV + >>> from sklearn.svm import SVR + >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + >>> estimator = SVR(kernel="linear") + >>> selector = RFECV(estimator, step=1, cv=5) + >>> selector = selector.fit(X, y) + >>> selector.support_ + array([ True, True, True, True, True, False, False, False, False, + False]) + >>> selector.ranking_ + array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) + """ + + _parameter_constraints: dict = { + **RFE._parameter_constraints, + "min_features_to_select": [Interval(Integral, 0, None, closed="neither")], + "cv": ["cv_object"], + "scoring": [None, str, callable], + "n_jobs": [None, Integral], + } + _parameter_constraints.pop("n_features_to_select") + + def __init__( + self, + estimator, + *, + step=1, + min_features_to_select=1, + cv=None, + scoring=None, + verbose=0, + n_jobs=None, + importance_getter="auto", + ): + self.estimator = estimator + self.step = step + self.importance_getter = importance_getter + self.cv = cv + self.scoring = scoring + self.verbose = verbose + self.n_jobs = n_jobs + self.min_features_to_select = min_features_to_select + + @_fit_context( + # RFECV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, groups=None): + """Fit the RFE model and 
automatically tune the number of selected features. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the total number of features. + + y : array-like of shape (n_samples,) + Target values (integers for classification, real numbers for + regression). + + groups : array-like of shape (n_samples,) or None, default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`~sklearn.model_selection.GroupKFold`). + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", groups=groups) + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + ensure_min_features=2, + force_all_finite=False, + multi_output=True, + ) + + # Initialization + cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) + scorer = check_scoring(self.estimator, scoring=self.scoring) + n_features = X.shape[1] + + if 0.0 < self.step < 1.0: + step = int(max(1, self.step * n_features)) + else: + step = int(self.step) + + # Build an RFE object, which will evaluate and score each possible + # feature count, down to self.min_features_to_select + rfe = RFE( + estimator=self.estimator, + n_features_to_select=self.min_features_to_select, + importance_getter=self.importance_getter, + step=self.step, + verbose=self.verbose, + ) + + # Determine the number of subsets of features by fitting across + # the train folds and choosing the "features_to_select" parameter + # that gives the least averaged error across all folds. + + # Note that joblib raises a non-picklable error for bound methods + # even if n_jobs is set to 1 with the default multiprocessing + # backend. 
+ # This branching is done so that to + # make sure that user code that sets n_jobs to 1 + # and provides bound methods as scorers is not broken with the + # addition of n_jobs parameter in version 0.18. + + if effective_n_jobs(self.n_jobs) == 1: + parallel, func = list, _rfe_single_fit + else: + parallel = Parallel(n_jobs=self.n_jobs) + func = delayed(_rfe_single_fit) + + scores = parallel( + func(rfe, self.estimator, X, y, train, test, scorer) + for train, test in cv.split(X, y, groups) + ) + + scores = np.array(scores) + scores_sum = np.sum(scores, axis=0) + scores_sum_rev = scores_sum[::-1] + argmax_idx = len(scores_sum) - np.argmax(scores_sum_rev) - 1 + n_features_to_select = max( + n_features - (argmax_idx * step), self.min_features_to_select + ) + + # Re-execute an elimination with best_k over the whole set + rfe = RFE( + estimator=self.estimator, + n_features_to_select=n_features_to_select, + step=self.step, + importance_getter=self.importance_getter, + verbose=self.verbose, + ) + + rfe.fit(X, y) + + # Set final attributes + self.support_ = rfe.support_ + self.n_features_ = rfe.n_features_ + self.ranking_ = rfe.ranking_ + self.estimator_ = clone(self.estimator) + self.estimator_.fit(self._transform(X), y) + + # reverse to stay consistent with before + scores_rev = scores[:, ::-1] + self.cv_results_ = {} + self.cv_results_["mean_test_score"] = np.mean(scores_rev, axis=0) + self.cv_results_["std_test_score"] = np.std(scores_rev, axis=0) + + for i in range(scores.shape[0]): + self.cv_results_[f"split{i}_test_score"] = scores_rev[i] + + return self diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..5a90d46c9758b47a92121b91bd6e049207dc1c48 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py @@ -0,0 +1,300 @@ +""" +Sequential feature 
selection +""" +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..metrics import get_scorer_names +from ..model_selection import check_cv, cross_val_score +from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +class SequentialFeatureSelector( + _RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator +): + """Transformer that performs Sequential Feature Selection. + + This Sequential Feature Selector adds (forward selection) or + removes (backward selection) features to form a feature subset in a + greedy fashion. At each stage, this estimator chooses the best feature to + add or remove based on the cross-validation score of an estimator. In + the case of unsupervised learning, this Sequential Feature Selector + looks only at the features (X), not the desired outputs (y). + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.24 + + Parameters + ---------- + estimator : estimator instance + An unfitted estimator. + + n_features_to_select : "auto", int or float, default="auto" + If `"auto"`, the behaviour depends on the `tol` parameter: + + - if `tol` is not `None`, then features are selected while the score + change does not exceed `tol`. + - otherwise, half of the features are selected. + + If integer, the parameter is the absolute number of features to select. + If float between 0 and 1, it is the fraction of features to select. + + .. versionadded:: 1.1 + The option `"auto"` was added in version 1.1. + + .. versionchanged:: 1.3 + The default changed from `"warn"` to `"auto"` in 1.3. 
+ + tol : float, default=None + If the score is not incremented by at least `tol` between two + consecutive feature additions or removals, stop adding or removing. + + `tol` can be negative when removing features using `direction="backward"`. + It can be useful to reduce the number of features at the cost of a small + decrease in the score. + + `tol` is enabled only when `n_features_to_select` is `"auto"`. + + .. versionadded:: 1.1 + + direction : {'forward', 'backward'}, default='forward' + Whether to perform forward selection or backward selection. + + scoring : str or callable, default=None + A single str (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + + NOTE that when using a custom scorer, it should return a single + value. + + If None, the estimator's score method is used. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other + cases, :class:`~sklearn.model_selection.KFold` is used. These splitters + are instantiated with `shuffle=False` so the splits will be the same + across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + n_jobs : int, default=None + Number of jobs to run in parallel. When evaluating a new feature to + add or remove, the cross-validation procedure is parallel over the + folds. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. 
+ + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_features_to_select_ : int + The number of features that were selected. + + support_ : ndarray of shape (n_features,), dtype=bool + The mask of selected features. + + See Also + -------- + GenericUnivariateSelect : Univariate feature selector with configurable + strategy. + RFE : Recursive feature elimination based on importance weights. + RFECV : Recursive feature elimination based on importance weights, with + automatic selection of the number of features. + SelectFromModel : Feature selection based on thresholds of importance + weights. + + Examples + -------- + >>> from sklearn.feature_selection import SequentialFeatureSelector + >>> from sklearn.neighbors import KNeighborsClassifier + >>> from sklearn.datasets import load_iris + >>> X, y = load_iris(return_X_y=True) + >>> knn = KNeighborsClassifier(n_neighbors=3) + >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3) + >>> sfs.fit(X, y) + SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3), + n_features_to_select=3) + >>> sfs.get_support() + array([ True, False, True, True]) + >>> sfs.transform(X).shape + (150, 3) + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "n_features_to_select": [ + StrOptions({"auto"}), + Interval(RealNotInt, 0, 1, closed="right"), + Interval(Integral, 0, None, closed="neither"), + ], + "tol": [None, Interval(Real, None, None, closed="neither")], + "direction": [StrOptions({"forward", "backward"})], + "scoring": [None, StrOptions(set(get_scorer_names())), callable], + "cv": ["cv_object"], + "n_jobs": [None, Integral], + } + 
+ def __init__( + self, + estimator, + *, + n_features_to_select="auto", + tol=None, + direction="forward", + scoring=None, + cv=5, + n_jobs=None, + ): + self.estimator = estimator + self.n_features_to_select = n_features_to_select + self.tol = tol + self.direction = direction + self.scoring = scoring + self.cv = cv + self.n_jobs = n_jobs + + @_fit_context( + # SequentialFeatureSelector.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Learn the features to select from X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of predictors. + + y : array-like of shape (n_samples,), default=None + Target values. This parameter may be ignored for + unsupervised learning. + + Returns + ------- + self : object + Returns the instance itself. + """ + tags = self._get_tags() + X = self._validate_data( + X, + accept_sparse="csc", + ensure_min_features=2, + force_all_finite=not tags.get("allow_nan", True), + ) + n_features = X.shape[1] + + if self.n_features_to_select == "auto": + if self.tol is not None: + # With auto feature selection, `n_features_to_select_` will be updated + # to `support_.sum()` after features are selected. 
+ self.n_features_to_select_ = n_features - 1 + else: + self.n_features_to_select_ = n_features // 2 + elif isinstance(self.n_features_to_select, Integral): + if self.n_features_to_select >= n_features: + raise ValueError("n_features_to_select must be < n_features.") + self.n_features_to_select_ = self.n_features_to_select + elif isinstance(self.n_features_to_select, Real): + self.n_features_to_select_ = int(n_features * self.n_features_to_select) + + if self.tol is not None and self.tol < 0 and self.direction == "forward": + raise ValueError("tol must be positive when doing forward selection") + + cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) + + cloned_estimator = clone(self.estimator) + + # the current mask corresponds to the set of features: + # - that we have already *selected* if we do forward selection + # - that we have already *excluded* if we do backward selection + current_mask = np.zeros(shape=n_features, dtype=bool) + n_iterations = ( + self.n_features_to_select_ + if self.n_features_to_select == "auto" or self.direction == "forward" + else n_features - self.n_features_to_select_ + ) + + old_score = -np.inf + is_auto_select = self.tol is not None and self.n_features_to_select == "auto" + for _ in range(n_iterations): + new_feature_idx, new_score = self._get_best_new_feature_score( + cloned_estimator, X, y, cv, current_mask + ) + if is_auto_select and ((new_score - old_score) < self.tol): + break + + old_score = new_score + current_mask[new_feature_idx] = True + + if self.direction == "backward": + current_mask = ~current_mask + + self.support_ = current_mask + self.n_features_to_select_ = self.support_.sum() + + return self + + def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask): + # Return the best new feature and its score to add to the current_mask, + # i.e. return the best new feature and its score to add (resp. remove) + # when doing forward selection (resp. backward selection). 
+ # Feature will be added if the current score and past score are greater + # than tol when n_feature is auto, + candidate_feature_indices = np.flatnonzero(~current_mask) + scores = {} + for feature_idx in candidate_feature_indices: + candidate_mask = current_mask.copy() + candidate_mask[feature_idx] = True + if self.direction == "backward": + candidate_mask = ~candidate_mask + X_new = X[:, candidate_mask] + scores[feature_idx] = cross_val_score( + estimator, + X_new, + y, + cv=cv, + scoring=self.scoring, + n_jobs=self.n_jobs, + ).mean() + new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx]) + return new_feature_idx, scores[new_feature_idx] + + def _get_support_mask(self): + check_is_fitted(self) + return self.support_ + + def _more_tags(self): + return { + "allow_nan": _safe_tags(self.estimator, key="allow_nan"), + } diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py new file mode 100644 index 0000000000000000000000000000000000000000..df1b5072ce7415c21b1e3df922e742d3676b168c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py @@ -0,0 +1,1161 @@ +"""Univariate features selection.""" + +# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay. +# L. Buitinck, A. 
Joly +# License: BSD 3 clause + + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import special, stats +from scipy.sparse import issparse + +from ..base import BaseEstimator, _fit_context +from ..preprocessing import LabelBinarizer +from ..utils import as_float_array, check_array, check_X_y, safe_mask, safe_sqr +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import row_norms, safe_sparse_dot +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +def _clean_nans(scores): + """ + Fixes Issue #1240: NaNs can't be properly compared, so change them to the + smallest value of scores's dtype. -inf seems to be unreliable. + """ + # XXX where should this function be called? fit? scoring functions + # themselves? + scores = as_float_array(scores, copy=True) + scores[np.isnan(scores)] = np.finfo(scores.dtype).min + return scores + + +###################################################################### +# Scoring functions + + +# The following function is a rewriting of scipy.stats.f_oneway +# Contrary to the scipy.stats.f_oneway implementation it does not +# copy the data while keeping the inputs unchanged. +def f_oneway(*args): + """Perform a 1-way ANOVA. + + The one-way ANOVA tests the null hypothesis that 2 or more groups have + the same population mean. The test is applied to samples from two or + more groups, possibly with differing sizes. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *args : {array-like, sparse matrix} + Sample1, sample2... The sample measurements should be given as + arguments. + + Returns + ------- + f_statistic : float + The computed F-value of the test. + p_value : float + The associated p-value from the F-distribution. + + Notes + ----- + The ANOVA test has important assumptions that must be satisfied in order + for the associated p-value to be valid. + + 1. The samples are independent + 2. 
Each sample is from a normally distributed population + 3. The population standard deviations of the groups are all equal. This + property is known as homoscedasticity. + + If these assumptions are not true for a given set of data, it may still be + possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although + with some loss of power. + + The algorithm is from Heiman[2], pp.394-7. + + See ``scipy.stats.f_oneway`` that should give the same results while + being less efficient. + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 14. + http://vassarstats.net/textbook + + .. [2] Heiman, G.W. Research Methods in Statistics. 2002. + """ + n_classes = len(args) + args = [as_float_array(a) for a in args] + n_samples_per_class = np.array([a.shape[0] for a in args]) + n_samples = np.sum(n_samples_per_class) + ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args) + sums_args = [np.asarray(a.sum(axis=0)) for a in args] + square_of_sums_alldata = sum(sums_args) ** 2 + square_of_sums_args = [s**2 for s in sums_args] + sstot = ss_alldata - square_of_sums_alldata / float(n_samples) + ssbn = 0.0 + for k, _ in enumerate(args): + ssbn += square_of_sums_args[k] / n_samples_per_class[k] + ssbn -= square_of_sums_alldata / float(n_samples) + sswn = sstot - ssbn + dfbn = n_classes - 1 + dfwn = n_samples - n_classes + msb = ssbn / float(dfbn) + msw = sswn / float(dfwn) + constant_features_idx = np.where(msw == 0.0)[0] + if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size: + warnings.warn("Features %s are constant." 
% constant_features_idx, UserWarning) + f = msb / msw + # flatten matrix to vector in sparse case + f = np.asarray(f).ravel() + prob = special.fdtrc(dfbn, dfwn, f) + return f, prob + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def f_classif(X, y): + """Compute the ANOVA F-value for the provided sample. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The set of regressors that will be tested sequentially. + + y : array-like of shape (n_samples,) + The target vector. + + Returns + ------- + f_statistic : ndarray of shape (n_features,) + F-statistic for each feature. + + p_values : ndarray of shape (n_features,) + P-values associated with the F-statistic. + + See Also + -------- + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.feature_selection import f_classif + >>> X, y = make_classification( + ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1, + ... shuffle=False, random_state=42 + ... ) + >>> f_statistic, p_values = f_classif(X, y) + >>> f_statistic + array([2.2...e+02, 7.0...e-01, 1.6...e+00, 9.3...e-01, + 5.4...e+00, 3.2...e-01, 4.7...e-02, 5.7...e-01, + 7.5...e-01, 8.9...e-02]) + >>> p_values + array([7.1...e-27, 4.0...e-01, 1.9...e-01, 3.3...e-01, + 2.2...e-02, 5.7...e-01, 8.2...e-01, 4.5...e-01, + 3.8...e-01, 7.6...e-01]) + """ + X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"]) + args = [X[safe_mask(X, y == k)] for k in np.unique(y)] + return f_oneway(*args) + + +def _chisquare(f_obs, f_exp): + """Fast replacement for scipy.stats.chisquare. + + Version from https://github.com/scipy/scipy/pull/2525 with additional + optimizations. 
+ """ + f_obs = np.asarray(f_obs, dtype=np.float64) + + k = len(f_obs) + # Reuse f_obs for chi-squared statistics + chisq = f_obs + chisq -= f_exp + chisq **= 2 + with np.errstate(invalid="ignore"): + chisq /= f_exp + chisq = chisq.sum(axis=0) + return chisq, special.chdtrc(k - 1, chisq) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def chi2(X, y): + """Compute chi-squared stats between each non-negative feature and class. + + This score can be used to select the `n_features` features with the + highest values for the test chi-squared statistic from X, which must + contain only **non-negative features** such as booleans or frequencies + (e.g., term counts in document classification), relative to the classes. + + Recall that the chi-square test measures dependence between stochastic + variables, so using this function "weeds out" the features that are the + most likely to be independent of class and therefore irrelevant for + classification. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample vectors. + + y : array-like of shape (n_samples,) + Target vector (class labels). + + Returns + ------- + chi2 : ndarray of shape (n_features,) + Chi2 statistics for each feature. + + p_values : ndarray of shape (n_features,) + P-values for each feature. + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + f_regression : F-value between label/feature for regression tasks. + + Notes + ----- + Complexity of this algorithm is O(n_classes * n_features). + + Examples + -------- + >>> import numpy as np + >>> from sklearn.feature_selection import chi2 + >>> X = np.array([[1, 1, 3], + ... [0, 1, 5], + ... [5, 4, 1], + ... [6, 6, 2], + ... [1, 4, 0], + ... 
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like"],
        "center": ["boolean"],
        "force_finite": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def r_regression(X, y, *, center=True, force_finite=True):
    """Compute Pearson's r between each feature of `X` and the target `y`.

    Scoring function for univariate feature selection; not a selector by
    itself.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data matrix.

    y : array-like of shape (n_samples,)
        The target vector.

    center : bool, default=True
        Whether to center `X` and `y` before computing the correlation.

    force_finite : bool, default=True
        If True, non-finite correlations (constant feature or constant
        target) are clamped to 0.0 instead of being returned as `np.nan`.

    Returns
    -------
    correlation_coefficient : ndarray of shape (n_features,)
        Pearson's r of each feature with the target.
    """
    X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64)
    n_samples = X.shape[0]

    if center:
        y = y - np.mean(y)
        # E[(x - mean(x)) * (y - mean(y))] == E[x * (y - mean(y))], so X
        # itself never needs to be centered explicitly.
        # NOTE: for SciPy <= 1.10, `.mean` on a sparse *matrix* returns a
        # np.matrix while dense and sparse *arrays* return np.ndarray;
        # flatten the matrix case.
        col_means = X.mean(axis=0)
        if isinstance(col_means, np.matrix):
            col_means = col_means.getA1()
        # ||x - mean(x)||^2 == ||x||^2 - n * mean(x)^2 (scaled std via moments)
        scale = np.sqrt(row_norms(X.T, squared=True) - n_samples * col_means**2)
    else:
        scale = row_norms(X.T)

    correlation_coefficient = safe_sparse_dot(y, X)
    with np.errstate(divide="ignore", invalid="ignore"):
        correlation_coefficient /= scale
        correlation_coefficient /= np.linalg.norm(y)

    if force_finite and not np.isfinite(correlation_coefficient).all():
        # Constant target or feature -> correlation undefined; clamp to the
        # minimal correlation 0.0 as documented.
        correlation_coefficient[np.isnan(correlation_coefficient)] = 0.0
    return correlation_coefficient
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like"],
        "center": ["boolean"],
        "force_finite": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def f_regression(X, y, *, center=True, force_finite=True):
    """Univariate linear regression tests returning F-statistic and p-values.

    First computes Pearson's r for every feature via :func:`r_regression`,
    then converts it to an F score and an associated p-value.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data matrix.

    y : array-like of shape (n_samples,)
        The target vector.

    center : bool, default=True
        Whether to center `X` and `y` before computing the correlation.

    force_finite : bool, default=True
        If True, non-finite statistics are forced to finite values:
        a perfectly (anti-)correlated feature yields an infinite
        F-statistic, replaced by `np.finfo(dtype).max` (p-value 0.0);
        a constant feature/target yields NaN, replaced by an F-statistic
        of 0.0 and a p-value of 1.0.

    Returns
    -------
    f_statistic : ndarray of shape (n_features,)
        F-statistic for each feature.

    p_values : ndarray of shape (n_features,)
        P-values associated with the F-statistic.
    """
    correlation_coefficient = r_regression(
        X, y, center=center, force_finite=force_finite
    )
    # One degree of freedom is consumed by the slope, one more by the
    # intercept when centering.
    deg_of_freedom = y.size - (2 if center else 1)

    corr_sq = correlation_coefficient**2

    with np.errstate(divide="ignore", invalid="ignore"):
        f_statistic = corr_sq / (1 - corr_sq) * deg_of_freedom
        p_values = stats.f.sf(f_statistic, 1, deg_of_freedom)

    if force_finite and not np.isfinite(f_statistic).all():
        # Perfect (anti-)correlation: cap the infinite F-statistic; the
        # survival function already returned a p-value of 0 for it.
        inf_mask = np.isinf(f_statistic)
        f_statistic[inf_mask] = np.finfo(f_statistic.dtype).max
        # Constant target/feature: minimal statistic, maximal p-value.
        nan_mask = np.isnan(f_statistic)
        f_statistic[nan_mask] = 0.0
        p_values[nan_mask] = 1.0
    return f_statistic, p_values
If the selector is unsupervised then `y` can be set to `None`. + + Returns + ------- + self : object + Returns the instance itself. + """ + if y is None: + X = self._validate_data(X, accept_sparse=["csr", "csc"]) + else: + X, y = self._validate_data( + X, y, accept_sparse=["csr", "csc"], multi_output=True + ) + + self._check_params(X, y) + score_func_ret = self.score_func(X, y) + if isinstance(score_func_ret, (list, tuple)): + self.scores_, self.pvalues_ = score_func_ret + self.pvalues_ = np.asarray(self.pvalues_) + else: + self.scores_ = score_func_ret + self.pvalues_ = None + + self.scores_ = np.asarray(self.scores_) + + return self + + def _check_params(self, X, y): + pass + + def _more_tags(self): + return {"requires_y": True} + + +###################################################################### +# Specific filters +###################################################################### +class SelectPercentile(_BaseFilter): + """Select features according to a percentile of the highest scores. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues) or a single array with scores. + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + .. versionadded:: 0.18 + + percentile : int, default=10 + Percent of features to keep. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores, None if `score_func` returned only scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
class SelectPercentile(_BaseFilter):
    """Select features according to a percentile of the highest scores.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues) or a single array with scores. The default only
        works with classification tasks.

    percentile : int, default=10
        Percent of features to keep.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores, None if `score_func` returned only scores.

    Notes
    -----
    Ties between features with equal scores are broken in an unspecified
    way. Supports unsupervised selection when the score function only
    needs `X`.
    """

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "percentile": [Interval(Real, 0, 100, closed="both")],
    }

    def __init__(self, score_func=f_classif, *, percentile=10):
        super().__init__(score_func=score_func)
        self.percentile = percentile

    def _get_support_mask(self):
        check_is_fitted(self)

        # Short-circuit the trivial percentiles (also sidesteps NaN scores).
        if self.percentile == 100:
            return np.ones(len(self.scores_), dtype=bool)
        if self.percentile == 0:
            return np.zeros(len(self.scores_), dtype=bool)

        scores = _clean_nans(self.scores_)
        cutoff = np.percentile(scores, 100 - self.percentile)
        support = scores > cutoff
        # Scores sitting exactly on the cutoff are admitted only until the
        # requested feature count is reached; remaining ties are dropped.
        tied = np.where(scores == cutoff)[0]
        if len(tied):
            n_target = int(len(scores) * self.percentile / 100)
            support[tied[: n_target - support.sum()]] = True
        return support

    def _more_tags(self):
        return {"requires_y": False}
+ GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Notes + ----- + Ties between features with equal scores will be broken in an unspecified + way. + + This filter supports unsupervised feature selection that only requests `X` for + computing the scores. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.feature_selection import SelectKBest, chi2 + >>> X, y = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y) + >>> X_new.shape + (1797, 20) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")], + } + + def __init__(self, score_func=f_classif, *, k=10): + super().__init__(score_func=score_func) + self.k = k + + def _check_params(self, X, y): + if not isinstance(self.k, str) and self.k > X.shape[1]: + warnings.warn( + f"k={self.k} is greater than n_features={X.shape[1]}. " + "All the features will be returned." + ) + + def _get_support_mask(self): + check_is_fitted(self) + + if self.k == "all": + return np.ones(self.scores_.shape, dtype=bool) + elif self.k == 0: + return np.zeros(self.scores_.shape, dtype=bool) + else: + scores = _clean_nans(self.scores_) + mask = np.zeros(scores.shape, dtype=bool) + + # Request a stable sort. Mergesort takes more memory (~40MB per + # megafeature on x86-64). + mask[np.argsort(scores, kind="mergesort")[-self.k :]] = 1 + return mask + + def _more_tags(self): + return {"requires_y": False} + + +class SelectFpr(_BaseFilter): + """Filter: Select the pvalues below alpha based on a FPR test. + + FPR test stands for False Positive Rate test. It controls the total + amount of false detections. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). 
class SelectFpr(_BaseFilter):
    """Filter: select the p-values below `alpha` based on an FPR test.

    FPR (False Positive Rate) test: controls the total amount of false
    detections by thresholding the uncorrected p-values.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues). The default only works with classification tasks.

    alpha : float, default=5e-2
        Features with p-values less than `alpha` are selected.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores.
    """

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "alpha": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, score_func=f_classif, *, alpha=5e-2):
        super().__init__(score_func=score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self)
        # Keep every feature whose raw p-value clears the FPR threshold.
        return self.pvalues_ < self.alpha
+ f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + References + ---------- + https://en.wikipedia.org/wiki/False_discovery_rate + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import SelectFdr, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y) + >>> X_new.shape + (569, 16) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "alpha": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, score_func=f_classif, *, alpha=5e-2): + super().__init__(score_func=score_func) + self.alpha = alpha + + def _get_support_mask(self): + check_is_fitted(self) + + n_features = len(self.pvalues_) + sv = np.sort(self.pvalues_) + selected = sv[ + sv <= float(self.alpha) / n_features * np.arange(1, n_features + 1) + ] + if selected.size == 0: + return np.zeros_like(self.pvalues_, dtype=bool) + return self.pvalues_ <= selected.max() + + +class SelectFwe(_BaseFilter): + """Filter: Select the p-values corresponding to Family-wise error rate. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + alpha : float, default=5e-2 + The highest uncorrected p-value for features to keep. 
class SelectFwe(_BaseFilter):
    """Filter: select the p-values corresponding to the family-wise error rate.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues). The default only works with classification tasks.

    alpha : float, default=5e-2
        The highest uncorrected p-value for features to keep.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores.
    """

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "alpha": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, score_func=f_classif, *, alpha=5e-2):
        super().__init__(score_func=score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self)
        # Bonferroni-style correction: divide alpha by the number of tests.
        return self.pvalues_ < self.alpha / len(self.pvalues_)
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
    """Univariate feature selector with configurable strategy.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues). For modes 'percentile' or 'kbest' it can return
        a single array of scores.

    mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
        Feature selection mode. 'percentile' and 'kbest' also support
        unsupervised selection (`y=None`).

    param : "all", float or int, default=1e-5
        Parameter of the corresponding mode.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores, None if `score_func` returned scores only.
    """

    # Map each mode name to the concrete selector class implementing it.
    _selection_modes: dict = {
        "percentile": SelectPercentile,
        "k_best": SelectKBest,
        "fpr": SelectFpr,
        "fdr": SelectFdr,
        "fwe": SelectFwe,
    }

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "mode": [StrOptions(set(_selection_modes.keys()))],
        "param": [Interval(Real, 0, None, closed="left"), StrOptions({"all"})],
    }

    def __init__(self, score_func=f_classif, *, mode="percentile", param=1e-5):
        super().__init__(score_func=score_func)
        self.mode = mode
        self.param = param

    def _make_selector(self):
        # Build the concrete selector and forward `param` to its single
        # mode-specific parameter (the only one besides `score_func`).
        selector = self._selection_modes[self.mode](score_func=self.score_func)
        remaining = selector._get_param_names()
        remaining.remove("score_func")
        selector.set_params(**{remaining[0]: self.param})
        return selector

    def _more_tags(self):
        return {"preserves_dtype": [np.float64, np.float32]}

    def _check_params(self, X, y):
        # Delegate parameter validation to the concrete selector.
        self._make_selector()._check_params(X, y)

    def _get_support_mask(self):
        check_is_fitted(self)

        # Rebuild the concrete selector and graft the fitted state onto it
        # so that its mask logic applies unchanged.
        selector = self._make_selector()
        selector.pvalues_ = self.pvalues_
        selector.scores_ = self.scores_
        return selector._get_support_mask()
class VarianceThreshold(SelectorMixin, BaseEstimator):
    """Feature selector that removes all low-variance features.

    Looks only at the features (X), not the targets (y), so it can be used
    for unsupervised learning.

    Parameters
    ----------
    threshold : float, default=0
        Features with a training-set variance lower than this threshold
        will be removed. The default keeps all features with non-zero
        variance, i.e. removes features that are constant across samples.

    Attributes
    ----------
    variances_ : array, shape (n_features,)
        Variances of individual features.

    Notes
    -----
    Allows NaN in the input.
    Raises ValueError if no feature in X meets the variance threshold.
    """

    _parameter_constraints: dict = {
        "threshold": [Interval(Real, 0, None, closed="left")]
    }

    def __init__(self, threshold=0.0):
        self.threshold = threshold

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Learn empirical variances from X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Data from which to compute variances.

        y : any, default=None
            Ignored; present for pipeline compatibility.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = self._validate_data(
            X,
            accept_sparse=("csr", "csc"),
            dtype=np.float64,
            force_all_finite="allow-nan",
        )

        is_sparse = hasattr(X, "toarray")
        if is_sparse:
            _, self.variances_ = mean_variance_axis(X, axis=0)
            if self.threshold == 0:
                mins, maxes = min_max_axis(X, axis=0)
                peak_to_peaks = maxes - mins
        else:
            self.variances_ = np.nanvar(X, axis=0)
            if self.threshold == 0:
                peak_to_peaks = np.ptp(X, axis=0)

        if self.threshold == 0:
            # A constant feature can show a tiny nonzero variance from float
            # round-off; its peak-to-peak range is exactly zero, so take the
            # minimum of the two estimates.
            self.variances_ = np.nanmin(
                np.array([self.variances_, peak_to_peaks]), axis=0
            )

        nothing_selected = np.all(
            ~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)
        )
        if nothing_selected:
            msg = "No feature in X meets the variance threshold {0:.5f}"
            if X.shape[0] == 1:
                msg += " (X contains only one sample)"
            raise ValueError(msg.format(self.threshold))

        return self

    def _get_support_mask(self):
        check_is_fitted(self)
        return self.variances_ > self.threshold

    def _more_tags(self):
        return {"allow_nan": True}
b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b713a9f95fa3fbfadc450afa66aa4acc31f7667f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e086927635ac616f11cf60a663c97e1e56930bb8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24c7c2135cb7384f734c3406ed043279f3f93650 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..968b1f9e7cf83168a3b9785b12100512738da821 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..138b9e99284c710f2b12141e701de1f2996ee21d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ceac05d52ef4e243a1b049b9533f01b7dcb7fea Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea345cac2fa84f2ba4792fd787c5c5cc3932ce98 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d79025c90598716e196b5d31b7a14150dfeafb18 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py 
b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..5e2bb27bafd1767cec33b1c4255b2116e3f8a9e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py @@ -0,0 +1,153 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.base import BaseEstimator +from sklearn.feature_selection._base import SelectorMixin +from sklearn.utils.fixes import CSC_CONTAINERS + + +class StepSelector(SelectorMixin, BaseEstimator): + """Retain every `step` features (beginning with 0). + + If `step < 1`, then no features are selected. + """ + + def __init__(self, step=2): + self.step = step + + def fit(self, X, y=None): + X = self._validate_data(X, accept_sparse="csc") + return self + + def _get_support_mask(self): + mask = np.zeros(self.n_features_in_, dtype=bool) + if self.step >= 1: + mask[:: self.step] = True + return mask + + +support = [True, False] * 5 +support_inds = [0, 2, 4, 6, 8] +X = np.arange(20).reshape(2, 10) +Xt = np.arange(0, 20, 2).reshape(2, 5) +Xinv = X.copy() +Xinv[:, 1::2] = 0 +y = [0, 1] +feature_names = list("ABCDEFGHIJ") +feature_names_t = feature_names[::2] +feature_names_inv = np.array(feature_names) +feature_names_inv[1::2] = "" + + +def test_transform_dense(): + sel = StepSelector() + Xt_actual = sel.fit(X, y).transform(X) + Xt_actual2 = StepSelector().fit_transform(X, y) + assert_array_equal(Xt, Xt_actual) + assert_array_equal(Xt, Xt_actual2) + + # Check dtype matches + assert np.int32 == sel.transform(X.astype(np.int32)).dtype + assert np.float32 == sel.transform(X.astype(np.float32)).dtype + + # Check 1d list and other dtype: + names_t_actual = sel.transform([feature_names]) + assert_array_equal(feature_names_t, names_t_actual.ravel()) + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.transform(np.array([[1], [2]])) + + 
+@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_transform_sparse(csc_container): + X_sp = csc_container(X) + sel = StepSelector() + Xt_actual = sel.fit(X_sp).transform(X_sp) + Xt_actual2 = sel.fit_transform(X_sp) + assert_array_equal(Xt, Xt_actual.toarray()) + assert_array_equal(Xt, Xt_actual2.toarray()) + + # Check dtype matches + assert np.int32 == sel.transform(X_sp.astype(np.int32)).dtype + assert np.float32 == sel.transform(X_sp.astype(np.float32)).dtype + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.transform(np.array([[1], [2]])) + + +def test_inverse_transform_dense(): + sel = StepSelector() + Xinv_actual = sel.fit(X, y).inverse_transform(Xt) + assert_array_equal(Xinv, Xinv_actual) + + # Check dtype matches + assert np.int32 == sel.inverse_transform(Xt.astype(np.int32)).dtype + assert np.float32 == sel.inverse_transform(Xt.astype(np.float32)).dtype + + # Check 1d list and other dtype: + names_inv_actual = sel.inverse_transform([feature_names_t]) + assert_array_equal(feature_names_inv, names_inv_actual.ravel()) + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.inverse_transform(np.array([[1], [2]])) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_inverse_transform_sparse(csc_container): + X_sp = csc_container(X) + Xt_sp = csc_container(Xt) + sel = StepSelector() + Xinv_actual = sel.fit(X_sp).inverse_transform(Xt_sp) + assert_array_equal(Xinv, Xinv_actual.toarray()) + + # Check dtype matches + assert np.int32 == sel.inverse_transform(Xt_sp.astype(np.int32)).dtype + assert np.float32 == sel.inverse_transform(Xt_sp.astype(np.float32)).dtype + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.inverse_transform(np.array([[1], [2]])) + + +def test_get_support(): + sel = StepSelector() + sel.fit(X, y) + assert_array_equal(support, sel.get_support()) + assert_array_equal(support_inds, sel.get_support(indices=True)) + + +def 
test_output_dataframe(): + """Check output dtypes for dataframes is consistent with the input dtypes.""" + pd = pytest.importorskip("pandas") + + X = pd.DataFrame( + { + "a": pd.Series([1.0, 2.4, 4.5], dtype=np.float32), + "b": pd.Series(["a", "b", "a"], dtype="category"), + "c": pd.Series(["j", "b", "b"], dtype="category"), + "d": pd.Series([3.0, 2.4, 1.2], dtype=np.float64), + } + ) + + for step in [2, 3]: + sel = StepSelector(step=step).set_output(transform="pandas") + sel.fit(X) + + output = sel.transform(X) + for name, dtype in output.dtypes.items(): + assert dtype == X.dtypes[name] + + # step=0 will select nothing + sel0 = StepSelector(step=0).set_output(transform="pandas") + sel0.fit(X, y) + + msg = "No features were selected" + with pytest.warns(UserWarning, match=msg): + output0 = sel0.transform(X) + + assert_array_equal(output0.index, X.index) + assert output0.shape == (X.shape[0], 0) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py new file mode 100644 index 0000000000000000000000000000000000000000..c50def36f1b6c281e6c96019355b901bf4326a38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py @@ -0,0 +1,93 @@ +""" +Tests for chi2, currently the only feature selection function designed +specifically to work with sparse matrices. +""" + +import warnings + +import numpy as np +import pytest +import scipy.stats + +from sklearn.feature_selection import SelectKBest, chi2 +from sklearn.feature_selection._univariate_selection import _chisquare +from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +# Feature 0 is highly informative for class 1; +# feature 1 is the same everywhere; +# feature 2 is a bit informative for class 2. 
+X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]] +y = [0, 1, 2, 2] + + +def mkchi2(k): + """Make k-best chi2 selector""" + return SelectKBest(chi2, k=k) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_chi2(csr_container): + # Test Chi2 feature extraction + + chi2 = mkchi2(k=1).fit(X, y) + chi2 = mkchi2(k=1).fit(X, y) + assert_array_equal(chi2.get_support(indices=True), [0]) + assert_array_equal(chi2.transform(X), np.array(X)[:, [0]]) + + chi2 = mkchi2(k=2).fit(X, y) + assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) + + Xsp = csr_container(X, dtype=np.float64) + chi2 = mkchi2(k=2).fit(Xsp, y) + assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) + Xtrans = chi2.transform(Xsp) + assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2]) + + # == doesn't work on scipy.sparse matrices + Xtrans = Xtrans.toarray() + Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray() + assert_array_almost_equal(Xtrans, Xtrans2) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_chi2_coo(coo_container): + # Check that chi2 works with a COO matrix + # (as returned by CountVectorizer, DictVectorizer) + Xcoo = coo_container(X) + mkchi2(k=2).fit_transform(Xcoo, y) + # if we got here without an exception, we're safe + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_chi2_negative(csr_container): + # Check for proper error on negative numbers in the input X. 
+ X, y = [[0, 1], [-1e-20, 1]], [0, 1] + for X in (X, np.array(X), csr_container(X)): + with pytest.raises(ValueError): + chi2(X, y) + + +def test_chi2_unused_feature(): + # Unused feature should evaluate to NaN + # and should issue no runtime warning + with warnings.catch_warnings(record=True) as warned: + warnings.simplefilter("always") + chi, p = chi2([[1, 0], [0, 0]], [1, 0]) + for w in warned: + if "divide by zero" in repr(w): + raise AssertionError("Found unexpected warning %s" % w) + assert_array_equal(chi, [1, np.nan]) + assert_array_equal(p[1], np.nan) + + +def test_chisquare(): + # Test replacement for scipy.stats.chisquare against the original. + obs = np.array([[2.0, 2.0], [1.0, 1.0]]) + exp = np.array([[1.5, 1.5], [1.5, 1.5]]) + # call SciPy first because our version overwrites obs + chi_scp, p_scp = scipy.stats.chisquare(obs, exp) + chi_our, p_our = _chisquare(obs, exp) + + assert_array_almost_equal(chi_scp, chi_our) + assert_array_almost_equal(p_scp, p_our) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py new file mode 100644 index 0000000000000000000000000000000000000000..3815a88c374e8611dee49e78fe90bd2653efc969 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py @@ -0,0 +1,1017 @@ +""" +Todo: cross-check the F-value with stats model +""" +import itertools +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import sparse, stats + +from sklearn.datasets import load_iris, make_classification, make_regression +from sklearn.feature_selection import ( + GenericUnivariateSelect, + SelectFdr, + SelectFpr, + SelectFwe, + SelectKBest, + SelectPercentile, + chi2, + f_classif, + f_oneway, + f_regression, + mutual_info_classif, + mutual_info_regression, + r_regression, +) +from sklearn.utils import safe_mask 
+from sklearn.utils._testing import ( + _convert_container, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +############################################################################## +# Test the score functions + + +def test_f_oneway_vs_scipy_stats(): + # Test that our f_oneway gives the same result as scipy.stats + rng = np.random.RandomState(0) + X1 = rng.randn(10, 3) + X2 = 1 + rng.randn(10, 3) + f, pv = stats.f_oneway(X1, X2) + f2, pv2 = f_oneway(X1, X2) + assert np.allclose(f, f2) + assert np.allclose(pv, pv2) + + +def test_f_oneway_ints(): + # Smoke test f_oneway on integers: that it does raise casting errors + # with recent numpys + rng = np.random.RandomState(0) + X = rng.randint(10, size=(10, 10)) + y = np.arange(10) + fint, pint = f_oneway(X, y) + + # test that is gives the same result as with float + f, p = f_oneway(X.astype(float), y) + assert_array_almost_equal(f, fint, decimal=4) + assert_array_almost_equal(p, pint, decimal=4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_f_classif(csr_container): + # Test whether the F test yields meaningful results + # on a simple simulated classification problem + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + F, pv = f_classif(X, y) + F_sparse, pv_sparse = f_classif(csr_container(X), y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + assert_array_almost_equal(F_sparse, F) + assert_array_almost_equal(pv_sparse, pv) + + +@pytest.mark.parametrize("center", [True, False]) +def test_r_regression(center): + X, y = make_regression( + n_samples=2000, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + 
corr_coeffs = r_regression(X, y, center=center) + assert (-1 < corr_coeffs).all() + assert (corr_coeffs < 1).all() + + sparse_X = _convert_container(X, "sparse") + + sparse_corr_coeffs = r_regression(sparse_X, y, center=center) + assert_allclose(sparse_corr_coeffs, corr_coeffs) + + # Testing against numpy for reference + Z = np.hstack((X, y[:, np.newaxis])) + correlation_matrix = np.corrcoef(Z, rowvar=False) + np_corr_coeffs = correlation_matrix[:-1, -1] + assert_array_almost_equal(np_corr_coeffs, corr_coeffs, decimal=3) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_f_regression(csr_container): + # Test whether the F test yields meaningful results + # on a simple simulated regression problem + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + F, pv = f_regression(X, y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + + # with centering, compare with sparse + F, pv = f_regression(X, y, center=True) + F_sparse, pv_sparse = f_regression(csr_container(X), y, center=True) + assert_allclose(F_sparse, F) + assert_allclose(pv_sparse, pv) + + # again without centering, compare with sparse + F, pv = f_regression(X, y, center=False) + F_sparse, pv_sparse = f_regression(csr_container(X), y, center=False) + assert_allclose(F_sparse, F) + assert_allclose(pv_sparse, pv) + + +def test_f_regression_input_dtype(): + # Test whether f_regression returns the same value + # for any numeric data_type + rng = np.random.RandomState(0) + X = rng.rand(10, 20) + y = np.arange(10).astype(int) + + F1, pv1 = f_regression(X, y) + F2, pv2 = f_regression(X, y.astype(float)) + assert_allclose(F1, F2, 5) + assert_allclose(pv1, pv2, 5) + + +def test_f_regression_center(): + # Test whether f_regression preserves dof according to 'center' argument + # We use two centered variates so we have a simple relationship between + # 
F-score with variates centering and F-score without variates centering. + # Create toy example + X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean + n_samples = X.size + Y = np.ones(n_samples) + Y[::2] *= -1.0 + Y[0] = 0.0 # have Y mean being null + + F1, _ = f_regression(X, Y, center=True) + F2, _ = f_regression(X, Y, center=False) + assert_allclose(F1 * (n_samples - 1.0) / (n_samples - 2.0), F2) + assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS + + +@pytest.mark.parametrize( + "X, y, expected_corr_coef, force_finite", + [ + ( + # A feature in X is constant - forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([0.0, 0.32075]), + True, + ), + ( + # The target y is constant - forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([0.0, 0.0]), + True, + ), + ( + # A feature in X is constant - not forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([np.nan, 0.32075]), + False, + ), + ( + # The target y is constant - not forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([np.nan, np.nan]), + False, + ), + ], +) +def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite): + """Check the behaviour of `force_finite` for some corner cases with `r_regression`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15672 + """ + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + corr_coef = r_regression(X, y, force_finite=force_finite) + np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef) + + +@pytest.mark.parametrize( + "X, y, expected_f_statistic, expected_p_values, force_finite", + [ + ( + # A feature in X is constant - forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([0.0, 0.2293578]), + np.array([1.0, 0.67924985]), + True, + ), + ( + # The target y is constant - forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([0.0, 0.0]), + np.array([1.0, 1.0]), + True, + ), + ( + # Feature in X correlated with y - forcing finite + np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.finfo(np.float64).max, 0.845433]), + np.array([0.0, 0.454913]), + True, + ), + ( + # Feature in X anti-correlated with y - forcing finite + np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.finfo(np.float64).max, 0.845433]), + np.array([0.0, 0.454913]), + True, + ), + ( + # A feature in X is constant - not forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([np.nan, 0.2293578]), + np.array([np.nan, 0.67924985]), + False, + ), + ( + # The target y is constant - not forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([np.nan, np.nan]), + np.array([np.nan, np.nan]), + False, + ), + ( + # Feature in X correlated with y - not forcing finite + np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.inf, 0.845433]), + np.array([0.0, 0.454913]), + False, + ), + ( + # Feature in X anti-correlated with y - not forcing finite + np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), + np.array([0, 1, 2, 3]), + 
np.array([np.inf, 0.845433]), + np.array([0.0, 0.454913]), + False, + ), + ], +) +def test_f_regression_corner_case( + X, y, expected_f_statistic, expected_p_values, force_finite +): + """Check the behaviour of `force_finite` for some corner cases with `f_regression`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15672 + """ + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + f_statistic, p_values = f_regression(X, y, force_finite=force_finite) + np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic) + np.testing.assert_array_almost_equal(p_values, expected_p_values) + + +def test_f_classif_multi_class(): + # Test whether the F test yields meaningful results + # on a simple simulated classification problem + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + F, pv = f_classif(X, y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + + +def test_select_percentile_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the percentile heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = SelectPercentile(f_classif, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + 
+ +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_select_percentile_classif_sparse(csr_container): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the percentile heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + X = csr_container(X) + univariate_filter = SelectPercentile(f_classif, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r.toarray(), X_r2.toarray()) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + X_r2inv = univariate_filter.inverse_transform(X_r2) + assert sparse.issparse(X_r2inv) + support_mask = safe_mask(X_r2inv, support) + assert X_r2inv.shape == X.shape + assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) + # Check other columns are empty + assert X_r2inv.nnz == X_r.nnz + + +############################################################################## +# Test univariate selection in classification settings + + +def test_select_kbest_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the k best heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = SelectKBest(f_classif, k=5) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="k_best", param=5) + .fit(X, y) + .transform(X) + ) + 
assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +def test_select_kbest_all(): + # Test whether k="all" correctly returns all features. + X, y = make_classification( + n_samples=20, n_features=10, shuffle=False, random_state=0 + ) + + univariate_filter = SelectKBest(f_classif, k="all") + X_r = univariate_filter.fit(X, y).transform(X) + assert_array_equal(X, X_r) + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/24949 + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="k_best", param="all") + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + + +@pytest.mark.parametrize("dtype_in", [np.float32, np.float64]) +def test_select_kbest_zero(dtype_in): + # Test whether k=0 correctly returns no features. + X, y = make_classification( + n_samples=20, n_features=10, shuffle=False, random_state=0 + ) + X = X.astype(dtype_in) + + univariate_filter = SelectKBest(f_classif, k=0) + univariate_filter.fit(X, y) + support = univariate_filter.get_support() + gtruth = np.zeros(10, dtype=bool) + assert_array_equal(support, gtruth) + with pytest.warns(UserWarning, match="No features were selected"): + X_selected = univariate_filter.transform(X) + assert X_selected.shape == (20, 0) + assert X_selected.dtype == dtype_in + + +def test_select_heuristics_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the fdr, fwe and fpr heuristics + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = SelectFwe(f_classif, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + gtruth = np.zeros(20) + gtruth[:5] = 1 + for mode in ["fdr", "fpr", "fwe"]: + X_r2 = ( 
+ GenericUnivariateSelect(f_classif, mode=mode, param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + assert_allclose(support, gtruth) + + +############################################################################## +# Test univariate selection in regression settings + + +def assert_best_scores_kept(score_filter): + scores = score_filter.scores_ + support = score_filter.get_support() + assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum() :]) + + +def test_select_percentile_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the percentile heuristic + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectPercentile(f_regression, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + X_2 = X.copy() + X_2[:, np.logical_not(support)] = 0 + assert_array_equal(X_2, univariate_filter.inverse_transform(X_r)) + # Check inverse_transform respects dtype + assert_array_equal( + X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool)) + ) + + +def test_select_percentile_regression_full(): + # Test whether the relative univariate feature selection + # selects all features when '100%' is asked. 
+ X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectPercentile(f_regression, percentile=100) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="percentile", param=100) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.ones(20) + assert_array_equal(support, gtruth) + + +def test_select_kbest_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the k best heuristic + X, y = make_regression( + n_samples=200, + n_features=20, + n_informative=5, + shuffle=False, + random_state=0, + noise=10, + ) + + univariate_filter = SelectKBest(f_regression, k=5) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="k_best", param=5) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +def test_select_heuristics_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the fpr, fdr or fwe heuristics + X, y = make_regression( + n_samples=200, + n_features=20, + n_informative=5, + shuffle=False, + random_state=0, + noise=10, + ) + + univariate_filter = SelectFpr(f_regression, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + gtruth = np.zeros(20) + gtruth[:5] = 1 + for mode in ["fdr", "fpr", "fwe"]: + X_r2 = ( + GenericUnivariateSelect(f_regression, mode=mode, param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + assert_array_equal(support[:5], 
np.ones((5,), dtype=bool)) + assert np.sum(support[5:] == 1) < 3 + + +def test_boundary_case_ch2(): + # Test boundary case, and always aim to select 1 feature. + X = np.array([[10, 20], [20, 20], [20, 30]]) + y = np.array([[1], [0], [0]]) + scores, pvalues = chi2(X, y) + assert_array_almost_equal(scores, np.array([4.0, 0.71428571])) + assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472])) + + filter_fdr = SelectFdr(chi2, alpha=0.1) + filter_fdr.fit(X, y) + support_fdr = filter_fdr.get_support() + assert_array_equal(support_fdr, np.array([True, False])) + + filter_kbest = SelectKBest(chi2, k=1) + filter_kbest.fit(X, y) + support_kbest = filter_kbest.get_support() + assert_array_equal(support_kbest, np.array([True, False])) + + filter_percentile = SelectPercentile(chi2, percentile=50) + filter_percentile.fit(X, y) + support_percentile = filter_percentile.get_support() + assert_array_equal(support_percentile, np.array([True, False])) + + filter_fpr = SelectFpr(chi2, alpha=0.1) + filter_fpr.fit(X, y) + support_fpr = filter_fpr.get_support() + assert_array_equal(support_fpr, np.array([True, False])) + + filter_fwe = SelectFwe(chi2, alpha=0.1) + filter_fwe.fit(X, y) + support_fwe = filter_fwe.get_support() + assert_array_equal(support_fwe, np.array([True, False])) + + +@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1]) +@pytest.mark.parametrize("n_informative", [1, 5, 10]) +def test_select_fdr_regression(alpha, n_informative): + # Test that fdr heuristic actually has low FDR. 
+ def single_fdr(alpha, n_informative, random_state): + X, y = make_regression( + n_samples=150, + n_features=20, + n_informative=n_informative, + shuffle=False, + random_state=random_state, + noise=10, + ) + + with warnings.catch_warnings(record=True): + # Warnings can be raised when no features are selected + # (low alpha or very noisy data) + univariate_filter = SelectFdr(f_regression, alpha=alpha) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="fdr", param=alpha) + .fit(X, y) + .transform(X) + ) + + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + num_false_positives = np.sum(support[n_informative:] == 1) + num_true_positives = np.sum(support[:n_informative] == 1) + + if num_false_positives == 0: + return 0.0 + false_discovery_rate = num_false_positives / ( + num_true_positives + num_false_positives + ) + return false_discovery_rate + + # As per Benjamini-Hochberg, the expected false discovery rate + # should be lower than alpha: + # FDR = E(FP / (TP + FP)) <= alpha + false_discovery_rate = np.mean( + [single_fdr(alpha, n_informative, random_state) for random_state in range(100)] + ) + assert alpha >= false_discovery_rate + + # Make sure that the empirical false discovery rate increases + # with alpha: + if false_discovery_rate != 0: + assert false_discovery_rate > alpha / 10 + + +def test_select_fwe_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the fwe heuristic + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectFwe(f_regression, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="fwe", param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + 
gtruth[:5] = 1 + assert_array_equal(support[:5], np.ones((5,), dtype=bool)) + assert np.sum(support[5:] == 1) < 2 + + +def test_selectkbest_tiebreaking(): + # Test whether SelectKBest actually selects k features in case of ties. + # Prior to 0.11, SelectKBest would return more features than requested. + Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] + y = [1] + dummy_score = lambda X, y: (X[0], X[0]) + for X in Xs: + sel = SelectKBest(dummy_score, k=1) + X1 = ignore_warnings(sel.fit_transform)([X], y) + assert X1.shape[1] == 1 + assert_best_scores_kept(sel) + + sel = SelectKBest(dummy_score, k=2) + X2 = ignore_warnings(sel.fit_transform)([X], y) + assert X2.shape[1] == 2 + assert_best_scores_kept(sel) + + +def test_selectpercentile_tiebreaking(): + # Test if SelectPercentile selects the right n_features in case of ties. + Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] + y = [1] + dummy_score = lambda X, y: (X[0], X[0]) + for X in Xs: + sel = SelectPercentile(dummy_score, percentile=34) + X1 = ignore_warnings(sel.fit_transform)([X], y) + assert X1.shape[1] == 1 + assert_best_scores_kept(sel) + + sel = SelectPercentile(dummy_score, percentile=67) + X2 = ignore_warnings(sel.fit_transform)([X], y) + assert X2.shape[1] == 2 + assert_best_scores_kept(sel) + + +def test_tied_pvalues(): + # Test whether k-best and percentiles work with tied pvalues from chi2. + # chi2 will return the same p-values for the following features, but it + # will return different scores. + X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) + y = [0, 1] + + for perm in itertools.permutations((0, 1, 2)): + X = X0[:, perm] + Xt = SelectKBest(chi2, k=2).fit_transform(X, y) + assert Xt.shape == (2, 2) + assert 9998 not in Xt + + Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) + assert Xt.shape == (2, 2) + assert 9998 not in Xt + + +def test_scorefunc_multilabel(): + # Test whether k-best and percentiles works with multilabels with chi2. 
+ + X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]]) + y = [[1, 1], [0, 1], [1, 0]] + + Xt = SelectKBest(chi2, k=2).fit_transform(X, y) + assert Xt.shape == (3, 2) + assert 0 not in Xt + + Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) + assert Xt.shape == (3, 2) + assert 0 not in Xt + + +def test_tied_scores(): + # Test for stable sorting in k-best with tied scores. + X_train = np.array([[0, 0, 0], [1, 1, 1]]) + y_train = [0, 1] + + for n_features in [1, 2, 3]: + sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) + X_test = sel.transform([[0, 1, 2]]) + assert_array_equal(X_test[0], np.arange(3)[-n_features:]) + + +def test_nans(): + # Assert that SelectKBest and SelectPercentile can handle NaNs. + # First feature has zero variance to confuse f_classif (ANOVA) and + # make it return a NaN. + X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] + y = [1, 0, 1] + + for select in ( + SelectKBest(f_classif, k=2), + SelectPercentile(f_classif, percentile=67), + ): + ignore_warnings(select.fit)(X, y) + assert_array_equal(select.get_support(indices=True), np.array([1, 2])) + + +def test_invalid_k(): + X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] + y = [1, 0, 1] + + msg = "k=4 is greater than n_features=3. All the features will be returned." + with pytest.warns(UserWarning, match=msg): + SelectKBest(k=4).fit(X, y) + with pytest.warns(UserWarning, match=msg): + GenericUnivariateSelect(mode="k_best", param=4).fit(X, y) + + +def test_f_classif_constant_feature(): + # Test that f_classif warns if a feature is constant throughout. 
+ + X, y = make_classification(n_samples=10, n_features=5) + X[:, 0] = 2.0 + with pytest.warns(UserWarning): + f_classif(X, y) + + +def test_no_feature_selected(): + rng = np.random.RandomState(0) + + # Generate random uncorrelated data: a strict univariate test should + # rejects all the features + X = rng.rand(40, 10) + y = rng.randint(0, 4, size=40) + strict_selectors = [ + SelectFwe(alpha=0.01).fit(X, y), + SelectFdr(alpha=0.01).fit(X, y), + SelectFpr(alpha=0.01).fit(X, y), + SelectPercentile(percentile=0).fit(X, y), + SelectKBest(k=0).fit(X, y), + ] + for selector in strict_selectors: + assert_array_equal(selector.get_support(), np.zeros(10)) + with pytest.warns(UserWarning, match="No features were selected"): + X_selected = selector.transform(X) + assert X_selected.shape == (40, 0) + + +def test_mutual_info_classif(): + X, y = make_classification( + n_samples=100, + n_features=5, + n_informative=1, + n_redundant=1, + n_repeated=0, + n_classes=2, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + # Test in KBest mode. + univariate_filter = SelectKBest(mutual_info_classif, k=2) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(mutual_info_classif, mode="k_best", param=2) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(5) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + # Test in Percentile mode. 
+ univariate_filter = SelectPercentile(mutual_info_classif, percentile=40) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(mutual_info_classif, mode="percentile", param=40) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(5) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + +def test_mutual_info_regression(): + X, y = make_regression( + n_samples=100, + n_features=10, + n_informative=2, + shuffle=False, + random_state=0, + noise=10, + ) + + # Test in KBest mode. + univariate_filter = SelectKBest(mutual_info_regression, k=2) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(mutual_info_regression, mode="k_best", param=2) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(10) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + # Test in Percentile mode. + univariate_filter = SelectPercentile(mutual_info_regression, percentile=20) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(mutual_info_regression, mode="percentile", param=20) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(10) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + +def test_dataframe_output_dtypes(): + """Check that the output datafarme dtypes are the same as the input. + + Non-regression test for gh-24860. 
+ """ + pd = pytest.importorskip("pandas") + + X, y = load_iris(return_X_y=True, as_frame=True) + X = X.astype( + { + "petal length (cm)": np.float32, + "petal width (cm)": np.float64, + } + ) + X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10) + + column_order = X.columns + + def selector(X, y): + ranking = { + "sepal length (cm)": 1, + "sepal width (cm)": 2, + "petal length (cm)": 3, + "petal width (cm)": 4, + "petal_width_binned": 5, + } + return np.asarray([ranking[name] for name in column_order]) + + univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas") + output = univariate_filter.fit_transform(X, y) + + assert_array_equal( + output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"] + ) + for name, dtype in output.dtypes.items(): + assert dtype == X.dtypes[name] + + +@pytest.mark.parametrize( + "selector", + [ + SelectKBest(k=4), + SelectPercentile(percentile=80), + GenericUnivariateSelect(mode="k_best", param=4), + GenericUnivariateSelect(mode="percentile", param=80), + ], +) +def test_unsupervised_filter(selector): + """Check support for unsupervised feature selection for the filter that could + require only `X`. 
+ """ + rng = np.random.RandomState(0) + X = rng.randn(10, 5) + + def score_func(X, y=None): + return np.array([1, 1, 1, 1, 0]) + + selector.set_params(score_func=score_func) + selector.fit(X) + X_trans = selector.transform(X) + assert_allclose(X_trans, X[:, :4]) + X_trans = selector.fit_transform(X) + assert_allclose(X_trans, X[:, :4]) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py new file mode 100644 index 0000000000000000000000000000000000000000..3573b7a078294f6284920c5f387fce5f9625906b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py @@ -0,0 +1,684 @@ +import re +import warnings +from unittest.mock import Mock + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator +from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression +from sklearn.datasets import make_friedman1 +from sklearn.decomposition import PCA +from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier +from sklearn.exceptions import NotFittedError +from sklearn.feature_selection import SelectFromModel +from sklearn.linear_model import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + LinearRegression, + LogisticRegression, + PassiveAggressiveClassifier, + SGDClassifier, +) +from sklearn.pipeline import make_pipeline +from sklearn.svm import LinearSVC +from sklearn.utils._testing import ( + MinimalClassifier, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + skip_if_32bit, +) + + +class NaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": True} + + +class NoNaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": False} + + +class NaNTagRandomForest(RandomForestClassifier): + def _more_tags(self): + return {"allow_nan": True} + + +iris = datasets.load_iris() 
+data, y = iris.data, iris.target +rng = np.random.RandomState(0) + + +def test_invalid_input(): + clf = SGDClassifier( + alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None + ) + for threshold in ["gobbledigook", ".5 * gobbledigook"]: + model = SelectFromModel(clf, threshold=threshold) + model.fit(data, y) + with pytest.raises(ValueError): + model.transform(data) + + +def test_input_estimator_unchanged(): + # Test that SelectFromModel fits on a clone of the estimator. + est = RandomForestClassifier() + transformer = SelectFromModel(estimator=est) + transformer.fit(data, y) + assert transformer.estimator is est + + +@pytest.mark.parametrize( + "max_features, err_type, err_msg", + [ + ( + data.shape[1] + 1, + ValueError, + "max_features ==", + ), + ( + lambda X: 1.5, + TypeError, + "max_features must be an instance of int, not float.", + ), + ( + lambda X: data.shape[1] + 1, + ValueError, + "max_features ==", + ), + ( + lambda X: -1, + ValueError, + "max_features ==", + ), + ], +) +def test_max_features_error(max_features, err_type, err_msg): + err_msg = re.escape(err_msg) + clf = RandomForestClassifier(n_estimators=5, random_state=0) + + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + with pytest.raises(err_type, match=err_msg): + transformer.fit(data, y) + + +@pytest.mark.parametrize("max_features", [0, 2, data.shape[1], None]) +def test_inferred_max_features_integer(max_features): + """Check max_features_ and output shape for integer max_features.""" + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(data, y) + if max_features is not None: + assert transformer.max_features_ == max_features + assert X_trans.shape[1] == transformer.max_features_ + else: + assert not hasattr(transformer, "max_features_") + assert X_trans.shape[1] == data.shape[1] + + 
+@pytest.mark.parametrize( + "max_features", + [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], +) +def test_inferred_max_features_callable(max_features): + """Check max_features_ and output shape for callable max_features.""" + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(data, y) + assert transformer.max_features_ == max_features(data) + assert X_trans.shape[1] == transformer.max_features_ + + +@pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2]) +def test_max_features_array_like(max_features): + X = [ + [0.87, -1.34, 0.31], + [-2.79, -0.02, -0.85], + [-1.34, -0.48, -2.55], + [1.92, 1.48, 0.65], + ] + y = [0, 1, 0, 1] + + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(X, y) + assert X_trans.shape[1] == transformer.max_features_ + + +@pytest.mark.parametrize( + "max_features", + [lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1], +) +def test_max_features_callable_data(max_features): + """Tests that the callable passed to `fit` is called on X.""" + clf = RandomForestClassifier(n_estimators=50, random_state=0) + m = Mock(side_effect=max_features) + transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf) + transformer.fit_transform(data, y) + m.assert_called_with(data) + + +class FixedImportanceEstimator(BaseEstimator): + def __init__(self, importances): + self.importances = importances + + def fit(self, X, y=None): + self.feature_importances_ = np.array(self.importances) + + +def test_max_features(): + # Test max_features parameter using various values + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + 
shuffle=False, + random_state=0, + ) + max_features = X.shape[1] + est = RandomForestClassifier(n_estimators=50, random_state=0) + + transformer1 = SelectFromModel(estimator=est, threshold=-np.inf) + transformer2 = SelectFromModel( + estimator=est, max_features=max_features, threshold=-np.inf + ) + X_new1 = transformer1.fit_transform(X, y) + X_new2 = transformer2.fit_transform(X, y) + assert_allclose(X_new1, X_new2) + + # Test max_features against actual model. + transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42)) + X_new1 = transformer1.fit_transform(X, y) + scores1 = np.abs(transformer1.estimator_.coef_) + candidate_indices1 = np.argsort(-scores1, kind="mergesort") + + for n_features in range(1, X_new1.shape[1] + 1): + transformer2 = SelectFromModel( + estimator=Lasso(alpha=0.025, random_state=42), + max_features=n_features, + threshold=-np.inf, + ) + X_new2 = transformer2.fit_transform(X, y) + scores2 = np.abs(transformer2.estimator_.coef_) + candidate_indices2 = np.argsort(-scores2, kind="mergesort") + assert_allclose( + X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]] + ) + assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_) + + +def test_max_features_tiebreak(): + # Test if max_features can break tie among feature importance + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + max_features = X.shape[1] + + feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1]) + for n_features in range(1, max_features + 1): + transformer = SelectFromModel( + FixedImportanceEstimator(feature_importances), + max_features=n_features, + threshold=-np.inf, + ) + X_new = transformer.fit_transform(X, y) + selected_feature_indices = np.where(transformer._get_support_mask())[0] + assert_array_equal(selected_feature_indices, np.arange(n_features)) + assert X_new.shape[1] == n_features + 
+ +def test_threshold_and_max_features(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + est = RandomForestClassifier(n_estimators=50, random_state=0) + + transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf) + X_new1 = transformer1.fit_transform(X, y) + + transformer2 = SelectFromModel(estimator=est, threshold=0.04) + X_new2 = transformer2.fit_transform(X, y) + + transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04) + X_new3 = transformer3.fit_transform(X, y) + assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1]) + selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :]) + assert_allclose(X_new3, X[:, selected_indices[0]]) + + +@skip_if_32bit +def test_feature_importances(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + est = RandomForestClassifier(n_estimators=50, random_state=0) + for threshold, func in zip(["mean", "median"], [np.mean, np.median]): + transformer = SelectFromModel(estimator=est, threshold=threshold) + transformer.fit(X, y) + assert hasattr(transformer.estimator_, "feature_importances_") + + X_new = transformer.transform(X) + assert X_new.shape[1] < X.shape[1] + importances = transformer.estimator_.feature_importances_ + + feature_mask = np.abs(importances) > func(importances) + assert_array_almost_equal(X_new, X[:, feature_mask]) + + +def test_sample_weight(): + # Ensure sample weights are passed to underlying estimator + X, y = datasets.make_classification( + n_samples=100, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # Check with sample weights + sample_weight = np.ones(y.shape) + sample_weight[y == 1] *= 100 + + est = LogisticRegression(random_state=0, 
fit_intercept=False) + transformer = SelectFromModel(estimator=est) + transformer.fit(X, y, sample_weight=None) + mask = transformer._get_support_mask() + transformer.fit(X, y, sample_weight=sample_weight) + weighted_mask = transformer._get_support_mask() + assert not np.all(weighted_mask == mask) + transformer.fit(X, y, sample_weight=3 * sample_weight) + reweighted_mask = transformer._get_support_mask() + assert np.all(weighted_mask == reweighted_mask) + + +@pytest.mark.parametrize( + "estimator", + [ + Lasso(alpha=0.1, random_state=42), + LassoCV(random_state=42), + ElasticNet(l1_ratio=1, random_state=42), + ElasticNetCV(l1_ratio=[1], random_state=42), + ], +) +def test_coef_default_threshold(estimator): + X, y = datasets.make_classification( + n_samples=100, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # For the Lasso and related models, the threshold defaults to 1e-5 + transformer = SelectFromModel(estimator=estimator) + transformer.fit(X, y) + X_new = transformer.transform(X) + mask = np.abs(transformer.estimator_.coef_) > 1e-5 + assert_array_almost_equal(X_new, X[:, mask]) + + +@skip_if_32bit +def test_2d_coef(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + n_classes=4, + ) + + est = LogisticRegression() + for threshold, func in zip(["mean", "median"], [np.mean, np.median]): + for order in [1, 2, np.inf]: + # Fit SelectFromModel a multi-class problem + transformer = SelectFromModel( + estimator=LogisticRegression(), threshold=threshold, norm_order=order + ) + transformer.fit(X, y) + assert hasattr(transformer.estimator_, "coef_") + X_new = transformer.transform(X) + assert X_new.shape[1] < X.shape[1] + + # Manually check that the norm is correctly performed + est.fit(X, y) + importances = np.linalg.norm(est.coef_, axis=0, ord=order) + feature_mask = importances > 
func(importances) + assert_array_almost_equal(X_new, X[:, feature_mask]) + + +def test_partial_fit(): + est = PassiveAggressiveClassifier( + random_state=0, shuffle=False, max_iter=5, tol=None + ) + transformer = SelectFromModel(estimator=est) + transformer.partial_fit(data, y, classes=np.unique(y)) + old_model = transformer.estimator_ + transformer.partial_fit(data, y, classes=np.unique(y)) + new_model = transformer.estimator_ + assert old_model is new_model + + X_transform = transformer.transform(data) + transformer.fit(np.vstack((data, data)), np.concatenate((y, y))) + assert_array_almost_equal(X_transform, transformer.transform(data)) + + # check that if est doesn't have partial_fit, neither does SelectFromModel + transformer = SelectFromModel(estimator=RandomForestClassifier()) + assert not hasattr(transformer, "partial_fit") + + +def test_calling_fit_reinitializes(): + est = LinearSVC(dual="auto", random_state=0) + transformer = SelectFromModel(estimator=est) + transformer.fit(data, y) + transformer.set_params(estimator__C=100) + transformer.fit(data, y) + assert transformer.estimator_.C == 100 + + +def test_prefit(): + # Test all possible combinations of the prefit parameter. + + # Passing a prefit parameter with the selected model + # and fitting a unfit model with prefit=False should give same results. 
+ clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) + model = SelectFromModel(clf) + model.fit(data, y) + X_transform = model.transform(data) + clf.fit(data, y) + model = SelectFromModel(clf, prefit=True) + assert_array_almost_equal(model.transform(data), X_transform) + model.fit(data, y) + assert model.estimator_ is not clf + + # Check that the model is rewritten if prefit=False and a fitted model is + # passed + model = SelectFromModel(clf, prefit=False) + model.fit(data, y) + assert_array_almost_equal(model.transform(data), X_transform) + + # Check that passing an unfitted estimator with `prefit=True` raises a + # `ValueError` + clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) + model = SelectFromModel(clf, prefit=True) + err_msg = "When `prefit=True`, `estimator` is expected to be a fitted estimator." + with pytest.raises(NotFittedError, match=err_msg): + model.fit(data, y) + with pytest.raises(NotFittedError, match=err_msg): + model.partial_fit(data, y) + with pytest.raises(NotFittedError, match=err_msg): + model.transform(data) + + # Check that the internal parameters of prefitted model are not changed + # when calling `fit` or `partial_fit` with `prefit=True` + clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, tol=None).fit(data, y) + model = SelectFromModel(clf, prefit=True) + model.fit(data, y) + assert_allclose(model.estimator_.coef_, clf.coef_) + model.partial_fit(data, y) + assert_allclose(model.estimator_.coef_, clf.coef_) + + +def test_prefit_max_features(): + """Check the interaction between `prefit` and `max_features`.""" + # case 1: an error should be raised at `transform` if `fit` was not called to + # validate the attributes + estimator = RandomForestClassifier(n_estimators=5, random_state=0) + estimator.fit(data, y) + model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1]) + + err_msg = ( + "When `prefit=True` and `max_features` is a callable, 
call `fit` " + "before calling `transform`." + ) + with pytest.raises(NotFittedError, match=err_msg): + model.transform(data) + + # case 2: `max_features` is not validated and different from an integer + # FIXME: we cannot validate the upper bound of the attribute at transform + # and we should force calling `fit` if we intend to force the attribute + # to have such an upper bound. + max_features = 2.5 + model.set_params(max_features=max_features) + with pytest.raises(ValueError, match="`max_features` must be an integer"): + model.transform(data) + + +def test_prefit_get_feature_names_out(): + """Check the interaction between prefit and the feature names.""" + clf = RandomForestClassifier(n_estimators=2, random_state=0) + clf.fit(data, y) + model = SelectFromModel(clf, prefit=True, max_features=1) + + name = type(model).__name__ + err_msg = ( + f"This {name} instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=err_msg): + model.get_feature_names_out() + + model.fit(data, y) + feature_names = model.get_feature_names_out() + assert feature_names == ["x3"] + + +def test_threshold_string(): + est = RandomForestClassifier(n_estimators=50, random_state=0) + model = SelectFromModel(est, threshold="0.5*mean") + model.fit(data, y) + X_transform = model.transform(data) + + # Calculate the threshold from the estimator directly. + est.fit(data, y) + threshold = 0.5 * np.mean(est.feature_importances_) + mask = est.feature_importances_ > threshold + assert_array_almost_equal(X_transform, data[:, mask]) + + +def test_threshold_without_refitting(): + # Test that the threshold can be set without refitting the model. + clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) + model = SelectFromModel(clf, threshold="0.1 * mean") + model.fit(data, y) + X_transform = model.transform(data) + + # Set a higher threshold to filter out more features. 
+ model.threshold = "1.0 * mean" + assert X_transform.shape[1] > model.transform(data).shape[1] + + +def test_fit_accepts_nan_inf(): + # Test that fit doesn't check for np.inf and np.nan values. + clf = HistGradientBoostingClassifier(random_state=0) + + model = SelectFromModel(estimator=clf) + + nan_data = data.copy() + nan_data[0] = np.nan + nan_data[1] = np.inf + + model.fit(data, y) + + +def test_transform_accepts_nan_inf(): + # Test that transform doesn't check for np.inf and np.nan values. + clf = NaNTagRandomForest(n_estimators=100, random_state=0) + nan_data = data.copy() + + model = SelectFromModel(estimator=clf) + model.fit(nan_data, y) + + nan_data[0] = np.nan + nan_data[1] = np.inf + + model.transform(nan_data) + + +def test_allow_nan_tag_comes_from_estimator(): + allow_nan_est = NaNTag() + model = SelectFromModel(estimator=allow_nan_est) + assert model._get_tags()["allow_nan"] is True + + no_nan_est = NoNaNTag() + model = SelectFromModel(estimator=no_nan_est) + assert model._get_tags()["allow_nan"] is False + + +def _pca_importances(pca_estimator): + return np.abs(pca_estimator.explained_variance_) + + +@pytest.mark.parametrize( + "estimator, importance_getter", + [ + ( + make_pipeline(PCA(random_state=0), LogisticRegression()), + "named_steps.logisticregression.coef_", + ), + (PCA(random_state=0), _pca_importances), + ], +) +def test_importance_getter(estimator, importance_getter): + selector = SelectFromModel( + estimator, threshold="mean", importance_getter=importance_getter + ) + selector.fit(data, y) + assert selector.transform(data).shape[1] == 1 + + +@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression]) +def test_select_from_model_pls(PLSEstimator): + """Check the behaviour of SelectFromModel with PLS estimators. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12410 + """ + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = PLSEstimator(n_components=1) + model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y) + assert model.score(X, y) > 0.5 + + +def test_estimator_does_not_support_feature_names(): + """SelectFromModel works with estimators that do not support feature_names_in_. + + Non-regression test for #21949. + """ + pytest.importorskip("pandas") + X, y = datasets.load_iris(as_frame=True, return_X_y=True) + all_feature_names = set(X.columns) + + def importance_getter(estimator): + return np.arange(X.shape[1]) + + selector = SelectFromModel( + MinimalClassifier(), importance_getter=importance_getter + ).fit(X, y) + + # selector learns the feature names itself + assert_array_equal(selector.feature_names_in_, X.columns) + + feature_names_out = set(selector.get_feature_names_out()) + assert feature_names_out < all_feature_names + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + + selector.transform(X.iloc[1:3]) + + +@pytest.mark.parametrize( + "error, err_msg, max_features", + ( + [ValueError, "max_features == 10, must be <= 4", 10], + [ValueError, "max_features == 5, must be <= 4", lambda x: x.shape[1] + 1], + ), +) +def test_partial_fit_validate_max_features(error, err_msg, max_features): + """Test that partial_fit from SelectFromModel validates `max_features`.""" + X, y = datasets.make_classification( + n_samples=100, + n_features=4, + random_state=0, + ) + + with pytest.raises(error, match=err_msg): + SelectFromModel( + estimator=SGDClassifier(), max_features=max_features + ).partial_fit(X, y, classes=[0, 1]) + + +@pytest.mark.parametrize("as_frame", [True, False]) +def test_partial_fit_validate_feature_names(as_frame): + """Test that partial_fit from SelectFromModel validates `feature_names_in_`.""" + pytest.importorskip("pandas") + X, y = 
datasets.load_iris(as_frame=as_frame, return_X_y=True) + + selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit( + X, y, classes=[0, 1, 2] + ) + if as_frame: + assert_array_equal(selector.feature_names_in_, X.columns) + else: + assert not hasattr(selector, "feature_names_in_") + + +def test_from_model_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the estimator + does not implement the `partial_fit` method, which is decorated with + `available_if`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + # `LinearRegression` does not implement 'partial_fit' and should raise an + # AttributeError + from_model = SelectFromModel(estimator=LinearRegression()) + + outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'" + inner_msg = "'LinearRegression' object has no attribute 'partial_fit'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + from_model.fit(data, y).partial_fit(data) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py new file mode 100644 index 0000000000000000000000000000000000000000..26367544baa539d8daa7b6508f4ae23cbf4da31c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py @@ -0,0 +1,254 @@ +import numpy as np +import pytest + +from sklearn.feature_selection import mutual_info_classif, mutual_info_regression +from sklearn.feature_selection._mutual_info import _compute_mi +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_compute_mi_dd(): + # In discrete case computations are 
straightforward and can be done + # by hand on given vectors. + x = np.array([0, 1, 1, 0, 0]) + y = np.array([1, 0, 0, 0, 1]) + + H_x = H_y = -(3 / 5) * np.log(3 / 5) - (2 / 5) * np.log(2 / 5) + H_xy = -1 / 5 * np.log(1 / 5) - 2 / 5 * np.log(2 / 5) - 2 / 5 * np.log(2 / 5) + I_xy = H_x + H_y - H_xy + + assert_allclose(_compute_mi(x, y, x_discrete=True, y_discrete=True), I_xy) + + +def test_compute_mi_cc(global_dtype): + # For two continuous variables a good approach is to test on bivariate + # normal distribution, where mutual information is known. + + # Mean of the distribution, irrelevant for mutual information. + mean = np.zeros(2) + + # Setup covariance matrix with correlation coeff. equal 0.5. + sigma_1 = 1 + sigma_2 = 10 + corr = 0.5 + cov = np.array( + [ + [sigma_1**2, corr * sigma_1 * sigma_2], + [corr * sigma_1 * sigma_2, sigma_2**2], + ] + ) + + # True theoretical mutual information. + I_theory = np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov)) + + rng = check_random_state(0) + Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False) + + x, y = Z[:, 0], Z[:, 1] + + # Theory and computed values won't be very close + # We here check with a large relative tolerance + for n_neighbors in [3, 5, 7]: + I_computed = _compute_mi( + x, y, x_discrete=False, y_discrete=False, n_neighbors=n_neighbors + ) + assert_allclose(I_computed, I_theory, rtol=1e-1) + + +def test_compute_mi_cd(global_dtype): + # To test define a joint distribution as follows: + # p(x, y) = p(x) p(y | x) + # X ~ Bernoulli(p) + # (Y | x = 0) ~ Uniform(-1, 1) + # (Y | x = 1) ~ Uniform(0, 2) + + # Use the following formula for mutual information: + # I(X; Y) = H(Y) - H(Y | X) + # Two entropies can be computed by hand: + # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2) + # H(Y | X) = ln(2) + + # Now we need to implement sampling from out distribution, which is + # done easily using conditional distribution logic. 
+ + n_samples = 1000 + rng = check_random_state(0) + + for p in [0.3, 0.5, 0.7]: + x = rng.uniform(size=n_samples) > p + + y = np.empty(n_samples, global_dtype) + mask = x == 0 + y[mask] = rng.uniform(-1, 1, size=np.sum(mask)) + y[~mask] = rng.uniform(0, 2, size=np.sum(~mask)) + + I_theory = -0.5 * ( + (1 - p) * np.log(0.5 * (1 - p)) + p * np.log(0.5 * p) + np.log(0.5) + ) - np.log(2) + + # Assert the same tolerance. + for n_neighbors in [3, 5, 7]: + I_computed = _compute_mi( + x, y, x_discrete=True, y_discrete=False, n_neighbors=n_neighbors + ) + assert_allclose(I_computed, I_theory, rtol=1e-1) + + +def test_compute_mi_cd_unique_label(global_dtype): + # Test that adding unique label doesn't change MI. + n_samples = 100 + x = np.random.uniform(size=n_samples) > 0.5 + + y = np.empty(n_samples, global_dtype) + mask = x == 0 + y[mask] = np.random.uniform(-1, 1, size=np.sum(mask)) + y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask)) + + mi_1 = _compute_mi(x, y, x_discrete=True, y_discrete=False) + + x = np.hstack((x, 2)) + y = np.hstack((y, 10)) + mi_2 = _compute_mi(x, y, x_discrete=True, y_discrete=False) + + assert_allclose(mi_1, mi_2) + + +# We are going test that feature ordering by MI matches our expectations. +def test_mutual_info_classif_discrete(global_dtype): + X = np.array( + [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype + ) + y = np.array([0, 1, 2, 2, 1]) + + # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly + # informative. + mi = mutual_info_classif(X, y, discrete_features=True) + assert_array_equal(np.argsort(-mi), np.array([0, 2, 1])) + + +def test_mutual_info_regression(global_dtype): + # We generate sample from multivariate normal distribution, using + # transformation from initially uncorrelated variables. The zero + # variables after transformation is selected as the target vector, + # it has the strongest correlation with the variable 2, and + # the weakest correlation with the variable 1. 
+ T = np.array([[1, 0.5, 2, 1], [0, 1, 0.1, 0.0], [0, 0.1, 1, 0.1], [0, 0.1, 0.1, 1]]) + cov = T.dot(T.T) + mean = np.zeros(4) + + rng = check_random_state(0) + Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False) + X = Z[:, 1:] + y = Z[:, 0] + + mi = mutual_info_regression(X, y, random_state=0) + assert_array_equal(np.argsort(-mi), np.array([1, 2, 0])) + # XXX: should mutual_info_regression be fixed to avoid + # up-casting float32 inputs to float64? + assert mi.dtype == np.float64 + + +def test_mutual_info_classif_mixed(global_dtype): + # Here the target is discrete and there are two continuous and one + # discrete feature. The idea of this test is clear from the code. + rng = check_random_state(0) + X = rng.rand(1000, 3).astype(global_dtype, copy=False) + X[:, 1] += X[:, 0] + y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int) + X[:, 2] = X[:, 2] > 0.5 + + mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3, random_state=0) + assert_array_equal(np.argsort(-mi), [2, 0, 1]) + for n_neighbors in [5, 7, 9]: + mi_nn = mutual_info_classif( + X, y, discrete_features=[2], n_neighbors=n_neighbors, random_state=0 + ) + # Check that the continuous values have an higher MI with greater + # n_neighbors + assert mi_nn[0] > mi[0] + assert mi_nn[1] > mi[1] + # The n_neighbors should not have any effect on the discrete value + # The MI should be the same + assert mi_nn[2] == mi[2] + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_mutual_info_options(global_dtype, csr_container): + X = np.array( + [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype + ) + y = np.array([0, 1, 2, 2, 1], dtype=global_dtype) + X_csr = csr_container(X) + + for mutual_info in (mutual_info_regression, mutual_info_classif): + with pytest.raises(ValueError): + mutual_info(X_csr, y, discrete_features=False) + with pytest.raises(ValueError): + mutual_info(X, y, discrete_features="manual") + with 
pytest.raises(ValueError): + mutual_info(X_csr, y, discrete_features=[True, False, True]) + with pytest.raises(IndexError): + mutual_info(X, y, discrete_features=[True, False, True, False]) + with pytest.raises(IndexError): + mutual_info(X, y, discrete_features=[1, 4]) + + mi_1 = mutual_info(X, y, discrete_features="auto", random_state=0) + mi_2 = mutual_info(X, y, discrete_features=False, random_state=0) + mi_3 = mutual_info(X_csr, y, discrete_features="auto", random_state=0) + mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0) + mi_5 = mutual_info(X, y, discrete_features=[True, False, True], random_state=0) + mi_6 = mutual_info(X, y, discrete_features=[0, 2], random_state=0) + + assert_allclose(mi_1, mi_2) + assert_allclose(mi_3, mi_4) + assert_allclose(mi_5, mi_6) + + assert not np.allclose(mi_1, mi_3) + + +@pytest.mark.parametrize("correlated", [True, False]) +def test_mutual_information_symmetry_classif_regression(correlated, global_random_seed): + """Check that `mutual_info_classif` and `mutual_info_regression` are + symmetric by switching the target `y` as `feature` in `X` and vice + versa. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23720 + """ + rng = np.random.RandomState(global_random_seed) + n = 100 + d = rng.randint(10, size=n) + + if correlated: + c = d.astype(np.float64) + else: + c = rng.normal(0, 1, size=n) + + mi_classif = mutual_info_classif( + c[:, None], d, discrete_features=[False], random_state=global_random_seed + ) + + mi_regression = mutual_info_regression( + d[:, None], c, discrete_features=[True], random_state=global_random_seed + ) + + assert mi_classif == pytest.approx(mi_regression) + + +def test_mutual_info_regression_X_int_dtype(global_random_seed): + """Check that results agree when X is integer dtype and float dtype. + + Non-regression test for Issue #26696. 
+ """ + rng = np.random.RandomState(global_random_seed) + X = rng.randint(100, size=(100, 10)) + X_float = X.astype(np.float64, copy=True) + y = rng.randint(100, size=100) + + expected = mutual_info_regression(X_float, y, random_state=global_random_seed) + result = mutual_info_regression(X, y, random_state=global_random_seed) + assert_allclose(result, expected) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py new file mode 100644 index 0000000000000000000000000000000000000000..e3edb0e7b5d213dc4b9445a3cf971a1bc4d28398 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py @@ -0,0 +1,615 @@ +""" +Testing Recursive feature elimination +""" + +from operator import attrgetter + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal + +from sklearn.base import BaseEstimator, ClassifierMixin +from sklearn.compose import TransformedTargetRegressor +from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression +from sklearn.datasets import load_iris, make_friedman1 +from sklearn.ensemble import RandomForestClassifier +from sklearn.feature_selection import RFE, RFECV +from sklearn.impute import SimpleImputer +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.metrics import get_scorer, make_scorer, zero_one_loss +from sklearn.model_selection import GroupKFold, cross_val_score +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC, SVR, LinearSVR +from sklearn.utils import check_random_state +from sklearn.utils._testing import ignore_warnings +from sklearn.utils.fixes import CSR_CONTAINERS + + +class MockClassifier: + """ + Dummy classifier to test recursive feature elimination + """ + + def __init__(self, foo_param=0): + 
self.foo_param = foo_param + + def fit(self, X, y): + assert len(X) == len(y) + self.coef_ = np.ones(X.shape[1], dtype=np.float64) + return self + + def predict(self, T): + return T.shape[0] + + predict_proba = predict + decision_function = predict + transform = predict + + def score(self, X=None, y=None): + return 0.0 + + def get_params(self, deep=True): + return {"foo_param": self.foo_param} + + def set_params(self, **params): + return self + + def _more_tags(self): + return {"allow_nan": True} + + +def test_rfe_features_importance(): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2) + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + assert len(rfe.ranking_) == X.shape[1] + + clf_svc = SVC(kernel="linear") + rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1) + rfe_svc.fit(X, y) + + # Check if the supports are equal + assert_array_equal(rfe.get_support(), rfe_svc.get_support()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_rfe(csr_container): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. 
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + X_sparse = csr_container(X) + y = iris.target + + # dense model + clf = SVC(kernel="linear") + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + X_r = rfe.transform(X) + clf.fit(X_r, y) + assert len(rfe.ranking_) == X.shape[1] + + # sparse model + clf_sparse = SVC(kernel="linear") + rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) + rfe_sparse.fit(X_sparse, y) + X_r_sparse = rfe_sparse.transform(X_sparse) + + assert X_r.shape == iris.data.shape + assert_array_almost_equal(X_r[:10], iris.data[:10]) + + assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) + assert rfe.score(X, y) == clf.score(iris.data, iris.target) + assert_array_almost_equal(X_r, X_r_sparse.toarray()) + + +def test_RFE_fit_score_params(): + # Make sure RFE passes the metadata down to fit and score methods of the + # underlying estimator + class TestEstimator(BaseEstimator, ClassifierMixin): + def fit(self, X, y, prop=None): + if prop is None: + raise ValueError("fit: prop cannot be None") + self.svc_ = SVC(kernel="linear").fit(X, y) + self.coef_ = self.svc_.coef_ + return self + + def score(self, X, y, prop=None): + if prop is None: + raise ValueError("score: prop cannot be None") + return self.svc_.score(X, y) + + X, y = load_iris(return_X_y=True) + with pytest.raises(ValueError, match="fit: prop cannot be None"): + RFE(estimator=TestEstimator()).fit(X, y) + with pytest.raises(ValueError, match="score: prop cannot be None"): + RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y) + + RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y, prop="foo") + + +def test_rfe_percent_n_features(): + # test that the results are the same + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. 
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + # there are 10 features in the data. We select 40%. + clf = SVC(kernel="linear") + rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe_num.fit(X, y) + + rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1) + rfe_perc.fit(X, y) + + assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_) + assert_array_equal(rfe_perc.support_, rfe_num.support_) + + +def test_rfe_mockclassifier(): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + # dense model + clf = MockClassifier() + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + X_r = rfe.transform(X) + clf.fit(X_r, y) + assert len(rfe.ranking_) == X.shape[1] + assert X_r.shape == iris.data.shape + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_rfecv(csr_container): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. 
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) # regression test: list should be supported + + # Test using the score function + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1) + rfecv.fit(X, y) + # non-regression test for missing worst feature: + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == X.shape[1] + + assert len(rfecv.ranking_) == X.shape[1] + X_r = rfecv.transform(X) + + # All the noisy variable were filtered out + assert_array_equal(X_r, iris.data) + + # same in sparse + rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1) + X_sparse = csr_container(X) + rfecv_sparse.fit(X_sparse, y) + X_r_sparse = rfecv_sparse.transform(X_sparse) + assert_array_equal(X_r_sparse.toarray(), iris.data) + + # Test using a customized loss function + scoring = make_scorer(zero_one_loss, greater_is_better=False) + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring) + ignore_warnings(rfecv.fit)(X, y) + X_r = rfecv.transform(X) + assert_array_equal(X_r, iris.data) + + # Test using a scorer + scorer = get_scorer("accuracy") + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer) + rfecv.fit(X, y) + X_r = rfecv.transform(X) + assert_array_equal(X_r, iris.data) + + # Test fix on cv_results_ + def test_scorer(estimator, X, y): + return 1.0 + + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer) + rfecv.fit(X, y) + + # In the event of cross validation score ties, the expected behavior of + # RFECV is to return the FEWEST features that maximize the CV score. + # Because test_scorer always returns 1.0 in this example, RFECV should + # reduce the dimensionality to a single feature (i.e. 
n_features_ = 1) + assert rfecv.n_features_ == 1 + + # Same as the first two tests, but with step=2 + rfecv = RFECV(estimator=SVC(kernel="linear"), step=2) + rfecv.fit(X, y) + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == 6 + + assert len(rfecv.ranking_) == X.shape[1] + X_r = rfecv.transform(X) + assert_array_equal(X_r, iris.data) + + rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2) + X_sparse = csr_container(X) + rfecv_sparse.fit(X_sparse, y) + X_r_sparse = rfecv_sparse.transform(X_sparse) + assert_array_equal(X_r_sparse.toarray(), iris.data) + + # Verifying that steps < 1 don't blow up. + rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=0.2) + X_sparse = csr_container(X) + rfecv_sparse.fit(X_sparse, y) + X_r_sparse = rfecv_sparse.transform(X_sparse) + assert_array_equal(X_r_sparse.toarray(), iris.data) + + +def test_rfecv_mockclassifier(): + generator = check_random_state(0) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) # regression test: list should be supported + + # Test using the score function + rfecv = RFECV(estimator=MockClassifier(), step=1) + rfecv.fit(X, y) + # non-regression test for missing worst feature: + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == X.shape[1] + + assert len(rfecv.ranking_) == X.shape[1] + + +def test_rfecv_verbose_output(): + # Check verbose=1 is producing an output. 
+ import sys + from io import StringIO + + sys.stdout = StringIO() + + generator = check_random_state(0) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) + + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1) + rfecv.fit(X, y) + + verbose_output = sys.stdout + verbose_output.seek(0) + assert len(verbose_output.readline()) > 0 + + +def test_rfecv_cv_results_size(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) # regression test: list should be supported + + # Non-regression test for varying combinations of step and + # min_features_to_select. + for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]: + rfecv = RFECV( + estimator=MockClassifier(), + step=step, + min_features_to_select=min_features_to_select, + ) + rfecv.fit(X, y) + + score_len = np.ceil((X.shape[1] - min_features_to_select) / step) + 1 + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == score_len + + assert len(rfecv.ranking_) == X.shape[1] + assert rfecv.n_features_ >= min_features_to_select + + +def test_rfe_estimator_tags(): + rfe = RFE(SVC(kernel="linear")) + assert rfe._estimator_type == "classifier" + # make sure that cross-validation is stratified + iris = load_iris() + score = cross_val_score(rfe, iris.data, iris.target) + assert score.min() > 0.7 + + +def test_rfe_min_step(global_random_seed): + n_features = 10 + X, y = make_friedman1( + n_samples=50, n_features=n_features, random_state=global_random_seed + ) + n_samples, n_features = X.shape + estimator = SVR(kernel="linear") + + # Test when floor(step * n_features) <= 0 + selector = RFE(estimator, step=0.01) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + # Test when step is between (0,1) and floor(step * n_features) > 0 + selector = RFE(estimator, step=0.20) + sel = 
selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + # Test when step is an integer + selector = RFE(estimator, step=5) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + +def test_number_of_subsets_of_features(global_random_seed): + # In RFE, 'number_of_subsets_of_features' + # = the number of iterations in '_fit' + # = max(ranking_) + # = 1 + (n_features + step - n_features_to_select - 1) // step + # After optimization #4534, this number + # = 1 + np.ceil((n_features - n_features_to_select) / float(step)) + # This test case is to test their equivalence, refer to #4534 and #3824 + + def formula1(n_features, n_features_to_select, step): + return 1 + ((n_features + step - n_features_to_select - 1) // step) + + def formula2(n_features, n_features_to_select, step): + return 1 + np.ceil((n_features - n_features_to_select) / float(step)) + + # RFE + # Case 1, n_features - n_features_to_select is divisible by step + # Case 2, n_features - n_features_to_select is not divisible by step + n_features_list = [11, 11] + n_features_to_select_list = [3, 3] + step_list = [2, 3] + for n_features, n_features_to_select, step in zip( + n_features_list, n_features_to_select_list, step_list + ): + generator = check_random_state(global_random_seed) + X = generator.normal(size=(100, n_features)) + y = generator.rand(100).round() + rfe = RFE( + estimator=SVC(kernel="linear"), + n_features_to_select=n_features_to_select, + step=step, + ) + rfe.fit(X, y) + # this number also equals to the maximum of ranking_ + assert np.max(rfe.ranking_) == formula1(n_features, n_features_to_select, step) + assert np.max(rfe.ranking_) == formula2(n_features, n_features_to_select, step) + + # In RFECV, 'fit' calls 'RFE._fit' + # 'number_of_subsets_of_features' of RFE + # = the size of each score in 'cv_results_' of RFECV + # = the number of iterations of the for loop before optimization #4534 + + # RFECV, n_features_to_select = 1 + # Case 1, n_features - 1 is 
divisible by step + # Case 2, n_features - 1 is not divisible by step + + n_features_to_select = 1 + n_features_list = [11, 10] + step_list = [2, 2] + for n_features, step in zip(n_features_list, step_list): + generator = check_random_state(global_random_seed) + X = generator.normal(size=(100, n_features)) + y = generator.rand(100).round() + rfecv = RFECV(estimator=SVC(kernel="linear"), step=step) + rfecv.fit(X, y) + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == formula1( + n_features, n_features_to_select, step + ) + assert len(rfecv.cv_results_[key]) == formula2( + n_features, n_features_to_select, step + ) + + +def test_rfe_cv_n_jobs(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + rfecv = RFECV(estimator=SVC(kernel="linear")) + rfecv.fit(X, y) + rfecv_ranking = rfecv.ranking_ + + rfecv_cv_results_ = rfecv.cv_results_ + + rfecv.set_params(n_jobs=2) + rfecv.fit(X, y) + assert_array_almost_equal(rfecv.ranking_, rfecv_ranking) + + assert rfecv_cv_results_.keys() == rfecv.cv_results_.keys() + for key in rfecv_cv_results_.keys(): + assert rfecv_cv_results_[key] == pytest.approx(rfecv.cv_results_[key]) + + +def test_rfe_cv_groups(): + generator = check_random_state(0) + iris = load_iris() + number_groups = 4 + groups = np.floor(np.linspace(0, number_groups, len(iris.target))) + X = iris.data + y = (iris.target > 0).astype(int) + + est_groups = RFECV( + estimator=RandomForestClassifier(random_state=generator), + step=1, + scoring="accuracy", + cv=GroupKFold(n_splits=2), + ) + est_groups.fit(X, y, groups=groups) + assert est_groups.n_features_ > 0 + + +@pytest.mark.parametrize( + "importance_getter", [attrgetter("regressor_.coef_"), "regressor_.coef_"] +) +@pytest.mark.parametrize("selector, expected_n_features", [(RFE, 5), (RFECV, 4)]) +def test_rfe_wrapped_estimator(importance_getter, selector, 
expected_n_features): + # Non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/15312 + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = LinearSVR(dual="auto", random_state=0) + + log_estimator = TransformedTargetRegressor( + regressor=estimator, func=np.log, inverse_func=np.exp + ) + + selector = selector(log_estimator, importance_getter=importance_getter) + sel = selector.fit(X, y) + assert sel.support_.sum() == expected_n_features + + +@pytest.mark.parametrize( + "importance_getter, err_type", + [ + ("auto", ValueError), + ("random", AttributeError), + (lambda x: x.importance, AttributeError), + ], +) +@pytest.mark.parametrize("Selector", [RFE, RFECV]) +def test_rfe_importance_getter_validation(importance_getter, err_type, Selector): + X, y = make_friedman1(n_samples=50, n_features=10, random_state=42) + estimator = LinearSVR(dual="auto") + log_estimator = TransformedTargetRegressor( + regressor=estimator, func=np.log, inverse_func=np.exp + ) + + with pytest.raises(err_type): + model = Selector(log_estimator, importance_getter=importance_getter) + model.fit(X, y) + + +@pytest.mark.parametrize("cv", [None, 5]) +def test_rfe_allow_nan_inf_in_x(cv): + iris = load_iris() + X = iris.data + y = iris.target + + # add nan and inf value to X + X[0][0] = np.nan + X[0][1] = np.inf + + clf = MockClassifier() + if cv is not None: + rfe = RFECV(estimator=clf, cv=cv) + else: + rfe = RFE(estimator=clf) + rfe.fit(X, y) + rfe.transform(X) + + +def test_w_pipeline_2d_coef_(): + pipeline = make_pipeline(StandardScaler(), LogisticRegression()) + + data, y = load_iris(return_X_y=True) + sfm = RFE( + pipeline, + n_features_to_select=2, + importance_getter="named_steps.logisticregression.coef_", + ) + + sfm.fit(data, y) + assert sfm.transform(data).shape[1] == 2 + + +def test_rfecv_std_and_mean(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, 
generator.normal(size=(len(iris.data), 6))] + y = iris.target + + rfecv = RFECV(estimator=SVC(kernel="linear")) + rfecv.fit(X, y) + n_split_keys = len(rfecv.cv_results_) - 2 + split_keys = [f"split{i}_test_score" for i in range(n_split_keys)] + + cv_scores = np.asarray([rfecv.cv_results_[key] for key in split_keys]) + expected_mean = np.mean(cv_scores, axis=0) + expected_std = np.std(cv_scores, axis=0) + + assert_allclose(rfecv.cv_results_["mean_test_score"], expected_mean) + assert_allclose(rfecv.cv_results_["std_test_score"], expected_std) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +def test_multioutput(ClsRFE): + X = np.random.normal(size=(10, 3)) + y = np.random.randint(2, size=(10, 2)) + clf = RandomForestClassifier(n_estimators=5) + rfe_test = ClsRFE(clf) + rfe_test.fit(X, y) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +def test_pipeline_with_nans(ClsRFE): + """Check that RFE works with pipeline that accept nans. + + Non-regression test for gh-21743. + """ + X, y = load_iris(return_X_y=True) + X[0, 0] = np.nan + + pipe = make_pipeline( + SimpleImputer(), + StandardScaler(), + LogisticRegression(), + ) + + fs = ClsRFE( + estimator=pipe, + importance_getter="named_steps.logisticregression.coef_", + ) + fs.fit(X, y) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression]) +def test_rfe_pls(ClsRFE, PLSEstimator): + """Check the behaviour of RFE with PLS estimators. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12410 + """ + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = PLSEstimator(n_components=1) + selector = ClsRFE(estimator, step=1).fit(X, y) + assert selector.score(X, y) > 0.5 + + +def test_rfe_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the estimator + does not implement the `decision_function` method, which is decorated with + `available_if`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + iris = load_iris() + + # `LinearRegression` does not implement 'decision_function' and should raise an + # AttributeError + rfe = RFE(estimator=LinearRegression()) + + outer_msg = "This 'RFE' has no attribute 'decision_function'" + inner_msg = "'LinearRegression' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + rfe.fit(iris.data, iris.target).decision_function(iris.data) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..82d65c55a019512ecef189a881fd9316bd813d70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py @@ -0,0 +1,323 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.cluster import KMeans +from sklearn.datasets import make_blobs, make_classification, make_regression +from sklearn.ensemble import HistGradientBoostingRegressor +from sklearn.feature_selection import SequentialFeatureSelector +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import LeaveOneGroupOut, cross_val_score +from sklearn.neighbors import KNeighborsClassifier +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_bad_n_features_to_select(): + n_features = 5 + X, y = make_regression(n_features=n_features) + sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=n_features) + with pytest.raises(ValueError, match="n_features_to_select must be < n_features"): + 
sfs.fit(X, y) + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto")) +def test_n_features_to_select(direction, n_features_to_select): + # Make sure n_features_to_select is respected + + n_features = 10 + X, y = make_regression(n_features=n_features, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + + if n_features_to_select == "auto": + n_features_to_select = n_features // 2 + + assert sfs.get_support(indices=True).shape[0] == n_features_to_select + assert sfs.n_features_to_select_ == n_features_to_select + assert sfs.transform(X).shape[1] == n_features_to_select + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +def test_n_features_to_select_auto(direction): + """Check the behaviour of `n_features_to_select="auto"` with different + values for the parameter `tol`. + """ + + n_features = 10 + tol = 1e-3 + X, y = make_regression(n_features=n_features, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + tol=tol, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + + max_features_to_select = n_features - 1 + + assert sfs.get_support(indices=True).shape[0] <= max_features_to_select + assert sfs.n_features_to_select_ <= max_features_to_select + assert sfs.transform(X).shape[1] <= max_features_to_select + assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_ + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +def test_n_features_to_select_stopping_criterion(direction): + """Check the behaviour stopping criterion for feature selection + depending on the values of `n_features_to_select` and `tol`. 
+ + When `direction` is `'forward'`, select a new features at random + among those not currently selected in selector.support_, + build a new version of the data that includes all the features + in selector.support_ + this newly selected feature. + And check that the cross-validation score of the model trained on + this new dataset variant is lower than the model with + the selected forward selected features or at least does not improve + by more than the tol margin. + + When `direction` is `'backward'`, instead of adding a new feature + to selector.support_, try to remove one of those selected features at random + And check that the cross-validation score is either decreasing or + not improving by more than the tol margin. + """ + + X, y = make_regression(n_features=50, n_informative=10, random_state=0) + + tol = 1e-3 + + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + tol=tol, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + selected_X = sfs.transform(X) + + rng = np.random.RandomState(0) + + added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True))) + added_X = np.hstack( + [ + selected_X, + (X[:, rng.choice(added_candidates)])[:, np.newaxis], + ] + ) + + removed_candidate = rng.choice(list(range(sfs.n_features_to_select_))) + removed_X = np.delete(selected_X, removed_candidate, axis=1) + + plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean() + sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean() + added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean() + removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean() + + assert sfs_cv_score >= plain_cv_score + + if direction == "forward": + assert (sfs_cv_score - added_cv_score) <= tol + assert (sfs_cv_score - removed_cv_score) >= tol + else: + assert (added_cv_score - sfs_cv_score) <= tol + assert (removed_cv_score - sfs_cv_score) <= tol + + 
+@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize( + "n_features_to_select, expected", + ( + (0.1, 1), + (1.0, 10), + (0.5, 5), + ), +) +def test_n_features_to_select_float(direction, n_features_to_select, expected): + # Test passing a float as n_features_to_select + X, y = make_regression(n_features=10) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + assert sfs.n_features_to_select_ == expected + + +@pytest.mark.parametrize("seed", range(10)) +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize( + "n_features_to_select, expected_selected_features", + [ + (2, [0, 2]), # f1 is dropped since it has no predictive power + (1, [2]), # f2 is more predictive than f0 so it's kept + ], +) +def test_sanity(seed, direction, n_features_to_select, expected_selected_features): + # Basic sanity check: 3 features, only f0 and f2 are correlated with the + # target, f2 having a stronger correlation than f0. We expect f1 to be + # dropped, and f2 to always be selected. 
+ + rng = np.random.RandomState(seed) + n_samples = 100 + X = rng.randn(n_samples, 3) + y = 3 * X[:, 0] - 10 * X[:, 2] + + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + assert_array_equal(sfs.get_support(indices=True), expected_selected_features) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_support(csr_container): + # Make sure sparse data is supported + + X, y = make_regression(n_features=10) + X = csr_container(X) + sfs = SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ) + sfs.fit(X, y) + sfs.transform(X) + + +def test_nan_support(): + # Make sure nans are OK if the underlying estimator supports nans + + rng = np.random.RandomState(0) + n_samples, n_features = 40, 4 + X, y = make_regression(n_samples, n_features, random_state=0) + nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool) + X[nan_mask] = np.nan + sfs = SequentialFeatureSelector( + HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2 + ) + sfs.fit(X, y) + sfs.transform(X) + + with pytest.raises(ValueError, match="Input X contains NaN"): + # LinearRegression does not support nans + SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ).fit(X, y) + + +def test_pipeline_support(): + # Make sure that pipelines can be passed into SFS and that SFS can be + # passed into a pipeline + + n_samples, n_features = 50, 3 + X, y = make_regression(n_samples, n_features, random_state=0) + + # pipeline in SFS + pipe = make_pipeline(StandardScaler(), LinearRegression()) + sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2) + sfs.fit(X, y) + sfs.transform(X) + + # SFS in pipeline + sfs = SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ) + pipe = make_pipeline(StandardScaler(), sfs) + pipe.fit(X, y) + pipe.transform(X) + + 
+@pytest.mark.parametrize("n_features_to_select", (2, 3)) +def test_unsupervised_model_fit(n_features_to_select): + # Make sure that models without classification labels are not being + # validated + + X, y = make_blobs(n_features=4) + sfs = SequentialFeatureSelector( + KMeans(n_init=1), + n_features_to_select=n_features_to_select, + ) + sfs.fit(X) + assert sfs.transform(X).shape[1] == n_features_to_select + + +@pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3)) +def test_no_y_validation_model_fit(y): + # Make sure that other non-conventional y labels are not accepted + + X, clusters = make_blobs(n_features=6) + sfs = SequentialFeatureSelector( + KMeans(), + n_features_to_select=3, + ) + + with pytest.raises((TypeError, ValueError)): + sfs.fit(X, y) + + +def test_forward_neg_tol_error(): + """Check that we raise an error when tol<0 and direction='forward'""" + X, y = make_regression(n_features=10, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + direction="forward", + tol=-1e-3, + ) + + with pytest.raises(ValueError, match="tol must be positive"): + sfs.fit(X, y) + + +def test_backward_neg_tol(): + """Check that SequentialFeatureSelector works negative tol + + non-regression test for #25525 + """ + X, y = make_regression(n_features=10, random_state=0) + lr = LinearRegression() + initial_score = lr.fit(X, y).score(X, y) + + sfs = SequentialFeatureSelector( + lr, + n_features_to_select="auto", + direction="backward", + tol=-1e-3, + ) + Xr = sfs.fit_transform(X, y) + new_score = lr.fit(Xr, y).score(Xr, y) + + assert 0 < sfs.get_support().sum() < X.shape[1] + assert new_score < initial_score + + +def test_cv_generator_support(): + """Check that no exception raised when cv is generator + + non-regression test for #25957 + """ + X, y = make_classification(random_state=0) + + groups = np.zeros_like(y, dtype=int) + groups[y.size // 2 :] = 1 + + cv = LeaveOneGroupOut() + splits = cv.split(X, y, 
groups=groups) + + knc = KNeighborsClassifier(n_neighbors=5) + + sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits) + sfs.fit(X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..45e66cb338a4b7a5a410db669a13f6f9213451dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py @@ -0,0 +1,72 @@ +import numpy as np +import pytest + +from sklearn.feature_selection import VarianceThreshold +from sklearn.utils._testing import assert_array_equal +from sklearn.utils.fixes import BSR_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]] + +data2 = [[-0.13725701]] * 10 + + +@pytest.mark.parametrize( + "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_zero_variance(sparse_container): + # Test VarianceThreshold with default setting, zero variance. + X = data if sparse_container is None else sparse_container(data) + sel = VarianceThreshold().fit(X) + assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True)) + + +def test_zero_variance_value_error(): + # Test VarianceThreshold with default setting, zero variance, error cases. + with pytest.raises(ValueError): + VarianceThreshold().fit([[0, 1, 2, 3]]) + with pytest.raises(ValueError): + VarianceThreshold().fit([[0, 1], [0, 1]]) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_variance_threshold(sparse_container): + # Test VarianceThreshold with custom variance. 
+ X = data if sparse_container is None else sparse_container(data) + X = VarianceThreshold(threshold=0.4).fit_transform(X) + assert (len(data), 1) == X.shape + + +@pytest.mark.skipif( + np.var(data2) == 0, + reason=( + "This test is not valid for this platform, " + "as it relies on numerical instabilities." + ), +) +@pytest.mark.parametrize( + "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_zero_variance_floating_point_error(sparse_container): + # Test that VarianceThreshold(0.0).fit eliminates features that have + # the same value in every sample, even when floating point errors + # cause np.var not to be 0 for the feature. + # See #13691 + X = data2 if sparse_container is None else sparse_container(data2) + msg = "No feature in X meets the variance threshold 0.00000" + with pytest.raises(ValueError, match=msg): + VarianceThreshold().fit(X) + + +@pytest.mark.parametrize( + "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_variance_nan(sparse_container): + arr = np.array(data, dtype=np.float64) + # add single NaN and feature should still be included + arr[0, 0] = np.nan + # make all values in feature NaN and feature should be rejected + arr[:, 1] = np.nan + + X = arr if sparse_container is None else sparse_container(arr) + sel = VarianceThreshold().fit(X) + assert_array_equal([0, 3, 4], sel.get_support(indices=True)) diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9397320f0065fb36c810edcee91b79605425af23 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/_isomap.py 
b/venv/lib/python3.10/site-packages/sklearn/manifold/_isomap.py new file mode 100644 index 0000000000000000000000000000000000000000..c6e8bfdc4268534cca3409b8469edc09e8138cdb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/manifold/_isomap.py @@ -0,0 +1,438 @@ +"""Isomap for manifold learning""" + +# Author: Jake Vanderplas -- +# License: BSD 3 clause (C) 2011 +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import issparse +from scipy.sparse.csgraph import connected_components, shortest_path + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..decomposition import KernelPCA +from ..metrics.pairwise import _VALID_METRICS +from ..neighbors import NearestNeighbors, kneighbors_graph, radius_neighbors_graph +from ..preprocessing import KernelCenterer +from ..utils._param_validation import Interval, StrOptions +from ..utils.graph import _fix_connected_components +from ..utils.validation import check_is_fitted + + +class Isomap(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Isomap Embedding. + + Non-linear dimensionality reduction through Isometric Mapping + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_neighbors : int or None, default=5 + Number of neighbors to consider for each point. If `n_neighbors` is an int, + then `radius` must be `None`. + + radius : float or None, default=None + Limiting distance of neighbors to return. If `radius` is a float, + then `n_neighbors` must be set to `None`. + + .. versionadded:: 1.1 + + n_components : int, default=2 + Number of coordinates for the manifold. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='auto' + 'auto' : Attempt to choose the most efficient solver + for the given problem. + + 'arpack' : Use Arnoldi decomposition to find the eigenvalues + and eigenvectors. + + 'dense' : Use a direct solver (i.e. 
LAPACK) + for the eigenvalue decomposition. + + tol : float, default=0 + Convergence tolerance passed to arpack or lobpcg. + not used if eigen_solver == 'dense'. + + max_iter : int, default=None + Maximum number of iterations for the arpack solver. + not used if eigen_solver == 'dense'. + + path_method : {'auto', 'FW', 'D'}, default='auto' + Method to use in finding shortest path. + + 'auto' : attempt to choose the best algorithm automatically. + + 'FW' : Floyd-Warshall algorithm. + + 'D' : Dijkstra's algorithm. + + neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \ + default='auto' + Algorithm to use for nearest neighbors search, + passed to neighbors.NearestNeighbors instance. + + n_jobs : int or None, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + metric : str, or callable, default="minkowski" + The metric to use when calculating distance between instances in a + feature array. If metric is a string or callable, it must be one of + the options allowed by :func:`sklearn.metrics.pairwise_distances` for + its metric parameter. + If metric is "precomputed", X is assumed to be a distance matrix and + must be square. X may be a :term:`Glossary `. + + .. versionadded:: 0.22 + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + .. versionadded:: 0.22 + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + .. versionadded:: 0.22 + + Attributes + ---------- + embedding_ : array-like, shape (n_samples, n_components) + Stores the embedding vectors. 
+ + kernel_pca_ : object + :class:`~sklearn.decomposition.KernelPCA` object used to implement the + embedding. + + nbrs_ : sklearn.neighbors.NearestNeighbors instance + Stores nearest neighbors instance, including BallTree or KDtree + if applicable. + + dist_matrix_ : array-like, shape (n_samples, n_samples) + Stores the geodesic distance matrix of training data. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.decomposition.PCA : Principal component analysis that is a linear + dimensionality reduction method. + sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using + kernels and PCA. + MDS : Manifold learning using multidimensional scaling. + TSNE : T-distributed Stochastic Neighbor Embedding. + LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding. + SpectralEmbedding : Spectral embedding for non-linear dimensionality. + + References + ---------- + + .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric + framework for nonlinear dimensionality reduction. 
Science 290 (5500) + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import Isomap + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding = Isomap(n_components=2) + >>> X_transformed = embedding.fit_transform(X[:100]) + >>> X_transformed.shape + (100, 2) + """ + + _parameter_constraints: dict = { + "n_neighbors": [Interval(Integral, 1, None, closed="left"), None], + "radius": [Interval(Real, 0, None, closed="both"), None], + "n_components": [Interval(Integral, 1, None, closed="left")], + "eigen_solver": [StrOptions({"auto", "arpack", "dense"})], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "path_method": [StrOptions({"auto", "FW", "D"})], + "neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})], + "n_jobs": [Integral, None], + "p": [Interval(Real, 1, None, closed="left")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "metric_params": [dict, None], + } + + def __init__( + self, + *, + n_neighbors=5, + radius=None, + n_components=2, + eigen_solver="auto", + tol=0, + max_iter=None, + path_method="auto", + neighbors_algorithm="auto", + n_jobs=None, + metric="minkowski", + p=2, + metric_params=None, + ): + self.n_neighbors = n_neighbors + self.radius = radius + self.n_components = n_components + self.eigen_solver = eigen_solver + self.tol = tol + self.max_iter = max_iter + self.path_method = path_method + self.neighbors_algorithm = neighbors_algorithm + self.n_jobs = n_jobs + self.metric = metric + self.p = p + self.metric_params = metric_params + + def _fit_transform(self, X): + if self.n_neighbors is not None and self.radius is not None: + raise ValueError( + "Both n_neighbors and radius are provided. 
Use" + f" Isomap(radius={self.radius}, n_neighbors=None) if intended to use" + " radius-based neighbors" + ) + + self.nbrs_ = NearestNeighbors( + n_neighbors=self.n_neighbors, + radius=self.radius, + algorithm=self.neighbors_algorithm, + metric=self.metric, + p=self.p, + metric_params=self.metric_params, + n_jobs=self.n_jobs, + ) + self.nbrs_.fit(X) + self.n_features_in_ = self.nbrs_.n_features_in_ + if hasattr(self.nbrs_, "feature_names_in_"): + self.feature_names_in_ = self.nbrs_.feature_names_in_ + + self.kernel_pca_ = KernelPCA( + n_components=self.n_components, + kernel="precomputed", + eigen_solver=self.eigen_solver, + tol=self.tol, + max_iter=self.max_iter, + n_jobs=self.n_jobs, + ).set_output(transform="default") + + if self.n_neighbors is not None: + nbg = kneighbors_graph( + self.nbrs_, + self.n_neighbors, + metric=self.metric, + p=self.p, + metric_params=self.metric_params, + mode="distance", + n_jobs=self.n_jobs, + ) + else: + nbg = radius_neighbors_graph( + self.nbrs_, + radius=self.radius, + metric=self.metric, + p=self.p, + metric_params=self.metric_params, + mode="distance", + n_jobs=self.n_jobs, + ) + + # Compute the number of connected components, and connect the different + # components to be able to compute a shortest path between all pairs + # of samples in the graph. + # Similar fix to cluster._agglomerative._fix_connectivity. + n_connected_components, labels = connected_components(nbg) + if n_connected_components > 1: + if self.metric == "precomputed" and issparse(X): + raise RuntimeError( + "The number of connected components of the neighbors graph" + f" is {n_connected_components} > 1. The graph cannot be " + "completed with metric='precomputed', and Isomap cannot be" + "fitted. Increase the number of neighbors to avoid this " + "issue, or precompute the full distance matrix instead " + "of passing a sparse neighbors graph." 
+ ) + warnings.warn( + ( + "The number of connected components of the neighbors graph " + f"is {n_connected_components} > 1. Completing the graph to fit" + " Isomap might be slow. Increase the number of neighbors to " + "avoid this issue." + ), + stacklevel=2, + ) + + # use array validated by NearestNeighbors + nbg = _fix_connected_components( + X=self.nbrs_._fit_X, + graph=nbg, + n_connected_components=n_connected_components, + component_labels=labels, + mode="distance", + metric=self.nbrs_.effective_metric_, + **self.nbrs_.effective_metric_params_, + ) + + self.dist_matrix_ = shortest_path(nbg, method=self.path_method, directed=False) + + if self.nbrs_._fit_X.dtype == np.float32: + self.dist_matrix_ = self.dist_matrix_.astype( + self.nbrs_._fit_X.dtype, copy=False + ) + + G = self.dist_matrix_**2 + G *= -0.5 + + self.embedding_ = self.kernel_pca_.fit_transform(G) + self._n_features_out = self.embedding_.shape[1] + + def reconstruction_error(self): + """Compute the reconstruction error for the embedding. + + Returns + ------- + reconstruction_error : float + Reconstruction error. + + Notes + ----- + The cost function of an isomap embedding is + + ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples`` + + Where D is the matrix of distances for the input data X, + D_fit is the matrix of distances for the output embedding X_fit, + and K is the isomap kernel: + + ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)`` + """ + G = -0.5 * self.dist_matrix_**2 + G_center = KernelCenterer().fit_transform(G) + evals = self.kernel_pca_.eigenvalues_ + return np.sqrt(np.sum(G_center**2) - np.sum(evals**2)) / G.shape[0] + + @_fit_context( + # Isomap.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Compute the embedding vectors for data X. 
+ + Parameters + ---------- + X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} + Sample data, shape = (n_samples, n_features), in the form of a + numpy array, sparse matrix, precomputed tree, or NearestNeighbors + object. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + self._fit_transform(X) + return self + + @_fit_context( + # Isomap.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None): + """Fit the model from data in X and transform X. + + Parameters + ---------- + X : {array-like, sparse matrix, BallTree, KDTree} + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + X transformed in the new space. + """ + self._fit_transform(X) + return self.embedding_ + + def transform(self, X): + """Transform X. + + This is implemented by linking the points X into the graph of geodesic + distances of the training data. First the `n_neighbors` nearest + neighbors of X are found in the training data, and from these the + shortest geodesic distances from each point in X to each point in + the training data are computed in order to construct the kernel. + The embedding of X is the projection of this kernel onto the + embedding vectors of the training set. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_queries, n_features) + If neighbors_algorithm='precomputed', X is assumed to be a + distance matrix or a sparse graph of shape + (n_queries, n_samples_fit). + + Returns + ------- + X_new : array-like, shape (n_queries, n_components) + X transformed in the new space. 
+ """ + check_is_fitted(self) + if self.n_neighbors is not None: + distances, indices = self.nbrs_.kneighbors(X, return_distance=True) + else: + distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True) + + # Create the graph of shortest distances from X to + # training data via the nearest neighbors of X. + # This can be done as a single array operation, but it potentially + # takes a lot of memory. To avoid that, use a loop: + + n_samples_fit = self.nbrs_.n_samples_fit_ + n_queries = distances.shape[0] + + if hasattr(X, "dtype") and X.dtype == np.float32: + dtype = np.float32 + else: + dtype = np.float64 + + G_X = np.zeros((n_queries, n_samples_fit), dtype) + for i in range(n_queries): + G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) + + G_X **= 2 + G_X *= -0.5 + + return self.kernel_pca_.transform(G_X) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py b/venv/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..41d0c233b8f764e4e3b4f5cef88006d7a77a160b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/manifold/_locally_linear.py @@ -0,0 +1,841 @@ +"""Locally Linear Embedding""" + +# Author: Fabian Pedregosa -- +# Jake Vanderplas -- +# License: BSD 3 clause (C) INRIA 2011 + +from numbers import Integral, Real + +import numpy as np +from scipy.linalg import eigh, qr, solve, svd +from scipy.sparse import csr_matrix, eye +from scipy.sparse.linalg import eigsh + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, + _UnstableArchMixin, +) +from ..neighbors import NearestNeighbors +from ..utils import check_array, check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import 
stable_cumsum +from ..utils.validation import FLOAT_DTYPES, check_is_fitted + + +def barycenter_weights(X, Y, indices, reg=1e-3): + """Compute barycenter weights of X from Y along the first axis + + We estimate the weights to assign to each point in Y[indices] to recover + the point X[i]. The barycenter weights sum to 1. + + Parameters + ---------- + X : array-like, shape (n_samples, n_dim) + + Y : array-like, shape (n_samples, n_dim) + + indices : array-like, shape (n_samples, n_dim) + Indices of the points in Y used to compute the barycenter + + reg : float, default=1e-3 + Amount of regularization to add for the problem to be + well-posed in the case of n_neighbors > n_dim + + Returns + ------- + B : array-like, shape (n_samples, n_neighbors) + + Notes + ----- + See developers note for more information. + """ + X = check_array(X, dtype=FLOAT_DTYPES) + Y = check_array(Y, dtype=FLOAT_DTYPES) + indices = check_array(indices, dtype=int) + + n_samples, n_neighbors = indices.shape + assert X.shape[0] == n_samples + + B = np.empty((n_samples, n_neighbors), dtype=X.dtype) + v = np.ones(n_neighbors, dtype=X.dtype) + + # this might raise a LinalgError if G is singular and has trace + # zero + for i, ind in enumerate(indices): + A = Y[ind] + C = A - X[i] # broadcasting + G = np.dot(C, C.T) + trace = np.trace(G) + if trace > 0: + R = reg * trace + else: + R = reg + G.flat[:: n_neighbors + 1] += R + w = solve(G, v, assume_a="pos") + B[i, :] = w / np.sum(w) + return B + + +def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None): + """Computes the barycenter weighted graph of k-Neighbors for points in X + + Parameters + ---------- + X : {array-like, NearestNeighbors} + Sample data, shape = (n_samples, n_features), in the form of a + numpy array or a NearestNeighbors object. + + n_neighbors : int + Number of neighbors for each sample. + + reg : float, default=1e-3 + Amount of regularization when solving the least-squares + problem. 
Only relevant if mode='barycenter'. If None, use the + default. + + n_jobs : int or None, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix in CSR format, shape = [n_samples, n_samples] + A[i, j] is assigned the weight of edge that connects i to j. + + See Also + -------- + sklearn.neighbors.kneighbors_graph + sklearn.neighbors.radius_neighbors_graph + """ + knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X) + X = knn._fit_X + n_samples = knn.n_samples_fit_ + ind = knn.kneighbors(X, return_distance=False)[:, 1:] + data = barycenter_weights(X, X, ind, reg=reg) + indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors) + return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples)) + + +def null_space( + M, k, k_skip=1, eigen_solver="arpack", tol=1e-6, max_iter=100, random_state=None +): + """ + Find the null space of a matrix M. + + Parameters + ---------- + M : {array, matrix, sparse matrix, LinearOperator} + Input covariance matrix: should be symmetric positive semi-definite + + k : int + Number of eigenvalues/vectors to return + + k_skip : int, default=1 + Number of low eigenvalues to skip. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack' + auto : algorithm will attempt to choose the best method for input data + arpack : use arnoldi iteration in shift-invert mode. + For this method, M may be a dense matrix, sparse matrix, + or general linear operator. + Warning: ARPACK can be unstable for some problems. It is + best to try several random seeds in order to check results. + dense : use standard dense matrix operations for the eigenvalue + decomposition. For this method, M must be an array + or matrix type. This method should be avoided for + large problems. 
+ + tol : float, default=1e-6 + Tolerance for 'arpack' method. + Not used if eigen_solver=='dense'. + + max_iter : int, default=100 + Maximum number of iterations for 'arpack' method. + Not used if eigen_solver=='dense' + + random_state : int, RandomState instance, default=None + Determines the random number generator when ``solver`` == 'arpack'. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + """ + if eigen_solver == "auto": + if M.shape[0] > 200 and k + k_skip < 10: + eigen_solver = "arpack" + else: + eigen_solver = "dense" + + if eigen_solver == "arpack": + v0 = _init_arpack_v0(M.shape[0], random_state) + try: + eigen_values, eigen_vectors = eigsh( + M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0 + ) + except RuntimeError as e: + raise ValueError( + "Error in determining null-space with ARPACK. Error message: " + "'%s'. Note that eigen_solver='arpack' can fail when the " + "weight matrix is singular or otherwise ill-behaved. In that " + "case, eigen_solver='dense' is recommended. See online " + "documentation for more information." % e + ) from e + + return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:]) + elif eigen_solver == "dense": + if hasattr(M, "toarray"): + M = M.toarray() + eigen_values, eigen_vectors = eigh( + M, subset_by_index=(k_skip, k + k_skip - 1), overwrite_a=True + ) + index = np.argsort(np.abs(eigen_values)) + return eigen_vectors[:, index], np.sum(eigen_values) + else: + raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver) + + +def locally_linear_embedding( + X, + *, + n_neighbors, + n_components, + reg=1e-3, + eigen_solver="auto", + tol=1e-6, + max_iter=100, + method="standard", + hessian_tol=1e-4, + modified_tol=1e-12, + random_state=None, + n_jobs=None, +): + """Perform a Locally Linear Embedding analysis on the data. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + X : {array-like, NearestNeighbors} + Sample data, shape = (n_samples, n_features), in the form of a + numpy array or a NearestNeighbors object. + + n_neighbors : int + Number of neighbors to consider for each point. + + n_components : int + Number of coordinates for the manifold. + + reg : float, default=1e-3 + Regularization constant, multiplies the trace of the local covariance + matrix of the distances. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='auto' + auto : algorithm will attempt to choose the best method for input data + + arpack : use arnoldi iteration in shift-invert mode. + For this method, M may be a dense matrix, sparse matrix, + or general linear operator. + Warning: ARPACK can be unstable for some problems. It is + best to try several random seeds in order to check results. + + dense : use standard dense matrix operations for the eigenvalue + decomposition. For this method, M must be an array + or matrix type. This method should be avoided for + large problems. + + tol : float, default=1e-6 + Tolerance for 'arpack' method + Not used if eigen_solver=='dense'. + + max_iter : int, default=100 + Maximum number of iterations for the arpack solver. + + method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard' + standard : use the standard locally linear embedding algorithm. + see reference [1]_ + hessian : use the Hessian eigenmap method. This method requires + n_neighbors > n_components * (1 + (n_components + 1) / 2. + see reference [2]_ + modified : use the modified locally linear embedding algorithm. + see reference [3]_ + ltsa : use local tangent space alignment algorithm + see reference [4]_ + + hessian_tol : float, default=1e-4 + Tolerance for Hessian eigenmapping method. + Only used if method == 'hessian'. + + modified_tol : float, default=1e-12 + Tolerance for modified LLE method. + Only used if method == 'modified'. 
+ + random_state : int, RandomState instance, default=None + Determines the random number generator when ``solver`` == 'arpack'. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + n_jobs : int or None, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + Y : array-like, shape [n_samples, n_components] + Embedding vectors. + + squared_error : float + Reconstruction error for the embedding vectors. Equivalent to + ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights. + + References + ---------- + + .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction + by locally linear embedding. Science 290:2323 (2000). + .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally + linear embedding techniques for high-dimensional data. + Proc Natl Acad Sci U S A. 100:5591 (2003). + .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear + Embedding Using Multiple Weights. + `_ + .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear + dimensionality reduction via tangent space alignment. + Journal of Shanghai Univ. 
8:406 (2004) + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import locally_linear_embedding + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding, _ = locally_linear_embedding(X[:100],n_neighbors=5, n_components=2) + >>> embedding.shape + (100, 2) + """ + if eigen_solver not in ("auto", "arpack", "dense"): + raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver) + + if method not in ("standard", "hessian", "modified", "ltsa"): + raise ValueError("unrecognized method '%s'" % method) + + nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs) + nbrs.fit(X) + X = nbrs._fit_X + + N, d_in = X.shape + + if n_components > d_in: + raise ValueError( + "output dimension must be less than or equal to input dimension" + ) + if n_neighbors >= N: + raise ValueError( + "Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d" + % (N, n_neighbors) + ) + + if n_neighbors <= 0: + raise ValueError("n_neighbors must be positive") + + M_sparse = eigen_solver != "dense" + + if method == "standard": + W = barycenter_kneighbors_graph( + nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs + ) + + # we'll compute M = (I-W)'(I-W) + # depending on the solver, we'll do this differently + if M_sparse: + M = eye(*W.shape, format=W.format) - W + M = (M.T * M).tocsr() + else: + M = (W.T * W - W.T - W).toarray() + M.flat[:: M.shape[0] + 1] += 1 # W = W - I = W - I + + elif method == "hessian": + dp = n_components * (n_components + 1) // 2 + + if n_neighbors <= n_components + dp: + raise ValueError( + "for method='hessian', n_neighbors must be " + "greater than " + "[n_components * (n_components + 3) / 2]" + ) + + neighbors = nbrs.kneighbors( + X, n_neighbors=n_neighbors + 1, return_distance=False + ) + neighbors = neighbors[:, 1:] + + Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64) + Yi[:, 0] = 1 + + M = np.zeros((N, N), dtype=np.float64) + + use_svd = 
n_neighbors > d_in + + for i in range(N): + Gi = X[neighbors[i]] + Gi -= Gi.mean(0) + + # build Hessian estimator + if use_svd: + U = svd(Gi, full_matrices=0)[0] + else: + Ci = np.dot(Gi, Gi.T) + U = eigh(Ci)[1][:, ::-1] + + Yi[:, 1 : 1 + n_components] = U[:, :n_components] + + j = 1 + n_components + for k in range(n_components): + Yi[:, j : j + n_components - k] = U[:, k : k + 1] * U[:, k:n_components] + j += n_components - k + + Q, R = qr(Yi) + + w = Q[:, n_components + 1 :] + S = w.sum(0) + + S[np.where(abs(S) < hessian_tol)] = 1 + w /= S + + nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) + M[nbrs_x, nbrs_y] += np.dot(w, w.T) + + if M_sparse: + M = csr_matrix(M) + + elif method == "modified": + if n_neighbors < n_components: + raise ValueError("modified LLE requires n_neighbors >= n_components") + + neighbors = nbrs.kneighbors( + X, n_neighbors=n_neighbors + 1, return_distance=False + ) + neighbors = neighbors[:, 1:] + + # find the eigenvectors and eigenvalues of each local covariance + # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix, + # where the columns are eigenvectors + V = np.zeros((N, n_neighbors, n_neighbors)) + nev = min(d_in, n_neighbors) + evals = np.zeros([N, nev]) + + # choose the most efficient way to find the eigenvectors + use_svd = n_neighbors > d_in + + if use_svd: + for i in range(N): + X_nbrs = X[neighbors[i]] - X[i] + V[i], evals[i], _ = svd(X_nbrs, full_matrices=True) + evals **= 2 + else: + for i in range(N): + X_nbrs = X[neighbors[i]] - X[i] + C_nbrs = np.dot(X_nbrs, X_nbrs.T) + evi, vi = eigh(C_nbrs) + evals[i] = evi[::-1] + V[i] = vi[:, ::-1] + + # find regularized weights: this is like normal LLE. 
+ # because we've already computed the SVD of each covariance matrix, + # it's faster to use this rather than np.linalg.solve + reg = 1e-3 * evals.sum(1) + + tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors)) + tmp[:, :nev] /= evals + reg[:, None] + tmp[:, nev:] /= reg[:, None] + + w_reg = np.zeros((N, n_neighbors)) + for i in range(N): + w_reg[i] = np.dot(V[i], tmp[i]) + w_reg /= w_reg.sum(1)[:, None] + + # calculate eta: the median of the ratio of small to large eigenvalues + # across the points. This is used to determine s_i, below + rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1) + eta = np.median(rho) + + # find s_i, the size of the "almost null space" for each point: + # this is the size of the largest set of eigenvalues + # such that Sum[v; v in set]/Sum[v; v not in set] < eta + s_range = np.zeros(N, dtype=int) + evals_cumsum = stable_cumsum(evals, 1) + eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1 + for i in range(N): + s_range[i] = np.searchsorted(eta_range[i, ::-1], eta) + s_range += n_neighbors - nev # number of zero eigenvalues + + # Now calculate M. 
+ # This is the [N x N] matrix whose null space is the desired embedding + M = np.zeros((N, N), dtype=np.float64) + for i in range(N): + s_i = s_range[i] + + # select bottom s_i eigenvectors and calculate alpha + Vi = V[i, :, n_neighbors - s_i :] + alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i) + + # compute Householder matrix which satisfies + # Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s) + # using prescription from paper + h = np.full(s_i, alpha_i) - np.dot(Vi.T, np.ones(n_neighbors)) + + norm_h = np.linalg.norm(h) + if norm_h < modified_tol: + h *= 0 + else: + h /= norm_h + + # Householder matrix is + # >> Hi = np.identity(s_i) - 2*np.outer(h,h) + # Then the weight matrix is + # >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None] + # We do this much more efficiently: + Wi = Vi - 2 * np.outer(np.dot(Vi, h), h) + (1 - alpha_i) * w_reg[i, :, None] + + # Update M as follows: + # >> W_hat = np.zeros( (N,s_i) ) + # >> W_hat[neighbors[i],:] = Wi + # >> W_hat[i] -= 1 + # >> M += np.dot(W_hat,W_hat.T) + # We can do this much more efficiently: + nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) + M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T) + Wi_sum1 = Wi.sum(1) + M[i, neighbors[i]] -= Wi_sum1 + M[neighbors[i], i] -= Wi_sum1 + M[i, i] += s_i + + if M_sparse: + M = csr_matrix(M) + + elif method == "ltsa": + neighbors = nbrs.kneighbors( + X, n_neighbors=n_neighbors + 1, return_distance=False + ) + neighbors = neighbors[:, 1:] + + M = np.zeros((N, N)) + + use_svd = n_neighbors > d_in + + for i in range(N): + Xi = X[neighbors[i]] + Xi -= Xi.mean(0) + + # compute n_components largest eigenvalues of Xi * Xi^T + if use_svd: + v = svd(Xi, full_matrices=True)[0] + else: + Ci = np.dot(Xi, Xi.T) + v = eigh(Ci)[1][:, ::-1] + + Gi = np.zeros((n_neighbors, n_components + 1)) + Gi[:, 1:] = v[:, :n_components] + Gi[:, 0] = 1.0 / np.sqrt(n_neighbors) + + GiGiT = np.dot(Gi, Gi.T) + + nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) + M[nbrs_x, nbrs_y] -= GiGiT + 
M[neighbors[i], neighbors[i]] += 1 + + return null_space( + M, + n_components, + k_skip=1, + eigen_solver=eigen_solver, + tol=tol, + max_iter=max_iter, + random_state=random_state, + ) + + +class LocallyLinearEmbedding( + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _UnstableArchMixin, + BaseEstimator, +): + """Locally Linear Embedding. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to consider for each point. + + n_components : int, default=2 + Number of coordinates for the manifold. + + reg : float, default=1e-3 + Regularization constant, multiplies the trace of the local covariance + matrix of the distances. + + eigen_solver : {'auto', 'arpack', 'dense'}, default='auto' + The solver used to compute the eigenvectors. The available options are: + + - `'auto'` : algorithm will attempt to choose the best method for input + data. + - `'arpack'` : use arnoldi iteration in shift-invert mode. For this + method, M may be a dense matrix, sparse matrix, or general linear + operator. + - `'dense'` : use standard dense matrix operations for the eigenvalue + decomposition. For this method, M must be an array or matrix type. + This method should be avoided for large problems. + + .. warning:: + ARPACK can be unstable for some problems. It is best to try several + random seeds in order to check results. + + tol : float, default=1e-6 + Tolerance for 'arpack' method + Not used if eigen_solver=='dense'. + + max_iter : int, default=100 + Maximum number of iterations for the arpack solver. + Not used if eigen_solver=='dense'. + + method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard' + - `standard`: use the standard locally linear embedding algorithm. see + reference [1]_ + - `hessian`: use the Hessian eigenmap method. This method requires + ``n_neighbors > n_components * (1 + (n_components + 1) / 2``. 
see + reference [2]_ + - `modified`: use the modified locally linear embedding algorithm. + see reference [3]_ + - `ltsa`: use local tangent space alignment algorithm. see + reference [4]_ + + hessian_tol : float, default=1e-4 + Tolerance for Hessian eigenmapping method. + Only used if ``method == 'hessian'``. + + modified_tol : float, default=1e-12 + Tolerance for modified LLE method. + Only used if ``method == 'modified'``. + + neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \ + default='auto' + Algorithm to use for nearest neighbors search, passed to + :class:`~sklearn.neighbors.NearestNeighbors` instance. + + random_state : int, RandomState instance, default=None + Determines the random number generator when + ``eigen_solver`` == 'arpack'. Pass an int for reproducible results + across multiple function calls. See :term:`Glossary `. + + n_jobs : int or None, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + embedding_ : array-like, shape [n_samples, n_components] + Stores the embedding vectors + + reconstruction_error_ : float + Reconstruction error associated with `embedding_` + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + nbrs_ : NearestNeighbors object + Stores nearest neighbors instance, including BallTree or KDtree + if applicable. + + See Also + -------- + SpectralEmbedding : Spectral embedding for non-linear dimensionality + reduction. + TSNE : Distributed Stochastic Neighbor Embedding. + + References + ---------- + + .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction + by locally linear embedding. 
Science 290:2323 (2000). + .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally + linear embedding techniques for high-dimensional data. + Proc Natl Acad Sci U S A. 100:5591 (2003). + .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear + Embedding Using Multiple Weights. + `_ + .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear + dimensionality reduction via tangent space alignment. + Journal of Shanghai Univ. 8:406 (2004) + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.manifold import LocallyLinearEmbedding + >>> X, _ = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> embedding = LocallyLinearEmbedding(n_components=2) + >>> X_transformed = embedding.fit_transform(X[:100]) + >>> X_transformed.shape + (100, 2) + """ + + _parameter_constraints: dict = { + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "reg": [Interval(Real, 0, None, closed="left")], + "eigen_solver": [StrOptions({"auto", "arpack", "dense"})], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "method": [StrOptions({"standard", "hessian", "modified", "ltsa"})], + "hessian_tol": [Interval(Real, 0, None, closed="left")], + "modified_tol": [Interval(Real, 0, None, closed="left")], + "neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})], + "random_state": ["random_state"], + "n_jobs": [None, Integral], + } + + def __init__( + self, + *, + n_neighbors=5, + n_components=2, + reg=1e-3, + eigen_solver="auto", + tol=1e-6, + max_iter=100, + method="standard", + hessian_tol=1e-4, + modified_tol=1e-12, + neighbors_algorithm="auto", + random_state=None, + n_jobs=None, + ): + self.n_neighbors = n_neighbors + self.n_components = n_components + self.reg = reg + self.eigen_solver = eigen_solver + self.tol = tol + self.max_iter = max_iter + self.method = method + self.hessian_tol 
= hessian_tol + self.modified_tol = modified_tol + self.random_state = random_state + self.neighbors_algorithm = neighbors_algorithm + self.n_jobs = n_jobs + + def _fit_transform(self, X): + self.nbrs_ = NearestNeighbors( + n_neighbors=self.n_neighbors, + algorithm=self.neighbors_algorithm, + n_jobs=self.n_jobs, + ) + + random_state = check_random_state(self.random_state) + X = self._validate_data(X, dtype=float) + self.nbrs_.fit(X) + self.embedding_, self.reconstruction_error_ = locally_linear_embedding( + X=self.nbrs_, + n_neighbors=self.n_neighbors, + n_components=self.n_components, + eigen_solver=self.eigen_solver, + tol=self.tol, + max_iter=self.max_iter, + method=self.method, + hessian_tol=self.hessian_tol, + modified_tol=self.modified_tol, + random_state=random_state, + reg=self.reg, + n_jobs=self.n_jobs, + ) + self._n_features_out = self.embedding_.shape[1] + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Compute the embedding vectors for data X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted `LocallyLinearEmbedding` class instance. + """ + self._fit_transform(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Compute the embedding vectors for data X and transform X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + Returns the instance itself. + """ + self._fit_transform(X) + return self.embedding_ + + def transform(self, X): + """ + Transform new points into embedding space. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. 
+ + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + + Notes + ----- + Because of scaling performed by this method, it is discouraged to use + it together with methods that are not scale-invariant (like SVMs). + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + ind = self.nbrs_.kneighbors( + X, n_neighbors=self.n_neighbors, return_distance=False + ) + weights = barycenter_weights(X, self.nbrs_._fit_X, ind, reg=self.reg) + X_new = np.empty((X.shape[0], self.n_components)) + for i in range(X.shape[0]): + X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i]) + return X_new diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2018207539b6ff2113cf154ee3a070d84e5d7d9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b261e71e802960764b6f57ba1c1b2cb72edfc44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py new file 
mode 100644 index 0000000000000000000000000000000000000000..e38b92442e58d9881726bdee85073ad38a7c95e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py @@ -0,0 +1,348 @@ +import math +from itertools import product + +import numpy as np +import pytest +from scipy.sparse import rand as sparse_rand + +from sklearn import clone, datasets, manifold, neighbors, pipeline, preprocessing +from sklearn.datasets import make_blobs +from sklearn.metrics.pairwise import pairwise_distances +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +eigen_solvers = ["auto", "dense", "arpack"] +path_methods = ["auto", "FW", "D"] + + +def create_sample_data(dtype, n_pts=25, add_noise=False): + # grid of equidistant points in 2D, n_components = n_dim + n_per_side = int(math.sqrt(n_pts)) + X = np.array(list(product(range(n_per_side), repeat=2))).astype(dtype, copy=False) + if add_noise: + # add noise in a third dimension + rng = np.random.RandomState(0) + noise = 0.1 * rng.randn(n_pts, 1).astype(dtype, copy=False) + X = np.concatenate((X, noise), 1) + return X + + +@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)]) +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +def test_isomap_simple_grid( + global_dtype, n_neighbors, radius, eigen_solver, path_method +): + # Isomap should preserve distances when all neighbors are used + n_pts = 25 + X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=False) + + # distances from each point to all others + if n_neighbors is not None: + G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance") + else: + G = neighbors.radius_neighbors_graph(X, radius, mode="distance") + + clf = manifold.Isomap( + n_neighbors=n_neighbors, + radius=radius, + n_components=2, + eigen_solver=eigen_solver, + 
path_method=path_method, + ) + clf.fit(X) + + if n_neighbors is not None: + G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance") + else: + G_iso = neighbors.radius_neighbors_graph( + clf.embedding_, radius, mode="distance" + ) + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose_dense_sparse(G, G_iso, atol=atol) + + +@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)]) +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +def test_isomap_reconstruction_error( + global_dtype, n_neighbors, radius, eigen_solver, path_method +): + if global_dtype is np.float32: + pytest.skip( + "Skipping test due to numerical instabilities on float32 data" + "from KernelCenterer used in the reconstruction_error method" + ) + + # Same setup as in test_isomap_simple_grid, with an added dimension + n_pts = 25 + X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=True) + + # compute input kernel + if n_neighbors is not None: + G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance").toarray() + else: + G = neighbors.radius_neighbors_graph(X, radius, mode="distance").toarray() + centerer = preprocessing.KernelCenterer() + K = centerer.fit_transform(-0.5 * G**2) + + clf = manifold.Isomap( + n_neighbors=n_neighbors, + radius=radius, + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + ) + clf.fit(X) + + # compute output kernel + if n_neighbors is not None: + G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance") + else: + G_iso = neighbors.radius_neighbors_graph( + clf.embedding_, radius, mode="distance" + ) + G_iso = G_iso.toarray() + K_iso = centerer.fit_transform(-0.5 * G_iso**2) + + # make sure error agrees + reconstruction_error = np.linalg.norm(K - K_iso) / n_pts + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(reconstruction_error, clf.reconstruction_error(), atol=atol) + + 
+@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 0.5)]) +def test_transform(global_dtype, n_neighbors, radius): + n_samples = 200 + n_components = 10 + noise_scale = 0.01 + + # Create S-curve dataset + X, y = datasets.make_s_curve(n_samples, random_state=0) + + X = X.astype(global_dtype, copy=False) + + # Compute isomap embedding + iso = manifold.Isomap( + n_components=n_components, n_neighbors=n_neighbors, radius=radius + ) + X_iso = iso.fit_transform(X) + + # Re-embed a noisy version of the points + rng = np.random.RandomState(0) + noise = noise_scale * rng.randn(*X.shape) + X_iso2 = iso.transform(X + noise) + + # Make sure the rms error on re-embedding is comparable to noise_scale + assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale + + +@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 10.0)]) +def test_pipeline(n_neighbors, radius, global_dtype): + # check that Isomap works fine as a transformer in a Pipeline + # only checks that no error is raised. 
+ # TODO check that it actually does something useful + X, y = datasets.make_blobs(random_state=0) + X = X.astype(global_dtype, copy=False) + clf = pipeline.Pipeline( + [ + ("isomap", manifold.Isomap(n_neighbors=n_neighbors, radius=radius)), + ("clf", neighbors.KNeighborsClassifier()), + ] + ) + clf.fit(X, y) + assert 0.9 < clf.score(X, y) + + +def test_pipeline_with_nearest_neighbors_transformer(global_dtype): + # Test chaining NearestNeighborsTransformer and Isomap with + # neighbors_algorithm='precomputed' + algorithm = "auto" + n_neighbors = 10 + + X, _ = datasets.make_blobs(random_state=0) + X2, _ = datasets.make_blobs(random_state=1) + + X = X.astype(global_dtype, copy=False) + X2 = X2.astype(global_dtype, copy=False) + + # compare the chained version and the compact version + est_chain = pipeline.make_pipeline( + neighbors.KNeighborsTransformer( + n_neighbors=n_neighbors, algorithm=algorithm, mode="distance" + ), + manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"), + ) + est_compact = manifold.Isomap( + n_neighbors=n_neighbors, neighbors_algorithm=algorithm + ) + + Xt_chain = est_chain.fit_transform(X) + Xt_compact = est_compact.fit_transform(X) + assert_allclose(Xt_chain, Xt_compact) + + Xt_chain = est_chain.transform(X2) + Xt_compact = est_compact.transform(X2) + assert_allclose(Xt_chain, Xt_compact) + + +@pytest.mark.parametrize( + "metric, p, is_euclidean", + [ + ("euclidean", 2, True), + ("manhattan", 1, False), + ("minkowski", 1, False), + ("minkowski", 2, True), + (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False), + ], +) +def test_different_metric(global_dtype, metric, p, is_euclidean): + # Isomap must work on various metric parameters work correctly + # and must default to euclidean. 
+ X, _ = datasets.make_blobs(random_state=0) + X = X.astype(global_dtype, copy=False) + + reference = manifold.Isomap().fit_transform(X) + embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X) + + if is_euclidean: + assert_allclose(embedding, reference) + else: + with pytest.raises(AssertionError, match="Not equal to tolerance"): + assert_allclose(embedding, reference) + + +def test_isomap_clone_bug(): + # regression test for bug reported in #6062 + model = manifold.Isomap() + for n_neighbors in [10, 15, 20]: + model.set_params(n_neighbors=n_neighbors) + model.fit(np.random.rand(50, 2)) + assert model.nbrs_.n_neighbors == n_neighbors + + +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input( + global_dtype, eigen_solver, path_method, global_random_seed, csr_container +): + # TODO: compare results on dense and sparse data as proposed in: + # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186 + X = csr_container( + sparse_rand( + 100, + 3, + density=0.1, + format="csr", + dtype=global_dtype, + random_state=global_random_seed, + ) + ) + + iso_dense = manifold.Isomap( + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + n_neighbors=8, + ) + iso_sparse = clone(iso_dense) + + X_trans_dense = iso_dense.fit_transform(X.toarray()) + X_trans_sparse = iso_sparse.fit_transform(X) + + assert_allclose(X_trans_sparse, X_trans_dense, rtol=1e-4, atol=1e-4) + + +def test_isomap_fit_precomputed_radius_graph(global_dtype): + # Isomap.fit_transform must yield similar result when using + # a precomputed distance matrix. 
+ + X, y = datasets.make_s_curve(200, random_state=0) + X = X.astype(global_dtype, copy=False) + radius = 10 + + g = neighbors.radius_neighbors_graph(X, radius=radius, mode="distance") + isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="precomputed") + isomap.fit(g) + precomputed_result = isomap.embedding_ + + isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="minkowski") + result = isomap.fit_transform(X) + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(precomputed_result, result, atol=atol) + + +def test_isomap_fitted_attributes_dtype(global_dtype): + """Check that the fitted attributes are stored accordingly to the + data type of X.""" + iso = manifold.Isomap(n_neighbors=2) + + X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype) + + iso.fit(X) + + assert iso.dist_matrix_.dtype == global_dtype + assert iso.embedding_.dtype == global_dtype + + +def test_isomap_dtype_equivalence(): + """Check the equivalence of the results with 32 and 64 bits input.""" + iso_32 = manifold.Isomap(n_neighbors=2) + X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + iso_32.fit(X_32) + + iso_64 = manifold.Isomap(n_neighbors=2) + X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64) + iso_64.fit(X_64) + + assert_allclose(iso_32.dist_matrix_, iso_64.dist_matrix_) + + +def test_isomap_raise_error_when_neighbor_and_radius_both_set(): + # Isomap.fit_transform must raise a ValueError if + # radius and n_neighbors are provided. 
+ + X, _ = datasets.load_digits(return_X_y=True) + isomap = manifold.Isomap(n_neighbors=3, radius=5.5) + msg = "Both n_neighbors and radius are provided" + with pytest.raises(ValueError, match=msg): + isomap.fit_transform(X) + + +def test_multiple_connected_components(): + # Test that a warning is raised when the graph has multiple components + X = np.array([0, 1, 2, 5, 6, 7])[:, None] + with pytest.warns(UserWarning, match="number of connected components"): + manifold.Isomap(n_neighbors=2).fit(X) + + +def test_multiple_connected_components_metric_precomputed(global_dtype): + # Test that an error is raised when the graph has multiple components + # and when X is a precomputed neighbors graph. + X = np.array([0, 1, 2, 5, 6, 7])[:, None].astype(global_dtype, copy=False) + + # works with a precomputed distance matrix (dense) + X_distances = pairwise_distances(X) + with pytest.warns(UserWarning, match="number of connected components"): + manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_distances) + + # does not work with a precomputed neighbors graph (sparse) + X_graph = neighbors.kneighbors_graph(X, n_neighbors=2, mode="distance") + with pytest.raises(RuntimeError, match="number of connected components"): + manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_graph) + + +def test_get_feature_names_out(): + """Check get_feature_names_out for Isomap.""" + X, y = make_blobs(random_state=0, n_features=4) + n_components = 2 + + iso = manifold.Isomap(n_components=n_components) + iso.fit_transform(X) + names = iso.get_feature_names_out() + assert_array_equal([f"isomap{i}" for i in range(n_components)], names) diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..835aa20fd1d32ace684eea9afd451bcdcf695f79 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py @@ -0,0 +1,171 @@ +from itertools import product + +import numpy as np +import pytest +from scipy import linalg + +from sklearn import manifold, neighbors +from sklearn.datasets import make_blobs +from sklearn.manifold._locally_linear import barycenter_kneighbors_graph +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, + ignore_warnings, +) + +eigen_solvers = ["dense", "arpack"] + + +# ---------------------------------------------------------------------- +# Test utility routines +def test_barycenter_kneighbors_graph(global_dtype): + X = np.array([[0, 1], [1.01, 1.0], [2, 0]], dtype=global_dtype) + + graph = barycenter_kneighbors_graph(X, 1) + expected_graph = np.array( + [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=global_dtype + ) + + assert graph.dtype == global_dtype + + assert_allclose(graph.toarray(), expected_graph) + + graph = barycenter_kneighbors_graph(X, 2) + # check that columns sum to one + assert_allclose(np.sum(graph.toarray(), axis=1), np.ones(3)) + pred = np.dot(graph.toarray(), X) + assert linalg.norm(pred - X) / X.shape[0] < 1 + + +# ---------------------------------------------------------------------- +# Test LLE by computing the reconstruction error on some manifolds. + + +def test_lle_simple_grid(global_dtype): + # note: ARPACK is numerically unstable, so this test will fail for + # some random seeds. We choose 42 because the tests pass. + # for arm64 platforms 2 makes the test fail. + # TODO: rewrite this test to make less sensitive to the random seed, + # irrespective of the platform. 
+ rng = np.random.RandomState(42) + + # grid of equidistant points in 2D, n_components = n_dim + X = np.array(list(product(range(5), repeat=2))) + X = X + 1e-10 * rng.uniform(size=X.shape) + X = X.astype(global_dtype, copy=False) + + n_components = 2 + clf = manifold.LocallyLinearEmbedding( + n_neighbors=5, n_components=n_components, random_state=rng + ) + tol = 0.1 + + N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray() + reconstruction_error = linalg.norm(np.dot(N, X) - X, "fro") + assert reconstruction_error < tol + + for solver in eigen_solvers: + clf.set_params(eigen_solver=solver) + clf.fit(X) + assert clf.embedding_.shape[1] == n_components + reconstruction_error = ( + linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2 + ) + + assert reconstruction_error < tol + assert_allclose(clf.reconstruction_error_, reconstruction_error, atol=1e-1) + + # re-embed a noisy version of X using the transform method + noise = rng.randn(*X.shape).astype(global_dtype, copy=False) / 100 + X_reembedded = clf.transform(X + noise) + assert linalg.norm(X_reembedded - clf.embedding_) < tol + + +@pytest.mark.parametrize("method", ["standard", "hessian", "modified", "ltsa"]) +@pytest.mark.parametrize("solver", eigen_solvers) +def test_lle_manifold(global_dtype, method, solver): + rng = np.random.RandomState(0) + # similar test on a slightly more complex manifold + X = np.array(list(product(np.arange(18), repeat=2))) + X = np.c_[X, X[:, 0] ** 2 / 18] + X = X + 1e-10 * rng.uniform(size=X.shape) + X = X.astype(global_dtype, copy=False) + n_components = 2 + + clf = manifold.LocallyLinearEmbedding( + n_neighbors=6, n_components=n_components, method=method, random_state=0 + ) + tol = 1.5 if method == "standard" else 3 + + N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray() + reconstruction_error = linalg.norm(np.dot(N, X) - X) + assert reconstruction_error < tol + + clf.set_params(eigen_solver=solver) + clf.fit(X) + assert clf.embedding_.shape[1] == 
n_components + reconstruction_error = ( + linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2 + ) + details = "solver: %s, method: %s" % (solver, method) + assert reconstruction_error < tol, details + assert ( + np.abs(clf.reconstruction_error_ - reconstruction_error) + < tol * reconstruction_error + ), details + + +def test_pipeline(): + # check that LocallyLinearEmbedding works fine as a Pipeline + # only checks that no error is raised. + # TODO check that it actually does something useful + from sklearn import datasets, pipeline + + X, y = datasets.make_blobs(random_state=0) + clf = pipeline.Pipeline( + [ + ("filter", manifold.LocallyLinearEmbedding(random_state=0)), + ("clf", neighbors.KNeighborsClassifier()), + ] + ) + clf.fit(X, y) + assert 0.9 < clf.score(X, y) + + +# Test the error raised when the weight matrix is singular +def test_singular_matrix(): + M = np.ones((200, 3)) + f = ignore_warnings + with pytest.raises(ValueError, match="Error in determining null-space with ARPACK"): + f( + manifold.locally_linear_embedding( + M, + n_neighbors=2, + n_components=1, + method="standard", + eigen_solver="arpack", + ) + ) + + +# regression test for #6033 +def test_integer_input(): + rand = np.random.RandomState(0) + X = rand.randint(0, 100, size=(20, 3)) + + for method in ["standard", "hessian", "modified", "ltsa"]: + clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10) + clf.fit(X) # this previously raised a TypeError + + +def test_get_feature_names_out(): + """Check get_feature_names_out for LocallyLinearEmbedding.""" + X, y = make_blobs(random_state=0, n_features=4) + n_components = 2 + + iso = manifold.LocallyLinearEmbedding(n_components=n_components) + iso.fit(X) + names = iso.get_feature_names_out() + assert_array_equal( + [f"locallylinearembedding{i}" for i in range(n_components)], names + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py 
b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py new file mode 100644 index 0000000000000000000000000000000000000000..2d286ef0942bfe65802dad803da5c2eee8c0e89e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py @@ -0,0 +1,87 @@ +from unittest.mock import Mock + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal + +from sklearn.manifold import _mds as mds +from sklearn.metrics import euclidean_distances + + +def test_smacof(): + # test metric smacof using the data of "Modern Multidimensional Scaling", + # Borg & Groenen, p 154 + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + Z = np.array([[-0.266, -0.539], [0.451, 0.252], [0.016, -0.238], [-0.200, 0.524]]) + X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1) + X_true = np.array( + [[-1.415, -2.471], [1.633, 1.107], [0.249, -0.067], [-0.468, 1.431]] + ) + assert_array_almost_equal(X, X_true, decimal=3) + + +def test_smacof_error(): + # Not symmetric similarity matrix: + sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + with pytest.raises(ValueError): + mds.smacof(sim) + + # Not squared similarity matrix: + sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [4, 2, 1, 0]]) + + with pytest.raises(ValueError): + mds.smacof(sim) + + # init not None and not correct format: + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + Z = np.array([[-0.266, -0.539], [0.016, -0.238], [-0.200, 0.524]]) + with pytest.raises(ValueError): + mds.smacof(sim, init=Z, n_init=1) + + +def test_MDS(): + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed") + mds_clf.fit(sim) + + +@pytest.mark.parametrize("k", [0.5, 1.5, 2]) +def test_normed_stress(k): + """Test that non-metric MDS normalized stress is scale-invariant.""" + sim = np.array([[0, 5, 3, 4], 
[5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + X1, stress1 = mds.smacof(sim, metric=False, max_iter=5, random_state=0) + X2, stress2 = mds.smacof(k * sim, metric=False, max_iter=5, random_state=0) + + assert_allclose(stress1, stress2, rtol=1e-5) + assert_allclose(X1, X2, rtol=1e-5) + + +def test_normalize_metric_warning(): + """ + Test that a UserWarning is emitted when using normalized stress with + metric-MDS. + """ + msg = "Normalized stress is not supported" + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + with pytest.raises(ValueError, match=msg): + mds.smacof(sim, metric=True, normalized_stress=True) + + +@pytest.mark.parametrize("metric", [True, False]) +def test_normalized_stress_auto(metric, monkeypatch): + rng = np.random.RandomState(0) + X = rng.randn(4, 3) + dist = euclidean_distances(X) + + mock = Mock(side_effect=mds._smacof_single) + monkeypatch.setattr("sklearn.manifold._mds._smacof_single", mock) + + est = mds.MDS(metric=metric, normalized_stress="auto", random_state=rng) + est.fit_transform(X) + assert mock.call_args[1]["normalized_stress"] != metric + + mds.smacof(dist, metric=metric, normalized_stress="auto", random_state=rng) + assert mock.call_args[1]["normalized_stress"] != metric diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_spectral_embedding.py b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_spectral_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..14bb13c0800992e6520dc00f05d7795021887849 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_spectral_embedding.py @@ -0,0 +1,541 @@ +from unittest.mock import Mock + +import numpy as np +import pytest +from scipy import sparse +from scipy.linalg import eigh +from scipy.sparse.linalg import eigsh, lobpcg + +from sklearn.cluster import KMeans +from sklearn.datasets import make_blobs +from sklearn.manifold import SpectralEmbedding, _spectral_embedding, 
spectral_embedding +from sklearn.manifold._spectral_embedding import ( + _graph_connected_component, + _graph_is_connected, +) +from sklearn.metrics import normalized_mutual_info_score, pairwise_distances +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.neighbors import NearestNeighbors +from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal +from sklearn.utils.extmath import _deterministic_vector_sign_flip +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + parse_version, + sp_version, +) +from sklearn.utils.fixes import laplacian as csgraph_laplacian + +try: + from pyamg import smoothed_aggregation_solver # noqa + + pyamg_available = True +except ImportError: + pyamg_available = False +skip_if_no_pyamg = pytest.mark.skipif( + not pyamg_available, reason="PyAMG is required for the tests in this function." +) + +# non centered, sparse centers to check the +centers = np.array( + [ + [0.0, 5.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 4.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 5.0, 1.0], + ] +) +n_samples = 1000 +n_clusters, n_features = centers.shape +S, true_labels = make_blobs( + n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42 +) + + +def _assert_equal_with_sign_flipping(A, B, tol=0.0): + """Check array A and B are equal with possible sign flipping on + each columns""" + tol_squared = tol**2 + for A_col, B_col in zip(A.T, B.T): + assert ( + np.max((A_col - B_col) ** 2) <= tol_squared + or np.max((A_col + B_col) ** 2) <= tol_squared + ) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_sparse_graph_connected_component(coo_container): + rng = np.random.RandomState(42) + n_samples = 300 + boundaries = [0, 42, 121, 200, n_samples] + p = rng.permutation(n_samples) + connections = [] + + for start, stop in zip(boundaries[:-1], boundaries[1:]): + group = p[start:stop] + # Connect all elements within the group at least once via an + # arbitrary path that spans the 
group. + for i in range(len(group) - 1): + connections.append((group[i], group[i + 1])) + + # Add some more random connections within the group + min_idx, max_idx = 0, len(group) - 1 + n_random_connections = 1000 + source = rng.randint(min_idx, max_idx, size=n_random_connections) + target = rng.randint(min_idx, max_idx, size=n_random_connections) + connections.extend(zip(group[source], group[target])) + + # Build a symmetric affinity matrix + row_idx, column_idx = tuple(np.array(connections).T) + data = rng.uniform(0.1, 42, size=len(connections)) + affinity = coo_container((data, (row_idx, column_idx))) + affinity = 0.5 * (affinity + affinity.T) + + for start, stop in zip(boundaries[:-1], boundaries[1:]): + component_1 = _graph_connected_component(affinity, p[start]) + component_size = stop - start + assert component_1.sum() == component_size + + # We should retrieve the same component mask by starting by both ends + # of the group + component_2 = _graph_connected_component(affinity, p[stop - 1]) + assert component_2.sum() == component_size + assert_array_equal(component_1, component_2) + + +# TODO: investigate why this test is seed-sensitive on 32-bit Python +# runtimes. Is this revealing a numerical stability problem ? Or is it +# expected from the test numerical design ? In the latter case the test +# should be made less seed-sensitive instead. 
+@pytest.mark.parametrize( + "eigen_solver", + [ + "arpack", + "lobpcg", + pytest.param("amg", marks=skip_if_no_pyamg), + ], +) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_spectral_embedding_two_components(eigen_solver, dtype, seed=0): + # Test spectral embedding with two components + random_state = np.random.RandomState(seed) + n_sample = 100 + affinity = np.zeros(shape=[n_sample * 2, n_sample * 2]) + # first component + affinity[0:n_sample, 0:n_sample] = ( + np.abs(random_state.randn(n_sample, n_sample)) + 2 + ) + # second component + affinity[n_sample::, n_sample::] = ( + np.abs(random_state.randn(n_sample, n_sample)) + 2 + ) + + # Test of internal _graph_connected_component before connection + component = _graph_connected_component(affinity, 0) + assert component[:n_sample].all() + assert not component[n_sample:].any() + component = _graph_connected_component(affinity, -1) + assert not component[:n_sample].any() + assert component[n_sample:].all() + + # connection + affinity[0, n_sample + 1] = 1 + affinity[n_sample + 1, 0] = 1 + affinity.flat[:: 2 * n_sample + 1] = 0 + affinity = 0.5 * (affinity + affinity.T) + + true_label = np.zeros(shape=2 * n_sample) + true_label[0:n_sample] = 1 + + se_precomp = SpectralEmbedding( + n_components=1, + affinity="precomputed", + random_state=np.random.RandomState(seed), + eigen_solver=eigen_solver, + ) + + embedded_coordinate = se_precomp.fit_transform(affinity.astype(dtype)) + # thresholding on the first components using 0. 
+ label_ = np.array(embedded_coordinate.ravel() < 0, dtype=np.int64) + assert normalized_mutual_info_score(true_label, label_) == pytest.approx(1.0) + + +@pytest.mark.parametrize("sparse_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize( + "eigen_solver", + [ + "arpack", + "lobpcg", + pytest.param("amg", marks=skip_if_no_pyamg), + ], +) +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +def test_spectral_embedding_precomputed_affinity( + sparse_container, eigen_solver, dtype, seed=36 +): + # Test spectral embedding with precomputed kernel + gamma = 1.0 + X = S if sparse_container is None else sparse_container(S) + + se_precomp = SpectralEmbedding( + n_components=2, + affinity="precomputed", + random_state=np.random.RandomState(seed), + eigen_solver=eigen_solver, + ) + se_rbf = SpectralEmbedding( + n_components=2, + affinity="rbf", + gamma=gamma, + random_state=np.random.RandomState(seed), + eigen_solver=eigen_solver, + ) + embed_precomp = se_precomp.fit_transform(rbf_kernel(X.astype(dtype), gamma=gamma)) + embed_rbf = se_rbf.fit_transform(X.astype(dtype)) + assert_array_almost_equal(se_precomp.affinity_matrix_, se_rbf.affinity_matrix_) + _assert_equal_with_sign_flipping(embed_precomp, embed_rbf, 0.05) + + +def test_precomputed_nearest_neighbors_filtering(): + # Test precomputed graph filtering when containing too many neighbors + n_neighbors = 2 + results = [] + for additional_neighbors in [0, 10]: + nn = NearestNeighbors(n_neighbors=n_neighbors + additional_neighbors).fit(S) + graph = nn.kneighbors_graph(S, mode="connectivity") + embedding = ( + SpectralEmbedding( + random_state=0, + n_components=2, + affinity="precomputed_nearest_neighbors", + n_neighbors=n_neighbors, + ) + .fit(graph) + .embedding_ + ) + results.append(embedding) + + assert_array_equal(results[0], results[1]) + + +@pytest.mark.parametrize("sparse_container", [None, *CSR_CONTAINERS]) +def test_spectral_embedding_callable_affinity(sparse_container, seed=36): + # Test 
spectral embedding with callable affinity + gamma = 0.9 + kern = rbf_kernel(S, gamma=gamma) + X = S if sparse_container is None else sparse_container(S) + + se_callable = SpectralEmbedding( + n_components=2, + affinity=(lambda x: rbf_kernel(x, gamma=gamma)), + gamma=gamma, + random_state=np.random.RandomState(seed), + ) + se_rbf = SpectralEmbedding( + n_components=2, + affinity="rbf", + gamma=gamma, + random_state=np.random.RandomState(seed), + ) + embed_rbf = se_rbf.fit_transform(X) + embed_callable = se_callable.fit_transform(X) + assert_array_almost_equal(se_callable.affinity_matrix_, se_rbf.affinity_matrix_) + assert_array_almost_equal(kern, se_rbf.affinity_matrix_) + _assert_equal_with_sign_flipping(embed_rbf, embed_callable, 0.05) + + +# TODO: Remove when pyamg does replaces sp.rand call with np.random.rand +# https://github.com/scikit-learn/scikit-learn/issues/15913 +@pytest.mark.filterwarnings( + "ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.float +@pytest.mark.filterwarnings( + "ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of pinv2 +@pytest.mark.filterwarnings( + "ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.skipif( + not pyamg_available, reason="PyAMG is required for the tests in this function." 
+) +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_spectral_embedding_amg_solver(dtype, coo_container, seed=36): + se_amg = SpectralEmbedding( + n_components=2, + affinity="nearest_neighbors", + eigen_solver="amg", + n_neighbors=5, + random_state=np.random.RandomState(seed), + ) + se_arpack = SpectralEmbedding( + n_components=2, + affinity="nearest_neighbors", + eigen_solver="arpack", + n_neighbors=5, + random_state=np.random.RandomState(seed), + ) + embed_amg = se_amg.fit_transform(S.astype(dtype)) + embed_arpack = se_arpack.fit_transform(S.astype(dtype)) + _assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5) + + # same with special case in which amg is not actually used + # regression test for #10715 + # affinity between nodes + row = np.array([0, 0, 1, 2, 3, 3, 4], dtype=np.int32) + col = np.array([1, 2, 2, 3, 4, 5, 5], dtype=np.int32) + val = np.array([100, 100, 100, 1, 100, 100, 100], dtype=np.int64) + + affinity = coo_container( + (np.hstack([val, val]), (np.hstack([row, col]), np.hstack([col, row]))), + shape=(6, 6), + ) + se_amg.affinity = "precomputed" + se_arpack.affinity = "precomputed" + embed_amg = se_amg.fit_transform(affinity.astype(dtype)) + embed_arpack = se_arpack.fit_transform(affinity.astype(dtype)) + _assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5) + + # Check that passing a sparse matrix with `np.int64` indices dtype raises an error + # or is successful based on the version of SciPy which is installed. 
+ # Use a CSR matrix to avoid any conversion during the validation + affinity = affinity.tocsr() + affinity.indptr = affinity.indptr.astype(np.int64) + affinity.indices = affinity.indices.astype(np.int64) + + # PR: https://github.com/scipy/scipy/pull/18913 + # First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279 + scipy_graph_traversal_supports_int64_index = sp_version >= parse_version("1.11.3") + if scipy_graph_traversal_supports_int64_index: + se_amg.fit_transform(affinity) + else: + err_msg = "Only sparse matrices with 32-bit integer indices are accepted" + with pytest.raises(ValueError, match=err_msg): + se_amg.fit_transform(affinity) + + +# TODO: Remove filterwarnings when pyamg does replaces sp.rand call with +# np.random.rand: +# https://github.com/scikit-learn/scikit-learn/issues/15913 +@pytest.mark.filterwarnings( + "ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.float +@pytest.mark.filterwarnings( + "ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of pinv2 +@pytest.mark.filterwarnings( + "ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.skipif( + not pyamg_available, reason="PyAMG is required for the tests in this function." 
+) +# TODO: Remove when pyamg removes the use of np.find_common_type +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +def test_spectral_embedding_amg_solver_failure(dtype, seed=36): + # Non-regression test for amg solver failure (issue #13393 on github) + num_nodes = 100 + X = sparse.rand(num_nodes, num_nodes, density=0.1, random_state=seed) + X = X.astype(dtype) + upper = sparse.triu(X) - sparse.diags(X.diagonal()) + sym_matrix = upper + upper.T + embedding = spectral_embedding( + sym_matrix, n_components=10, eigen_solver="amg", random_state=0 + ) + + # Check that the learned embedding is stable w.r.t. random solver init: + for i in range(3): + new_embedding = spectral_embedding( + sym_matrix, n_components=10, eigen_solver="amg", random_state=i + 1 + ) + _assert_equal_with_sign_flipping(embedding, new_embedding, tol=0.05) + + +@pytest.mark.filterwarnings("ignore:the behavior of nmi will change in version 0.22") +def test_pipeline_spectral_clustering(seed=36): + # Test using pipeline to do spectral clustering + random_state = np.random.RandomState(seed) + se_rbf = SpectralEmbedding( + n_components=n_clusters, affinity="rbf", random_state=random_state + ) + se_knn = SpectralEmbedding( + n_components=n_clusters, + affinity="nearest_neighbors", + n_neighbors=5, + random_state=random_state, + ) + for se in [se_rbf, se_knn]: + km = KMeans(n_clusters=n_clusters, random_state=random_state, n_init=10) + km.fit(se.fit_transform(S)) + assert_array_almost_equal( + normalized_mutual_info_score(km.labels_, true_labels), 1.0, 2 + ) + + +def test_connectivity(seed=36): + # Test that graph connectivity test works as expected + graph = np.array( + [ + [1, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 1, 1], + [0, 0, 0, 1, 1], + ] + ) + assert not _graph_is_connected(graph) + for csr_container in CSR_CONTAINERS: + assert not 
_graph_is_connected(csr_container(graph)) + for csc_container in CSC_CONTAINERS: + assert not _graph_is_connected(csc_container(graph)) + + graph = np.array( + [ + [1, 1, 0, 0, 0], + [1, 1, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 1, 1], + [0, 0, 0, 1, 1], + ] + ) + assert _graph_is_connected(graph) + for csr_container in CSR_CONTAINERS: + assert _graph_is_connected(csr_container(graph)) + for csc_container in CSC_CONTAINERS: + assert _graph_is_connected(csc_container(graph)) + + +def test_spectral_embedding_deterministic(): + # Test that Spectral Embedding is deterministic + random_state = np.random.RandomState(36) + data = random_state.randn(10, 30) + sims = rbf_kernel(data) + embedding_1 = spectral_embedding(sims) + embedding_2 = spectral_embedding(sims) + assert_array_almost_equal(embedding_1, embedding_2) + + +def test_spectral_embedding_unnormalized(): + # Test that spectral_embedding is also processing unnormalized laplacian + # correctly + random_state = np.random.RandomState(36) + data = random_state.randn(10, 30) + sims = rbf_kernel(data) + n_components = 8 + embedding_1 = spectral_embedding( + sims, norm_laplacian=False, n_components=n_components, drop_first=False + ) + + # Verify using manual computation with dense eigh + laplacian, dd = csgraph_laplacian(sims, normed=False, return_diag=True) + _, diffusion_map = eigh(laplacian) + embedding_2 = diffusion_map.T[:n_components] + embedding_2 = _deterministic_vector_sign_flip(embedding_2).T + + assert_array_almost_equal(embedding_1, embedding_2) + + +def test_spectral_embedding_first_eigen_vector(): + # Test that the first eigenvector of spectral_embedding + # is constant and that the second is not (for a connected graph) + random_state = np.random.RandomState(36) + data = random_state.randn(10, 30) + sims = rbf_kernel(data) + n_components = 2 + + for seed in range(10): + embedding = spectral_embedding( + sims, + norm_laplacian=False, + n_components=n_components, + drop_first=False, + random_state=seed, + ) 
+ + assert np.std(embedding[:, 0]) == pytest.approx(0) + assert np.std(embedding[:, 1]) > 1e-3 + + +@pytest.mark.parametrize( + "eigen_solver", + [ + "arpack", + "lobpcg", + pytest.param("amg", marks=skip_if_no_pyamg), + ], +) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_spectral_embedding_preserves_dtype(eigen_solver, dtype): + """Check that `SpectralEmbedding is preserving the dtype of the fitted + attribute and transformed data. + + Ideally, this test should be covered by the common test + `check_transformer_preserve_dtypes`. However, this test only run + with transformers implementing `transform` while `SpectralEmbedding` + implements only `fit_transform`. + """ + X = S.astype(dtype) + se = SpectralEmbedding( + n_components=2, affinity="rbf", eigen_solver=eigen_solver, random_state=0 + ) + X_trans = se.fit_transform(X) + + assert X_trans.dtype == dtype + assert se.embedding_.dtype == dtype + assert se.affinity_matrix_.dtype == dtype + + +@pytest.mark.skipif( + pyamg_available, + reason="PyAMG is installed and we should not test for an error.", +) +def test_error_pyamg_not_available(): + se_precomp = SpectralEmbedding( + n_components=2, + affinity="rbf", + eigen_solver="amg", + ) + err_msg = "The eigen_solver was set to 'amg', but pyamg is not available." 
+ with pytest.raises(ValueError, match=err_msg): + se_precomp.fit_transform(S) + + +# TODO: Remove when pyamg removes the use of np.find_common_type +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +@pytest.mark.parametrize("solver", ["arpack", "amg", "lobpcg"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_spectral_eigen_tol_auto(monkeypatch, solver, csr_container): + """Test that `eigen_tol="auto"` is resolved correctly""" + if solver == "amg" and not pyamg_available: + pytest.skip("PyAMG is not available.") + X, _ = make_blobs( + n_samples=200, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + D = pairwise_distances(X) # Distance matrix + S = np.max(D) - D # Similarity matrix + + solver_func = eigsh if solver == "arpack" else lobpcg + default_value = 0 if solver == "arpack" else None + if solver == "amg": + S = csr_container(S) + + mocked_solver = Mock(side_effect=solver_func) + + monkeypatch.setattr(_spectral_embedding, solver_func.__qualname__, mocked_solver) + + spectral_embedding(S, random_state=42, eigen_solver=solver, eigen_tol="auto") + mocked_solver.assert_called() + + _, kwargs = mocked_solver.call_args + assert kwargs["tol"] == default_value diff --git a/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py new file mode 100644 index 0000000000000000000000000000000000000000..ea037fa5f83910988275e3b0ddf8ec5ef36fcd58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py @@ -0,0 +1,1181 @@ +import sys +from io import StringIO + +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose +from scipy.optimize import check_grad +from scipy.spatial.distance import pdist, squareform + +from sklearn import config_context +from sklearn.datasets import make_blobs +from sklearn.exceptions import 
EfficiencyWarning + +# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne' +from sklearn.manifold import ( # type: ignore + TSNE, + _barnes_hut_tsne, +) +from sklearn.manifold._t_sne import ( + _gradient_descent, + _joint_probabilities, + _joint_probabilities_nn, + _kl_divergence, + _kl_divergence_bh, + trustworthiness, +) +from sklearn.manifold._utils import _binary_search_perplexity +from sklearn.metrics.pairwise import ( + cosine_distances, + manhattan_distances, + pairwise_distances, +) +from sklearn.neighbors import NearestNeighbors, kneighbors_graph +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, + skip_if_32bit, +) +from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS + +x = np.linspace(0, 1, 10) +xx, yy = np.meshgrid(x, x) +X_2d_grid = np.hstack( + [ + xx.ravel().reshape(-1, 1), + yy.ravel().reshape(-1, 1), + ] +) + + +def test_gradient_descent_stops(): + # Test stopping conditions of gradient descent. 
+ class ObjectiveSmallGradient: + def __init__(self): + self.it = -1 + + def __call__(self, _, compute_error=True): + self.it += 1 + return (10 - self.it) / 10.0, np.array([1e-5]) + + def flat_function(_, compute_error=True): + return 0.0, np.ones(1) + + # Gradient norm + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), + np.zeros(1), + 0, + n_iter=100, + n_iter_without_progress=100, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=1e-5, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 1.0 + assert it == 0 + assert "gradient norm" in out + + # Maximum number of iterations without improvement + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + flat_function, + np.zeros(1), + 0, + n_iter=100, + n_iter_without_progress=10, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=0.0, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 0.0 + assert it == 11 + assert "did not make any progress" in out + + # Maximum number of iterations + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), + np.zeros(1), + 0, + n_iter=11, + n_iter_without_progress=100, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=0.0, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 0.0 + assert it == 10 + assert "Iteration 10" in out + + +def test_binary_search(): + # Test if the binary search finds Gaussians with desired perplexity. 
+ random_state = check_random_state(0) + data = random_state.randn(50, 5) + distances = pairwise_distances(data).astype(np.float32) + desired_perplexity = 25.0 + P = _binary_search_perplexity(distances, desired_perplexity, verbose=0) + P = np.maximum(P, np.finfo(np.double).eps) + mean_perplexity = np.mean( + [np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])] + ) + assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3) + + +def test_binary_search_underflow(): + # Test if the binary search finds Gaussians with desired perplexity. + # A more challenging case than the one above, producing numeric + # underflow in float precision (see issue #19471 and PR #19472). + random_state = check_random_state(42) + data = random_state.randn(1, 90).astype(np.float32) + 100 + desired_perplexity = 30.0 + P = _binary_search_perplexity(data, desired_perplexity, verbose=0) + perplexity = 2 ** -np.nansum(P[0, 1:] * np.log2(P[0, 1:])) + assert_almost_equal(perplexity, desired_perplexity, decimal=3) + + +def test_binary_search_neighbors(): + # Binary perplexity search approximation. + # Should be approximately equal to the slow method when we use + # all points as neighbors. 
+ n_samples = 200 + desired_perplexity = 25.0 + random_state = check_random_state(0) + data = random_state.randn(n_samples, 2).astype(np.float32, copy=False) + distances = pairwise_distances(data) + P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0) + + # Test that when we use all the neighbors the results are identical + n_neighbors = n_samples - 1 + nn = NearestNeighbors().fit(data) + distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + distances_nn = distance_graph.data.astype(np.float32, copy=False) + distances_nn = distances_nn.reshape(n_samples, n_neighbors) + P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0) + + indptr = distance_graph.indptr + P1_nn = np.array( + [ + P1[k, distance_graph.indices[indptr[k] : indptr[k + 1]]] + for k in range(n_samples) + ] + ) + assert_array_almost_equal(P1_nn, P2, decimal=4) + + # Test that the highest P_ij are the same when fewer neighbors are used + for k in np.linspace(150, n_samples - 1, 5): + k = int(k) + topn = k * 10 # check the top 10 * k entries out of k * k entries + distance_graph = nn.kneighbors_graph(n_neighbors=k, mode="distance") + distances_nn = distance_graph.data.astype(np.float32, copy=False) + distances_nn = distances_nn.reshape(n_samples, k) + P2k = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0) + assert_array_almost_equal(P1_nn, P2, decimal=2) + idx = np.argsort(P1.ravel())[::-1] + P1top = P1.ravel()[idx][:topn] + idx = np.argsort(P2k.ravel())[::-1] + P2top = P2k.ravel()[idx][:topn] + assert_array_almost_equal(P1top, P2top, decimal=2) + + +def test_binary_perplexity_stability(): + # Binary perplexity search should be stable. + # The binary_search_perplexity had a bug wherein the P array + # was uninitialized, leading to sporadically failing tests. 
+ n_neighbors = 10 + n_samples = 100 + random_state = check_random_state(0) + data = random_state.randn(n_samples, 5) + nn = NearestNeighbors().fit(data) + distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + distances = distance_graph.data.astype(np.float32, copy=False) + distances = distances.reshape(n_samples, n_neighbors) + last_P = None + desired_perplexity = 3 + for _ in range(100): + P = _binary_search_perplexity(distances.copy(), desired_perplexity, verbose=0) + P1 = _joint_probabilities_nn(distance_graph, desired_perplexity, verbose=0) + # Convert the sparse matrix to a dense one for testing + P1 = P1.toarray() + if last_P is None: + last_P = P + last_P1 = P1 + else: + assert_array_almost_equal(P, last_P, decimal=4) + assert_array_almost_equal(P1, last_P1, decimal=4) + + +def test_gradient(): + # Test gradient of Kullback-Leibler divergence. + random_state = check_random_state(0) + + n_samples = 50 + n_features = 2 + n_components = 2 + alpha = 1.0 + + distances = random_state.randn(n_samples, n_features).astype(np.float32) + distances = np.abs(distances.dot(distances.T)) + np.fill_diagonal(distances, 0.0) + X_embedded = random_state.randn(n_samples, n_components).astype(np.float32) + + P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0) + + def fun(params): + return _kl_divergence(params, P, alpha, n_samples, n_components)[0] + + def grad(params): + return _kl_divergence(params, P, alpha, n_samples, n_components)[1] + + assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5) + + +def test_trustworthiness(): + # Test trustworthiness score. 
+ random_state = check_random_state(0) + + # Affine transformation + X = random_state.randn(100, 2) + assert trustworthiness(X, 5.0 + X / 10.0) == 1.0 + + # Randomly shuffled + X = np.arange(100).reshape(-1, 1) + X_embedded = X.copy() + random_state.shuffle(X_embedded) + assert trustworthiness(X, X_embedded) < 0.6 + + # Completely different + X = np.arange(5).reshape(-1, 1) + X_embedded = np.array([[0], [2], [4], [1], [3]]) + assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2) + + +def test_trustworthiness_n_neighbors_error(): + """Raise an error when n_neighbors >= n_samples / 2. + + Non-regression test for #18567. + """ + regex = "n_neighbors .+ should be less than .+" + rng = np.random.RandomState(42) + X = rng.rand(7, 4) + X_embedded = rng.rand(7, 2) + with pytest.raises(ValueError, match=regex): + trustworthiness(X, X_embedded, n_neighbors=5) + + trust = trustworthiness(X, X_embedded, n_neighbors=3) + assert 0 <= trust <= 1 + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +@pytest.mark.parametrize("init", ("random", "pca")) +def test_preserve_trustworthiness_approximately(method, init): + # Nearest neighbors should be preserved approximately. 
+ random_state = check_random_state(0) + n_components = 2 + X = random_state.randn(50, n_components).astype(np.float32) + tsne = TSNE( + n_components=n_components, + init=init, + random_state=0, + method=method, + n_iter=700, + learning_rate="auto", + ) + X_embedded = tsne.fit_transform(X) + t = trustworthiness(X, X_embedded, n_neighbors=1) + assert t > 0.85 + + +def test_optimization_minimizes_kl_divergence(): + """t-SNE should give a lower KL divergence with more iterations.""" + random_state = check_random_state(0) + X, _ = make_blobs(n_features=3, random_state=random_state) + kl_divergences = [] + for n_iter in [250, 300, 350]: + tsne = TSNE( + n_components=2, + init="random", + perplexity=10, + learning_rate=100.0, + n_iter=n_iter, + random_state=0, + ) + tsne.fit_transform(X) + kl_divergences.append(tsne.kl_divergence_) + assert kl_divergences[1] <= kl_divergences[0] + assert kl_divergences[2] <= kl_divergences[1] + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_fit_transform_csr_matrix(method, csr_container): + # TODO: compare results on dense and sparse data as proposed in: + # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186 + # X can be a sparse matrix. + rng = check_random_state(0) + X = rng.randn(50, 2) + X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0 + X_csr = csr_container(X) + tsne = TSNE( + n_components=2, + init="random", + perplexity=10, + learning_rate=100.0, + random_state=0, + method=method, + n_iter=750, + ) + X_embedded = tsne.fit_transform(X_csr) + assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, rtol=1.1e-1) + + +def test_preserve_trustworthiness_approximately_with_precomputed_distances(): + # Nearest neighbors should be preserved approximately. 
+ random_state = check_random_state(0) + for i in range(3): + X = random_state.randn(80, 2) + D = squareform(pdist(X), "sqeuclidean") + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + early_exaggeration=2.0, + metric="precomputed", + random_state=i, + verbose=0, + n_iter=500, + init="random", + ) + X_embedded = tsne.fit_transform(D) + t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed") + assert t > 0.95 + + +def test_trustworthiness_not_euclidean_metric(): + # Test trustworthiness with a metric different from 'euclidean' and + # 'precomputed' + random_state = check_random_state(0) + X = random_state.randn(100, 2) + assert trustworthiness(X, X, metric="cosine") == trustworthiness( + pairwise_distances(X, metric="cosine"), X, metric="precomputed" + ) + + +@pytest.mark.parametrize( + "method, retype", + [ + ("exact", np.asarray), + ("barnes_hut", np.asarray), + *[("barnes_hut", csr_container) for csr_container in CSR_CONTAINERS], + ], +) +@pytest.mark.parametrize( + "D, message_regex", + [ + ([[0.0], [1.0]], ".* square distance matrix"), + ([[0.0, -1.0], [1.0, 0.0]], ".* positive.*"), + ], +) +def test_bad_precomputed_distances(method, D, retype, message_regex): + tsne = TSNE( + metric="precomputed", + method=method, + init="random", + random_state=42, + perplexity=1, + ) + with pytest.raises(ValueError, match=message_regex): + tsne.fit_transform(retype(D)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_exact_no_precomputed_sparse(csr_container): + tsne = TSNE( + metric="precomputed", + method="exact", + init="random", + random_state=42, + perplexity=1, + ) + with pytest.raises(TypeError, match="sparse"): + tsne.fit_transform(csr_container([[0, 5], [5, 0]])) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_high_perplexity_precomputed_sparse_distances(csr_container): + # Perplexity should be less than 50 + dist = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) + 
bad_dist = csr_container(dist) + tsne = TSNE(metric="precomputed", init="random", random_state=42, perplexity=1) + msg = "3 neighbors per samples are required, but some samples have only 1" + with pytest.raises(ValueError, match=msg): + tsne.fit_transform(bad_dist) + + +@ignore_warnings(category=EfficiencyWarning) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS) +def test_sparse_precomputed_distance(sparse_container): + """Make sure that TSNE works identically for sparse and dense matrix""" + random_state = check_random_state(0) + X = random_state.randn(100, 2) + + D_sparse = kneighbors_graph(X, n_neighbors=100, mode="distance", include_self=True) + D = pairwise_distances(X) + assert sp.issparse(D_sparse) + assert_almost_equal(D_sparse.toarray(), D) + + tsne = TSNE( + metric="precomputed", random_state=0, init="random", learning_rate="auto" + ) + Xt_dense = tsne.fit_transform(D) + + Xt_sparse = tsne.fit_transform(sparse_container(D_sparse)) + assert_almost_equal(Xt_dense, Xt_sparse) + + +def test_non_positive_computed_distances(): + # Computed distance matrices must be positive. 
+ def metric(x, y): + return -1 + + # Negative computed distances should be caught even if result is squared + tsne = TSNE(metric=metric, method="exact", perplexity=1) + X = np.array([[0.0, 0.0], [1.0, 1.0]]) + with pytest.raises(ValueError, match="All distances .*metric given.*"): + tsne.fit_transform(X) + + +def test_init_ndarray(): + # Initialize TSNE with ndarray and test fit + tsne = TSNE(init=np.zeros((100, 2)), learning_rate="auto") + X_embedded = tsne.fit_transform(np.ones((100, 5))) + assert_array_equal(np.zeros((100, 2)), X_embedded) + + +def test_init_ndarray_precomputed(): + # Initialize TSNE with ndarray and metric 'precomputed' + # Make sure no FutureWarning is thrown from _fit + tsne = TSNE( + init=np.zeros((100, 2)), + metric="precomputed", + learning_rate=50.0, + ) + tsne.fit(np.zeros((100, 100))) + + +def test_pca_initialization_not_compatible_with_precomputed_kernel(): + # Precomputed distance matrices cannot use PCA initialization. + tsne = TSNE(metric="precomputed", init="pca", perplexity=1) + with pytest.raises( + ValueError, + match='The parameter init="pca" cannot be used with metric="precomputed".', + ): + tsne.fit_transform(np.array([[0.0], [1.0]])) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pca_initialization_not_compatible_with_sparse_input(csr_container): + # Sparse input matrices cannot use PCA initialization. 
+ tsne = TSNE(init="pca", learning_rate=100.0, perplexity=1) + with pytest.raises(TypeError, match="PCA initialization.*"): + tsne.fit_transform(csr_container([[0, 5], [5, 0]])) + + +def test_n_components_range(): + # barnes_hut method should only be used with n_components <= 3 + tsne = TSNE(n_components=4, method="barnes_hut", perplexity=1) + with pytest.raises(ValueError, match="'n_components' should be .*"): + tsne.fit_transform(np.array([[0.0], [1.0]])) + + +def test_early_exaggeration_used(): + # check that the ``early_exaggeration`` parameter has an effect + random_state = check_random_state(0) + n_components = 2 + methods = ["exact", "barnes_hut"] + X = random_state.randn(25, n_components).astype(np.float32) + for method in methods: + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=100.0, + init="pca", + random_state=0, + method=method, + early_exaggeration=1.0, + n_iter=250, + ) + X_embedded1 = tsne.fit_transform(X) + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=100.0, + init="pca", + random_state=0, + method=method, + early_exaggeration=10.0, + n_iter=250, + ) + X_embedded2 = tsne.fit_transform(X) + + assert not np.allclose(X_embedded1, X_embedded2) + + +def test_n_iter_used(): + # check that the ``n_iter`` parameter has an effect + random_state = check_random_state(0) + n_components = 2 + methods = ["exact", "barnes_hut"] + X = random_state.randn(25, n_components).astype(np.float32) + for method in methods: + for n_iter in [251, 500]: + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=0.5, + init="random", + random_state=0, + method=method, + early_exaggeration=1.0, + n_iter=n_iter, + ) + tsne.fit_transform(X) + + assert tsne.n_iter_ == n_iter - 1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_answer_gradient_two_points(csr_container): + # Test the tree with only a single set of children. 
+ # + # These tests & answers have been checked against the reference + # implementation by LvdM. + pos_input = np.array([[1.0, 0.0], [0.0, 1.0]]) + pos_output = np.array( + [[-4.961291e-05, -1.072243e-04], [9.259460e-05, 2.702024e-04]] + ) + neighbors = np.array([[1], [0]]) + grad_output = np.array( + [[-2.37012478e-05, -6.29044398e-05], [2.37012478e-05, 6.29044398e-05]] + ) + _run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_answer_gradient_four_points(csr_container): + # Four points tests the tree with multiple levels of children. + # + # These tests & answers have been checked against the reference + # implementation by LvdM. + pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) + pos_output = np.array( + [ + [6.080564e-05, -7.120823e-05], + [-1.718945e-04, -4.000536e-05], + [-2.271720e-04, 8.663310e-05], + [-1.032577e-04, -3.582033e-05], + ] + ) + neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) + grad_output = np.array( + [ + [5.81128448e-05, -7.78033454e-06], + [-5.81526851e-05, 7.80976444e-06], + [4.24275173e-08, -3.69569698e-08], + [-2.58720939e-09, 7.52706374e-09], + ] + ) + _run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_skip_num_points_gradient(csr_container): + # Test the kwargs option skip_num_points. + # + # Skip num points should make it such that the Barnes_hut gradient + # is not calculated for indices below skip_num_point. 
+ # Aside from skip_num_points=2 and the first two gradient rows + # being set to zero, these data points are the same as in + # test_answer_gradient_four_points() + pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) + pos_output = np.array( + [ + [6.080564e-05, -7.120823e-05], + [-1.718945e-04, -4.000536e-05], + [-2.271720e-04, 8.663310e-05], + [-1.032577e-04, -3.582033e-05], + ] + ) + neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) + grad_output = np.array( + [ + [0.0, 0.0], + [0.0, 0.0], + [4.24275173e-08, -3.69569698e-08], + [-2.58720939e-09, 7.52706374e-09], + ] + ) + _run_answer_test( + pos_input, pos_output, neighbors, grad_output, csr_container, False, 0.1, 2 + ) + + +def _run_answer_test( + pos_input, + pos_output, + neighbors, + grad_output, + csr_container, + verbose=False, + perplexity=0.1, + skip_num_points=0, +): + distances = pairwise_distances(pos_input).astype(np.float32) + args = distances, perplexity, verbose + pos_output = pos_output.astype(np.float32) + neighbors = neighbors.astype(np.int64, copy=False) + pij_input = _joint_probabilities(*args) + pij_input = squareform(pij_input).astype(np.float32) + grad_bh = np.zeros(pos_output.shape, dtype=np.float32) + + P = csr_container(pij_input) + + neighbors = P.indices.astype(np.int64) + indptr = P.indptr.astype(np.int64) + + _barnes_hut_tsne.gradient( + P.data, pos_output, neighbors, indptr, grad_bh, 0.5, 2, 1, skip_num_points=0 + ) + assert_array_almost_equal(grad_bh, grad_output, decimal=4) + + +def test_verbose(): + # Verbose options write to stdout. + random_state = check_random_state(0) + tsne = TSNE(verbose=2, perplexity=4) + X = random_state.randn(5, 2) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + assert "[t-SNE]" in out + assert "nearest neighbors..." 
in out + assert "Computed conditional probabilities" in out + assert "Mean sigma" in out + assert "early exaggeration" in out + + +def test_chebyshev_metric(): + # t-SNE should allow metrics that cannot be squared (issue #3526). + random_state = check_random_state(0) + tsne = TSNE(metric="chebyshev", perplexity=4) + X = random_state.randn(5, 2) + tsne.fit_transform(X) + + +def test_reduction_to_one_component(): + # t-SNE should allow reduction to one component (issue #4154). + random_state = check_random_state(0) + tsne = TSNE(n_components=1, perplexity=4) + X = random_state.randn(5, 2) + X_embedded = tsne.fit(X).embedding_ + assert np.all(np.isfinite(X_embedded)) + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +@pytest.mark.parametrize("dt", [np.float32, np.float64]) +def test_64bit(method, dt): + # Ensure 64bit arrays are handled correctly. + random_state = check_random_state(0) + + X = random_state.randn(10, 2).astype(dt, copy=False) + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + random_state=0, + method=method, + verbose=0, + n_iter=300, + init="random", + ) + X_embedded = tsne.fit_transform(X) + effective_type = X_embedded.dtype + + # tsne cython code is only single precision, so the output will + # always be single precision, irrespectively of the input dtype + assert effective_type == np.float32 + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_kl_divergence_not_nan(method): + # Ensure kl_divergence_ is computed at last iteration + # even though n_iter % n_iter_check != 0, i.e. 1003 % 50 != 0 + random_state = check_random_state(0) + + X = random_state.randn(50, 2) + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + random_state=0, + method=method, + verbose=0, + n_iter=503, + init="random", + ) + tsne.fit_transform(X) + + assert not np.isnan(tsne.kl_divergence_) + + +def test_barnes_hut_angle(): + # When Barnes-Hut's angle=0 this corresponds to the exact method. 
+ angle = 0.0 + perplexity = 10 + n_samples = 100 + for n_components in [2, 3]: + n_features = 5 + degrees_of_freedom = float(n_components - 1.0) + + random_state = check_random_state(0) + data = random_state.randn(n_samples, n_features) + distances = pairwise_distances(data) + params = random_state.randn(n_samples, n_components) + P = _joint_probabilities(distances, perplexity, verbose=0) + kl_exact, grad_exact = _kl_divergence( + params, P, degrees_of_freedom, n_samples, n_components + ) + + n_neighbors = n_samples - 1 + distances_csr = ( + NearestNeighbors() + .fit(data) + .kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + ) + P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0) + kl_bh, grad_bh = _kl_divergence_bh( + params, + P_bh, + degrees_of_freedom, + n_samples, + n_components, + angle=angle, + skip_num_points=0, + verbose=0, + ) + + P = squareform(P) + P_bh = P_bh.toarray() + assert_array_almost_equal(P_bh, P, decimal=5) + assert_almost_equal(kl_exact, kl_bh, decimal=3) + + +@skip_if_32bit +def test_n_iter_without_progress(): + # Use a dummy negative n_iter_without_progress and check output on stdout + random_state = check_random_state(0) + X = random_state.randn(100, 10) + for method in ["barnes_hut", "exact"]: + tsne = TSNE( + n_iter_without_progress=-1, + verbose=2, + learning_rate=1e8, + random_state=0, + method=method, + n_iter=351, + init="random", + ) + tsne._N_ITER_CHECK = 1 + tsne._EXPLORATION_N_ITER = 0 + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + # The output needs to contain the value of n_iter_without_progress + assert "did not make any progress during the last -1 episodes. Finished." 
in out + + +def test_min_grad_norm(): + # Make sure that the parameter min_grad_norm is used correctly + random_state = check_random_state(0) + X = random_state.randn(100, 2) + min_grad_norm = 0.002 + tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2, random_state=0, method="exact") + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + lines_out = out.split("\n") + + # extract the gradient norm from the verbose output + gradient_norm_values = [] + for line in lines_out: + # When the computation is Finished just an old gradient norm value + # is repeated that we do not need to store + if "Finished" in line: + break + + start_grad_norm = line.find("gradient norm") + if start_grad_norm >= 0: + line = line[start_grad_norm:] + line = line.replace("gradient norm = ", "").split(" ")[0] + gradient_norm_values.append(float(line)) + + # Compute how often the gradient norm is smaller than min_grad_norm + gradient_norm_values = np.array(gradient_norm_values) + n_smaller_gradient_norms = len( + gradient_norm_values[gradient_norm_values <= min_grad_norm] + ) + + # The gradient norm can be smaller than min_grad_norm at most once, + # because in the moment it becomes smaller the optimization stops + assert n_smaller_gradient_norms <= 1 + + +def test_accessible_kl_divergence(): + # Ensures that the accessible kl_divergence matches the computed value + random_state = check_random_state(0) + X = random_state.randn(50, 2) + tsne = TSNE( + n_iter_without_progress=2, verbose=2, random_state=0, method="exact", n_iter=500 + ) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + # The output needs to contain the accessible kl_divergence as the error at + # the last iteration + for line in out.split("\n")[::-1]: + if "Iteration" in line: + _, _, 
error = line.partition("error = ") + if error: + error, _, _ = error.partition(",") + break + assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5) + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_uniform_grid(method): + """Make sure that TSNE can approximately recover a uniform 2D grid + + Due to ties in distances between point in X_2d_grid, this test is platform + dependent for ``method='barnes_hut'`` due to numerical imprecision. + + Also, t-SNE is not assured to converge to the right solution because bad + initialization can lead to convergence to bad local minimum (the + optimization problem is non-convex). To avoid breaking the test too often, + we re-run t-SNE from the final point when the convergence is not good + enough. + """ + seeds = range(3) + n_iter = 500 + for seed in seeds: + tsne = TSNE( + n_components=2, + init="random", + random_state=seed, + perplexity=50, + n_iter=n_iter, + method=method, + learning_rate="auto", + ) + Y = tsne.fit_transform(X_2d_grid) + + try_name = "{}_{}".format(method, seed) + try: + assert_uniform_grid(Y, try_name) + except AssertionError: + # If the test fails a first time, re-run with init=Y to see if + # this was caused by a bad initialization. Note that this will + # also run an early_exaggeration step. + try_name += ":rerun" + tsne.init = Y + Y = tsne.fit_transform(X_2d_grid) + assert_uniform_grid(Y, try_name) + + +def assert_uniform_grid(Y, try_name=None): + # Ensure that the resulting embedding leads to approximately + # uniformly spaced points: the distance to the closest neighbors + # should be non-zero and approximately constant. 
+ nn = NearestNeighbors(n_neighbors=1).fit(Y) + dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel() + assert dist_to_nn.min() > 0.1 + + smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn) + largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn) + + assert smallest_to_mean > 0.5, try_name + assert largest_to_mean < 2, try_name + + +def test_bh_match_exact(): + # check that the ``barnes_hut`` method match the exact one when + # ``angle = 0`` and ``perplexity > n_samples / 3`` + random_state = check_random_state(0) + n_features = 10 + X = random_state.randn(30, n_features).astype(np.float32) + X_embeddeds = {} + n_iter = {} + for method in ["exact", "barnes_hut"]: + tsne = TSNE( + n_components=2, + method=method, + learning_rate=1.0, + init="random", + random_state=0, + n_iter=251, + perplexity=29.5, + angle=0, + ) + # Kill the early_exaggeration + tsne._EXPLORATION_N_ITER = 0 + X_embeddeds[method] = tsne.fit_transform(X) + n_iter[method] = tsne.n_iter_ + + assert n_iter["exact"] == n_iter["barnes_hut"] + assert_allclose(X_embeddeds["exact"], X_embeddeds["barnes_hut"], rtol=1e-4) + + +def test_gradient_bh_multithread_match_sequential(): + # check that the bh gradient with different num_threads gives the same + # results + + n_features = 10 + n_samples = 30 + n_components = 2 + degrees_of_freedom = 1 + + angle = 3 + perplexity = 5 + + random_state = check_random_state(0) + data = random_state.randn(n_samples, n_features).astype(np.float32) + params = random_state.randn(n_samples, n_components) + + n_neighbors = n_samples - 1 + distances_csr = ( + NearestNeighbors() + .fit(data) + .kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + ) + P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0) + kl_sequential, grad_sequential = _kl_divergence_bh( + params, + P_bh, + degrees_of_freedom, + n_samples, + n_components, + angle=angle, + skip_num_points=0, + verbose=0, + num_threads=1, + ) + for num_threads in [2, 4]: + kl_multithread, 
grad_multithread = _kl_divergence_bh( + params, + P_bh, + degrees_of_freedom, + n_samples, + n_components, + angle=angle, + skip_num_points=0, + verbose=0, + num_threads=num_threads, + ) + + assert_allclose(kl_multithread, kl_sequential, rtol=1e-6) + assert_allclose(grad_multithread, grad_multithread) + + +@pytest.mark.parametrize( + "metric, dist_func", + [("manhattan", manhattan_distances), ("cosine", cosine_distances)], +) +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_tsne_with_different_distance_metrics(metric, dist_func, method): + """Make sure that TSNE works for different distance metrics""" + + if method == "barnes_hut" and metric == "manhattan": + # The distances computed by `manhattan_distances` differ slightly from those + # computed internally by NearestNeighbors via the PairwiseDistancesReduction + # Cython code-based. This in turns causes T-SNE to converge to a different + # solution but this should not impact the qualitative results as both + # methods. + # NOTE: it's probably not valid from a mathematical point of view to use the + # Manhattan distance for T-SNE... + # TODO: re-enable this test if/when `manhattan_distances` is refactored to + # reuse the same underlying Cython code NearestNeighbors. + # For reference, see: + # https://github.com/scikit-learn/scikit-learn/pull/23865/files#r925721573 + pytest.xfail( + "Distance computations are different for method == 'barnes_hut' and metric" + " == 'manhattan', but this is expected." 
+ ) + + random_state = check_random_state(0) + n_components_original = 3 + n_components_embedding = 2 + X = random_state.randn(50, n_components_original).astype(np.float32) + X_transformed_tsne = TSNE( + metric=metric, + method=method, + n_components=n_components_embedding, + random_state=0, + n_iter=300, + init="random", + learning_rate="auto", + ).fit_transform(X) + X_transformed_tsne_precomputed = TSNE( + metric="precomputed", + method=method, + n_components=n_components_embedding, + random_state=0, + n_iter=300, + init="random", + learning_rate="auto", + ).fit_transform(dist_func(X)) + assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed) + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +def test_tsne_n_jobs(method): + """Make sure that the n_jobs parameter doesn't impact the output""" + random_state = check_random_state(0) + n_features = 10 + X = random_state.randn(30, n_features) + X_tr_ref = TSNE( + n_components=2, + method=method, + perplexity=25.0, + angle=0, + n_jobs=1, + random_state=0, + init="random", + learning_rate="auto", + ).fit_transform(X) + X_tr = TSNE( + n_components=2, + method=method, + perplexity=25.0, + angle=0, + n_jobs=2, + random_state=0, + init="random", + learning_rate="auto", + ).fit_transform(X) + + assert_allclose(X_tr_ref, X_tr) + + +def test_tsne_with_mahalanobis_distance(): + """Make sure that method_parameters works with mahalanobis distance.""" + random_state = check_random_state(0) + n_samples, n_features = 300, 10 + X = random_state.randn(n_samples, n_features) + default_params = { + "perplexity": 40, + "n_iter": 250, + "learning_rate": "auto", + "init": "random", + "n_components": 3, + "random_state": 0, + } + + tsne = TSNE(metric="mahalanobis", **default_params) + msg = "Must provide either V or VI for Mahalanobis distance" + with pytest.raises(ValueError, match=msg): + tsne.fit_transform(X) + + precomputed_X = squareform(pdist(X, metric="mahalanobis"), checks=True) + X_trans_expected = 
TSNE(metric="precomputed", **default_params).fit_transform( + precomputed_X + ) + + X_trans = TSNE( + metric="mahalanobis", metric_params={"V": np.cov(X.T)}, **default_params + ).fit_transform(X) + assert_allclose(X_trans, X_trans_expected) + + +@pytest.mark.parametrize("perplexity", (20, 30)) +def test_tsne_perplexity_validation(perplexity): + """Make sure that perplexity > n_samples results in a ValueError""" + + random_state = check_random_state(0) + X = random_state.randn(20, 2) + est = TSNE( + learning_rate="auto", + init="pca", + perplexity=perplexity, + random_state=random_state, + ) + msg = "perplexity must be less than n_samples" + with pytest.raises(ValueError, match=msg): + est.fit_transform(X) + + +def test_tsne_works_with_pandas_output(): + """Make sure that TSNE works when the output is set to "pandas". + + Non-regression test for gh-25365. + """ + pytest.importorskip("pandas") + with config_context(transform_output="pandas"): + arr = np.arange(35 * 4).reshape(35, 4) + TSNE(n_components=2).fit_transform(arr)