applied-ai-018 committed
Commit 68c2db5 · verified · 1 parent: af78e4f

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/fp32.pt +3 -0
  7. venv/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/sklearn/_loss/__init__.py +30 -0
  26. venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd +91 -0
  30. venv/lib/python3.10/site-packages/sklearn/_loss/loss.py +1177 -0
  31. venv/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py +111 -0
  36. venv/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py +1320 -0
  37. venv/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py +47 -0
  38. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/sklearn/feature_selection/_base.py +266 -0
  47. venv/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py +522 -0
  48. venv/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py +514 -0
  49. venv/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py +792 -0
  50. venv/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py +300 -0
ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfc48c3605ebf51ccaa680d945c26d945ff04748a7f8314b1eac44ffb0122c0e
+ size 33555627
ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fa8aea791a8fa19fe4c81803aa90b8a0b9931f3154aaaa230fe73b237bed222
+ size 33555612
ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0eef265b39db5f3ad5f42b032f58f2480311e771ddb6083c2857d448b6c3f55
+ size 33555627
ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63925f8ed3bf866c5203925dcfadb544297213c3a0b2a6ec9bfed86c046e9af7
+ size 33555533
ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:120466dfe5c6ca6778d6ffd4b8e8f4c81a7df365a3a26d1c289c48a640b1b181
+ size 9387
ckpts/universal/global_step40/zero/25.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53ef0f9fd80e3e2457de9647681c3729516278db8c98967c7188423b479fec25
+ size 9293
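All six checkpoint files above are tracked with Git LFS, so the commit only adds small pointer files (spec version, SHA-256 object id, byte size) rather than the tensors themselves. The paths follow the usual DeepSpeed universal-checkpoint layout, where each parameter directory holds fp32 master weights plus Adam moment estimates (exp_avg, exp_avg_sq). A minimal sketch for inspecting one of these shards, assuming the LFS objects have actually been pulled and that each .pt file is a plain torch-saved object (both assumptions, not verifiable from this diff):

import torch

# Hypothetical local path, copied from the file list above.
base = "ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h_swiglu.weight"

for name in ("fp32", "exp_avg", "exp_avg_sq"):
    obj = torch.load(f"{base}/{name}.pt", map_location="cpu")
    # If the object is a tensor, report its shape; otherwise just its type.
    print(name, getattr(obj, "shape", type(obj)))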
venv/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.06 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc ADDED
Binary file (11.9 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc ADDED
Binary file (537 Bytes)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc ADDED
Binary file (1.97 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc ADDED
Binary file (46.6 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc ADDED
Binary file (40.9 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (7.67 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc ADDED
Binary file (32.3 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc ADDED
Binary file (20.2 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (6.99 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc ADDED
Binary file (14.4 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc ADDED
Binary file (35.5 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc ADDED
Binary file (9.04 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc ADDED
Binary file (38.9 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc ADDED
Binary file (37.4 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc ADDED
Binary file (49.8 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (58.6 kB)
 
venv/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc ADDED
Binary file (26 kB)
 
venv/lib/python3.10/site-packages/sklearn/_loss/__init__.py ADDED
@@ -0,0 +1,30 @@
+ """
+ The :mod:`sklearn._loss` module includes loss function classes suitable for
+ fitting classification and regression tasks.
+ """
+
+ from .loss import (
+     AbsoluteError,
+     HalfBinomialLoss,
+     HalfGammaLoss,
+     HalfMultinomialLoss,
+     HalfPoissonLoss,
+     HalfSquaredError,
+     HalfTweedieLoss,
+     HalfTweedieLossIdentity,
+     HuberLoss,
+     PinballLoss,
+ )
+
+ __all__ = [
+     "HalfSquaredError",
+     "AbsoluteError",
+     "PinballLoss",
+     "HuberLoss",
+     "HalfPoissonLoss",
+     "HalfGammaLoss",
+     "HalfTweedieLoss",
+     "HalfTweedieLossIdentity",
+     "HalfBinomialLoss",
+     "HalfMultinomialLoss",
+ ]
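For orientation, a short sketch of how the classes re-exported here are used (sklearn._loss is a private module, so this is illustration only; the call signatures are taken from the BaseLoss API shown further down in loss.py):

import numpy as np
from sklearn._loss import HalfSquaredError, PinballLoss

y_true = np.array([0.5, 1.0, 2.0])
raw_prediction = np.array([0.4, 1.2, 1.9])

# Calling a loss object returns the (weighted) mean loss; gradient() returns
# the per-sample gradients w.r.t. raw_prediction.
sq = HalfSquaredError()
print(sq(y_true=y_true, raw_prediction=raw_prediction))
print(sq.gradient(y_true=y_true, raw_prediction=raw_prediction))

# PinballLoss takes its quantile level at construction time.
pin = PinballLoss(quantile=0.8)
print(pin(y_true=y_true, raw_prediction=raw_prediction))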
venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (669 Bytes)
 
venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc ADDED
Binary file (8.96 kB)
 
venv/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc ADDED
Binary file (33.9 kB)
 
venv/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd ADDED
@@ -0,0 +1,91 @@
+ # Fused types for input like y_true, raw_prediction, sample_weights.
+ ctypedef fused floating_in:
+     double
+     float
+
+
+ # Fused types for output like gradient and hessian
+ # We use a different fused types for input (floating_in) and output (floating_out), such
+ # that input and output can have different dtypes in the same function call. A single
+ # fused type can only take on one single value (type) for all arguments in one function
+ # call.
+ ctypedef fused floating_out:
+     double
+     float
+
+
+ # Struct to return 2 doubles
+ ctypedef struct double_pair:
+     double val1
+     double val2
+
+
+ # C base class for loss functions
+ cdef class CyLossFunction:
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyHalfSquaredError(CyLossFunction):
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyAbsoluteError(CyLossFunction):
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyPinballLoss(CyLossFunction):
+     cdef readonly double quantile  # readonly makes it accessible from Python
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyHuberLoss(CyLossFunction):
+     cdef public double delta  # public makes it accessible from Python
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyHalfPoissonLoss(CyLossFunction):
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyHalfGammaLoss(CyLossFunction):
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyHalfTweedieLoss(CyLossFunction):
+     cdef readonly double power  # readonly makes it accessible from Python
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyHalfTweedieLossIdentity(CyLossFunction):
+     cdef readonly double power  # readonly makes it accessible from Python
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyHalfBinomialLoss(CyLossFunction):
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
+
+
+ cdef class CyExponentialLoss(CyLossFunction):
+     cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
+     cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
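These declarations only fix the per-sample interface: cy_loss and cy_gradient return a double, while cy_grad_hess packs gradient and hessian into the double_pair struct. As a rough pure-Python analogue of what the compiled CyHalfSquaredError provides (a sketch for illustration, using the half squared error formula documented in loss.py, not the actual extension code):

def cy_loss(y_true: float, raw_prediction: float) -> float:
    # Half squared error: 0.5 * (y_true - raw_prediction)**2
    return 0.5 * (raw_prediction - y_true) ** 2

def cy_gradient(y_true: float, raw_prediction: float) -> float:
    # Derivative of the loss above w.r.t. raw_prediction
    return raw_prediction - y_true

def cy_grad_hess(y_true: float, raw_prediction: float) -> tuple:
    # Plays the role of double_pair: (val1, val2) = (gradient, hessian);
    # the hessian of half squared error is constant 1.
    return raw_prediction - y_true, 1.0

assert cy_grad_hess(1.0, 1.5) == (0.5, 1.0)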
venv/lib/python3.10/site-packages/sklearn/_loss/loss.py ADDED
@@ -0,0 +1,1177 @@
1
+ """
2
+ This module contains loss classes suitable for fitting.
3
+
4
+ It is not part of the public API.
5
+ Specific losses are used for regression, binary classification or multiclass
6
+ classification.
7
+ """
8
+ # Goals:
9
+ # - Provide a common private module for loss functions/classes.
10
+ # - To be used in:
11
+ # - LogisticRegression
12
+ # - PoissonRegressor, GammaRegressor, TweedieRegressor
13
+ # - HistGradientBoostingRegressor, HistGradientBoostingClassifier
14
+ # - GradientBoostingRegressor, GradientBoostingClassifier
15
+ # - SGDRegressor, SGDClassifier
16
+ # - Replace link module of GLMs.
17
+
18
+ import numbers
19
+
20
+ import numpy as np
21
+ from scipy.special import xlogy
22
+
23
+ from ..utils import check_scalar
24
+ from ..utils.stats import _weighted_percentile
25
+ from ._loss import (
26
+ CyAbsoluteError,
27
+ CyExponentialLoss,
28
+ CyHalfBinomialLoss,
29
+ CyHalfGammaLoss,
30
+ CyHalfMultinomialLoss,
31
+ CyHalfPoissonLoss,
32
+ CyHalfSquaredError,
33
+ CyHalfTweedieLoss,
34
+ CyHalfTweedieLossIdentity,
35
+ CyHuberLoss,
36
+ CyPinballLoss,
37
+ )
38
+ from .link import (
39
+ HalfLogitLink,
40
+ IdentityLink,
41
+ Interval,
42
+ LogitLink,
43
+ LogLink,
44
+ MultinomialLogit,
45
+ )
46
+
47
+
48
+ # Note: The shape of raw_prediction for multiclass classifications are
49
+ # - GradientBoostingClassifier: (n_samples, n_classes)
50
+ # - HistGradientBoostingClassifier: (n_classes, n_samples)
51
+ #
52
+ # Note: Instead of inheritance like
53
+ #
54
+ # class BaseLoss(BaseLink, CyLossFunction):
55
+ # ...
56
+ #
57
+ # # Note: Naturally, we would inherit in the following order
58
+ # # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
59
+ # # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as
60
+ # # the last one. This, of course, changes the MRO.
61
+ # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss):
62
+ #
63
+ # we use composition. This way we improve maintainability by avoiding the above
64
+ # mentioned Cython edge case and have easier to understand code (which method calls
65
+ # which code).
66
+ class BaseLoss:
67
+ """Base class for a loss function of 1-dimensional targets.
68
+
69
+ Conventions:
70
+
71
+ - y_true.shape = sample_weight.shape = (n_samples,)
72
+ - y_pred.shape = raw_prediction.shape = (n_samples,)
73
+ - If is_multiclass is true (multiclass classification), then
74
+ y_pred.shape = raw_prediction.shape = (n_samples, n_classes)
75
+ Note that this corresponds to the return value of decision_function.
76
+
77
+ y_true, y_pred, sample_weight and raw_prediction must either be all float64
78
+ or all float32.
79
+ gradient and hessian must be either both float64 or both float32.
80
+
81
+ Note that y_pred = link.inverse(raw_prediction).
82
+
83
+ Specific loss classes can inherit specific link classes to satisfy
84
+ BaseLink's abstractmethods.
85
+
86
+ Parameters
87
+ ----------
88
+ sample_weight : {None, ndarray}
89
+ If sample_weight is None, the hessian might be constant.
90
+ n_classes : {None, int}
91
+ The number of classes for classification, else None.
92
+
93
+ Attributes
94
+ ----------
95
+ closs: CyLossFunction
96
+ link : BaseLink
97
+ interval_y_true : Interval
98
+ Valid interval for y_true
99
+ interval_y_pred : Interval
100
+ Valid Interval for y_pred
101
+ differentiable : bool
102
+ Indicates whether or not loss function is differentiable in
103
+ raw_prediction everywhere.
104
+ need_update_leaves_values : bool
105
+ Indicates whether decision trees in gradient boosting need to uptade
106
+ leave values after having been fit to the (negative) gradients.
107
+ approx_hessian : bool
108
+ Indicates whether the hessian is approximated or exact. If,
109
+ approximated, it should be larger or equal to the exact one.
110
+ constant_hessian : bool
111
+ Indicates whether the hessian is one for this loss.
112
+ is_multiclass : bool
113
+ Indicates whether n_classes > 2 is allowed.
114
+ """
115
+
116
+ # For gradient boosted decision trees:
117
+ # This variable indicates whether the loss requires the leaves values to
118
+ # be updated once the tree has been trained. The trees are trained to
119
+ # predict a Newton-Raphson step (see grower._finalize_leaf()). But for
120
+ # some losses (e.g. least absolute deviation) we need to adjust the tree
121
+ # values to account for the "line search" of the gradient descent
122
+ # procedure. See the original paper Greedy Function Approximation: A
123
+ # Gradient Boosting Machine by Friedman
124
+ # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.
125
+ differentiable = True
126
+ need_update_leaves_values = False
127
+ is_multiclass = False
128
+
129
+ def __init__(self, closs, link, n_classes=None):
130
+ self.closs = closs
131
+ self.link = link
132
+ self.approx_hessian = False
133
+ self.constant_hessian = False
134
+ self.n_classes = n_classes
135
+ self.interval_y_true = Interval(-np.inf, np.inf, False, False)
136
+ self.interval_y_pred = self.link.interval_y_pred
137
+
138
+ def in_y_true_range(self, y):
139
+ """Return True if y is in the valid range of y_true.
140
+
141
+ Parameters
142
+ ----------
143
+ y : ndarray
144
+ """
145
+ return self.interval_y_true.includes(y)
146
+
147
+ def in_y_pred_range(self, y):
148
+ """Return True if y is in the valid range of y_pred.
149
+
150
+ Parameters
151
+ ----------
152
+ y : ndarray
153
+ """
154
+ return self.interval_y_pred.includes(y)
155
+
156
+ def loss(
157
+ self,
158
+ y_true,
159
+ raw_prediction,
160
+ sample_weight=None,
161
+ loss_out=None,
162
+ n_threads=1,
163
+ ):
164
+ """Compute the pointwise loss value for each input.
165
+
166
+ Parameters
167
+ ----------
168
+ y_true : C-contiguous array of shape (n_samples,)
169
+ Observed, true target values.
170
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
171
+ shape (n_samples, n_classes)
172
+ Raw prediction values (in link space).
173
+ sample_weight : None or C-contiguous array of shape (n_samples,)
174
+ Sample weights.
175
+ loss_out : None or C-contiguous array of shape (n_samples,)
176
+ A location into which the result is stored. If None, a new array
177
+ might be created.
178
+ n_threads : int, default=1
179
+ Might use openmp thread parallelism.
180
+
181
+ Returns
182
+ -------
183
+ loss : array of shape (n_samples,)
184
+ Element-wise loss function.
185
+ """
186
+ if loss_out is None:
187
+ loss_out = np.empty_like(y_true)
188
+ # Be graceful to shape (n_samples, 1) -> (n_samples,)
189
+ if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
190
+ raw_prediction = raw_prediction.squeeze(1)
191
+
192
+ self.closs.loss(
193
+ y_true=y_true,
194
+ raw_prediction=raw_prediction,
195
+ sample_weight=sample_weight,
196
+ loss_out=loss_out,
197
+ n_threads=n_threads,
198
+ )
199
+ return loss_out
200
+
201
+ def loss_gradient(
202
+ self,
203
+ y_true,
204
+ raw_prediction,
205
+ sample_weight=None,
206
+ loss_out=None,
207
+ gradient_out=None,
208
+ n_threads=1,
209
+ ):
210
+ """Compute loss and gradient w.r.t. raw_prediction for each input.
211
+
212
+ Parameters
213
+ ----------
214
+ y_true : C-contiguous array of shape (n_samples,)
215
+ Observed, true target values.
216
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
217
+ shape (n_samples, n_classes)
218
+ Raw prediction values (in link space).
219
+ sample_weight : None or C-contiguous array of shape (n_samples,)
220
+ Sample weights.
221
+ loss_out : None or C-contiguous array of shape (n_samples,)
222
+ A location into which the loss is stored. If None, a new array
223
+ might be created.
224
+ gradient_out : None or C-contiguous array of shape (n_samples,) or array \
225
+ of shape (n_samples, n_classes)
226
+ A location into which the gradient is stored. If None, a new array
227
+ might be created.
228
+ n_threads : int, default=1
229
+ Might use openmp thread parallelism.
230
+
231
+ Returns
232
+ -------
233
+ loss : array of shape (n_samples,)
234
+ Element-wise loss function.
235
+
236
+ gradient : array of shape (n_samples,) or (n_samples, n_classes)
237
+ Element-wise gradients.
238
+ """
239
+ if loss_out is None:
240
+ if gradient_out is None:
241
+ loss_out = np.empty_like(y_true)
242
+ gradient_out = np.empty_like(raw_prediction)
243
+ else:
244
+ loss_out = np.empty_like(y_true, dtype=gradient_out.dtype)
245
+ elif gradient_out is None:
246
+ gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype)
247
+
248
+ # Be graceful to shape (n_samples, 1) -> (n_samples,)
249
+ if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
250
+ raw_prediction = raw_prediction.squeeze(1)
251
+ if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
252
+ gradient_out = gradient_out.squeeze(1)
253
+
254
+ self.closs.loss_gradient(
255
+ y_true=y_true,
256
+ raw_prediction=raw_prediction,
257
+ sample_weight=sample_weight,
258
+ loss_out=loss_out,
259
+ gradient_out=gradient_out,
260
+ n_threads=n_threads,
261
+ )
262
+ return loss_out, gradient_out
263
+
264
+ def gradient(
265
+ self,
266
+ y_true,
267
+ raw_prediction,
268
+ sample_weight=None,
269
+ gradient_out=None,
270
+ n_threads=1,
271
+ ):
272
+ """Compute gradient of loss w.r.t raw_prediction for each input.
273
+
274
+ Parameters
275
+ ----------
276
+ y_true : C-contiguous array of shape (n_samples,)
277
+ Observed, true target values.
278
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
279
+ shape (n_samples, n_classes)
280
+ Raw prediction values (in link space).
281
+ sample_weight : None or C-contiguous array of shape (n_samples,)
282
+ Sample weights.
283
+ gradient_out : None or C-contiguous array of shape (n_samples,) or array \
284
+ of shape (n_samples, n_classes)
285
+ A location into which the result is stored. If None, a new array
286
+ might be created.
287
+ n_threads : int, default=1
288
+ Might use openmp thread parallelism.
289
+
290
+ Returns
291
+ -------
292
+ gradient : array of shape (n_samples,) or (n_samples, n_classes)
293
+ Element-wise gradients.
294
+ """
295
+ if gradient_out is None:
296
+ gradient_out = np.empty_like(raw_prediction)
297
+
298
+ # Be graceful to shape (n_samples, 1) -> (n_samples,)
299
+ if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
300
+ raw_prediction = raw_prediction.squeeze(1)
301
+ if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
302
+ gradient_out = gradient_out.squeeze(1)
303
+
304
+ self.closs.gradient(
305
+ y_true=y_true,
306
+ raw_prediction=raw_prediction,
307
+ sample_weight=sample_weight,
308
+ gradient_out=gradient_out,
309
+ n_threads=n_threads,
310
+ )
311
+ return gradient_out
312
+
313
+ def gradient_hessian(
314
+ self,
315
+ y_true,
316
+ raw_prediction,
317
+ sample_weight=None,
318
+ gradient_out=None,
319
+ hessian_out=None,
320
+ n_threads=1,
321
+ ):
322
+ """Compute gradient and hessian of loss w.r.t raw_prediction.
323
+
324
+ Parameters
325
+ ----------
326
+ y_true : C-contiguous array of shape (n_samples,)
327
+ Observed, true target values.
328
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
329
+ shape (n_samples, n_classes)
330
+ Raw prediction values (in link space).
331
+ sample_weight : None or C-contiguous array of shape (n_samples,)
332
+ Sample weights.
333
+ gradient_out : None or C-contiguous array of shape (n_samples,) or array \
334
+ of shape (n_samples, n_classes)
335
+ A location into which the gradient is stored. If None, a new array
336
+ might be created.
337
+ hessian_out : None or C-contiguous array of shape (n_samples,) or array \
338
+ of shape (n_samples, n_classes)
339
+ A location into which the hessian is stored. If None, a new array
340
+ might be created.
341
+ n_threads : int, default=1
342
+ Might use openmp thread parallelism.
343
+
344
+ Returns
345
+ -------
346
+ gradient : arrays of shape (n_samples,) or (n_samples, n_classes)
347
+ Element-wise gradients.
348
+
349
+ hessian : arrays of shape (n_samples,) or (n_samples, n_classes)
350
+ Element-wise hessians.
351
+ """
352
+ if gradient_out is None:
353
+ if hessian_out is None:
354
+ gradient_out = np.empty_like(raw_prediction)
355
+ hessian_out = np.empty_like(raw_prediction)
356
+ else:
357
+ gradient_out = np.empty_like(hessian_out)
358
+ elif hessian_out is None:
359
+ hessian_out = np.empty_like(gradient_out)
360
+
361
+ # Be graceful to shape (n_samples, 1) -> (n_samples,)
362
+ if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
363
+ raw_prediction = raw_prediction.squeeze(1)
364
+ if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
365
+ gradient_out = gradient_out.squeeze(1)
366
+ if hessian_out.ndim == 2 and hessian_out.shape[1] == 1:
367
+ hessian_out = hessian_out.squeeze(1)
368
+
369
+ self.closs.gradient_hessian(
370
+ y_true=y_true,
371
+ raw_prediction=raw_prediction,
372
+ sample_weight=sample_weight,
373
+ gradient_out=gradient_out,
374
+ hessian_out=hessian_out,
375
+ n_threads=n_threads,
376
+ )
377
+ return gradient_out, hessian_out
378
+
379
+ def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1):
380
+ """Compute the weighted average loss.
381
+
382
+ Parameters
383
+ ----------
384
+ y_true : C-contiguous array of shape (n_samples,)
385
+ Observed, true target values.
386
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \
387
+ shape (n_samples, n_classes)
388
+ Raw prediction values (in link space).
389
+ sample_weight : None or C-contiguous array of shape (n_samples,)
390
+ Sample weights.
391
+ n_threads : int, default=1
392
+ Might use openmp thread parallelism.
393
+
394
+ Returns
395
+ -------
396
+ loss : float
397
+ Mean or averaged loss function.
398
+ """
399
+ return np.average(
400
+ self.loss(
401
+ y_true=y_true,
402
+ raw_prediction=raw_prediction,
403
+ sample_weight=None,
404
+ loss_out=None,
405
+ n_threads=n_threads,
406
+ ),
407
+ weights=sample_weight,
408
+ )
409
+
410
+ def fit_intercept_only(self, y_true, sample_weight=None):
411
+ """Compute raw_prediction of an intercept-only model.
412
+
413
+ This can be used as initial estimates of predictions, i.e. before the
414
+ first iteration in fit.
415
+
416
+ Parameters
417
+ ----------
418
+ y_true : array-like of shape (n_samples,)
419
+ Observed, true target values.
420
+ sample_weight : None or array of shape (n_samples,)
421
+ Sample weights.
422
+
423
+ Returns
424
+ -------
425
+ raw_prediction : numpy scalar or array of shape (n_classes,)
426
+ Raw predictions of an intercept-only model.
427
+ """
428
+ # As default, take weighted average of the target over the samples
429
+ # axis=0 and then transform into link-scale (raw_prediction).
430
+ y_pred = np.average(y_true, weights=sample_weight, axis=0)
431
+ eps = 10 * np.finfo(y_pred.dtype).eps
432
+
433
+ if self.interval_y_pred.low == -np.inf:
434
+ a_min = None
435
+ elif self.interval_y_pred.low_inclusive:
436
+ a_min = self.interval_y_pred.low
437
+ else:
438
+ a_min = self.interval_y_pred.low + eps
439
+
440
+ if self.interval_y_pred.high == np.inf:
441
+ a_max = None
442
+ elif self.interval_y_pred.high_inclusive:
443
+ a_max = self.interval_y_pred.high
444
+ else:
445
+ a_max = self.interval_y_pred.high - eps
446
+
447
+ if a_min is None and a_max is None:
448
+ return self.link.link(y_pred)
449
+ else:
450
+ return self.link.link(np.clip(y_pred, a_min, a_max))
451
+
452
+ def constant_to_optimal_zero(self, y_true, sample_weight=None):
453
+ """Calculate term dropped in loss.
454
+
455
+ With this term added, the loss of perfect predictions is zero.
456
+ """
457
+ return np.zeros_like(y_true)
458
+
459
+ def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order="F"):
460
+ """Initialize arrays for gradients and hessians.
461
+
462
+ Unless hessians are constant, arrays are initialized with undefined values.
463
+
464
+ Parameters
465
+ ----------
466
+ n_samples : int
467
+ The number of samples, usually passed to `fit()`.
468
+ dtype : {np.float64, np.float32}, default=np.float64
469
+ The dtype of the arrays gradient and hessian.
470
+ order : {'C', 'F'}, default='F'
471
+ Order of the arrays gradient and hessian. The default 'F' makes the arrays
472
+ contiguous along samples.
473
+
474
+ Returns
475
+ -------
476
+ gradient : C-contiguous array of shape (n_samples,) or array of shape \
477
+ (n_samples, n_classes)
478
+ Empty array (allocated but not initialized) to be used as argument
479
+ gradient_out.
480
+ hessian : C-contiguous array of shape (n_samples,), array of shape
481
+ (n_samples, n_classes) or shape (1,)
482
+ Empty (allocated but not initialized) array to be used as argument
483
+ hessian_out.
484
+ If constant_hessian is True (e.g. `HalfSquaredError`), the array is
485
+ initialized to ``1``.
486
+ """
487
+ if dtype not in (np.float32, np.float64):
488
+ raise ValueError(
489
+ "Valid options for 'dtype' are np.float32 and np.float64. "
490
+ f"Got dtype={dtype} instead."
491
+ )
492
+
493
+ if self.is_multiclass:
494
+ shape = (n_samples, self.n_classes)
495
+ else:
496
+ shape = (n_samples,)
497
+ gradient = np.empty(shape=shape, dtype=dtype, order=order)
498
+
499
+ if self.constant_hessian:
500
+ # If the hessians are constant, we consider them equal to 1.
501
+ # - This is correct for HalfSquaredError
502
+ # - For AbsoluteError, hessians are actually 0, but they are
503
+ # always ignored anyway.
504
+ hessian = np.ones(shape=(1,), dtype=dtype)
505
+ else:
506
+ hessian = np.empty(shape=shape, dtype=dtype, order=order)
507
+
508
+ return gradient, hessian
509
+
510
+
511
+ # Note: Naturally, we would inherit in the following order
512
+ # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
513
+ # But because of https://github.com/cython/cython/issues/4350 we
514
+ # set BaseLoss as the last one. This, of course, changes the MRO.
515
+ class HalfSquaredError(BaseLoss):
516
+ """Half squared error with identity link, for regression.
517
+
518
+ Domain:
519
+ y_true and y_pred all real numbers
520
+
521
+ Link:
522
+ y_pred = raw_prediction
523
+
524
+ For a given sample x_i, half squared error is defined as::
525
+
526
+ loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2
527
+
528
+ The factor of 0.5 simplifies the computation of gradients and results in a
529
+ unit hessian (and is consistent with what is done in LightGBM). It is also
530
+ half the Normal distribution deviance.
531
+ """
532
+
533
+ def __init__(self, sample_weight=None):
534
+ super().__init__(closs=CyHalfSquaredError(), link=IdentityLink())
535
+ self.constant_hessian = sample_weight is None
536
+
537
+
538
+ class AbsoluteError(BaseLoss):
539
+ """Absolute error with identity link, for regression.
540
+
541
+ Domain:
542
+ y_true and y_pred all real numbers
543
+
544
+ Link:
545
+ y_pred = raw_prediction
546
+
547
+ For a given sample x_i, the absolute error is defined as::
548
+
549
+ loss(x_i) = |y_true_i - raw_prediction_i|
550
+
551
+ Note that the exact hessian = 0 almost everywhere (except at one point, therefore
552
+ differentiable = False). Optimization routines like in HGBT, however, need a
553
+ hessian > 0. Therefore, we assign 1.
554
+ """
555
+
556
+ differentiable = False
557
+ need_update_leaves_values = True
558
+
559
+ def __init__(self, sample_weight=None):
560
+ super().__init__(closs=CyAbsoluteError(), link=IdentityLink())
561
+ self.approx_hessian = True
562
+ self.constant_hessian = sample_weight is None
563
+
564
+ def fit_intercept_only(self, y_true, sample_weight=None):
565
+ """Compute raw_prediction of an intercept-only model.
566
+
567
+ This is the weighted median of the target, i.e. over the samples
568
+ axis=0.
569
+ """
570
+ if sample_weight is None:
571
+ return np.median(y_true, axis=0)
572
+ else:
573
+ return _weighted_percentile(y_true, sample_weight, 50)
574
+
575
+
576
+ class PinballLoss(BaseLoss):
577
+ """Quantile loss aka pinball loss, for regression.
578
+
579
+ Domain:
580
+ y_true and y_pred all real numbers
581
+ quantile in (0, 1)
582
+
583
+ Link:
584
+ y_pred = raw_prediction
585
+
586
+ For a given sample x_i, the pinball loss is defined as::
587
+
588
+ loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i)
589
+
590
+ rho_{quantile}(u) = u * (quantile - 1_{u<0})
591
+ = -u *(1 - quantile) if u < 0
592
+ u * quantile if u >= 0
593
+
594
+ Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError().
595
+
596
+ Note that the exact hessian = 0 almost everywhere (except at one point, therefore
597
+ differentiable = False). Optimization routines like in HGBT, however, need a
598
+ hessian > 0. Therefore, we assign 1.
599
+
600
+ Additional Attributes
601
+ ---------------------
602
+ quantile : float
603
+ The quantile level of the quantile to be estimated. Must be in range (0, 1).
604
+ """
605
+
606
+ differentiable = False
607
+ need_update_leaves_values = True
608
+
609
+ def __init__(self, sample_weight=None, quantile=0.5):
610
+ check_scalar(
611
+ quantile,
612
+ "quantile",
613
+ target_type=numbers.Real,
614
+ min_val=0,
615
+ max_val=1,
616
+ include_boundaries="neither",
617
+ )
618
+ super().__init__(
619
+ closs=CyPinballLoss(quantile=float(quantile)),
620
+ link=IdentityLink(),
621
+ )
622
+ self.approx_hessian = True
623
+ self.constant_hessian = sample_weight is None
624
+
625
+ def fit_intercept_only(self, y_true, sample_weight=None):
626
+ """Compute raw_prediction of an intercept-only model.
627
+
628
+ This is the weighted median of the target, i.e. over the samples
629
+ axis=0.
630
+ """
631
+ if sample_weight is None:
632
+ return np.percentile(y_true, 100 * self.closs.quantile, axis=0)
633
+ else:
634
+ return _weighted_percentile(
635
+ y_true, sample_weight, 100 * self.closs.quantile
636
+ )
637
+
638
+
639
+ class HuberLoss(BaseLoss):
640
+ """Huber loss, for regression.
641
+
642
+ Domain:
643
+ y_true and y_pred all real numbers
644
+ quantile in (0, 1)
645
+
646
+ Link:
647
+ y_pred = raw_prediction
648
+
649
+ For a given sample x_i, the Huber loss is defined as::
650
+
651
+ loss(x_i) = 1/2 * abserr**2 if abserr <= delta
652
+ delta * (abserr - delta/2) if abserr > delta
653
+
654
+ abserr = |y_true_i - raw_prediction_i|
655
+ delta = quantile(abserr, self.quantile)
656
+
657
+ Note: HuberLoss(quantile=1) equals HalfSquaredError and HuberLoss(quantile=0)
658
+ equals delta * (AbsoluteError() - delta/2).
659
+
660
+ Additional Attributes
661
+ ---------------------
662
+ quantile : float
663
+ The quantile level which defines the breaking point `delta` to distinguish
664
+ between absolute error and squared error. Must be in range (0, 1).
665
+
666
+ Reference
667
+ ---------
668
+ .. [1] Friedman, J.H. (2001). :doi:`Greedy function approximation: A gradient
669
+ boosting machine <10.1214/aos/1013203451>`.
670
+ Annals of Statistics, 29, 1189-1232.
671
+ """
672
+
673
+ differentiable = False
674
+ need_update_leaves_values = True
675
+
676
+ def __init__(self, sample_weight=None, quantile=0.9, delta=0.5):
677
+ check_scalar(
678
+ quantile,
679
+ "quantile",
680
+ target_type=numbers.Real,
681
+ min_val=0,
682
+ max_val=1,
683
+ include_boundaries="neither",
684
+ )
685
+ self.quantile = quantile # This is better stored outside of Cython.
686
+ super().__init__(
687
+ closs=CyHuberLoss(delta=float(delta)),
688
+ link=IdentityLink(),
689
+ )
690
+ self.approx_hessian = True
691
+ self.constant_hessian = False
692
+
693
+ def fit_intercept_only(self, y_true, sample_weight=None):
694
+ """Compute raw_prediction of an intercept-only model.
695
+
696
+ This is the weighted median of the target, i.e. over the samples
697
+ axis=0.
698
+ """
699
+ # See formula before algo 4 in Friedman (2001), but we apply it to y_true,
700
+ # not to the residual y_true - raw_prediction. An estimator like
701
+ # HistGradientBoostingRegressor might then call it on the residual, e.g.
702
+ # fit_intercept_only(y_true - raw_prediction).
703
+ if sample_weight is None:
704
+ median = np.percentile(y_true, 50, axis=0)
705
+ else:
706
+ median = _weighted_percentile(y_true, sample_weight, 50)
707
+ diff = y_true - median
708
+ term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff))
709
+ return median + np.average(term, weights=sample_weight)
710
+
711
+
712
+ class HalfPoissonLoss(BaseLoss):
713
+ """Half Poisson deviance loss with log-link, for regression.
714
+
715
+ Domain:
716
+ y_true in non-negative real numbers
717
+ y_pred in positive real numbers
718
+
719
+ Link:
720
+ y_pred = exp(raw_prediction)
721
+
722
+ For a given sample x_i, half the Poisson deviance is defined as::
723
+
724
+ loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i))
725
+ - y_true_i + exp(raw_prediction_i)
726
+
727
+ Half the Poisson deviance is actually the negative log-likelihood up to
728
+ constant terms (not involving raw_prediction) and simplifies the
729
+ computation of the gradients.
730
+ We also skip the constant term `y_true_i * log(y_true_i) - y_true_i`.
731
+ """
732
+
733
+ def __init__(self, sample_weight=None):
734
+ super().__init__(closs=CyHalfPoissonLoss(), link=LogLink())
735
+ self.interval_y_true = Interval(0, np.inf, True, False)
736
+
737
+ def constant_to_optimal_zero(self, y_true, sample_weight=None):
738
+ term = xlogy(y_true, y_true) - y_true
739
+ if sample_weight is not None:
740
+ term *= sample_weight
741
+ return term
742
+
743
+
744
+ class HalfGammaLoss(BaseLoss):
745
+ """Half Gamma deviance loss with log-link, for regression.
746
+
747
+ Domain:
748
+ y_true and y_pred in positive real numbers
749
+
750
+ Link:
751
+ y_pred = exp(raw_prediction)
752
+
753
+ For a given sample x_i, half Gamma deviance loss is defined as::
754
+
755
+ loss(x_i) = log(exp(raw_prediction_i)/y_true_i)
756
+ + y_true/exp(raw_prediction_i) - 1
757
+
758
+ Half the Gamma deviance is actually proportional to the negative log-
759
+ likelihood up to constant terms (not involving raw_prediction) and
760
+ simplifies the computation of the gradients.
761
+ We also skip the constant term `-log(y_true_i) - 1`.
762
+ """
763
+
764
+ def __init__(self, sample_weight=None):
765
+ super().__init__(closs=CyHalfGammaLoss(), link=LogLink())
766
+ self.interval_y_true = Interval(0, np.inf, False, False)
767
+
768
+ def constant_to_optimal_zero(self, y_true, sample_weight=None):
769
+ term = -np.log(y_true) - 1
770
+ if sample_weight is not None:
771
+ term *= sample_weight
772
+ return term
773
+
774
+
775
+ class HalfTweedieLoss(BaseLoss):
776
+ """Half Tweedie deviance loss with log-link, for regression.
777
+
778
+ Domain:
779
+ y_true in real numbers for power <= 0
780
+ y_true in non-negative real numbers for 0 < power < 2
781
+ y_true in positive real numbers for 2 <= power
782
+ y_pred in positive real numbers
783
+ power in real numbers
784
+
785
+ Link:
786
+ y_pred = exp(raw_prediction)
787
+
788
+ For a given sample x_i, half Tweedie deviance loss with p=power is defined
789
+ as::
790
+
791
+ loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
792
+ - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p)
793
+ + exp(raw_prediction_i)**(2-p) / (2-p)
794
+
795
+ Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link,
796
+ HalfPoissonLoss and HalfGammaLoss.
797
+
798
+ We also skip constant terms, but those are different for p=0, 1, 2.
799
+ Therefore, the loss is not continuous in `power`.
800
+
801
+ Note furthermore that although no Tweedie distribution exists for
802
+ 0 < power < 1, it still gives a strictly consistent scoring function for
803
+ the expectation.
804
+ """
805
+
806
+ def __init__(self, sample_weight=None, power=1.5):
807
+ super().__init__(
808
+ closs=CyHalfTweedieLoss(power=float(power)),
809
+ link=LogLink(),
810
+ )
811
+ if self.closs.power <= 0:
812
+ self.interval_y_true = Interval(-np.inf, np.inf, False, False)
813
+ elif self.closs.power < 2:
814
+ self.interval_y_true = Interval(0, np.inf, True, False)
815
+ else:
816
+ self.interval_y_true = Interval(0, np.inf, False, False)
817
+
818
+ def constant_to_optimal_zero(self, y_true, sample_weight=None):
819
+ if self.closs.power == 0:
820
+ return HalfSquaredError().constant_to_optimal_zero(
821
+ y_true=y_true, sample_weight=sample_weight
822
+ )
823
+ elif self.closs.power == 1:
824
+ return HalfPoissonLoss().constant_to_optimal_zero(
825
+ y_true=y_true, sample_weight=sample_weight
826
+ )
827
+ elif self.closs.power == 2:
828
+ return HalfGammaLoss().constant_to_optimal_zero(
829
+ y_true=y_true, sample_weight=sample_weight
830
+ )
831
+ else:
832
+ p = self.closs.power
833
+ term = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p)
834
+ if sample_weight is not None:
835
+ term *= sample_weight
836
+ return term
837
+
838
+
839
+ class HalfTweedieLossIdentity(BaseLoss):
840
+ """Half Tweedie deviance loss with identity link, for regression.
841
+
842
+ Domain:
843
+ y_true in real numbers for power <= 0
844
+ y_true in non-negative real numbers for 0 < power < 2
845
+ y_true in positive real numbers for 2 <= power
846
+ y_pred in positive real numbers for power != 0
847
+ y_pred in real numbers for power = 0
848
+ power in real numbers
849
+
850
+ Link:
851
+ y_pred = raw_prediction
852
+
853
+ For a given sample x_i, half Tweedie deviance loss with p=power is defined
854
+ as::
855
+
856
+ loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
857
+ - y_true_i * raw_prediction_i**(1-p) / (1-p)
858
+ + raw_prediction_i**(2-p) / (2-p)
859
+
860
+ Note that the minimum value of this loss is 0.
861
+
862
+ Note furthermore that although no Tweedie distribution exists for
863
+ 0 < power < 1, it still gives a strictly consistent scoring function for
864
+ the expectation.
865
+ """
866
+
867
+ def __init__(self, sample_weight=None, power=1.5):
868
+ super().__init__(
869
+ closs=CyHalfTweedieLossIdentity(power=float(power)),
870
+ link=IdentityLink(),
871
+ )
872
+ if self.closs.power <= 0:
873
+ self.interval_y_true = Interval(-np.inf, np.inf, False, False)
874
+ elif self.closs.power < 2:
875
+ self.interval_y_true = Interval(0, np.inf, True, False)
876
+ else:
877
+ self.interval_y_true = Interval(0, np.inf, False, False)
878
+
879
+ if self.closs.power == 0:
880
+ self.interval_y_pred = Interval(-np.inf, np.inf, False, False)
881
+ else:
882
+ self.interval_y_pred = Interval(0, np.inf, False, False)
883
+
884
+
885
+ class HalfBinomialLoss(BaseLoss):
886
+ """Half Binomial deviance loss with logit link, for binary classification.
887
+
888
+ This is also know as binary cross entropy, log-loss and logistic loss.
889
+
890
+ Domain:
891
+ y_true in [0, 1], i.e. regression on the unit interval
892
+ y_pred in (0, 1), i.e. boundaries excluded
893
+
894
+ Link:
895
+ y_pred = expit(raw_prediction)
896
+
897
+ For a given sample x_i, half Binomial deviance is defined as the negative
898
+ log-likelihood of the Binomial/Bernoulli distribution and can be expressed
899
+ as::
900
+
901
+ loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i
902
+
903
+ See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman,
904
+ section 4.4.1 (about logistic regression).
905
+
906
+ Note that the formulation works for classification, y = {0, 1}, as well as
907
+ logistic regression, y = [0, 1].
908
+ If you add `constant_to_optimal_zero` to the loss, you get half the
909
+ Bernoulli/binomial deviance.
910
+
911
+ More details: Inserting the predicted probability y_pred = expit(raw_prediction)
912
+ in the loss gives the well known::
913
+
914
+ loss(x_i) = - y_true_i * log(y_pred_i) - (1 - y_true_i) * log(1 - y_pred_i)
915
+ """
916
+
917
+ def __init__(self, sample_weight=None):
918
+ super().__init__(
919
+ closs=CyHalfBinomialLoss(),
920
+ link=LogitLink(),
921
+ n_classes=2,
922
+ )
923
+ self.interval_y_true = Interval(0, 1, True, True)
924
+
925
+ def constant_to_optimal_zero(self, y_true, sample_weight=None):
926
+ # This is non-zero only if y_true is neither 0 nor 1.
927
+ term = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true)
928
+ if sample_weight is not None:
929
+ term *= sample_weight
930
+ return term
931
+
932
+ def predict_proba(self, raw_prediction):
933
+ """Predict probabilities.
934
+
935
+ Parameters
936
+ ----------
937
+ raw_prediction : array of shape (n_samples,) or (n_samples, 1)
938
+ Raw prediction values (in link space).
939
+
940
+ Returns
941
+ -------
942
+ proba : array of shape (n_samples, 2)
943
+ Element-wise class probabilities.
944
+ """
945
+ # Be graceful to shape (n_samples, 1) -> (n_samples,)
946
+ if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
947
+ raw_prediction = raw_prediction.squeeze(1)
948
+ proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
949
+ proba[:, 1] = self.link.inverse(raw_prediction)
950
+ proba[:, 0] = 1 - proba[:, 1]
951
+ return proba
952
+
953
+
954
+ class HalfMultinomialLoss(BaseLoss):
955
+ """Categorical cross-entropy loss, for multiclass classification.
956
+
957
+ Domain:
958
+ y_true in {0, 1, 2, 3, .., n_classes - 1}
959
+ y_pred has n_classes elements, each element in (0, 1)
960
+
961
+ Link:
962
+ y_pred = softmax(raw_prediction)
963
+
964
+ Note: We assume y_true to be already label encoded. The inverse link is
965
+ softmax. But the full link function is the symmetric multinomial logit
966
+ function.
967
+
968
+ For a given sample x_i, the categorical cross-entropy loss is defined as
969
+ the negative log-likelihood of the multinomial distribution, it
970
+ generalizes the binary cross-entropy to more than 2 classes::
971
+
972
+ loss_i = log(sum(exp(raw_pred_{i, k}), k=0..n_classes-1))
973
+ - sum(y_true_{i, k} * raw_pred_{i, k}, k=0..n_classes-1)
974
+
975
+ See [1].
976
+
977
+ Note that for the hessian, we calculate only the diagonal part in the
978
+ classes: If the full hessian for classes k and l and sample i is H_i_k_l,
979
+ we calculate H_i_k_k, i.e. k=l.
980
+
981
+ Reference
982
+ ---------
983
+ .. [1] :arxiv:`Simon, Noah, J. Friedman and T. Hastie.
984
+ "A Blockwise Descent Algorithm for Group-penalized Multiresponse and
985
+ Multinomial Regression".
986
+ <1311.6529>`
987
+ """
988
+
989
+ is_multiclass = True
990
+
991
+ def __init__(self, sample_weight=None, n_classes=3):
992
+ super().__init__(
993
+ closs=CyHalfMultinomialLoss(),
994
+ link=MultinomialLogit(),
995
+ n_classes=n_classes,
996
+ )
997
+ self.interval_y_true = Interval(0, np.inf, True, False)
998
+ self.interval_y_pred = Interval(0, 1, False, False)
999
+
1000
+ def in_y_true_range(self, y):
1001
+ """Return True if y is in the valid range of y_true.
1002
+
1003
+ Parameters
1004
+ ----------
1005
+ y : ndarray
1006
+ """
1007
+ return self.interval_y_true.includes(y) and np.all(y.astype(int) == y)
1008
+
1009
+ def fit_intercept_only(self, y_true, sample_weight=None):
1010
+ """Compute raw_prediction of an intercept-only model.
1011
+
1012
+ This is the softmax of the weighted average of the target, i.e. over
1013
+ the samples axis=0.
1014
+ """
1015
+ out = np.zeros(self.n_classes, dtype=y_true.dtype)
1016
+ eps = np.finfo(y_true.dtype).eps
1017
+ for k in range(self.n_classes):
1018
+ out[k] = np.average(y_true == k, weights=sample_weight, axis=0)
1019
+ out[k] = np.clip(out[k], eps, 1 - eps)
1020
+ return self.link.link(out[None, :]).reshape(-1)
1021
+
1022
+ def predict_proba(self, raw_prediction):
1023
+ """Predict probabilities.
1024
+
1025
+ Parameters
1026
+ ----------
1027
+ raw_prediction : array of shape (n_samples, n_classes)
1028
+ Raw prediction values (in link space).
1029
+
1030
+ Returns
1031
+ -------
1032
+ proba : array of shape (n_samples, n_classes)
1033
+ Element-wise class probabilities.
1034
+ """
1035
+ return self.link.inverse(raw_prediction)
1036
+
1037
+ def gradient_proba(
1038
+ self,
1039
+ y_true,
1040
+ raw_prediction,
1041
+ sample_weight=None,
1042
+ gradient_out=None,
1043
+ proba_out=None,
1044
+ n_threads=1,
1045
+ ):
1046
+ """Compute gradient and class probabilities fow raw_prediction.
1047
+
1048
+ Parameters
1049
+ ----------
1050
+ y_true : C-contiguous array of shape (n_samples,)
1051
+ Observed, true target values.
1052
+ raw_prediction : array of shape (n_samples, n_classes)
1053
+ Raw prediction values (in link space).
1054
+ sample_weight : None or C-contiguous array of shape (n_samples,)
1055
+ Sample weights.
1056
+ gradient_out : None or array of shape (n_samples, n_classes)
1057
+ A location into which the gradient is stored. If None, a new array
1058
+ might be created.
1059
+ proba_out : None or array of shape (n_samples, n_classes)
1060
+ A location into which the class probabilities are stored. If None,
1061
+ a new array might be created.
1062
+ n_threads : int, default=1
1063
+ Might use openmp thread parallelism.
1064
+
1065
+ Returns
1066
+ -------
1067
+ gradient : array of shape (n_samples, n_classes)
1068
+ Element-wise gradients.
1069
+
1070
+ proba : array of shape (n_samples, n_classes)
1071
+ Element-wise class probabilities.
1072
+ """
1073
+ if gradient_out is None:
1074
+ if proba_out is None:
1075
+ gradient_out = np.empty_like(raw_prediction)
1076
+ proba_out = np.empty_like(raw_prediction)
1077
+ else:
1078
+ gradient_out = np.empty_like(proba_out)
1079
+ elif proba_out is None:
1080
+ proba_out = np.empty_like(gradient_out)
1081
+
1082
+ self.closs.gradient_proba(
1083
+ y_true=y_true,
1084
+ raw_prediction=raw_prediction,
1085
+ sample_weight=sample_weight,
1086
+ gradient_out=gradient_out,
1087
+ proba_out=proba_out,
1088
+ n_threads=n_threads,
1089
+ )
1090
+ return gradient_out, proba_out
1091
+
1092
+
1093
+ class ExponentialLoss(BaseLoss):
1094
+ """Exponential loss with (half) logit link, for binary classification.
1095
+
1096
+ This is also know as boosting loss.
1097
+
1098
+ Domain:
1099
+ y_true in [0, 1], i.e. regression on the unit interval
1100
+ y_pred in (0, 1), i.e. boundaries excluded
1101
+
1102
+ Link:
1103
+ y_pred = expit(2 * raw_prediction)
1104
+
1105
+ For a given sample x_i, the exponential loss is defined as::
1106
+
1107
+ loss(x_i) = y_true_i * exp(-raw_pred_i)) + (1 - y_true_i) * exp(raw_pred_i)
1108
+
1109
+ See:
1110
+ - J. Friedman, T. Hastie, R. Tibshirani.
1111
+ "Additive logistic regression: a statistical view of boosting (With discussion
1112
+ and a rejoinder by the authors)." Ann. Statist. 28 (2) 337 - 407, April 2000.
1113
+ https://doi.org/10.1214/aos/1016218223
1114
+ - A. Buja, W. Stuetzle, Y. Shen. (2005).
1115
+ "Loss Functions for Binary Class Probability Estimation and Classification:
1116
+ Structure and Applications."
1117
+
1118
+ Note that the formulation works for classification, y = {0, 1}, as well as
1119
+ "exponential logistic" regression, y = [0, 1].
1120
+ Note that this is a proper scoring rule, but without it's canonical link.
1121
+
1122
+ More details: Inserting the predicted probability
1123
+ y_pred = expit(2 * raw_prediction) in the loss gives::
1124
+
1125
+ loss(x_i) = y_true_i * sqrt((1 - y_pred_i) / y_pred_i)
1126
+ + (1 - y_true_i) * sqrt(y_pred_i / (1 - y_pred_i))
1127
+ """
1128
+
1129
+ def __init__(self, sample_weight=None):
1130
+ super().__init__(
1131
+ closs=CyExponentialLoss(),
1132
+ link=HalfLogitLink(),
1133
+ n_classes=2,
1134
+ )
1135
+ self.interval_y_true = Interval(0, 1, True, True)
1136
+
1137
+ def constant_to_optimal_zero(self, y_true, sample_weight=None):
1138
+ # This is non-zero only if y_true is neither 0 nor 1.
1139
+ term = -2 * np.sqrt(y_true * (1 - y_true))
1140
+ if sample_weight is not None:
1141
+ term *= sample_weight
1142
+ return term
1143
+
1144
+ def predict_proba(self, raw_prediction):
1145
+ """Predict probabilities.
1146
+
1147
+ Parameters
1148
+ ----------
1149
+ raw_prediction : array of shape (n_samples,) or (n_samples, 1)
1150
+ Raw prediction values (in link space).
1151
+
1152
+ Returns
1153
+ -------
1154
+ proba : array of shape (n_samples, 2)
1155
+ Element-wise class probabilities.
1156
+ """
1157
+ # Be graceful to shape (n_samples, 1) -> (n_samples,)
1158
+ if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
1159
+ raw_prediction = raw_prediction.squeeze(1)
1160
+ proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
1161
+ proba[:, 1] = self.link.inverse(raw_prediction)
1162
+ proba[:, 0] = 1 - proba[:, 1]
1163
+ return proba
1164
+
1165
+
1166
+ _LOSSES = {
1167
+ "squared_error": HalfSquaredError,
1168
+ "absolute_error": AbsoluteError,
1169
+ "pinball_loss": PinballLoss,
1170
+ "huber_loss": HuberLoss,
1171
+ "poisson_loss": HalfPoissonLoss,
1172
+ "gamma_loss": HalfGammaLoss,
1173
+ "tweedie_loss": HalfTweedieLoss,
1174
+ "binomial_loss": HalfBinomialLoss,
1175
+ "multinomial_loss": HalfMultinomialLoss,
1176
+ "exponential_loss": ExponentialLoss,
1177
+ }
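For reference, a minimal sketch of how the _LOSSES mapping and ExponentialLoss above can be exercised. Note that sklearn._loss is a private module, so this import path is illustrative rather than a stable API.

import numpy as np
from sklearn._loss.loss import _LOSSES

loss = _LOSSES["exponential_loss"]()          # same as ExponentialLoss()
raw_prediction = np.array([-2.0, 0.0, 3.0])   # values in link space
proba = loss.predict_proba(raw_prediction)    # column 1 is expit(2 * raw_prediction)
print(proba.shape)                            # (3, 2)
print(proba.sum(axis=1))                      # each row sums to 1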
venv/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc ADDED
Binary file (2.66 kB).
 
venv/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc ADDED
Binary file (27 kB).
 
venv/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py ADDED
@@ -0,0 +1,111 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose, assert_array_equal
4
+
5
+ from sklearn._loss.link import (
6
+ _LINKS,
7
+ HalfLogitLink,
8
+ Interval,
9
+ MultinomialLogit,
10
+ _inclusive_low_high,
11
+ )
12
+
13
+ LINK_FUNCTIONS = list(_LINKS.values())
14
+
15
+
16
+ def test_interval_raises():
17
+ """Test that interval with low > high raises ValueError."""
18
+ with pytest.raises(
19
+ ValueError, match="One must have low <= high; got low=1, high=0."
20
+ ):
21
+ Interval(1, 0, False, False)
22
+
23
+
24
+ @pytest.mark.parametrize(
25
+ "interval",
26
+ [
27
+ Interval(0, 1, False, False),
28
+ Interval(0, 1, False, True),
29
+ Interval(0, 1, True, False),
30
+ Interval(0, 1, True, True),
31
+ Interval(-np.inf, np.inf, False, False),
32
+ Interval(-np.inf, np.inf, False, True),
33
+ Interval(-np.inf, np.inf, True, False),
34
+ Interval(-np.inf, np.inf, True, True),
35
+ Interval(-10, -1, False, False),
36
+ Interval(-10, -1, False, True),
37
+ Interval(-10, -1, True, False),
38
+ Interval(-10, -1, True, True),
39
+ ],
40
+ )
41
+ def test_is_in_range(interval):
42
+ # make sure low and high are always within the interval, used for linspace
43
+ low, high = _inclusive_low_high(interval)
44
+
45
+ x = np.linspace(low, high, num=10)
46
+ assert interval.includes(x)
47
+
48
+ # x contains lower bound
49
+ assert interval.includes(np.r_[x, interval.low]) == interval.low_inclusive
50
+
51
+ # x contains upper bound
52
+ assert interval.includes(np.r_[x, interval.high]) == interval.high_inclusive
53
+
54
+ # x contains upper and lower bound
55
+ assert interval.includes(np.r_[x, interval.low, interval.high]) == (
56
+ interval.low_inclusive and interval.high_inclusive
57
+ )
58
+
59
+
60
+ @pytest.mark.parametrize("link", LINK_FUNCTIONS)
61
+ def test_link_inverse_identity(link, global_random_seed):
62
+ # Test that link of inverse gives identity.
63
+ rng = np.random.RandomState(global_random_seed)
64
+ link = link()
65
+ n_samples, n_classes = 100, None
66
+ # The values for `raw_prediction` are limited from -20 to 20 because in the
67
+ # class `LogitLink` the term `expit(x)` comes very close to 1 for large
68
+ # positive x and therefore loses precision.
69
+ if link.is_multiclass:
70
+ n_classes = 10
71
+ raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples, n_classes))
72
+ if isinstance(link, MultinomialLogit):
73
+ raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
74
+ elif isinstance(link, HalfLogitLink):
75
+ raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples))
76
+ else:
77
+ raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples))
78
+
79
+ assert_allclose(link.link(link.inverse(raw_prediction)), raw_prediction)
80
+ y_pred = link.inverse(raw_prediction)
81
+ assert_allclose(link.inverse(link.link(y_pred)), y_pred)
82
+
83
+
84
+ @pytest.mark.parametrize("link", LINK_FUNCTIONS)
85
+ def test_link_out_argument(link):
86
+ # Test that out argument gets assigned the result.
87
+ rng = np.random.RandomState(42)
88
+ link = link()
89
+ n_samples, n_classes = 100, None
90
+ if link.is_multiclass:
91
+ n_classes = 10
92
+ raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples, n_classes))
93
+ if isinstance(link, MultinomialLogit):
94
+ raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
95
+ else:
96
+ # So far, the valid interval of raw_prediction is (-inf, inf) and
97
+ # we do not need to distinguish.
98
+ raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples))
99
+
100
+ y_pred = link.inverse(raw_prediction, out=None)
101
+ out = np.empty_like(raw_prediction)
102
+ y_pred_2 = link.inverse(raw_prediction, out=out)
103
+ assert_allclose(y_pred, out)
104
+ assert_array_equal(out, y_pred_2)
105
+ assert np.shares_memory(out, y_pred_2)
106
+
107
+ out = np.empty_like(y_pred)
108
+ raw_prediction_2 = link.link(y_pred, out=out)
109
+ assert_allclose(raw_prediction, out)
110
+ assert_array_equal(out, raw_prediction_2)
111
+ assert np.shares_memory(out, raw_prediction_2)
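The round-trip property exercised by test_link_inverse_identity can be checked directly; a minimal sketch with one concrete link (again assuming the private sklearn._loss.link module):

import numpy as np
from sklearn._loss.link import LogitLink

link = LogitLink()
raw = np.linspace(-10, 10, num=5)       # keep |raw| moderate so expit retains precision
y_pred = link.inverse(raw)              # link space -> probabilities in (0, 1)
assert np.allclose(link.link(y_pred), raw)   # link(inverse(raw)) == raw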
venv/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py ADDED
@@ -0,0 +1,1320 @@
1
+ import pickle
2
+
3
+ import numpy as np
4
+ import pytest
5
+ from numpy.testing import assert_allclose, assert_array_equal
6
+ from pytest import approx
7
+ from scipy.optimize import (
8
+ LinearConstraint,
9
+ minimize,
10
+ minimize_scalar,
11
+ newton,
12
+ )
13
+ from scipy.special import logsumexp
14
+
15
+ from sklearn._loss.link import IdentityLink, _inclusive_low_high
16
+ from sklearn._loss.loss import (
17
+ _LOSSES,
18
+ AbsoluteError,
19
+ BaseLoss,
20
+ HalfBinomialLoss,
21
+ HalfGammaLoss,
22
+ HalfMultinomialLoss,
23
+ HalfPoissonLoss,
24
+ HalfSquaredError,
25
+ HalfTweedieLoss,
26
+ HalfTweedieLossIdentity,
27
+ HuberLoss,
28
+ PinballLoss,
29
+ )
30
+ from sklearn.utils import _IS_WASM, assert_all_finite
31
+ from sklearn.utils._testing import create_memmap_backed_data, skip_if_32bit
32
+
33
+ ALL_LOSSES = list(_LOSSES.values())
34
+
35
+ LOSS_INSTANCES = [loss() for loss in ALL_LOSSES]
36
+ # HalfTweedieLoss(power=1.5) is already there as default
37
+ LOSS_INSTANCES += [
38
+ PinballLoss(quantile=0.25),
39
+ HuberLoss(quantile=0.75),
40
+ HalfTweedieLoss(power=-1.5),
41
+ HalfTweedieLoss(power=0),
42
+ HalfTweedieLoss(power=1),
43
+ HalfTweedieLoss(power=2),
44
+ HalfTweedieLoss(power=3.0),
45
+ HalfTweedieLossIdentity(power=0),
46
+ HalfTweedieLossIdentity(power=1),
47
+ HalfTweedieLossIdentity(power=2),
48
+ HalfTweedieLossIdentity(power=3.0),
49
+ ]
50
+
51
+
52
+ def loss_instance_name(param):
53
+ if isinstance(param, BaseLoss):
54
+ loss = param
55
+ name = loss.__class__.__name__
56
+ if isinstance(loss, PinballLoss):
57
+ name += f"(quantile={loss.closs.quantile})"
58
+ elif isinstance(loss, HuberLoss):
59
+ name += f"(quantile={loss.quantile}"
60
+ elif hasattr(loss, "closs") and hasattr(loss.closs, "power"):
61
+ name += f"(power={loss.closs.power})"
62
+ return name
63
+ else:
64
+ return str(param)
65
+
66
+
67
+ def random_y_true_raw_prediction(
68
+ loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42
69
+ ):
70
+ """Random generate y_true and raw_prediction in valid range."""
71
+ rng = np.random.RandomState(seed)
72
+ if loss.is_multiclass:
73
+ raw_prediction = np.empty((n_samples, loss.n_classes))
74
+ raw_prediction.flat[:] = rng.uniform(
75
+ low=raw_bound[0],
76
+ high=raw_bound[1],
77
+ size=n_samples * loss.n_classes,
78
+ )
79
+ y_true = np.arange(n_samples).astype(float) % loss.n_classes
80
+ else:
81
+ # If link is identity, we must respect the interval of y_pred:
82
+ if isinstance(loss.link, IdentityLink):
83
+ low, high = _inclusive_low_high(loss.interval_y_pred)
84
+ low = np.amax([low, raw_bound[0]])
85
+ high = np.amin([high, raw_bound[1]])
86
+ raw_bound = (low, high)
87
+ raw_prediction = rng.uniform(
88
+ low=raw_bound[0], high=raw_bound[1], size=n_samples
89
+ )
90
+ # generate a y_true in valid range
91
+ low, high = _inclusive_low_high(loss.interval_y_true)
92
+ low = max(low, y_bound[0])
93
+ high = min(high, y_bound[1])
94
+ y_true = rng.uniform(low, high, size=n_samples)
95
+ # set some values at special boundaries
96
+ if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive:
97
+ y_true[:: (n_samples // 3)] = 0
98
+ if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive:
99
+ y_true[1 :: (n_samples // 3)] = 1
100
+
101
+ return y_true, raw_prediction
102
+
103
+
104
+ def numerical_derivative(func, x, eps):
105
+ """Helper function for numerical (first) derivatives."""
106
+ # For numerical derivatives, see
107
+ # https://en.wikipedia.org/wiki/Numerical_differentiation
108
+ # https://en.wikipedia.org/wiki/Finite_difference_coefficient
109
+ # We use central finite differences of accuracy 4.
110
+ h = np.full_like(x, fill_value=eps)
111
+ f_minus_2h = func(x - 2 * h)
112
+ f_minus_1h = func(x - h)
113
+ f_plus_1h = func(x + h)
114
+ f_plus_2h = func(x + 2 * h)
115
+ return (-f_plus_2h + 8 * f_plus_1h - 8 * f_minus_1h + f_minus_2h) / (12.0 * eps)
116
+
117
+
118
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
119
+ def test_loss_boundary(loss):
120
+ """Test interval ranges of y_true and y_pred in losses."""
121
+ # make sure low and high are always within the interval, used for linspace
122
+ if loss.is_multiclass:
123
+ y_true = np.linspace(0, 9, num=10)
124
+ else:
125
+ low, high = _inclusive_low_high(loss.interval_y_true)
126
+ y_true = np.linspace(low, high, num=10)
127
+
128
+ # add boundaries if they are included
129
+ if loss.interval_y_true.low_inclusive:
130
+ y_true = np.r_[y_true, loss.interval_y_true.low]
131
+ if loss.interval_y_true.high_inclusive:
132
+ y_true = np.r_[y_true, loss.interval_y_true.high]
133
+
134
+ assert loss.in_y_true_range(y_true)
135
+
136
+ n = y_true.shape[0]
137
+ low, high = _inclusive_low_high(loss.interval_y_pred)
138
+ if loss.is_multiclass:
139
+ y_pred = np.empty((n, 3))
140
+ y_pred[:, 0] = np.linspace(low, high, num=n)
141
+ y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0])
142
+ y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0])
143
+ else:
144
+ y_pred = np.linspace(low, high, num=n)
145
+
146
+ assert loss.in_y_pred_range(y_pred)
147
+
148
+ # calculating losses should not fail
149
+ raw_prediction = loss.link.link(y_pred)
150
+ loss.loss(y_true=y_true, raw_prediction=raw_prediction)
151
+
152
+
153
+ # Fixture to test valid value ranges.
154
+ Y_COMMON_PARAMS = [
155
+ # (loss, [y success], [y fail])
156
+ (HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
157
+ (AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
158
+ (PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
159
+ (HuberLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
160
+ (HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
161
+ (HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
162
+ (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]),
163
+ (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]),
164
+ (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
165
+ (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
166
+ (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
167
+ (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]),
168
+ (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]),
169
+ (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
170
+ (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
171
+ (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
172
+ (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]),
173
+ (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]),
174
+ ]
175
+ # y_pred and y_true do not always have the same domain (valid value range).
176
+ # Hence, we define extra sets of parameters for each of them.
177
+ Y_TRUE_PARAMS = [ # type: ignore
178
+ # (loss, [y success], [y fail])
179
+ (HalfPoissonLoss(), [0], []),
180
+ (HuberLoss(), [0], []),
181
+ (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []),
182
+ (HalfTweedieLoss(power=0), [-100, 0], []),
183
+ (HalfTweedieLoss(power=1.5), [0], []),
184
+ (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []),
185
+ (HalfTweedieLossIdentity(power=0), [-100, 0], []),
186
+ (HalfTweedieLossIdentity(power=1.5), [0], []),
187
+ (HalfBinomialLoss(), [0, 1], []),
188
+ (HalfMultinomialLoss(), [0.0, 1.0, 2], []),
189
+ ]
190
+ Y_PRED_PARAMS = [
191
+ # (loss, [y success], [y fail])
192
+ (HalfPoissonLoss(), [], [0]),
193
+ (HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]),
194
+ (HalfTweedieLoss(power=0), [], [-3, -0.1, 0]),
195
+ (HalfTweedieLoss(power=1.5), [], [0]),
196
+ (HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]),
197
+ (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []),
198
+ (HalfTweedieLossIdentity(power=1.5), [], [0]),
199
+ (HalfBinomialLoss(), [], [0, 1]),
200
+ (HalfMultinomialLoss(), [0.1, 0.5], [0, 1]),
201
+ ]
202
+
203
+
204
+ @pytest.mark.parametrize(
205
+ "loss, y_true_success, y_true_fail", Y_COMMON_PARAMS + Y_TRUE_PARAMS
206
+ )
207
+ def test_loss_boundary_y_true(loss, y_true_success, y_true_fail):
208
+ """Test boundaries of y_true for loss functions."""
209
+ for y in y_true_success:
210
+ assert loss.in_y_true_range(np.array([y]))
211
+ for y in y_true_fail:
212
+ assert not loss.in_y_true_range(np.array([y]))
213
+
214
+
215
+ @pytest.mark.parametrize(
216
+ "loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS # type: ignore
217
+ )
218
+ def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail):
219
+ """Test boundaries of y_pred for loss functions."""
220
+ for y in y_pred_success:
221
+ assert loss.in_y_pred_range(np.array([y]))
222
+ for y in y_pred_fail:
223
+ assert not loss.in_y_pred_range(np.array([y]))
224
+
225
+
226
+ @pytest.mark.parametrize(
227
+ "loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true",
228
+ [
229
+ (HalfSquaredError(), 1.0, 5.0, 8, 4, 1),
230
+ (AbsoluteError(), 1.0, 5.0, 4.0, 1.0, None),
231
+ (PinballLoss(quantile=0.5), 1.0, 5.0, 2, 0.5, None),
232
+ (PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25), 1 - 0.25, None),
233
+ (PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25, -0.25, None),
234
+ (HuberLoss(quantile=0.5, delta=3), 1.0, 5.0, 3 * (4 - 3 / 2), None, None),
235
+ (HuberLoss(quantile=0.5, delta=3), 1.0, 3.0, 0.5 * 2**2, None, None),
236
+ (HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4), 4 - 2, 4),
237
+ (HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4, 1 - 2 / 4, 2 / 4),
238
+ (HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2, None, None),
239
+ (HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2), None, None),
240
+ (HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2, None, None),
241
+ (
242
+ HalfTweedieLossIdentity(power=3),
243
+ 2.0,
244
+ 4.0,
245
+ -1 / 4 + 1 / 4**2 + 1 / 2 / 2,
246
+ None,
247
+ None,
248
+ ),
249
+ (
250
+ HalfBinomialLoss(),
251
+ 0.25,
252
+ np.log(4),
253
+ np.log1p(4) - 0.25 * np.log(4),
254
+ None,
255
+ None,
256
+ ),
257
+ # Extreme log loss cases, checked with mpmath:
258
+ # import mpmath as mp
259
+ #
260
+ # # Stolen from scipy
261
+ # def mpf2float(x):
262
+ # return float(mp.nstr(x, 17, min_fixed=0, max_fixed=0))
263
+ #
264
+ # def mp_logloss(y_true, raw):
265
+ # with mp.workdps(100):
266
+ # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
267
+ # out = mp.log1p(mp.exp(raw)) - y_true * raw
268
+ # return mpf2float(out)
269
+ #
270
+ # def mp_gradient(y_true, raw):
271
+ # with mp.workdps(100):
272
+ # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
273
+ # out = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw)) - y_true
274
+ # return mpf2float(out)
275
+ #
276
+ # def mp_hessian(y_true, raw):
277
+ # with mp.workdps(100):
278
+ # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
279
+ # p = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw))
280
+ # out = p * (mp.mpf(1) - p)
281
+ # return mpf2float(out)
282
+ #
283
+ # y, raw = 0.0, 37.
284
+ # mp_logloss(y, raw), mp_gradient(y, raw), mp_hessian(y, raw)
285
+ (HalfBinomialLoss(), 0.0, -1e20, 0, 0, 0),
286
+ (HalfBinomialLoss(), 1.0, -1e20, 1e20, -1, 0),
287
+ (HalfBinomialLoss(), 0.0, -1e3, 0, 0, 0),
288
+ (HalfBinomialLoss(), 1.0, -1e3, 1e3, -1, 0),
289
+ (HalfBinomialLoss(), 1.0, -37.5, 37.5, -1, 0),
290
+ (HalfBinomialLoss(), 1.0, -37.0, 37, 1e-16 - 1, 8.533047625744065e-17),
291
+ (HalfBinomialLoss(), 0.0, -37.0, *[8.533047625744065e-17] * 3),
292
+ (HalfBinomialLoss(), 1.0, -36.9, 36.9, 1e-16 - 1, 9.430476078526806e-17),
293
+ (HalfBinomialLoss(), 0.0, -36.9, *[9.430476078526806e-17] * 3),
294
+ (HalfBinomialLoss(), 0.0, 37.0, 37, 1 - 1e-16, 8.533047625744065e-17),
295
+ (HalfBinomialLoss(), 1.0, 37.0, *[8.533047625744066e-17] * 3),
296
+ (HalfBinomialLoss(), 0.0, 37.5, 37.5, 1, 5.175555005801868e-17),
297
+ (HalfBinomialLoss(), 0.0, 232.8, 232.8, 1, 1.4287342391028437e-101),
298
+ (HalfBinomialLoss(), 1.0, 1e20, 0, 0, 0),
299
+ (HalfBinomialLoss(), 0.0, 1e20, 1e20, 1, 0),
300
+ (
301
+ HalfBinomialLoss(),
302
+ 1.0,
303
+ 232.8,
304
+ 0,
305
+ -1.4287342391028437e-101,
306
+ 1.4287342391028437e-101,
307
+ ),
308
+ (HalfBinomialLoss(), 1.0, 232.9, 0, 0, 0),
309
+ (HalfBinomialLoss(), 1.0, 1e3, 0, 0, 0),
310
+ (HalfBinomialLoss(), 0.0, 1e3, 1e3, 1, 0),
311
+ (
312
+ HalfMultinomialLoss(n_classes=3),
313
+ 0.0,
314
+ [0.2, 0.5, 0.3],
315
+ logsumexp([0.2, 0.5, 0.3]) - 0.2,
316
+ None,
317
+ None,
318
+ ),
319
+ (
320
+ HalfMultinomialLoss(n_classes=3),
321
+ 1.0,
322
+ [0.2, 0.5, 0.3],
323
+ logsumexp([0.2, 0.5, 0.3]) - 0.5,
324
+ None,
325
+ None,
326
+ ),
327
+ (
328
+ HalfMultinomialLoss(n_classes=3),
329
+ 2.0,
330
+ [0.2, 0.5, 0.3],
331
+ logsumexp([0.2, 0.5, 0.3]) - 0.3,
332
+ None,
333
+ None,
334
+ ),
335
+ (
336
+ HalfMultinomialLoss(n_classes=3),
337
+ 2.0,
338
+ [1e4, 0, 7e-7],
339
+ logsumexp([1e4, 0, 7e-7]) - (7e-7),
340
+ None,
341
+ None,
342
+ ),
343
+ ],
344
+ ids=loss_instance_name,
345
+ )
346
+ def test_loss_on_specific_values(
347
+ loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true
348
+ ):
349
+ """Test losses, gradients and hessians at specific values."""
350
+ loss1 = loss(y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]))
351
+ grad1 = loss.gradient(
352
+ y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
353
+ )
354
+ loss2, grad2 = loss.loss_gradient(
355
+ y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
356
+ )
357
+ grad3, hess = loss.gradient_hessian(
358
+ y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
359
+ )
360
+
361
+ assert loss1 == approx(loss_true, rel=1e-15, abs=1e-15)
362
+ assert loss2 == approx(loss_true, rel=1e-15, abs=1e-15)
363
+
364
+ if gradient_true is not None:
365
+ assert grad1 == approx(gradient_true, rel=1e-15, abs=1e-15)
366
+ assert grad2 == approx(gradient_true, rel=1e-15, abs=1e-15)
367
+ assert grad3 == approx(gradient_true, rel=1e-15, abs=1e-15)
368
+
369
+ if hessian_true is not None:
370
+ assert hess == approx(hessian_true, rel=1e-15, abs=1e-15)
371
+
372
+
373
+ @pytest.mark.parametrize("loss", ALL_LOSSES)
374
+ @pytest.mark.parametrize("readonly_memmap", [False, True])
375
+ @pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
376
+ @pytest.mark.parametrize("dtype_out", [np.float32, np.float64])
377
+ @pytest.mark.parametrize("sample_weight", [None, 1])
378
+ @pytest.mark.parametrize("out1", [None, 1])
379
+ @pytest.mark.parametrize("out2", [None, 1])
380
+ @pytest.mark.parametrize("n_threads", [1, 2])
381
+ def test_loss_dtype(
382
+ loss, readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads
383
+ ):
384
+ """Test acceptance of dtypes, readonly and writeable arrays in loss functions.
385
+
386
+ Check that loss accepts if all input arrays are either all float32 or all
387
+ float64, and all output arrays are either all float32 or all float64.
388
+
389
+ Also check that input arrays can be readonly, e.g. memory mapped.
390
+ """
391
+ if _IS_WASM and readonly_memmap: # pragma: nocover
392
+ pytest.xfail(reason="memmap not fully supported")
393
+
394
+ loss = loss()
395
+ # generate a y_true and raw_prediction in valid range
396
+ n_samples = 5
397
+ y_true, raw_prediction = random_y_true_raw_prediction(
398
+ loss=loss,
399
+ n_samples=n_samples,
400
+ y_bound=(-100, 100),
401
+ raw_bound=(-10, 10),
402
+ seed=42,
403
+ )
404
+ y_true = y_true.astype(dtype_in)
405
+ raw_prediction = raw_prediction.astype(dtype_in)
406
+
407
+ if sample_weight is not None:
408
+ sample_weight = np.array([2.0] * n_samples, dtype=dtype_in)
409
+ if out1 is not None:
410
+ out1 = np.empty_like(y_true, dtype=dtype_out)
411
+ if out2 is not None:
412
+ out2 = np.empty_like(raw_prediction, dtype=dtype_out)
413
+
414
+ if readonly_memmap:
415
+ y_true = create_memmap_backed_data(y_true)
416
+ raw_prediction = create_memmap_backed_data(raw_prediction)
417
+ if sample_weight is not None:
418
+ sample_weight = create_memmap_backed_data(sample_weight)
419
+
420
+ loss.loss(
421
+ y_true=y_true,
422
+ raw_prediction=raw_prediction,
423
+ sample_weight=sample_weight,
424
+ loss_out=out1,
425
+ n_threads=n_threads,
426
+ )
427
+ loss.gradient(
428
+ y_true=y_true,
429
+ raw_prediction=raw_prediction,
430
+ sample_weight=sample_weight,
431
+ gradient_out=out2,
432
+ n_threads=n_threads,
433
+ )
434
+ loss.loss_gradient(
435
+ y_true=y_true,
436
+ raw_prediction=raw_prediction,
437
+ sample_weight=sample_weight,
438
+ loss_out=out1,
439
+ gradient_out=out2,
440
+ n_threads=n_threads,
441
+ )
442
+ if out1 is not None and loss.is_multiclass:
443
+ out1 = np.empty_like(raw_prediction, dtype=dtype_out)
444
+ loss.gradient_hessian(
445
+ y_true=y_true,
446
+ raw_prediction=raw_prediction,
447
+ sample_weight=sample_weight,
448
+ gradient_out=out1,
449
+ hessian_out=out2,
450
+ n_threads=n_threads,
451
+ )
452
+ loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight)
453
+ loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
454
+ loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)
455
+ if hasattr(loss, "predict_proba"):
456
+ loss.predict_proba(raw_prediction=raw_prediction)
457
+ if hasattr(loss, "gradient_proba"):
458
+ loss.gradient_proba(
459
+ y_true=y_true,
460
+ raw_prediction=raw_prediction,
461
+ sample_weight=sample_weight,
462
+ gradient_out=out1,
463
+ proba_out=out2,
464
+ n_threads=n_threads,
465
+ )
466
+
467
+
468
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
469
+ @pytest.mark.parametrize("sample_weight", [None, "range"])
470
+ def test_loss_same_as_C_functions(loss, sample_weight):
471
+ """Test that Python and Cython functions return same results."""
472
+ y_true, raw_prediction = random_y_true_raw_prediction(
473
+ loss=loss,
474
+ n_samples=20,
475
+ y_bound=(-100, 100),
476
+ raw_bound=(-10, 10),
477
+ seed=42,
478
+ )
479
+ if sample_weight == "range":
480
+ sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
481
+
482
+ out_l1 = np.empty_like(y_true)
483
+ out_l2 = np.empty_like(y_true)
484
+ out_g1 = np.empty_like(raw_prediction)
485
+ out_g2 = np.empty_like(raw_prediction)
486
+ out_h1 = np.empty_like(raw_prediction)
487
+ out_h2 = np.empty_like(raw_prediction)
488
+ loss.loss(
489
+ y_true=y_true,
490
+ raw_prediction=raw_prediction,
491
+ sample_weight=sample_weight,
492
+ loss_out=out_l1,
493
+ )
494
+ loss.closs.loss(
495
+ y_true=y_true,
496
+ raw_prediction=raw_prediction,
497
+ sample_weight=sample_weight,
498
+ loss_out=out_l2,
499
+ )
500
+ assert_allclose(out_l1, out_l2)
501
+ loss.gradient(
502
+ y_true=y_true,
503
+ raw_prediction=raw_prediction,
504
+ sample_weight=sample_weight,
505
+ gradient_out=out_g1,
506
+ )
507
+ loss.closs.gradient(
508
+ y_true=y_true,
509
+ raw_prediction=raw_prediction,
510
+ sample_weight=sample_weight,
511
+ gradient_out=out_g2,
512
+ )
513
+ assert_allclose(out_g1, out_g2)
514
+ loss.closs.loss_gradient(
515
+ y_true=y_true,
516
+ raw_prediction=raw_prediction,
517
+ sample_weight=sample_weight,
518
+ loss_out=out_l1,
519
+ gradient_out=out_g1,
520
+ )
521
+ loss.closs.loss_gradient(
522
+ y_true=y_true,
523
+ raw_prediction=raw_prediction,
524
+ sample_weight=sample_weight,
525
+ loss_out=out_l2,
526
+ gradient_out=out_g2,
527
+ )
528
+ assert_allclose(out_l1, out_l2)
529
+ assert_allclose(out_g1, out_g2)
530
+ loss.gradient_hessian(
531
+ y_true=y_true,
532
+ raw_prediction=raw_prediction,
533
+ sample_weight=sample_weight,
534
+ gradient_out=out_g1,
535
+ hessian_out=out_h1,
536
+ )
537
+ loss.closs.gradient_hessian(
538
+ y_true=y_true,
539
+ raw_prediction=raw_prediction,
540
+ sample_weight=sample_weight,
541
+ gradient_out=out_g2,
542
+ hessian_out=out_h2,
543
+ )
544
+ assert_allclose(out_g1, out_g2)
545
+ assert_allclose(out_h1, out_h2)
546
+
547
+
548
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
549
+ @pytest.mark.parametrize("sample_weight", [None, "range"])
550
+ def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed):
551
+ """Test that loss and gradient are the same across different functions.
552
+
553
+ Also test that output arguments contain correct results.
554
+ """
555
+ y_true, raw_prediction = random_y_true_raw_prediction(
556
+ loss=loss,
557
+ n_samples=20,
558
+ y_bound=(-100, 100),
559
+ raw_bound=(-10, 10),
560
+ seed=global_random_seed,
561
+ )
562
+ if sample_weight == "range":
563
+ sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
564
+
565
+ out_l1 = np.empty_like(y_true)
566
+ out_l2 = np.empty_like(y_true)
567
+ out_g1 = np.empty_like(raw_prediction)
568
+ out_g2 = np.empty_like(raw_prediction)
569
+ out_g3 = np.empty_like(raw_prediction)
570
+ out_h3 = np.empty_like(raw_prediction)
571
+
572
+ l1 = loss.loss(
573
+ y_true=y_true,
574
+ raw_prediction=raw_prediction,
575
+ sample_weight=sample_weight,
576
+ loss_out=out_l1,
577
+ )
578
+ g1 = loss.gradient(
579
+ y_true=y_true,
580
+ raw_prediction=raw_prediction,
581
+ sample_weight=sample_weight,
582
+ gradient_out=out_g1,
583
+ )
584
+ l2, g2 = loss.loss_gradient(
585
+ y_true=y_true,
586
+ raw_prediction=raw_prediction,
587
+ sample_weight=sample_weight,
588
+ loss_out=out_l2,
589
+ gradient_out=out_g2,
590
+ )
591
+ g3, h3 = loss.gradient_hessian(
592
+ y_true=y_true,
593
+ raw_prediction=raw_prediction,
594
+ sample_weight=sample_weight,
595
+ gradient_out=out_g3,
596
+ hessian_out=out_h3,
597
+ )
598
+ assert_allclose(l1, l2)
599
+ assert_array_equal(l1, out_l1)
600
+ assert np.shares_memory(l1, out_l1)
601
+ assert_array_equal(l2, out_l2)
602
+ assert np.shares_memory(l2, out_l2)
603
+ assert_allclose(g1, g2)
604
+ assert_allclose(g1, g3)
605
+ assert_array_equal(g1, out_g1)
606
+ assert np.shares_memory(g1, out_g1)
607
+ assert_array_equal(g2, out_g2)
608
+ assert np.shares_memory(g2, out_g2)
609
+ assert_array_equal(g3, out_g3)
610
+ assert np.shares_memory(g3, out_g3)
611
+
612
+ if hasattr(loss, "gradient_proba"):
613
+ assert loss.is_multiclass # only for HalfMultinomialLoss
614
+ out_g4 = np.empty_like(raw_prediction)
615
+ out_proba = np.empty_like(raw_prediction)
616
+ g4, proba = loss.gradient_proba(
617
+ y_true=y_true,
618
+ raw_prediction=raw_prediction,
619
+ sample_weight=sample_weight,
620
+ gradient_out=out_g4,
621
+ proba_out=out_proba,
622
+ )
623
+ assert_allclose(g1, out_g4)
624
+ assert_allclose(g1, g4)
625
+ assert_allclose(proba, out_proba)
626
+ assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11)
627
+
628
+
629
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
630
+ @pytest.mark.parametrize("sample_weight", ["ones", "random"])
631
+ def test_sample_weight_multiplies(loss, sample_weight, global_random_seed):
632
+ """Test sample weights in loss, gradients and hessians.
633
+
634
+ Make sure that passing sample weights to loss, gradient and hessian
635
+ computation methods is equivalent to multiplying by the weights.
636
+ """
637
+ n_samples = 100
638
+ y_true, raw_prediction = random_y_true_raw_prediction(
639
+ loss=loss,
640
+ n_samples=n_samples,
641
+ y_bound=(-100, 100),
642
+ raw_bound=(-5, 5),
643
+ seed=global_random_seed,
644
+ )
645
+
646
+ if sample_weight == "ones":
647
+ sample_weight = np.ones(shape=n_samples, dtype=np.float64)
648
+ else:
649
+ rng = np.random.RandomState(global_random_seed)
650
+ sample_weight = rng.normal(size=n_samples).astype(np.float64)
651
+
652
+ assert_allclose(
653
+ loss.loss(
654
+ y_true=y_true,
655
+ raw_prediction=raw_prediction,
656
+ sample_weight=sample_weight,
657
+ ),
658
+ sample_weight
659
+ * loss.loss(
660
+ y_true=y_true,
661
+ raw_prediction=raw_prediction,
662
+ sample_weight=None,
663
+ ),
664
+ )
665
+
666
+ losses, gradient = loss.loss_gradient(
667
+ y_true=y_true,
668
+ raw_prediction=raw_prediction,
669
+ sample_weight=None,
670
+ )
671
+ losses_sw, gradient_sw = loss.loss_gradient(
672
+ y_true=y_true,
673
+ raw_prediction=raw_prediction,
674
+ sample_weight=sample_weight,
675
+ )
676
+ assert_allclose(losses * sample_weight, losses_sw)
677
+ if not loss.is_multiclass:
678
+ assert_allclose(gradient * sample_weight, gradient_sw)
679
+ else:
680
+ assert_allclose(gradient * sample_weight[:, None], gradient_sw)
681
+
682
+ gradient, hessian = loss.gradient_hessian(
683
+ y_true=y_true,
684
+ raw_prediction=raw_prediction,
685
+ sample_weight=None,
686
+ )
687
+ gradient_sw, hessian_sw = loss.gradient_hessian(
688
+ y_true=y_true,
689
+ raw_prediction=raw_prediction,
690
+ sample_weight=sample_weight,
691
+ )
692
+ if not loss.is_multiclass:
693
+ assert_allclose(gradient * sample_weight, gradient_sw)
694
+ assert_allclose(hessian * sample_weight, hessian_sw)
695
+ else:
696
+ assert_allclose(gradient * sample_weight[:, None], gradient_sw)
697
+ assert_allclose(hessian * sample_weight[:, None], hessian_sw)
698
+
699
+
700
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
701
+ def test_graceful_squeezing(loss):
702
+ """Test that reshaped raw_prediction gives same results."""
703
+ y_true, raw_prediction = random_y_true_raw_prediction(
704
+ loss=loss,
705
+ n_samples=20,
706
+ y_bound=(-100, 100),
707
+ raw_bound=(-10, 10),
708
+ seed=42,
709
+ )
710
+
711
+ if raw_prediction.ndim == 1:
712
+ raw_prediction_2d = raw_prediction[:, None]
713
+ assert_allclose(
714
+ loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d),
715
+ loss.loss(y_true=y_true, raw_prediction=raw_prediction),
716
+ )
717
+ assert_allclose(
718
+ loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
719
+ loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction),
720
+ )
721
+ assert_allclose(
722
+ loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
723
+ loss.gradient(y_true=y_true, raw_prediction=raw_prediction),
724
+ )
725
+ assert_allclose(
726
+ loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d),
727
+ loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction),
728
+ )
729
+
730
+
731
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
732
+ @pytest.mark.parametrize("sample_weight", [None, "range"])
733
+ def test_loss_of_perfect_prediction(loss, sample_weight):
734
+ """Test value of perfect predictions.
735
+
736
+ Loss of y_pred = y_true plus constant_to_optimal_zero should sum up to
737
+ zero.
738
+ """
739
+ if not loss.is_multiclass:
740
+ # Use small values such that exp(value) is not nan.
741
+ raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])
742
+ # If link is identity, we must respect the interval of y_pred:
743
+ if isinstance(loss.link, IdentityLink):
744
+ eps = 1e-10
745
+ low = loss.interval_y_pred.low
746
+ if not loss.interval_y_pred.low_inclusive:
747
+ low = low + eps
748
+ high = loss.interval_y_pred.high
749
+ if not loss.interval_y_pred.high_inclusive:
750
+ high = high - eps
751
+ raw_prediction = np.clip(raw_prediction, low, high)
752
+ y_true = loss.link.inverse(raw_prediction)
753
+ else:
754
+ # HalfMultinomialLoss
755
+ y_true = np.arange(loss.n_classes).astype(float)
756
+ # raw_prediction with entries -exp(10), but +exp(10) on the diagonal
757
+ # this is close enough to np.inf which would produce nan
758
+ raw_prediction = np.full(
759
+ shape=(loss.n_classes, loss.n_classes),
760
+ fill_value=-np.exp(10),
761
+ dtype=float,
762
+ )
763
+ raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)
764
+
765
+ if sample_weight == "range":
766
+ sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
767
+
768
+ loss_value = loss.loss(
769
+ y_true=y_true,
770
+ raw_prediction=raw_prediction,
771
+ sample_weight=sample_weight,
772
+ )
773
+ constant_term = loss.constant_to_optimal_zero(
774
+ y_true=y_true, sample_weight=sample_weight
775
+ )
776
+ # Comparing loss_value + constant_term to zero would result in large
777
+ # round-off errors.
778
+ assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)
779
+
780
+
781
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
782
+ @pytest.mark.parametrize("sample_weight", [None, "range"])
783
+ def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed):
784
+ """Test gradients and hessians with numerical derivatives.
785
+
786
+ Gradient should equal the numerical derivatives of the loss function.
787
+ Hessians should equal the numerical derivatives of gradients.
788
+ """
789
+ n_samples = 20
790
+ y_true, raw_prediction = random_y_true_raw_prediction(
791
+ loss=loss,
792
+ n_samples=n_samples,
793
+ y_bound=(-100, 100),
794
+ raw_bound=(-5, 5),
795
+ seed=global_random_seed,
796
+ )
797
+
798
+ if sample_weight == "range":
799
+ sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
800
+
801
+ g, h = loss.gradient_hessian(
802
+ y_true=y_true,
803
+ raw_prediction=raw_prediction,
804
+ sample_weight=sample_weight,
805
+ )
806
+
807
+ assert g.shape == raw_prediction.shape
808
+ assert h.shape == raw_prediction.shape
809
+
810
+ if not loss.is_multiclass:
811
+
812
+ def loss_func(x):
813
+ return loss.loss(
814
+ y_true=y_true,
815
+ raw_prediction=x,
816
+ sample_weight=sample_weight,
817
+ )
818
+
819
+ g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6)
820
+ assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10)
821
+
822
+ def grad_func(x):
823
+ return loss.gradient(
824
+ y_true=y_true,
825
+ raw_prediction=x,
826
+ sample_weight=sample_weight,
827
+ )
828
+
829
+ h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6)
830
+ if loss.approx_hessian:
831
+ # TODO: What could we test if loss.approx_hessian?
832
+ pass
833
+ else:
834
+ assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10)
835
+ else:
836
+ # For multiclass loss, we should only change the predictions of the
837
+ # class for which the derivative is taken, e.g. offset[:, k] = eps
838
+ # for class k.
839
+ # As a softmax is computed, offsetting the whole array by a constant
840
+ # would have no effect on the probabilities, and thus on the loss.
841
+ for k in range(loss.n_classes):
842
+
843
+ def loss_func(x):
844
+ raw = raw_prediction.copy()
845
+ raw[:, k] = x
846
+ return loss.loss(
847
+ y_true=y_true,
848
+ raw_prediction=raw,
849
+ sample_weight=sample_weight,
850
+ )
851
+
852
+ g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5)
853
+ assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10)
854
+
855
+ def grad_func(x):
856
+ raw = raw_prediction.copy()
857
+ raw[:, k] = x
858
+ return loss.gradient(
859
+ y_true=y_true,
860
+ raw_prediction=raw,
861
+ sample_weight=sample_weight,
862
+ )[:, k]
863
+
864
+ h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6)
865
+ if loss.approx_hessian:
866
+ # TODO: What could we test if loss.approx_hessian?
867
+ pass
868
+ else:
869
+ assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10)
870
+
871
+
872
+ @pytest.mark.parametrize(
873
+ "loss, x0, y_true",
874
+ [
875
+ ("squared_error", -2.0, 42),
876
+ ("squared_error", 117.0, 1.05),
877
+ ("squared_error", 0.0, 0.0),
878
+ # The argmin of binomial_loss for y_true=0 and y_true=1 is resp.
879
+ # -inf and +inf due to logit, cf. "complete separation". Therefore, we
880
+ # use 0 < y_true < 1.
881
+ ("binomial_loss", 0.3, 0.1),
882
+ ("binomial_loss", -12, 0.2),
883
+ ("binomial_loss", 30, 0.9),
884
+ ("poisson_loss", 12.0, 1.0),
885
+ ("poisson_loss", 0.0, 2.0),
886
+ ("poisson_loss", -22.0, 10.0),
887
+ ],
888
+ )
889
+ @skip_if_32bit
890
+ def test_derivatives(loss, x0, y_true):
891
+ """Test that gradients are zero at the minimum of the loss.
892
+
893
+ We check this on a single value/sample using Halley's method with the
894
+ first and second order derivatives computed by the Loss instance.
895
+ Note that methods of Loss instances operate on arrays while the newton
896
+ root finder expects a scalar or a one-element array for this purpose.
897
+ """
898
+ loss = _LOSSES[loss](sample_weight=None)
899
+ y_true = np.array([y_true], dtype=np.float64)
900
+ x0 = np.array([x0], dtype=np.float64)
901
+
902
+ def func(x: np.ndarray) -> np.ndarray:
903
+ """Compute loss plus constant term.
904
+
905
+ The constant term is such that the minimum function value is zero,
906
+ which is required by the Newton method.
907
+ """
908
+ return loss.loss(
909
+ y_true=y_true, raw_prediction=x
910
+ ) + loss.constant_to_optimal_zero(y_true=y_true)
911
+
912
+ def fprime(x: np.ndarray) -> np.ndarray:
913
+ return loss.gradient(y_true=y_true, raw_prediction=x)
914
+
915
+ def fprime2(x: np.ndarray) -> np.ndarray:
916
+ return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1]
917
+
918
+ optimum = newton(
919
+ func,
920
+ x0=x0,
921
+ fprime=fprime,
922
+ fprime2=fprime2,
923
+ maxiter=100,
924
+ tol=5e-8,
925
+ )
926
+
927
+ # Need to ravel arrays because assert_allclose requires matching
928
+ # dimensions.
929
+ y_true = y_true.ravel()
930
+ optimum = optimum.ravel()
931
+ assert_allclose(loss.link.inverse(optimum), y_true)
932
+ assert_allclose(func(optimum), 0, atol=1e-14)
933
+ assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7)
934
+
935
+
936
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
937
+ @pytest.mark.parametrize("sample_weight", [None, "range"])
938
+ def test_loss_intercept_only(loss, sample_weight):
939
+ """Test that fit_intercept_only returns the argmin of the loss.
940
+
941
+ Also test that the gradient is zero at the minimum.
942
+ """
943
+ n_samples = 50
944
+ if not loss.is_multiclass:
945
+ y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples))
946
+ else:
947
+ y_true = np.arange(n_samples).astype(np.float64) % loss.n_classes
948
+ y_true[::5] = 0 # exceedance of class 0
949
+
950
+ if sample_weight == "range":
951
+ sample_weight = np.linspace(0.1, 2, num=n_samples)
952
+
953
+ a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
954
+
955
+ # find minimum by optimization
956
+ def fun(x):
957
+ if not loss.is_multiclass:
958
+ raw_prediction = np.full(shape=(n_samples), fill_value=x)
959
+ else:
960
+ raw_prediction = np.ascontiguousarray(
961
+ np.broadcast_to(x, shape=(n_samples, loss.n_classes))
962
+ )
963
+ return loss(
964
+ y_true=y_true,
965
+ raw_prediction=raw_prediction,
966
+ sample_weight=sample_weight,
967
+ )
968
+
969
+ if not loss.is_multiclass:
970
+ opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100})
971
+ grad = loss.gradient(
972
+ y_true=y_true,
973
+ raw_prediction=np.full_like(y_true, a),
974
+ sample_weight=sample_weight,
975
+ )
976
+ assert a.shape == tuple() # scalar
977
+ assert a.dtype == y_true.dtype
978
+ assert_all_finite(a)
979
+ assert a == approx(opt.x, rel=1e-7)
980
+ assert grad.sum() == approx(0, abs=1e-12)
981
+ else:
982
+ # The constraint corresponds to sum(raw_prediction) = 0. Without it, we would
983
+ # need to apply loss.symmetrize_raw_prediction to opt.x before comparing.
984
+ opt = minimize(
985
+ fun,
986
+ np.zeros((loss.n_classes)),
987
+ tol=1e-13,
988
+ options={"maxiter": 100},
989
+ method="SLSQP",
990
+ constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0),
991
+ )
992
+ grad = loss.gradient(
993
+ y_true=y_true,
994
+ raw_prediction=np.tile(a, (n_samples, 1)),
995
+ sample_weight=sample_weight,
996
+ )
997
+ assert a.dtype == y_true.dtype
998
+ assert_all_finite(a)
999
+ assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12)
1000
+ assert_allclose(grad.sum(axis=0), 0, atol=1e-12)
1001
+
1002
+
1003
+ @pytest.mark.parametrize(
1004
+ "loss, func, random_dist",
1005
+ [
1006
+ (HalfSquaredError(), np.mean, "normal"),
1007
+ (AbsoluteError(), np.median, "normal"),
1008
+ (PinballLoss(quantile=0.25), lambda x: np.percentile(x, q=25), "normal"),
1009
+ (HalfPoissonLoss(), np.mean, "poisson"),
1010
+ (HalfGammaLoss(), np.mean, "exponential"),
1011
+ (HalfTweedieLoss(), np.mean, "exponential"),
1012
+ (HalfBinomialLoss(), np.mean, "binomial"),
1013
+ ],
1014
+ )
1015
+ def test_specific_fit_intercept_only(loss, func, random_dist, global_random_seed):
1016
+ """Test that fit_intercept_only returns the correct functional.
1017
+
1018
+ We test the functional for specific, meaningful distributions, e.g.
1019
+ squared error estimates the expectation of a probability distribution.
1020
+ """
1021
+ rng = np.random.RandomState(global_random_seed)
1022
+ if random_dist == "binomial":
1023
+ y_train = rng.binomial(1, 0.5, size=100)
1024
+ else:
1025
+ y_train = getattr(rng, random_dist)(size=100)
1026
+ baseline_prediction = loss.fit_intercept_only(y_true=y_train)
1027
+ # Make sure baseline prediction is the expected functional=func, e.g. mean
1028
+ # or median.
1029
+ assert_all_finite(baseline_prediction)
1030
+ assert baseline_prediction == approx(loss.link.link(func(y_train)))
1031
+ assert loss.link.inverse(baseline_prediction) == approx(func(y_train))
1032
+ if isinstance(loss.link, IdentityLink):
1033
+ assert_allclose(loss.link.inverse(baseline_prediction), baseline_prediction)
1034
+
1035
+ # Test baseline at boundary
1036
+ if loss.interval_y_true.low_inclusive:
1037
+ y_train.fill(loss.interval_y_true.low)
1038
+ baseline_prediction = loss.fit_intercept_only(y_true=y_train)
1039
+ assert_all_finite(baseline_prediction)
1040
+ if loss.interval_y_true.high_inclusive:
1041
+ y_train.fill(loss.interval_y_true.high)
1042
+ baseline_prediction = loss.fit_intercept_only(y_true=y_train)
1043
+ assert_all_finite(baseline_prediction)
1044
+
1045
+
1046
+ def test_multinomial_loss_fit_intercept_only():
1047
+ """Test that fit_intercept_only returns the mean functional for CCE."""
1048
+ rng = np.random.RandomState(0)
1049
+ n_classes = 4
1050
+ loss = HalfMultinomialLoss(n_classes=n_classes)
1051
+ # Same logic as test_specific_fit_intercept_only. Here inverse link
1052
+ # function = softmax and link function = log - symmetry term.
1053
+ y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64)
1054
+ baseline_prediction = loss.fit_intercept_only(y_true=y_train)
1055
+ assert baseline_prediction.shape == (n_classes,)
1056
+ p = np.zeros(n_classes, dtype=y_train.dtype)
1057
+ for k in range(n_classes):
1058
+ p[k] = (y_train == k).mean()
1059
+ assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p)))
1060
+ assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :]))
1061
+
1062
+ for y_train in (np.zeros(shape=10), np.ones(shape=10)):
1063
+ y_train = y_train.astype(np.float64)
1064
+ baseline_prediction = loss.fit_intercept_only(y_true=y_train)
1065
+ assert baseline_prediction.dtype == y_train.dtype
1066
+ assert_all_finite(baseline_prediction)
1067
+
1068
+
1069
+ def test_binomial_and_multinomial_loss(global_random_seed):
1070
+ """Test that multinomial loss with n_classes = 2 is the same as binomial loss."""
1071
+ rng = np.random.RandomState(global_random_seed)
1072
+ n_samples = 20
1073
+ binom = HalfBinomialLoss()
1074
+ multinom = HalfMultinomialLoss(n_classes=2)
1075
+ y_train = rng.randint(0, 2, size=n_samples).astype(np.float64)
1076
+ raw_prediction = rng.normal(size=n_samples)
1077
+ raw_multinom = np.empty((n_samples, 2))
1078
+ raw_multinom[:, 0] = -0.5 * raw_prediction
1079
+ raw_multinom[:, 1] = 0.5 * raw_prediction
1080
+ assert_allclose(
1081
+ binom.loss(y_true=y_train, raw_prediction=raw_prediction),
1082
+ multinom.loss(y_true=y_train, raw_prediction=raw_multinom),
1083
+ )
1084
+
1085
+
1086
+ @pytest.mark.parametrize("y_true", (np.array([0.0, 0, 0]), np.array([1.0, 1, 1])))
1087
+ @pytest.mark.parametrize("y_pred", (np.array([-5.0, -5, -5]), np.array([3.0, 3, 3])))
1088
+ def test_binomial_vs_alternative_formulation(y_true, y_pred, global_dtype):
1089
+ """Test that both formulations of the binomial deviance agree.
1090
+
1091
+ Often, the binomial deviance or log loss is written in terms of a variable
1092
+ z in {-1, +1}, but we use y in {0, 1}, hence z = 2 * y - 1.
1093
+ ESL II Eq. (10.18):
1094
+
1095
+ -loglike(z, f) = log(1 + exp(-2 * z * f))
1096
+
1097
+ Note:
1098
+ - ESL 2*f = raw_prediction, hence the factor 2 of ESL disappears.
1099
+ - Deviance = -2*loglike + .., but HalfBinomialLoss is half of the
1100
+ deviance, hence the factor of 2 cancels in the comparison.
1101
+ """
1102
+
1103
+ def alt_loss(y, raw_pred):
1104
+ z = 2 * y - 1
1105
+ return np.mean(np.log(1 + np.exp(-z * raw_pred)))
1106
+
1107
+ def alt_gradient(y, raw_pred):
1108
+ # alternative gradient formula according to ESL
1109
+ z = 2 * y - 1
1110
+ return -z / (1 + np.exp(z * raw_pred))
1111
+
1112
+ bin_loss = HalfBinomialLoss()
1113
+
1114
+ y_true = y_true.astype(global_dtype)
1115
+ y_pred = y_pred.astype(global_dtype)
1116
+ datum = (y_true, y_pred)
1117
+
1118
+ assert bin_loss(*datum) == approx(alt_loss(*datum))
1119
+ assert_allclose(bin_loss.gradient(*datum), alt_gradient(*datum))
1120
+
1121
+
1122
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
1123
+ def test_predict_proba(loss, global_random_seed):
1124
+ """Test that predict_proba and gradient_proba work as expected."""
1125
+ n_samples = 20
1126
+ y_true, raw_prediction = random_y_true_raw_prediction(
1127
+ loss=loss,
1128
+ n_samples=n_samples,
1129
+ y_bound=(-100, 100),
1130
+ raw_bound=(-5, 5),
1131
+ seed=global_random_seed,
1132
+ )
1133
+
1134
+ if hasattr(loss, "predict_proba"):
1135
+ proba = loss.predict_proba(raw_prediction)
1136
+ assert proba.shape == (n_samples, loss.n_classes)
1137
+ assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
1138
+
1139
+ if hasattr(loss, "gradient_proba"):
1140
+ for grad, proba in (
1141
+ (None, None),
1142
+ (None, np.empty_like(raw_prediction)),
1143
+ (np.empty_like(raw_prediction), None),
1144
+ (np.empty_like(raw_prediction), np.empty_like(raw_prediction)),
1145
+ ):
1146
+ grad, proba = loss.gradient_proba(
1147
+ y_true=y_true,
1148
+ raw_prediction=raw_prediction,
1149
+ sample_weight=None,
1150
+ gradient_out=grad,
1151
+ proba_out=proba,
1152
+ )
1153
+ assert proba.shape == (n_samples, loss.n_classes)
1154
+ assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
1155
+ assert_allclose(
1156
+ grad,
1157
+ loss.gradient(
1158
+ y_true=y_true,
1159
+ raw_prediction=raw_prediction,
1160
+ sample_weight=None,
1161
+ gradient_out=None,
1162
+ ),
1163
+ )
1164
+
1165
+
1166
+ @pytest.mark.parametrize("loss", ALL_LOSSES)
1167
+ @pytest.mark.parametrize("sample_weight", [None, "range"])
1168
+ @pytest.mark.parametrize("dtype", (np.float32, np.float64))
1169
+ @pytest.mark.parametrize("order", ("C", "F"))
1170
+ def test_init_gradient_and_hessians(loss, sample_weight, dtype, order):
1171
+ """Test that init_gradient_and_hessian works as expected.
1172
+
1173
+ Passing sample_weight to a loss correctly influences the constant_hessian
1174
+ attribute, and consequently the shape of the hessian array.
1175
+ """
1176
+ n_samples = 5
1177
+ if sample_weight == "range":
1178
+ sample_weight = np.ones(n_samples)
1179
+ loss = loss(sample_weight=sample_weight)
1180
+ gradient, hessian = loss.init_gradient_and_hessian(
1181
+ n_samples=n_samples,
1182
+ dtype=dtype,
1183
+ order=order,
1184
+ )
1185
+ if loss.constant_hessian:
1186
+ assert gradient.shape == (n_samples,)
1187
+ assert hessian.shape == (1,)
1188
+ elif loss.is_multiclass:
1189
+ assert gradient.shape == (n_samples, loss.n_classes)
1190
+ assert hessian.shape == (n_samples, loss.n_classes)
1191
+ else:
1192
+ assert gradient.shape == (n_samples,)
1193
+ assert hessian.shape == (n_samples,)
1194
+
1195
+ assert gradient.dtype == dtype
1196
+ assert hessian.dtype == dtype
1197
+
1198
+ if order == "C":
1199
+ assert gradient.flags.c_contiguous
1200
+ assert hessian.flags.c_contiguous
1201
+ else:
1202
+ assert gradient.flags.f_contiguous
1203
+ assert hessian.flags.f_contiguous
1204
+
1205
+
1206
+ @pytest.mark.parametrize("loss", ALL_LOSSES)
1207
+ @pytest.mark.parametrize(
1208
+ "params, err_msg",
1209
+ [
1210
+ (
1211
+ {"dtype": np.int64},
1212
+ f"Valid options for 'dtype' are .* Got dtype={np.int64} instead.",
1213
+ ),
1214
+ ],
1215
+ )
1216
+ def test_init_gradient_and_hessian_raises(loss, params, err_msg):
1217
+ """Test that init_gradient_and_hessian raises errors for invalid input."""
1218
+ loss = loss()
1219
+ with pytest.raises((ValueError, TypeError), match=err_msg):
1220
+ gradient, hessian = loss.init_gradient_and_hessian(n_samples=5, **params)
1221
+
1222
+
1223
+ @pytest.mark.parametrize(
1224
+ "loss, params, err_type, err_msg",
1225
+ [
1226
+ (
1227
+ PinballLoss,
1228
+ {"quantile": None},
1229
+ TypeError,
1230
+ "quantile must be an instance of float, not NoneType.",
1231
+ ),
1232
+ (
1233
+ PinballLoss,
1234
+ {"quantile": 0},
1235
+ ValueError,
1236
+ "quantile == 0, must be > 0.",
1237
+ ),
1238
+ (PinballLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."),
1239
+ (
1240
+ HuberLoss,
1241
+ {"quantile": None},
1242
+ TypeError,
1243
+ "quantile must be an instance of float, not NoneType.",
1244
+ ),
1245
+ (
1246
+ HuberLoss,
1247
+ {"quantile": 0},
1248
+ ValueError,
1249
+ "quantile == 0, must be > 0.",
1250
+ ),
1251
+ (HuberLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."),
1252
+ ],
1253
+ )
1254
+ def test_loss_init_parameter_validation(loss, params, err_type, err_msg):
1255
+ """Test that loss raises errors for invalid input."""
1256
+ with pytest.raises(err_type, match=err_msg):
1257
+ loss(**params)
1258
+
1259
+
1260
+ @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
1261
+ def test_loss_pickle(loss):
1262
+ """Test that losses can be pickled."""
1263
+ n_samples = 20
1264
+ y_true, raw_prediction = random_y_true_raw_prediction(
1265
+ loss=loss,
1266
+ n_samples=n_samples,
1267
+ y_bound=(-100, 100),
1268
+ raw_bound=(-5, 5),
1269
+ seed=42,
1270
+ )
1271
+ pickled_loss = pickle.dumps(loss)
1272
+ unpickled_loss = pickle.loads(pickled_loss)
1273
+ assert loss(y_true=y_true, raw_prediction=raw_prediction) == approx(
1274
+ unpickled_loss(y_true=y_true, raw_prediction=raw_prediction)
1275
+ )
1276
+
1277
+
1278
+ @pytest.mark.parametrize("p", [-1.5, 0, 1, 1.5, 2, 3])
1279
+ def test_tweedie_log_identity_consistency(p):
1280
+ """Test for identical losses when only the link function is different."""
1281
+ half_tweedie_log = HalfTweedieLoss(power=p)
1282
+ half_tweedie_identity = HalfTweedieLossIdentity(power=p)
1283
+ n_samples = 10
1284
+ y_true, raw_prediction = random_y_true_raw_prediction(
1285
+ loss=half_tweedie_log, n_samples=n_samples, seed=42
1286
+ )
1287
+ y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)
1288
+
1289
+ # Let's compare the loss values, up to some constant term that is dropped
1290
+ # in HalfTweedieLoss but not in HalfTweedieLossIdentity.
1291
+ loss_log = half_tweedie_log.loss(
1292
+ y_true=y_true, raw_prediction=raw_prediction
1293
+ ) + half_tweedie_log.constant_to_optimal_zero(y_true)
1294
+ loss_identity = half_tweedie_identity.loss(
1295
+ y_true=y_true, raw_prediction=y_pred
1296
+ ) + half_tweedie_identity.constant_to_optimal_zero(y_true)
1297
+ # Note that HalfTweedieLoss ignores different constant terms than
1298
+ # HalfTweedieLossIdentity. Constant terms means terms not depending on
1299
+ # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
1300
+ # give the same values.
1301
+ assert_allclose(loss_log, loss_identity)
1302
+
1303
+ # For gradients and hessians, the constant terms do not matter. We have, however,
1304
+ # to account for the chain rule, i.e. with x=raw_prediction
1305
+ # gradient_log(x) = d/dx loss_log(x)
1306
+ # = d/dx loss_identity(exp(x))
1307
+ # = exp(x) * gradient_identity(exp(x))
1308
+ # Similarly,
1309
+ # hessian_log(x) = exp(x) * gradient_identity(exp(x))
1310
+ # + exp(x)**2 * hessian_identity(x)
1311
+ gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
1312
+ y_true=y_true, raw_prediction=raw_prediction
1313
+ )
1314
+ gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
1315
+ y_true=y_true, raw_prediction=y_pred
1316
+ )
1317
+ assert_allclose(gradient_log, y_pred * gradient_identity)
1318
+ assert_allclose(
1319
+ hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
1320
+ )
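The numerical check used in test_gradients_hessians_numerically can be reproduced in a few lines; a minimal sketch for a single loss (assuming the private sklearn._loss.loss module):

import numpy as np
from sklearn._loss.loss import HalfSquaredError

loss = HalfSquaredError()
y_true = np.array([0.5, 2.0, -1.0])
raw = np.array([0.0, 1.0, 3.0])
eps = 1e-6

def f(x):
    # per-sample loss as a function of raw_prediction
    return loss.loss(y_true=y_true, raw_prediction=x)

# central finite differences of accuracy 4, as in numerical_derivative above
g_num = (-f(raw + 2 * eps) + 8 * f(raw + eps) - 8 * f(raw - eps) + f(raw - 2 * eps)) / (12 * eps)
g = loss.gradient(y_true=y_true, raw_prediction=raw)
assert np.allclose(g, g_num, atol=1e-7)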
venv/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py ADDED
@@ -0,0 +1,47 @@
+ """
+ The :mod:`sklearn.feature_selection` module implements feature selection
+ algorithms. It currently includes univariate filter selection methods and the
+ recursive feature elimination algorithm.
+ """
+
+ from ._base import SelectorMixin
+ from ._from_model import SelectFromModel
+ from ._mutual_info import mutual_info_classif, mutual_info_regression
+ from ._rfe import RFE, RFECV
+ from ._sequential import SequentialFeatureSelector
+ from ._univariate_selection import (
+     GenericUnivariateSelect,
+     SelectFdr,
+     SelectFpr,
+     SelectFwe,
+     SelectKBest,
+     SelectPercentile,
+     chi2,
+     f_classif,
+     f_oneway,
+     f_regression,
+     r_regression,
+ )
+ from ._variance_threshold import VarianceThreshold
+
+ __all__ = [
+     "GenericUnivariateSelect",
+     "SequentialFeatureSelector",
+     "RFE",
+     "RFECV",
+     "SelectFdr",
+     "SelectFpr",
+     "SelectFwe",
+     "SelectKBest",
+     "SelectFromModel",
+     "SelectPercentile",
+     "VarianceThreshold",
+     "chi2",
+     "f_classif",
+     "f_oneway",
+     "f_regression",
+     "r_regression",
+     "mutual_info_classif",
+     "mutual_info_regression",
+     "SelectorMixin",
+ ]
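For orientation, here is a minimal sketch of the public API that this `__init__.py` re-exports; the selector and scoring function come straight from the `__all__` list above, while the iris dataset and `k=2` are just illustrative choices.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)

# Keep the two features with the highest ANOVA F-statistic.
X_reduced = SelectKBest(score_func=f_classif, k=2).fit_transform(X, y)
print(X_reduced.shape)  # (150, 2)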
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc ADDED
Binary file (8.54 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc ADDED
Binary file (15.5 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc ADDED
Binary file (16.5 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc ADDED
Binary file (24.2 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc ADDED
Binary file (9.81 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc ADDED
Binary file (38.1 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc ADDED
Binary file (4.65 kB).
 
venv/lib/python3.10/site-packages/sklearn/feature_selection/_base.py ADDED
@@ -0,0 +1,266 @@
1
+ """Generic feature selection mixin"""
2
+
3
+ # Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+ from abc import ABCMeta, abstractmethod
8
+ from operator import attrgetter
9
+
10
+ import numpy as np
11
+ from scipy.sparse import csc_matrix, issparse
12
+
13
+ from ..base import TransformerMixin
14
+ from ..utils import (
15
+ _is_pandas_df,
16
+ _safe_indexing,
17
+ check_array,
18
+ safe_sqr,
19
+ )
20
+ from ..utils._set_output import _get_output_config
21
+ from ..utils._tags import _safe_tags
22
+ from ..utils.validation import _check_feature_names_in, check_is_fitted
23
+
24
+
25
+ class SelectorMixin(TransformerMixin, metaclass=ABCMeta):
26
+ """
27
+ Transformer mixin that performs feature selection given a support mask
28
+
29
+ This mixin provides a feature selector implementation with `transform` and
30
+ `inverse_transform` functionality given an implementation of
31
+ `_get_support_mask`.
32
+
33
+ Examples
34
+ --------
35
+ >>> import numpy as np
36
+ >>> from sklearn.datasets import load_iris
37
+ >>> from sklearn.base import BaseEstimator
38
+ >>> from sklearn.feature_selection import SelectorMixin
39
+ >>> class FeatureSelector(SelectorMixin, BaseEstimator):
40
+ ... def fit(self, X, y=None):
41
+ ... self.n_features_in_ = X.shape[1]
42
+ ... return self
43
+ ... def _get_support_mask(self):
44
+ ... mask = np.zeros(self.n_features_in_, dtype=bool)
45
+ ... mask[:2] = True # select the first two features
46
+ ... return mask
47
+ >>> X, y = load_iris(return_X_y=True)
48
+ >>> FeatureSelector().fit_transform(X, y).shape
49
+ (150, 2)
50
+ """
51
+
52
+ def get_support(self, indices=False):
53
+ """
54
+ Get a mask, or integer index, of the features selected.
55
+
56
+ Parameters
57
+ ----------
58
+ indices : bool, default=False
59
+ If True, the return value will be an array of integers, rather
60
+ than a boolean mask.
61
+
62
+ Returns
63
+ -------
64
+ support : array
65
+ An index that selects the retained features from a feature vector.
66
+ If `indices` is False, this is a boolean array of shape
67
+ [# input features], in which an element is True iff its
68
+ corresponding feature is selected for retention. If `indices` is
69
+ True, this is an integer array of shape [# output features] whose
70
+ values are indices into the input feature vector.
71
+ """
72
+ mask = self._get_support_mask()
73
+ return mask if not indices else np.where(mask)[0]
74
+
75
+ @abstractmethod
76
+ def _get_support_mask(self):
77
+ """
78
+ Get the boolean mask indicating which features are selected
79
+
80
+ Returns
81
+ -------
82
+ support : boolean array of shape [# input features]
83
+ An element is True iff its corresponding feature is selected for
84
+ retention.
85
+ """
86
+
87
+ def transform(self, X):
88
+ """Reduce X to the selected features.
89
+
90
+ Parameters
91
+ ----------
92
+ X : array of shape [n_samples, n_features]
93
+ The input samples.
94
+
95
+ Returns
96
+ -------
97
+ X_r : array of shape [n_samples, n_selected_features]
98
+ The input samples with only the selected features.
99
+ """
100
+ # Preserve X when X is a dataframe and the output is configured to
101
+ # be pandas.
102
+ output_config_dense = _get_output_config("transform", estimator=self)["dense"]
103
+ preserve_X = output_config_dense != "default" and _is_pandas_df(X)
104
+
105
+ # note: we use _safe_tags instead of _get_tags because this is a
106
+ # public Mixin.
107
+ X = self._validate_data(
108
+ X,
109
+ dtype=None,
110
+ accept_sparse="csr",
111
+ force_all_finite=not _safe_tags(self, key="allow_nan"),
112
+ cast_to_ndarray=not preserve_X,
113
+ reset=False,
114
+ )
115
+ return self._transform(X)
116
+
117
+ def _transform(self, X):
118
+ """Reduce X to the selected features."""
119
+ mask = self.get_support()
120
+ if not mask.any():
121
+ warnings.warn(
122
+ (
123
+ "No features were selected: either the data is"
124
+ " too noisy or the selection test too strict."
125
+ ),
126
+ UserWarning,
127
+ )
128
+ if hasattr(X, "iloc"):
129
+ return X.iloc[:, :0]
130
+ return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0))
131
+ return _safe_indexing(X, mask, axis=1)
132
+
133
+ def inverse_transform(self, X):
134
+ """Reverse the transformation operation.
135
+
136
+ Parameters
137
+ ----------
138
+ X : array of shape [n_samples, n_selected_features]
139
+ The input samples.
140
+
141
+ Returns
142
+ -------
143
+ X_r : array of shape [n_samples, n_original_features]
144
+ `X` with columns of zeros inserted where features would have
145
+ been removed by :meth:`transform`.
146
+ """
147
+ if issparse(X):
148
+ X = X.tocsc()
149
+ # insert additional entries in indptr:
150
+ # e.g. if transform changed indptr from [0 2 6 7] to [0 2 3]
151
+ # col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3]
152
+ it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
153
+ col_nonzeros = it.ravel()
154
+ indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
155
+ Xt = csc_matrix(
156
+ (X.data, X.indices, indptr),
157
+ shape=(X.shape[0], len(indptr) - 1),
158
+ dtype=X.dtype,
159
+ )
160
+ return Xt
161
+
162
+ support = self.get_support()
163
+ X = check_array(X, dtype=None)
164
+ if support.sum() != X.shape[1]:
165
+ raise ValueError("X has a different shape than during fitting.")
166
+
167
+ if X.ndim == 1:
168
+ X = X[None, :]
169
+ Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
170
+ Xt[:, support] = X
171
+ return Xt
172
+
173
+ def get_feature_names_out(self, input_features=None):
174
+ """Mask feature names according to selected features.
175
+
176
+ Parameters
177
+ ----------
178
+ input_features : array-like of str or None, default=None
179
+ Input features.
180
+
181
+ - If `input_features` is `None`, then `feature_names_in_` is
182
+ used as feature names in. If `feature_names_in_` is not defined,
183
+ then the following input feature names are generated:
184
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
185
+ - If `input_features` is an array-like, then `input_features` must
186
+ match `feature_names_in_` if `feature_names_in_` is defined.
187
+
188
+ Returns
189
+ -------
190
+ feature_names_out : ndarray of str objects
191
+ Transformed feature names.
192
+ """
193
+ check_is_fitted(self)
194
+ input_features = _check_feature_names_in(self, input_features)
195
+ return input_features[self.get_support()]
196
+
197
+
198
+ def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1):
199
+ """
200
+ Retrieve and aggregate (ndim > 1) the feature importances
201
+ from an estimator. Also optionally applies transformation.
202
+
203
+ Parameters
204
+ ----------
205
+ estimator : estimator
206
+ A scikit-learn estimator from which we want to get the feature
207
+ importances.
208
+
209
+ getter : "auto", str or callable
210
+ An attribute or a callable to get the feature importance. If `"auto"`,
211
+ `estimator` is expected to expose `coef_` or `feature_importances_`.
212
+
213
+ transform_func : {"norm", "square"}, default=None
214
+ The transform to apply to the feature importances. By default (`None`)
215
+ no transformation is applied.
216
+
217
+ norm_order : int, default=1
218
+ The norm order to apply when `transform_func="norm"`. Only applied
219
+ when `importances.ndim > 1`.
220
+
221
+ Returns
222
+ -------
223
+ importances : ndarray of shape (n_features,)
224
+ The features importances, optionally transformed.
225
+ """
226
+ if isinstance(getter, str):
227
+ if getter == "auto":
228
+ if hasattr(estimator, "coef_"):
229
+ getter = attrgetter("coef_")
230
+ elif hasattr(estimator, "feature_importances_"):
231
+ getter = attrgetter("feature_importances_")
232
+ else:
233
+ raise ValueError(
234
+ "when `importance_getter=='auto'`, the underlying "
235
+ f"estimator {estimator.__class__.__name__} should have "
236
+ "`coef_` or `feature_importances_` attribute. Either "
237
+ "pass a fitted estimator to feature selector or call fit "
238
+ "before calling transform."
239
+ )
240
+ else:
241
+ getter = attrgetter(getter)
242
+ elif not callable(getter):
243
+ raise ValueError("`importance_getter` has to be a string or `callable`")
244
+
245
+ importances = getter(estimator)
246
+
247
+ if transform_func is None:
248
+ return importances
249
+ elif transform_func == "norm":
250
+ if importances.ndim == 1:
251
+ importances = np.abs(importances)
252
+ else:
253
+ importances = np.linalg.norm(importances, axis=0, ord=norm_order)
254
+ elif transform_func == "square":
255
+ if importances.ndim == 1:
256
+ importances = safe_sqr(importances)
257
+ else:
258
+ importances = safe_sqr(importances).sum(axis=0)
259
+ else:
260
+ raise ValueError(
261
+ "Valid values for `transform_func` are "
262
+ + "None, 'norm' and 'square'. Those two "
263
+ + "transformation are only supported now"
264
+ )
265
+
266
+ return importances
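The `SelectorMixin` docstring above already shows how to subclass it; the sketch below follows the same pattern and additionally exercises the generic `get_support` and `inverse_transform` implementations from `_base.py`. The `FirstTwoSelector` name and the toy array are made up for illustration.

import numpy as np
from sklearn.base import BaseEstimator
from sklearn.feature_selection import SelectorMixin


class FirstTwoSelector(SelectorMixin, BaseEstimator):
    """Toy selector that always keeps the first two columns."""

    def fit(self, X, y=None):
        self.n_features_in_ = X.shape[1]
        return self

    def _get_support_mask(self):
        mask = np.zeros(self.n_features_in_, dtype=bool)
        mask[:2] = True
        return mask


X = np.arange(12.0).reshape(3, 4)
selector = FirstTwoSelector().fit(X)
X_sel = selector.transform(X)  # shape (3, 2)
print(selector.get_support(indices=True))  # [0 1]
# inverse_transform pads the removed columns back with zeros.
print(selector.inverse_transform(X_sel).shape)  # (3, 4)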
venv/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py ADDED
@@ -0,0 +1,522 @@
1
+ # Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
2
+ # License: BSD 3 clause
3
+
4
+ from copy import deepcopy
5
+ from numbers import Integral, Real
6
+
7
+ import numpy as np
8
+
9
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone
10
+ from ..exceptions import NotFittedError
11
+ from ..utils._param_validation import HasMethods, Interval, Options
12
+ from ..utils._tags import _safe_tags
13
+ from ..utils.metadata_routing import (
14
+ MetadataRouter,
15
+ MethodMapping,
16
+ _routing_enabled,
17
+ process_routing,
18
+ )
19
+ from ..utils.metaestimators import available_if
20
+ from ..utils.validation import _num_features, check_is_fitted, check_scalar
21
+ from ._base import SelectorMixin, _get_feature_importances
22
+
23
+
24
+ def _calculate_threshold(estimator, importances, threshold):
25
+ """Interpret the threshold value"""
26
+
27
+ if threshold is None:
28
+ # determine default from estimator
29
+ est_name = estimator.__class__.__name__
30
+ is_l1_penalized = hasattr(estimator, "penalty") and estimator.penalty == "l1"
31
+ is_lasso = "Lasso" in est_name
32
+ is_elasticnet_l1_penalized = "ElasticNet" in est_name and (
33
+ (hasattr(estimator, "l1_ratio_") and np.isclose(estimator.l1_ratio_, 1.0))
34
+ or (hasattr(estimator, "l1_ratio") and np.isclose(estimator.l1_ratio, 1.0))
35
+ )
36
+ if is_l1_penalized or is_lasso or is_elasticnet_l1_penalized:
37
+ # the natural default threshold is 0 when l1 penalty was used
38
+ threshold = 1e-5
39
+ else:
40
+ threshold = "mean"
41
+
42
+ if isinstance(threshold, str):
43
+ if "*" in threshold:
44
+ scale, reference = threshold.split("*")
45
+ scale = float(scale.strip())
46
+ reference = reference.strip()
47
+
48
+ if reference == "median":
49
+ reference = np.median(importances)
50
+ elif reference == "mean":
51
+ reference = np.mean(importances)
52
+ else:
53
+ raise ValueError("Unknown reference: " + reference)
54
+
55
+ threshold = scale * reference
56
+
57
+ elif threshold == "median":
58
+ threshold = np.median(importances)
59
+
60
+ elif threshold == "mean":
61
+ threshold = np.mean(importances)
62
+
63
+ else:
64
+ raise ValueError(
65
+ "Expected threshold='mean' or threshold='median' got %s" % threshold
66
+ )
67
+
68
+ else:
69
+ threshold = float(threshold)
70
+
71
+ return threshold
72
+
73
+
74
+ def _estimator_has(attr):
75
+ """Check if we can delegate a method to the underlying estimator.
76
+
77
+ First, we check the fitted `estimator_` if available, otherwise we check the
78
+ unfitted `estimator`. We raise the original `AttributeError` if `attr` does
79
+ not exist. This function is used together with `available_if`.
80
+ """
81
+
82
+ def check(self):
83
+ if hasattr(self, "estimator_"):
84
+ getattr(self.estimator_, attr)
85
+ else:
86
+ getattr(self.estimator, attr)
87
+
88
+ return True
89
+
90
+ return check
91
+
92
+
93
+ class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
94
+ """Meta-transformer for selecting features based on importance weights.
95
+
96
+ .. versionadded:: 0.17
97
+
98
+ Read more in the :ref:`User Guide <select_from_model>`.
99
+
100
+ Parameters
101
+ ----------
102
+ estimator : object
103
+ The base estimator from which the transformer is built.
104
+ This can be both a fitted (if ``prefit`` is set to True)
105
+ or a non-fitted estimator. The estimator should have a
106
+ ``feature_importances_`` or ``coef_`` attribute after fitting.
107
+ Otherwise, the ``importance_getter`` parameter should be used.
108
+
109
+ threshold : str or float, default=None
110
+ The threshold value to use for feature selection. Features whose
111
+ absolute importance value is greater or equal are kept while the others
112
+ are discarded. If "median" (resp. "mean"), then the ``threshold`` value
113
+ is the median (resp. the mean) of the feature importances. A scaling
114
+ factor (e.g., "1.25*mean") may also be used. If None and if the
115
+ estimator has a parameter penalty set to l1, either explicitly
116
+ or implicitly (e.g, Lasso), the threshold used is 1e-5.
117
+ Otherwise, "mean" is used by default.
118
+
119
+ prefit : bool, default=False
120
+ Whether a prefit model is expected to be passed into the constructor
121
+ directly or not.
122
+ If `True`, `estimator` must be a fitted estimator.
123
+ If `False`, `estimator` is fitted and updated by calling
124
+ `fit` and `partial_fit`, respectively.
125
+
126
+ norm_order : non-zero int, inf, -inf, default=1
127
+ Order of the norm used to filter the vectors of coefficients below
128
+ ``threshold`` in the case where the ``coef_`` attribute of the
129
+ estimator is of dimension 2.
130
+
131
+ max_features : int, callable, default=None
132
+ The maximum number of features to select.
133
+
134
+ - If an integer, then it specifies the maximum number of features to
135
+ allow.
136
+ - If a callable, then it specifies how to calculate the maximum number of
137
+ features allowed by using the output of `max_features(X)`.
138
+ - If `None`, then all features are kept.
139
+
140
+ To only select based on ``max_features``, set ``threshold=-np.inf``.
141
+
142
+ .. versionadded:: 0.20
143
+ .. versionchanged:: 1.1
144
+ `max_features` accepts a callable.
145
+
146
+ importance_getter : str or callable, default='auto'
147
+ If 'auto', uses the feature importance either through a ``coef_``
148
+ attribute or ``feature_importances_`` attribute of estimator.
149
+
150
+ Also accepts a string that specifies an attribute name/path
151
+ for extracting feature importance (implemented with `attrgetter`).
152
+ For example, give `regressor_.coef_` in case of
153
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
154
+ `named_steps.clf.feature_importances_` in case of
155
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
156
+
157
+ If `callable`, overrides the default feature importance getter.
158
+ The callable is passed with the fitted estimator and it should
159
+ return importance for each feature.
160
+
161
+ .. versionadded:: 0.24
162
+
163
+ Attributes
164
+ ----------
165
+ estimator_ : estimator
166
+ The base estimator from which the transformer is built. This attribute
167
+ exist only when `fit` has been called.
168
+
169
+ - If `prefit=True`, it is a deep copy of `estimator`.
170
+ - If `prefit=False`, it is a clone of `estimator` and fit on the data
171
+ passed to `fit` or `partial_fit`.
172
+
173
+ n_features_in_ : int
174
+ Number of features seen during :term:`fit`. Only defined if the
175
+ underlying estimator exposes such an attribute when fit.
176
+
177
+ .. versionadded:: 0.24
178
+
179
+ max_features_ : int
180
+ Maximum number of features calculated during :term:`fit`. Only defined
181
+ if the ``max_features`` is not `None`.
182
+
183
+ - If `max_features` is an `int`, then `max_features_ = max_features`.
184
+ - If `max_features` is a callable, then `max_features_ = max_features(X)`.
185
+
186
+ .. versionadded:: 1.1
187
+
188
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
189
+ Names of features seen during :term:`fit`. Defined only when `X`
190
+ has feature names that are all strings.
191
+
192
+ .. versionadded:: 1.0
193
+
194
+ threshold_ : float
195
+ The threshold value used for feature selection.
196
+
197
+ See Also
198
+ --------
199
+ RFE : Recursive feature elimination based on importance weights.
200
+ RFECV : Recursive feature elimination with built-in cross-validated
201
+ selection of the best number of features.
202
+ SequentialFeatureSelector : Sequential cross-validation based feature
203
+ selection. Does not rely on importance weights.
204
+
205
+ Notes
206
+ -----
207
+ Allows NaN/Inf in the input if the underlying estimator does as well.
208
+
209
+ Examples
210
+ --------
211
+ >>> from sklearn.feature_selection import SelectFromModel
212
+ >>> from sklearn.linear_model import LogisticRegression
213
+ >>> X = [[ 0.87, -1.34, 0.31 ],
214
+ ... [-2.79, -0.02, -0.85 ],
215
+ ... [-1.34, -0.48, -2.55 ],
216
+ ... [ 1.92, 1.48, 0.65 ]]
217
+ >>> y = [0, 1, 0, 1]
218
+ >>> selector = SelectFromModel(estimator=LogisticRegression()).fit(X, y)
219
+ >>> selector.estimator_.coef_
220
+ array([[-0.3252..., 0.8345..., 0.4976...]])
221
+ >>> selector.threshold_
222
+ 0.55249...
223
+ >>> selector.get_support()
224
+ array([False, True, False])
225
+ >>> selector.transform(X)
226
+ array([[-1.34],
227
+ [-0.02],
228
+ [-0.48],
229
+ [ 1.48]])
230
+
231
+ Using a callable to create a selector that can use no more than half
232
+ of the input features.
233
+
234
+ >>> def half_callable(X):
235
+ ... return round(len(X[0]) / 2)
236
+ >>> half_selector = SelectFromModel(estimator=LogisticRegression(),
237
+ ... max_features=half_callable)
238
+ >>> _ = half_selector.fit(X, y)
239
+ >>> half_selector.max_features_
240
+ 2
241
+ """
242
+
243
+ _parameter_constraints: dict = {
244
+ "estimator": [HasMethods("fit")],
245
+ "threshold": [Interval(Real, None, None, closed="both"), str, None],
246
+ "prefit": ["boolean"],
247
+ "norm_order": [
248
+ Interval(Integral, None, -1, closed="right"),
249
+ Interval(Integral, 1, None, closed="left"),
250
+ Options(Real, {np.inf, -np.inf}),
251
+ ],
252
+ "max_features": [Interval(Integral, 0, None, closed="left"), callable, None],
253
+ "importance_getter": [str, callable],
254
+ }
255
+
256
+ def __init__(
257
+ self,
258
+ estimator,
259
+ *,
260
+ threshold=None,
261
+ prefit=False,
262
+ norm_order=1,
263
+ max_features=None,
264
+ importance_getter="auto",
265
+ ):
266
+ self.estimator = estimator
267
+ self.threshold = threshold
268
+ self.prefit = prefit
269
+ self.importance_getter = importance_getter
270
+ self.norm_order = norm_order
271
+ self.max_features = max_features
272
+
273
+ def _get_support_mask(self):
274
+ estimator = getattr(self, "estimator_", self.estimator)
275
+ max_features = getattr(self, "max_features_", self.max_features)
276
+
277
+ if self.prefit:
278
+ try:
279
+ check_is_fitted(self.estimator)
280
+ except NotFittedError as exc:
281
+ raise NotFittedError(
282
+ "When `prefit=True`, `estimator` is expected to be a fitted "
283
+ "estimator."
284
+ ) from exc
285
+ if callable(max_features):
286
+ # This branch is executed when `transform` is called directly and thus
287
+ # `max_features_` is not set and we fallback using `self.max_features`
288
+ # that is not validated
289
+ raise NotFittedError(
290
+ "When `prefit=True` and `max_features` is a callable, call `fit` "
291
+ "before calling `transform`."
292
+ )
293
+ elif max_features is not None and not isinstance(max_features, Integral):
294
+ raise ValueError(
295
+ f"`max_features` must be an integer. Got `max_features={max_features}` "
296
+ "instead."
297
+ )
298
+
299
+ scores = _get_feature_importances(
300
+ estimator=estimator,
301
+ getter=self.importance_getter,
302
+ transform_func="norm",
303
+ norm_order=self.norm_order,
304
+ )
305
+ threshold = _calculate_threshold(estimator, scores, self.threshold)
306
+ if self.max_features is not None:
307
+ mask = np.zeros_like(scores, dtype=bool)
308
+ candidate_indices = np.argsort(-scores, kind="mergesort")[:max_features]
309
+ mask[candidate_indices] = True
310
+ else:
311
+ mask = np.ones_like(scores, dtype=bool)
312
+ mask[scores < threshold] = False
313
+ return mask
314
+
315
+ def _check_max_features(self, X):
316
+ if self.max_features is not None:
317
+ n_features = _num_features(X)
318
+
319
+ if callable(self.max_features):
320
+ max_features = self.max_features(X)
321
+ else: # int
322
+ max_features = self.max_features
323
+
324
+ check_scalar(
325
+ max_features,
326
+ "max_features",
327
+ Integral,
328
+ min_val=0,
329
+ max_val=n_features,
330
+ )
331
+ self.max_features_ = max_features
332
+
333
+ @_fit_context(
334
+ # SelectFromModel.estimator is not validated yet
335
+ prefer_skip_nested_validation=False
336
+ )
337
+ def fit(self, X, y=None, **fit_params):
338
+ """Fit the SelectFromModel meta-transformer.
339
+
340
+ Parameters
341
+ ----------
342
+ X : array-like of shape (n_samples, n_features)
343
+ The training input samples.
344
+
345
+ y : array-like of shape (n_samples,), default=None
346
+ The target values (integers that correspond to classes in
347
+ classification, real numbers in regression).
348
+
349
+ **fit_params : dict
350
+ - If `enable_metadata_routing=False` (default):
351
+
352
+ Parameters directly passed to the `partial_fit` method of the
353
+ sub-estimator. They are ignored if `prefit=True`.
354
+
355
+ - If `enable_metadata_routing=True`:
356
+
357
+ Parameters safely routed to the `partial_fit` method of the
358
+ sub-estimator. They are ignored if `prefit=True`.
359
+
360
+ .. versionchanged:: 1.4
361
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
362
+ more details.
363
+
364
+ Returns
365
+ -------
366
+ self : object
367
+ Fitted estimator.
368
+ """
369
+ self._check_max_features(X)
370
+
371
+ if self.prefit:
372
+ try:
373
+ check_is_fitted(self.estimator)
374
+ except NotFittedError as exc:
375
+ raise NotFittedError(
376
+ "When `prefit=True`, `estimator` is expected to be a fitted "
377
+ "estimator."
378
+ ) from exc
379
+ self.estimator_ = deepcopy(self.estimator)
380
+ else:
381
+ if _routing_enabled():
382
+ routed_params = process_routing(self, "fit", **fit_params)
383
+ self.estimator_ = clone(self.estimator)
384
+ self.estimator_.fit(X, y, **routed_params.estimator.fit)
385
+ else:
386
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
387
+ self.estimator_ = clone(self.estimator)
388
+ self.estimator_.fit(X, y, **fit_params)
389
+
390
+ if hasattr(self.estimator_, "feature_names_in_"):
391
+ self.feature_names_in_ = self.estimator_.feature_names_in_
392
+ else:
393
+ self._check_feature_names(X, reset=True)
394
+
395
+ return self
396
+
397
+ @property
398
+ def threshold_(self):
399
+ """Threshold value used for feature selection."""
400
+ scores = _get_feature_importances(
401
+ estimator=self.estimator_,
402
+ getter=self.importance_getter,
403
+ transform_func="norm",
404
+ norm_order=self.norm_order,
405
+ )
406
+ return _calculate_threshold(self.estimator, scores, self.threshold)
407
+
408
+ @available_if(_estimator_has("partial_fit"))
409
+ @_fit_context(
410
+ # SelectFromModel.estimator is not validated yet
411
+ prefer_skip_nested_validation=False
412
+ )
413
+ def partial_fit(self, X, y=None, **partial_fit_params):
414
+ """Fit the SelectFromModel meta-transformer only once.
415
+
416
+ Parameters
417
+ ----------
418
+ X : array-like of shape (n_samples, n_features)
419
+ The training input samples.
420
+
421
+ y : array-like of shape (n_samples,), default=None
422
+ The target values (integers that correspond to classes in
423
+ classification, real numbers in regression).
424
+
425
+ **partial_fit_params : dict
426
+ - If `enable_metadata_routing=False` (default):
427
+
428
+ Parameters directly passed to the `partial_fit` method of the
429
+ sub-estimator.
430
+
431
+ - If `enable_metadata_routing=True`:
432
+
433
+ Parameters passed to the `partial_fit` method of the
434
+ sub-estimator. They are ignored if `prefit=True`.
435
+
436
+ .. versionchanged:: 1.4
437
+ `**partial_fit_params` are routed to the sub-estimator, if
438
+ `enable_metadata_routing=True` is set via
439
+ :func:`~sklearn.set_config`, which allows for aliasing.
440
+
441
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
442
+ more details.
443
+
444
+ Returns
445
+ -------
446
+ self : object
447
+ Fitted estimator.
448
+ """
449
+ first_call = not hasattr(self, "estimator_")
450
+
451
+ if first_call:
452
+ self._check_max_features(X)
453
+
454
+ if self.prefit:
455
+ if first_call:
456
+ try:
457
+ check_is_fitted(self.estimator)
458
+ except NotFittedError as exc:
459
+ raise NotFittedError(
460
+ "When `prefit=True`, `estimator` is expected to be a fitted "
461
+ "estimator."
462
+ ) from exc
463
+ self.estimator_ = deepcopy(self.estimator)
464
+ return self
465
+
466
+ if first_call:
467
+ self.estimator_ = clone(self.estimator)
468
+ if _routing_enabled():
469
+ routed_params = process_routing(self, "partial_fit", **partial_fit_params)
470
+ self.estimator_ = clone(self.estimator)
471
+ self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit)
472
+ else:
473
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
474
+ self.estimator_.partial_fit(X, y, **partial_fit_params)
475
+
476
+ if hasattr(self.estimator_, "feature_names_in_"):
477
+ self.feature_names_in_ = self.estimator_.feature_names_in_
478
+ else:
479
+ self._check_feature_names(X, reset=first_call)
480
+
481
+ return self
482
+
483
+ @property
484
+ def n_features_in_(self):
485
+ """Number of features seen during `fit`."""
486
+ # For consistency with other estimators we raise an AttributeError so
487
+ # that hasattr() fails if the estimator isn't fitted.
488
+ try:
489
+ check_is_fitted(self)
490
+ except NotFittedError as nfe:
491
+ raise AttributeError(
492
+ "{} object has no n_features_in_ attribute.".format(
493
+ self.__class__.__name__
494
+ )
495
+ ) from nfe
496
+
497
+ return self.estimator_.n_features_in_
498
+
499
+ def get_metadata_routing(self):
500
+ """Get metadata routing of this object.
501
+
502
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
503
+ mechanism works.
504
+
505
+ .. versionadded:: 1.4
506
+
507
+ Returns
508
+ -------
509
+ routing : MetadataRouter
510
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
511
+ routing information.
512
+ """
513
+ router = MetadataRouter(owner=self.__class__.__name__).add(
514
+ estimator=self.estimator,
515
+ method_mapping=MethodMapping()
516
+ .add(callee="partial_fit", caller="partial_fit")
517
+ .add(callee="fit", caller="fit"),
518
+ )
519
+ return router
520
+
521
+ def _more_tags(self):
522
+ return {"allow_nan": _safe_tags(self.estimator, key="allow_nan")}
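As a usage sketch of the "scale*reference" threshold strings parsed by `_calculate_threshold` above: the estimator choice (a random forest) and all numeric values are illustrative assumptions, not part of this module.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

X, y = make_classification(
    n_samples=200, n_features=10, n_informative=3, random_state=0
)

# "1.25*mean" is split into scale=1.25 and reference="mean", so the cut-off
# becomes 1.25 times the mean feature_importances_ of the fitted forest.
selector = SelectFromModel(
    RandomForestClassifier(n_estimators=50, random_state=0),
    threshold="1.25*mean",
)
X_selected = selector.fit_transform(X, y)
print(selector.threshold_, X_selected.shape)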
venv/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py ADDED
@@ -0,0 +1,514 @@
1
+ # Author: Nikolay Mayorov <[email protected]>
2
+ # License: 3-clause BSD
3
+
4
+ from numbers import Integral
5
+
6
+ import numpy as np
7
+ from scipy.sparse import issparse
8
+ from scipy.special import digamma
9
+
10
+ from ..metrics.cluster import mutual_info_score
11
+ from ..neighbors import KDTree, NearestNeighbors
12
+ from ..preprocessing import scale
13
+ from ..utils import check_random_state
14
+ from ..utils._param_validation import Interval, StrOptions, validate_params
15
+ from ..utils.multiclass import check_classification_targets
16
+ from ..utils.validation import check_array, check_X_y
17
+
18
+
19
+ def _compute_mi_cc(x, y, n_neighbors):
20
+ """Compute mutual information between two continuous variables.
21
+
22
+ Parameters
23
+ ----------
24
+ x, y : ndarray, shape (n_samples,)
25
+ Samples of two continuous random variables, must have an identical
26
+ shape.
27
+
28
+ n_neighbors : int
29
+ Number of nearest neighbors to search for each point, see [1]_.
30
+
31
+ Returns
32
+ -------
33
+ mi : float
34
+ Estimated mutual information in nat units. If it turned out to be
35
+ negative it is replaced by 0.
36
+
37
+ Notes
38
+ -----
39
+ True mutual information can't be negative. If its estimate by a numerical
40
+ method is negative, it means (providing the method is adequate) that the
41
+ mutual information is close to 0 and replacing it by 0 is a reasonable
42
+ strategy.
43
+
44
+ References
45
+ ----------
46
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
47
+ information". Phys. Rev. E 69, 2004.
48
+ """
49
+ n_samples = x.size
50
+
51
+ x = x.reshape((-1, 1))
52
+ y = y.reshape((-1, 1))
53
+ xy = np.hstack((x, y))
54
+
55
+ # Here we rely on NearestNeighbors to select the fastest algorithm.
56
+ nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors)
57
+
58
+ nn.fit(xy)
59
+ radius = nn.kneighbors()[0]
60
+ radius = np.nextafter(radius[:, -1], 0)
61
+
62
+ # KDTree is explicitly fit to allow for the querying of number of
63
+ # neighbors within a specified radius
64
+ kd = KDTree(x, metric="chebyshev")
65
+ nx = kd.query_radius(x, radius, count_only=True, return_distance=False)
66
+ nx = np.array(nx) - 1.0
67
+
68
+ kd = KDTree(y, metric="chebyshev")
69
+ ny = kd.query_radius(y, radius, count_only=True, return_distance=False)
70
+ ny = np.array(ny) - 1.0
71
+
72
+ mi = (
73
+ digamma(n_samples)
74
+ + digamma(n_neighbors)
75
+ - np.mean(digamma(nx + 1))
76
+ - np.mean(digamma(ny + 1))
77
+ )
78
+
79
+ return max(0, mi)
80
+
81
+
82
+ def _compute_mi_cd(c, d, n_neighbors):
83
+ """Compute mutual information between continuous and discrete variables.
84
+
85
+ Parameters
86
+ ----------
87
+ c : ndarray, shape (n_samples,)
88
+ Samples of a continuous random variable.
89
+
90
+ d : ndarray, shape (n_samples,)
91
+ Samples of a discrete random variable.
92
+
93
+ n_neighbors : int
94
+ Number of nearest neighbors to search for each point, see [1]_.
95
+
96
+ Returns
97
+ -------
98
+ mi : float
99
+ Estimated mutual information in nat units. If it turned out to be
100
+ negative it is replaced by 0.
101
+
102
+ Notes
103
+ -----
104
+ True mutual information can't be negative. If its estimate by a numerical
105
+ method is negative, it means (providing the method is adequate) that the
106
+ mutual information is close to 0 and replacing it by 0 is a reasonable
107
+ strategy.
108
+
109
+ References
110
+ ----------
111
+ .. [1] B. C. Ross "Mutual Information between Discrete and Continuous
112
+ Data Sets". PLoS ONE 9(2), 2014.
113
+ """
114
+ n_samples = c.shape[0]
115
+ c = c.reshape((-1, 1))
116
+
117
+ radius = np.empty(n_samples)
118
+ label_counts = np.empty(n_samples)
119
+ k_all = np.empty(n_samples)
120
+ nn = NearestNeighbors()
121
+ for label in np.unique(d):
122
+ mask = d == label
123
+ count = np.sum(mask)
124
+ if count > 1:
125
+ k = min(n_neighbors, count - 1)
126
+ nn.set_params(n_neighbors=k)
127
+ nn.fit(c[mask])
128
+ r = nn.kneighbors()[0]
129
+ radius[mask] = np.nextafter(r[:, -1], 0)
130
+ k_all[mask] = k
131
+ label_counts[mask] = count
132
+
133
+ # Ignore points with unique labels.
134
+ mask = label_counts > 1
135
+ n_samples = np.sum(mask)
136
+ label_counts = label_counts[mask]
137
+ k_all = k_all[mask]
138
+ c = c[mask]
139
+ radius = radius[mask]
140
+
141
+ kd = KDTree(c)
142
+ m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)
143
+ m_all = np.array(m_all)
144
+
145
+ mi = (
146
+ digamma(n_samples)
147
+ + np.mean(digamma(k_all))
148
+ - np.mean(digamma(label_counts))
149
+ - np.mean(digamma(m_all))
150
+ )
151
+
152
+ return max(0, mi)
153
+
154
+
155
+ def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
156
+ """Compute mutual information between two variables.
157
+
158
+ This is a simple wrapper which selects a proper function to call based on
159
+ whether `x` and `y` are discrete or not.
160
+ """
161
+ if x_discrete and y_discrete:
162
+ return mutual_info_score(x, y)
163
+ elif x_discrete and not y_discrete:
164
+ return _compute_mi_cd(y, x, n_neighbors)
165
+ elif not x_discrete and y_discrete:
166
+ return _compute_mi_cd(x, y, n_neighbors)
167
+ else:
168
+ return _compute_mi_cc(x, y, n_neighbors)
169
+
170
+
171
+ def _iterate_columns(X, columns=None):
172
+ """Iterate over columns of a matrix.
173
+
174
+ Parameters
175
+ ----------
176
+ X : ndarray or csc_matrix, shape (n_samples, n_features)
177
+ Matrix over which to iterate.
178
+
179
+ columns : iterable or None, default=None
180
+ Indices of columns to iterate over. If None, iterate over all columns.
181
+
182
+ Yields
183
+ ------
184
+ x : ndarray, shape (n_samples,)
185
+ Columns of `X` in dense format.
186
+ """
187
+ if columns is None:
188
+ columns = range(X.shape[1])
189
+
190
+ if issparse(X):
191
+ for i in columns:
192
+ x = np.zeros(X.shape[0])
193
+ start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]
194
+ x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
195
+ yield x
196
+ else:
197
+ for i in columns:
198
+ yield X[:, i]
199
+
200
+
201
+ def _estimate_mi(
202
+ X,
203
+ y,
204
+ discrete_features="auto",
205
+ discrete_target=False,
206
+ n_neighbors=3,
207
+ copy=True,
208
+ random_state=None,
209
+ ):
210
+ """Estimate mutual information between the features and the target.
211
+
212
+ Parameters
213
+ ----------
214
+ X : array-like or sparse matrix, shape (n_samples, n_features)
215
+ Feature matrix.
216
+
217
+ y : array-like of shape (n_samples,)
218
+ Target vector.
219
+
220
+ discrete_features : {'auto', bool, array-like}, default='auto'
221
+ If bool, then determines whether to consider all features discrete
222
+ or continuous. If array, then it should be either a boolean mask
223
+ with shape (n_features,) or array with indices of discrete features.
224
+ If 'auto', it is assigned to False for dense `X` and to True for
225
+ sparse `X`.
226
+
227
+ discrete_target : bool, default=False
228
+ Whether to consider `y` as a discrete variable.
229
+
230
+ n_neighbors : int, default=3
231
+ Number of neighbors to use for MI estimation for continuous variables,
232
+ see [1]_ and [2]_. Higher values reduce variance of the estimation, but
233
+ could introduce a bias.
234
+
235
+ copy : bool, default=True
236
+ Whether to make a copy of the given data. If set to False, the initial
237
+ data will be overwritten.
238
+
239
+ random_state : int, RandomState instance or None, default=None
240
+ Determines random number generation for adding small noise to
241
+ continuous variables in order to remove repeated values.
242
+ Pass an int for reproducible results across multiple function calls.
243
+ See :term:`Glossary <random_state>`.
244
+
245
+ Returns
246
+ -------
247
+ mi : ndarray, shape (n_features,)
248
+ Estimated mutual information between each feature and the target in
249
+ nat units. A negative value will be replaced by 0.
250
+
251
+ References
252
+ ----------
253
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
254
+ information". Phys. Rev. E 69, 2004.
255
+ .. [2] B. C. Ross "Mutual Information between Discrete and Continuous
256
+ Data Sets". PLoS ONE 9(2), 2014.
257
+ """
258
+ X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target)
259
+ n_samples, n_features = X.shape
260
+
261
+ if isinstance(discrete_features, (str, bool)):
262
+ if isinstance(discrete_features, str):
263
+ if discrete_features == "auto":
264
+ discrete_features = issparse(X)
265
+ else:
266
+ raise ValueError("Invalid string value for discrete_features.")
267
+ discrete_mask = np.empty(n_features, dtype=bool)
268
+ discrete_mask.fill(discrete_features)
269
+ else:
270
+ discrete_features = check_array(discrete_features, ensure_2d=False)
271
+ if discrete_features.dtype != "bool":
272
+ discrete_mask = np.zeros(n_features, dtype=bool)
273
+ discrete_mask[discrete_features] = True
274
+ else:
275
+ discrete_mask = discrete_features
276
+
277
+ continuous_mask = ~discrete_mask
278
+ if np.any(continuous_mask) and issparse(X):
279
+ raise ValueError("Sparse matrix `X` can't have continuous features.")
280
+
281
+ rng = check_random_state(random_state)
282
+ if np.any(continuous_mask):
283
+ X = X.astype(np.float64, copy=copy)
284
+ X[:, continuous_mask] = scale(
285
+ X[:, continuous_mask], with_mean=False, copy=False
286
+ )
287
+
288
+ # Add small noise to continuous features as advised in Kraskov et. al.
289
+ means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))
290
+ X[:, continuous_mask] += (
291
+ 1e-10
292
+ * means
293
+ * rng.standard_normal(size=(n_samples, np.sum(continuous_mask)))
294
+ )
295
+
296
+ if not discrete_target:
297
+ y = scale(y, with_mean=False)
298
+ y += (
299
+ 1e-10
300
+ * np.maximum(1, np.mean(np.abs(y)))
301
+ * rng.standard_normal(size=n_samples)
302
+ )
303
+
304
+ mi = [
305
+ _compute_mi(x, y, discrete_feature, discrete_target, n_neighbors)
306
+ for x, discrete_feature in zip(_iterate_columns(X), discrete_mask)
307
+ ]
308
+
309
+ return np.array(mi)
310
+
311
+
312
+ @validate_params(
313
+ {
314
+ "X": ["array-like", "sparse matrix"],
315
+ "y": ["array-like"],
316
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
317
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
318
+ "copy": ["boolean"],
319
+ "random_state": ["random_state"],
320
+ },
321
+ prefer_skip_nested_validation=True,
322
+ )
323
+ def mutual_info_regression(
324
+ X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
325
+ ):
326
+ """Estimate mutual information for a continuous target variable.
327
+
328
+ Mutual information (MI) [1]_ between two random variables is a non-negative
329
+ value, which measures the dependency between the variables. It is equal
330
+ to zero if and only if two random variables are independent, and higher
331
+ values mean higher dependency.
332
+
333
+ The function relies on nonparametric methods based on entropy estimation
334
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
335
+ methods are based on the idea originally proposed in [4]_.
336
+
337
+ It can be used for univariate feature selection, read more in the
338
+ :ref:`User Guide <univariate_feature_selection>`.
339
+
340
+ Parameters
341
+ ----------
342
+ X : array-like or sparse matrix, shape (n_samples, n_features)
343
+ Feature matrix.
344
+
345
+ y : array-like of shape (n_samples,)
346
+ Target vector.
347
+
348
+ discrete_features : {'auto', bool, array-like}, default='auto'
349
+ If bool, then determines whether to consider all features discrete
350
+ or continuous. If array, then it should be either a boolean mask
351
+ with shape (n_features,) or array with indices of discrete features.
352
+ If 'auto', it is assigned to False for dense `X` and to True for
353
+ sparse `X`.
354
+
355
+ n_neighbors : int, default=3
356
+ Number of neighbors to use for MI estimation for continuous variables,
357
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
358
+ could introduce a bias.
359
+
360
+ copy : bool, default=True
361
+ Whether to make a copy of the given data. If set to False, the initial
362
+ data will be overwritten.
363
+
364
+ random_state : int, RandomState instance or None, default=None
365
+ Determines random number generation for adding small noise to
366
+ continuous variables in order to remove repeated values.
367
+ Pass an int for reproducible results across multiple function calls.
368
+ See :term:`Glossary <random_state>`.
369
+
370
+ Returns
371
+ -------
372
+ mi : ndarray, shape (n_features,)
373
+ Estimated mutual information between each feature and the target in
374
+ nat units.
375
+
376
+ Notes
377
+ -----
378
+ 1. The term "discrete features" is used instead of naming them
379
+ "categorical", because it describes the essence more accurately.
380
+ For example, pixel intensities of an image are discrete features
381
+ (but hardly categorical) and you will get better results if you mark them
382
+ as such. Also note, that treating a continuous variable as discrete and
383
+ vice versa will usually give incorrect results, so be attentive about
384
+ that.
385
+ 2. True mutual information can't be negative. If its estimate turns out
386
+ to be negative, it is replaced by zero.
387
+
388
+ References
389
+ ----------
390
+ .. [1] `Mutual Information
391
+ <https://en.wikipedia.org/wiki/Mutual_information>`_
392
+ on Wikipedia.
393
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
394
+ information". Phys. Rev. E 69, 2004.
395
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
396
+ Data Sets". PLoS ONE 9(2), 2014.
397
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
398
+ of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
399
+
400
+ Examples
401
+ --------
402
+ >>> from sklearn.datasets import make_regression
403
+ >>> from sklearn.feature_selection import mutual_info_regression
404
+ >>> X, y = make_regression(
405
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
406
+ ... )
407
+ >>> mutual_info_regression(X, y)
408
+ array([0.1..., 2.6... , 0.0...])
409
+ """
410
+ return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state)
411
+
412
+
413
+ @validate_params(
414
+ {
415
+ "X": ["array-like", "sparse matrix"],
416
+ "y": ["array-like"],
417
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
418
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
419
+ "copy": ["boolean"],
420
+ "random_state": ["random_state"],
421
+ },
422
+ prefer_skip_nested_validation=True,
423
+ )
424
+ def mutual_info_classif(
425
+ X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None
426
+ ):
427
+ """Estimate mutual information for a discrete target variable.
428
+
429
+ Mutual information (MI) [1]_ between two random variables is a non-negative
430
+ value, which measures the dependency between the variables. It is equal
431
+ to zero if and only if two random variables are independent, and higher
432
+ values mean higher dependency.
433
+
434
+ The function relies on nonparametric methods based on entropy estimation
435
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
436
+ methods are based on the idea originally proposed in [4]_.
437
+
438
+ It can be used for univariate feature selection, read more in the
439
+ :ref:`User Guide <univariate_feature_selection>`.
440
+
441
+ Parameters
442
+ ----------
443
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
444
+ Feature matrix.
445
+
446
+ y : array-like of shape (n_samples,)
447
+ Target vector.
448
+
449
+ discrete_features : 'auto', bool or array-like, default='auto'
450
+ If bool, then determines whether to consider all features discrete
451
+ or continuous. If array, then it should be either a boolean mask
452
+ with shape (n_features,) or array with indices of discrete features.
453
+ If 'auto', it is assigned to False for dense `X` and to True for
454
+ sparse `X`.
455
+
456
+ n_neighbors : int, default=3
457
+ Number of neighbors to use for MI estimation for continuous variables,
458
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
459
+ could introduce a bias.
460
+
461
+ copy : bool, default=True
462
+ Whether to make a copy of the given data. If set to False, the initial
463
+ data will be overwritten.
464
+
465
+ random_state : int, RandomState instance or None, default=None
466
+ Determines random number generation for adding small noise to
467
+ continuous variables in order to remove repeated values.
468
+ Pass an int for reproducible results across multiple function calls.
469
+ See :term:`Glossary <random_state>`.
470
+
471
+ Returns
472
+ -------
473
+ mi : ndarray, shape (n_features,)
474
+ Estimated mutual information between each feature and the target in
475
+ nat units.
476
+
477
+ Notes
478
+ -----
479
+ 1. The term "discrete features" is used instead of naming them
480
+ "categorical", because it describes the essence more accurately.
481
+ For example, pixel intensities of an image are discrete features
482
+ (but hardly categorical) and you will get better results if you mark them
483
+ as such. Also note, that treating a continuous variable as discrete and
484
+ vice versa will usually give incorrect results, so be attentive about
485
+ that.
486
+ 2. True mutual information can't be negative. If its estimate turns out
487
+ to be negative, it is replaced by zero.
488
+
489
+ References
490
+ ----------
491
+ .. [1] `Mutual Information
492
+ <https://en.wikipedia.org/wiki/Mutual_information>`_
493
+ on Wikipedia.
494
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
495
+ information". Phys. Rev. E 69, 2004.
496
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
497
+ Data Sets". PLoS ONE 9(2), 2014.
498
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
499
+ of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
500
+
501
+ Examples
502
+ --------
503
+ >>> from sklearn.datasets import make_classification
504
+ >>> from sklearn.feature_selection import mutual_info_classif
505
+ >>> X, y = make_classification(
506
+ ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
507
+ ... shuffle=False, random_state=42
508
+ ... )
509
+ >>> mutual_info_classif(X, y)
510
+ array([0.58..., 0.10..., 0.19..., 0.09... , 0. ,
511
+ 0. , 0. , 0. , 0. , 0. ])
512
+ """
513
+ check_classification_targets(y)
514
+ return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy, random_state)
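A short sketch of how `_estimate_mi` is reached through the public `mutual_info_regression`, here passing an explicit `discrete_features` mask so that one column is handled by `_compute_mi_cd` and the other by the continuous k-NN estimator; the synthetic data and coefficients are purely illustrative.

import numpy as np
from sklearn.feature_selection import mutual_info_regression

rng = np.random.default_rng(0)
x_continuous = rng.normal(size=500)
x_discrete = rng.integers(0, 3, size=500).astype(float)
y = 2.0 * x_continuous + 0.5 * x_discrete + rng.normal(scale=0.1, size=500)

X = np.column_stack([x_continuous, x_discrete])

# Column 1 is flagged as discrete; column 0 stays continuous.
mi = mutual_info_regression(X, y, discrete_features=[False, True], random_state=0)
print(mi)  # both entries are non-negative, the first is clearly the largest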
venv/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py ADDED
@@ -0,0 +1,792 @@
1
+ # Authors: Alexandre Gramfort <[email protected]>
2
+ # Vincent Michel <[email protected]>
3
+ # Gilles Louppe <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ """Recursive feature elimination for feature ranking"""
8
+
9
+ from numbers import Integral
10
+
11
+ import numpy as np
12
+ from joblib import effective_n_jobs
13
+
14
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
15
+ from ..metrics import check_scoring
16
+ from ..model_selection import check_cv
17
+ from ..model_selection._validation import _score
18
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt
19
+ from ..utils.metadata_routing import (
20
+ _raise_for_unsupported_routing,
21
+ _RoutingNotSupportedMixin,
22
+ )
23
+ from ..utils.metaestimators import _safe_split, available_if
24
+ from ..utils.parallel import Parallel, delayed
25
+ from ..utils.validation import check_is_fitted
26
+ from ._base import SelectorMixin, _get_feature_importances
27
+
28
+
29
+ def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
30
+ """
31
+ Return the score for a fit across one fold.
32
+ """
33
+ X_train, y_train = _safe_split(estimator, X, y, train)
34
+ X_test, y_test = _safe_split(estimator, X, y, test, train)
35
+ return rfe._fit(
36
+ X_train,
37
+ y_train,
38
+ lambda estimator, features: _score(
39
+ # TODO(SLEP6): pass score_params here
40
+ estimator,
41
+ X_test[:, features],
42
+ y_test,
43
+ scorer,
44
+ score_params=None,
45
+ ),
46
+ ).scores_
47
+
48
+
49
+ def _estimator_has(attr):
50
+ """Check if we can delegate a method to the underlying estimator.
51
+
52
+ First, we check the fitted `estimator_` if available, otherwise we check the
53
+ unfitted `estimator`. We raise the original `AttributeError` if `attr` does
54
+ not exist. This function is used together with `available_if`.
55
+ """
56
+
57
+ def check(self):
58
+ if hasattr(self, "estimator_"):
59
+ getattr(self.estimator_, attr)
60
+ else:
61
+ getattr(self.estimator, attr)
62
+
63
+ return True
64
+
65
+ return check
66
+
67
+
68
+ class RFE(_RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator):
69
+ """Feature ranking with recursive feature elimination.
70
+
71
+ Given an external estimator that assigns weights to features (e.g., the
72
+ coefficients of a linear model), the goal of recursive feature elimination
73
+ (RFE) is to select features by recursively considering smaller and smaller
74
+ sets of features. First, the estimator is trained on the initial set of
75
+ features and the importance of each feature is obtained either through
76
+ any specific attribute or callable.
77
+ Then, the least important features are pruned from current set of features.
78
+ That procedure is recursively repeated on the pruned set until the desired
79
+ number of features to select is eventually reached.
80
+
81
+ Read more in the :ref:`User Guide <rfe>`.
82
+
83
+ Parameters
84
+ ----------
85
+ estimator : ``Estimator`` instance
86
+ A supervised learning estimator with a ``fit`` method that provides
87
+ information about feature importance
88
+ (e.g. `coef_`, `feature_importances_`).
89
+
90
+ n_features_to_select : int or float, default=None
91
+ The number of features to select. If `None`, half of the features are
92
+ selected. If integer, the parameter is the absolute number of features
93
+ to select. If float between 0 and 1, it is the fraction of features to
94
+ select.
95
+
96
+ .. versionchanged:: 0.24
97
+ Added float values for fractions.
98
+
99
+ step : int or float, default=1
100
+ If greater than or equal to 1, then ``step`` corresponds to the
101
+ (integer) number of features to remove at each iteration.
102
+ If within (0.0, 1.0), then ``step`` corresponds to the percentage
103
+ (rounded down) of features to remove at each iteration.
104
+
105
+ verbose : int, default=0
106
+ Controls verbosity of output.
107
+
108
+ importance_getter : str or callable, default='auto'
109
+ If 'auto', uses the feature importance either through a `coef_`
110
+ or `feature_importances_` attributes of estimator.
111
+
112
+ Also accepts a string that specifies an attribute name/path
113
+ for extracting feature importance (implemented with `attrgetter`).
114
+ For example, give `regressor_.coef_` in case of
115
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
116
+ `named_steps.clf.feature_importances_` in case of
117
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
118
+
119
+ If `callable`, overrides the default feature importance getter.
120
+ The callable is passed the fitted estimator and should
121
+ return the importance of each feature.
122
+
123
+ .. versionadded:: 0.24
124
+
125
+ Attributes
126
+ ----------
127
+ classes_ : ndarray of shape (n_classes,)
128
+ The classes labels. Only available when `estimator` is a classifier.
129
+
130
+ estimator_ : ``Estimator`` instance
131
+ The fitted estimator used to select features.
132
+
133
+ n_features_ : int
134
+ The number of selected features.
135
+
136
+ n_features_in_ : int
137
+ Number of features seen during :term:`fit`. Only defined if the
138
+ underlying estimator exposes such an attribute when fit.
139
+
140
+ .. versionadded:: 0.24
141
+
142
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
143
+ Names of features seen during :term:`fit`. Defined only when `X`
144
+ has feature names that are all strings.
145
+
146
+ .. versionadded:: 1.0
147
+
148
+ ranking_ : ndarray of shape (n_features,)
149
+ The feature ranking, such that ``ranking_[i]`` corresponds to the
150
+ ranking position of the i-th feature. Selected (i.e., estimated
151
+ best) features are assigned rank 1.
152
+
153
+ support_ : ndarray of shape (n_features,)
154
+ The mask of selected features.
155
+
156
+ See Also
157
+ --------
158
+ RFECV : Recursive feature elimination with built-in cross-validated
159
+ selection of the best number of features.
160
+ SelectFromModel : Feature selection based on thresholds of importance
161
+ weights.
162
+ SequentialFeatureSelector : Sequential cross-validation based feature
163
+ selection. Does not rely on importance weights.
164
+
165
+ Notes
166
+ -----
167
+ Allows NaN/Inf in the input if the underlying estimator does as well.
168
+
169
+ References
170
+ ----------
171
+
172
+ .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
173
+ for cancer classification using support vector machines",
174
+ Mach. Learn., 46(1-3), 389--422, 2002.
175
+
176
+ Examples
177
+ --------
178
+ The following example shows how to retrieve the 5 most informative
179
+ features in the Friedman #1 dataset.
180
+
181
+ >>> from sklearn.datasets import make_friedman1
182
+ >>> from sklearn.feature_selection import RFE
183
+ >>> from sklearn.svm import SVR
184
+ >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
185
+ >>> estimator = SVR(kernel="linear")
186
+ >>> selector = RFE(estimator, n_features_to_select=5, step=1)
187
+ >>> selector = selector.fit(X, y)
188
+ >>> selector.support_
189
+ array([ True, True, True, True, True, False, False, False, False,
190
+ False])
191
+ >>> selector.ranking_
192
+ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
193
+ """
194
+
195
+ _parameter_constraints: dict = {
196
+ "estimator": [HasMethods(["fit"])],
197
+ "n_features_to_select": [
198
+ None,
199
+ Interval(RealNotInt, 0, 1, closed="right"),
200
+ Interval(Integral, 0, None, closed="neither"),
201
+ ],
202
+ "step": [
203
+ Interval(Integral, 0, None, closed="neither"),
204
+ Interval(RealNotInt, 0, 1, closed="neither"),
205
+ ],
206
+ "verbose": ["verbose"],
207
+ "importance_getter": [str, callable],
208
+ }
209
+
210
+ def __init__(
211
+ self,
212
+ estimator,
213
+ *,
214
+ n_features_to_select=None,
215
+ step=1,
216
+ verbose=0,
217
+ importance_getter="auto",
218
+ ):
219
+ self.estimator = estimator
220
+ self.n_features_to_select = n_features_to_select
221
+ self.step = step
222
+ self.importance_getter = importance_getter
223
+ self.verbose = verbose
224
+
225
+ @property
226
+ def _estimator_type(self):
227
+ return self.estimator._estimator_type
228
+
229
+ @property
230
+ def classes_(self):
231
+ """Classes labels available when `estimator` is a classifier.
232
+
233
+ Returns
234
+ -------
235
+ ndarray of shape (n_classes,)
236
+ """
237
+ return self.estimator_.classes_
238
+
239
+ @_fit_context(
240
+ # RFE.estimator is not validated yet
241
+ prefer_skip_nested_validation=False
242
+ )
243
+ def fit(self, X, y, **fit_params):
244
+ """Fit the RFE model and then the underlying estimator on the selected features.
245
+
246
+ Parameters
247
+ ----------
248
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
249
+ The training input samples.
250
+
251
+ y : array-like of shape (n_samples,)
252
+ The target values.
253
+
254
+ **fit_params : dict
255
+ Additional parameters passed to the `fit` method of the underlying
256
+ estimator.
257
+
258
+ Returns
259
+ -------
260
+ self : object
261
+ Fitted estimator.
262
+ """
263
+ _raise_for_unsupported_routing(self, "fit", **fit_params)
264
+ return self._fit(X, y, **fit_params)
265
+
266
+ def _fit(self, X, y, step_score=None, **fit_params):
267
+ # Parameter step_score controls the calculation of self.scores_
268
+ # step_score is not exposed to users
269
+ # and is used when implementing RFECV
270
+ # self.scores_ will not be calculated when calling _fit through fit
271
+
272
+ X, y = self._validate_data(
273
+ X,
274
+ y,
275
+ accept_sparse="csc",
276
+ ensure_min_features=2,
277
+ force_all_finite=False,
278
+ multi_output=True,
279
+ )
280
+
281
+ # Initialization
282
+ n_features = X.shape[1]
283
+ if self.n_features_to_select is None:
284
+ n_features_to_select = n_features // 2
285
+ elif isinstance(self.n_features_to_select, Integral): # int
286
+ n_features_to_select = self.n_features_to_select
287
+ else: # float
288
+ n_features_to_select = int(n_features * self.n_features_to_select)
289
+
290
+ if 0.0 < self.step < 1.0:
291
+ step = int(max(1, self.step * n_features))
292
+ else:
293
+ step = int(self.step)
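+ # For illustration: with n_features=10 and step=0.3, int(max(1, 0.3 * 10))
+ # gives 3 features removed per iteration; with an integer step such as
+ # step=2, exactly 2 features are removed per iteration.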
294
+
295
+ support_ = np.ones(n_features, dtype=bool)
296
+ ranking_ = np.ones(n_features, dtype=int)
297
+
298
+ if step_score:
299
+ self.scores_ = []
300
+
301
+ # Elimination
302
+ while np.sum(support_) > n_features_to_select:
303
+ # Remaining features
304
+ features = np.arange(n_features)[support_]
305
+
306
+ # Rank the remaining features
307
+ estimator = clone(self.estimator)
308
+ if self.verbose > 0:
309
+ print("Fitting estimator with %d features." % np.sum(support_))
310
+
311
+ estimator.fit(X[:, features], y, **fit_params)
312
+
313
+ # Get importance and rank them
314
+ importances = _get_feature_importances(
315
+ estimator,
316
+ self.importance_getter,
317
+ transform_func="square",
318
+ )
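+ # With transform_func="square", 1D importances are simply squared, while 2D
+ # importances (e.g. the multi-class `coef_` of a linear model) are squared
+ # and summed over outputs, so each feature ends up with a single
+ # non-negative importance before ranking.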
319
+ ranks = np.argsort(importances)
320
+
321
+ # for sparse case ranks is matrix
322
+ ranks = np.ravel(ranks)
323
+
324
+ # Eliminate the worst features
325
+ threshold = min(step, np.sum(support_) - n_features_to_select)
326
+
327
+ # Compute step score on the previous selection iteration
328
+ # because 'estimator' must use features
329
+ # that have not been eliminated yet
330
+ if step_score:
331
+ self.scores_.append(step_score(estimator, features))
332
+ support_[features[ranks][:threshold]] = False
333
+ ranking_[np.logical_not(support_)] += 1
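+ # Worked example of the ranking bookkeeping above: reducing 5 features to 2
+ # with step=1, the first feature eliminated ends with rank 4, the next with
+ # rank 3, the last eliminated with rank 2, and the 2 surviving features keep
+ # rank 1 (every already-eliminated feature is incremented again each round).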
334
+
335
+ # Set final attributes
336
+ features = np.arange(n_features)[support_]
337
+ self.estimator_ = clone(self.estimator)
338
+ self.estimator_.fit(X[:, features], y, **fit_params)
339
+
340
+ # Compute step score when only n_features_to_select features left
341
+ if step_score:
342
+ self.scores_.append(step_score(self.estimator_, features))
343
+ self.n_features_ = support_.sum()
344
+ self.support_ = support_
345
+ self.ranking_ = ranking_
346
+
347
+ return self
348
+
349
+ @available_if(_estimator_has("predict"))
350
+ def predict(self, X):
351
+ """Reduce X to the selected features and predict using the estimator.
352
+
353
+ Parameters
354
+ ----------
355
+ X : array of shape [n_samples, n_features]
356
+ The input samples.
357
+
358
+ Returns
359
+ -------
360
+ y : array of shape [n_samples]
361
+ The predicted target values.
362
+ """
363
+ check_is_fitted(self)
364
+ return self.estimator_.predict(self.transform(X))
365
+
366
+ @available_if(_estimator_has("score"))
367
+ def score(self, X, y, **fit_params):
368
+ """Reduce X to the selected features and return the score of the estimator.
369
+
370
+ Parameters
371
+ ----------
372
+ X : array of shape [n_samples, n_features]
373
+ The input samples.
374
+
375
+ y : array of shape [n_samples]
376
+ The target values.
377
+
378
+ **fit_params : dict
379
+ Parameters to pass to the `score` method of the underlying
380
+ estimator.
381
+
382
+ .. versionadded:: 1.0
383
+
384
+ Returns
385
+ -------
386
+ score : float
387
+ Score of the underlying base estimator computed with the selected
388
+ features returned by `rfe.transform(X)` and `y`.
389
+ """
390
+ check_is_fitted(self)
391
+ return self.estimator_.score(self.transform(X), y, **fit_params)
392
+
393
+ def _get_support_mask(self):
394
+ check_is_fitted(self)
395
+ return self.support_
396
+
397
+ @available_if(_estimator_has("decision_function"))
398
+ def decision_function(self, X):
399
+ """Compute the decision function of ``X``.
400
+
401
+ Parameters
402
+ ----------
403
+ X : {array-like or sparse matrix} of shape (n_samples, n_features)
404
+ The input samples. Internally, it will be converted to
405
+ ``dtype=np.float32`` and if a sparse matrix is provided
406
+ to a sparse ``csr_matrix``.
407
+
408
+ Returns
409
+ -------
410
+ score : array, shape = [n_samples, n_classes] or [n_samples]
411
+ The decision function of the input samples. The order of the
412
+ classes corresponds to that in the attribute :term:`classes_`.
413
+ Regression and binary classification produce an array of shape
414
+ [n_samples].
415
+ """
416
+ check_is_fitted(self)
417
+ return self.estimator_.decision_function(self.transform(X))
418
+
419
+ @available_if(_estimator_has("predict_proba"))
420
+ def predict_proba(self, X):
421
+ """Predict class probabilities for X.
422
+
423
+ Parameters
424
+ ----------
425
+ X : {array-like or sparse matrix} of shape (n_samples, n_features)
426
+ The input samples. Internally, it will be converted to
427
+ ``dtype=np.float32`` and if a sparse matrix is provided
428
+ to a sparse ``csr_matrix``.
429
+
430
+ Returns
431
+ -------
432
+ p : array of shape (n_samples, n_classes)
433
+ The class probabilities of the input samples. The order of the
434
+ classes corresponds to that in the attribute :term:`classes_`.
435
+ """
436
+ check_is_fitted(self)
437
+ return self.estimator_.predict_proba(self.transform(X))
438
+
439
+ @available_if(_estimator_has("predict_log_proba"))
440
+ def predict_log_proba(self, X):
441
+ """Predict class log-probabilities for X.
442
+
443
+ Parameters
444
+ ----------
445
+ X : array of shape [n_samples, n_features]
446
+ The input samples.
447
+
448
+ Returns
449
+ -------
450
+ p : array of shape (n_samples, n_classes)
451
+ The class log-probabilities of the input samples. The order of the
452
+ classes corresponds to that in the attribute :term:`classes_`.
453
+ """
454
+ check_is_fitted(self)
455
+ return self.estimator_.predict_log_proba(self.transform(X))
456
+
457
+ def _more_tags(self):
458
+ tags = {
459
+ "poor_score": True,
460
+ "requires_y": True,
461
+ "allow_nan": True,
462
+ }
463
+
464
+ # Adjust allow_nan if estimator explicitly defines `allow_nan`.
465
+ if hasattr(self.estimator, "_get_tags"):
466
+ tags["allow_nan"] = self.estimator._get_tags()["allow_nan"]
467
+
468
+ return tags
469
+
470
+
471
+ class RFECV(RFE):
472
+ """Recursive feature elimination with cross-validation to select features.
473
+
474
+ The number of features selected is tuned automatically by fitting an :class:`RFE`
475
+ selector on the different cross-validation splits (provided by the `cv` parameter).
476
+ The performance of the :class:`RFE` selector is evaluated using `scorer` for
477
+ different numbers of selected features and aggregated together. Finally, the scores
478
+ are averaged across folds and the number of features selected is set to the number
479
+ of features that maximizes the cross-validation score.
480
+ See glossary entry for :term:`cross-validation estimator`.
481
+
482
+ Read more in the :ref:`User Guide <rfe>`.
483
+
484
+ Parameters
485
+ ----------
486
+ estimator : ``Estimator`` instance
487
+ A supervised learning estimator with a ``fit`` method that provides
488
+ information about feature importance either through a ``coef_``
489
+ attribute or through a ``feature_importances_`` attribute.
490
+
491
+ step : int or float, default=1
492
+ If greater than or equal to 1, then ``step`` corresponds to the
493
+ (integer) number of features to remove at each iteration.
494
+ If within (0.0, 1.0), then ``step`` corresponds to the percentage
495
+ (rounded down) of features to remove at each iteration.
496
+ Note that the last iteration may remove fewer than ``step`` features in
497
+ order to reach ``min_features_to_select``.
498
+
499
+ min_features_to_select : int, default=1
500
+ The minimum number of features to be selected. This number of features
501
+ will always be scored, even if the difference between the original
502
+ feature count and ``min_features_to_select`` isn't divisible by
503
+ ``step``.
504
+
505
+ .. versionadded:: 0.20
506
+
507
+ cv : int, cross-validation generator or an iterable, default=None
508
+ Determines the cross-validation splitting strategy.
509
+ Possible inputs for cv are:
510
+
511
+ - None, to use the default 5-fold cross-validation,
512
+ - integer, to specify the number of folds.
513
+ - :term:`CV splitter`,
514
+ - An iterable yielding (train, test) splits as arrays of indices.
515
+
516
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
517
+ either binary or multiclass,
518
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
519
+ cases, :class:`~sklearn.model_selection.KFold` is used.
520
+
521
+ Refer to the :ref:`User Guide <cross_validation>` for the various
522
+ cross-validation strategies that can be used here.
523
+
524
+ .. versionchanged:: 0.22
525
+ ``cv`` default value of None changed from 3-fold to 5-fold.
526
+
527
+ scoring : str, callable or None, default=None
528
+ A string (see model evaluation documentation) or
529
+ a scorer callable object / function with signature
530
+ ``scorer(estimator, X, y)``.
531
+
532
+ verbose : int, default=0
533
+ Controls verbosity of output.
534
+
535
+ n_jobs : int or None, default=None
536
+ Number of cores to run in parallel while fitting across folds.
537
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
538
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
539
+ for more details.
540
+
541
+ .. versionadded:: 0.18
542
+
543
+ importance_getter : str or callable, default='auto'
544
+ If 'auto', uses the feature importance either through a `coef_`
545
+ or `feature_importances_` attribute of the estimator.
546
+
547
+ Also accepts a string that specifies an attribute name/path
548
+ for extracting feature importance.
549
+ For example, give `regressor_.coef_` in case of
550
+ :class:`~sklearn.compose.TransformedTargetRegressor` or
551
+ `named_steps.clf.feature_importances_` in case of
552
+ :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
553
+
554
+ If `callable`, overrides the default feature importance getter.
555
+ The callable is passed the fitted estimator and should
556
+ return the importance of each feature.
557
+
558
+ .. versionadded:: 0.24
559
+
560
+ Attributes
561
+ ----------
562
+ classes_ : ndarray of shape (n_classes,)
563
+ The class labels. Only available when `estimator` is a classifier.
564
+
565
+ estimator_ : ``Estimator`` instance
566
+ The fitted estimator used to select features.
567
+
568
+ cv_results_ : dict of ndarrays
569
+ A dict with keys:
570
+
571
+ split(k)_test_score : ndarray of shape (n_subsets_of_features,)
572
+ The cross-validation scores across the (k)th fold.
573
+
574
+ mean_test_score : ndarray of shape (n_subsets_of_features,)
575
+ Mean of scores over the folds.
576
+
577
+ std_test_score : ndarray of shape (n_subsets_of_features,)
578
+ Standard deviation of scores over the folds.
579
+
580
+ .. versionadded:: 1.0
581
+
582
+ n_features_ : int
583
+ The number of selected features with cross-validation.
584
+
585
+ n_features_in_ : int
586
+ Number of features seen during :term:`fit`. Only defined if the
587
+ underlying estimator exposes such an attribute when fit.
588
+
589
+ .. versionadded:: 0.24
590
+
591
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
592
+ Names of features seen during :term:`fit`. Defined only when `X`
593
+ has feature names that are all strings.
594
+
595
+ .. versionadded:: 1.0
596
+
597
+ ranking_ : ndarray of shape (n_features,)
598
+ The feature ranking, such that `ranking_[i]`
599
+ corresponds to the ranking
600
+ position of the i-th feature.
601
+ Selected (i.e., estimated best)
602
+ features are assigned rank 1.
603
+
604
+ support_ : ndarray of shape (n_features,)
605
+ The mask of selected features.
606
+
607
+ See Also
608
+ --------
609
+ RFE : Recursive feature elimination.
610
+
611
+ Notes
612
+ -----
613
+ The size of all values in ``cv_results_`` is equal to
614
+ ``ceil((n_features - min_features_to_select) / step) + 1``,
615
+ where step is the number of features removed at each iteration.
616
+
617
+ Allows NaN/Inf in the input if the underlying estimator does as well.
618
+
619
+ References
620
+ ----------
621
+
622
+ .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
623
+ for cancer classification using support vector machines",
624
+ Mach. Learn., 46(1-3), 389--422, 2002.
625
+
626
+ Examples
627
+ --------
628
+ The following example shows how to retrieve the 5 informative features,
629
+ not known a priori, in the Friedman #1 dataset.
630
+
631
+ >>> from sklearn.datasets import make_friedman1
632
+ >>> from sklearn.feature_selection import RFECV
633
+ >>> from sklearn.svm import SVR
634
+ >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
635
+ >>> estimator = SVR(kernel="linear")
636
+ >>> selector = RFECV(estimator, step=1, cv=5)
637
+ >>> selector = selector.fit(X, y)
638
+ >>> selector.support_
639
+ array([ True, True, True, True, True, False, False, False, False,
640
+ False])
641
+ >>> selector.ranking_
642
+ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
643
+ """
644
+
645
+ _parameter_constraints: dict = {
646
+ **RFE._parameter_constraints,
647
+ "min_features_to_select": [Interval(Integral, 0, None, closed="neither")],
648
+ "cv": ["cv_object"],
649
+ "scoring": [None, str, callable],
650
+ "n_jobs": [None, Integral],
651
+ }
652
+ _parameter_constraints.pop("n_features_to_select")
653
+
654
+ def __init__(
655
+ self,
656
+ estimator,
657
+ *,
658
+ step=1,
659
+ min_features_to_select=1,
660
+ cv=None,
661
+ scoring=None,
662
+ verbose=0,
663
+ n_jobs=None,
664
+ importance_getter="auto",
665
+ ):
666
+ self.estimator = estimator
667
+ self.step = step
668
+ self.importance_getter = importance_getter
669
+ self.cv = cv
670
+ self.scoring = scoring
671
+ self.verbose = verbose
672
+ self.n_jobs = n_jobs
673
+ self.min_features_to_select = min_features_to_select
674
+
675
+ @_fit_context(
676
+ # RFECV.estimator is not validated yet
677
+ prefer_skip_nested_validation=False
678
+ )
679
+ def fit(self, X, y, groups=None):
680
+ """Fit the RFE model and automatically tune the number of selected features.
681
+
682
+ Parameters
683
+ ----------
684
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
685
+ Training vector, where `n_samples` is the number of samples and
686
+ `n_features` is the total number of features.
687
+
688
+ y : array-like of shape (n_samples,)
689
+ Target values (integers for classification, real numbers for
690
+ regression).
691
+
692
+ groups : array-like of shape (n_samples,) or None, default=None
693
+ Group labels for the samples used while splitting the dataset into
694
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
695
+ instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
696
+
697
+ .. versionadded:: 0.20
698
+
699
+ Returns
700
+ -------
701
+ self : object
702
+ Fitted estimator.
703
+ """
704
+ _raise_for_unsupported_routing(self, "fit", groups=groups)
705
+ X, y = self._validate_data(
706
+ X,
707
+ y,
708
+ accept_sparse="csr",
709
+ ensure_min_features=2,
710
+ force_all_finite=False,
711
+ multi_output=True,
712
+ )
713
+
714
+ # Initialization
715
+ cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
716
+ scorer = check_scoring(self.estimator, scoring=self.scoring)
717
+ n_features = X.shape[1]
718
+
719
+ if 0.0 < self.step < 1.0:
720
+ step = int(max(1, self.step * n_features))
721
+ else:
722
+ step = int(self.step)
723
+
724
+ # Build an RFE object, which will evaluate and score each possible
725
+ # feature count, down to self.min_features_to_select
726
+ rfe = RFE(
727
+ estimator=self.estimator,
728
+ n_features_to_select=self.min_features_to_select,
729
+ importance_getter=self.importance_getter,
730
+ step=self.step,
731
+ verbose=self.verbose,
732
+ )
733
+
734
+ # Determine the number of subsets of features by fitting across
735
+ # the train folds and choosing the "features_to_select" parameter
736
+ # that gives the least averaged error across all folds.
737
+
738
+ # Note that joblib raises a non-picklable error for bound methods
739
+ # even if n_jobs is set to 1 with the default multiprocessing
740
+ # backend.
741
+ # This branching is done to make sure that user code that
742
+ # sets n_jobs to 1 and provides bound methods as scorers is
743
+ # not broken by the addition of the n_jobs parameter in
744
+ # version 0.18.
745
+
746
+ if effective_n_jobs(self.n_jobs) == 1:
747
+ parallel, func = list, _rfe_single_fit
748
+ else:
749
+ parallel = Parallel(n_jobs=self.n_jobs)
750
+ func = delayed(_rfe_single_fit)
751
+
752
+ scores = parallel(
753
+ func(rfe, self.estimator, X, y, train, test, scorer)
754
+ for train, test in cv.split(X, y, groups)
755
+ )
756
+
757
+ scores = np.array(scores)
758
+ scores_sum = np.sum(scores, axis=0)
759
+ scores_sum_rev = scores_sum[::-1]
760
+ argmax_idx = len(scores_sum) - np.argmax(scores_sum_rev) - 1
761
+ n_features_to_select = max(
762
+ n_features - (argmax_idx * step), self.min_features_to_select
763
+ )
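+ # Illustrative arithmetic for the selection above: each index i of
+ # `scores_sum` corresponds to roughly n_features - i * step remaining
+ # features (index 0 == all features). With 10 features, step=1 and
+ # min_features_to_select=1, a best summed score at index 5 yields
+ # n_features_to_select = max(10 - 5 * 1, 1) == 5; ties are broken in
+ # favour of the smaller feature subset via the reversed argmax.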
764
+
765
+ # Re-execute an elimination with best_k over the whole set
766
+ rfe = RFE(
767
+ estimator=self.estimator,
768
+ n_features_to_select=n_features_to_select,
769
+ step=self.step,
770
+ importance_getter=self.importance_getter,
771
+ verbose=self.verbose,
772
+ )
773
+
774
+ rfe.fit(X, y)
775
+
776
+ # Set final attributes
777
+ self.support_ = rfe.support_
778
+ self.n_features_ = rfe.n_features_
779
+ self.ranking_ = rfe.ranking_
780
+ self.estimator_ = clone(self.estimator)
781
+ self.estimator_.fit(self._transform(X), y)
782
+
783
+ # reverse to stay consistent with before
784
+ scores_rev = scores[:, ::-1]
785
+ self.cv_results_ = {}
786
+ self.cv_results_["mean_test_score"] = np.mean(scores_rev, axis=0)
787
+ self.cv_results_["std_test_score"] = np.std(scores_rev, axis=0)
788
+
789
+ for i in range(scores.shape[0]):
790
+ self.cv_results_[f"split{i}_test_score"] = scores_rev[i]
791
+
792
+ return self
venv/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py ADDED
@@ -0,0 +1,300 @@
1
+ """
2
+ Sequential feature selection
3
+ """
4
+ from numbers import Integral, Real
5
+
6
+ import numpy as np
7
+
8
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
9
+ from ..metrics import get_scorer_names
10
+ from ..model_selection import check_cv, cross_val_score
11
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
12
+ from ..utils._tags import _safe_tags
13
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
14
+ from ..utils.validation import check_is_fitted
15
+ from ._base import SelectorMixin
16
+
17
+
18
+ class SequentialFeatureSelector(
19
+ _RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator
20
+ ):
21
+ """Transformer that performs Sequential Feature Selection.
22
+
23
+ This Sequential Feature Selector adds (forward selection) or
24
+ removes (backward selection) features to form a feature subset in a
25
+ greedy fashion. At each stage, this estimator chooses the best feature to
26
+ add or remove based on the cross-validation score of an estimator. In
27
+ the case of unsupervised learning, this Sequential Feature Selector
28
+ looks only at the features (X), not the desired outputs (y).
29
+
30
+ Read more in the :ref:`User Guide <sequential_feature_selection>`.
31
+
32
+ .. versionadded:: 0.24
33
+
34
+ Parameters
35
+ ----------
36
+ estimator : estimator instance
37
+ An unfitted estimator.
38
+
39
+ n_features_to_select : "auto", int or float, default="auto"
40
+ If `"auto"`, the behaviour depends on the `tol` parameter:
41
+
42
+ - if `tol` is not `None`, then features are selected while the score
43
+ change does not exceed `tol`.
44
+ - otherwise, half of the features are selected.
45
+
46
+ If integer, the parameter is the absolute number of features to select.
47
+ If float between 0 and 1, it is the fraction of features to select.
48
+
49
+ .. versionadded:: 1.1
50
+ The option `"auto"` was added in version 1.1.
51
+
52
+ .. versionchanged:: 1.3
53
+ The default changed from `"warn"` to `"auto"` in 1.3.
54
+
55
+ tol : float, default=None
56
+ If the score is not incremented by at least `tol` between two
57
+ consecutive feature additions or removals, stop adding or removing.
58
+
59
+ `tol` can be negative when removing features using `direction="backward"`.
60
+ It can be useful to reduce the number of features at the cost of a small
61
+ decrease in the score.
62
+
63
+ `tol` is enabled only when `n_features_to_select` is `"auto"`.
64
+
65
+ .. versionadded:: 1.1
66
+
67
+ direction : {'forward', 'backward'}, default='forward'
68
+ Whether to perform forward selection or backward selection.
69
+
70
+ scoring : str or callable, default=None
71
+ A single str (see :ref:`scoring_parameter`) or a callable
72
+ (see :ref:`scoring`) to evaluate the predictions on the test set.
73
+
74
+ NOTE that when using a custom scorer, it should return a single
75
+ value.
76
+
77
+ If None, the estimator's score method is used.
78
+
79
+ cv : int, cross-validation generator or an iterable, default=None
80
+ Determines the cross-validation splitting strategy.
81
+ Possible inputs for cv are:
82
+
83
+ - None, to use the default 5-fold cross validation,
84
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
85
+ - :term:`CV splitter`,
86
+ - An iterable yielding (train, test) splits as arrays of indices.
87
+
88
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
89
+ either binary or multiclass,
90
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
91
+ cases, :class:`~sklearn.model_selection.KFold` is used. These splitters
92
+ are instantiated with `shuffle=False` so the splits will be the same
93
+ across calls.
94
+
95
+ Refer to the :ref:`User Guide <cross_validation>` for the various
96
+ cross-validation strategies that can be used here.
97
+
98
+ n_jobs : int, default=None
99
+ Number of jobs to run in parallel. When evaluating a new feature to
100
+ add or remove, the cross-validation procedure is parallel over the
101
+ folds.
102
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
103
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
104
+ for more details.
105
+
106
+ Attributes
107
+ ----------
108
+ n_features_in_ : int
109
+ Number of features seen during :term:`fit`. Only defined if the
110
+ underlying estimator exposes such an attribute when fit.
111
+
112
+ .. versionadded:: 0.24
113
+
114
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
115
+ Names of features seen during :term:`fit`. Defined only when `X`
116
+ has feature names that are all strings.
117
+
118
+ .. versionadded:: 1.0
119
+
120
+ n_features_to_select_ : int
121
+ The number of features that were selected.
122
+
123
+ support_ : ndarray of shape (n_features,), dtype=bool
124
+ The mask of selected features.
125
+
126
+ See Also
127
+ --------
128
+ GenericUnivariateSelect : Univariate feature selector with configurable
129
+ strategy.
130
+ RFE : Recursive feature elimination based on importance weights.
131
+ RFECV : Recursive feature elimination based on importance weights, with
132
+ automatic selection of the number of features.
133
+ SelectFromModel : Feature selection based on thresholds of importance
134
+ weights.
135
+
136
+ Examples
137
+ --------
138
+ >>> from sklearn.feature_selection import SequentialFeatureSelector
139
+ >>> from sklearn.neighbors import KNeighborsClassifier
140
+ >>> from sklearn.datasets import load_iris
141
+ >>> X, y = load_iris(return_X_y=True)
142
+ >>> knn = KNeighborsClassifier(n_neighbors=3)
143
+ >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3)
144
+ >>> sfs.fit(X, y)
145
+ SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3),
146
+ n_features_to_select=3)
147
+ >>> sfs.get_support()
148
+ array([ True, False, True, True])
149
+ >>> sfs.transform(X).shape
150
+ (150, 3)
151
+ """
152
+
153
+ _parameter_constraints: dict = {
154
+ "estimator": [HasMethods(["fit"])],
155
+ "n_features_to_select": [
156
+ StrOptions({"auto"}),
157
+ Interval(RealNotInt, 0, 1, closed="right"),
158
+ Interval(Integral, 0, None, closed="neither"),
159
+ ],
160
+ "tol": [None, Interval(Real, None, None, closed="neither")],
161
+ "direction": [StrOptions({"forward", "backward"})],
162
+ "scoring": [None, StrOptions(set(get_scorer_names())), callable],
163
+ "cv": ["cv_object"],
164
+ "n_jobs": [None, Integral],
165
+ }
166
+
167
+ def __init__(
168
+ self,
169
+ estimator,
170
+ *,
171
+ n_features_to_select="auto",
172
+ tol=None,
173
+ direction="forward",
174
+ scoring=None,
175
+ cv=5,
176
+ n_jobs=None,
177
+ ):
178
+ self.estimator = estimator
179
+ self.n_features_to_select = n_features_to_select
180
+ self.tol = tol
181
+ self.direction = direction
182
+ self.scoring = scoring
183
+ self.cv = cv
184
+ self.n_jobs = n_jobs
185
+
186
+ @_fit_context(
187
+ # SequentialFeatureSelector.estimator is not validated yet
188
+ prefer_skip_nested_validation=False
189
+ )
190
+ def fit(self, X, y=None):
191
+ """Learn the features to select from X.
192
+
193
+ Parameters
194
+ ----------
195
+ X : array-like of shape (n_samples, n_features)
196
+ Training vectors, where `n_samples` is the number of samples and
197
+ `n_features` is the number of predictors.
198
+
199
+ y : array-like of shape (n_samples,), default=None
200
+ Target values. This parameter may be ignored for
201
+ unsupervised learning.
202
+
203
+ Returns
204
+ -------
205
+ self : object
206
+ Returns the instance itself.
207
+ """
208
+ tags = self._get_tags()
209
+ X = self._validate_data(
210
+ X,
211
+ accept_sparse="csc",
212
+ ensure_min_features=2,
213
+ force_all_finite=not tags.get("allow_nan", True),
214
+ )
215
+ n_features = X.shape[1]
216
+
217
+ if self.n_features_to_select == "auto":
218
+ if self.tol is not None:
219
+ # With auto feature selection, `n_features_to_select_` will be updated
220
+ # to `support_.sum()` after features are selected.
221
+ self.n_features_to_select_ = n_features - 1
222
+ else:
223
+ self.n_features_to_select_ = n_features // 2
224
+ elif isinstance(self.n_features_to_select, Integral):
225
+ if self.n_features_to_select >= n_features:
226
+ raise ValueError("n_features_to_select must be < n_features.")
227
+ self.n_features_to_select_ = self.n_features_to_select
228
+ elif isinstance(self.n_features_to_select, Real):
229
+ self.n_features_to_select_ = int(n_features * self.n_features_to_select)
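+ # For illustration: with 4 features, n_features_to_select=0.5 resolves to
+ # int(4 * 0.5) == 2 selected features; with the default "auto" and tol=None
+ # it would likewise resolve to 4 // 2 == 2.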
230
+
231
+ if self.tol is not None and self.tol < 0 and self.direction == "forward":
232
+ raise ValueError("tol must be positive when doing forward selection")
233
+
234
+ cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
235
+
236
+ cloned_estimator = clone(self.estimator)
237
+
238
+ # the current mask corresponds to the set of features:
239
+ # - that we have already *selected* if we do forward selection
240
+ # - that we have already *excluded* if we do backward selection
241
+ current_mask = np.zeros(shape=n_features, dtype=bool)
242
+ n_iterations = (
243
+ self.n_features_to_select_
244
+ if self.n_features_to_select == "auto" or self.direction == "forward"
245
+ else n_features - self.n_features_to_select_
246
+ )
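+ # Illustrative count for the loop below: forward selection needs one
+ # iteration per feature added (n_features_to_select_ iterations), while
+ # backward selection with, say, 10 features and n_features_to_select=3
+ # removes 10 - 3 == 7 candidates; with "auto" the loop may stop earlier
+ # through the `tol` check.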
247
+
248
+ old_score = -np.inf
249
+ is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
250
+ for _ in range(n_iterations):
251
+ new_feature_idx, new_score = self._get_best_new_feature_score(
252
+ cloned_estimator, X, y, cv, current_mask
253
+ )
254
+ if is_auto_select and ((new_score - old_score) < self.tol):
255
+ break
256
+
257
+ old_score = new_score
258
+ current_mask[new_feature_idx] = True
259
+
260
+ if self.direction == "backward":
261
+ current_mask = ~current_mask
262
+
263
+ self.support_ = current_mask
264
+ self.n_features_to_select_ = self.support_.sum()
265
+
266
+ return self
267
+
268
+ def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask):
269
+ # Return the best new feature and its score to add to the current_mask,
270
+ # i.e. return the best new feature and its score to add (resp. remove)
271
+ # when doing forward selection (resp. backward selection).
272
+ # When `n_features_to_select` is "auto", the feature is only kept if the
273
+ # score improves on the previous score by at least `tol`.
274
+ candidate_feature_indices = np.flatnonzero(~current_mask)
275
+ scores = {}
276
+ for feature_idx in candidate_feature_indices:
277
+ candidate_mask = current_mask.copy()
278
+ candidate_mask[feature_idx] = True
279
+ if self.direction == "backward":
280
+ candidate_mask = ~candidate_mask
281
+ X_new = X[:, candidate_mask]
282
+ scores[feature_idx] = cross_val_score(
283
+ estimator,
284
+ X_new,
285
+ y,
286
+ cv=cv,
287
+ scoring=self.scoring,
288
+ n_jobs=self.n_jobs,
289
+ ).mean()
290
+ new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx])
291
+ return new_feature_idx, scores[new_feature_idx]
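+ # Rough cost sketch (illustrative): each call above fits the estimator once
+ # per CV split for every remaining candidate feature, so a full run of
+ # SequentialFeatureSelector performs on the order of
+ # n_iterations * n_features * n_splits estimator fits.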
292
+
293
+ def _get_support_mask(self):
294
+ check_is_fitted(self)
295
+ return self.support_
296
+
297
+ def _more_tags(self):
298
+ return {
299
+ "allow_nan": _safe_tags(self.estimator, key="allow_nan"),
300
+ }