applied-ai-018 committed

Commit 4fe907d · verified · 1 parent: 9e65f67

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the complete set.
Files changed (50):
  1. ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/19.attention.dense.weight/fp32.pt +3 -0
  3. venv/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py +15 -0
  4. venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py +902 -0
  9. venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py +673 -0
  10. venv/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py +2415 -0
  11. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py +0 -0
  12. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py +54 -0
  18. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py +288 -0
  19. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py +853 -0
  20. venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py +388 -0
  21. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py +15 -0
  39. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py +525 -0
  43. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py +904 -0
  44. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py +1 -0
  45. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py +1112 -0
  48. venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py +0 -0
  49. venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/19.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad37a0acc46ab46511ce568a17497d8c55fa5c71a08bcf004b3438ea60054721
+ size 16778396
ckpts/universal/global_step40/zero/19.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b00e249ac56688ceb8f4c14ab09acb2661b3eb3af6f3d5b8339599114a1ddabd
+ size 16778317
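For readers who want to inspect these two checkpoint shards locally, here is a minimal sketch (not part of the commit). It assumes `git lfs pull` has already replaced the pointer files above with their real payloads, and that each `.pt` file deserializes to a single tensor or a small container of tensors.

import torch

# Hypothetical local inspection of one LFS-backed shard from this commit.
path = "ckpts/universal/global_step40/zero/19.attention.dense.weight/fp32.pt"
obj = torch.load(path, map_location="cpu")  # load on CPU; no GPU required
print(type(obj))
print(getattr(obj, "shape", None), getattr(obj, "dtype", None))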
venv/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py ADDED
@@ -0,0 +1,15 @@
+ # Author: Jan Hendrik Metzen <[email protected]>
+ #         Vincent Dubourg <[email protected]>
+ #         (mostly translation, see implementation details)
+ # License: BSD 3 clause
+
+ """
+ The :mod:`sklearn.gaussian_process` module implements Gaussian Process
+ based regression and classification.
+ """
+
+ from . import kernels
+ from ._gpc import GaussianProcessClassifier
+ from ._gpr import GaussianProcessRegressor
+
+ __all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "kernels"]
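The `__init__.py` above only re-exports the two estimators and the `kernels` submodule. A minimal, illustrative usage sketch against this vendored copy (assuming NumPy and the packaged scikit-learn import cleanly) might look like:

import numpy as np
from sklearn.gaussian_process import (
    GaussianProcessClassifier,
    GaussianProcessRegressor,
    kernels,
)

# Tiny toy data; purely illustrative.
X = np.array([[0.0], [1.0], [2.0], [3.0]])
y_reg = np.sin(X).ravel()
y_clf = (y_reg > 0.5).astype(int)

gpr = GaussianProcessRegressor(kernel=kernels.RBF(length_scale=1.0)).fit(X, y_reg)
gpc = GaussianProcessClassifier(kernel=1.0 * kernels.RBF(1.0)).fit(X, y_clf)

print(gpr.predict([[1.5]], return_std=True))  # predictive mean and standard deviation
print(gpc.predict_proba([[1.5]]))             # class probabilities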
venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (493 Bytes).

venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc ADDED
Binary file (29.4 kB).

venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc ADDED
Binary file (19.9 kB).

venv/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc ADDED
Binary file (71.2 kB).
venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py ADDED
@@ -0,0 +1,902 @@
1
+ """Gaussian processes classification."""
2
+
3
+ # Authors: Jan Hendrik Metzen <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral
8
+ from operator import itemgetter
9
+
10
+ import numpy as np
11
+ import scipy.optimize
12
+ from scipy.linalg import cho_solve, cholesky, solve
13
+ from scipy.special import erf, expit
14
+
15
+ from ..base import BaseEstimator, ClassifierMixin, _fit_context, clone
16
+ from ..multiclass import OneVsOneClassifier, OneVsRestClassifier
17
+ from ..preprocessing import LabelEncoder
18
+ from ..utils import check_random_state
19
+ from ..utils._param_validation import Interval, StrOptions
20
+ from ..utils.optimize import _check_optimize_result
21
+ from ..utils.validation import check_is_fitted
22
+ from .kernels import RBF, CompoundKernel, Kernel
23
+ from .kernels import ConstantKernel as C
24
+
25
+ # Values required for approximating the logistic sigmoid by
26
+ # error functions. coefs are obtained via:
27
+ # x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
28
+ # b = logistic(x)
29
+ # A = (erf(np.dot(x, self.lambdas)) + 1) / 2
30
+ # coefs = lstsq(A, b)[0]
31
+ LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
32
+ COEFS = np.array(
33
+ [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]
34
+ )[:, np.newaxis]
35
+
36
+
37
+ class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
38
+ """Binary Gaussian process classification based on Laplace approximation.
39
+
40
+ The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.
41
+
42
+ Internally, the Laplace approximation is used for approximating the
43
+ non-Gaussian posterior by a Gaussian.
44
+
45
+ Currently, the implementation is restricted to using the logistic link
46
+ function.
47
+
48
+ .. versionadded:: 0.18
49
+
50
+ Parameters
51
+ ----------
52
+ kernel : kernel instance, default=None
53
+ The kernel specifying the covariance function of the GP. If None is
54
+ passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
55
+ the kernel's hyperparameters are optimized during fitting.
56
+
57
+ optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
58
+ Can either be one of the internally supported optimizers for optimizing
59
+ the kernel's parameters, specified by a string, or an externally
60
+ defined optimizer passed as a callable. If a callable is passed, it
61
+ must have the signature::
62
+
63
+ def optimizer(obj_func, initial_theta, bounds):
64
+ # * 'obj_func' is the objective function to be maximized, which
65
+ # takes the hyperparameters theta as parameter and an
66
+ # optional flag eval_gradient, which determines if the
67
+ # gradient is returned additionally to the function value
68
+ # * 'initial_theta': the initial value for theta, which can be
69
+ # used by local optimizers
70
+ # * 'bounds': the bounds on the values of theta
71
+ ....
72
+ # Returned are the best found hyperparameters theta and
73
+ # the corresponding value of the target function.
74
+ return theta_opt, func_min
75
+
76
+ Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
77
+ is used. If None is passed, the kernel's parameters are kept fixed.
78
+ Available internal optimizers are::
79
+
80
+ 'fmin_l_bfgs_b'
81
+
82
+ n_restarts_optimizer : int, default=0
83
+ The number of restarts of the optimizer for finding the kernel's
84
+ parameters which maximize the log-marginal likelihood. The first run
85
+ of the optimizer is performed from the kernel's initial parameters,
86
+ the remaining ones (if any) from thetas sampled log-uniform randomly
87
+ from the space of allowed theta-values. If greater than 0, all bounds
88
+ must be finite. Note that n_restarts_optimizer=0 implies that one
89
+ run is performed.
90
+
91
+ max_iter_predict : int, default=100
92
+ The maximum number of iterations in Newton's method for approximating
93
+ the posterior during predict. Smaller values will reduce computation
94
+ time at the cost of worse results.
95
+
96
+ warm_start : bool, default=False
97
+ If warm-starts are enabled, the solution of the last Newton iteration
98
+ on the Laplace approximation of the posterior mode is used as
99
+ initialization for the next call of _posterior_mode(). This can speed
100
+ up convergence when _posterior_mode is called several times on similar
101
+ problems as in hyperparameter optimization. See :term:`the Glossary
102
+ <warm_start>`.
103
+
104
+ copy_X_train : bool, default=True
105
+ If True, a persistent copy of the training data is stored in the
106
+ object. Otherwise, just a reference to the training data is stored,
107
+ which might cause predictions to change if the data is modified
108
+ externally.
109
+
110
+ random_state : int, RandomState instance or None, default=None
111
+ Determines random number generation used to initialize the centers.
112
+ Pass an int for reproducible results across multiple function calls.
113
+ See :term:`Glossary <random_state>`.
114
+
115
+ Attributes
116
+ ----------
117
+ X_train_ : array-like of shape (n_samples, n_features) or list of object
118
+ Feature vectors or other representations of training data (also
119
+ required for prediction).
120
+
121
+ y_train_ : array-like of shape (n_samples,)
122
+ Target values in training data (also required for prediction)
123
+
124
+ classes_ : array-like of shape (n_classes,)
125
+ Unique class labels.
126
+
127
+ kernel_ : kernl instance
128
+ The kernel used for prediction. The structure of the kernel is the
129
+ same as the one passed as parameter but with optimized hyperparameters
130
+
131
+ L_ : array-like of shape (n_samples, n_samples)
132
+ Lower-triangular Cholesky decomposition of the kernel in X_train_
133
+
134
+ pi_ : array-like of shape (n_samples,)
135
+ The probabilities of the positive class for the training points
136
+ X_train_
137
+
138
+ W_sr_ : array-like of shape (n_samples,)
139
+ Square root of W, the Hessian of log-likelihood of the latent function
140
+ values for the observed labels. Since W is diagonal, only the diagonal
141
+ of sqrt(W) is stored.
142
+
143
+ log_marginal_likelihood_value_ : float
144
+ The log-marginal-likelihood of ``self.kernel_.theta``
145
+
146
+ References
147
+ ----------
148
+ .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
149
+ "Gaussian Processes for Machine Learning",
150
+ MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
151
+ """
152
+
153
+ def __init__(
154
+ self,
155
+ kernel=None,
156
+ *,
157
+ optimizer="fmin_l_bfgs_b",
158
+ n_restarts_optimizer=0,
159
+ max_iter_predict=100,
160
+ warm_start=False,
161
+ copy_X_train=True,
162
+ random_state=None,
163
+ ):
164
+ self.kernel = kernel
165
+ self.optimizer = optimizer
166
+ self.n_restarts_optimizer = n_restarts_optimizer
167
+ self.max_iter_predict = max_iter_predict
168
+ self.warm_start = warm_start
169
+ self.copy_X_train = copy_X_train
170
+ self.random_state = random_state
171
+
172
+ def fit(self, X, y):
173
+ """Fit Gaussian process classification model.
174
+
175
+ Parameters
176
+ ----------
177
+ X : array-like of shape (n_samples, n_features) or list of object
178
+ Feature vectors or other representations of training data.
179
+
180
+ y : array-like of shape (n_samples,)
181
+ Target values, must be binary.
182
+
183
+ Returns
184
+ -------
185
+ self : returns an instance of self.
186
+ """
187
+ if self.kernel is None: # Use an RBF kernel as default
188
+ self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
189
+ 1.0, length_scale_bounds="fixed"
190
+ )
191
+ else:
192
+ self.kernel_ = clone(self.kernel)
193
+
194
+ self.rng = check_random_state(self.random_state)
195
+
196
+ self.X_train_ = np.copy(X) if self.copy_X_train else X
197
+
198
+ # Encode class labels and check that it is a binary classification
199
+ # problem
200
+ label_encoder = LabelEncoder()
201
+ self.y_train_ = label_encoder.fit_transform(y)
202
+ self.classes_ = label_encoder.classes_
203
+ if self.classes_.size > 2:
204
+ raise ValueError(
205
+ "%s supports only binary classification. y contains classes %s"
206
+ % (self.__class__.__name__, self.classes_)
207
+ )
208
+ elif self.classes_.size == 1:
209
+ raise ValueError(
210
+ "{0:s} requires 2 classes; got {1:d} class".format(
211
+ self.__class__.__name__, self.classes_.size
212
+ )
213
+ )
214
+
215
+ if self.optimizer is not None and self.kernel_.n_dims > 0:
216
+ # Choose hyperparameters based on maximizing the log-marginal
217
+ # likelihood (potentially starting from several initial values)
218
+ def obj_func(theta, eval_gradient=True):
219
+ if eval_gradient:
220
+ lml, grad = self.log_marginal_likelihood(
221
+ theta, eval_gradient=True, clone_kernel=False
222
+ )
223
+ return -lml, -grad
224
+ else:
225
+ return -self.log_marginal_likelihood(theta, clone_kernel=False)
226
+
227
+ # First optimize starting from theta specified in kernel
228
+ optima = [
229
+ self._constrained_optimization(
230
+ obj_func, self.kernel_.theta, self.kernel_.bounds
231
+ )
232
+ ]
233
+
234
+ # Additional runs are performed from log-uniform chosen initial
235
+ # theta
236
+ if self.n_restarts_optimizer > 0:
237
+ if not np.isfinite(self.kernel_.bounds).all():
238
+ raise ValueError(
239
+ "Multiple optimizer restarts (n_restarts_optimizer>0) "
240
+ "requires that all bounds are finite."
241
+ )
242
+ bounds = self.kernel_.bounds
243
+ for iteration in range(self.n_restarts_optimizer):
244
+ theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
245
+ optima.append(
246
+ self._constrained_optimization(obj_func, theta_initial, bounds)
247
+ )
248
+ # Select result from run with minimal (negative) log-marginal
249
+ # likelihood
250
+ lml_values = list(map(itemgetter(1), optima))
251
+ self.kernel_.theta = optima[np.argmin(lml_values)][0]
252
+ self.kernel_._check_bounds_params()
253
+
254
+ self.log_marginal_likelihood_value_ = -np.min(lml_values)
255
+ else:
256
+ self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
257
+ self.kernel_.theta
258
+ )
259
+
260
+ # Precompute quantities required for predictions which are independent
261
+ # of actual query points
262
+ K = self.kernel_(self.X_train_)
263
+
264
+ _, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
265
+ K, return_temporaries=True
266
+ )
267
+
268
+ return self
269
+
270
+ def predict(self, X):
271
+ """Perform classification on an array of test vectors X.
272
+
273
+ Parameters
274
+ ----------
275
+ X : array-like of shape (n_samples, n_features) or list of object
276
+ Query points where the GP is evaluated for classification.
277
+
278
+ Returns
279
+ -------
280
+ C : ndarray of shape (n_samples,)
281
+ Predicted target values for X, values are from ``classes_``
282
+ """
283
+ check_is_fitted(self)
284
+
285
+ # As discussed on Section 3.4.2 of GPML, for making hard binary
286
+ # decisions, it is enough to compute the MAP of the posterior and
287
+ # pass it through the link function
288
+ K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
289
+ f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
290
+
291
+ return np.where(f_star > 0, self.classes_[1], self.classes_[0])
292
+
293
+ def predict_proba(self, X):
294
+ """Return probability estimates for the test vector X.
295
+
296
+ Parameters
297
+ ----------
298
+ X : array-like of shape (n_samples, n_features) or list of object
299
+ Query points where the GP is evaluated for classification.
300
+
301
+ Returns
302
+ -------
303
+ C : array-like of shape (n_samples, n_classes)
304
+ Returns the probability of the samples for each class in
305
+ the model. The columns correspond to the classes in sorted
306
+ order, as they appear in the attribute ``classes_``.
307
+ """
308
+ check_is_fitted(self)
309
+
310
+ # Based on Algorithm 3.2 of GPML
311
+ K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
312
+ f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
313
+ v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
314
+ # Line 6 (compute np.diag(v.T.dot(v)) via einsum)
315
+ var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
316
+
317
+ # Line 7:
318
+ # Approximate \int log(z) * N(z | f_star, var_f_star)
319
+ # Approximation is due to Williams & Barber, "Bayesian Classification
320
+ # with Gaussian Processes", Appendix A: Approximate the logistic
321
+ # sigmoid by a linear combination of 5 error functions.
322
+ # For information on how this integral can be computed see
323
+ # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
324
+ alpha = 1 / (2 * var_f_star)
325
+ gamma = LAMBDAS * f_star
326
+ integrals = (
327
+ np.sqrt(np.pi / alpha)
328
+ * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2)))
329
+ / (2 * np.sqrt(var_f_star * 2 * np.pi))
330
+ )
331
+ pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()
332
+
333
+ return np.vstack((1 - pi_star, pi_star)).T
334
+
335
+ def log_marginal_likelihood(
336
+ self, theta=None, eval_gradient=False, clone_kernel=True
337
+ ):
338
+ """Returns log-marginal likelihood of theta for training data.
339
+
340
+ Parameters
341
+ ----------
342
+ theta : array-like of shape (n_kernel_params,), default=None
343
+ Kernel hyperparameters for which the log-marginal likelihood is
344
+ evaluated. If None, the precomputed log_marginal_likelihood
345
+ of ``self.kernel_.theta`` is returned.
346
+
347
+ eval_gradient : bool, default=False
348
+ If True, the gradient of the log-marginal likelihood with respect
349
+ to the kernel hyperparameters at position theta is returned
350
+ additionally. If True, theta must not be None.
351
+
352
+ clone_kernel : bool, default=True
353
+ If True, the kernel attribute is copied. If False, the kernel
354
+ attribute is modified, but may result in a performance improvement.
355
+
356
+ Returns
357
+ -------
358
+ log_likelihood : float
359
+ Log-marginal likelihood of theta for training data.
360
+
361
+ log_likelihood_gradient : ndarray of shape (n_kernel_params,), \
362
+ optional
363
+ Gradient of the log-marginal likelihood with respect to the kernel
364
+ hyperparameters at position theta.
365
+ Only returned when `eval_gradient` is True.
366
+ """
367
+ if theta is None:
368
+ if eval_gradient:
369
+ raise ValueError("Gradient can only be evaluated for theta!=None")
370
+ return self.log_marginal_likelihood_value_
371
+
372
+ if clone_kernel:
373
+ kernel = self.kernel_.clone_with_theta(theta)
374
+ else:
375
+ kernel = self.kernel_
376
+ kernel.theta = theta
377
+
378
+ if eval_gradient:
379
+ K, K_gradient = kernel(self.X_train_, eval_gradient=True)
380
+ else:
381
+ K = kernel(self.X_train_)
382
+
383
+ # Compute log-marginal-likelihood Z and also store some temporaries
384
+ # which can be reused for computing Z's gradient
385
+ Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)
386
+
387
+ if not eval_gradient:
388
+ return Z
389
+
390
+ # Compute gradient based on Algorithm 5.1 of GPML
391
+ d_Z = np.empty(theta.shape[0])
392
+ # XXX: Get rid of the np.diag() in the next line
393
+ R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
394
+ C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
395
+ # Line 9: (use einsum to compute np.diag(C.T.dot(C))))
396
+ s_2 = (
397
+ -0.5
398
+ * (np.diag(K) - np.einsum("ij, ij -> j", C, C))
399
+ * (pi * (1 - pi) * (1 - 2 * pi))
400
+ ) # third derivative
401
+
402
+ for j in range(d_Z.shape[0]):
403
+ C = K_gradient[:, :, j] # Line 11
404
+ # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
405
+ s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())
406
+
407
+ b = C.dot(self.y_train_ - pi) # Line 13
408
+ s_3 = b - K.dot(R.dot(b)) # Line 14
409
+
410
+ d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
411
+
412
+ return Z, d_Z
413
+
414
+ def _posterior_mode(self, K, return_temporaries=False):
415
+ """Mode-finding for binary Laplace GPC and fixed kernel.
416
+
417
+ This approximates the posterior of the latent function values for given
418
+ inputs and target observations with a Gaussian approximation and uses
419
+ Newton's iteration to find the mode of this approximation.
420
+ """
421
+ # Based on Algorithm 3.1 of GPML
422
+
423
+ # If warm_start are enabled, we reuse the last solution for the
424
+ # posterior mode as initialization; otherwise, we initialize with 0
425
+ if (
426
+ self.warm_start
427
+ and hasattr(self, "f_cached")
428
+ and self.f_cached.shape == self.y_train_.shape
429
+ ):
430
+ f = self.f_cached
431
+ else:
432
+ f = np.zeros_like(self.y_train_, dtype=np.float64)
433
+
434
+ # Use Newton's iteration method to find mode of Laplace approximation
435
+ log_marginal_likelihood = -np.inf
436
+ for _ in range(self.max_iter_predict):
437
+ # Line 4
438
+ pi = expit(f)
439
+ W = pi * (1 - pi)
440
+ # Line 5
441
+ W_sr = np.sqrt(W)
442
+ W_sr_K = W_sr[:, np.newaxis] * K
443
+ B = np.eye(W.shape[0]) + W_sr_K * W_sr
444
+ L = cholesky(B, lower=True)
445
+ # Line 6
446
+ b = W * f + (self.y_train_ - pi)
447
+ # Line 7
448
+ a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
449
+ # Line 8
450
+ f = K.dot(a)
451
+
452
+ # Line 10: Compute log marginal likelihood in loop and use as
453
+ # convergence criterion
454
+ lml = (
455
+ -0.5 * a.T.dot(f)
456
+ - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum()
457
+ - np.log(np.diag(L)).sum()
458
+ )
459
+ # Check if we have converged (log marginal likelihood does
460
+ # not decrease)
461
+ # XXX: more complex convergence criterion
462
+ if lml - log_marginal_likelihood < 1e-10:
463
+ break
464
+ log_marginal_likelihood = lml
465
+
466
+ self.f_cached = f # Remember solution for later warm-starts
467
+ if return_temporaries:
468
+ return log_marginal_likelihood, (pi, W_sr, L, b, a)
469
+ else:
470
+ return log_marginal_likelihood
471
+
472
+ def _constrained_optimization(self, obj_func, initial_theta, bounds):
473
+ if self.optimizer == "fmin_l_bfgs_b":
474
+ opt_res = scipy.optimize.minimize(
475
+ obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
476
+ )
477
+ _check_optimize_result("lbfgs", opt_res)
478
+ theta_opt, func_min = opt_res.x, opt_res.fun
479
+ elif callable(self.optimizer):
480
+ theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
481
+ else:
482
+ raise ValueError("Unknown optimizer %s." % self.optimizer)
483
+
484
+ return theta_opt, func_min
485
+
486
+
487
+ class GaussianProcessClassifier(ClassifierMixin, BaseEstimator):
488
+ """Gaussian process classification (GPC) based on Laplace approximation.
489
+
490
+ The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.
491
+
492
+ Internally, the Laplace approximation is used for approximating the
493
+ non-Gaussian posterior by a Gaussian.
494
+
495
+ Currently, the implementation is restricted to using the logistic link
496
+ function. For multi-class classification, several binary one-versus rest
497
+ classifiers are fitted. Note that this class thus does not implement
498
+ a true multi-class Laplace approximation.
499
+
500
+ Read more in the :ref:`User Guide <gaussian_process>`.
501
+
502
+ .. versionadded:: 0.18
503
+
504
+ Parameters
505
+ ----------
506
+ kernel : kernel instance, default=None
507
+ The kernel specifying the covariance function of the GP. If None is
508
+ passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
509
+ the kernel's hyperparameters are optimized during fitting. Also kernel
510
+ cannot be a `CompoundKernel`.
511
+
512
+ optimizer : 'fmin_l_bfgs_b', callable or None, default='fmin_l_bfgs_b'
513
+ Can either be one of the internally supported optimizers for optimizing
514
+ the kernel's parameters, specified by a string, or an externally
515
+ defined optimizer passed as a callable. If a callable is passed, it
516
+ must have the signature::
517
+
518
+ def optimizer(obj_func, initial_theta, bounds):
519
+ # * 'obj_func' is the objective function to be maximized, which
520
+ # takes the hyperparameters theta as parameter and an
521
+ # optional flag eval_gradient, which determines if the
522
+ # gradient is returned additionally to the function value
523
+ # * 'initial_theta': the initial value for theta, which can be
524
+ # used by local optimizers
525
+ # * 'bounds': the bounds on the values of theta
526
+ ....
527
+ # Returned are the best found hyperparameters theta and
528
+ # the corresponding value of the target function.
529
+ return theta_opt, func_min
530
+
531
+ Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
532
+ is used. If None is passed, the kernel's parameters are kept fixed.
533
+ Available internal optimizers are::
534
+
535
+ 'fmin_l_bfgs_b'
536
+
537
+ n_restarts_optimizer : int, default=0
538
+ The number of restarts of the optimizer for finding the kernel's
539
+ parameters which maximize the log-marginal likelihood. The first run
540
+ of the optimizer is performed from the kernel's initial parameters,
541
+ the remaining ones (if any) from thetas sampled log-uniform randomly
542
+ from the space of allowed theta-values. If greater than 0, all bounds
543
+ must be finite. Note that n_restarts_optimizer=0 implies that one
544
+ run is performed.
545
+
546
+ max_iter_predict : int, default=100
547
+ The maximum number of iterations in Newton's method for approximating
548
+ the posterior during predict. Smaller values will reduce computation
549
+ time at the cost of worse results.
550
+
551
+ warm_start : bool, default=False
552
+ If warm-starts are enabled, the solution of the last Newton iteration
553
+ on the Laplace approximation of the posterior mode is used as
554
+ initialization for the next call of _posterior_mode(). This can speed
555
+ up convergence when _posterior_mode is called several times on similar
556
+ problems as in hyperparameter optimization. See :term:`the Glossary
557
+ <warm_start>`.
558
+
559
+ copy_X_train : bool, default=True
560
+ If True, a persistent copy of the training data is stored in the
561
+ object. Otherwise, just a reference to the training data is stored,
562
+ which might cause predictions to change if the data is modified
563
+ externally.
564
+
565
+ random_state : int, RandomState instance or None, default=None
566
+ Determines random number generation used to initialize the centers.
567
+ Pass an int for reproducible results across multiple function calls.
568
+ See :term:`Glossary <random_state>`.
569
+
570
+ multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest'
571
+ Specifies how multi-class classification problems are handled.
572
+ Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest',
573
+ one binary Gaussian process classifier is fitted for each class, which
574
+ is trained to separate this class from the rest. In 'one_vs_one', one
575
+ binary Gaussian process classifier is fitted for each pair of classes,
576
+ which is trained to separate these two classes. The predictions of
577
+ these binary predictors are combined into multi-class predictions.
578
+ Note that 'one_vs_one' does not support predicting probability
579
+ estimates.
580
+
581
+ n_jobs : int, default=None
582
+ The number of jobs to use for the computation: the specified
583
+ multiclass problems are computed in parallel.
584
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
585
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
586
+ for more details.
587
+
588
+ Attributes
589
+ ----------
590
+ base_estimator_ : ``Estimator`` instance
591
+ The estimator instance that defines the likelihood function
592
+ using the observed data.
593
+
594
+ kernel_ : kernel instance
595
+ The kernel used for prediction. In case of binary classification,
596
+ the structure of the kernel is the same as the one passed as parameter
597
+ but with optimized hyperparameters. In case of multi-class
598
+ classification, a CompoundKernel is returned which consists of the
599
+ different kernels used in the one-versus-rest classifiers.
600
+
601
+ log_marginal_likelihood_value_ : float
602
+ The log-marginal-likelihood of ``self.kernel_.theta``
603
+
604
+ classes_ : array-like of shape (n_classes,)
605
+ Unique class labels.
606
+
607
+ n_classes_ : int
608
+ The number of classes in the training data
609
+
610
+ n_features_in_ : int
611
+ Number of features seen during :term:`fit`.
612
+
613
+ .. versionadded:: 0.24
614
+
615
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
616
+ Names of features seen during :term:`fit`. Defined only when `X`
617
+ has feature names that are all strings.
618
+
619
+ .. versionadded:: 1.0
620
+
621
+ See Also
622
+ --------
623
+ GaussianProcessRegressor : Gaussian process regression (GPR).
624
+
625
+ References
626
+ ----------
627
+ .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
628
+ "Gaussian Processes for Machine Learning",
629
+ MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
630
+
631
+ Examples
632
+ --------
633
+ >>> from sklearn.datasets import load_iris
634
+ >>> from sklearn.gaussian_process import GaussianProcessClassifier
635
+ >>> from sklearn.gaussian_process.kernels import RBF
636
+ >>> X, y = load_iris(return_X_y=True)
637
+ >>> kernel = 1.0 * RBF(1.0)
638
+ >>> gpc = GaussianProcessClassifier(kernel=kernel,
639
+ ... random_state=0).fit(X, y)
640
+ >>> gpc.score(X, y)
641
+ 0.9866...
642
+ >>> gpc.predict_proba(X[:2,:])
643
+ array([[0.83548752, 0.03228706, 0.13222543],
644
+ [0.79064206, 0.06525643, 0.14410151]])
645
+ """
646
+
647
+ _parameter_constraints: dict = {
648
+ "kernel": [Kernel, None],
649
+ "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None],
650
+ "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")],
651
+ "max_iter_predict": [Interval(Integral, 1, None, closed="left")],
652
+ "warm_start": ["boolean"],
653
+ "copy_X_train": ["boolean"],
654
+ "random_state": ["random_state"],
655
+ "multi_class": [StrOptions({"one_vs_rest", "one_vs_one"})],
656
+ "n_jobs": [Integral, None],
657
+ }
658
+
659
+ def __init__(
660
+ self,
661
+ kernel=None,
662
+ *,
663
+ optimizer="fmin_l_bfgs_b",
664
+ n_restarts_optimizer=0,
665
+ max_iter_predict=100,
666
+ warm_start=False,
667
+ copy_X_train=True,
668
+ random_state=None,
669
+ multi_class="one_vs_rest",
670
+ n_jobs=None,
671
+ ):
672
+ self.kernel = kernel
673
+ self.optimizer = optimizer
674
+ self.n_restarts_optimizer = n_restarts_optimizer
675
+ self.max_iter_predict = max_iter_predict
676
+ self.warm_start = warm_start
677
+ self.copy_X_train = copy_X_train
678
+ self.random_state = random_state
679
+ self.multi_class = multi_class
680
+ self.n_jobs = n_jobs
681
+
682
+ @_fit_context(prefer_skip_nested_validation=True)
683
+ def fit(self, X, y):
684
+ """Fit Gaussian process classification model.
685
+
686
+ Parameters
687
+ ----------
688
+ X : array-like of shape (n_samples, n_features) or list of object
689
+ Feature vectors or other representations of training data.
690
+
691
+ y : array-like of shape (n_samples,)
692
+ Target values, must be binary.
693
+
694
+ Returns
695
+ -------
696
+ self : object
697
+ Returns an instance of self.
698
+ """
699
+ if isinstance(self.kernel, CompoundKernel):
700
+ raise ValueError("kernel cannot be a CompoundKernel")
701
+
702
+ if self.kernel is None or self.kernel.requires_vector_input:
703
+ X, y = self._validate_data(
704
+ X, y, multi_output=False, ensure_2d=True, dtype="numeric"
705
+ )
706
+ else:
707
+ X, y = self._validate_data(
708
+ X, y, multi_output=False, ensure_2d=False, dtype=None
709
+ )
710
+
711
+ self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
712
+ kernel=self.kernel,
713
+ optimizer=self.optimizer,
714
+ n_restarts_optimizer=self.n_restarts_optimizer,
715
+ max_iter_predict=self.max_iter_predict,
716
+ warm_start=self.warm_start,
717
+ copy_X_train=self.copy_X_train,
718
+ random_state=self.random_state,
719
+ )
720
+
721
+ self.classes_ = np.unique(y)
722
+ self.n_classes_ = self.classes_.size
723
+ if self.n_classes_ == 1:
724
+ raise ValueError(
725
+ "GaussianProcessClassifier requires 2 or more "
726
+ "distinct classes; got %d class (only class %s "
727
+ "is present)" % (self.n_classes_, self.classes_[0])
728
+ )
729
+ if self.n_classes_ > 2:
730
+ if self.multi_class == "one_vs_rest":
731
+ self.base_estimator_ = OneVsRestClassifier(
732
+ self.base_estimator_, n_jobs=self.n_jobs
733
+ )
734
+ elif self.multi_class == "one_vs_one":
735
+ self.base_estimator_ = OneVsOneClassifier(
736
+ self.base_estimator_, n_jobs=self.n_jobs
737
+ )
738
+ else:
739
+ raise ValueError("Unknown multi-class mode %s" % self.multi_class)
740
+
741
+ self.base_estimator_.fit(X, y)
742
+
743
+ if self.n_classes_ > 2:
744
+ self.log_marginal_likelihood_value_ = np.mean(
745
+ [
746
+ estimator.log_marginal_likelihood()
747
+ for estimator in self.base_estimator_.estimators_
748
+ ]
749
+ )
750
+ else:
751
+ self.log_marginal_likelihood_value_ = (
752
+ self.base_estimator_.log_marginal_likelihood()
753
+ )
754
+
755
+ return self
756
+
757
+ def predict(self, X):
758
+ """Perform classification on an array of test vectors X.
759
+
760
+ Parameters
761
+ ----------
762
+ X : array-like of shape (n_samples, n_features) or list of object
763
+ Query points where the GP is evaluated for classification.
764
+
765
+ Returns
766
+ -------
767
+ C : ndarray of shape (n_samples,)
768
+ Predicted target values for X, values are from ``classes_``.
769
+ """
770
+ check_is_fitted(self)
771
+
772
+ if self.kernel is None or self.kernel.requires_vector_input:
773
+ X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
774
+ else:
775
+ X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)
776
+
777
+ return self.base_estimator_.predict(X)
778
+
779
+ def predict_proba(self, X):
780
+ """Return probability estimates for the test vector X.
781
+
782
+ Parameters
783
+ ----------
784
+ X : array-like of shape (n_samples, n_features) or list of object
785
+ Query points where the GP is evaluated for classification.
786
+
787
+ Returns
788
+ -------
789
+ C : array-like of shape (n_samples, n_classes)
790
+ Returns the probability of the samples for each class in
791
+ the model. The columns correspond to the classes in sorted
792
+ order, as they appear in the attribute :term:`classes_`.
793
+ """
794
+ check_is_fitted(self)
795
+ if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
796
+ raise ValueError(
797
+ "one_vs_one multi-class mode does not support "
798
+ "predicting probability estimates. Use "
799
+ "one_vs_rest mode instead."
800
+ )
801
+
802
+ if self.kernel is None or self.kernel.requires_vector_input:
803
+ X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
804
+ else:
805
+ X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)
806
+
807
+ return self.base_estimator_.predict_proba(X)
808
+
809
+ @property
810
+ def kernel_(self):
811
+ """Return the kernel of the base estimator."""
812
+ if self.n_classes_ == 2:
813
+ return self.base_estimator_.kernel_
814
+ else:
815
+ return CompoundKernel(
816
+ [estimator.kernel_ for estimator in self.base_estimator_.estimators_]
817
+ )
818
+
819
+ def log_marginal_likelihood(
820
+ self, theta=None, eval_gradient=False, clone_kernel=True
821
+ ):
822
+ """Return log-marginal likelihood of theta for training data.
823
+
824
+ In the case of multi-class classification, the mean log-marginal
825
+ likelihood of the one-versus-rest classifiers are returned.
826
+
827
+ Parameters
828
+ ----------
829
+ theta : array-like of shape (n_kernel_params,), default=None
830
+ Kernel hyperparameters for which the log-marginal likelihood is
831
+ evaluated. In the case of multi-class classification, theta may
832
+ be the hyperparameters of the compound kernel or of an individual
833
+ kernel. In the latter case, all individual kernel get assigned the
834
+ same theta values. If None, the precomputed log_marginal_likelihood
835
+ of ``self.kernel_.theta`` is returned.
836
+
837
+ eval_gradient : bool, default=False
838
+ If True, the gradient of the log-marginal likelihood with respect
839
+ to the kernel hyperparameters at position theta is returned
840
+ additionally. Note that gradient computation is not supported
841
+ for non-binary classification. If True, theta must not be None.
842
+
843
+ clone_kernel : bool, default=True
844
+ If True, the kernel attribute is copied. If False, the kernel
845
+ attribute is modified, but may result in a performance improvement.
846
+
847
+ Returns
848
+ -------
849
+ log_likelihood : float
850
+ Log-marginal likelihood of theta for training data.
851
+
852
+ log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
853
+ Gradient of the log-marginal likelihood with respect to the kernel
854
+ hyperparameters at position theta.
855
+ Only returned when `eval_gradient` is True.
856
+ """
857
+ check_is_fitted(self)
858
+
859
+ if theta is None:
860
+ if eval_gradient:
861
+ raise ValueError("Gradient can only be evaluated for theta!=None")
862
+ return self.log_marginal_likelihood_value_
863
+
864
+ theta = np.asarray(theta)
865
+ if self.n_classes_ == 2:
866
+ return self.base_estimator_.log_marginal_likelihood(
867
+ theta, eval_gradient, clone_kernel=clone_kernel
868
+ )
869
+ else:
870
+ if eval_gradient:
871
+ raise NotImplementedError(
872
+ "Gradient of log-marginal-likelihood not implemented for "
873
+ "multi-class GPC."
874
+ )
875
+ estimators = self.base_estimator_.estimators_
876
+ n_dims = estimators[0].kernel_.n_dims
877
+ if theta.shape[0] == n_dims: # use same theta for all sub-kernels
878
+ return np.mean(
879
+ [
880
+ estimator.log_marginal_likelihood(
881
+ theta, clone_kernel=clone_kernel
882
+ )
883
+ for i, estimator in enumerate(estimators)
884
+ ]
885
+ )
886
+ elif theta.shape[0] == n_dims * self.classes_.shape[0]:
887
+ # theta for compound kernel
888
+ return np.mean(
889
+ [
890
+ estimator.log_marginal_likelihood(
891
+ theta[n_dims * i : n_dims * (i + 1)],
892
+ clone_kernel=clone_kernel,
893
+ )
894
+ for i, estimator in enumerate(estimators)
895
+ ]
896
+ )
897
+ else:
898
+ raise ValueError(
899
+ "Shape of theta must be either %d or %d. "
900
+ "Obtained theta with shape %d."
901
+ % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
902
+ )
venv/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py ADDED
@@ -0,0 +1,673 @@
1
+ """Gaussian processes regression."""
2
+
3
+ # Authors: Jan Hendrik Metzen <[email protected]>
4
+ # Modified by: Pete Green <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+ import warnings
8
+ from numbers import Integral, Real
9
+ from operator import itemgetter
10
+
11
+ import numpy as np
12
+ import scipy.optimize
13
+ from scipy.linalg import cho_solve, cholesky, solve_triangular
14
+
15
+ from ..base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context, clone
16
+ from ..preprocessing._data import _handle_zeros_in_scale
17
+ from ..utils import check_random_state
18
+ from ..utils._param_validation import Interval, StrOptions
19
+ from ..utils.optimize import _check_optimize_result
20
+ from .kernels import RBF, Kernel
21
+ from .kernels import ConstantKernel as C
22
+
23
+ GPR_CHOLESKY_LOWER = True
24
+
25
+
26
+ class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
27
+ """Gaussian process regression (GPR).
28
+
29
+ The implementation is based on Algorithm 2.1 of [RW2006]_.
30
+
31
+ In addition to standard scikit-learn estimator API,
32
+ :class:`GaussianProcessRegressor`:
33
+
34
+ * allows prediction without prior fitting (based on the GP prior)
35
+ * provides an additional method `sample_y(X)`, which evaluates samples
36
+ drawn from the GPR (prior or posterior) at given inputs
37
+ * exposes a method `log_marginal_likelihood(theta)`, which can be used
38
+ externally for other ways of selecting hyperparameters, e.g., via
39
+ Markov chain Monte Carlo.
40
+
41
+ To learn the difference between a point-estimate approach vs. a more
42
+ Bayesian modelling approach, refer to the example entitled
43
+ :ref:`sphx_glr_auto_examples_gaussian_process_plot_compare_gpr_krr.py`.
44
+
45
+ Read more in the :ref:`User Guide <gaussian_process>`.
46
+
47
+ .. versionadded:: 0.18
48
+
49
+ Parameters
50
+ ----------
51
+ kernel : kernel instance, default=None
52
+ The kernel specifying the covariance function of the GP. If None is
53
+ passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed")
54
+ * RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that
55
+ the kernel hyperparameters are optimized during fitting unless the
56
+ bounds are marked as "fixed".
57
+
58
+ alpha : float or ndarray of shape (n_samples,), default=1e-10
59
+ Value added to the diagonal of the kernel matrix during fitting.
60
+ This can prevent a potential numerical issue during fitting, by
61
+ ensuring that the calculated values form a positive definite matrix.
62
+ It can also be interpreted as the variance of additional Gaussian
63
+ measurement noise on the training observations. Note that this is
64
+ different from using a `WhiteKernel`. If an array is passed, it must
65
+ have the same number of entries as the data used for fitting and is
66
+ used as datapoint-dependent noise level. Allowing to specify the
67
+ noise level directly as a parameter is mainly for convenience and
68
+ for consistency with :class:`~sklearn.linear_model.Ridge`.
69
+
70
+ optimizer : "fmin_l_bfgs_b", callable or None, default="fmin_l_bfgs_b"
71
+ Can either be one of the internally supported optimizers for optimizing
72
+ the kernel's parameters, specified by a string, or an externally
73
+ defined optimizer passed as a callable. If a callable is passed, it
74
+ must have the signature::
75
+
76
+ def optimizer(obj_func, initial_theta, bounds):
77
+ # * 'obj_func': the objective function to be minimized, which
78
+ # takes the hyperparameters theta as a parameter and an
79
+ # optional flag eval_gradient, which determines if the
80
+ # gradient is returned additionally to the function value
81
+ # * 'initial_theta': the initial value for theta, which can be
82
+ # used by local optimizers
83
+ # * 'bounds': the bounds on the values of theta
84
+ ....
85
+ # Returned are the best found hyperparameters theta and
86
+ # the corresponding value of the target function.
87
+ return theta_opt, func_min
88
+
89
+ Per default, the L-BFGS-B algorithm from `scipy.optimize.minimize`
90
+ is used. If None is passed, the kernel's parameters are kept fixed.
91
+ Available internal optimizers are: `{'fmin_l_bfgs_b'}`.
92
+
93
+ n_restarts_optimizer : int, default=0
94
+ The number of restarts of the optimizer for finding the kernel's
95
+ parameters which maximize the log-marginal likelihood. The first run
96
+ of the optimizer is performed from the kernel's initial parameters,
97
+ the remaining ones (if any) from thetas sampled log-uniform randomly
98
+ from the space of allowed theta-values. If greater than 0, all bounds
99
+ must be finite. Note that `n_restarts_optimizer == 0` implies that one
100
+ run is performed.
101
+
102
+ normalize_y : bool, default=False
103
+ Whether or not to normalize the target values `y` by removing the mean
104
+ and scaling to unit-variance. This is recommended for cases where
105
+ zero-mean, unit-variance priors are used. Note that, in this
106
+ implementation, the normalisation is reversed before the GP predictions
107
+ are reported.
108
+
109
+ .. versionchanged:: 0.23
110
+
111
+ copy_X_train : bool, default=True
112
+ If True, a persistent copy of the training data is stored in the
113
+ object. Otherwise, just a reference to the training data is stored,
114
+ which might cause predictions to change if the data is modified
115
+ externally.
116
+
117
+ n_targets : int, default=None
118
+ The number of dimensions of the target values. Used to decide the number
119
+ of outputs when sampling from the prior distributions (i.e. calling
120
+ :meth:`sample_y` before :meth:`fit`). This parameter is ignored once
121
+ :meth:`fit` has been called.
122
+
123
+ .. versionadded:: 1.3
124
+
125
+ random_state : int, RandomState instance or None, default=None
126
+ Determines random number generation used to initialize the centers.
127
+ Pass an int for reproducible results across multiple function calls.
128
+ See :term:`Glossary <random_state>`.
129
+
130
+ Attributes
131
+ ----------
132
+ X_train_ : array-like of shape (n_samples, n_features) or list of object
133
+ Feature vectors or other representations of training data (also
134
+ required for prediction).
135
+
136
+ y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets)
137
+ Target values in training data (also required for prediction).
138
+
139
+ kernel_ : kernel instance
140
+ The kernel used for prediction. The structure of the kernel is the
141
+ same as the one passed as parameter but with optimized hyperparameters.
142
+
143
+ L_ : array-like of shape (n_samples, n_samples)
144
+ Lower-triangular Cholesky decomposition of the kernel in ``X_train_``.
145
+
146
+ alpha_ : array-like of shape (n_samples,)
147
+ Dual coefficients of training data points in kernel space.
148
+
149
+ log_marginal_likelihood_value_ : float
150
+ The log-marginal-likelihood of ``self.kernel_.theta``.
151
+
152
+ n_features_in_ : int
153
+ Number of features seen during :term:`fit`.
154
+
155
+ .. versionadded:: 0.24
156
+
157
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
158
+ Names of features seen during :term:`fit`. Defined only when `X`
159
+ has feature names that are all strings.
160
+
161
+ .. versionadded:: 1.0
162
+
163
+ See Also
164
+ --------
165
+ GaussianProcessClassifier : Gaussian process classification (GPC)
166
+ based on Laplace approximation.
167
+
168
+ References
169
+ ----------
170
+ .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
171
+ "Gaussian Processes for Machine Learning",
172
+ MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
173
+
174
+ Examples
175
+ --------
176
+ >>> from sklearn.datasets import make_friedman2
177
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
178
+ >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
179
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
180
+ >>> kernel = DotProduct() + WhiteKernel()
181
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
182
+ ... random_state=0).fit(X, y)
183
+ >>> gpr.score(X, y)
184
+ 0.3680...
185
+ >>> gpr.predict(X[:2,:], return_std=True)
186
+ (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
187
+ """
188
+
189
+ _parameter_constraints: dict = {
190
+ "kernel": [None, Kernel],
191
+ "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray],
192
+ "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None],
193
+ "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")],
194
+ "normalize_y": ["boolean"],
195
+ "copy_X_train": ["boolean"],
196
+ "n_targets": [Interval(Integral, 1, None, closed="left"), None],
197
+ "random_state": ["random_state"],
198
+ }
199
+
200
+ def __init__(
201
+ self,
202
+ kernel=None,
203
+ *,
204
+ alpha=1e-10,
205
+ optimizer="fmin_l_bfgs_b",
206
+ n_restarts_optimizer=0,
207
+ normalize_y=False,
208
+ copy_X_train=True,
209
+ n_targets=None,
210
+ random_state=None,
211
+ ):
212
+ self.kernel = kernel
213
+ self.alpha = alpha
214
+ self.optimizer = optimizer
215
+ self.n_restarts_optimizer = n_restarts_optimizer
216
+ self.normalize_y = normalize_y
217
+ self.copy_X_train = copy_X_train
218
+ self.n_targets = n_targets
219
+ self.random_state = random_state
220
+
221
+ @_fit_context(prefer_skip_nested_validation=True)
222
+ def fit(self, X, y):
223
+ """Fit Gaussian process regression model.
224
+
225
+ Parameters
226
+ ----------
227
+ X : array-like of shape (n_samples, n_features) or list of object
228
+ Feature vectors or other representations of training data.
229
+
230
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
231
+ Target values.
232
+
233
+ Returns
234
+ -------
235
+ self : object
236
+ GaussianProcessRegressor class instance.
237
+ """
238
+ if self.kernel is None: # Use an RBF kernel as default
239
+ self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
240
+ 1.0, length_scale_bounds="fixed"
241
+ )
242
+ else:
243
+ self.kernel_ = clone(self.kernel)
244
+
245
+ self._rng = check_random_state(self.random_state)
246
+
247
+ if self.kernel_.requires_vector_input:
248
+ dtype, ensure_2d = "numeric", True
249
+ else:
250
+ dtype, ensure_2d = None, False
251
+ X, y = self._validate_data(
252
+ X,
253
+ y,
254
+ multi_output=True,
255
+ y_numeric=True,
256
+ ensure_2d=ensure_2d,
257
+ dtype=dtype,
258
+ )
259
+
260
+ n_targets_seen = y.shape[1] if y.ndim > 1 else 1
261
+ if self.n_targets is not None and n_targets_seen != self.n_targets:
262
+ raise ValueError(
263
+ "The number of targets seen in `y` is different from the parameter "
264
+ f"`n_targets`. Got {n_targets_seen} != {self.n_targets}."
265
+ )
266
+
267
+ # Normalize target value
268
+ if self.normalize_y:
269
+ self._y_train_mean = np.mean(y, axis=0)
270
+ self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)
271
+
272
+ # Remove mean and make unit variance
273
+ y = (y - self._y_train_mean) / self._y_train_std
274
+
275
+ else:
276
+ shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1
277
+ self._y_train_mean = np.zeros(shape=shape_y_stats)
278
+ self._y_train_std = np.ones(shape=shape_y_stats)
279
+
280
+ if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
281
+ if self.alpha.shape[0] == 1:
282
+ self.alpha = self.alpha[0]
283
+ else:
284
+ raise ValueError(
285
+ "alpha must be a scalar or an array with same number of "
286
+ f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})"
287
+ )
288
+
289
+ self.X_train_ = np.copy(X) if self.copy_X_train else X
290
+ self.y_train_ = np.copy(y) if self.copy_X_train else y
291
+
292
+ if self.optimizer is not None and self.kernel_.n_dims > 0:
293
+ # Choose hyperparameters based on maximizing the log-marginal
294
+ # likelihood (potentially starting from several initial values)
295
+ def obj_func(theta, eval_gradient=True):
296
+ if eval_gradient:
297
+ lml, grad = self.log_marginal_likelihood(
298
+ theta, eval_gradient=True, clone_kernel=False
299
+ )
300
+ return -lml, -grad
301
+ else:
302
+ return -self.log_marginal_likelihood(theta, clone_kernel=False)
303
+
304
+ # First optimize starting from theta specified in kernel
305
+ optima = [
306
+ (
307
+ self._constrained_optimization(
308
+ obj_func, self.kernel_.theta, self.kernel_.bounds
309
+ )
310
+ )
311
+ ]
312
+
313
+ # Additional runs are performed from initial theta values chosen
+ # log-uniformly at random within the bounds
315
+ if self.n_restarts_optimizer > 0:
316
+ if not np.isfinite(self.kernel_.bounds).all():
317
+ raise ValueError(
318
+ "Multiple optimizer restarts (n_restarts_optimizer>0) "
319
+ "requires that all bounds are finite."
320
+ )
321
+ bounds = self.kernel_.bounds
322
+ for iteration in range(self.n_restarts_optimizer):
323
+ theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
324
+ optima.append(
325
+ self._constrained_optimization(obj_func, theta_initial, bounds)
326
+ )
327
+ # Select result from run with minimal (negative) log-marginal
328
+ # likelihood
329
+ lml_values = list(map(itemgetter(1), optima))
330
+ self.kernel_.theta = optima[np.argmin(lml_values)][0]
331
+ self.kernel_._check_bounds_params()
332
+
333
+ self.log_marginal_likelihood_value_ = -np.min(lml_values)
334
+ else:
335
+ self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
336
+ self.kernel_.theta, clone_kernel=False
337
+ )
338
+
339
+ # Precompute quantities required for predictions which are independent
340
+ # of actual query points
341
+ # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
342
+ K = self.kernel_(self.X_train_)
343
+ K[np.diag_indices_from(K)] += self.alpha
344
+ try:
345
+ self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
346
+ except np.linalg.LinAlgError as exc:
347
+ exc.args = (
348
+ (
349
+ f"The kernel, {self.kernel_}, is not returning a positive "
350
+ "definite matrix. Try gradually increasing the 'alpha' "
351
+ "parameter of your GaussianProcessRegressor estimator."
352
+ ),
353
+ ) + exc.args
354
+ raise
355
+ # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
356
+ self.alpha_ = cho_solve(
357
+ (self.L_, GPR_CHOLESKY_LOWER),
358
+ self.y_train_,
359
+ check_finite=False,
360
+ )
361
+ return self
362
+
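A minimal usage sketch of the fit path above, using the public scikit-learn API; the toy data and hyperparameter choices are made up for illustration:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# Toy 1-D regression data (illustrative only).
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(30, 1))
y = np.sin(X).ravel() + 0.1 * rng.randn(30)

# fit() clones the kernel into kernel_ and maximizes the log-marginal
# likelihood, restarting the optimizer from two random initial thetas.
gpr = GaussianProcessRegressor(
    kernel=C(1.0) * RBF(length_scale=1.0),
    alpha=1e-2,
    n_restarts_optimizer=2,
    random_state=0,
)
gpr.fit(X, y)
print(gpr.kernel_)                         # kernel with optimized hyperparameters
print(gpr.log_marginal_likelihood_value_)  # LML at the selected theta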
363
+ def predict(self, X, return_std=False, return_cov=False):
364
+ """Predict using the Gaussian process regression model.
365
+
366
+ We can also predict based on an unfitted model by using the GP prior.
367
+ In addition to the mean of the predictive distribution, optionally also
368
+ returns its standard deviation (`return_std=True`) or covariance
369
+ (`return_cov=True`). Note that at most one of the two can be requested.
370
+
371
+ Parameters
372
+ ----------
373
+ X : array-like of shape (n_samples, n_features) or list of object
374
+ Query points where the GP is evaluated.
375
+
376
+ return_std : bool, default=False
377
+ If True, the standard-deviation of the predictive distribution at
378
+ the query points is returned along with the mean.
379
+
380
+ return_cov : bool, default=False
381
+ If True, the covariance of the joint predictive distribution at
382
+ the query points is returned along with the mean.
383
+
384
+ Returns
385
+ -------
386
+ y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
387
+ Mean of predictive distribution at query points.
388
+
389
+ y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
390
+ Standard deviation of predictive distribution at query points.
391
+ Only returned when `return_std` is True.
392
+
393
+ y_cov : ndarray of shape (n_samples, n_samples) or \
394
+ (n_samples, n_samples, n_targets), optional
395
+ Covariance of joint predictive distribution at query points.
396
+ Only returned when `return_cov` is True.
397
+ """
398
+ if return_std and return_cov:
399
+ raise RuntimeError(
400
+ "At most one of return_std or return_cov can be requested."
401
+ )
402
+
403
+ if self.kernel is None or self.kernel.requires_vector_input:
404
+ dtype, ensure_2d = "numeric", True
405
+ else:
406
+ dtype, ensure_2d = None, False
407
+
408
+ X = self._validate_data(X, ensure_2d=ensure_2d, dtype=dtype, reset=False)
409
+
410
+ if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
411
+ if self.kernel is None:
412
+ kernel = C(1.0, constant_value_bounds="fixed") * RBF(
413
+ 1.0, length_scale_bounds="fixed"
414
+ )
415
+ else:
416
+ kernel = self.kernel
417
+
418
+ n_targets = self.n_targets if self.n_targets is not None else 1
419
+ y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze()
420
+
421
+ if return_cov:
422
+ y_cov = kernel(X)
423
+ if n_targets > 1:
424
+ y_cov = np.repeat(
425
+ np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1
426
+ )
427
+ return y_mean, y_cov
428
+ elif return_std:
429
+ y_var = kernel.diag(X)
430
+ if n_targets > 1:
431
+ y_var = np.repeat(
432
+ np.expand_dims(y_var, -1), repeats=n_targets, axis=-1
433
+ )
434
+ return y_mean, np.sqrt(y_var)
435
+ else:
436
+ return y_mean
437
+ else: # Predict based on GP posterior
438
+ # Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha
439
+ K_trans = self.kernel_(X, self.X_train_)
440
+ y_mean = K_trans @ self.alpha_
441
+
442
+ # undo normalisation
443
+ y_mean = self._y_train_std * y_mean + self._y_train_mean
444
+
445
+ # if y_mean has shape (n_samples, 1), reshape to (n_samples,)
446
+ if y_mean.ndim > 1 and y_mean.shape[1] == 1:
447
+ y_mean = np.squeeze(y_mean, axis=1)
448
+
449
+ # Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T
450
+ V = solve_triangular(
451
+ self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False
452
+ )
453
+
454
+ if return_cov:
455
+ # Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v
456
+ y_cov = self.kernel_(X) - V.T @ V
457
+
458
+ # undo normalisation
459
+ y_cov = np.outer(y_cov, self._y_train_std**2).reshape(
460
+ *y_cov.shape, -1
461
+ )
462
+ # if y_cov has shape (n_samples, n_samples, 1), reshape to
463
+ # (n_samples, n_samples)
464
+ if y_cov.shape[2] == 1:
465
+ y_cov = np.squeeze(y_cov, axis=2)
466
+
467
+ return y_mean, y_cov
468
+ elif return_std:
469
+ # Compute variance of predictive distribution
470
+ # Use einsum to avoid explicitly forming the large matrix
471
+ # V^T @ V just to extract its diagonal afterward.
472
+ y_var = self.kernel_.diag(X).copy()
473
+ y_var -= np.einsum("ij,ji->i", V.T, V)
474
+
475
+ # Check if any of the variances is negative because of
476
+ # numerical issues. If yes: set the variance to 0.
477
+ y_var_negative = y_var < 0
478
+ if np.any(y_var_negative):
479
+ warnings.warn(
480
+ "Predicted variances smaller than 0. "
481
+ "Setting those variances to 0."
482
+ )
483
+ y_var[y_var_negative] = 0.0
484
+
485
+ # undo normalisation
486
+ y_var = np.outer(y_var, self._y_train_std**2).reshape(
487
+ *y_var.shape, -1
488
+ )
489
+
490
+ # if y_var has shape (n_samples, 1), reshape to (n_samples,)
491
+ if y_var.shape[1] == 1:
492
+ y_var = np.squeeze(y_var, axis=1)
493
+
494
+ return y_mean, np.sqrt(y_var)
495
+ else:
496
+ return y_mean
497
+
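As a small illustration of the two branches above (prior prediction before fit, posterior prediction after), with made-up inputs:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X_query = np.linspace(0, 5, 10).reshape(-1, 1)
gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0))

# Unfitted estimator: predictions come from the GP prior
# (zero mean, standard deviation given by the kernel diagonal).
prior_mean, prior_std = gpr.predict(X_query, return_std=True)

# After fit(), the same call returns the posterior mean and standard deviation.
X_train = np.array([[1.0], [3.0], [4.0]])
y_train = np.array([0.5, -0.2, 0.3])
gpr.fit(X_train, y_train)
post_mean, post_std = gpr.predict(X_query, return_std=True)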
498
+ def sample_y(self, X, n_samples=1, random_state=0):
499
+ """Draw samples from Gaussian process and evaluate at X.
500
+
501
+ Parameters
502
+ ----------
503
+ X : array-like of shape (n_samples_X, n_features) or list of object
504
+ Query points where the GP is evaluated.
505
+
506
+ n_samples : int, default=1
507
+ Number of samples drawn from the Gaussian process per query point.
508
+
509
+ random_state : int, RandomState instance or None, default=0
510
+ Determines random number generation to randomly draw samples.
511
+ Pass an int for reproducible results across multiple function
512
+ calls.
513
+ See :term:`Glossary <random_state>`.
514
+
515
+ Returns
516
+ -------
517
+ y_samples : ndarray of shape (n_samples_X, n_samples), or \
518
+ (n_samples_X, n_targets, n_samples)
519
+ Values of n_samples samples drawn from Gaussian process and
520
+ evaluated at query points.
521
+ """
522
+ rng = check_random_state(random_state)
523
+
524
+ y_mean, y_cov = self.predict(X, return_cov=True)
525
+ if y_mean.ndim == 1:
526
+ y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
527
+ else:
528
+ y_samples = [
529
+ rng.multivariate_normal(
530
+ y_mean[:, target], y_cov[..., target], n_samples
531
+ ).T[:, np.newaxis]
532
+ for target in range(y_mean.shape[1])
533
+ ]
534
+ y_samples = np.hstack(y_samples)
535
+ return y_samples
536
+
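A short sketch of sample_y with made-up query points, drawing functions first from the prior and then from the posterior:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X_query = np.linspace(0, 5, 25).reshape(-1, 1)
gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0))

# Three functions drawn from the prior; shape (n_query, n_samples) = (25, 3).
prior_draws = gpr.sample_y(X_query, n_samples=3, random_state=0)

# After conditioning on two observations, the draws come from the posterior.
gpr.fit(np.array([[1.0], [4.0]]), np.array([0.0, 1.0]))
posterior_draws = gpr.sample_y(X_query, n_samples=3, random_state=0)
print(prior_draws.shape, posterior_draws.shape)  # (25, 3) (25, 3)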
537
+ def log_marginal_likelihood(
538
+ self, theta=None, eval_gradient=False, clone_kernel=True
539
+ ):
540
+ """Return log-marginal likelihood of theta for training data.
541
+
542
+ Parameters
543
+ ----------
544
+ theta : array-like of shape (n_kernel_params,) default=None
545
+ Kernel hyperparameters for which the log-marginal likelihood is
546
+ evaluated. If None, the precomputed log_marginal_likelihood
547
+ of ``self.kernel_.theta`` is returned.
548
+
549
+ eval_gradient : bool, default=False
550
+ If True, the gradient of the log-marginal likelihood with respect
551
+ to the kernel hyperparameters at position theta is returned
552
+ additionally. If True, theta must not be None.
553
+
554
+ clone_kernel : bool, default=True
555
+ If True, the kernel attribute is copied. If False, the kernel
556
+ attribute is modified, but may result in a performance improvement.
557
+
558
+ Returns
559
+ -------
560
+ log_likelihood : float
561
+ Log-marginal likelihood of theta for training data.
562
+
563
+ log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
564
+ Gradient of the log-marginal likelihood with respect to the kernel
565
+ hyperparameters at position theta.
566
+ Only returned when eval_gradient is True.
567
+ """
568
+ if theta is None:
569
+ if eval_gradient:
570
+ raise ValueError("Gradient can only be evaluated for theta!=None")
571
+ return self.log_marginal_likelihood_value_
572
+
573
+ if clone_kernel:
574
+ kernel = self.kernel_.clone_with_theta(theta)
575
+ else:
576
+ kernel = self.kernel_
577
+ kernel.theta = theta
578
+
579
+ if eval_gradient:
580
+ K, K_gradient = kernel(self.X_train_, eval_gradient=True)
581
+ else:
582
+ K = kernel(self.X_train_)
583
+
584
+ # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
585
+ K[np.diag_indices_from(K)] += self.alpha
586
+ try:
587
+ L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
588
+ except np.linalg.LinAlgError:
589
+ return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf
590
+
591
+ # Support multi-dimensional output of self.y_train_
592
+ y_train = self.y_train_
593
+ if y_train.ndim == 1:
594
+ y_train = y_train[:, np.newaxis]
595
+
596
+ # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
597
+ alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False)
598
+
599
+ # Alg 2.1, page 19, line 7
600
+ # -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi)
601
+ # y is originally thought to be a (1, n_samples) row vector. However,
602
+ # in the multi-output case, y is of shape (n_samples, n_outputs) and we need to compute
603
+ # y^T . alpha for each output, independently using einsum. Thus, it
604
+ # is equivalent to:
605
+ # for output_idx in range(n_outputs):
606
+ # log_likelihood_dims[output_idx] = (
607
+ # y_train[:, [output_idx]] @ alpha[:, [output_idx]]
608
+ # )
609
+ log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
610
+ log_likelihood_dims -= np.log(np.diag(L)).sum()
611
+ log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
612
+ # the log likelihood is summed up across the outputs
613
+ log_likelihood = log_likelihood_dims.sum(axis=-1)
614
+
615
+ if eval_gradient:
616
+ # Eq. 5.9, p. 114, and footnote 5 in p. 114
617
+ # 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient)
618
+ # alpha is supposed to be a vector of (n_samples,) elements. With
619
+ # multioutputs, alpha is a matrix of size (n_samples, n_outputs).
620
+ # Therefore, we want to construct a matrix of
621
+ # (n_samples, n_samples, n_outputs) equivalent to
622
+ # for output_idx in range(n_outputs):
623
+ # output_alpha = alpha[:, [output_idx]]
624
+ # inner_term[..., output_idx] = output_alpha @ output_alpha.T
625
+ inner_term = np.einsum("ik,jk->ijk", alpha, alpha)
626
+ # compute K^-1 of shape (n_samples, n_samples)
627
+ K_inv = cho_solve(
628
+ (L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False
629
+ )
630
+ # create a new axis to use broadcasting between inner_term and
631
+ # K_inv
632
+ inner_term -= K_inv[..., np.newaxis]
633
+ # Since we are interested about the trace of
634
+ # inner_term @ K_gradient, we don't explicitly compute the
635
+ # matrix-by-matrix operation and instead use an einsum. Therefore
636
+ # it is equivalent to:
637
+ # for param_idx in range(n_kernel_params):
638
+ # for output_idx in range(n_output):
639
+ # log_likelihood_gradient_dims[param_idx, output_idx] = (
640
+ # inner_term[..., output_idx] @
641
+ # K_gradient[..., param_idx]
642
+ # )
643
+ log_likelihood_gradient_dims = 0.5 * np.einsum(
644
+ "ijl,jik->kl", inner_term, K_gradient
645
+ )
646
+ # the log likelihood gradient is summed up across the outputs
647
+ log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1)
648
+
649
+ if eval_gradient:
650
+ return log_likelihood, log_likelihood_gradient
651
+ else:
652
+ return log_likelihood
653
+
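An illustrative call pattern for log_marginal_likelihood (toy data; theta here is the log-transformed length scale of the RBF kernel used):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X = np.array([[1.0], [3.0], [5.0]])
y = np.array([0.0, 1.0, 0.5])
gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0)).fit(X, y)

# With no arguments, the precomputed value at the fitted kernel_.theta is returned.
print(gpr.log_marginal_likelihood())

# At an arbitrary theta, optionally with the gradient used by the optimizer.
theta = np.log([2.0])
lml, grad = gpr.log_marginal_likelihood(theta, eval_gradient=True)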
654
+ def _constrained_optimization(self, obj_func, initial_theta, bounds):
655
+ if self.optimizer == "fmin_l_bfgs_b":
656
+ opt_res = scipy.optimize.minimize(
657
+ obj_func,
658
+ initial_theta,
659
+ method="L-BFGS-B",
660
+ jac=True,
661
+ bounds=bounds,
662
+ )
663
+ _check_optimize_result("lbfgs", opt_res)
664
+ theta_opt, func_min = opt_res.x, opt_res.fun
665
+ elif callable(self.optimizer):
666
+ theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
667
+ else:
668
+ raise ValueError(f"Unknown optimizer {self.optimizer}.")
669
+
670
+ return theta_opt, func_min
671
+
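Since `_constrained_optimization` also accepts a user-supplied callable, here is a hedged sketch of plugging one in (the name `my_optimizer` is made up; it simply wraps scipy the same way the default branch does and must return (theta_opt, func_min)):

import numpy as np
from scipy.optimize import minimize
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

def my_optimizer(obj_func, initial_theta, bounds):
    # obj_func(theta) returns (negative LML, gradient); any routine that
    # hands back (theta_opt, func_min) is accepted.
    res = minimize(obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds)
    return res.x, res.fun

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(20, 1))
y = np.sin(X).ravel()
gpr = GaussianProcessRegressor(kernel=RBF(1.0), optimizer=my_optimizer).fit(X, y)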
672
+ def _more_tags(self):
673
+ return {"requires_fit": False}
venv/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py ADDED
@@ -0,0 +1,2415 @@
1
+ """
2
+ The :mod:`sklearn.gaussian_process.kernels` module implements a set of kernels that
3
+ can be combined by operators and used in Gaussian processes.
4
+ """
5
+
6
+ # Kernels for Gaussian process regression and classification.
7
+ #
8
+ # The kernels in this module allow kernel-engineering, i.e., they can be
9
+ # combined via the "+" and "*" operators or be exponentiated with a scalar
10
+ # via "**". These sum and product expressions can also contain scalar values,
11
+ # which are automatically converted to a constant kernel.
12
+ #
13
+ # All kernels allow (analytic) gradient-based hyperparameter optimization.
14
+ # The space of hyperparameters can be specified by giving lower and upper
15
+ # boundaries for the value of each hyperparameter (the search space is thus
16
+ # rectangular). Instead of specifying bounds, hyperparameters can also be
17
+ # declared to be "fixed", which causes these hyperparameters to be excluded from
18
+ # optimization.
19
+
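A brief illustration of the kernel algebra described in the comment above (operator overloading on Kernel objects; the printed reprs are indicative):

from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# "+" and "*" build Sum and Product kernels, scalars are promoted to a
# ConstantKernel, and "**" with a scalar builds an Exponentiation kernel.
kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
print(kernel)          # 1**2 * RBF(length_scale=1) + WhiteKernel(noise_level=0.1)
print(RBF(1.0) ** 2)   # RBF(length_scale=1) ** 2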
20
+
21
+ # Author: Jan Hendrik Metzen <[email protected]>
22
+ # License: BSD 3 clause
23
+
24
+ # Note: this module is strongly inspired by the kernel module of the george
25
+ # package.
26
+
27
+ import math
28
+ import warnings
29
+ from abc import ABCMeta, abstractmethod
30
+ from collections import namedtuple
31
+ from inspect import signature
32
+
33
+ import numpy as np
34
+ from scipy.spatial.distance import cdist, pdist, squareform
35
+ from scipy.special import gamma, kv
36
+
37
+ from ..base import clone
38
+ from ..exceptions import ConvergenceWarning
39
+ from ..metrics.pairwise import pairwise_kernels
40
+ from ..utils.validation import _num_samples
41
+
42
+
43
+ def _check_length_scale(X, length_scale):
44
+ length_scale = np.squeeze(length_scale).astype(float)
45
+ if np.ndim(length_scale) > 1:
46
+ raise ValueError("length_scale cannot be of dimension greater than 1")
47
+ if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
48
+ raise ValueError(
49
+ "Anisotropic kernel must have the same number of "
50
+ "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1])
51
+ )
52
+ return length_scale
53
+
54
+
55
+ class Hyperparameter(
56
+ namedtuple(
57
+ "Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed")
58
+ )
59
+ ):
60
+ """A kernel hyperparameter's specification in form of a namedtuple.
61
+
62
+ .. versionadded:: 0.18
63
+
64
+ Attributes
65
+ ----------
66
+ name : str
67
+ The name of the hyperparameter. Note that a kernel using a
68
+ hyperparameter with name "x" must have the attributes self.x and
69
+ self.x_bounds
70
+
71
+ value_type : str
72
+ The type of the hyperparameter. Currently, only "numeric"
73
+ hyperparameters are supported.
74
+
75
+ bounds : pair of floats >= 0 or "fixed"
76
+ The lower and upper bound on the parameter. If n_elements>1, a pair
77
+ of 1d arrays with n_elements each may be given alternatively. If
78
+ the string "fixed" is passed as bounds, the hyperparameter's value
79
+ cannot be changed.
80
+
81
+ n_elements : int, default=1
82
+ The number of elements of the hyperparameter value. Defaults to 1,
83
+ which corresponds to a scalar hyperparameter. n_elements > 1
84
+ corresponds to a hyperparameter which is vector-valued,
85
+ such as, e.g., anisotropic length-scales.
86
+
87
+ fixed : bool, default=None
88
+ Whether the value of this hyperparameter is fixed, i.e., cannot be
89
+ changed during hyperparameter tuning. If None is passed, the "fixed" is
90
+ derived based on the given bounds.
91
+
92
+ Examples
93
+ --------
94
+ >>> from sklearn.gaussian_process.kernels import ConstantKernel
95
+ >>> from sklearn.datasets import make_friedman2
96
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
97
+ >>> from sklearn.gaussian_process.kernels import Hyperparameter
98
+ >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
99
+ >>> kernel = ConstantKernel(constant_value=1.0,
100
+ ... constant_value_bounds=(0.0, 10.0))
101
+
102
+ We can access each hyperparameter:
103
+
104
+ >>> for hyperparameter in kernel.hyperparameters:
105
+ ... print(hyperparameter)
106
+ Hyperparameter(name='constant_value', value_type='numeric',
107
+ bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)
108
+
109
+ >>> params = kernel.get_params()
110
+ >>> for key in sorted(params): print(f"{key} : {params[key]}")
111
+ constant_value : 1.0
112
+ constant_value_bounds : (0.0, 10.0)
113
+ """
114
+
115
+ # A raw namedtuple is very memory efficient as it packs the attributes
+ # in a struct to get rid of the __dict__ of attributes; in particular it
+ # does not copy the string for the keys on each instance.
+ # Deriving a namedtuple subclass just to introduce an __init__ method
+ # would also reintroduce the __dict__ on the instance, so instead we tell
+ # the Python interpreter that this subclass uses static __slots__ rather
+ # than dynamic attributes. Furthermore, we don't need any additional slot
+ # in the subclass, so we set __slots__ to the empty tuple.
123
+ __slots__ = ()
124
+
125
+ def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
126
+ if not isinstance(bounds, str) or bounds != "fixed":
127
+ bounds = np.atleast_2d(bounds)
128
+ if n_elements > 1: # vector-valued parameter
129
+ if bounds.shape[0] == 1:
130
+ bounds = np.repeat(bounds, n_elements, 0)
131
+ elif bounds.shape[0] != n_elements:
132
+ raise ValueError(
133
+ "Bounds on %s should have either 1 or "
134
+ "%d dimensions. Given are %d"
135
+ % (name, n_elements, bounds.shape[0])
136
+ )
137
+
138
+ if fixed is None:
139
+ fixed = isinstance(bounds, str) and bounds == "fixed"
140
+ return super(Hyperparameter, cls).__new__(
141
+ cls, name, value_type, bounds, n_elements, fixed
142
+ )
143
+
144
+ # This is mainly a testing utility to check that two hyperparameters
145
+ # are equal.
146
+ def __eq__(self, other):
147
+ return (
148
+ self.name == other.name
149
+ and self.value_type == other.value_type
150
+ and np.all(self.bounds == other.bounds)
151
+ and self.n_elements == other.n_elements
152
+ and self.fixed == other.fixed
153
+ )
154
+
155
+
156
+ class Kernel(metaclass=ABCMeta):
157
+ """Base class for all kernels.
158
+
159
+ .. versionadded:: 0.18
160
+
161
+ Examples
162
+ --------
163
+ >>> from sklearn.gaussian_process.kernels import Kernel, RBF
164
+ >>> import numpy as np
165
+ >>> class CustomKernel(Kernel):
166
+ ... def __init__(self, length_scale=1.0):
167
+ ... self.length_scale = length_scale
168
+ ... def __call__(self, X, Y=None):
169
+ ... if Y is None:
170
+ ... Y = X
171
+ ... return np.inner(X, X if Y is None else Y) ** 2
172
+ ... def diag(self, X):
173
+ ... return np.ones(X.shape[0])
174
+ ... def is_stationary(self):
175
+ ... return True
176
+ >>> kernel = CustomKernel(length_scale=2.0)
177
+ >>> X = np.array([[1, 2], [3, 4]])
178
+ >>> print(kernel(X))
179
+ [[ 25 121]
180
+ [121 625]]
181
+ """
182
+
183
+ def get_params(self, deep=True):
184
+ """Get parameters of this kernel.
185
+
186
+ Parameters
187
+ ----------
188
+ deep : bool, default=True
189
+ If True, will return the parameters for this estimator and
190
+ contained subobjects that are estimators.
191
+
192
+ Returns
193
+ -------
194
+ params : dict
195
+ Parameter names mapped to their values.
196
+ """
197
+ params = dict()
198
+
199
+ # introspect the constructor arguments to find the model parameters
200
+ # to represent
201
+ cls = self.__class__
202
+ init = getattr(cls.__init__, "deprecated_original", cls.__init__)
203
+ init_sign = signature(init)
204
+ args, varargs = [], []
205
+ for parameter in init_sign.parameters.values():
206
+ if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self":
207
+ args.append(parameter.name)
208
+ if parameter.kind == parameter.VAR_POSITIONAL:
209
+ varargs.append(parameter.name)
210
+
211
+ if len(varargs) != 0:
212
+ raise RuntimeError(
213
+ "scikit-learn kernels should always "
214
+ "specify their parameters in the signature"
215
+ " of their __init__ (no varargs)."
216
+ " %s doesn't follow this convention." % (cls,)
217
+ )
218
+ for arg in args:
219
+ params[arg] = getattr(self, arg)
220
+
221
+ return params
222
+
223
+ def set_params(self, **params):
224
+ """Set the parameters of this kernel.
225
+
226
+ The method works on simple kernels as well as on nested kernels.
227
+ The latter have parameters of the form ``<component>__<parameter>``
228
+ so that it's possible to update each component of a nested object.
229
+
230
+ Returns
231
+ -------
232
+ self
233
+ """
234
+ if not params:
235
+ # Simple optimisation to gain speed (inspect is slow)
236
+ return self
237
+ valid_params = self.get_params(deep=True)
238
+ for key, value in params.items():
239
+ split = key.split("__", 1)
240
+ if len(split) > 1:
241
+ # nested objects case
242
+ name, sub_name = split
243
+ if name not in valid_params:
244
+ raise ValueError(
245
+ "Invalid parameter %s for kernel %s. "
246
+ "Check the list of available parameters "
247
+ "with `kernel.get_params().keys()`." % (name, self)
248
+ )
249
+ sub_object = valid_params[name]
250
+ sub_object.set_params(**{sub_name: value})
251
+ else:
252
+ # simple objects case
253
+ if key not in valid_params:
254
+ raise ValueError(
255
+ "Invalid parameter %s for kernel %s. "
256
+ "Check the list of available parameters "
257
+ "with `kernel.get_params().keys()`."
258
+ % (key, self.__class__.__name__)
259
+ )
260
+ setattr(self, key, value)
261
+ return self
262
+
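A small sketch of the nested ``<component>__<parameter>`` convention mentioned above, on a Product kernel whose factors are exposed as k1 and k2:

from sklearn.gaussian_process.kernels import RBF, ConstantKernel

kernel = ConstantKernel(1.0) * RBF(length_scale=1.0)

# The RBF factor is k2, so its length scale is addressed as k2__length_scale.
kernel.set_params(k2__length_scale=5.0)
print(kernel.get_params()["k2__length_scale"])  # 5.0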
263
+ def clone_with_theta(self, theta):
264
+ """Returns a clone of self with given hyperparameters theta.
265
+
266
+ Parameters
267
+ ----------
268
+ theta : ndarray of shape (n_dims,)
269
+ The hyperparameters
270
+ """
271
+ cloned = clone(self)
272
+ cloned.theta = theta
273
+ return cloned
274
+
275
+ @property
276
+ def n_dims(self):
277
+ """Returns the number of non-fixed hyperparameters of the kernel."""
278
+ return self.theta.shape[0]
279
+
280
+ @property
281
+ def hyperparameters(self):
282
+ """Returns a list of all hyperparameter specifications."""
283
+ r = [
284
+ getattr(self, attr)
285
+ for attr in dir(self)
286
+ if attr.startswith("hyperparameter_")
287
+ ]
288
+ return r
289
+
290
+ @property
291
+ def theta(self):
292
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
293
+
294
+ Note that theta are typically the log-transformed values of the
295
+ kernel's hyperparameters as this representation of the search space
296
+ is more amenable for hyperparameter search, as hyperparameters like
297
+ length-scales naturally live on a log-scale.
298
+
299
+ Returns
300
+ -------
301
+ theta : ndarray of shape (n_dims,)
302
+ The non-fixed, log-transformed hyperparameters of the kernel
303
+ """
304
+ theta = []
305
+ params = self.get_params()
306
+ for hyperparameter in self.hyperparameters:
307
+ if not hyperparameter.fixed:
308
+ theta.append(params[hyperparameter.name])
309
+ if len(theta) > 0:
310
+ return np.log(np.hstack(theta))
311
+ else:
312
+ return np.array([])
313
+
314
+ @theta.setter
315
+ def theta(self, theta):
316
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
317
+
318
+ Parameters
319
+ ----------
320
+ theta : ndarray of shape (n_dims,)
321
+ The non-fixed, log-transformed hyperparameters of the kernel
322
+ """
323
+ params = self.get_params()
324
+ i = 0
325
+ for hyperparameter in self.hyperparameters:
326
+ if hyperparameter.fixed:
327
+ continue
328
+ if hyperparameter.n_elements > 1:
329
+ # vector-valued parameter
330
+ params[hyperparameter.name] = np.exp(
331
+ theta[i : i + hyperparameter.n_elements]
332
+ )
333
+ i += hyperparameter.n_elements
334
+ else:
335
+ params[hyperparameter.name] = np.exp(theta[i])
336
+ i += 1
337
+
338
+ if i != len(theta):
339
+ raise ValueError(
340
+ "theta has not the correct number of entries."
341
+ " Should be %d; given are %d" % (i, len(theta))
342
+ )
343
+ self.set_params(**params)
344
+
345
+ @property
346
+ def bounds(self):
347
+ """Returns the log-transformed bounds on the theta.
348
+
349
+ Returns
350
+ -------
351
+ bounds : ndarray of shape (n_dims, 2)
352
+ The log-transformed bounds on the kernel's hyperparameters theta
353
+ """
354
+ bounds = [
355
+ hyperparameter.bounds
356
+ for hyperparameter in self.hyperparameters
357
+ if not hyperparameter.fixed
358
+ ]
359
+ if len(bounds) > 0:
360
+ return np.log(np.vstack(bounds))
361
+ else:
362
+ return np.array([])
363
+
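To make the log-transform of theta and bounds concrete, a minimal sketch on an RBF kernel:

import numpy as np
from sklearn.gaussian_process.kernels import RBF

kernel = RBF(length_scale=2.0, length_scale_bounds=(1e-2, 1e2))

# theta stores the log of the non-fixed hyperparameters; bounds is
# log-transformed in the same way.
print(kernel.theta)            # [log(2.0)]
print(np.exp(kernel.theta))    # [2.0]
print(np.exp(kernel.bounds))   # [[0.01, 100.0]]

# A fixed hyperparameter drops out of theta entirely.
print(RBF(2.0, length_scale_bounds="fixed").theta)  # []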
364
+ def __add__(self, b):
365
+ if not isinstance(b, Kernel):
366
+ return Sum(self, ConstantKernel(b))
367
+ return Sum(self, b)
368
+
369
+ def __radd__(self, b):
370
+ if not isinstance(b, Kernel):
371
+ return Sum(ConstantKernel(b), self)
372
+ return Sum(b, self)
373
+
374
+ def __mul__(self, b):
375
+ if not isinstance(b, Kernel):
376
+ return Product(self, ConstantKernel(b))
377
+ return Product(self, b)
378
+
379
+ def __rmul__(self, b):
380
+ if not isinstance(b, Kernel):
381
+ return Product(ConstantKernel(b), self)
382
+ return Product(b, self)
383
+
384
+ def __pow__(self, b):
385
+ return Exponentiation(self, b)
386
+
387
+ def __eq__(self, b):
388
+ if type(self) != type(b):
389
+ return False
390
+ params_a = self.get_params()
391
+ params_b = b.get_params()
392
+ for key in set(list(params_a.keys()) + list(params_b.keys())):
393
+ if np.any(params_a.get(key, None) != params_b.get(key, None)):
394
+ return False
395
+ return True
396
+
397
+ def __repr__(self):
398
+ return "{0}({1})".format(
399
+ self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta))
400
+ )
401
+
402
+ @abstractmethod
403
+ def __call__(self, X, Y=None, eval_gradient=False):
404
+ """Evaluate the kernel."""
405
+
406
+ @abstractmethod
407
+ def diag(self, X):
408
+ """Returns the diagonal of the kernel k(X, X).
409
+
410
+ The result of this method is identical to np.diag(self(X)); however,
411
+ it can be evaluated more efficiently since only the diagonal is
412
+ evaluated.
413
+
414
+ Parameters
415
+ ----------
416
+ X : array-like of shape (n_samples,)
417
+ Left argument of the returned kernel k(X, Y)
418
+
419
+ Returns
420
+ -------
421
+ K_diag : ndarray of shape (n_samples_X,)
422
+ Diagonal of kernel k(X, X)
423
+ """
424
+
425
+ @abstractmethod
426
+ def is_stationary(self):
427
+ """Returns whether the kernel is stationary."""
428
+
429
+ @property
430
+ def requires_vector_input(self):
431
+ """Returns whether the kernel is defined on fixed-length feature
432
+ vectors or generic objects. Defaults to True for backward
433
+ compatibility."""
434
+ return True
435
+
436
+ def _check_bounds_params(self):
437
+ """Called after fitting to warn if bounds may have been too tight."""
438
+ list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
439
+ idx = 0
440
+ for hyp in self.hyperparameters:
441
+ if hyp.fixed:
442
+ continue
443
+ for dim in range(hyp.n_elements):
444
+ if list_close[idx, 0]:
445
+ warnings.warn(
446
+ "The optimal value found for "
447
+ "dimension %s of parameter %s is "
448
+ "close to the specified lower "
449
+ "bound %s. Decreasing the bound and"
450
+ " calling fit again may find a "
451
+ "better value." % (dim, hyp.name, hyp.bounds[dim][0]),
452
+ ConvergenceWarning,
453
+ )
454
+ elif list_close[idx, 1]:
455
+ warnings.warn(
456
+ "The optimal value found for "
457
+ "dimension %s of parameter %s is "
458
+ "close to the specified upper "
459
+ "bound %s. Increasing the bound and"
460
+ " calling fit again may find a "
461
+ "better value." % (dim, hyp.name, hyp.bounds[dim][1]),
462
+ ConvergenceWarning,
463
+ )
464
+ idx += 1
465
+
466
+
467
+ class NormalizedKernelMixin:
468
+ """Mixin for kernels which are normalized: k(X, X)=1.
469
+
470
+ .. versionadded:: 0.18
471
+ """
472
+
473
+ def diag(self, X):
474
+ """Returns the diagonal of the kernel k(X, X).
475
+
476
+ The result of this method is identical to np.diag(self(X)); however,
477
+ it can be evaluated more efficiently since only the diagonal is
478
+ evaluated.
479
+
480
+ Parameters
481
+ ----------
482
+ X : ndarray of shape (n_samples_X, n_features)
483
+ Left argument of the returned kernel k(X, Y)
484
+
485
+ Returns
486
+ -------
487
+ K_diag : ndarray of shape (n_samples_X,)
488
+ Diagonal of kernel k(X, X)
489
+ """
490
+ return np.ones(X.shape[0])
491
+
492
+
493
+ class StationaryKernelMixin:
494
+ """Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
495
+
496
+ .. versionadded:: 0.18
497
+ """
498
+
499
+ def is_stationary(self):
500
+ """Returns whether the kernel is stationary."""
501
+ return True
502
+
503
+
504
+ class GenericKernelMixin:
505
+ """Mixin for kernels which operate on generic objects such as variable-
506
+ length sequences, trees, and graphs.
507
+
508
+ .. versionadded:: 0.22
509
+ """
510
+
511
+ @property
512
+ def requires_vector_input(self):
513
+ """Whether the kernel works only on fixed-length feature vectors."""
514
+ return False
515
+
516
+
517
+ class CompoundKernel(Kernel):
518
+ """Kernel which is composed of a set of other kernels.
519
+
520
+ .. versionadded:: 0.18
521
+
522
+ Parameters
523
+ ----------
524
+ kernels : list of Kernels
525
+ The other kernels
526
+
527
+ Examples
528
+ --------
529
+ >>> from sklearn.gaussian_process.kernels import WhiteKernel
530
+ >>> from sklearn.gaussian_process.kernels import RBF
531
+ >>> from sklearn.gaussian_process.kernels import CompoundKernel
532
+ >>> kernel = CompoundKernel(
533
+ ... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
534
+ >>> print(kernel.bounds)
535
+ [[-11.51292546 11.51292546]
536
+ [-11.51292546 11.51292546]]
537
+ >>> print(kernel.n_dims)
538
+ 2
539
+ >>> print(kernel.theta)
540
+ [1.09861229 0.69314718]
541
+ """
542
+
543
+ def __init__(self, kernels):
544
+ self.kernels = kernels
545
+
546
+ def get_params(self, deep=True):
547
+ """Get parameters of this kernel.
548
+
549
+ Parameters
550
+ ----------
551
+ deep : bool, default=True
552
+ If True, will return the parameters for this estimator and
553
+ contained subobjects that are estimators.
554
+
555
+ Returns
556
+ -------
557
+ params : dict
558
+ Parameter names mapped to their values.
559
+ """
560
+ return dict(kernels=self.kernels)
561
+
562
+ @property
563
+ def theta(self):
564
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
565
+
566
+ Note that theta are typically the log-transformed values of the
567
+ kernel's hyperparameters as this representation of the search space
568
+ is more amenable for hyperparameter search, as hyperparameters like
569
+ length-scales naturally live on a log-scale.
570
+
571
+ Returns
572
+ -------
573
+ theta : ndarray of shape (n_dims,)
574
+ The non-fixed, log-transformed hyperparameters of the kernel
575
+ """
576
+ return np.hstack([kernel.theta for kernel in self.kernels])
577
+
578
+ @theta.setter
579
+ def theta(self, theta):
580
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
581
+
582
+ Parameters
583
+ ----------
584
+ theta : array of shape (n_dims,)
585
+ The non-fixed, log-transformed hyperparameters of the kernel
586
+ """
587
+ k_dims = self.k1.n_dims
588
+ for i, kernel in enumerate(self.kernels):
589
+ kernel.theta = theta[i * k_dims : (i + 1) * k_dims]
590
+
591
+ @property
592
+ def bounds(self):
593
+ """Returns the log-transformed bounds on the theta.
594
+
595
+ Returns
596
+ -------
597
+ bounds : array of shape (n_dims, 2)
598
+ The log-transformed bounds on the kernel's hyperparameters theta
599
+ """
600
+ return np.vstack([kernel.bounds for kernel in self.kernels])
601
+
602
+ def __call__(self, X, Y=None, eval_gradient=False):
603
+ """Return the kernel k(X, Y) and optionally its gradient.
604
+
605
+ Note that this compound kernel returns the results of all simple kernels
606
+ stacked along an additional axis.
607
+
608
+ Parameters
609
+ ----------
610
+ X : array-like of shape (n_samples_X, n_features) or list of object, \
611
+ default=None
612
+ Left argument of the returned kernel k(X, Y)
613
+
614
+ Y : array-like of shape (n_samples_X, n_features) or list of object, \
615
+ default=None
616
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
617
+ is evaluated instead.
618
+
619
+ eval_gradient : bool, default=False
620
+ Determines whether the gradient with respect to the log of the
621
+ kernel hyperparameter is computed.
622
+
623
+ Returns
624
+ -------
625
+ K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
626
+ Kernel k(X, Y)
627
+
628
+ K_gradient : ndarray of shape \
629
+ (n_samples_X, n_samples_X, n_dims, n_kernels), optional
630
+ The gradient of the kernel k(X, X) with respect to the log of the
631
+ hyperparameter of the kernel. Only returned when `eval_gradient`
632
+ is True.
633
+ """
634
+ if eval_gradient:
635
+ K = []
636
+ K_grad = []
637
+ for kernel in self.kernels:
638
+ K_single, K_grad_single = kernel(X, Y, eval_gradient)
639
+ K.append(K_single)
640
+ K_grad.append(K_grad_single[..., np.newaxis])
641
+ return np.dstack(K), np.concatenate(K_grad, 3)
642
+ else:
643
+ return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])
644
+
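An illustrative call showing the stacking behaviour described in the docstring above (toy inputs):

import numpy as np
from sklearn.gaussian_process.kernels import RBF, CompoundKernel, WhiteKernel

X = np.array([[0.0], [1.0], [2.0]])
kernel = CompoundKernel([WhiteKernel(noise_level=1.0), RBF(length_scale=1.0)])

K = kernel(X)
print(K.shape)               # (3, 3, 2): one slice per simple kernel
print(kernel.diag(X).shape)  # (3, 2)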
645
+ def __eq__(self, b):
646
+ if type(self) != type(b) or len(self.kernels) != len(b.kernels):
647
+ return False
648
+ return np.all(
649
+ [self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))]
650
+ )
651
+
652
+ def is_stationary(self):
653
+ """Returns whether the kernel is stationary."""
654
+ return np.all([kernel.is_stationary() for kernel in self.kernels])
655
+
656
+ @property
657
+ def requires_vector_input(self):
658
+ """Returns whether the kernel is defined on discrete structures."""
659
+ return np.any([kernel.requires_vector_input for kernel in self.kernels])
660
+
661
+ def diag(self, X):
662
+ """Returns the diagonal of the kernel k(X, X).
663
+
664
+ The result of this method is identical to `np.diag(self(X))`; however,
665
+ it can be evaluated more efficiently since only the diagonal is
666
+ evaluated.
667
+
668
+ Parameters
669
+ ----------
670
+ X : array-like of shape (n_samples_X, n_features) or list of object
671
+ Argument to the kernel.
672
+
673
+ Returns
674
+ -------
675
+ K_diag : ndarray of shape (n_samples_X, n_kernels)
676
+ Diagonal of kernel k(X, X)
677
+ """
678
+ return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
679
+
680
+
681
+ class KernelOperator(Kernel):
682
+ """Base class for all kernel operators.
683
+
684
+ .. versionadded:: 0.18
685
+ """
686
+
687
+ def __init__(self, k1, k2):
688
+ self.k1 = k1
689
+ self.k2 = k2
690
+
691
+ def get_params(self, deep=True):
692
+ """Get parameters of this kernel.
693
+
694
+ Parameters
695
+ ----------
696
+ deep : bool, default=True
697
+ If True, will return the parameters for this estimator and
698
+ contained subobjects that are estimators.
699
+
700
+ Returns
701
+ -------
702
+ params : dict
703
+ Parameter names mapped to their values.
704
+ """
705
+ params = dict(k1=self.k1, k2=self.k2)
706
+ if deep:
707
+ deep_items = self.k1.get_params().items()
708
+ params.update(("k1__" + k, val) for k, val in deep_items)
709
+ deep_items = self.k2.get_params().items()
710
+ params.update(("k2__" + k, val) for k, val in deep_items)
711
+
712
+ return params
713
+
714
+ @property
715
+ def hyperparameters(self):
716
+ """Returns a list of all hyperparameter."""
717
+ r = [
718
+ Hyperparameter(
719
+ "k1__" + hyperparameter.name,
720
+ hyperparameter.value_type,
721
+ hyperparameter.bounds,
722
+ hyperparameter.n_elements,
723
+ )
724
+ for hyperparameter in self.k1.hyperparameters
725
+ ]
726
+
727
+ for hyperparameter in self.k2.hyperparameters:
728
+ r.append(
729
+ Hyperparameter(
730
+ "k2__" + hyperparameter.name,
731
+ hyperparameter.value_type,
732
+ hyperparameter.bounds,
733
+ hyperparameter.n_elements,
734
+ )
735
+ )
736
+ return r
737
+
738
+ @property
739
+ def theta(self):
740
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
741
+
742
+ Note that theta are typically the log-transformed values of the
743
+ kernel's hyperparameters as this representation of the search space
744
+ is more amenable for hyperparameter search, as hyperparameters like
745
+ length-scales naturally live on a log-scale.
746
+
747
+ Returns
748
+ -------
749
+ theta : ndarray of shape (n_dims,)
750
+ The non-fixed, log-transformed hyperparameters of the kernel
751
+ """
752
+ return np.append(self.k1.theta, self.k2.theta)
753
+
754
+ @theta.setter
755
+ def theta(self, theta):
756
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
757
+
758
+ Parameters
759
+ ----------
760
+ theta : ndarray of shape (n_dims,)
761
+ The non-fixed, log-transformed hyperparameters of the kernel
762
+ """
763
+ k1_dims = self.k1.n_dims
764
+ self.k1.theta = theta[:k1_dims]
765
+ self.k2.theta = theta[k1_dims:]
766
+
767
+ @property
768
+ def bounds(self):
769
+ """Returns the log-transformed bounds on the theta.
770
+
771
+ Returns
772
+ -------
773
+ bounds : ndarray of shape (n_dims, 2)
774
+ The log-transformed bounds on the kernel's hyperparameters theta
775
+ """
776
+ if self.k1.bounds.size == 0:
777
+ return self.k2.bounds
778
+ if self.k2.bounds.size == 0:
779
+ return self.k1.bounds
780
+ return np.vstack((self.k1.bounds, self.k2.bounds))
781
+
782
+ def __eq__(self, b):
783
+ if type(self) != type(b):
784
+ return False
785
+ return (self.k1 == b.k1 and self.k2 == b.k2) or (
786
+ self.k1 == b.k2 and self.k2 == b.k1
787
+ )
788
+
789
+ def is_stationary(self):
790
+ """Returns whether the kernel is stationary."""
791
+ return self.k1.is_stationary() and self.k2.is_stationary()
792
+
793
+ @property
794
+ def requires_vector_input(self):
795
+ """Returns whether the kernel is stationary."""
796
+ return self.k1.requires_vector_input or self.k2.requires_vector_input
797
+
798
+
799
+ class Sum(KernelOperator):
800
+ """The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2`
801
+ and combines them via
802
+
803
+ .. math::
804
+ k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)
805
+
806
+ Note that the `__add__` magic method is overridden, so
807
+ `Sum(RBF(), RBF())` is equivalent to using the + operator
808
+ with `RBF() + RBF()`.
809
+
810
+
811
+ Read more in the :ref:`User Guide <gp_kernels>`.
812
+
813
+ .. versionadded:: 0.18
814
+
815
+ Parameters
816
+ ----------
817
+ k1 : Kernel
818
+ The first base-kernel of the sum-kernel
819
+
820
+ k2 : Kernel
821
+ The second base-kernel of the sum-kernel
822
+
823
+ Examples
824
+ --------
825
+ >>> from sklearn.datasets import make_friedman2
826
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
827
+ >>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel
828
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
829
+ >>> kernel = Sum(ConstantKernel(2), RBF())
830
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
831
+ ... random_state=0).fit(X, y)
832
+ >>> gpr.score(X, y)
833
+ 1.0
834
+ >>> kernel
835
+ 1.41**2 + RBF(length_scale=1)
836
+ """
837
+
838
+ def __call__(self, X, Y=None, eval_gradient=False):
839
+ """Return the kernel k(X, Y) and optionally its gradient.
840
+
841
+ Parameters
842
+ ----------
843
+ X : array-like of shape (n_samples_X, n_features) or list of object
844
+ Left argument of the returned kernel k(X, Y)
845
+
846
+ Y : array-like of shape (n_samples_X, n_features) or list of object,\
847
+ default=None
848
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
849
+ is evaluated instead.
850
+
851
+ eval_gradient : bool, default=False
852
+ Determines whether the gradient with respect to the log of
853
+ the kernel hyperparameter is computed.
854
+
855
+ Returns
856
+ -------
857
+ K : ndarray of shape (n_samples_X, n_samples_Y)
858
+ Kernel k(X, Y)
859
+
860
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
861
+ optional
862
+ The gradient of the kernel k(X, X) with respect to the log of the
863
+ hyperparameter of the kernel. Only returned when `eval_gradient`
864
+ is True.
865
+ """
866
+ if eval_gradient:
867
+ K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
868
+ K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
869
+ return K1 + K2, np.dstack((K1_gradient, K2_gradient))
870
+ else:
871
+ return self.k1(X, Y) + self.k2(X, Y)
872
+
873
+ def diag(self, X):
874
+ """Returns the diagonal of the kernel k(X, X).
875
+
876
+ The result of this method is identical to `np.diag(self(X))`; however,
877
+ it can be evaluated more efficiently since only the diagonal is
878
+ evaluated.
879
+
880
+ Parameters
881
+ ----------
882
+ X : array-like of shape (n_samples_X, n_features) or list of object
883
+ Argument to the kernel.
884
+
885
+ Returns
886
+ -------
887
+ K_diag : ndarray of shape (n_samples_X,)
888
+ Diagonal of kernel k(X, X)
889
+ """
890
+ return self.k1.diag(X) + self.k2.diag(X)
891
+
892
+ def __repr__(self):
893
+ return "{0} + {1}".format(self.k1, self.k2)
894
+
895
+
896
+ class Product(KernelOperator):
897
+ """The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2`
898
+ and combines them via
899
+
900
+ .. math::
901
+ k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)
902
+
903
+ Note that the `__mul__` magic method is overridden, so
904
+ `Product(RBF(), RBF())` is equivalent to using the * operator
905
+ with `RBF() * RBF()`.
906
+
907
+ Read more in the :ref:`User Guide <gp_kernels>`.
908
+
909
+ .. versionadded:: 0.18
910
+
911
+ Parameters
912
+ ----------
913
+ k1 : Kernel
914
+ The first base-kernel of the product-kernel
915
+
916
+ k2 : Kernel
917
+ The second base-kernel of the product-kernel
918
+
919
+
920
+ Examples
921
+ --------
922
+ >>> from sklearn.datasets import make_friedman2
923
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
924
+ >>> from sklearn.gaussian_process.kernels import (RBF, Product,
925
+ ... ConstantKernel)
926
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
927
+ >>> kernel = Product(ConstantKernel(2), RBF())
928
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
929
+ ... random_state=0).fit(X, y)
930
+ >>> gpr.score(X, y)
931
+ 1.0
932
+ >>> kernel
933
+ 1.41**2 * RBF(length_scale=1)
934
+ """
935
+
936
+ def __call__(self, X, Y=None, eval_gradient=False):
937
+ """Return the kernel k(X, Y) and optionally its gradient.
938
+
939
+ Parameters
940
+ ----------
941
+ X : array-like of shape (n_samples_X, n_features) or list of object
942
+ Left argument of the returned kernel k(X, Y)
943
+
944
+ Y : array-like of shape (n_samples_Y, n_features) or list of object,\
945
+ default=None
946
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
947
+ is evaluated instead.
948
+
949
+ eval_gradient : bool, default=False
950
+ Determines whether the gradient with respect to the log of
951
+ the kernel hyperparameter is computed.
952
+
953
+ Returns
954
+ -------
955
+ K : ndarray of shape (n_samples_X, n_samples_Y)
956
+ Kernel k(X, Y)
957
+
958
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
959
+ optional
960
+ The gradient of the kernel k(X, X) with respect to the log of the
961
+ hyperparameter of the kernel. Only returned when `eval_gradient`
962
+ is True.
963
+ """
964
+ if eval_gradient:
965
+ K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
966
+ K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
967
+ return K1 * K2, np.dstack(
968
+ (K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis])
969
+ )
970
+ else:
971
+ return self.k1(X, Y) * self.k2(X, Y)
972
+
973
+ def diag(self, X):
974
+ """Returns the diagonal of the kernel k(X, X).
975
+
976
+ The result of this method is identical to np.diag(self(X)); however,
977
+ it can be evaluated more efficiently since only the diagonal is
978
+ evaluated.
979
+
980
+ Parameters
981
+ ----------
982
+ X : array-like of shape (n_samples_X, n_features) or list of object
983
+ Argument to the kernel.
984
+
985
+ Returns
986
+ -------
987
+ K_diag : ndarray of shape (n_samples_X,)
988
+ Diagonal of kernel k(X, X)
989
+ """
990
+ return self.k1.diag(X) * self.k2.diag(X)
991
+
992
+ def __repr__(self):
993
+ return "{0} * {1}".format(self.k1, self.k2)
994
+
995
+
996
+ class Exponentiation(Kernel):
997
+ """The Exponentiation kernel takes one base kernel and a scalar parameter
998
+ :math:`p` and combines them via
999
+
1000
+ .. math::
1001
+ k_{exp}(X, Y) = k(X, Y) ^p
1002
+
1003
+ Note that the `__pow__` magic method is overridden, so
1004
+ `Exponentiation(RBF(), 2)` is equivalent to using the ** operator
1005
+ with `RBF() ** 2`.
1006
+
1007
+
1008
+ Read more in the :ref:`User Guide <gp_kernels>`.
1009
+
1010
+ .. versionadded:: 0.18
1011
+
1012
+ Parameters
1013
+ ----------
1014
+ kernel : Kernel
1015
+ The base kernel
1016
+
1017
+ exponent : float
1018
+ The exponent for the base kernel
1019
+
1020
+
1021
+ Examples
1022
+ --------
1023
+ >>> from sklearn.datasets import make_friedman2
1024
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
1025
+ >>> from sklearn.gaussian_process.kernels import (RationalQuadratic,
1026
+ ... Exponentiation)
1027
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
1028
+ >>> kernel = Exponentiation(RationalQuadratic(), exponent=2)
1029
+ >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
1030
+ ... random_state=0).fit(X, y)
1031
+ >>> gpr.score(X, y)
1032
+ 0.419...
1033
+ >>> gpr.predict(X[:1,:], return_std=True)
1034
+ (array([635.5...]), array([0.559...]))
1035
+ """
1036
+
1037
+ def __init__(self, kernel, exponent):
1038
+ self.kernel = kernel
1039
+ self.exponent = exponent
1040
+
1041
+ def get_params(self, deep=True):
1042
+ """Get parameters of this kernel.
1043
+
1044
+ Parameters
1045
+ ----------
1046
+ deep : bool, default=True
1047
+ If True, will return the parameters for this estimator and
1048
+ contained subobjects that are estimators.
1049
+
1050
+ Returns
1051
+ -------
1052
+ params : dict
1053
+ Parameter names mapped to their values.
1054
+ """
1055
+ params = dict(kernel=self.kernel, exponent=self.exponent)
1056
+ if deep:
1057
+ deep_items = self.kernel.get_params().items()
1058
+ params.update(("kernel__" + k, val) for k, val in deep_items)
1059
+ return params
1060
+
1061
+ @property
1062
+ def hyperparameters(self):
1063
+ """Returns a list of all hyperparameter."""
1064
+ r = []
1065
+ for hyperparameter in self.kernel.hyperparameters:
1066
+ r.append(
1067
+ Hyperparameter(
1068
+ "kernel__" + hyperparameter.name,
1069
+ hyperparameter.value_type,
1070
+ hyperparameter.bounds,
1071
+ hyperparameter.n_elements,
1072
+ )
1073
+ )
1074
+ return r
1075
+
1076
+ @property
1077
+ def theta(self):
1078
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
1079
+
1080
+ Note that theta are typically the log-transformed values of the
1081
+ kernel's hyperparameters as this representation of the search space
1082
+ is more amenable for hyperparameter search, as hyperparameters like
1083
+ length-scales naturally live on a log-scale.
1084
+
1085
+ Returns
1086
+ -------
1087
+ theta : ndarray of shape (n_dims,)
1088
+ The non-fixed, log-transformed hyperparameters of the kernel
1089
+ """
1090
+ return self.kernel.theta
1091
+
1092
+ @theta.setter
1093
+ def theta(self, theta):
1094
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
1095
+
1096
+ Parameters
1097
+ ----------
1098
+ theta : ndarray of shape (n_dims,)
1099
+ The non-fixed, log-transformed hyperparameters of the kernel
1100
+ """
1101
+ self.kernel.theta = theta
1102
+
1103
+ @property
1104
+ def bounds(self):
1105
+ """Returns the log-transformed bounds on the theta.
1106
+
1107
+ Returns
1108
+ -------
1109
+ bounds : ndarray of shape (n_dims, 2)
1110
+ The log-transformed bounds on the kernel's hyperparameters theta
1111
+ """
1112
+ return self.kernel.bounds
1113
+
1114
+ def __eq__(self, b):
1115
+ if type(self) != type(b):
1116
+ return False
1117
+ return self.kernel == b.kernel and self.exponent == b.exponent
1118
+
1119
+ def __call__(self, X, Y=None, eval_gradient=False):
1120
+ """Return the kernel k(X, Y) and optionally its gradient.
1121
+
1122
+ Parameters
1123
+ ----------
1124
+ X : array-like of shape (n_samples_X, n_features) or list of object
1125
+ Left argument of the returned kernel k(X, Y)
1126
+
1127
+ Y : array-like of shape (n_samples_Y, n_features) or list of object,\
1128
+ default=None
1129
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1130
+ is evaluated instead.
1131
+
1132
+ eval_gradient : bool, default=False
1133
+ Determines whether the gradient with respect to the log of
1134
+ the kernel hyperparameter is computed.
1135
+
1136
+ Returns
1137
+ -------
1138
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1139
+ Kernel k(X, Y)
1140
+
1141
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
1142
+ optional
1143
+ The gradient of the kernel k(X, X) with respect to the log of the
1144
+ hyperparameter of the kernel. Only returned when `eval_gradient`
1145
+ is True.
1146
+ """
1147
+ if eval_gradient:
1148
+ K, K_gradient = self.kernel(X, Y, eval_gradient=True)
1149
+ K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
1150
+ return K**self.exponent, K_gradient
1151
+ else:
1152
+ K = self.kernel(X, Y, eval_gradient=False)
1153
+ return K**self.exponent
1154
+
1155
+ def diag(self, X):
1156
+ """Returns the diagonal of the kernel k(X, X).
1157
+
1158
+ The result of this method is identical to np.diag(self(X)); however,
1159
+ it can be evaluated more efficiently since only the diagonal is
1160
+ evaluated.
1161
+
1162
+ Parameters
1163
+ ----------
1164
+ X : array-like of shape (n_samples_X, n_features) or list of object
1165
+ Argument to the kernel.
1166
+
1167
+ Returns
1168
+ -------
1169
+ K_diag : ndarray of shape (n_samples_X,)
1170
+ Diagonal of kernel k(X, X)
1171
+ """
1172
+ return self.kernel.diag(X) ** self.exponent
1173
+
1174
+ def __repr__(self):
1175
+ return "{0} ** {1}".format(self.kernel, self.exponent)
1176
+
1177
+ def is_stationary(self):
1178
+ """Returns whether the kernel is stationary."""
1179
+ return self.kernel.is_stationary()
1180
+
1181
+ @property
1182
+ def requires_vector_input(self):
1183
+ """Returns whether the kernel is defined on discrete structures."""
1184
+ return self.kernel.requires_vector_input
1185
+
1186
+
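For context, the `Exponentiation` wrapper above is normally built through operator overloading on `Kernel` (`base_kernel ** exponent`) rather than instantiated directly. A minimal sketch of the delegation shown above (the printed values in the comments are illustrative):

    from sklearn.gaussian_process.kernels import RBF

    kernel = RBF(length_scale=1.0) ** 2  # wraps RBF in an Exponentiation with exponent=2
    print(kernel)                        # "RBF(length_scale=1) ** 2", via __repr__ above
    print(kernel.theta)                  # [0.], the log-length-scale delegated from RBF
    print(kernel.bounds)                 # log-transformed length_scale_bounds, also delegated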
1187
+ class ConstantKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
1188
+ """Constant kernel.
1189
+
1190
+ Can be used as part of a product-kernel where it scales the magnitude of
1191
+ the other factor (kernel) or as part of a sum-kernel, where it modifies
1192
+ the mean of the Gaussian process.
1193
+
1194
+ .. math::
1195
+ k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2
1196
+
1197
+ Adding a constant kernel is equivalent to adding a constant::
1198
+
1199
+ kernel = RBF() + ConstantKernel(constant_value=2)
1200
+
1201
+ is the same as::
1202
+
1203
+ kernel = RBF() + 2
1204
+
1205
+
1206
+ Read more in the :ref:`User Guide <gp_kernels>`.
1207
+
1208
+ .. versionadded:: 0.18
1209
+
1210
+ Parameters
1211
+ ----------
1212
+ constant_value : float, default=1.0
1213
+ The constant value which defines the covariance:
1214
+ k(x_1, x_2) = constant_value
1215
+
1216
+ constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1217
+ The lower and upper bound on `constant_value`.
1218
+ If set to "fixed", `constant_value` cannot be changed during
1219
+ hyperparameter tuning.
1220
+
1221
+ Examples
1222
+ --------
1223
+ >>> from sklearn.datasets import make_friedman2
1224
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
1225
+ >>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
1226
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
1227
+ >>> kernel = RBF() + ConstantKernel(constant_value=2)
1228
+ >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
1229
+ ... random_state=0).fit(X, y)
1230
+ >>> gpr.score(X, y)
1231
+ 0.3696...
1232
+ >>> gpr.predict(X[:1,:], return_std=True)
1233
+ (array([606.1...]), array([0.24...]))
1234
+ """
1235
+
1236
+ def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
1237
+ self.constant_value = constant_value
1238
+ self.constant_value_bounds = constant_value_bounds
1239
+
1240
+ @property
1241
+ def hyperparameter_constant_value(self):
1242
+ return Hyperparameter("constant_value", "numeric", self.constant_value_bounds)
1243
+
1244
+ def __call__(self, X, Y=None, eval_gradient=False):
1245
+ """Return the kernel k(X, Y) and optionally its gradient.
1246
+
1247
+ Parameters
1248
+ ----------
1249
+ X : array-like of shape (n_samples_X, n_features) or list of object
1250
+ Left argument of the returned kernel k(X, Y)
1251
+
1252
+ Y : array-like of shape (n_samples_Y, n_features) or list of object, \
1253
+ default=None
1254
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1255
+ is evaluated instead.
1256
+
1257
+ eval_gradient : bool, default=False
1258
+ Determines whether the gradient with respect to the log of
1259
+ the kernel hyperparameter is computed.
1260
+ Only supported when Y is None.
1261
+
1262
+ Returns
1263
+ -------
1264
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1265
+ Kernel k(X, Y)
1266
+
1267
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
1268
+ optional
1269
+ The gradient of the kernel k(X, X) with respect to the log of the
1270
+ hyperparameter of the kernel. Only returned when eval_gradient
1271
+ is True.
1272
+ """
1273
+ if Y is None:
1274
+ Y = X
1275
+ elif eval_gradient:
1276
+ raise ValueError("Gradient can only be evaluated when Y is None.")
1277
+
1278
+ K = np.full(
1279
+ (_num_samples(X), _num_samples(Y)),
1280
+ self.constant_value,
1281
+ dtype=np.array(self.constant_value).dtype,
1282
+ )
1283
+ if eval_gradient:
1284
+ if not self.hyperparameter_constant_value.fixed:
1285
+ return (
1286
+ K,
1287
+ np.full(
1288
+ (_num_samples(X), _num_samples(X), 1),
1289
+ self.constant_value,
1290
+ dtype=np.array(self.constant_value).dtype,
1291
+ ),
1292
+ )
1293
+ else:
1294
+ return K, np.empty((_num_samples(X), _num_samples(X), 0))
1295
+ else:
1296
+ return K
1297
+
1298
+ def diag(self, X):
1299
+ """Returns the diagonal of the kernel k(X, X).
1300
+
1301
+ The result of this method is identical to np.diag(self(X)); however,
1302
+ it can be evaluated more efficiently since only the diagonal is
1303
+ evaluated.
1304
+
1305
+ Parameters
1306
+ ----------
1307
+ X : array-like of shape (n_samples_X, n_features) or list of object
1308
+ Argument to the kernel.
1309
+
1310
+ Returns
1311
+ -------
1312
+ K_diag : ndarray of shape (n_samples_X,)
1313
+ Diagonal of kernel k(X, X)
1314
+ """
1315
+ return np.full(
1316
+ _num_samples(X),
1317
+ self.constant_value,
1318
+ dtype=np.array(self.constant_value).dtype,
1319
+ )
1320
+
1321
+ def __repr__(self):
1322
+ return "{0:.3g}**2".format(np.sqrt(self.constant_value))
1323
+
1324
+
1325
+ class WhiteKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
1326
+ """White kernel.
1327
+
1328
+ The main use-case of this kernel is as part of a sum-kernel where it
1329
+ explains the noise of the signal as independently and identically
1330
+ normally-distributed. The parameter noise_level equals the variance of this
1331
+ noise.
1332
+
1333
+ .. math::
1334
+ k(x_i, x_j) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0
1335
+
1336
+
1337
+ Read more in the :ref:`User Guide <gp_kernels>`.
1338
+
1339
+ .. versionadded:: 0.18
1340
+
1341
+ Parameters
1342
+ ----------
1343
+ noise_level : float, default=1.0
1344
+ Parameter controlling the noise level (variance)
1345
+
1346
+ noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1347
+ The lower and upper bound on 'noise_level'.
1348
+ If set to "fixed", 'noise_level' cannot be changed during
1349
+ hyperparameter tuning.
1350
+
1351
+ Examples
1352
+ --------
1353
+ >>> from sklearn.datasets import make_friedman2
1354
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
1355
+ >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
1356
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
1357
+ >>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
1358
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
1359
+ ... random_state=0).fit(X, y)
1360
+ >>> gpr.score(X, y)
1361
+ 0.3680...
1362
+ >>> gpr.predict(X[:2,:], return_std=True)
1363
+ (array([653.0..., 592.1... ]), array([316.6..., 316.6...]))
1364
+ """
1365
+
1366
+ def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
1367
+ self.noise_level = noise_level
1368
+ self.noise_level_bounds = noise_level_bounds
1369
+
1370
+ @property
1371
+ def hyperparameter_noise_level(self):
1372
+ return Hyperparameter("noise_level", "numeric", self.noise_level_bounds)
1373
+
1374
+ def __call__(self, X, Y=None, eval_gradient=False):
1375
+ """Return the kernel k(X, Y) and optionally its gradient.
1376
+
1377
+ Parameters
1378
+ ----------
1379
+ X : array-like of shape (n_samples_X, n_features) or list of object
1380
+ Left argument of the returned kernel k(X, Y)
1381
+
1382
+ Y : array-like of shape (n_samples_Y, n_features) or list of object,\
1383
+ default=None
1384
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1385
+ is evaluated instead.
1386
+
1387
+ eval_gradient : bool, default=False
1388
+ Determines whether the gradient with respect to the log of
1389
+ the kernel hyperparameter is computed.
1390
+ Only supported when Y is None.
1391
+
1392
+ Returns
1393
+ -------
1394
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1395
+ Kernel k(X, Y)
1396
+
1397
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
1398
+ optional
1399
+ The gradient of the kernel k(X, X) with respect to the log of the
1400
+ hyperparameter of the kernel. Only returned when eval_gradient
1401
+ is True.
1402
+ """
1403
+ if Y is not None and eval_gradient:
1404
+ raise ValueError("Gradient can only be evaluated when Y is None.")
1405
+
1406
+ if Y is None:
1407
+ K = self.noise_level * np.eye(_num_samples(X))
1408
+ if eval_gradient:
1409
+ if not self.hyperparameter_noise_level.fixed:
1410
+ return (
1411
+ K,
1412
+ self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis],
1413
+ )
1414
+ else:
1415
+ return K, np.empty((_num_samples(X), _num_samples(X), 0))
1416
+ else:
1417
+ return K
1418
+ else:
1419
+ return np.zeros((_num_samples(X), _num_samples(Y)))
1420
+
1421
+ def diag(self, X):
1422
+ """Returns the diagonal of the kernel k(X, X).
1423
+
1424
+ The result of this method is identical to np.diag(self(X)); however,
1425
+ it can be evaluated more efficiently since only the diagonal is
1426
+ evaluated.
1427
+
1428
+ Parameters
1429
+ ----------
1430
+ X : array-like of shape (n_samples_X, n_features) or list of object
1431
+ Argument to the kernel.
1432
+
1433
+ Returns
1434
+ -------
1435
+ K_diag : ndarray of shape (n_samples_X,)
1436
+ Diagonal of kernel k(X, X)
1437
+ """
1438
+ return np.full(
1439
+ _num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype
1440
+ )
1441
+
1442
+ def __repr__(self):
1443
+ return "{0}(noise_level={1:.3g})".format(
1444
+ self.__class__.__name__, self.noise_level
1445
+ )
1446
+
1447
+
1448
+ class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
1449
+ """Radial basis function kernel (aka squared-exponential kernel).
1450
+
1451
+ The RBF kernel is a stationary kernel. It is also known as the
1452
+ "squared exponential" kernel. It is parameterized by a length scale
1453
+ parameter :math:`l>0`, which can either be a scalar (isotropic variant
1454
+ of the kernel) or a vector with the same number of dimensions as the inputs
1455
+ X (anisotropic variant of the kernel). The kernel is given by:
1456
+
1457
+ .. math::
1458
+ k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right)
1459
+
1460
+ where :math:`l` is the length scale of the kernel and
1461
+ :math:`d(\\cdot,\\cdot)` is the Euclidean distance.
1462
+ For advice on how to set the length scale parameter, see e.g. [1]_.
1463
+
1464
+ This kernel is infinitely differentiable, which implies that GPs with this
1465
+ kernel as covariance function have mean square derivatives of all orders,
1466
+ and are thus very smooth.
1467
+ See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.
1468
+
1469
+ Read more in the :ref:`User Guide <gp_kernels>`.
1470
+
1471
+ .. versionadded:: 0.18
1472
+
1473
+ Parameters
1474
+ ----------
1475
+ length_scale : float or ndarray of shape (n_features,), default=1.0
1476
+ The length scale of the kernel. If a float, an isotropic kernel is
1477
+ used. If an array, an anisotropic kernel is used where each dimension
1478
+ of l defines the length-scale of the respective feature dimension.
1479
+
1480
+ length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1481
+ The lower and upper bound on 'length_scale'.
1482
+ If set to "fixed", 'length_scale' cannot be changed during
1483
+ hyperparameter tuning.
1484
+
1485
+ References
1486
+ ----------
1487
+ .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
1488
+ Advice on Covariance functions".
1489
+ <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
1490
+
1491
+ .. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
1492
+ "Gaussian Processes for Machine Learning". The MIT Press.
1493
+ <http://www.gaussianprocess.org/gpml/>`_
1494
+
1495
+ Examples
1496
+ --------
1497
+ >>> from sklearn.datasets import load_iris
1498
+ >>> from sklearn.gaussian_process import GaussianProcessClassifier
1499
+ >>> from sklearn.gaussian_process.kernels import RBF
1500
+ >>> X, y = load_iris(return_X_y=True)
1501
+ >>> kernel = 1.0 * RBF(1.0)
1502
+ >>> gpc = GaussianProcessClassifier(kernel=kernel,
1503
+ ... random_state=0).fit(X, y)
1504
+ >>> gpc.score(X, y)
1505
+ 0.9866...
1506
+ >>> gpc.predict_proba(X[:2,:])
1507
+ array([[0.8354..., 0.03228..., 0.1322...],
1508
+ [0.7906..., 0.0652..., 0.1441...]])
1509
+ """
1510
+
1511
+ def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
1512
+ self.length_scale = length_scale
1513
+ self.length_scale_bounds = length_scale_bounds
1514
+
1515
+ @property
1516
+ def anisotropic(self):
1517
+ return np.iterable(self.length_scale) and len(self.length_scale) > 1
1518
+
1519
+ @property
1520
+ def hyperparameter_length_scale(self):
1521
+ if self.anisotropic:
1522
+ return Hyperparameter(
1523
+ "length_scale",
1524
+ "numeric",
1525
+ self.length_scale_bounds,
1526
+ len(self.length_scale),
1527
+ )
1528
+ return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
1529
+
1530
+ def __call__(self, X, Y=None, eval_gradient=False):
1531
+ """Return the kernel k(X, Y) and optionally its gradient.
1532
+
1533
+ Parameters
1534
+ ----------
1535
+ X : ndarray of shape (n_samples_X, n_features)
1536
+ Left argument of the returned kernel k(X, Y)
1537
+
1538
+ Y : ndarray of shape (n_samples_Y, n_features), default=None
1539
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1540
+ is evaluated instead.
1541
+
1542
+ eval_gradient : bool, default=False
1543
+ Determines whether the gradient with respect to the log of
1544
+ the kernel hyperparameter is computed.
1545
+ Only supported when Y is None.
1546
+
1547
+ Returns
1548
+ -------
1549
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1550
+ Kernel k(X, Y)
1551
+
1552
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
1553
+ optional
1554
+ The gradient of the kernel k(X, X) with respect to the log of the
1555
+ hyperparameter of the kernel. Only returned when `eval_gradient`
1556
+ is True.
1557
+ """
1558
+ X = np.atleast_2d(X)
1559
+ length_scale = _check_length_scale(X, self.length_scale)
1560
+ if Y is None:
1561
+ dists = pdist(X / length_scale, metric="sqeuclidean")
1562
+ K = np.exp(-0.5 * dists)
1563
+ # convert from upper-triangular matrix to square matrix
1564
+ K = squareform(K)
1565
+ np.fill_diagonal(K, 1)
1566
+ else:
1567
+ if eval_gradient:
1568
+ raise ValueError("Gradient can only be evaluated when Y is None.")
1569
+ dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
1570
+ K = np.exp(-0.5 * dists)
1571
+
1572
+ if eval_gradient:
1573
+ if self.hyperparameter_length_scale.fixed:
1574
+ # Hyperparameter l kept fixed
1575
+ return K, np.empty((X.shape[0], X.shape[0], 0))
1576
+ elif not self.anisotropic or length_scale.shape[0] == 1:
1577
+ K_gradient = (K * squareform(dists))[:, :, np.newaxis]
1578
+ return K, K_gradient
1579
+ elif self.anisotropic:
1580
+ # We need to recompute the pairwise dimension-wise distances
1581
+ K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
1582
+ length_scale**2
1583
+ )
1584
+ K_gradient *= K[..., np.newaxis]
1585
+ return K, K_gradient
1586
+ else:
1587
+ return K
1588
+
1589
+ def __repr__(self):
1590
+ if self.anisotropic:
1591
+ return "{0}(length_scale=[{1}])".format(
1592
+ self.__class__.__name__,
1593
+ ", ".join(map("{0:.3g}".format, self.length_scale)),
1594
+ )
1595
+ else: # isotropic
1596
+ return "{0}(length_scale={1:.3g})".format(
1597
+ self.__class__.__name__, np.ravel(self.length_scale)[0]
1598
+ )
1599
+
1600
+
1601
+ class Matern(RBF):
1602
+ """Matern kernel.
1603
+
1604
+ The class of Matern kernels is a generalization of the :class:`RBF`.
1605
+ It has an additional parameter :math:`\\nu` which controls the
1606
+ smoothness of the resulting function. The smaller :math:`\\nu`,
1607
+ the less smooth the approximated function is.
1608
+ As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to
1609
+ the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel
1610
+ becomes identical to the absolute exponential kernel.
1611
+ Important intermediate values are
1612
+ :math:`\\nu=1.5` (once differentiable functions)
1613
+ and :math:`\\nu=2.5` (twice differentiable functions).
1614
+
1615
+ The kernel is given by:
1616
+
1617
+ .. math::
1618
+ k(x_i, x_j) = \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg(
1619
+ \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )
1620
+ \\Bigg)^\\nu K_\\nu\\Bigg(
1621
+ \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )\\Bigg)
1622
+
1623
+
1624
+
1625
+ where :math:`d(\\cdot,\\cdot)` is the Euclidean distance,
1626
+ :math:`K_{\\nu}(\\cdot)` is a modified Bessel function and
1627
+ :math:`\\Gamma(\\cdot)` is the gamma function.
1628
+ See [1]_, Chapter 4, Section 4.2, for details regarding the different
1629
+ variants of the Matern kernel.
1630
+
1631
+ Read more in the :ref:`User Guide <gp_kernels>`.
1632
+
1633
+ .. versionadded:: 0.18
1634
+
1635
+ Parameters
1636
+ ----------
1637
+ length_scale : float or ndarray of shape (n_features,), default=1.0
1638
+ The length scale of the kernel. If a float, an isotropic kernel is
1639
+ used. If an array, an anisotropic kernel is used where each dimension
1640
+ of l defines the length-scale of the respective feature dimension.
1641
+
1642
+ length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1643
+ The lower and upper bound on 'length_scale'.
1644
+ If set to "fixed", 'length_scale' cannot be changed during
1645
+ hyperparameter tuning.
1646
+
1647
+ nu : float, default=1.5
1648
+ The parameter nu controlling the smoothness of the learned function.
1649
+ The smaller nu, the less smooth the approximated function is.
1650
+ For nu=inf, the kernel becomes equivalent to the RBF kernel and for
1651
+ nu=0.5 to the absolute exponential kernel. Important intermediate
1652
+ values are nu=1.5 (once differentiable functions) and nu=2.5
1653
+ (twice differentiable functions). Note that values of nu not in
1654
+ [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
1655
+ (appr. 10 times higher) since they require to evaluate the modified
1656
+ Bessel function. Furthermore, in contrast to l, nu is kept fixed to
1657
+ its initial value and not optimized.
1658
+
1659
+ References
1660
+ ----------
1661
+ .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
1662
+ "Gaussian Processes for Machine Learning". The MIT Press.
1663
+ <http://www.gaussianprocess.org/gpml/>`_
1664
+
1665
+ Examples
1666
+ --------
1667
+ >>> from sklearn.datasets import load_iris
1668
+ >>> from sklearn.gaussian_process import GaussianProcessClassifier
1669
+ >>> from sklearn.gaussian_process.kernels import Matern
1670
+ >>> X, y = load_iris(return_X_y=True)
1671
+ >>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
1672
+ >>> gpc = GaussianProcessClassifier(kernel=kernel,
1673
+ ... random_state=0).fit(X, y)
1674
+ >>> gpc.score(X, y)
1675
+ 0.9866...
1676
+ >>> gpc.predict_proba(X[:2,:])
1677
+ array([[0.8513..., 0.0368..., 0.1117...],
1678
+ [0.8086..., 0.0693..., 0.1220...]])
1679
+ """
1680
+
1681
+ def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5):
1682
+ super().__init__(length_scale, length_scale_bounds)
1683
+ self.nu = nu
1684
+
1685
+ def __call__(self, X, Y=None, eval_gradient=False):
1686
+ """Return the kernel k(X, Y) and optionally its gradient.
1687
+
1688
+ Parameters
1689
+ ----------
1690
+ X : ndarray of shape (n_samples_X, n_features)
1691
+ Left argument of the returned kernel k(X, Y)
1692
+
1693
+ Y : ndarray of shape (n_samples_Y, n_features), default=None
1694
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1695
+ is evaluated instead.
1696
+
1697
+ eval_gradient : bool, default=False
1698
+ Determines whether the gradient with respect to the log of
1699
+ the kernel hyperparameter is computed.
1700
+ Only supported when Y is None.
1701
+
1702
+ Returns
1703
+ -------
1704
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1705
+ Kernel k(X, Y)
1706
+
1707
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
1708
+ optional
1709
+ The gradient of the kernel k(X, X) with respect to the log of the
1710
+ hyperparameter of the kernel. Only returned when `eval_gradient`
1711
+ is True.
1712
+ """
1713
+ X = np.atleast_2d(X)
1714
+ length_scale = _check_length_scale(X, self.length_scale)
1715
+ if Y is None:
1716
+ dists = pdist(X / length_scale, metric="euclidean")
1717
+ else:
1718
+ if eval_gradient:
1719
+ raise ValueError("Gradient can only be evaluated when Y is None.")
1720
+ dists = cdist(X / length_scale, Y / length_scale, metric="euclidean")
1721
+
1722
+ if self.nu == 0.5:
1723
+ K = np.exp(-dists)
1724
+ elif self.nu == 1.5:
1725
+ K = dists * math.sqrt(3)
1726
+ K = (1.0 + K) * np.exp(-K)
1727
+ elif self.nu == 2.5:
1728
+ K = dists * math.sqrt(5)
1729
+ K = (1.0 + K + K**2 / 3.0) * np.exp(-K)
1730
+ elif self.nu == np.inf:
1731
+ K = np.exp(-(dists**2) / 2.0)
1732
+ else: # general case; expensive to evaluate
1733
+ K = dists
1734
+ K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
1735
+ tmp = math.sqrt(2 * self.nu) * K
1736
+ K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu))
1737
+ K *= tmp**self.nu
1738
+ K *= kv(self.nu, tmp)
1739
+
1740
+ if Y is None:
1741
+ # convert from upper-triangular matrix to square matrix
1742
+ K = squareform(K)
1743
+ np.fill_diagonal(K, 1)
1744
+
1745
+ if eval_gradient:
1746
+ if self.hyperparameter_length_scale.fixed:
1747
+ # Hyperparameter l kept fixed
1748
+ K_gradient = np.empty((X.shape[0], X.shape[0], 0))
1749
+ return K, K_gradient
1750
+
1751
+ # We need to recompute the pairwise dimension-wise distances
1752
+ if self.anisotropic:
1753
+ D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
1754
+ length_scale**2
1755
+ )
1756
+ else:
1757
+ D = squareform(dists**2)[:, :, np.newaxis]
1758
+
1759
+ if self.nu == 0.5:
1760
+ denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
1761
+ divide_result = np.zeros_like(D)
1762
+ np.divide(
1763
+ D,
1764
+ denominator,
1765
+ out=divide_result,
1766
+ where=denominator != 0,
1767
+ )
1768
+ K_gradient = K[..., np.newaxis] * divide_result
1769
+ elif self.nu == 1.5:
1770
+ K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
1771
+ elif self.nu == 2.5:
1772
+ tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
1773
+ K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
1774
+ elif self.nu == np.inf:
1775
+ K_gradient = D * K[..., np.newaxis]
1776
+ else:
1777
+ # approximate gradient numerically
1778
+ def f(theta): # helper function
1779
+ return self.clone_with_theta(theta)(X, Y)
1780
+
1781
+ return K, _approx_fprime(self.theta, f, 1e-10)
1782
+
1783
+ if not self.anisotropic:
1784
+ return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
1785
+ else:
1786
+ return K, K_gradient
1787
+ else:
1788
+ return K
1789
+
1790
+ def __repr__(self):
1791
+ if self.anisotropic:
1792
+ return "{0}(length_scale=[{1}], nu={2:.3g})".format(
1793
+ self.__class__.__name__,
1794
+ ", ".join(map("{0:.3g}".format, self.length_scale)),
1795
+ self.nu,
1796
+ )
1797
+ else:
1798
+ return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
1799
+ self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu
1800
+ )
1801
+
1802
+
1803
+ class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
1804
+ """Rational Quadratic kernel.
1805
+
1806
+ The RationalQuadratic kernel can be seen as a scale mixture (an infinite
1807
+ sum) of RBF kernels with different characteristic length scales. It is
1808
+ parameterized by a length scale parameter :math:`l>0` and a scale
1809
+ mixture parameter :math:`\\alpha>0`. Only the isotropic variant
1810
+ where length_scale :math:`l` is a scalar is supported at the moment.
1811
+ The kernel is given by:
1812
+
1813
+ .. math::
1814
+ k(x_i, x_j) = \\left(
1815
+ 1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha l^2}\\right)^{-\\alpha}
1816
+
1817
+ where :math:`\\alpha` is the scale mixture parameter, :math:`l` is
1818
+ the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the
1819
+ Euclidean distance.
1820
+ For advice on how to set the parameters, see e.g. [1]_.
1821
+
1822
+ Read more in the :ref:`User Guide <gp_kernels>`.
1823
+
1824
+ .. versionadded:: 0.18
1825
+
1826
+ Parameters
1827
+ ----------
1828
+ length_scale : float > 0, default=1.0
1829
+ The length scale of the kernel.
1830
+
1831
+ alpha : float > 0, default=1.0
1832
+ Scale mixture parameter
1833
+
1834
+ length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1835
+ The lower and upper bound on 'length_scale'.
1836
+ If set to "fixed", 'length_scale' cannot be changed during
1837
+ hyperparameter tuning.
1838
+
1839
+ alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1840
+ The lower and upper bound on 'alpha'.
1841
+ If set to "fixed", 'alpha' cannot be changed during
1842
+ hyperparameter tuning.
1843
+
1844
+ References
1845
+ ----------
1846
+ .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
1847
+ Advice on Covariance functions".
1848
+ <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
1849
+
1850
+ Examples
1851
+ --------
1852
+ >>> from sklearn.datasets import load_iris
1853
+ >>> from sklearn.gaussian_process import GaussianProcessClassifier
1854
+ >>> from sklearn.gaussian_process.kernels import RationalQuadratic
1855
+ >>> X, y = load_iris(return_X_y=True)
1856
+ >>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
1857
+ >>> gpc = GaussianProcessClassifier(kernel=kernel,
1858
+ ... random_state=0).fit(X, y)
1859
+ >>> gpc.score(X, y)
1860
+ 0.9733...
1861
+ >>> gpc.predict_proba(X[:2,:])
1862
+ array([[0.8881..., 0.0566..., 0.05518...],
1863
+ [0.8678..., 0.0707... , 0.0614...]])
1864
+ """
1865
+
1866
+ def __init__(
1867
+ self,
1868
+ length_scale=1.0,
1869
+ alpha=1.0,
1870
+ length_scale_bounds=(1e-5, 1e5),
1871
+ alpha_bounds=(1e-5, 1e5),
1872
+ ):
1873
+ self.length_scale = length_scale
1874
+ self.alpha = alpha
1875
+ self.length_scale_bounds = length_scale_bounds
1876
+ self.alpha_bounds = alpha_bounds
1877
+
1878
+ @property
1879
+ def hyperparameter_length_scale(self):
1880
+ return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
1881
+
1882
+ @property
1883
+ def hyperparameter_alpha(self):
1884
+ return Hyperparameter("alpha", "numeric", self.alpha_bounds)
1885
+
1886
+ def __call__(self, X, Y=None, eval_gradient=False):
1887
+ """Return the kernel k(X, Y) and optionally its gradient.
1888
+
1889
+ Parameters
1890
+ ----------
1891
+ X : ndarray of shape (n_samples_X, n_features)
1892
+ Left argument of the returned kernel k(X, Y)
1893
+
1894
+ Y : ndarray of shape (n_samples_Y, n_features), default=None
1895
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1896
+ is evaluated instead.
1897
+
1898
+ eval_gradient : bool, default=False
1899
+ Determines whether the gradient with respect to the log of
1900
+ the kernel hyperparameter is computed.
1901
+ Only supported when Y is None.
1902
+
1903
+ Returns
1904
+ -------
1905
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1906
+ Kernel k(X, Y)
1907
+
1908
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
1909
+ The gradient of the kernel k(X, X) with respect to the log of the
1910
+ hyperparameter of the kernel. Only returned when eval_gradient
1911
+ is True.
1912
+ """
1913
+ if len(np.atleast_1d(self.length_scale)) > 1:
1914
+ raise AttributeError(
1915
+ "RationalQuadratic kernel only supports isotropic version, "
1916
+ "please use a single scalar for length_scale"
1917
+ )
1918
+ X = np.atleast_2d(X)
1919
+ if Y is None:
1920
+ dists = squareform(pdist(X, metric="sqeuclidean"))
1921
+ tmp = dists / (2 * self.alpha * self.length_scale**2)
1922
+ base = 1 + tmp
1923
+ K = base**-self.alpha
1924
+ np.fill_diagonal(K, 1)
1925
+ else:
1926
+ if eval_gradient:
1927
+ raise ValueError("Gradient can only be evaluated when Y is None.")
1928
+ dists = cdist(X, Y, metric="sqeuclidean")
1929
+ K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha
1930
+
1931
+ if eval_gradient:
1932
+ # gradient with respect to length_scale
1933
+ if not self.hyperparameter_length_scale.fixed:
1934
+ length_scale_gradient = dists * K / (self.length_scale**2 * base)
1935
+ length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
1936
+ else: # l is kept fixed
1937
+ length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
1938
+
1939
+ # gradient with respect to alpha
1940
+ if not self.hyperparameter_alpha.fixed:
1941
+ alpha_gradient = K * (
1942
+ -self.alpha * np.log(base)
1943
+ + dists / (2 * self.length_scale**2 * base)
1944
+ )
1945
+ alpha_gradient = alpha_gradient[:, :, np.newaxis]
1946
+ else: # alpha is kept fixed
1947
+ alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
1948
+
1949
+ return K, np.dstack((alpha_gradient, length_scale_gradient))
1950
+ else:
1951
+ return K
1952
+
1953
+ def __repr__(self):
1954
+ return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
1955
+ self.__class__.__name__, self.alpha, self.length_scale
1956
+ )
1957
+
1958
+
1959
+ class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
1960
+ r"""Exp-Sine-Squared kernel (aka periodic kernel).
1961
+
1962
+ The ExpSineSquared kernel allows one to model functions which repeat
1963
+ themselves exactly. It is parameterized by a length scale
1964
+ parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
1965
+ Only the isotropic variant where :math:`l` is a scalar is
1966
+ supported at the moment. The kernel is given by:
1967
+
1968
+ .. math::
1969
+ k(x_i, x_j) = \text{exp}\left(-
1970
+ \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^2} \right)
1971
+
1972
+ where :math:`l` is the length scale of the kernel, :math:`p` the
1973
+ periodicity of the kernel and :math:`d(\cdot,\cdot)` is the
1974
+ Euclidean distance.
1975
+
1976
+ Read more in the :ref:`User Guide <gp_kernels>`.
1977
+
1978
+ .. versionadded:: 0.18
1979
+
1980
+ Parameters
1981
+ ----------
1982
+
1983
+ length_scale : float > 0, default=1.0
1984
+ The length scale of the kernel.
1985
+
1986
+ periodicity : float > 0, default=1.0
1987
+ The periodicity of the kernel.
1988
+
1989
+ length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1990
+ The lower and upper bound on 'length_scale'.
1991
+ If set to "fixed", 'length_scale' cannot be changed during
1992
+ hyperparameter tuning.
1993
+
1994
+ periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1995
+ The lower and upper bound on 'periodicity'.
1996
+ If set to "fixed", 'periodicity' cannot be changed during
1997
+ hyperparameter tuning.
1998
+
1999
+ Examples
2000
+ --------
2001
+ >>> from sklearn.datasets import make_friedman2
2002
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
2003
+ >>> from sklearn.gaussian_process.kernels import ExpSineSquared
2004
+ >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
2005
+ >>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
2006
+ >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
2007
+ ... random_state=0).fit(X, y)
2008
+ >>> gpr.score(X, y)
2009
+ 0.0144...
2010
+ >>> gpr.predict(X[:2,:], return_std=True)
2011
+ (array([425.6..., 457.5...]), array([0.3894..., 0.3467...]))
2012
+ """
2013
+
2014
+ def __init__(
2015
+ self,
2016
+ length_scale=1.0,
2017
+ periodicity=1.0,
2018
+ length_scale_bounds=(1e-5, 1e5),
2019
+ periodicity_bounds=(1e-5, 1e5),
2020
+ ):
2021
+ self.length_scale = length_scale
2022
+ self.periodicity = periodicity
2023
+ self.length_scale_bounds = length_scale_bounds
2024
+ self.periodicity_bounds = periodicity_bounds
2025
+
2026
+ @property
2027
+ def hyperparameter_length_scale(self):
2028
+ """Returns the length scale"""
2029
+ return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
2030
+
2031
+ @property
2032
+ def hyperparameter_periodicity(self):
2033
+ return Hyperparameter("periodicity", "numeric", self.periodicity_bounds)
2034
+
2035
+ def __call__(self, X, Y=None, eval_gradient=False):
2036
+ """Return the kernel k(X, Y) and optionally its gradient.
2037
+
2038
+ Parameters
2039
+ ----------
2040
+ X : ndarray of shape (n_samples_X, n_features)
2041
+ Left argument of the returned kernel k(X, Y)
2042
+
2043
+ Y : ndarray of shape (n_samples_Y, n_features), default=None
2044
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
2045
+ is evaluated instead.
2046
+
2047
+ eval_gradient : bool, default=False
2048
+ Determines whether the gradient with respect to the log of
2049
+ the kernel hyperparameter is computed.
2050
+ Only supported when Y is None.
2051
+
2052
+ Returns
2053
+ -------
2054
+ K : ndarray of shape (n_samples_X, n_samples_Y)
2055
+ Kernel k(X, Y)
2056
+
2057
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
2058
+ optional
2059
+ The gradient of the kernel k(X, X) with respect to the log of the
2060
+ hyperparameter of the kernel. Only returned when `eval_gradient`
2061
+ is True.
2062
+ """
2063
+ X = np.atleast_2d(X)
2064
+ if Y is None:
2065
+ dists = squareform(pdist(X, metric="euclidean"))
2066
+ arg = np.pi * dists / self.periodicity
2067
+ sin_of_arg = np.sin(arg)
2068
+ K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)
2069
+ else:
2070
+ if eval_gradient:
2071
+ raise ValueError("Gradient can only be evaluated when Y is None.")
2072
+ dists = cdist(X, Y, metric="euclidean")
2073
+ K = np.exp(
2074
+ -2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2
2075
+ )
2076
+
2077
+ if eval_gradient:
2078
+ cos_of_arg = np.cos(arg)
2079
+ # gradient with respect to length_scale
2080
+ if not self.hyperparameter_length_scale.fixed:
2081
+ length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K
2082
+ length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
2083
+ else: # length_scale is kept fixed
2084
+ length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
2085
+ # gradient with respect to p
2086
+ if not self.hyperparameter_periodicity.fixed:
2087
+ periodicity_gradient = (
2088
+ 4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K
2089
+ )
2090
+ periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
2091
+ else: # p is kept fixed
2092
+ periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
2093
+
2094
+ return K, np.dstack((length_scale_gradient, periodicity_gradient))
2095
+ else:
2096
+ return K
2097
+
2098
+ def __repr__(self):
2099
+ return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
2100
+ self.__class__.__name__, self.length_scale, self.periodicity
2101
+ )
2102
+
2103
+
2104
+ class DotProduct(Kernel):
2105
+ r"""Dot-Product kernel.
2106
+
2107
+ The DotProduct kernel is non-stationary and can be obtained from linear
2108
+ regression by putting :math:`N(0, 1)` priors on the coefficients
2109
+ of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
2110
+ on the bias. The DotProduct kernel is invariant to a rotation of
2111
+ the coordinates about the origin, but not translations.
2112
+ It is parameterized by a parameter sigma_0 :math:`\sigma_0`
2113
+ which controls the inhomogeneity of the kernel. For :math:`\sigma_0^2 = 0`,
2114
+ the kernel is called the homogeneous linear kernel, otherwise
2115
+ it is inhomogeneous. The kernel is given by
2116
+
2117
+ .. math::
2118
+ k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j
2119
+
2120
+ The DotProduct kernel is commonly combined with exponentiation.
2121
+
2122
+ See [1]_, Chapter 4, Section 4.2, for further details regarding the
2123
+ DotProduct kernel.
2124
+
2125
+ Read more in the :ref:`User Guide <gp_kernels>`.
2126
+
2127
+ .. versionadded:: 0.18
2128
+
2129
+ Parameters
2130
+ ----------
2131
+ sigma_0 : float >= 0, default=1.0
2132
+ Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
2133
+ the kernel is homogeneous.
2134
+
2135
+ sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
2136
+ The lower and upper bound on 'sigma_0'.
2137
+ If set to "fixed", 'sigma_0' cannot be changed during
2138
+ hyperparameter tuning.
2139
+
2140
+ References
2141
+ ----------
2142
+ .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
2143
+ "Gaussian Processes for Machine Learning". The MIT Press.
2144
+ <http://www.gaussianprocess.org/gpml/>`_
2145
+
2146
+ Examples
2147
+ --------
2148
+ >>> from sklearn.datasets import make_friedman2
2149
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
2150
+ >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
2151
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
2152
+ >>> kernel = DotProduct() + WhiteKernel()
2153
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
2154
+ ... random_state=0).fit(X, y)
2155
+ >>> gpr.score(X, y)
2156
+ 0.3680...
2157
+ >>> gpr.predict(X[:2,:], return_std=True)
2158
+ (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
2159
+ """
2160
+
2161
+ def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
2162
+ self.sigma_0 = sigma_0
2163
+ self.sigma_0_bounds = sigma_0_bounds
2164
+
2165
+ @property
2166
+ def hyperparameter_sigma_0(self):
2167
+ return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
2168
+
2169
+ def __call__(self, X, Y=None, eval_gradient=False):
2170
+ """Return the kernel k(X, Y) and optionally its gradient.
2171
+
2172
+ Parameters
2173
+ ----------
2174
+ X : ndarray of shape (n_samples_X, n_features)
2175
+ Left argument of the returned kernel k(X, Y)
2176
+
2177
+ Y : ndarray of shape (n_samples_Y, n_features), default=None
2178
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
2179
+ is evaluated instead.
2180
+
2181
+ eval_gradient : bool, default=False
2182
+ Determines whether the gradient with respect to the log of
2183
+ the kernel hyperparameter is computed.
2184
+ Only supported when Y is None.
2185
+
2186
+ Returns
2187
+ -------
2188
+ K : ndarray of shape (n_samples_X, n_samples_Y)
2189
+ Kernel k(X, Y)
2190
+
2191
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
2192
+ optional
2193
+ The gradient of the kernel k(X, X) with respect to the log of the
2194
+ hyperparameter of the kernel. Only returned when `eval_gradient`
2195
+ is True.
2196
+ """
2197
+ X = np.atleast_2d(X)
2198
+ if Y is None:
2199
+ K = np.inner(X, X) + self.sigma_0**2
2200
+ else:
2201
+ if eval_gradient:
2202
+ raise ValueError("Gradient can only be evaluated when Y is None.")
2203
+ K = np.inner(X, Y) + self.sigma_0**2
2204
+
2205
+ if eval_gradient:
2206
+ if not self.hyperparameter_sigma_0.fixed:
2207
+ K_gradient = np.empty((K.shape[0], K.shape[1], 1))
2208
+ K_gradient[..., 0] = 2 * self.sigma_0**2
2209
+ return K, K_gradient
2210
+ else:
2211
+ return K, np.empty((X.shape[0], X.shape[0], 0))
2212
+ else:
2213
+ return K
2214
+
2215
+ def diag(self, X):
2216
+ """Returns the diagonal of the kernel k(X, X).
2217
+
2218
+ The result of this method is identical to np.diag(self(X)); however,
2219
+ it can be evaluated more efficiently since only the diagonal is
2220
+ evaluated.
2221
+
2222
+ Parameters
2223
+ ----------
2224
+ X : ndarray of shape (n_samples_X, n_features)
2225
+ Left argument of the returned kernel k(X, Y).
2226
+
2227
+ Returns
2228
+ -------
2229
+ K_diag : ndarray of shape (n_samples_X,)
2230
+ Diagonal of kernel k(X, X).
2231
+ """
2232
+ return np.einsum("ij,ij->i", X, X) + self.sigma_0**2
2233
+
2234
+ def is_stationary(self):
2235
+ """Returns whether the kernel is stationary."""
2236
+ return False
2237
+
2238
+ def __repr__(self):
2239
+ return "{0}(sigma_0={1:.3g})".format(self.__class__.__name__, self.sigma_0)
2240
+
2241
+
2242
+ # adapted from scipy/optimize/optimize.py for functions with 2d output
2243
+ def _approx_fprime(xk, f, epsilon, args=()):
2244
+ f0 = f(*((xk,) + args))
2245
+ grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
2246
+ ei = np.zeros((len(xk),), float)
2247
+ for k in range(len(xk)):
2248
+ ei[k] = 1.0
2249
+ d = epsilon * ei
2250
+ grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
2251
+ ei[k] = 0.0
2252
+ return grad
2253
+
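`_approx_fprime` backs the numeric-gradient fallbacks used above (the general-`nu` branch of `Matern.__call__`) and below (`PairwiseKernel.__call__`): it perturbs each log-hyperparameter in turn and differences the resulting kernel matrices. A minimal sketch of checking an analytic gradient against it (random data and the tolerance are arbitrary; the private helper is assumed to stay importable from this module):

    import numpy as np
    from sklearn.gaussian_process.kernels import RBF, _approx_fprime  # private helper above

    rng = np.random.RandomState(0)
    X = rng.randn(6, 2)

    kernel = RBF(length_scale=1.0)
    K, K_grad = kernel(X, eval_gradient=True)  # analytic gradient w.r.t. log(length_scale)

    # same call pattern as in Matern.__call__ above
    K_grad_num = _approx_fprime(
        kernel.theta, lambda theta: kernel.clone_with_theta(theta)(X), 1e-10
    )
    assert np.allclose(K_grad, K_grad_num, atol=1e-4)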
2254
+
2255
+ class PairwiseKernel(Kernel):
2256
+ """Wrapper for kernels in sklearn.metrics.pairwise.
2257
+
2258
+ A thin wrapper around the functionality of the kernels in
2259
+ sklearn.metrics.pairwise.
2260
+
2261
+ Note: Evaluation of eval_gradient is not analytic but numeric and all
2262
+ kernels support only isotropic distances. The parameter gamma is
2263
+ considered to be a hyperparameter and may be optimized. The other
2264
+ kernel parameters are set directly at initialization and are kept
2265
+ fixed.
2266
+
2267
+ .. versionadded:: 0.18
2268
+
2269
+ Parameters
2270
+ ----------
2271
+ gamma : float, default=1.0
2272
+ Parameter gamma of the pairwise kernel specified by metric. It should
2273
+ be positive.
2274
+
2275
+ gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
2276
+ The lower and upper bound on 'gamma'.
2277
+ If set to "fixed", 'gamma' cannot be changed during
2278
+ hyperparameter tuning.
2279
+
2280
+ metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \
2281
+ "rbf", "laplacian", "sigmoid", "cosine"} or callable, \
2282
+ default="linear"
2283
+ The metric to use when calculating kernel between instances in a
2284
+ feature array. If metric is a string, it must be one of the metrics
2285
+ in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
2286
+ If metric is "precomputed", X is assumed to be a kernel matrix.
2287
+ Alternatively, if metric is a callable function, it is called on each
2288
+ pair of instances (rows) and the resulting value recorded. The callable
2289
+ should take two arrays from X as input and return a value indicating
2290
+ the distance between them.
2291
+
2292
+ pairwise_kernels_kwargs : dict, default=None
2293
+ All entries of this dict (if any) are passed as keyword arguments to
2294
+ the pairwise kernel function.
2295
+
2296
+ Examples
2297
+ --------
2298
+ >>> from sklearn.datasets import load_iris
2299
+ >>> from sklearn.gaussian_process import GaussianProcessClassifier
2300
+ >>> from sklearn.gaussian_process.kernels import PairwiseKernel
2301
+ >>> X, y = load_iris(return_X_y=True)
2302
+ >>> kernel = PairwiseKernel(metric='rbf')
2303
+ >>> gpc = GaussianProcessClassifier(kernel=kernel,
2304
+ ... random_state=0).fit(X, y)
2305
+ >>> gpc.score(X, y)
2306
+ 0.9733...
2307
+ >>> gpc.predict_proba(X[:2,:])
2308
+ array([[0.8880..., 0.05663..., 0.05532...],
2309
+ [0.8676..., 0.07073..., 0.06165...]])
2310
+ """
2311
+
2312
+ def __init__(
2313
+ self,
2314
+ gamma=1.0,
2315
+ gamma_bounds=(1e-5, 1e5),
2316
+ metric="linear",
2317
+ pairwise_kernels_kwargs=None,
2318
+ ):
2319
+ self.gamma = gamma
2320
+ self.gamma_bounds = gamma_bounds
2321
+ self.metric = metric
2322
+ self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
2323
+
2324
+ @property
2325
+ def hyperparameter_gamma(self):
2326
+ return Hyperparameter("gamma", "numeric", self.gamma_bounds)
2327
+
2328
+ def __call__(self, X, Y=None, eval_gradient=False):
2329
+ """Return the kernel k(X, Y) and optionally its gradient.
2330
+
2331
+ Parameters
2332
+ ----------
2333
+ X : ndarray of shape (n_samples_X, n_features)
2334
+ Left argument of the returned kernel k(X, Y)
2335
+
2336
+ Y : ndarray of shape (n_samples_Y, n_features), default=None
2337
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
2338
+ is evaluated instead.
2339
+
2340
+ eval_gradient : bool, default=False
2341
+ Determines whether the gradient with respect to the log of
2342
+ the kernel hyperparameter is computed.
2343
+ Only supported when Y is None.
2344
+
2345
+ Returns
2346
+ -------
2347
+ K : ndarray of shape (n_samples_X, n_samples_Y)
2348
+ Kernel k(X, Y)
2349
+
2350
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
2351
+ optional
2352
+ The gradient of the kernel k(X, X) with respect to the log of the
2353
+ hyperparameter of the kernel. Only returned when `eval_gradient`
2354
+ is True.
2355
+ """
2356
+ pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
2357
+ if self.pairwise_kernels_kwargs is None:
2358
+ pairwise_kernels_kwargs = {}
2359
+
2360
+ X = np.atleast_2d(X)
2361
+ K = pairwise_kernels(
2362
+ X,
2363
+ Y,
2364
+ metric=self.metric,
2365
+ gamma=self.gamma,
2366
+ filter_params=True,
2367
+ **pairwise_kernels_kwargs,
2368
+ )
2369
+ if eval_gradient:
2370
+ if self.hyperparameter_gamma.fixed:
2371
+ return K, np.empty((X.shape[0], X.shape[0], 0))
2372
+ else:
2373
+ # approximate gradient numerically
2374
+ def f(gamma): # helper function
2375
+ return pairwise_kernels(
2376
+ X,
2377
+ Y,
2378
+ metric=self.metric,
2379
+ gamma=np.exp(gamma),
2380
+ filter_params=True,
2381
+ **pairwise_kernels_kwargs,
2382
+ )
2383
+
2384
+ return K, _approx_fprime(self.theta, f, 1e-10)
2385
+ else:
2386
+ return K
2387
+
2388
+ def diag(self, X):
2389
+ """Returns the diagonal of the kernel k(X, X).
2390
+
2391
+ The result of this method is identical to np.diag(self(X)); however,
2392
+ it can be evaluated more efficiently since only the diagonal is
2393
+ evaluated.
2394
+
2395
+ Parameters
2396
+ ----------
2397
+ X : ndarray of shape (n_samples_X, n_features)
2398
+ Left argument of the returned kernel k(X, Y)
2399
+
2400
+ Returns
2401
+ -------
2402
+ K_diag : ndarray of shape (n_samples_X,)
2403
+ Diagonal of kernel k(X, X)
2404
+ """
2405
+ # We have to fall back to slow way of computing diagonal
2406
+ return np.apply_along_axis(self, 1, X).ravel()
2407
+
2408
+ def is_stationary(self):
2409
+ """Returns whether the kernel is stationary."""
2410
+ return self.metric in ["rbf"]
2411
+
2412
+ def __repr__(self):
2413
+ return "{0}(gamma={1}, metric={2})".format(
2414
+ self.__class__.__name__, self.gamma, self.metric
2415
+ )
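End of `kernels.py`. In practice the classes above are composed through the `+`, `*` and `**` operators rather than used in isolation; a minimal usage sketch (synthetic 1-D data and arbitrary hyperparameters, not taken from this file):

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel

    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, size=(30, 1))
    y = np.sin(X).ravel() + 0.1 * rng.randn(30)

    # amplitude * smooth component + independent observation noise
    kernel = ConstantKernel(1.0, (1e-3, 1e3)) * RBF(length_scale=1.0) + WhiteKernel(1e-2)
    gpr = GaussianProcessRegressor(kernel=kernel, random_state=0).fit(X, y)
    print(gpr.kernel_)                              # fitted kernel, hyperparameters optimized
    mean, std = gpr.predict(X[:3], return_std=True)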
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes).
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc ADDED
Binary file (3.17 kB).
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc ADDED
Binary file (8.28 kB).
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc ADDED
Binary file (22.9 kB).
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc ADDED
Binary file (9.4 kB).
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py ADDED
@@ -0,0 +1,54 @@
1
+ import numpy as np
2
+
3
+ from sklearn.base import clone
4
+ from sklearn.gaussian_process.kernels import (
5
+ GenericKernelMixin,
6
+ Hyperparameter,
7
+ Kernel,
8
+ StationaryKernelMixin,
9
+ )
10
+
11
+
12
+ class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel):
13
+ """
14
+ A minimal (but valid) convolutional kernel for sequences of variable
15
+ length.
16
+ """
17
+
18
+ def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
19
+ self.baseline_similarity = baseline_similarity
20
+ self.baseline_similarity_bounds = baseline_similarity_bounds
21
+
22
+ @property
23
+ def hyperparameter_baseline_similarity(self):
24
+ return Hyperparameter(
25
+ "baseline_similarity", "numeric", self.baseline_similarity_bounds
26
+ )
27
+
28
+ def _f(self, s1, s2):
29
+ return sum(
30
+ [1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2]
31
+ )
32
+
33
+ def _g(self, s1, s2):
34
+ return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2])
35
+
36
+ def __call__(self, X, Y=None, eval_gradient=False):
37
+ if Y is None:
38
+ Y = X
39
+
40
+ if eval_gradient:
41
+ return (
42
+ np.array([[self._f(x, y) for y in Y] for x in X]),
43
+ np.array([[[self._g(x, y)] for y in Y] for x in X]),
44
+ )
45
+ else:
46
+ return np.array([[self._f(x, y) for y in Y] for x in X])
47
+
48
+ def diag(self, X):
49
+ return np.array([self._f(x, x) for x in X])
50
+
51
+ def clone_with_theta(self, theta):
52
+ cloned = clone(self)
53
+ cloned.theta = theta
54
+ return cloned
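A minimal standalone sketch of how this helper kernel is exercised on variable-length string inputs (mirroring `test_predict_consistent_structured` in `test_gpc.py` below):

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessClassifier
    from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel

    X = ["A", "AB", "B"]
    y = np.array([True, False, True])

    # "fixed" bounds keep the single hyperparameter out of gradient-based optimization
    kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    print(gpc.predict(X), gpc.predict_proba(X)[:, 1])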
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py ADDED
@@ -0,0 +1,288 @@
1
+ """Testing for Gaussian process classification """
2
+
3
+ # Author: Jan Hendrik Metzen <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytest
10
+ from scipy.optimize import approx_fprime
11
+
12
+ from sklearn.exceptions import ConvergenceWarning
13
+ from sklearn.gaussian_process import GaussianProcessClassifier
14
+ from sklearn.gaussian_process.kernels import (
15
+ RBF,
16
+ CompoundKernel,
17
+ WhiteKernel,
18
+ )
19
+ from sklearn.gaussian_process.kernels import (
20
+ ConstantKernel as C,
21
+ )
22
+ from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
23
+ from sklearn.utils._testing import assert_almost_equal, assert_array_equal
24
+
25
+
26
+ def f(x):
27
+ return np.sin(x)
28
+
29
+
30
+ X = np.atleast_2d(np.linspace(0, 10, 30)).T
31
+ X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
32
+ y = np.array(f(X).ravel() > 0, dtype=int)
33
+ fX = f(X).ravel()
34
+ y_mc = np.empty(y.shape, dtype=int) # multi-class
35
+ y_mc[fX < -0.35] = 0
36
+ y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
37
+ y_mc[fX > 0.35] = 2
38
+
39
+
40
+ fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
41
+ kernels = [
42
+ RBF(length_scale=0.1),
43
+ fixed_kernel,
44
+ RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
45
+ C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
46
+ ]
47
+ non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]
48
+
49
+
50
+ @pytest.mark.parametrize("kernel", kernels)
51
+ def test_predict_consistent(kernel):
52
+ # Check binary predict decision has also predicted probability above 0.5.
53
+ gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
54
+ assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)
55
+
56
+
57
+ def test_predict_consistent_structured():
58
+ # Check binary predict decision has also predicted probability above 0.5.
59
+ X = ["A", "AB", "B"]
60
+ y = np.array([True, False, True])
61
+ kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
62
+ gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
63
+ assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)
64
+
65
+
66
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
67
+ def test_lml_improving(kernel):
68
+ # Test that hyperparameter-tuning improves log-marginal likelihood.
69
+ gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
70
+ assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood(
71
+ kernel.theta
72
+ )
73
+
74
+
75
+ @pytest.mark.parametrize("kernel", kernels)
76
+ def test_lml_precomputed(kernel):
77
+ # Test that lml of optimized kernel is stored correctly.
78
+ gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
79
+ assert_almost_equal(
80
+ gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7
81
+ )
82
+
83
+
84
+ @pytest.mark.parametrize("kernel", kernels)
85
+ def test_lml_without_cloning_kernel(kernel):
86
+ # Test that clone_kernel=False has side effects on kernel.theta.
87
+ gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
88
+ input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64)
89
+
90
+ gpc.log_marginal_likelihood(input_theta, clone_kernel=False)
91
+ assert_almost_equal(gpc.kernel_.theta, input_theta, 7)
92
+
93
+
94
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
95
+ def test_converged_to_local_maximum(kernel):
96
+ # Test that we are in local maximum after hyperparameter-optimization.
97
+ gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
98
+
99
+ lml, lml_gradient = gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
100
+
101
+ assert np.all(
102
+ (np.abs(lml_gradient) < 1e-4)
103
+ | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0])
104
+ | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])
105
+ )
106
+
107
+
108
+ @pytest.mark.parametrize("kernel", kernels)
109
+ def test_lml_gradient(kernel):
110
+ # Compare analytic and numeric gradient of log marginal likelihood.
111
+ gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
112
+
113
+ lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
114
+ lml_gradient_approx = approx_fprime(
115
+ kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10
116
+ )
117
+
118
+ assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
119
+
120
+
121
+ def test_random_starts(global_random_seed):
122
+ # Test that an increasing number of random-starts of GP fitting only
123
+ # increases the log marginal likelihood of the chosen theta.
124
+ n_samples, n_features = 25, 2
125
+ rng = np.random.RandomState(global_random_seed)
126
+ X = rng.randn(n_samples, n_features) * 2 - 1
127
+ y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
128
+
129
+ kernel = C(1.0, (1e-2, 1e2)) * RBF(
130
+ length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
131
+ )
132
+ last_lml = -np.inf
133
+ for n_restarts_optimizer in range(5):
134
+ gp = GaussianProcessClassifier(
135
+ kernel=kernel,
136
+ n_restarts_optimizer=n_restarts_optimizer,
137
+ random_state=global_random_seed,
138
+ ).fit(X, y)
139
+ lml = gp.log_marginal_likelihood(gp.kernel_.theta)
140
+ assert lml > last_lml - np.finfo(np.float32).eps
141
+ last_lml = lml
142
+
143
+
144
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
145
+ def test_custom_optimizer(kernel, global_random_seed):
146
+ # Test that GPC can use externally defined optimizers.
147
+ # Define a dummy optimizer that simply tests 10 random hyperparameters
148
+ def optimizer(obj_func, initial_theta, bounds):
149
+ rng = np.random.RandomState(global_random_seed)
150
+ theta_opt, func_min = initial_theta, obj_func(
151
+ initial_theta, eval_gradient=False
152
+ )
153
+ for _ in range(10):
154
+ theta = np.atleast_1d(
155
+ rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
156
+ )
157
+ f = obj_func(theta, eval_gradient=False)
158
+ if f < func_min:
159
+ theta_opt, func_min = theta, f
160
+ return theta_opt, func_min
161
+
162
+ gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
163
+ gpc.fit(X, y_mc)
164
+ # Checks that optimizer improved marginal likelihood
165
+ assert gpc.log_marginal_likelihood(
166
+ gpc.kernel_.theta
167
+ ) >= gpc.log_marginal_likelihood(kernel.theta)
168
+
169
+
170
+ @pytest.mark.parametrize("kernel", kernels)
171
+ def test_multi_class(kernel):
172
+ # Test GPC for multi-class classification problems.
173
+ gpc = GaussianProcessClassifier(kernel=kernel)
174
+ gpc.fit(X, y_mc)
175
+
176
+ y_prob = gpc.predict_proba(X2)
177
+ assert_almost_equal(y_prob.sum(1), 1)
178
+
179
+ y_pred = gpc.predict(X2)
180
+ assert_array_equal(np.argmax(y_prob, 1), y_pred)
181
+
182
+
183
+ @pytest.mark.parametrize("kernel", kernels)
184
+ def test_multi_class_n_jobs(kernel):
185
+ # Test that multi-class GPC produces identical results with n_jobs>1.
186
+ gpc = GaussianProcessClassifier(kernel=kernel)
187
+ gpc.fit(X, y_mc)
188
+
189
+ gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
190
+ gpc_2.fit(X, y_mc)
191
+
192
+ y_prob = gpc.predict_proba(X2)
193
+ y_prob_2 = gpc_2.predict_proba(X2)
194
+ assert_almost_equal(y_prob, y_prob_2)
195
+
196
+
197
+ def test_warning_bounds():
198
+ kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
199
+ gpc = GaussianProcessClassifier(kernel=kernel)
200
+ warning_message = (
201
+ "The optimal value found for dimension 0 of parameter "
202
+ "length_scale is close to the specified upper bound "
203
+ "0.001. Increasing the bound and calling fit again may "
204
+ "find a better value."
205
+ )
206
+ with pytest.warns(ConvergenceWarning, match=warning_message):
207
+ gpc.fit(X, y)
208
+
209
+ kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
210
+ length_scale_bounds=[1e3, 1e5]
211
+ )
212
+ gpc_sum = GaussianProcessClassifier(kernel=kernel_sum)
213
+ with warnings.catch_warnings(record=True) as record:
214
+ warnings.simplefilter("always")
215
+ gpc_sum.fit(X, y)
216
+
217
+ assert len(record) == 2
218
+
219
+ assert issubclass(record[0].category, ConvergenceWarning)
220
+ assert (
221
+ record[0].message.args[0]
222
+ == "The optimal value found for "
223
+ "dimension 0 of parameter "
224
+ "k1__noise_level is close to the "
225
+ "specified upper bound 0.001. "
226
+ "Increasing the bound and calling "
227
+ "fit again may find a better value."
228
+ )
229
+
230
+ assert issubclass(record[1].category, ConvergenceWarning)
231
+ assert (
232
+ record[1].message.args[0]
233
+ == "The optimal value found for "
234
+ "dimension 0 of parameter "
235
+ "k2__length_scale is close to the "
236
+ "specified lower bound 1000.0. "
237
+ "Decreasing the bound and calling "
238
+ "fit again may find a better value."
239
+ )
240
+
241
+ X_tile = np.tile(X, 2)
242
+ kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
243
+ gpc_dims = GaussianProcessClassifier(kernel=kernel_dims)
244
+
245
+ with warnings.catch_warnings(record=True) as record:
246
+ warnings.simplefilter("always")
247
+ gpc_dims.fit(X_tile, y)
248
+
249
+ assert len(record) == 2
250
+
251
+ assert issubclass(record[0].category, ConvergenceWarning)
252
+ assert (
253
+ record[0].message.args[0]
254
+ == "The optimal value found for "
255
+ "dimension 0 of parameter "
256
+ "length_scale is close to the "
257
+ "specified upper bound 100.0. "
258
+ "Increasing the bound and calling "
259
+ "fit again may find a better value."
260
+ )
261
+
262
+ assert issubclass(record[1].category, ConvergenceWarning)
263
+ assert (
264
+ record[1].message.args[0]
265
+ == "The optimal value found for "
266
+ "dimension 1 of parameter "
267
+ "length_scale is close to the "
268
+ "specified upper bound 100.0. "
269
+ "Increasing the bound and calling "
270
+ "fit again may find a better value."
271
+ )
272
+
273
+
274
+ @pytest.mark.parametrize(
275
+ "params, error_type, err_msg",
276
+ [
277
+ (
278
+ {"kernel": CompoundKernel(0)},
279
+ ValueError,
280
+ "kernel cannot be a CompoundKernel",
281
+ )
282
+ ],
283
+ )
284
+ def test_gpc_fit_error(params, error_type, err_msg):
285
+ """Check that expected error are raised during fit."""
286
+ gpc = GaussianProcessClassifier(**params)
287
+ with pytest.raises(error_type, match=err_msg):
288
+ gpc.fit(X, y)
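For context, the custom-optimizer hook exercised by test_custom_optimizer above accepts any callable with the signature optimizer(obj_func, initial_theta, bounds) returning (theta_opt, func_min). Below is a minimal sketch of using that hook outside the test suite; the toy data and the L-BFGS-B wrapper are illustrative assumptions, not part of the committed file.

import numpy as np
from scipy.optimize import minimize
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

def lbfgs_optimizer(obj_func, initial_theta, bounds):
    # obj_func(theta) returns (negative log-marginal-likelihood, gradient),
    # so it can be handed to scipy.optimize.minimize with jac=True.
    res = minimize(obj_func, initial_theta, jac=True, bounds=bounds, method="L-BFGS-B")
    return res.x, res.fun

rng = np.random.RandomState(0)
X_demo = rng.randn(30, 2)                                  # made-up data for illustration
y_demo = (X_demo[:, 0] + X_demo[:, 1] > 0).astype(int)

gpc = GaussianProcessClassifier(kernel=RBF(1.0), optimizer=lbfgs_optimizer)
gpc.fit(X_demo, y_demo)
print(gpc.predict_proba(X_demo[:3]))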
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py ADDED
@@ -0,0 +1,853 @@
1
+ """Testing for Gaussian process regression """
2
+
3
+ # Author: Jan Hendrik Metzen <[email protected]>
4
+ # Modified by: Pete Green <[email protected]>
5
+ # License: BSD 3 clause
6
+
7
+ import re
8
+ import sys
9
+ import warnings
10
+
11
+ import numpy as np
12
+ import pytest
13
+ from scipy.optimize import approx_fprime
14
+
15
+ from sklearn.exceptions import ConvergenceWarning
16
+ from sklearn.gaussian_process import GaussianProcessRegressor
17
+ from sklearn.gaussian_process.kernels import (
18
+ RBF,
19
+ DotProduct,
20
+ ExpSineSquared,
21
+ WhiteKernel,
22
+ )
23
+ from sklearn.gaussian_process.kernels import (
24
+ ConstantKernel as C,
25
+ )
26
+ from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
27
+ from sklearn.utils._testing import (
28
+ assert_allclose,
29
+ assert_almost_equal,
30
+ assert_array_almost_equal,
31
+ assert_array_less,
32
+ )
33
+
34
+
35
+ def f(x):
36
+ return x * np.sin(x)
37
+
38
+
39
+ X = np.atleast_2d([1.0, 3.0, 5.0, 6.0, 7.0, 8.0]).T
40
+ X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
41
+ y = f(X).ravel()
42
+
43
+ fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
44
+ kernels = [
45
+ RBF(length_scale=1.0),
46
+ fixed_kernel,
47
+ RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
48
+ C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
49
+ C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
50
+ + C(1e-5, (1e-5, 1e2)),
51
+ C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
52
+ + C(1e-5, (1e-5, 1e2)),
53
+ ]
54
+ non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]
55
+
56
+
57
+ @pytest.mark.parametrize("kernel", kernels)
58
+ def test_gpr_interpolation(kernel):
59
+ if sys.maxsize <= 2**32:
60
+ pytest.xfail("This test may fail on 32 bit Python")
61
+
62
+ # Test the interpolating property for different kernels.
63
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
64
+ y_pred, y_cov = gpr.predict(X, return_cov=True)
65
+
66
+ assert_almost_equal(y_pred, y)
67
+ assert_almost_equal(np.diag(y_cov), 0.0)
68
+
69
+
70
+ def test_gpr_interpolation_structured():
71
+ # Test the interpolating property for different kernels.
72
+ kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
73
+ X = ["A", "B", "C"]
74
+ y = np.array([1, 2, 3])
75
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
76
+ y_pred, y_cov = gpr.predict(X, return_cov=True)
77
+
78
+ assert_almost_equal(
79
+ kernel(X, eval_gradient=True)[1].ravel(), (1 - np.eye(len(X))).ravel()
80
+ )
81
+ assert_almost_equal(y_pred, y)
82
+ assert_almost_equal(np.diag(y_cov), 0.0)
83
+
84
+
85
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
86
+ def test_lml_improving(kernel):
87
+ if sys.maxsize <= 2**32:
88
+ pytest.xfail("This test may fail on 32 bit Python")
89
+
90
+ # Test that hyperparameter-tuning improves log-marginal likelihood.
91
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
92
+ assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
93
+ kernel.theta
94
+ )
95
+
96
+
97
+ @pytest.mark.parametrize("kernel", kernels)
98
+ def test_lml_precomputed(kernel):
99
+ # Test that lml of optimized kernel is stored correctly.
100
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
101
+ assert gpr.log_marginal_likelihood(gpr.kernel_.theta) == pytest.approx(
102
+ gpr.log_marginal_likelihood()
103
+ )
104
+
105
+
106
+ @pytest.mark.parametrize("kernel", kernels)
107
+ def test_lml_without_cloning_kernel(kernel):
108
+ # Test that lml of optimized kernel is stored correctly.
109
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
110
+ input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64)
111
+
112
+ gpr.log_marginal_likelihood(input_theta, clone_kernel=False)
113
+ assert_almost_equal(gpr.kernel_.theta, input_theta, 7)
114
+
115
+
116
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
117
+ def test_converged_to_local_maximum(kernel):
118
+ # Test that we are in local maximum after hyperparameter-optimization.
119
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
120
+
121
+ lml, lml_gradient = gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
122
+
123
+ assert np.all(
124
+ (np.abs(lml_gradient) < 1e-4)
125
+ | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0])
126
+ | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])
127
+ )
128
+
129
+
130
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
131
+ def test_solution_inside_bounds(kernel):
132
+ # Test that hyperparameter-optimization remains in bounds.
133
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
134
+
135
+ bounds = gpr.kernel_.bounds
136
+ max_ = np.finfo(gpr.kernel_.theta.dtype).max
137
+ tiny = 1e-10
138
+ bounds[~np.isfinite(bounds[:, 1]), 1] = max_
139
+
140
+ assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
141
+ assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
142
+
143
+
144
+ @pytest.mark.parametrize("kernel", kernels)
145
+ def test_lml_gradient(kernel):
146
+ # Compare analytic and numeric gradient of log marginal likelihood.
147
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
148
+
149
+ lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
150
+ lml_gradient_approx = approx_fprime(
151
+ kernel.theta, lambda theta: gpr.log_marginal_likelihood(theta, False), 1e-10
152
+ )
153
+
154
+ assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
155
+
156
+
157
+ @pytest.mark.parametrize("kernel", kernels)
158
+ def test_prior(kernel):
159
+ # Test that GP prior has mean 0 and identical variances.
160
+ gpr = GaussianProcessRegressor(kernel=kernel)
161
+
162
+ y_mean, y_cov = gpr.predict(X, return_cov=True)
163
+
164
+ assert_almost_equal(y_mean, 0, 5)
165
+ if len(gpr.kernel.theta) > 1:
166
+ # XXX: quite hacky, works only for current kernels
167
+ assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
168
+ else:
169
+ assert_almost_equal(np.diag(y_cov), 1, 5)
170
+
171
+
172
+ @pytest.mark.parametrize("kernel", kernels)
173
+ def test_sample_statistics(kernel):
174
+ # Test that statistics of samples drawn from GP are correct.
175
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
176
+
177
+ y_mean, y_cov = gpr.predict(X2, return_cov=True)
178
+
179
+ samples = gpr.sample_y(X2, 300000)
180
+
181
+ # More digits accuracy would require many more samples
182
+ assert_almost_equal(y_mean, np.mean(samples, 1), 1)
183
+ assert_almost_equal(
184
+ np.diag(y_cov) / np.diag(y_cov).max(),
185
+ np.var(samples, 1) / np.diag(y_cov).max(),
186
+ 1,
187
+ )
188
+
189
+
190
+ def test_no_optimizer():
191
+ # Test that kernel parameters are unmodified when optimizer is None.
192
+ kernel = RBF(1.0)
193
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
194
+ assert np.exp(gpr.kernel_.theta) == 1.0
195
+
196
+
197
+ @pytest.mark.parametrize("kernel", kernels)
198
+ @pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)])
199
+ def test_predict_cov_vs_std(kernel, target):
200
+ if sys.maxsize <= 2**32:
201
+ pytest.xfail("This test may fail on 32 bit Python")
202
+
203
+ # Test that predicted std.-dev. is consistent with cov's diagonal.
204
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
205
+ y_mean, y_cov = gpr.predict(X2, return_cov=True)
206
+ y_mean, y_std = gpr.predict(X2, return_std=True)
207
+ assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
208
+
209
+
210
+ def test_anisotropic_kernel():
211
+ # Test that GPR can identify meaningful anisotropic length-scales.
212
+ # We learn a function which varies in one dimension ten-times slower
213
+ # than in the other. The corresponding length-scales should differ by at
214
+ # least a factor 5
215
+ rng = np.random.RandomState(0)
216
+ X = rng.uniform(-1, 1, (50, 2))
217
+ y = X[:, 0] + 0.1 * X[:, 1]
218
+
219
+ kernel = RBF([1.0, 1.0])
220
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
221
+ assert np.exp(gpr.kernel_.theta[1]) > np.exp(gpr.kernel_.theta[0]) * 5
222
+
223
+
224
+ def test_random_starts():
225
+ # Test that an increasing number of random-starts of GP fitting only
226
+ # increases the log marginal likelihood of the chosen theta.
227
+ n_samples, n_features = 25, 2
228
+ rng = np.random.RandomState(0)
229
+ X = rng.randn(n_samples, n_features) * 2 - 1
230
+ y = (
231
+ np.sin(X).sum(axis=1)
232
+ + np.sin(3 * X).sum(axis=1)
233
+ + rng.normal(scale=0.1, size=n_samples)
234
+ )
235
+
236
+ kernel = C(1.0, (1e-2, 1e2)) * RBF(
237
+ length_scale=[1.0] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
238
+ ) + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
239
+ last_lml = -np.inf
240
+ for n_restarts_optimizer in range(5):
241
+ gp = GaussianProcessRegressor(
242
+ kernel=kernel,
243
+ n_restarts_optimizer=n_restarts_optimizer,
244
+ random_state=0,
245
+ ).fit(X, y)
246
+ lml = gp.log_marginal_likelihood(gp.kernel_.theta)
247
+ assert lml > last_lml - np.finfo(np.float32).eps
248
+ last_lml = lml
249
+
250
+
251
+ @pytest.mark.parametrize("kernel", kernels)
252
+ def test_y_normalization(kernel):
253
+ """
254
+ Test normalization of the target values in GP
255
+
256
+ Fitting non-normalizing GP on normalized y and fitting normalizing GP
257
+ on unnormalized y should yield identical results. Note that, here,
258
+ 'normalized y' refers to y that has been made zero mean and unit
259
+ variance.
260
+
261
+ """
262
+
263
+ y_mean = np.mean(y)
264
+ y_std = np.std(y)
265
+ y_norm = (y - y_mean) / y_std
266
+
267
+ # Fit non-normalizing GP on normalized y
268
+ gpr = GaussianProcessRegressor(kernel=kernel)
269
+ gpr.fit(X, y_norm)
270
+
271
+ # Fit normalizing GP on unnormalized y
272
+ gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
273
+ gpr_norm.fit(X, y)
274
+
275
+ # Compare predicted mean, std-devs and covariances
276
+ y_pred, y_pred_std = gpr.predict(X2, return_std=True)
277
+ y_pred = y_pred * y_std + y_mean
278
+ y_pred_std = y_pred_std * y_std
279
+ y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
280
+
281
+ assert_almost_equal(y_pred, y_pred_norm)
282
+ assert_almost_equal(y_pred_std, y_pred_std_norm)
283
+
284
+ _, y_cov = gpr.predict(X2, return_cov=True)
285
+ y_cov = y_cov * y_std**2
286
+ _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
287
+
288
+ assert_almost_equal(y_cov, y_cov_norm)
289
+
290
+
291
+ def test_large_variance_y():
292
+ """
293
+ Here we test that, when noramlize_y=True, our GP can produce a
294
+ sensible fit to training data whose variance is significantly
295
+ larger than unity. This test was made in response to issue #15612.
296
+
297
+ GP predictions are verified against predictions that were made
298
+ using GPy which, here, is treated as the 'gold standard'. Note that we
299
+ only investigate the RBF kernel here, as that is what was used in the
300
+ GPy implementation.
301
+
302
+ The following code can be used to recreate the GPy data:
303
+
304
+ --------------------------------------------------------------------------
305
+ import GPy
306
+
307
+ kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
308
+ gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
309
+ gpy.optimize()
310
+ y_pred_gpy, y_var_gpy = gpy.predict(X2)
311
+ y_pred_std_gpy = np.sqrt(y_var_gpy)
312
+ --------------------------------------------------------------------------
313
+ """
314
+
315
+ # Here we utilise a larger variance version of the training data
316
+ y_large = 10 * y
317
+
318
+ # Standard GP with normalize_y=True
319
+ RBF_params = {"length_scale": 1.0}
320
+ kernel = RBF(**RBF_params)
321
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
322
+ gpr.fit(X, y_large)
323
+ y_pred, y_pred_std = gpr.predict(X2, return_std=True)
324
+
325
+ # 'Gold standard' mean predictions from GPy
326
+ y_pred_gpy = np.array(
327
+ [15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589]
328
+ )
329
+
330
+ # 'Gold standard' std predictions from GPy
331
+ y_pred_std_gpy = np.array(
332
+ [7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042]
333
+ )
334
+
335
+ # Based on numerical experiments, it's reasonable to expect our
336
+ # GP's mean predictions to get within 7% of predictions of those
337
+ # made by GPy.
338
+ assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)
339
+
340
+ # Based on numerical experiments, it's reasonable to expect our
341
+ # GP's std predictions to get within 15% of predictions of those
342
+ # made by GPy.
343
+ assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
344
+
345
+
346
+ def test_y_multioutput():
347
+ # Test that GPR can deal with multi-dimensional target values
348
+ y_2d = np.vstack((y, y * 2)).T
349
+
350
+ # Test for fixed kernel that first dimension of 2d GP equals the output
351
+ # of 1d GP and that second dimension is twice as large
352
+ kernel = RBF(length_scale=1.0)
353
+
354
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
355
+ gpr.fit(X, y)
356
+
357
+ gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
358
+ gpr_2d.fit(X, y_2d)
359
+
360
+ y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
361
+ y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
362
+ _, y_cov_1d = gpr.predict(X2, return_cov=True)
363
+ _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
364
+
365
+ assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
366
+ assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
367
+
368
+ # Standard deviation and covariance do not depend on output
369
+ for target in range(y_2d.shape[1]):
370
+ assert_almost_equal(y_std_1d, y_std_2d[..., target])
371
+ assert_almost_equal(y_cov_1d, y_cov_2d[..., target])
372
+
373
+ y_sample_1d = gpr.sample_y(X2, n_samples=10)
374
+ y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
375
+
376
+ assert y_sample_1d.shape == (5, 10)
377
+ assert y_sample_2d.shape == (5, 2, 10)
378
+ # Only the first target will be equal
379
+ assert_almost_equal(y_sample_1d, y_sample_2d[:, 0, :])
380
+
381
+ # Test hyperparameter optimization
382
+ for kernel in kernels:
383
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
384
+ gpr.fit(X, y)
385
+
386
+ gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
387
+ gpr_2d.fit(X, np.vstack((y, y)).T)
388
+
389
+ assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
390
+
391
+
392
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
393
+ def test_custom_optimizer(kernel):
394
+ # Test that GPR can use externally defined optimizers.
395
+ # Define a dummy optimizer that simply tests 50 random hyperparameters
396
+ def optimizer(obj_func, initial_theta, bounds):
397
+ rng = np.random.RandomState(0)
398
+ theta_opt, func_min = initial_theta, obj_func(
399
+ initial_theta, eval_gradient=False
400
+ )
401
+ for _ in range(50):
402
+ theta = np.atleast_1d(
403
+ rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
404
+ )
405
+ f = obj_func(theta, eval_gradient=False)
406
+ if f < func_min:
407
+ theta_opt, func_min = theta, f
408
+ return theta_opt, func_min
409
+
410
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
411
+ gpr.fit(X, y)
412
+ # Checks that optimizer improved marginal likelihood
413
+ assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
414
+ gpr.kernel.theta
415
+ )
416
+
417
+
418
+ def test_gpr_correct_error_message():
419
+ X = np.arange(12).reshape(6, -1)
420
+ y = np.ones(6)
421
+ kernel = DotProduct()
422
+ gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
423
+ message = (
424
+ "The kernel, %s, is not returning a "
425
+ "positive definite matrix. Try gradually increasing "
426
+ "the 'alpha' parameter of your "
427
+ "GaussianProcessRegressor estimator." % kernel
428
+ )
429
+ with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)):
430
+ gpr.fit(X, y)
431
+
432
+
433
+ @pytest.mark.parametrize("kernel", kernels)
434
+ def test_duplicate_input(kernel):
435
+ # Test GPR can handle two different output-values for the same input.
436
+ gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
437
+ gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
438
+
439
+ X_ = np.vstack((X, X[0]))
440
+ y_ = np.hstack((y, y[0] + 1))
441
+ gpr_equal_inputs.fit(X_, y_)
442
+
443
+ X_ = np.vstack((X, X[0] + 1e-15))
444
+ y_ = np.hstack((y, y[0] + 1))
445
+ gpr_similar_inputs.fit(X_, y_)
446
+
447
+ X_test = np.linspace(0, 10, 100)[:, None]
448
+ y_pred_equal, y_std_equal = gpr_equal_inputs.predict(X_test, return_std=True)
449
+ y_pred_similar, y_std_similar = gpr_similar_inputs.predict(X_test, return_std=True)
450
+
451
+ assert_almost_equal(y_pred_equal, y_pred_similar)
452
+ assert_almost_equal(y_std_equal, y_std_similar)
453
+
454
+
455
+ def test_no_fit_default_predict():
456
+ # Test that GPR predictions without fit does not break by default.
457
+ default_kernel = C(1.0, constant_value_bounds="fixed") * RBF(
458
+ 1.0, length_scale_bounds="fixed"
459
+ )
460
+ gpr1 = GaussianProcessRegressor()
461
+ _, y_std1 = gpr1.predict(X, return_std=True)
462
+ _, y_cov1 = gpr1.predict(X, return_cov=True)
463
+
464
+ gpr2 = GaussianProcessRegressor(kernel=default_kernel)
465
+ _, y_std2 = gpr2.predict(X, return_std=True)
466
+ _, y_cov2 = gpr2.predict(X, return_cov=True)
467
+
468
+ assert_array_almost_equal(y_std1, y_std2)
469
+ assert_array_almost_equal(y_cov1, y_cov2)
470
+
471
+
472
+ def test_warning_bounds():
473
+ kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
474
+ gpr = GaussianProcessRegressor(kernel=kernel)
475
+ warning_message = (
476
+ "The optimal value found for dimension 0 of parameter "
477
+ "length_scale is close to the specified upper bound "
478
+ "0.001. Increasing the bound and calling fit again may "
479
+ "find a better value."
480
+ )
481
+ with pytest.warns(ConvergenceWarning, match=warning_message):
482
+ gpr.fit(X, y)
483
+
484
+ kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
485
+ length_scale_bounds=[1e3, 1e5]
486
+ )
487
+ gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
488
+ with warnings.catch_warnings(record=True) as record:
489
+ warnings.simplefilter("always")
490
+ gpr_sum.fit(X, y)
491
+
492
+ assert len(record) == 2
493
+
494
+ assert issubclass(record[0].category, ConvergenceWarning)
495
+ assert (
496
+ record[0].message.args[0]
497
+ == "The optimal value found for "
498
+ "dimension 0 of parameter "
499
+ "k1__noise_level is close to the "
500
+ "specified upper bound 0.001. "
501
+ "Increasing the bound and calling "
502
+ "fit again may find a better value."
503
+ )
504
+
505
+ assert issubclass(record[1].category, ConvergenceWarning)
506
+ assert (
507
+ record[1].message.args[0]
508
+ == "The optimal value found for "
509
+ "dimension 0 of parameter "
510
+ "k2__length_scale is close to the "
511
+ "specified lower bound 1000.0. "
512
+ "Decreasing the bound and calling "
513
+ "fit again may find a better value."
514
+ )
515
+
516
+ X_tile = np.tile(X, 2)
517
+ kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
518
+ gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)
519
+
520
+ with warnings.catch_warnings(record=True) as record:
521
+ warnings.simplefilter("always")
522
+ gpr_dims.fit(X_tile, y)
523
+
524
+ assert len(record) == 2
525
+
526
+ assert issubclass(record[0].category, ConvergenceWarning)
527
+ assert (
528
+ record[0].message.args[0]
529
+ == "The optimal value found for "
530
+ "dimension 0 of parameter "
531
+ "length_scale is close to the "
532
+ "specified lower bound 10.0. "
533
+ "Decreasing the bound and calling "
534
+ "fit again may find a better value."
535
+ )
536
+
537
+ assert issubclass(record[1].category, ConvergenceWarning)
538
+ assert (
539
+ record[1].message.args[0]
540
+ == "The optimal value found for "
541
+ "dimension 1 of parameter "
542
+ "length_scale is close to the "
543
+ "specified lower bound 10.0. "
544
+ "Decreasing the bound and calling "
545
+ "fit again may find a better value."
546
+ )
547
+
548
+
549
+ def test_bound_check_fixed_hyperparameter():
550
+ # Regression test for issue #17943
551
+ # Check that having a hyperparameter with fixed bounds doesn't cause an
552
+ # error
553
+ k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
554
+ k2 = ExpSineSquared(
555
+ length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed"
556
+ ) # seasonal component
557
+ kernel = k1 + k2
558
+ GaussianProcessRegressor(kernel=kernel).fit(X, y)
559
+
560
+
561
+ @pytest.mark.parametrize("kernel", kernels)
562
+ def test_constant_target(kernel):
563
+ """Check that the std. dev. is affected to 1 when normalizing a constant
564
+ feature.
565
+ Non-regression test for:
566
+ https://github.com/scikit-learn/scikit-learn/issues/18318
567
+ NaN values were assigned to the target when scaling, due to the null std. dev. of a
568
+ constant target.
569
+ """
570
+ y_constant = np.ones(X.shape[0], dtype=np.float64)
571
+
572
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
573
+ gpr.fit(X, y_constant)
574
+ assert gpr._y_train_std == pytest.approx(1.0)
575
+
576
+ y_pred, y_cov = gpr.predict(X, return_cov=True)
577
+ assert_allclose(y_pred, y_constant)
578
+ # set atol because we compare to zero
579
+ assert_allclose(np.diag(y_cov), 0.0, atol=1e-9)
580
+
581
+ # Test multi-target data
582
+ n_samples, n_targets = X.shape[0], 2
583
+ rng = np.random.RandomState(0)
584
+ y = np.concatenate(
585
+ [
586
+ rng.normal(size=(n_samples, 1)), # non-constant target
587
+ np.full(shape=(n_samples, 1), fill_value=2), # constant target
588
+ ],
589
+ axis=1,
590
+ )
591
+
592
+ gpr.fit(X, y)
593
+ Y_pred, Y_cov = gpr.predict(X, return_cov=True)
594
+
595
+ assert_allclose(Y_pred[:, 1], 2)
596
+ assert_allclose(np.diag(Y_cov[..., 1]), 0.0, atol=1e-9)
597
+
598
+ assert Y_pred.shape == (n_samples, n_targets)
599
+ assert Y_cov.shape == (n_samples, n_samples, n_targets)
600
+
601
+
602
+ def test_gpr_consistency_std_cov_non_invertible_kernel():
603
+ """Check the consistency between the returned std. dev. and the covariance.
604
+ Non-regression test for:
605
+ https://github.com/scikit-learn/scikit-learn/issues/19936
606
+ Inconsistencies were observed when the kernel cannot be inverted (or is not
607
+ numerically stable).
608
+ """
609
+ kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF(
610
+ [5.91326520e02, 1.32584051e03], (1e-12, 1e12)
611
+ ) + WhiteKernel(noise_level=1e-5)
612
+ gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None)
613
+ X_train = np.array(
614
+ [
615
+ [0.0, 0.0],
616
+ [1.54919334, -0.77459667],
617
+ [-1.54919334, 0.0],
618
+ [0.0, -1.54919334],
619
+ [0.77459667, 0.77459667],
620
+ [-0.77459667, 1.54919334],
621
+ ]
622
+ )
623
+ y_train = np.array(
624
+ [
625
+ [-2.14882017e-10],
626
+ [-4.66975823e00],
627
+ [4.01823986e00],
628
+ [-1.30303674e00],
629
+ [-1.35760156e00],
630
+ [3.31215668e00],
631
+ ]
632
+ )
633
+ gpr.fit(X_train, y_train)
634
+ X_test = np.array(
635
+ [
636
+ [-1.93649167, -1.93649167],
637
+ [1.93649167, -1.93649167],
638
+ [-1.93649167, 1.93649167],
639
+ [1.93649167, 1.93649167],
640
+ ]
641
+ )
642
+ pred1, std = gpr.predict(X_test, return_std=True)
643
+ pred2, cov = gpr.predict(X_test, return_cov=True)
644
+ assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5)
645
+
646
+
647
+ @pytest.mark.parametrize(
648
+ "params, TypeError, err_msg",
649
+ [
650
+ (
651
+ {"alpha": np.zeros(100)},
652
+ ValueError,
653
+ "alpha must be a scalar or an array with same number of entries as y",
654
+ ),
655
+ (
656
+ {
657
+ "kernel": WhiteKernel(noise_level_bounds=(-np.inf, np.inf)),
658
+ "n_restarts_optimizer": 2,
659
+ },
660
+ ValueError,
661
+ "requires that all bounds are finite",
662
+ ),
663
+ ],
664
+ )
665
+ def test_gpr_fit_error(params, TypeError, err_msg):
666
+ """Check that expected error are raised during fit."""
667
+ gpr = GaussianProcessRegressor(**params)
668
+ with pytest.raises(TypeError, match=err_msg):
669
+ gpr.fit(X, y)
670
+
671
+
672
+ def test_gpr_lml_error():
673
+ """Check that we raise the proper error in the LML method."""
674
+ gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
675
+
676
+ err_msg = "Gradient can only be evaluated for theta!=None"
677
+ with pytest.raises(ValueError, match=err_msg):
678
+ gpr.log_marginal_likelihood(eval_gradient=True)
679
+
680
+
681
+ def test_gpr_predict_error():
682
+ """Check that we raise the proper error during predict."""
683
+ gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
684
+
685
+ err_msg = "At most one of return_std or return_cov can be requested."
686
+ with pytest.raises(RuntimeError, match=err_msg):
687
+ gpr.predict(X, return_cov=True, return_std=True)
688
+
689
+
690
+ @pytest.mark.parametrize("normalize_y", [True, False])
691
+ @pytest.mark.parametrize("n_targets", [None, 1, 10])
692
+ def test_predict_shapes(normalize_y, n_targets):
693
+ """Check the shapes of y_mean, y_std, and y_cov in single-output
694
+ (n_targets=None) and multi-output settings, including the edge case when
695
+ n_targets=1, where the sklearn convention is to squeeze the predictions.
696
+
697
+ Non-regression test for:
698
+ https://github.com/scikit-learn/scikit-learn/issues/17394
699
+ https://github.com/scikit-learn/scikit-learn/issues/18065
700
+ https://github.com/scikit-learn/scikit-learn/issues/22174
701
+ """
702
+ rng = np.random.RandomState(1234)
703
+
704
+ n_features, n_samples_train, n_samples_test = 6, 9, 7
705
+
706
+ y_train_shape = (n_samples_train,)
707
+ if n_targets is not None:
708
+ y_train_shape = y_train_shape + (n_targets,)
709
+
710
+ # By convention single-output data is squeezed upon prediction
711
+ y_test_shape = (n_samples_test,)
712
+ if n_targets is not None and n_targets > 1:
713
+ y_test_shape = y_test_shape + (n_targets,)
714
+
715
+ X_train = rng.randn(n_samples_train, n_features)
716
+ X_test = rng.randn(n_samples_test, n_features)
717
+ y_train = rng.randn(*y_train_shape)
718
+
719
+ model = GaussianProcessRegressor(normalize_y=normalize_y)
720
+ model.fit(X_train, y_train)
721
+
722
+ y_pred, y_std = model.predict(X_test, return_std=True)
723
+ _, y_cov = model.predict(X_test, return_cov=True)
724
+
725
+ assert y_pred.shape == y_test_shape
726
+ assert y_std.shape == y_test_shape
727
+ assert y_cov.shape == (n_samples_test,) + y_test_shape
728
+
729
+
730
+ @pytest.mark.parametrize("normalize_y", [True, False])
731
+ @pytest.mark.parametrize("n_targets", [None, 1, 10])
732
+ def test_sample_y_shapes(normalize_y, n_targets):
733
+ """Check the shapes of y_samples in single-output (n_targets=0) and
734
+ multi-output settings, including the edge case when n_targets=1, where the
735
+ sklearn convention is to squeeze the predictions.
736
+
737
+ Non-regression test for:
738
+ https://github.com/scikit-learn/scikit-learn/issues/22175
739
+ """
740
+ rng = np.random.RandomState(1234)
741
+
742
+ n_features, n_samples_train = 6, 9
743
+ # Number of spatial locations to predict at
744
+ n_samples_X_test = 7
745
+ # Number of sample predictions per test point
746
+ n_samples_y_test = 5
747
+
748
+ y_train_shape = (n_samples_train,)
749
+ if n_targets is not None:
750
+ y_train_shape = y_train_shape + (n_targets,)
751
+
752
+ # By convention single-output data is squeezed upon prediction
753
+ if n_targets is not None and n_targets > 1:
754
+ y_test_shape = (n_samples_X_test, n_targets, n_samples_y_test)
755
+ else:
756
+ y_test_shape = (n_samples_X_test, n_samples_y_test)
757
+
758
+ X_train = rng.randn(n_samples_train, n_features)
759
+ X_test = rng.randn(n_samples_X_test, n_features)
760
+ y_train = rng.randn(*y_train_shape)
761
+
762
+ model = GaussianProcessRegressor(normalize_y=normalize_y)
763
+
764
+ # FIXME: before fitting, the estimator does not have information regarding
765
+ # the number of targets and default to 1. This is inconsistent with the shape
766
+ # provided after `fit`. This assert should be made once the following issue
767
+ # is fixed:
768
+ # https://github.com/scikit-learn/scikit-learn/issues/22430
769
+ # y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
770
+ # assert y_samples.shape == y_test_shape
771
+
772
+ model.fit(X_train, y_train)
773
+
774
+ y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
775
+ assert y_samples.shape == y_test_shape
776
+
777
+
778
+ @pytest.mark.parametrize("n_targets", [None, 1, 2, 3])
779
+ @pytest.mark.parametrize("n_samples", [1, 5])
780
+ def test_sample_y_shape_with_prior(n_targets, n_samples):
781
+ """Check the output shape of `sample_y` is consistent before and after `fit`."""
782
+ rng = np.random.RandomState(1024)
783
+
784
+ X = rng.randn(10, 3)
785
+ y = rng.randn(10, n_targets if n_targets is not None else 1)
786
+
787
+ model = GaussianProcessRegressor(n_targets=n_targets)
788
+ shape_before_fit = model.sample_y(X, n_samples=n_samples).shape
789
+ model.fit(X, y)
790
+ shape_after_fit = model.sample_y(X, n_samples=n_samples).shape
791
+ assert shape_before_fit == shape_after_fit
792
+
793
+
794
+ @pytest.mark.parametrize("n_targets", [None, 1, 2, 3])
795
+ def test_predict_shape_with_prior(n_targets):
796
+ """Check the output shape of `predict` with prior distribution."""
797
+ rng = np.random.RandomState(1024)
798
+
799
+ n_sample = 10
800
+ X = rng.randn(n_sample, 3)
801
+ y = rng.randn(n_sample, n_targets if n_targets is not None else 1)
802
+
803
+ model = GaussianProcessRegressor(n_targets=n_targets)
804
+ mean_prior, cov_prior = model.predict(X, return_cov=True)
805
+ _, std_prior = model.predict(X, return_std=True)
806
+
807
+ model.fit(X, y)
808
+ mean_post, cov_post = model.predict(X, return_cov=True)
809
+ _, std_post = model.predict(X, return_std=True)
810
+
811
+ assert mean_prior.shape == mean_post.shape
812
+ assert cov_prior.shape == cov_post.shape
813
+ assert std_prior.shape == std_post.shape
814
+
815
+
816
+ def test_n_targets_error():
817
+ """Check that an error is raised when the number of targets seen at fit is
818
+ inconsistent with n_targets.
819
+ """
820
+ rng = np.random.RandomState(0)
821
+ X = rng.randn(10, 3)
822
+ y = rng.randn(10, 2)
823
+
824
+ model = GaussianProcessRegressor(n_targets=1)
825
+ with pytest.raises(ValueError, match="The number of targets seen in `y`"):
826
+ model.fit(X, y)
827
+
828
+
829
+ class CustomKernel(C):
830
+ """
831
+ A custom kernel that has a diag method that returns the first column of the
832
+ input matrix X. This is a helper for the test to check that the input
833
+ matrix X is not mutated.
834
+ """
835
+
836
+ def diag(self, X):
837
+ return X[:, 0]
838
+
839
+
840
+ def test_gpr_predict_input_not_modified():
841
+ """
842
+ Check that the input X is not modified by the predict method of the
843
+ GaussianProcessRegressor when setting return_std=True.
844
+
845
+ Non-regression test for:
846
+ https://github.com/scikit-learn/scikit-learn/issues/24340
847
+ """
848
+ gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y)
849
+
850
+ X2_copy = np.copy(X2)
851
+ _, _ = gpr.predict(X2, return_std=True)
852
+
853
+ assert_allclose(X2, X2_copy)
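The regressor tests above revolve around a handful of public calls: fit, predict with return_std/return_cov, sample_y, and log_marginal_likelihood. A short, self-contained sketch of that API on made-up one-dimensional data (an illustrative assumption, not taken from the diff):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C

rng = np.random.RandomState(0)
X_train = np.sort(rng.uniform(0, 10, 40))[:, None]
y_train = X_train.ravel() * np.sin(X_train.ravel()) + rng.normal(0, 0.2, 40)

kernel = C(1.0) * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True).fit(X_train, y_train)

X_test = np.linspace(0, 10, 5)[:, None]
y_mean, y_std = gpr.predict(X_test, return_std=True)    # posterior mean and std. dev.
samples = gpr.sample_y(X_test, n_samples=3)             # draws from the posterior
print(y_mean.shape, y_std.shape, samples.shape)         # (5,), (5,), (5, 3)
print(gpr.log_marginal_likelihood(gpr.kernel_.theta))   # LML of the fitted hyperparameters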
venv/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py ADDED
@@ -0,0 +1,388 @@
1
+ """Testing for kernels for Gaussian processes."""
2
+
3
+ # Author: Jan Hendrik Metzen <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ from inspect import signature
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from sklearn.base import clone
12
+ from sklearn.gaussian_process.kernels import (
13
+ RBF,
14
+ CompoundKernel,
15
+ ConstantKernel,
16
+ DotProduct,
17
+ Exponentiation,
18
+ ExpSineSquared,
19
+ KernelOperator,
20
+ Matern,
21
+ PairwiseKernel,
22
+ RationalQuadratic,
23
+ WhiteKernel,
24
+ _approx_fprime,
25
+ )
26
+ from sklearn.metrics.pairwise import (
27
+ PAIRWISE_KERNEL_FUNCTIONS,
28
+ euclidean_distances,
29
+ pairwise_kernels,
30
+ )
31
+ from sklearn.utils._testing import (
32
+ assert_allclose,
33
+ assert_almost_equal,
34
+ assert_array_almost_equal,
35
+ assert_array_equal,
36
+ )
37
+
38
+ X = np.random.RandomState(0).normal(0, 1, (5, 2))
39
+ Y = np.random.RandomState(0).normal(0, 1, (6, 2))
40
+
41
+ kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
42
+ kernels = [
43
+ RBF(length_scale=2.0),
44
+ RBF(length_scale_bounds=(0.5, 2.0)),
45
+ ConstantKernel(constant_value=10.0),
46
+ 2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
47
+ 2.0 * RBF(length_scale=0.5),
48
+ kernel_rbf_plus_white,
49
+ 2.0 * RBF(length_scale=[0.5, 2.0]),
50
+ 2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
51
+ 2.0 * Matern(length_scale=0.5, nu=0.5),
52
+ 2.0 * Matern(length_scale=1.5, nu=1.5),
53
+ 2.0 * Matern(length_scale=2.5, nu=2.5),
54
+ 2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
55
+ 3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
56
+ 4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
57
+ RationalQuadratic(length_scale=0.5, alpha=1.5),
58
+ ExpSineSquared(length_scale=0.5, periodicity=1.5),
59
+ DotProduct(sigma_0=2.0),
60
+ DotProduct(sigma_0=2.0) ** 2,
61
+ RBF(length_scale=[2.0]),
62
+ Matern(length_scale=[2.0]),
63
+ ]
64
+ for metric in PAIRWISE_KERNEL_FUNCTIONS:
65
+ if metric in ["additive_chi2", "chi2"]:
66
+ continue
67
+ kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
68
+
69
+
70
+ @pytest.mark.parametrize("kernel", kernels)
71
+ def test_kernel_gradient(kernel):
72
+ # Compare analytic and numeric gradient of kernels.
73
+ K, K_gradient = kernel(X, eval_gradient=True)
74
+
75
+ assert K_gradient.shape[0] == X.shape[0]
76
+ assert K_gradient.shape[1] == X.shape[0]
77
+ assert K_gradient.shape[2] == kernel.theta.shape[0]
78
+
79
+ def eval_kernel_for_theta(theta):
80
+ kernel_clone = kernel.clone_with_theta(theta)
81
+ K = kernel_clone(X, eval_gradient=False)
82
+ return K
83
+
84
+ K_gradient_approx = _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
85
+
86
+ assert_almost_equal(K_gradient, K_gradient_approx, 4)
87
+
88
+
89
+ @pytest.mark.parametrize(
90
+ "kernel",
91
+ [
92
+ kernel
93
+ for kernel in kernels
94
+ # skip non-basic kernels
95
+ if not (isinstance(kernel, (KernelOperator, Exponentiation)))
96
+ ],
97
+ )
98
+ def test_kernel_theta(kernel):
99
+ # Check that parameter vector theta of kernel is set correctly.
100
+ theta = kernel.theta
101
+ _, K_gradient = kernel(X, eval_gradient=True)
102
+
103
+ # Determine kernel parameters that contribute to theta
104
+ init_sign = signature(kernel.__class__.__init__).parameters.values()
105
+ args = [p.name for p in init_sign if p.name != "self"]
106
+ theta_vars = map(
107
+ lambda s: s[0 : -len("_bounds")], filter(lambda s: s.endswith("_bounds"), args)
108
+ )
109
+ assert set(hyperparameter.name for hyperparameter in kernel.hyperparameters) == set(
110
+ theta_vars
111
+ )
112
+
113
+ # Check that values returned in theta are consistent with
114
+ # hyperparameter values (being their logarithms)
115
+ for i, hyperparameter in enumerate(kernel.hyperparameters):
116
+ assert theta[i] == np.log(getattr(kernel, hyperparameter.name))
117
+
118
+ # Fixed kernel parameters must be excluded from theta and gradient.
119
+ for i, hyperparameter in enumerate(kernel.hyperparameters):
120
+ # create copy with certain hyperparameter fixed
121
+ params = kernel.get_params()
122
+ params[hyperparameter.name + "_bounds"] = "fixed"
123
+ kernel_class = kernel.__class__
124
+ new_kernel = kernel_class(**params)
125
+ # Check that theta and K_gradient are identical with the fixed
126
+ # dimension left out
127
+ _, K_gradient_new = new_kernel(X, eval_gradient=True)
128
+ assert theta.shape[0] == new_kernel.theta.shape[0] + 1
129
+ assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1
130
+ if i > 0:
131
+ assert theta[:i] == new_kernel.theta[:i]
132
+ assert_array_equal(K_gradient[..., :i], K_gradient_new[..., :i])
133
+ if i + 1 < len(kernel.hyperparameters):
134
+ assert theta[i + 1 :] == new_kernel.theta[i:]
135
+ assert_array_equal(K_gradient[..., i + 1 :], K_gradient_new[..., i:])
136
+
137
+ # Check that values of theta are modified correctly
138
+ for i, hyperparameter in enumerate(kernel.hyperparameters):
139
+ theta[i] = np.log(42)
140
+ kernel.theta = theta
141
+ assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
142
+
143
+ setattr(kernel, hyperparameter.name, 43)
144
+ assert_almost_equal(kernel.theta[i], np.log(43))
145
+
146
+
147
+ @pytest.mark.parametrize(
148
+ "kernel",
149
+ [
150
+ kernel
151
+ for kernel in kernels
152
+ # Identity is not satisfied on diagonal
153
+ if kernel != kernel_rbf_plus_white
154
+ ],
155
+ )
156
+ def test_auto_vs_cross(kernel):
157
+ # Auto-correlation and cross-correlation should be consistent.
158
+ K_auto = kernel(X)
159
+ K_cross = kernel(X, X)
160
+ assert_almost_equal(K_auto, K_cross, 5)
161
+
162
+
163
+ @pytest.mark.parametrize("kernel", kernels)
164
+ def test_kernel_diag(kernel):
165
+ # Test that diag method of kernel returns consistent results.
166
+ K_call_diag = np.diag(kernel(X))
167
+ K_diag = kernel.diag(X)
168
+ assert_almost_equal(K_call_diag, K_diag, 5)
169
+
170
+
171
+ def test_kernel_operator_commutative():
172
+ # Adding kernels and multiplying kernels should be commutative.
173
+ # Check addition
174
+ assert_almost_equal((RBF(2.0) + 1.0)(X), (1.0 + RBF(2.0))(X))
175
+
176
+ # Check multiplication
177
+ assert_almost_equal((3.0 * RBF(2.0))(X), (RBF(2.0) * 3.0)(X))
178
+
179
+
180
+ def test_kernel_anisotropic():
181
+ # Anisotropic kernel should be consistent with isotropic kernels.
182
+ kernel = 3.0 * RBF([0.5, 2.0])
183
+
184
+ K = kernel(X)
185
+ X1 = np.array(X)
186
+ X1[:, 0] *= 4
187
+ K1 = 3.0 * RBF(2.0)(X1)
188
+ assert_almost_equal(K, K1)
189
+
190
+ X2 = np.array(X)
191
+ X2[:, 1] /= 4
192
+ K2 = 3.0 * RBF(0.5)(X2)
193
+ assert_almost_equal(K, K2)
194
+
195
+ # Check getting and setting via theta
196
+ kernel.theta = kernel.theta + np.log(2)
197
+ assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
198
+ assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
199
+
200
+
201
+ @pytest.mark.parametrize(
202
+ "kernel", [kernel for kernel in kernels if kernel.is_stationary()]
203
+ )
204
+ def test_kernel_stationary(kernel):
205
+ # Test stationarity of kernels.
206
+ K = kernel(X, X + 1)
207
+ assert_almost_equal(K[0, 0], np.diag(K))
208
+
209
+
210
+ @pytest.mark.parametrize("kernel", kernels)
211
+ def test_kernel_input_type(kernel):
212
+ # Test whether the kernel is defined for vector input or structured data
213
+ if isinstance(kernel, Exponentiation):
214
+ assert kernel.requires_vector_input == kernel.kernel.requires_vector_input
215
+ if isinstance(kernel, KernelOperator):
216
+ assert kernel.requires_vector_input == (
217
+ kernel.k1.requires_vector_input or kernel.k2.requires_vector_input
218
+ )
219
+
220
+
221
+ def test_compound_kernel_input_type():
222
+ kernel = CompoundKernel([WhiteKernel(noise_level=3.0)])
223
+ assert not kernel.requires_vector_input
224
+
225
+ kernel = CompoundKernel([WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
226
+ assert kernel.requires_vector_input
227
+
228
+
229
+ def check_hyperparameters_equal(kernel1, kernel2):
230
+ # Check that hyperparameters of two kernels are equal
231
+ for attr in set(dir(kernel1) + dir(kernel2)):
232
+ if attr.startswith("hyperparameter_"):
233
+ attr_value1 = getattr(kernel1, attr)
234
+ attr_value2 = getattr(kernel2, attr)
235
+ assert attr_value1 == attr_value2
236
+
237
+
238
+ @pytest.mark.parametrize("kernel", kernels)
239
+ def test_kernel_clone(kernel):
240
+ # Test that sklearn's clone works correctly on kernels.
241
+ kernel_cloned = clone(kernel)
242
+
243
+ # XXX: Should this be fixed?
244
+ # This differs from the sklearn's estimators equality check.
245
+ assert kernel == kernel_cloned
246
+ assert id(kernel) != id(kernel_cloned)
247
+
248
+ # Check that all constructor parameters are equal.
249
+ assert kernel.get_params() == kernel_cloned.get_params()
250
+
251
+ # Check that all hyperparameters are equal.
252
+ check_hyperparameters_equal(kernel, kernel_cloned)
253
+
254
+
255
+ @pytest.mark.parametrize("kernel", kernels)
256
+ def test_kernel_clone_after_set_params(kernel):
257
+ # This test is to verify that using set_params does not
258
+ # break clone on kernels.
259
+ # This used to break because in kernels such as the RBF, non-trivial
260
+ # logic that modified the length scale used to be in the constructor
261
+ # See https://github.com/scikit-learn/scikit-learn/issues/6961
262
+ # for more details.
263
+ bounds = (1e-5, 1e5)
264
+ kernel_cloned = clone(kernel)
265
+ params = kernel.get_params()
266
+ # RationalQuadratic kernel is isotropic.
267
+ isotropic_kernels = (ExpSineSquared, RationalQuadratic)
268
+ if "length_scale" in params and not isinstance(kernel, isotropic_kernels):
269
+ length_scale = params["length_scale"]
270
+ if np.iterable(length_scale):
271
+ # XXX unreached code as of v0.22
272
+ params["length_scale"] = length_scale[0]
273
+ params["length_scale_bounds"] = bounds
274
+ else:
275
+ params["length_scale"] = [length_scale] * 2
276
+ params["length_scale_bounds"] = bounds * 2
277
+ kernel_cloned.set_params(**params)
278
+ kernel_cloned_clone = clone(kernel_cloned)
279
+ assert kernel_cloned_clone.get_params() == kernel_cloned.get_params()
280
+ assert id(kernel_cloned_clone) != id(kernel_cloned)
281
+ check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone)
282
+
283
+
284
+ def test_matern_kernel():
285
+ # Test consistency of Matern kernel for special values of nu.
286
+ K = Matern(nu=1.5, length_scale=1.0)(X)
287
+ # the diagonal elements of a matern kernel are 1
288
+ assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
289
+ # Matern kernel for nu==0.5 is equal to the absolute exponential kernel
290
+ K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
291
+ K = Matern(nu=0.5, length_scale=1.0)(X)
292
+ assert_array_almost_equal(K, K_absexp)
293
+ # Matern kernel with nu==inf is equal to the RBF kernel
294
+ K_rbf = RBF(length_scale=1.0)(X)
295
+ K = Matern(nu=np.inf, length_scale=1.0)(X)
296
+ assert_array_almost_equal(K, K_rbf)
297
+ assert_allclose(K, K_rbf)
298
+ # test that special cases of the Matern kernel (nu in [0.5, 1.5, 2.5])
299
+ # result in nearly identical results as the general case for nu in
300
+ # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
301
+ tiny = 1e-10
302
+ for nu in [0.5, 1.5, 2.5]:
303
+ K1 = Matern(nu=nu, length_scale=1.0)(X)
304
+ K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
305
+ assert_array_almost_equal(K1, K2)
306
+ # test that a large nu is close to the RBF kernel
307
+ large = 100
308
+ K1 = Matern(nu=large, length_scale=1.0)(X)
309
+ K2 = RBF(length_scale=1.0)(X)
310
+ assert_array_almost_equal(K1, K2, decimal=2)
311
+
312
+
313
+ @pytest.mark.parametrize("kernel", kernels)
314
+ def test_kernel_versus_pairwise(kernel):
315
+ # Check that GP kernels can also be used as pairwise kernels.
316
+
317
+ # Test auto-kernel
318
+ if kernel != kernel_rbf_plus_white:
319
+ # For WhiteKernel: k(X) != k(X,X). This is assumed by
320
+ # pairwise_kernels
321
+ K1 = kernel(X)
322
+ K2 = pairwise_kernels(X, metric=kernel)
323
+ assert_array_almost_equal(K1, K2)
324
+
325
+ # Test cross-kernel
326
+ K1 = kernel(X, Y)
327
+ K2 = pairwise_kernels(X, Y, metric=kernel)
328
+ assert_array_almost_equal(K1, K2)
329
+
330
+
331
+ @pytest.mark.parametrize("kernel", kernels)
332
+ def test_set_get_params(kernel):
333
+ # Check that set_params()/get_params() is consistent with kernel.theta.
334
+
335
+ # Test get_params()
336
+ index = 0
337
+ params = kernel.get_params()
338
+ for hyperparameter in kernel.hyperparameters:
339
+ if isinstance("string", type(hyperparameter.bounds)):
340
+ if hyperparameter.bounds == "fixed":
341
+ continue
342
+ size = hyperparameter.n_elements
343
+ if size > 1: # anisotropic kernels
344
+ assert_almost_equal(
345
+ np.exp(kernel.theta[index : index + size]), params[hyperparameter.name]
346
+ )
347
+ index += size
348
+ else:
349
+ assert_almost_equal(
350
+ np.exp(kernel.theta[index]), params[hyperparameter.name]
351
+ )
352
+ index += 1
353
+ # Test set_params()
354
+ index = 0
355
+ value = 10 # arbitrary value
356
+ for hyperparameter in kernel.hyperparameters:
357
+ if isinstance("string", type(hyperparameter.bounds)):
358
+ if hyperparameter.bounds == "fixed":
359
+ continue
360
+ size = hyperparameter.n_elements
361
+ if size > 1: # anisotropic kernels
362
+ kernel.set_params(**{hyperparameter.name: [value] * size})
363
+ assert_almost_equal(
364
+ np.exp(kernel.theta[index : index + size]), [value] * size
365
+ )
366
+ index += size
367
+ else:
368
+ kernel.set_params(**{hyperparameter.name: value})
369
+ assert_almost_equal(np.exp(kernel.theta[index]), value)
370
+ index += 1
371
+
372
+
373
+ @pytest.mark.parametrize("kernel", kernels)
374
+ def test_repr_kernels(kernel):
375
+ # Smoke-test for repr in kernels.
376
+
377
+ repr(kernel)
378
+
379
+
380
+ def test_rational_quadratic_kernel():
381
+ kernel = RationalQuadratic(length_scale=[1.0, 1.0])
382
+ message = (
383
+ "RationalQuadratic kernel only supports isotropic "
384
+ "version, please use a single "
385
+ "scalar for length_scale"
386
+ )
387
+ with pytest.raises(AttributeError, match=message):
388
+ kernel(X)
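Several of the kernel tests above rely on two conventions: kernels compose with + and *, and kernel.theta exposes the free hyperparameters on a log scale. A minimal sketch of both, on random data chosen purely for illustration:

import numpy as np
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel

kernel = ConstantKernel(2.0) * RBF(length_scale=[0.5, 2.0]) + WhiteKernel(noise_level=0.1)
print(kernel.theta)              # log-transformed hyperparameters
print(np.exp(kernel.theta))      # [2.0, 0.5, 2.0, 0.1] back on the original scale

X_demo = np.random.RandomState(0).randn(4, 2)
K, K_gradient = kernel(X_demo, eval_gradient=True)
print(K.shape, K_gradient.shape)  # (4, 4) and (4, 4, 4): one gradient slice per theta entry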
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_base.cpython-310.pyc ADDED
Binary file (22.7 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc ADDED
Binary file (89.4 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_huber.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc ADDED
Binary file (65.2 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc ADDED
Binary file (18.2 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc ADDED
Binary file (62.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_omp.cpython-310.pyc ADDED
Binary file (31.3 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc ADDED
Binary file (7.65 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc ADDED
Binary file (7.35 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc ADDED
Binary file (73.5 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_sag.cpython-310.pyc ADDED
Binary file (9.76 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc ADDED
Binary file (67.2 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # License: BSD 3 clause
2
+
3
+ from .glm import (
4
+ GammaRegressor,
5
+ PoissonRegressor,
6
+ TweedieRegressor,
7
+ _GeneralizedLinearRegressor,
8
+ )
9
+
10
+ __all__ = [
11
+ "_GeneralizedLinearRegressor",
12
+ "PoissonRegressor",
13
+ "GammaRegressor",
14
+ "TweedieRegressor",
15
+ ]
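The estimators re-exported by this private __init__ are the ones normally reached through sklearn.linear_model. A small usage sketch with synthetic count data (the data and settings are illustrative assumptions):

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X_demo = rng.uniform(size=(200, 3))
true_coef = np.array([0.5, -0.2, 0.1])
y_counts = rng.poisson(lam=np.exp(X_demo @ true_coef))   # non-negative integer targets

glm = PoissonRegressor(alpha=1e-3, max_iter=300).fit(X_demo, y_counts)
print(glm.coef_, glm.intercept_)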
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (368 Bytes). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py ADDED
@@ -0,0 +1,525 @@
1
+ """
2
+ Newton solver for Generalized Linear Models
3
+ """
4
+
5
+ # Author: Christian Lorentzen <[email protected]>
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from abc import ABC, abstractmethod
10
+
11
+ import numpy as np
12
+ import scipy.linalg
13
+ import scipy.optimize
14
+
15
+ from ..._loss.loss import HalfSquaredError
16
+ from ...exceptions import ConvergenceWarning
17
+ from ...utils.optimize import _check_optimize_result
18
+ from .._linear_loss import LinearModelLoss
19
+
20
+
21
+ class NewtonSolver(ABC):
22
+ """Newton solver for GLMs.
23
+
24
+ This class implements Newton/2nd-order optimization routines for GLMs. Each Newton
25
+ iteration aims at finding the Newton step which is done by the inner solver. With
26
+ Hessian H, gradient g and coefficients coef, one step solves:
27
+
28
+ H @ coef_newton = -g
29
+
30
+ For our GLM / LinearModelLoss, we have gradient g and Hessian H:
31
+
32
+ g = X.T @ loss.gradient + l2_reg_strength * coef
33
+ H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity
34
+
35
+ Backtracking line search updates coef = coef_old + t * coef_newton for some t in
36
+ (0, 1].
37
+
38
+ This is a base class, actual implementations (child classes) may deviate from the
39
+ above pattern and use structure specific tricks.
40
+
41
+ Usage pattern:
42
+ - initialize solver: sol = NewtonSolver(...)
43
+ - solve the problem: sol.solve(X, y, sample_weight)
44
+
45
+ References
46
+ ----------
47
+ - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization"
48
+ 2nd edition
49
+ https://doi.org/10.1007/978-0-387-40065-5
50
+
51
+ - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization."
52
+ Cambridge University Press, 2004.
53
+ https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf
54
+
55
+ Parameters
56
+ ----------
57
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
58
+ Initial coefficients of a linear model.
59
+ If shape (n_classes * n_dof,), the classes of one feature are contiguous,
60
+ i.e. one reconstructs the 2d-array via
61
+ coef.reshape((n_classes, -1), order="F").
62
+
63
+ linear_loss : LinearModelLoss
64
+ The loss to be minimized.
65
+
66
+ l2_reg_strength : float, default=0.0
67
+ L2 regularization strength.
68
+
69
+ tol : float, default=1e-4
70
+ The optimization problem is solved when each of the following conditions is
71
+ fulfilled:
72
+ 1. maximum |gradient| <= tol
73
+ 2. Newton decrement d: 1/2 * d^2 <= tol
74
+
75
+ max_iter : int, default=100
76
+ Maximum number of Newton steps allowed.
77
+
78
+ n_threads : int, default=1
79
+ Number of OpenMP threads to use for the computation of the Hessian and gradient
80
+ of the loss function.
81
+
82
+ Attributes
83
+ ----------
84
+ coef_old : ndarray of shape coef.shape
85
+ Coefficient of previous iteration.
86
+
87
+ coef_newton : ndarray of shape coef.shape
88
+ Newton step.
89
+
90
+ gradient : ndarray of shape coef.shape
91
+ Gradient of the loss w.r.t. the coefficients.
92
+
93
+ gradient_old : ndarray of shape coef.shape
94
+ Gradient of previous iteration.
95
+
96
+ loss_value : float
97
+ Value of objective function = loss + penalty.
98
+
99
+ loss_value_old : float
100
+ Value of objective function of previous iteration.
101
+
102
+ raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)
103
+
104
+ converged : bool
105
+ Indicator for convergence of the solver.
106
+
107
+ iteration : int
108
+ Number of Newton steps, i.e. calls to inner_solve
109
+
110
+ use_fallback_lbfgs_solve : bool
111
+ If set to True, the solver will resort to calling LBFGS to finish the optimization
112
+ procedure in case of convergence issues.
113
+
114
+ gradient_times_newton : float
115
+ gradient @ coef_newton, set in inner_solve and used by line_search. If the
116
+ Newton step is a descent direction, this is negative.
117
+ """
118
+
119
+ def __init__(
120
+ self,
121
+ *,
122
+ coef,
123
+ linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True),
124
+ l2_reg_strength=0.0,
125
+ tol=1e-4,
126
+ max_iter=100,
127
+ n_threads=1,
128
+ verbose=0,
129
+ ):
130
+ self.coef = coef
131
+ self.linear_loss = linear_loss
132
+ self.l2_reg_strength = l2_reg_strength
133
+ self.tol = tol
134
+ self.max_iter = max_iter
135
+ self.n_threads = n_threads
136
+ self.verbose = verbose
137
+
138
+ def setup(self, X, y, sample_weight):
139
+ """Precomputations
140
+
141
+ If None, initializes:
142
+ - self.coef
143
+ Sets:
144
+ - self.raw_prediction
145
+ - self.loss_value
146
+ """
147
+ _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)
148
+ self.loss_value = self.linear_loss.loss(
149
+ coef=self.coef,
150
+ X=X,
151
+ y=y,
152
+ sample_weight=sample_weight,
153
+ l2_reg_strength=self.l2_reg_strength,
154
+ n_threads=self.n_threads,
155
+ raw_prediction=self.raw_prediction,
156
+ )
157
+
158
+ @abstractmethod
159
+ def update_gradient_hessian(self, X, y, sample_weight):
160
+ """Update gradient and Hessian."""
161
+
162
+ @abstractmethod
163
+ def inner_solve(self, X, y, sample_weight):
164
+ """Compute Newton step.
165
+
166
+ Sets:
167
+ - self.coef_newton
168
+ - self.gradient_times_newton
169
+ """
170
+
171
+ def fallback_lbfgs_solve(self, X, y, sample_weight):
172
+ """Fallback solver in case of emergency.
173
+
174
+ If a solver detects convergence problems, it may fall back to this method in
176
+ the hope of exiting with success instead of raising an error.
176
+
177
+ Sets:
178
+ - self.coef
179
+ - self.converged
180
+ """
181
+ opt_res = scipy.optimize.minimize(
182
+ self.linear_loss.loss_gradient,
183
+ self.coef,
184
+ method="L-BFGS-B",
185
+ jac=True,
186
+ options={
187
+ "maxiter": self.max_iter,
188
+ "maxls": 50, # default is 20
189
+ "iprint": self.verbose - 1,
190
+ "gtol": self.tol,
191
+ "ftol": 64 * np.finfo(np.float64).eps,
192
+ },
193
+ args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
194
+ )
195
+ self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
196
+ self.coef = opt_res.x
197
+ self.converged = opt_res.status == 0
198
+
199
+ def line_search(self, X, y, sample_weight):
200
+ """Backtracking line search.
201
+
202
+ Sets:
203
+ - self.coef_old
204
+ - self.coef
205
+ - self.loss_value_old
206
+ - self.loss_value
207
+ - self.gradient_old
208
+ - self.gradient
209
+ - self.raw_prediction
210
+ """
211
+ # line search parameters
212
+ beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11
213
+ eps = 16 * np.finfo(self.loss_value.dtype).eps
214
+ t = 1 # step size
215
+
216
+ # gradient_times_newton = self.gradient @ self.coef_newton
217
+ # was computed in inner_solve.
218
+ armijo_term = sigma * self.gradient_times_newton
219
+ _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
220
+ self.coef_newton, X
221
+ )
222
+
223
+ self.coef_old = self.coef
224
+ self.loss_value_old = self.loss_value
225
+ self.gradient_old = self.gradient
226
+
227
+ # np.sum(np.abs(self.gradient_old))
228
+ sum_abs_grad_old = -1
229
+
230
+ is_verbose = self.verbose >= 2
231
+ if is_verbose:
232
+ print(" Backtracking Line Search")
233
+ print(f" eps=16 * finfo.eps={eps}")
234
+
235
+ for i in range(21): # until and including t = beta**20 ~ 1e-6
236
+ self.coef = self.coef_old + t * self.coef_newton
237
+ raw = self.raw_prediction + t * raw_prediction_newton
238
+ self.loss_value, self.gradient = self.linear_loss.loss_gradient(
239
+ coef=self.coef,
240
+ X=X,
241
+ y=y,
242
+ sample_weight=sample_weight,
243
+ l2_reg_strength=self.l2_reg_strength,
244
+ n_threads=self.n_threads,
245
+ raw_prediction=raw,
246
+ )
247
+ # Note: If coef_newton is too large, loss_gradient may produce inf values,
248
+ # potentially accompanied by a RuntimeWarning.
249
+ # This case will be captured by the Armijo condition.
250
+
251
+ # 1. Check Armijo / sufficient decrease condition.
252
+ # The smaller (more negative) the better.
253
+ loss_improvement = self.loss_value - self.loss_value_old
254
+ check = loss_improvement <= t * armijo_term
255
+ if is_verbose:
256
+ print(
257
+ f" line search iteration={i+1}, step size={t}\n"
258
+ f" check loss improvement <= armijo term: {loss_improvement} "
259
+ f"<= {t * armijo_term} {check}"
260
+ )
261
+ if check:
262
+ break
263
+ # 2. Deal with relative loss differences around machine precision.
264
+ tiny_loss = np.abs(self.loss_value_old * eps)
265
+ check = np.abs(loss_improvement) <= tiny_loss
266
+ if is_verbose:
267
+ print(
268
+ " check loss |improvement| <= eps * |loss_old|:"
269
+ f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
270
+ )
271
+ if check:
272
+ if sum_abs_grad_old < 0:
273
+ sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
274
+ # 2.1 Check sum of absolute gradients as alternative condition.
275
+ sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
276
+ check = sum_abs_grad < sum_abs_grad_old
277
+ if is_verbose:
278
+ print(
279
+ " check sum(|gradient|) < sum(|gradient_old|): "
280
+ f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
281
+ )
282
+ if check:
283
+ break
284
+
285
+ t *= beta
286
+ else:
287
+ warnings.warn(
288
+ (
289
+ f"Line search of Newton solver {self.__class__.__name__} at"
290
+ f" iteration #{self.iteration} did no converge after 21 line search"
291
+ " refinement iterations. It will now resort to lbfgs instead."
292
+ ),
293
+ ConvergenceWarning,
294
+ )
295
+ if self.verbose:
296
+ print(" Line search did not converge and resorts to lbfgs instead.")
297
+ self.use_fallback_lbfgs_solve = True
298
+ return
299
+
300
+ self.raw_prediction = raw
301
+
302
+ def check_convergence(self, X, y, sample_weight):
303
+ """Check for convergence.
304
+
305
+ Sets self.converged.
306
+ """
307
+ if self.verbose:
308
+ print(" Check Convergence")
309
+ # Note: Checking maximum relative change of coefficient <= tol is a bad
310
+ # convergence criterion because even a large step could have brought us close
311
+ # to the true minimum.
312
+ # coef_step = self.coef - self.coef_old
313
+ # check = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old)))
314
+
315
+ # 1. Criterion: maximum |gradient| <= tol
316
+ # The gradient was already updated in line_search()
317
+ check = np.max(np.abs(self.gradient))
318
+ if self.verbose:
319
+ print(f" 1. max |gradient| {check} <= {self.tol}")
320
+ if check > self.tol:
321
+ return
322
+
323
+ # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol
324
+ # d = sqrt(grad @ hessian^-1 @ grad)
325
+ # = sqrt(coef_newton @ hessian @ coef_newton)
326
+ # See Boyd, Vandenberghe (2004) "Convex Optimization" Chapter 9.5.1.
327
+ d2 = self.coef_newton @ self.hessian @ self.coef_newton
328
+ if self.verbose:
329
+ print(f" 2. Newton decrement {0.5 * d2} <= {self.tol}")
330
+ if 0.5 * d2 > self.tol:
331
+ return
332
+
333
+ if self.verbose:
334
+ loss_value = self.linear_loss.loss(
335
+ coef=self.coef,
336
+ X=X,
337
+ y=y,
338
+ sample_weight=sample_weight,
339
+ l2_reg_strength=self.l2_reg_strength,
340
+ n_threads=self.n_threads,
341
+ )
342
+ print(f" Solver did converge at loss = {loss_value}.")
343
+ self.converged = True
344
+
345
+ def finalize(self, X, y, sample_weight):
346
+ """Finalize the solvers results.
347
+
348
+ Some solvers may need this, others not.
349
+ """
350
+ pass
351
+
352
+ def solve(self, X, y, sample_weight):
353
+ """Solve the optimization problem.
354
+
355
+ This is the main routine.
356
+
357
+ Order of calls:
358
+ self.setup()
359
+ while iteration:
360
+ self.update_gradient_hessian()
361
+ self.inner_solve()
362
+ self.line_search()
363
+ self.check_convergence()
364
+ self.finalize()
365
+
366
+ Returns
367
+ -------
368
+ coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
369
+ Solution of the optimization problem.
370
+ """
371
+ # setup usually:
372
+ # - initializes self.coef if needed
373
+ # - initializes and calculates self.raw_predictions, self.loss_value
374
+ self.setup(X=X, y=y, sample_weight=sample_weight)
375
+
376
+ self.iteration = 1
377
+ self.converged = False
378
+ self.use_fallback_lbfgs_solve = False
379
+
380
+ while self.iteration <= self.max_iter and not self.converged:
381
+ if self.verbose:
382
+ print(f"Newton iter={self.iteration}")
383
+
384
+ self.use_fallback_lbfgs_solve = False # Fallback solver.
385
+
386
+ # 1. Update Hessian and gradient
387
+ self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)
388
+
389
+ # TODO:
390
+ # if iteration == 1:
391
+ # We might stop early, e.g. we already are close to the optimum,
392
+ # usually detected by zero gradients at this stage.
393
+
394
+ # 2. Inner solver
395
+ # Calculate Newton step/direction
396
+ # This usually sets self.coef_newton and self.gradient_times_newton.
397
+ self.inner_solve(X=X, y=y, sample_weight=sample_weight)
398
+ if self.use_fallback_lbfgs_solve:
399
+ break
400
+
401
+ # 3. Backtracking line search
402
+ # This usually sets self.coef_old, self.coef, self.loss_value_old
403
+ # self.loss_value, self.gradient_old, self.gradient,
404
+ # self.raw_prediction.
405
+ self.line_search(X=X, y=y, sample_weight=sample_weight)
406
+ if self.use_fallback_lbfgs_solve:
407
+ break
408
+
409
+ # 4. Check convergence
410
+ # Sets self.converged.
411
+ self.check_convergence(X=X, y=y, sample_weight=sample_weight)
412
+
413
+ # 5. Next iteration
414
+ self.iteration += 1
415
+
416
+ if not self.converged:
417
+ if self.use_fallback_lbfgs_solve:
418
+ # Note: The fallback solver circumvents check_convergence and relies on
419
+ # the convergence checks of lbfgs instead. Enough warnings have been
420
+ # raised on the way.
421
+ self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)
422
+ else:
423
+ warnings.warn(
424
+ (
425
+ f"Newton solver did not converge after {self.iteration - 1} "
426
+ "iterations."
427
+ ),
428
+ ConvergenceWarning,
429
+ )
430
+
431
+ self.iteration -= 1
432
+ self.finalize(X=X, y=y, sample_weight=sample_weight)
433
+ return self.coef
434
+
435
+
436
+ class NewtonCholeskySolver(NewtonSolver):
437
+ """Cholesky based Newton solver.
438
+
439
+ Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear
440
+ solver.
441
+ """
442
+
443
+ def setup(self, X, y, sample_weight):
444
+ super().setup(X=X, y=y, sample_weight=sample_weight)
445
+ n_dof = X.shape[1]
446
+ if self.linear_loss.fit_intercept:
447
+ n_dof += 1
448
+ self.gradient = np.empty_like(self.coef)
449
+ self.hessian = np.empty_like(self.coef, shape=(n_dof, n_dof))
450
+
451
+ def update_gradient_hessian(self, X, y, sample_weight):
452
+ _, _, self.hessian_warning = self.linear_loss.gradient_hessian(
453
+ coef=self.coef,
454
+ X=X,
455
+ y=y,
456
+ sample_weight=sample_weight,
457
+ l2_reg_strength=self.l2_reg_strength,
458
+ n_threads=self.n_threads,
459
+ gradient_out=self.gradient,
460
+ hessian_out=self.hessian,
461
+ raw_prediction=self.raw_prediction, # this was updated in line_search
462
+ )
463
+
464
+ def inner_solve(self, X, y, sample_weight):
465
+ if self.hessian_warning:
466
+ warnings.warn(
467
+ (
468
+ f"The inner solver of {self.__class__.__name__} detected a "
469
+ "pointwise hessian with many negative values at iteration "
470
+ f"#{self.iteration}. It will now resort to lbfgs instead."
471
+ ),
472
+ ConvergenceWarning,
473
+ )
474
+ if self.verbose:
475
+ print(
476
+ " The inner solver detected a pointwise Hessian with many "
477
+ "negative values and resorts to lbfgs instead."
478
+ )
479
+ self.use_fallback_lbfgs_solve = True
480
+ return
481
+
482
+ try:
483
+ with warnings.catch_warnings():
484
+ warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
485
+ self.coef_newton = scipy.linalg.solve(
486
+ self.hessian, -self.gradient, check_finite=False, assume_a="sym"
487
+ )
488
+ self.gradient_times_newton = self.gradient @ self.coef_newton
489
+ if self.gradient_times_newton > 0:
490
+ if self.verbose:
491
+ print(
492
+ " The inner solver found a Newton step that is not a "
493
+ "descent direction and resorts to LBFGS steps instead."
494
+ )
495
+ self.use_fallback_lbfgs_solve = True
496
+ return
497
+ except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e:
498
+ warnings.warn(
499
+ f"The inner solver of {self.__class__.__name__} stumbled upon a "
500
+ "singular or very ill-conditioned Hessian matrix at iteration "
501
+ f"#{self.iteration}. It will now resort to lbfgs instead.\n"
502
+ "Further options are to use another solver or to avoid such situation "
503
+ "in the first place. Possible remedies are removing collinear features"
504
+ " of X or increasing the penalization strengths.\n"
505
+ "The original Linear Algebra message was:\n"
506
+ + str(e),
507
+ scipy.linalg.LinAlgWarning,
508
+ )
509
+ # Possible causes:
510
+ # 1. hess_pointwise is negative. But this is already taken care in
511
+ # LinearModelLoss.gradient_hessian.
512
+ # 2. X is singular or ill-conditioned
513
+ # This might be the most probable cause.
514
+ #
515
+ # There are many possible ways to deal with this situation. Most of them
516
+ # add, explicitly or implicitly, a matrix to the hessian to make it
517
+ # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed.
518
+ # Instead, we resort to lbfgs.
519
+ if self.verbose:
520
+ print(
521
+ " The inner solver stumbled upon an singular or ill-conditioned "
522
+ "Hessian matrix and resorts to LBFGS instead."
523
+ )
524
+ self.use_fallback_lbfgs_solve = True
525
+ return
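
The vendored `_newton_solver.py` above describes its algorithm in the `NewtonSolver` docstring: solve `H @ coef_newton = -g` for the Newton step, then backtrack with an Armijo condition until the step gives sufficient decrease. The following is a minimal, self-contained sketch of that general pattern on a toy Poisson-type objective; it is an illustration only, not the vendored implementation, and the helper names (`loss`, `grad`, `hess`) are made up for this example.

```python
import numpy as np

# Toy data and an L2-penalized Poisson-type objective
# f(w) = sum(exp(X @ w) - y * (X @ w)) + 0.5 * alpha * ||w||^2.
rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
y = rng.poisson(lam=np.exp(X @ np.array([0.3, -0.2, 0.1])))
alpha = 1.0

def loss(w):
    z = X @ w
    return np.sum(np.exp(z) - y * z) + 0.5 * alpha * w @ w

def grad(w):
    return X.T @ (np.exp(X @ w) - y) + alpha * w

def hess(w):
    # X.T @ diag(exp(X @ w)) @ X + alpha * identity
    return X.T @ (np.exp(X @ w)[:, None] * X) + alpha * np.eye(X.shape[1])

w = np.zeros(X.shape[1])
for it in range(100):
    g, H = grad(w), hess(w)
    if np.max(np.abs(g)) <= 1e-8:        # stop on a small gradient
        break
    step = np.linalg.solve(H, -g)        # Newton step: H @ step = -g
    # Backtracking (Armijo) line search: halve t until sufficient decrease.
    t, sigma = 1.0, 2.0**-11
    f_old, g_dot_step = loss(w), g @ step  # g @ step < 0 for a descent direction
    while loss(w + t * step) > f_old + sigma * t * g_dot_step and t > 1e-6:
        t *= 0.5
    w = w + t * step

print(it, w)
```

In the real solver the same loop additionally falls back to LBFGS when the Hessian is singular or the Newton step is not a descent direction, as the `inner_solve` and `fallback_lbfgs_solve` methods above show.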
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py ADDED
@@ -0,0 +1,904 @@
1
+ """
2
+ Generalized Linear Models with Exponential Dispersion Family
3
+ """
4
+
5
+ # Author: Christian Lorentzen <[email protected]>
6
+ # some parts and tricks stolen from other sklearn files.
7
+ # License: BSD 3 clause
8
+
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+ import scipy.optimize
13
+
14
+ from ..._loss.loss import (
15
+ HalfGammaLoss,
16
+ HalfPoissonLoss,
17
+ HalfSquaredError,
18
+ HalfTweedieLoss,
19
+ HalfTweedieLossIdentity,
20
+ )
21
+ from ...base import BaseEstimator, RegressorMixin, _fit_context
22
+ from ...utils import check_array
23
+ from ...utils._openmp_helpers import _openmp_effective_n_threads
24
+ from ...utils._param_validation import Hidden, Interval, StrOptions
25
+ from ...utils.optimize import _check_optimize_result
26
+ from ...utils.validation import _check_sample_weight, check_is_fitted
27
+ from .._linear_loss import LinearModelLoss
28
+ from ._newton_solver import NewtonCholeskySolver, NewtonSolver
29
+
30
+
31
+ class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
32
+ """Regression via a penalized Generalized Linear Model (GLM).
33
+
34
+ GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and
35
+ predicting the mean of the target y as y_pred=h(X*w) with coefficients w.
36
+ Therefore, the fit minimizes the following objective function with L2 priors as
37
+ regularizer::
38
+
39
+ 1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w))) + 1/2 * alpha * ||w||_2^2
40
+
41
+ with inverse link function h, s=sample_weight and per observation (unit) deviance
42
+ deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative
43
+ log-likelihood up to a constant (in w) term.
44
+ The parameter ``alpha`` corresponds to the lambda parameter in glmnet.
45
+
46
+ Instead of implementing the EDM family and a link function separately, we directly
47
+ use the loss functions `from sklearn._loss` which have the link functions included
48
+ in them for performance reasons. We pick the loss functions that implement
49
+ (1/2 times) EDM deviances.
50
+
51
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
52
+
53
+ .. versionadded:: 0.23
54
+
55
+ Parameters
56
+ ----------
57
+ alpha : float, default=1
58
+ Constant that multiplies the penalty term and thus determines the
59
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
60
+ GLMs. In this case, the design matrix `X` must have full column rank
61
+ (no collinearities).
62
+ Values must be in the range `[0.0, inf)`.
63
+
64
+ fit_intercept : bool, default=True
65
+ Specifies if a constant (a.k.a. bias or intercept) should be
66
+ added to the linear predictor (X @ coef + intercept).
67
+
68
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
69
+ Algorithm to use in the optimization problem:
70
+
71
+ 'lbfgs'
72
+ Calls scipy's L-BFGS-B optimizer.
73
+
74
+ 'newton-cholesky'
75
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
76
+ iterated reweighted least squares) with an inner Cholesky based solver.
77
+ This solver is a good choice for `n_samples` >> `n_features`, especially
78
+ with one-hot encoded categorical features with rare categories. Be aware
79
+ that the memory usage of this solver has a quadratic dependency on
80
+ `n_features` because it explicitly computes the Hessian matrix.
81
+
82
+ .. versionadded:: 1.2
83
+
84
+ max_iter : int, default=100
85
+ The maximal number of iterations for the solver.
86
+ Values must be in the range `[1, inf)`.
87
+
88
+ tol : float, default=1e-4
89
+ Stopping criterion. For the lbfgs solver,
90
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
91
+ where ``g_j`` is the j-th component of the gradient (derivative) of
92
+ the objective function.
93
+ Values must be in the range `(0.0, inf)`.
94
+
95
+ warm_start : bool, default=False
96
+ If set to ``True``, reuse the solution of the previous call to ``fit``
97
+ as initialization for ``coef_`` and ``intercept_``.
98
+
99
+ verbose : int, default=0
100
+ For the lbfgs solver set verbose to any positive number for verbosity.
101
+ Values must be in the range `[0, inf)`.
102
+
103
+ Attributes
104
+ ----------
105
+ coef_ : array of shape (n_features,)
106
+ Estimated coefficients for the linear predictor (`X @ coef_ +
107
+ intercept_`) in the GLM.
108
+
109
+ intercept_ : float
110
+ Intercept (a.k.a. bias) added to linear predictor.
111
+
112
+ n_iter_ : int
113
+ Actual number of iterations used in the solver.
114
+
115
+ _base_loss : BaseLoss, default=HalfSquaredError()
116
+ This is set during fit via `self._get_loss()`.
117
+ A `_base_loss` contains a specific loss function as well as the link
118
+ function. The loss to be minimized specifies the distributional assumption of
119
+ the GLM, i.e. the distribution from the EDM. Here are some examples:
120
+
121
+ ======================= ======== ==========================
122
+ _base_loss Link Target Domain
123
+ ======================= ======== ==========================
124
+ HalfSquaredError identity y any real number
125
+ HalfPoissonLoss log 0 <= y
126
+ HalfGammaLoss log 0 < y
127
+ HalfTweedieLoss log dependent on tweedie power
128
+ HalfTweedieLossIdentity identity dependent on tweedie power
129
+ ======================= ======== ==========================
130
+
131
+ The link function of the GLM, i.e. mapping from linear predictor
132
+ `X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link,
133
+ we have `y_pred = exp(X @ coeff + intercept)`.
134
+ """
135
+
136
+ # We allow for NewtonSolver classes for the "solver" parameter but do not
137
+ # make them public in the docstrings. This facilitates testing and
138
+ # benchmarking.
139
+ _parameter_constraints: dict = {
140
+ "alpha": [Interval(Real, 0.0, None, closed="left")],
141
+ "fit_intercept": ["boolean"],
142
+ "solver": [
143
+ StrOptions({"lbfgs", "newton-cholesky"}),
144
+ Hidden(type),
145
+ ],
146
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
147
+ "tol": [Interval(Real, 0.0, None, closed="neither")],
148
+ "warm_start": ["boolean"],
149
+ "verbose": ["verbose"],
150
+ }
151
+
152
+ def __init__(
153
+ self,
154
+ *,
155
+ alpha=1.0,
156
+ fit_intercept=True,
157
+ solver="lbfgs",
158
+ max_iter=100,
159
+ tol=1e-4,
160
+ warm_start=False,
161
+ verbose=0,
162
+ ):
163
+ self.alpha = alpha
164
+ self.fit_intercept = fit_intercept
165
+ self.solver = solver
166
+ self.max_iter = max_iter
167
+ self.tol = tol
168
+ self.warm_start = warm_start
169
+ self.verbose = verbose
170
+
171
+ @_fit_context(prefer_skip_nested_validation=True)
172
+ def fit(self, X, y, sample_weight=None):
173
+ """Fit a Generalized Linear Model.
174
+
175
+ Parameters
176
+ ----------
177
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
178
+ Training data.
179
+
180
+ y : array-like of shape (n_samples,)
181
+ Target values.
182
+
183
+ sample_weight : array-like of shape (n_samples,), default=None
184
+ Sample weights.
185
+
186
+ Returns
187
+ -------
188
+ self : object
189
+ Fitted model.
190
+ """
191
+ X, y = self._validate_data(
192
+ X,
193
+ y,
194
+ accept_sparse=["csc", "csr"],
195
+ dtype=[np.float64, np.float32],
196
+ y_numeric=True,
197
+ multi_output=False,
198
+ )
199
+
200
+ # required by losses
201
+ if self.solver == "lbfgs":
202
+ # lbfgs will force coef and therefore raw_prediction to be float64. The
203
+ # base_loss needs y, X @ coef and sample_weight all of same dtype
204
+ # (and contiguous).
205
+ loss_dtype = np.float64
206
+ else:
207
+ loss_dtype = min(max(y.dtype, X.dtype), np.float64)
208
+ y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False)
209
+
210
+ if sample_weight is not None:
211
+ # Note that _check_sample_weight calls check_array(order="C") required by
212
+ # losses.
213
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)
214
+
215
+ n_samples, n_features = X.shape
216
+ self._base_loss = self._get_loss()
217
+
218
+ linear_loss = LinearModelLoss(
219
+ base_loss=self._base_loss,
220
+ fit_intercept=self.fit_intercept,
221
+ )
222
+
223
+ if not linear_loss.base_loss.in_y_true_range(y):
224
+ raise ValueError(
225
+ "Some value(s) of y are out of the valid range of the loss"
226
+ f" {self._base_loss.__class__.__name__!r}."
227
+ )
228
+
229
+ # TODO: if alpha=0 check that X is not rank deficient
230
+
231
+ # NOTE: Rescaling of sample_weight:
232
+ # We want to minimize
233
+ # obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance)
234
+ # + 1/2 * alpha * L2,
235
+ # with
236
+ # deviance = 2 * loss.
237
+ # The objective is invariant to multiplying sample_weight by a constant. We
238
+ # could choose this constant such that sum(sample_weight) = 1 in order to end
239
+ # up with
240
+ # obj = sum(sample_weight * loss) + 1/2 * alpha * L2.
241
+ # But LinearModelLoss.loss() already computes
242
+ # average(loss, weights=sample_weight)
243
+ # Thus, without rescaling, we have
244
+ # obj = LinearModelLoss.loss(...)
245
+
246
+ if self.warm_start and hasattr(self, "coef_"):
247
+ if self.fit_intercept:
248
+ # LinearModelLoss needs intercept at the end of coefficient array.
249
+ coef = np.concatenate((self.coef_, np.array([self.intercept_])))
250
+ else:
251
+ coef = self.coef_
252
+ coef = coef.astype(loss_dtype, copy=False)
253
+ else:
254
+ coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
255
+ if self.fit_intercept:
256
+ coef[-1] = linear_loss.base_loss.link.link(
257
+ np.average(y, weights=sample_weight)
258
+ )
259
+
260
+ l2_reg_strength = self.alpha
261
+ n_threads = _openmp_effective_n_threads()
262
+
263
+ # Algorithms for optimization:
264
+ # Note again that our losses implement 1/2 * deviance.
265
+ if self.solver == "lbfgs":
266
+ func = linear_loss.loss_gradient
267
+
268
+ opt_res = scipy.optimize.minimize(
269
+ func,
270
+ coef,
271
+ method="L-BFGS-B",
272
+ jac=True,
273
+ options={
274
+ "maxiter": self.max_iter,
275
+ "maxls": 50, # default is 20
276
+ "iprint": self.verbose - 1,
277
+ "gtol": self.tol,
278
+ # The constant 64 was found empirically to pass the test suite.
279
+ # The point is that ftol is very small, but a bit larger than
280
+ # machine precision for float64, which is the dtype used by lbfgs.
281
+ "ftol": 64 * np.finfo(float).eps,
282
+ },
283
+ args=(X, y, sample_weight, l2_reg_strength, n_threads),
284
+ )
285
+ self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
286
+ coef = opt_res.x
287
+ elif self.solver == "newton-cholesky":
288
+ sol = NewtonCholeskySolver(
289
+ coef=coef,
290
+ linear_loss=linear_loss,
291
+ l2_reg_strength=l2_reg_strength,
292
+ tol=self.tol,
293
+ max_iter=self.max_iter,
294
+ n_threads=n_threads,
295
+ verbose=self.verbose,
296
+ )
297
+ coef = sol.solve(X, y, sample_weight)
298
+ self.n_iter_ = sol.iteration
299
+ elif issubclass(self.solver, NewtonSolver):
300
+ sol = self.solver(
301
+ coef=coef,
302
+ linear_loss=linear_loss,
303
+ l2_reg_strength=l2_reg_strength,
304
+ tol=self.tol,
305
+ max_iter=self.max_iter,
306
+ n_threads=n_threads,
307
+ )
308
+ coef = sol.solve(X, y, sample_weight)
309
+ self.n_iter_ = sol.iteration
310
+ else:
311
+ raise ValueError(f"Invalid solver={self.solver}.")
312
+
313
+ if self.fit_intercept:
314
+ self.intercept_ = coef[-1]
315
+ self.coef_ = coef[:-1]
316
+ else:
317
+ # set intercept to zero as the other linear models do
318
+ self.intercept_ = 0.0
319
+ self.coef_ = coef
320
+
321
+ return self
322
+
323
+ def _linear_predictor(self, X):
324
+ """Compute the linear_predictor = `X @ coef_ + intercept_`.
325
+
326
+ Note that we often use the term raw_prediction instead of linear predictor.
327
+
328
+ Parameters
329
+ ----------
330
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
331
+ Samples.
332
+
333
+ Returns
334
+ -------
335
+ y_pred : array of shape (n_samples,)
336
+ Returns predicted values of linear predictor.
337
+ """
338
+ check_is_fitted(self)
339
+ X = self._validate_data(
340
+ X,
341
+ accept_sparse=["csr", "csc", "coo"],
342
+ dtype=[np.float64, np.float32],
343
+ ensure_2d=True,
344
+ allow_nd=False,
345
+ reset=False,
346
+ )
347
+ return X @ self.coef_ + self.intercept_
348
+
349
+ def predict(self, X):
350
+ """Predict using GLM with feature matrix X.
351
+
352
+ Parameters
353
+ ----------
354
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
355
+ Samples.
356
+
357
+ Returns
358
+ -------
359
+ y_pred : array of shape (n_samples,)
360
+ Returns predicted values.
361
+ """
362
+ # check_array is done in _linear_predictor
363
+ raw_prediction = self._linear_predictor(X)
364
+ y_pred = self._base_loss.link.inverse(raw_prediction)
365
+ return y_pred
366
+
367
+ def score(self, X, y, sample_weight=None):
368
+ """Compute D^2, the percentage of deviance explained.
369
+
370
+ D^2 is a generalization of the coefficient of determination R^2.
371
+ R^2 uses squared error and D^2 uses the deviance of this GLM, see the
372
+ :ref:`User Guide <regression_metrics>`.
373
+
374
+ D^2 is defined as
375
+ :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
376
+ :math:`D_{null}` is the null deviance, i.e. the deviance of a model
377
+ with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
378
+ The mean :math:`\\bar{y}` is averaged by sample_weight.
379
+ Best possible score is 1.0 and it can be negative (because the model
380
+ can be arbitrarily worse).
381
+
382
+ Parameters
383
+ ----------
384
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
385
+ Test samples.
386
+
387
+ y : array-like of shape (n_samples,)
388
+ True values of target.
389
+
390
+ sample_weight : array-like of shape (n_samples,), default=None
391
+ Sample weights.
392
+
393
+ Returns
394
+ -------
395
+ score : float
396
+ D^2 of self.predict(X) w.r.t. y.
397
+ """
398
+ # TODO: Adapt link to User Guide in the docstring, once
399
+ # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.
400
+ #
401
+ # Note, default score defined in RegressorMixin is R^2 score.
402
+ # TODO: make D^2 a score function in module metrics (and thereby get
403
+ # input validation and so on)
404
+ raw_prediction = self._linear_predictor(X) # validates X
405
+ # required by losses
406
+ y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False)
407
+
408
+ if sample_weight is not None:
409
+ # Note that _check_sample_weight calls check_array(order="C") required by
410
+ # losses.
411
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
412
+
413
+ base_loss = self._base_loss
414
+
415
+ if not base_loss.in_y_true_range(y):
416
+ raise ValueError(
417
+ "Some value(s) of y are out of the valid range of the loss"
418
+ f" {base_loss.__name__}."
419
+ )
420
+
421
+ constant = np.average(
422
+ base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None),
423
+ weights=sample_weight,
424
+ )
425
+
426
+ # Missing factor of 2 in deviance cancels out.
427
+ deviance = base_loss(
428
+ y_true=y,
429
+ raw_prediction=raw_prediction,
430
+ sample_weight=sample_weight,
431
+ n_threads=1,
432
+ )
433
+ y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
434
+ deviance_null = base_loss(
435
+ y_true=y,
436
+ raw_prediction=np.tile(y_mean, y.shape[0]),
437
+ sample_weight=sample_weight,
438
+ n_threads=1,
439
+ )
440
+ return 1 - (deviance + constant) / (deviance_null + constant)
441
+
442
+ def _more_tags(self):
443
+ try:
444
+ # Create instance of BaseLoss if fit wasn't called yet. This is necessary as
445
+ # TweedieRegressor might set the used loss during fit different from
446
+ # self._base_loss.
447
+ base_loss = self._get_loss()
448
+ return {"requires_positive_y": not base_loss.in_y_true_range(-1.0)}
449
+ except (ValueError, AttributeError, TypeError):
450
+ # This happens when the link or power parameter of TweedieRegressor is
451
+ # invalid. We fallback on the default tags in that case.
452
+ return {}
453
+
454
+ def _get_loss(self):
455
+ """This is only necessary because of the link and power arguments of the
456
+ TweedieRegressor.
457
+
458
+ Note that we do not need to pass sample_weight to the loss class as this is
459
+ only needed to set loss.constant_hessian on which GLMs do not rely.
460
+ """
461
+ return HalfSquaredError()
462
+
463
+
464
+ class PoissonRegressor(_GeneralizedLinearRegressor):
465
+ """Generalized Linear Model with a Poisson distribution.
466
+
467
+ This regressor uses the 'log' link function.
468
+
469
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
470
+
471
+ .. versionadded:: 0.23
472
+
473
+ Parameters
474
+ ----------
475
+ alpha : float, default=1
476
+ Constant that multiplies the L2 penalty term and determines the
477
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
478
+ GLMs. In this case, the design matrix `X` must have full column rank
479
+ (no collinearities).
480
+ Values of `alpha` must be in the range `[0.0, inf)`.
481
+
482
+ fit_intercept : bool, default=True
483
+ Specifies if a constant (a.k.a. bias or intercept) should be
484
+ added to the linear predictor (`X @ coef + intercept`).
485
+
486
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
487
+ Algorithm to use in the optimization problem:
488
+
489
+ 'lbfgs'
490
+ Calls scipy's L-BFGS-B optimizer.
491
+
492
+ 'newton-cholesky'
493
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
494
+ iterated reweighted least squares) with an inner Cholesky based solver.
495
+ This solver is a good choice for `n_samples` >> `n_features`, especially
496
+ with one-hot encoded categorical features with rare categories. Be aware
497
+ that the memory usage of this solver has a quadratic dependency on
498
+ `n_features` because it explicitly computes the Hessian matrix.
499
+
500
+ .. versionadded:: 1.2
501
+
502
+ max_iter : int, default=100
503
+ The maximal number of iterations for the solver.
504
+ Values must be in the range `[1, inf)`.
505
+
506
+ tol : float, default=1e-4
507
+ Stopping criterion. For the lbfgs solver,
508
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
509
+ where ``g_j`` is the j-th component of the gradient (derivative) of
510
+ the objective function.
511
+ Values must be in the range `(0.0, inf)`.
512
+
513
+ warm_start : bool, default=False
514
+ If set to ``True``, reuse the solution of the previous call to ``fit``
515
+ as initialization for ``coef_`` and ``intercept_`` .
516
+
517
+ verbose : int, default=0
518
+ For the lbfgs solver set verbose to any positive number for verbosity.
519
+ Values must be in the range `[0, inf)`.
520
+
521
+ Attributes
522
+ ----------
523
+ coef_ : array of shape (n_features,)
524
+ Estimated coefficients for the linear predictor (`X @ coef_ +
525
+ intercept_`) in the GLM.
526
+
527
+ intercept_ : float
528
+ Intercept (a.k.a. bias) added to linear predictor.
529
+
530
+ n_features_in_ : int
531
+ Number of features seen during :term:`fit`.
532
+
533
+ .. versionadded:: 0.24
534
+
535
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
536
+ Names of features seen during :term:`fit`. Defined only when `X`
537
+ has feature names that are all strings.
538
+
539
+ .. versionadded:: 1.0
540
+
541
+ n_iter_ : int
542
+ Actual number of iterations used in the solver.
543
+
544
+ See Also
545
+ --------
546
+ TweedieRegressor : Generalized Linear Model with a Tweedie distribution.
547
+
548
+ Examples
549
+ --------
550
+ >>> from sklearn import linear_model
551
+ >>> clf = linear_model.PoissonRegressor()
552
+ >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
553
+ >>> y = [12, 17, 22, 21]
554
+ >>> clf.fit(X, y)
555
+ PoissonRegressor()
556
+ >>> clf.score(X, y)
557
+ 0.990...
558
+ >>> clf.coef_
559
+ array([0.121..., 0.158...])
560
+ >>> clf.intercept_
561
+ 2.088...
562
+ >>> clf.predict([[1, 1], [3, 4]])
563
+ array([10.676..., 21.875...])
564
+ """
565
+
566
+ _parameter_constraints: dict = {
567
+ **_GeneralizedLinearRegressor._parameter_constraints
568
+ }
569
+
570
+ def __init__(
571
+ self,
572
+ *,
573
+ alpha=1.0,
574
+ fit_intercept=True,
575
+ solver="lbfgs",
576
+ max_iter=100,
577
+ tol=1e-4,
578
+ warm_start=False,
579
+ verbose=0,
580
+ ):
581
+ super().__init__(
582
+ alpha=alpha,
583
+ fit_intercept=fit_intercept,
584
+ solver=solver,
585
+ max_iter=max_iter,
586
+ tol=tol,
587
+ warm_start=warm_start,
588
+ verbose=verbose,
589
+ )
590
+
591
+ def _get_loss(self):
592
+ return HalfPoissonLoss()
593
+
594
+
595
+ class GammaRegressor(_GeneralizedLinearRegressor):
596
+ """Generalized Linear Model with a Gamma distribution.
597
+
598
+ This regressor uses the 'log' link function.
599
+
600
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
601
+
602
+ .. versionadded:: 0.23
603
+
604
+ Parameters
605
+ ----------
606
+ alpha : float, default=1
607
+ Constant that multiplies the L2 penalty term and determines the
608
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
609
+ GLMs. In this case, the design matrix `X` must have full column rank
610
+ (no collinearities).
611
+ Values of `alpha` must be in the range `[0.0, inf)`.
612
+
613
+ fit_intercept : bool, default=True
614
+ Specifies if a constant (a.k.a. bias or intercept) should be
615
+ added to the linear predictor `X @ coef_ + intercept_`.
616
+
617
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
618
+ Algorithm to use in the optimization problem:
619
+
620
+ 'lbfgs'
621
+ Calls scipy's L-BFGS-B optimizer.
622
+
623
+ 'newton-cholesky'
624
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
625
+ iterated reweighted least squares) with an inner Cholesky based solver.
626
+ This solver is a good choice for `n_samples` >> `n_features`, especially
627
+ with one-hot encoded categorical features with rare categories. Be aware
628
+ that the memory usage of this solver has a quadratic dependency on
629
+ `n_features` because it explicitly computes the Hessian matrix.
630
+
631
+ .. versionadded:: 1.2
632
+
633
+ max_iter : int, default=100
634
+ The maximal number of iterations for the solver.
635
+ Values must be in the range `[1, inf)`.
636
+
637
+ tol : float, default=1e-4
638
+ Stopping criterion. For the lbfgs solver,
639
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
640
+ where ``g_j`` is the j-th component of the gradient (derivative) of
641
+ the objective function.
642
+ Values must be in the range `(0.0, inf)`.
643
+
644
+ warm_start : bool, default=False
645
+ If set to ``True``, reuse the solution of the previous call to ``fit``
646
+ as initialization for `coef_` and `intercept_`.
647
+
648
+ verbose : int, default=0
649
+ For the lbfgs solver set verbose to any positive number for verbosity.
650
+ Values must be in the range `[0, inf)`.
651
+
652
+ Attributes
653
+ ----------
654
+ coef_ : array of shape (n_features,)
655
+ Estimated coefficients for the linear predictor (`X @ coef_ +
656
+ intercept_`) in the GLM.
657
+
658
+ intercept_ : float
659
+ Intercept (a.k.a. bias) added to linear predictor.
660
+
661
+ n_features_in_ : int
662
+ Number of features seen during :term:`fit`.
663
+
664
+ .. versionadded:: 0.24
665
+
666
+ n_iter_ : int
667
+ Actual number of iterations used in the solver.
668
+
669
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
670
+ Names of features seen during :term:`fit`. Defined only when `X`
671
+ has feature names that are all strings.
672
+
673
+ .. versionadded:: 1.0
674
+
675
+ See Also
676
+ --------
677
+ PoissonRegressor : Generalized Linear Model with a Poisson distribution.
678
+ TweedieRegressor : Generalized Linear Model with a Tweedie distribution.
679
+
680
+ Examples
681
+ --------
682
+ >>> from sklearn import linear_model
683
+ >>> clf = linear_model.GammaRegressor()
684
+ >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
685
+ >>> y = [19, 26, 33, 30]
686
+ >>> clf.fit(X, y)
687
+ GammaRegressor()
688
+ >>> clf.score(X, y)
689
+ 0.773...
690
+ >>> clf.coef_
691
+ array([0.072..., 0.066...])
692
+ >>> clf.intercept_
693
+ 2.896...
694
+ >>> clf.predict([[1, 0], [2, 8]])
695
+ array([19.483..., 35.795...])
696
+ """
697
+
698
+ _parameter_constraints: dict = {
699
+ **_GeneralizedLinearRegressor._parameter_constraints
700
+ }
701
+
702
+ def __init__(
703
+ self,
704
+ *,
705
+ alpha=1.0,
706
+ fit_intercept=True,
707
+ solver="lbfgs",
708
+ max_iter=100,
709
+ tol=1e-4,
710
+ warm_start=False,
711
+ verbose=0,
712
+ ):
713
+ super().__init__(
714
+ alpha=alpha,
715
+ fit_intercept=fit_intercept,
716
+ solver=solver,
717
+ max_iter=max_iter,
718
+ tol=tol,
719
+ warm_start=warm_start,
720
+ verbose=verbose,
721
+ )
722
+
723
+ def _get_loss(self):
724
+ return HalfGammaLoss()
725
+
726
+
727
+ class TweedieRegressor(_GeneralizedLinearRegressor):
728
+ """Generalized Linear Model with a Tweedie distribution.
729
+
730
+ This estimator can be used to model different GLMs depending on the
731
+ ``power`` parameter, which determines the underlying distribution.
732
+
733
+ Read more in the :ref:`User Guide <Generalized_linear_models>`.
734
+
735
+ .. versionadded:: 0.23
736
+
737
+ Parameters
738
+ ----------
739
+ power : float, default=0
740
+ The power determines the underlying target distribution according
741
+ to the following table:
742
+
743
+ +-------+------------------------+
744
+ | Power | Distribution |
745
+ +=======+========================+
746
+ | 0 | Normal |
747
+ +-------+------------------------+
748
+ | 1 | Poisson |
749
+ +-------+------------------------+
750
+ | (1,2) | Compound Poisson Gamma |
751
+ +-------+------------------------+
752
+ | 2 | Gamma |
753
+ +-------+------------------------+
754
+ | 3 | Inverse Gaussian |
755
+ +-------+------------------------+
756
+
757
+ For ``0 < power < 1``, no distribution exists.
758
+
759
+ alpha : float, default=1
760
+ Constant that multiplies the L2 penalty term and determines the
761
+ regularization strength. ``alpha = 0`` is equivalent to unpenalized
762
+ GLMs. In this case, the design matrix `X` must have full column rank
763
+ (no collinearities).
764
+ Values of `alpha` must be in the range `[0.0, inf)`.
765
+
766
+ fit_intercept : bool, default=True
767
+ Specifies if a constant (a.k.a. bias or intercept) should be
768
+ added to the linear predictor (`X @ coef + intercept`).
769
+
770
+ link : {'auto', 'identity', 'log'}, default='auto'
771
+ The link function of the GLM, i.e. mapping from linear predictor
772
+ `X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
773
+ the link depending on the chosen `power` parameter as follows:
774
+
775
+ - 'identity' for ``power <= 0``, e.g. for the Normal distribution
776
+ - 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian
777
+ distributions
778
+
779
+ solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
780
+ Algorithm to use in the optimization problem:
781
+
782
+ 'lbfgs'
783
+ Calls scipy's L-BFGS-B optimizer.
784
+
785
+ 'newton-cholesky'
786
+ Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
787
+ iterated reweighted least squares) with an inner Cholesky based solver.
788
+ This solver is a good choice for `n_samples` >> `n_features`, especially
789
+ with one-hot encoded categorical features with rare categories. Be aware
790
+ that the memory usage of this solver has a quadratic dependency on
791
+ `n_features` because it explicitly computes the Hessian matrix.
792
+
793
+ .. versionadded:: 1.2
794
+
795
+ max_iter : int, default=100
796
+ The maximal number of iterations for the solver.
797
+ Values must be in the range `[1, inf)`.
798
+
799
+ tol : float, default=1e-4
800
+ Stopping criterion. For the lbfgs solver,
801
+ the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
802
+ where ``g_j`` is the j-th component of the gradient (derivative) of
803
+ the objective function.
804
+ Values must be in the range `(0.0, inf)`.
805
+
806
+ warm_start : bool, default=False
807
+ If set to ``True``, reuse the solution of the previous call to ``fit``
808
+ as initialization for ``coef_`` and ``intercept_`` .
809
+
810
+ verbose : int, default=0
811
+ For the lbfgs solver set verbose to any positive number for verbosity.
812
+ Values must be in the range `[0, inf)`.
813
+
814
+ Attributes
815
+ ----------
816
+ coef_ : array of shape (n_features,)
817
+ Estimated coefficients for the linear predictor (`X @ coef_ +
818
+ intercept_`) in the GLM.
819
+
820
+ intercept_ : float
821
+ Intercept (a.k.a. bias) added to linear predictor.
822
+
823
+ n_iter_ : int
824
+ Actual number of iterations used in the solver.
825
+
826
+ n_features_in_ : int
827
+ Number of features seen during :term:`fit`.
828
+
829
+ .. versionadded:: 0.24
830
+
831
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
832
+ Names of features seen during :term:`fit`. Defined only when `X`
833
+ has feature names that are all strings.
834
+
835
+ .. versionadded:: 1.0
836
+
837
+ See Also
838
+ --------
839
+ PoissonRegressor : Generalized Linear Model with a Poisson distribution.
840
+ GammaRegressor : Generalized Linear Model with a Gamma distribution.
841
+
842
+ Examples
843
+ --------
844
+ >>> from sklearn import linear_model
845
+ >>> clf = linear_model.TweedieRegressor()
846
+ >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
847
+ >>> y = [2, 3.5, 5, 5.5]
848
+ >>> clf.fit(X, y)
849
+ TweedieRegressor()
850
+ >>> clf.score(X, y)
851
+ 0.839...
852
+ >>> clf.coef_
853
+ array([0.599..., 0.299...])
854
+ >>> clf.intercept_
855
+ 1.600...
856
+ >>> clf.predict([[1, 1], [3, 4]])
857
+ array([2.500..., 4.599...])
858
+ """
859
+
860
+ _parameter_constraints: dict = {
861
+ **_GeneralizedLinearRegressor._parameter_constraints,
862
+ "power": [Interval(Real, None, None, closed="neither")],
863
+ "link": [StrOptions({"auto", "identity", "log"})],
864
+ }
865
+
866
+ def __init__(
867
+ self,
868
+ *,
869
+ power=0.0,
870
+ alpha=1.0,
871
+ fit_intercept=True,
872
+ link="auto",
873
+ solver="lbfgs",
874
+ max_iter=100,
875
+ tol=1e-4,
876
+ warm_start=False,
877
+ verbose=0,
878
+ ):
879
+ super().__init__(
880
+ alpha=alpha,
881
+ fit_intercept=fit_intercept,
882
+ solver=solver,
883
+ max_iter=max_iter,
884
+ tol=tol,
885
+ warm_start=warm_start,
886
+ verbose=verbose,
887
+ )
888
+ self.link = link
889
+ self.power = power
890
+
891
+ def _get_loss(self):
892
+ if self.link == "auto":
893
+ if self.power <= 0:
894
+ # identity link
895
+ return HalfTweedieLossIdentity(power=self.power)
896
+ else:
897
+ # log link
898
+ return HalfTweedieLoss(power=self.power)
899
+
900
+ if self.link == "log":
901
+ return HalfTweedieLoss(power=self.power)
902
+
903
+ if self.link == "identity":
904
+ return HalfTweedieLossIdentity(power=self.power)
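
The `score` method of `_GeneralizedLinearRegressor` above defines D², the fraction of deviance explained, as `D² = 1 - D(y_true, y_pred) / D_null` with the null model predicting the (weighted) mean of `y`. As a cross-check, the same quantity can be recomputed from the public deviance metrics. The sketch below reuses the data from the `PoissonRegressor` doctest and should agree with `reg.score(X, y)` up to numerical precision; it is an illustration, not part of the vendored file.

```python
import numpy as np
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import mean_poisson_deviance

X = np.array([[1, 2], [2, 3], [3, 4], [4, 3]], dtype=float)
y = np.array([12, 17, 22, 21], dtype=float)

reg = PoissonRegressor().fit(X, y)
y_pred = reg.predict(X)

# D^2 = 1 - D(y, y_pred) / D_null, where the null model predicts mean(y).
deviance = mean_poisson_deviance(y, y_pred)
deviance_null = mean_poisson_deviance(y, np.full_like(y, y.mean()))
d2 = 1 - deviance / deviance_null

print(d2, reg.score(X, y))  # both values should be approximately 0.990
```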
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # License: BSD 3 clause
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc ADDED
Binary file (23.8 kB).
 
venv/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py ADDED
@@ -0,0 +1,1112 @@
1
+ # Authors: Christian Lorentzen <[email protected]>
2
+ #
3
+ # License: BSD 3 clause
4
+
5
+ import itertools
6
+ import warnings
7
+ from functools import partial
8
+
9
+ import numpy as np
10
+ import pytest
11
+ import scipy
12
+ from numpy.testing import assert_allclose
13
+ from scipy import linalg
14
+ from scipy.optimize import minimize, root
15
+
16
+ from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss
17
+ from sklearn._loss.link import IdentityLink, LogLink
18
+ from sklearn.base import clone
19
+ from sklearn.datasets import make_low_rank_matrix, make_regression
20
+ from sklearn.exceptions import ConvergenceWarning
21
+ from sklearn.linear_model import (
22
+ GammaRegressor,
23
+ PoissonRegressor,
24
+ Ridge,
25
+ TweedieRegressor,
26
+ )
27
+ from sklearn.linear_model._glm import _GeneralizedLinearRegressor
28
+ from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver
29
+ from sklearn.linear_model._linear_loss import LinearModelLoss
30
+ from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance
31
+ from sklearn.model_selection import train_test_split
32
+
33
+ SOLVERS = ["lbfgs", "newton-cholesky"]
34
+
35
+
36
+ class BinomialRegressor(_GeneralizedLinearRegressor):
37
+ def _get_loss(self):
38
+ return HalfBinomialLoss()
39
+
40
+
41
+ def _special_minimize(fun, grad, x, tol_NM, tol):
42
+ # Find good starting point by Nelder-Mead
43
+ res_NM = minimize(
44
+ fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM}
45
+ )
46
+ # Now refine via root finding on the gradient of the function, which is
47
+ # more precise than minimizing the function itself.
48
+ res = root(
49
+ grad,
50
+ res_NM.x,
51
+ method="lm",
52
+ options={"ftol": tol, "xtol": tol, "gtol": tol},
53
+ )
54
+ return res.x
55
+
56
+
57
+ @pytest.fixture(scope="module")
58
+ def regression_data():
59
+ X, y = make_regression(
60
+ n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2
61
+ )
62
+ return X, y
63
+
64
+
65
+ @pytest.fixture(
66
+ params=itertools.product(
67
+ ["long", "wide"],
68
+ [
69
+ BinomialRegressor(),
70
+ PoissonRegressor(),
71
+ GammaRegressor(),
72
+ # TweedieRegressor(power=3.0), # too difficult
73
+ # TweedieRegressor(power=0, link="log"), # too difficult
74
+ TweedieRegressor(power=1.5),
75
+ ],
76
+ ),
77
+ ids=lambda param: f"{param[0]}-{param[1]}",
78
+ )
79
+ def glm_dataset(global_random_seed, request):
80
+ """Dataset with GLM solutions, well conditioned X.
81
+
82
+ This is inspired by ols_ridge_dataset in test_ridge.py.
83
+
84
+ The construction is based on the SVD decomposition of X = U S V'.
85
+
86
+ Parameters
87
+ ----------
88
+ type : {"long", "wide"}
89
+ If "long", then n_samples > n_features.
90
+ If "wide", then n_features > n_samples.
91
+ model : a GLM model
92
+
93
+ For "wide", we return the minimum norm solution:
94
+
95
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
96
+
97
+ Note that the deviance is always minimized if y = inverse_link(X w) is possible to
98
+ achieve, which it is in the wide data case. Therefore, we can construct the
99
+ solution with minimum norm like (wide) OLS:
100
+
101
+ min ||w||_2 subject to link(y) = raw_prediction = X w
102
+
103
+ Returns
104
+ -------
105
+ model : GLM model
106
+ X : ndarray
107
+ Last column of 1, i.e. intercept.
108
+ y : ndarray
109
+ coef_unpenalized : ndarray
110
+ Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
111
+ case of ambiguity)
112
+ Last coefficient is intercept.
113
+ coef_penalized : ndarray
114
+ GLM solution with alpha=l2_reg_strength=1, i.e.
115
+ min 1/n * sum(loss) + ||w[:-1]||_2^2.
116
+ Last coefficient is intercept.
117
+ l2_reg_strength : float
118
+ Always equal 1.
119
+ """
120
+ data_type, model = request.param
121
+ # Make the larger dimension more than twice as big as the smaller one.
122
+ # This helps when constructing singular matrices like (X, X).
123
+ if data_type == "long":
124
+ n_samples, n_features = 12, 4
125
+ else:
126
+ n_samples, n_features = 4, 12
127
+ k = min(n_samples, n_features)
128
+ rng = np.random.RandomState(global_random_seed)
129
+ X = make_low_rank_matrix(
130
+ n_samples=n_samples,
131
+ n_features=n_features,
132
+ effective_rank=k,
133
+ tail_strength=0.1,
134
+ random_state=rng,
135
+ )
136
+ X[:, -1] = 1 # last column acts as intercept
137
+ U, s, Vt = linalg.svd(X, full_matrices=False)
138
+ assert np.all(s > 1e-3) # to be sure
139
+ assert np.max(s) / np.min(s) < 100 # condition number of X
140
+
141
+ if data_type == "long":
142
+ coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
143
+ coef_unpenalized *= rng.choice([-1, 1], size=n_features)
144
+ raw_prediction = X @ coef_unpenalized
145
+ else:
146
+ raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
147
+ # minimum norm solution min ||w||_2 such that raw_prediction = X w:
148
+ # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
149
+ coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction
150
+
151
+ linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)
152
+ sw = np.full(shape=n_samples, fill_value=1 / n_samples)
153
+ y = linear_loss.base_loss.link.inverse(raw_prediction)
154
+
155
+ # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
156
+ # optimizer. Note that the problem is well conditioned such that we get accurate
157
+ # results.
158
+ l2_reg_strength = 1
159
+ fun = partial(
160
+ linear_loss.loss,
161
+ X=X[:, :-1],
162
+ y=y,
163
+ sample_weight=sw,
164
+ l2_reg_strength=l2_reg_strength,
165
+ )
166
+ grad = partial(
167
+ linear_loss.gradient,
168
+ X=X[:, :-1],
169
+ y=y,
170
+ sample_weight=sw,
171
+ l2_reg_strength=l2_reg_strength,
172
+ )
173
+ coef_penalized_with_intercept = _special_minimize(
174
+ fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14
175
+ )
176
+
177
+ linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)
178
+ fun = partial(
179
+ linear_loss.loss,
180
+ X=X[:, :-1],
181
+ y=y,
182
+ sample_weight=sw,
183
+ l2_reg_strength=l2_reg_strength,
184
+ )
185
+ grad = partial(
186
+ linear_loss.gradient,
187
+ X=X[:, :-1],
188
+ y=y,
189
+ sample_weight=sw,
190
+ l2_reg_strength=l2_reg_strength,
191
+ )
192
+ coef_penalized_without_intercept = _special_minimize(
193
+ fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14
194
+ )
195
+
196
+ # To be sure
197
+ assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
198
+ coef_unpenalized
199
+ )
200
+
201
+ return (
202
+ model,
203
+ X,
204
+ y,
205
+ coef_unpenalized,
206
+ coef_penalized_with_intercept,
207
+ coef_penalized_without_intercept,
208
+ l2_reg_strength,
209
+ )
210
+
211
+
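A quick note on the wide branch of the fixture above: for a full-row-rank X, `Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction` is the Moore-Penrose pseudoinverse applied to `raw_prediction`, i.e. the minimum-norm solution of X w = raw_prediction. A minimal standalone sketch of that identity (editorial illustration; the toy shapes are made up and this is not part of the committed test file):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.normal(size=(4, 12))      # wide design, full row rank almost surely
t = rng.normal(size=4)            # plays the role of raw_prediction

U, s, Vt = linalg.svd(X, full_matrices=False)
w_svd = Vt.T @ np.diag(1 / s) @ U.T @ t
w_pinv = np.linalg.pinv(X) @ t    # minimum-norm solution of X w = t

np.testing.assert_allclose(w_svd, w_pinv)   # same vector
np.testing.assert_allclose(X @ w_svd, t)    # and it interpolates the target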
212
+ @pytest.mark.parametrize("solver", SOLVERS)
213
+ @pytest.mark.parametrize("fit_intercept", [False, True])
214
+ def test_glm_regression(solver, fit_intercept, glm_dataset):
215
+ """Test that GLM converges for all solvers to correct solution.
216
+
217
+ We work with a simple constructed data set with known solution.
218
+ """
219
+ model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
220
+ params = dict(
221
+ alpha=alpha,
222
+ fit_intercept=fit_intercept,
223
+ solver=solver,
224
+ tol=1e-12,
225
+ max_iter=1000,
226
+ )
227
+
228
+ model = clone(model).set_params(**params)
229
+ X = X[:, :-1] # remove intercept
230
+ if fit_intercept:
231
+ coef = coef_with_intercept
232
+ intercept = coef[-1]
233
+ coef = coef[:-1]
234
+ else:
235
+ coef = coef_without_intercept
236
+ intercept = 0
237
+
238
+ model.fit(X, y)
239
+
240
+ rtol = 5e-5 if solver == "lbfgs" else 1e-9
241
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
242
+ assert_allclose(model.coef_, coef, rtol=rtol)
243
+
244
+ # Same with sample_weight.
245
+ model = (
246
+ clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
247
+ )
248
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
249
+ assert_allclose(model.coef_, coef, rtol=rtol)
250
+
251
+
252
+ @pytest.mark.parametrize("solver", SOLVERS)
253
+ @pytest.mark.parametrize("fit_intercept", [True, False])
254
+ def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset):
255
+ """Test that GLM converges for all solvers to correct solution on hstacked data.
256
+
257
+ We work with a simple constructed data set with known solution.
258
+ Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
259
+ For long X, [X, X] is still a long but singular matrix.
260
+ """
261
+ model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
262
+ n_samples, n_features = X.shape
263
+ params = dict(
264
+ alpha=alpha / 2,
265
+ fit_intercept=fit_intercept,
266
+ solver=solver,
267
+ tol=1e-12,
268
+ max_iter=1000,
269
+ )
270
+
271
+ model = clone(model).set_params(**params)
272
+ X = X[:, :-1] # remove intercept
273
+ X = 0.5 * np.concatenate((X, X), axis=1)
274
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
275
+ if fit_intercept:
276
+ coef = coef_with_intercept
277
+ intercept = coef[-1]
278
+ coef = coef[:-1]
279
+ else:
280
+ coef = coef_without_intercept
281
+ intercept = 0
282
+
283
+ with warnings.catch_warnings():
284
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
285
+ # cases should be considered a bug or not. In the meantime we don't
286
+ # fail when the assertions below pass irrespective of the presence of
287
+ # the warning.
288
+ warnings.simplefilter("ignore", ConvergenceWarning)
289
+ model.fit(X, y)
290
+
291
+ rtol = 2e-4 if solver == "lbfgs" else 5e-9
292
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
293
+ assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol)
294
+
295
+
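The docstring's claim rests on a scaling argument: with the columns duplicated and halved, the duplicated coefficient vector gives the same predictions, and its doubled squared norm is compensated by halving alpha. As a hedged editorial illustration (toy data made up; Ridge stands in for the squared-error special case of the penalized objective), the same bookkeeping can be checked in closed form:

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 3))
y = rng.normal(size=20)
alpha = 1.0

w = Ridge(alpha=alpha, fit_intercept=False).fit(X, y).coef_
w_stacked = Ridge(alpha=alpha / 2, fit_intercept=False).fit(
    0.5 * np.hstack([X, X]), y
).coef_

# duplicated coefficients, same underlying solution
np.testing.assert_allclose(w_stacked, np.r_[w, w], rtol=1e-6)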
296
+ @pytest.mark.parametrize("solver", SOLVERS)
297
+ @pytest.mark.parametrize("fit_intercept", [True, False])
298
+ def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
299
+ """Test that GLM converges for all solvers to correct solution on vstacked data.
300
+
301
+ We work with a simple constructed data set with known solution.
302
+ Fit on X, y with alpha is the same as fit on the vertically stacked data
303
+ [X; X], [y; y] with the same alpha (i.e. 1 * alpha),
304
+ because the average loss stays the same.
305
+ For wide X, [X', X'] is a singular matrix.
306
+ """
307
+ model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
308
+ n_samples, n_features = X.shape
309
+ params = dict(
310
+ alpha=alpha,
311
+ fit_intercept=fit_intercept,
312
+ solver=solver,
313
+ tol=1e-12,
314
+ max_iter=1000,
315
+ )
316
+
317
+ model = clone(model).set_params(**params)
318
+ X = X[:, :-1] # remove intercept
319
+ X = np.concatenate((X, X), axis=0)
320
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
321
+ y = np.r_[y, y]
322
+ if fit_intercept:
323
+ coef = coef_with_intercept
324
+ intercept = coef[-1]
325
+ coef = coef[:-1]
326
+ else:
327
+ coef = coef_without_intercept
328
+ intercept = 0
329
+ model.fit(X, y)
330
+
331
+ rtol = 3e-5 if solver == "lbfgs" else 5e-9
332
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
333
+ assert_allclose(model.coef_, coef, rtol=rtol)
334
+
335
+
336
+ @pytest.mark.parametrize("solver", SOLVERS)
337
+ @pytest.mark.parametrize("fit_intercept", [True, False])
338
+ def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
339
+ """Test that unpenalized GLM converges for all solvers to correct solution.
340
+
341
+ We work with a simple constructed data set with known solution.
342
+ Note: This checks the minimum norm solution for wide X, i.e.
343
+ n_samples < n_features:
344
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
345
+ """
346
+ model, X, y, coef, _, _, _ = glm_dataset
347
+ n_samples, n_features = X.shape
348
+ alpha = 0 # unpenalized
349
+ params = dict(
350
+ alpha=alpha,
351
+ fit_intercept=fit_intercept,
352
+ solver=solver,
353
+ tol=1e-12,
354
+ max_iter=1000,
355
+ )
356
+
357
+ model = clone(model).set_params(**params)
358
+ if fit_intercept:
359
+ X = X[:, :-1] # remove intercept
360
+ intercept = coef[-1]
361
+ coef = coef[:-1]
362
+ else:
363
+ intercept = 0
364
+
365
+ with warnings.catch_warnings():
366
+ if solver.startswith("newton") and n_samples < n_features:
367
+ # The newton solvers should warn and automatically fall back to LBFGS
368
+ # in this case. The model should still converge.
369
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
370
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
371
+ # cases should be considered a bug or not. In the meantime we don't
372
+ # fail when the assertions below pass irrespective of the presence of
373
+ # the warning.
374
+ warnings.filterwarnings("ignore", category=ConvergenceWarning)
375
+ model.fit(X, y)
376
+
377
+ # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
378
+ # for the wide/fat case with n_features > n_samples. Most current GLM solvers do
379
+ # NOT return the minimum norm solution with fit_intercept=True.
380
+ if n_samples > n_features:
381
+ rtol = 5e-5 if solver == "lbfgs" else 1e-7
382
+ assert model.intercept_ == pytest.approx(intercept)
383
+ assert_allclose(model.coef_, coef, rtol=rtol)
384
+ else:
385
+ # As it is an underdetermined problem, prediction = y. The following shows that
386
+ # we get a solution, i.e. a (non-unique) minimum of the objective function ...
387
+ rtol = 5e-5
388
+ if solver == "newton-cholesky":
389
+ rtol = 5e-4
390
+ assert_allclose(model.predict(X), y, rtol=rtol)
391
+
392
+ norm_solution = np.linalg.norm(np.r_[intercept, coef])
393
+ norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
394
+ if solver == "newton-cholesky":
395
+ # XXX: This solver shows random behaviour. Sometimes it finds solutions
396
+ # with norm_model <= norm_solution! So we check conditionally.
397
+ if norm_model < (1 + 1e-12) * norm_solution:
398
+ assert model.intercept_ == pytest.approx(intercept)
399
+ assert_allclose(model.coef_, coef, rtol=rtol)
400
+ elif solver == "lbfgs" and fit_intercept:
401
+ # But it is not the minimum norm solution. Otherwise the norms would be
402
+ # equal.
403
+ assert norm_model > (1 + 1e-12) * norm_solution
404
+
405
+ # See https://github.com/scikit-learn/scikit-learn/issues/23670.
406
+ # Note: Even adding a tiny penalty does not give the minimal norm solution.
407
+ # XXX: We could have naively expected LBFGS to find the minimal norm
408
+ # solution by adding a very small penalty. Even that fails for a reason we
409
+ # do not properly understand at this point.
410
+ else:
411
+ # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
412
+ # solution on this problem.
413
+ # XXX: Do we have any theoretical guarantees why this should be the case?
414
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
415
+ assert_allclose(model.coef_, coef, rtol=rtol)
416
+
417
+
418
+ @pytest.mark.parametrize("solver", SOLVERS)
419
+ @pytest.mark.parametrize("fit_intercept", [True, False])
420
+ def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
421
+ """Test that unpenalized GLM converges for all solvers to correct solution.
422
+
423
+ We work with a simple constructed data set with known solution.
424
+ GLM fit on [X] is the same as fit on [X, X]/2.
425
+ For long X, [X, X] is a singular matrix and we check against the minimum norm
426
+ solution:
427
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
428
+ """
429
+ model, X, y, coef, _, _, _ = glm_dataset
430
+ n_samples, n_features = X.shape
431
+ alpha = 0 # unpenalized
432
+ params = dict(
433
+ alpha=alpha,
434
+ fit_intercept=fit_intercept,
435
+ solver=solver,
436
+ tol=1e-12,
437
+ max_iter=1000,
438
+ )
439
+
440
+ model = clone(model).set_params(**params)
441
+ if fit_intercept:
442
+ intercept = coef[-1]
443
+ coef = coef[:-1]
444
+ if n_samples > n_features:
445
+ X = X[:, :-1] # remove intercept
446
+ X = 0.5 * np.concatenate((X, X), axis=1)
447
+ else:
448
+ # To know the minimum norm solution, we keep one intercept column and do
449
+ # not divide by 2. Later on, we must take special care.
450
+ X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]
451
+ else:
452
+ intercept = 0
453
+ X = 0.5 * np.concatenate((X, X), axis=1)
454
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
455
+
456
+ with warnings.catch_warnings():
457
+ if solver.startswith("newton"):
458
+ # The newton solvers should warn and automatically fall back to LBFGS
459
+ # in this case. The model should still converge.
460
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
461
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
462
+ # cases should be considered a bug or not. In the meantime we don't
463
+ # fail when the assertions below pass irrespective of the presence of
464
+ # the warning.
465
+ warnings.filterwarnings("ignore", category=ConvergenceWarning)
466
+ model.fit(X, y)
467
+
468
+ if fit_intercept and n_samples < n_features:
469
+ # Here we take special care.
470
+ model_intercept = 2 * model.intercept_
471
+ model_coef = 2 * model.coef_[:-1] # exclude the other intercept term.
472
+ # For minimum norm solution, we would have
473
+ # assert model.intercept_ == pytest.approx(model.coef_[-1])
474
+ else:
475
+ model_intercept = model.intercept_
476
+ model_coef = model.coef_
477
+
478
+ if n_samples > n_features:
479
+ assert model_intercept == pytest.approx(intercept)
480
+ rtol = 1e-4
481
+ assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol)
482
+ else:
483
+ # As it is an underdetermined problem, prediction = y. The following shows that
484
+ # we get a solution, i.e. a (non-unique) minimum of the objective function ...
485
+ rtol = 1e-6 if solver == "lbfgs" else 5e-6
486
+ assert_allclose(model.predict(X), y, rtol=rtol)
487
+ if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky":
488
+ # Same as in test_glm_regression_unpenalized.
489
+ # But it is not the minimum norm solution. Otherwise the norms would be
490
+ # equal.
491
+ norm_solution = np.linalg.norm(
492
+ 0.5 * np.r_[intercept, intercept, coef, coef]
493
+ )
494
+ norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
495
+ assert norm_model > (1 + 1e-12) * norm_solution
496
+ # For minimum norm solution, we would have
497
+ # assert model.intercept_ == pytest.approx(model.coef_[-1])
498
+ else:
499
+ assert model_intercept == pytest.approx(intercept, rel=5e-6)
500
+ assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4)
501
+
502
+
503
+ @pytest.mark.parametrize("solver", SOLVERS)
504
+ @pytest.mark.parametrize("fit_intercept", [True, False])
505
+ def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset):
506
+ """Test that unpenalized GLM converges for all solvers to correct solution.
507
+
508
+ We work with a simple constructed data set with known solution.
509
+ GLM fit on X, y is the same as fit on the vertically stacked data
510
+ [X; X], [y; y].
511
+ For wide X, [X', X'] is a singular matrix and we check against the minimum norm
512
+ solution:
513
+ min ||w||_2 subject to w = argmin deviance(X, y, w)
514
+ """
515
+ model, X, y, coef, _, _, _ = glm_dataset
516
+ n_samples, n_features = X.shape
517
+ alpha = 0 # unpenalized
518
+ params = dict(
519
+ alpha=alpha,
520
+ fit_intercept=fit_intercept,
521
+ solver=solver,
522
+ tol=1e-12,
523
+ max_iter=1000,
524
+ )
525
+
526
+ model = clone(model).set_params(**params)
527
+ if fit_intercept:
528
+ X = X[:, :-1] # remove intercept
529
+ intercept = coef[-1]
530
+ coef = coef[:-1]
531
+ else:
532
+ intercept = 0
533
+ X = np.concatenate((X, X), axis=0)
534
+ assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
535
+ y = np.r_[y, y]
536
+
537
+ with warnings.catch_warnings():
538
+ if solver.startswith("newton") and n_samples < n_features:
539
+ # The newton solvers should warn and automatically fall back to LBFGS
540
+ # in this case. The model should still converge.
541
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
542
+ # XXX: Investigate if the ConvergenceWarning that can appear in some
543
+ # cases should be considered a bug or not. In the meantime we don't
544
+ # fail when the assertions below pass irrespective of the presence of
545
+ # the warning.
546
+ warnings.filterwarnings("ignore", category=ConvergenceWarning)
547
+ model.fit(X, y)
548
+
549
+ if n_samples > n_features:
550
+ rtol = 5e-5 if solver == "lbfgs" else 1e-6
551
+ assert model.intercept_ == pytest.approx(intercept)
552
+ assert_allclose(model.coef_, coef, rtol=rtol)
553
+ else:
554
+ # As it is an underdetermined problem, prediction = y. The following shows that
555
+ # we get a solution, i.e. a (non-unique) minimum of the objective function ...
556
+ rtol = 1e-6 if solver == "lbfgs" else 5e-6
557
+ assert_allclose(model.predict(X), y, rtol=rtol)
558
+
559
+ norm_solution = np.linalg.norm(np.r_[intercept, coef])
560
+ norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
561
+ if solver == "newton-cholesky":
562
+ # XXX: This solver shows random behaviour. Sometimes it finds solutions
563
+ # with norm_model <= norm_solution! So we check conditionally.
564
+ if not (norm_model > (1 + 1e-12) * norm_solution):
565
+ assert model.intercept_ == pytest.approx(intercept)
566
+ assert_allclose(model.coef_, coef, rtol=1e-4)
567
+ elif solver == "lbfgs" and fit_intercept:
568
+ # Same as in test_glm_regression_unpenalized.
569
+ # But it is not the minimum norm solution. Otherwise the norms would be
570
+ # equal.
571
+ assert norm_model > (1 + 1e-12) * norm_solution
572
+ else:
573
+ rtol = 1e-5 if solver == "newton-cholesky" else 1e-4
574
+ assert model.intercept_ == pytest.approx(intercept, rel=rtol)
575
+ assert_allclose(model.coef_, coef, rtol=rtol)
576
+
577
+
578
+ def test_sample_weights_validation():
579
+ """Test the raised errors in the validation of sample_weight."""
580
+ # scalar value but not positive
581
+ X = [[1]]
582
+ y = [1]
583
+ weights = 0
584
+ glm = _GeneralizedLinearRegressor()
585
+
586
+ # Positive weights are accepted
587
+ glm.fit(X, y, sample_weight=1)
588
+
589
+ # 2d array
590
+ weights = [[0]]
591
+ with pytest.raises(ValueError, match="must be 1D array or scalar"):
592
+ glm.fit(X, y, weights)
593
+
594
+ # 1d but wrong length
595
+ weights = [1, 0]
596
+ msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
597
+ with pytest.raises(ValueError, match=msg):
598
+ glm.fit(X, y, weights)
599
+
600
+
601
+ @pytest.mark.parametrize(
602
+ "glm",
603
+ [
604
+ TweedieRegressor(power=3),
605
+ PoissonRegressor(),
606
+ GammaRegressor(),
607
+ TweedieRegressor(power=1.5),
608
+ ],
609
+ )
610
+ def test_glm_wrong_y_range(glm):
611
+ y = np.array([-1, 2])
612
+ X = np.array([[1], [1]])
613
+ msg = r"Some value\(s\) of y are out of the valid range of the loss"
614
+ with pytest.raises(ValueError, match=msg):
615
+ glm.fit(X, y)
616
+
617
+
618
+ @pytest.mark.parametrize("fit_intercept", [False, True])
619
+ def test_glm_identity_regression(fit_intercept):
620
+ """Test GLM regression with identity link on a simple dataset."""
621
+ coef = [1.0, 2.0]
622
+ X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
623
+ y = np.dot(X, coef)
624
+ glm = _GeneralizedLinearRegressor(
625
+ alpha=0,
626
+ fit_intercept=fit_intercept,
627
+ tol=1e-12,
628
+ )
629
+ if fit_intercept:
630
+ glm.fit(X[:, 1:], y)
631
+ assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
632
+ assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
633
+ else:
634
+ glm.fit(X, y)
635
+ assert_allclose(glm.coef_, coef, rtol=1e-12)
636
+
637
+
638
+ @pytest.mark.parametrize("fit_intercept", [False, True])
639
+ @pytest.mark.parametrize("alpha", [0.0, 1.0])
640
+ @pytest.mark.parametrize(
641
+ "GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor]
642
+ )
643
+ def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
644
+ """Test that the impact of sample_weight is consistent"""
645
+ rng = np.random.RandomState(0)
646
+ n_samples, n_features = 10, 5
647
+
648
+ X = rng.rand(n_samples, n_features)
649
+ y = rng.rand(n_samples)
650
+ glm_params = dict(alpha=alpha, fit_intercept=fit_intercept)
651
+
652
+ glm = GLMEstimator(**glm_params).fit(X, y)
653
+ coef = glm.coef_.copy()
654
+
655
+ # sample_weight=np.ones(..) should be equivalent to sample_weight=None
656
+ sample_weight = np.ones(y.shape)
657
+ glm.fit(X, y, sample_weight=sample_weight)
658
+ assert_allclose(glm.coef_, coef, rtol=1e-12)
659
+
660
+ # sample_weight is normalized to 1, so scaling it has no effect
661
+ sample_weight = 2 * np.ones(y.shape)
662
+ glm.fit(X, y, sample_weight=sample_weight)
663
+ assert_allclose(glm.coef_, coef, rtol=1e-12)
664
+
665
+ # setting one element of sample_weight to 0 is equivalent to removing
666
+ # the corresponding sample
667
+ sample_weight = np.ones(y.shape)
668
+ sample_weight[-1] = 0
669
+ glm.fit(X, y, sample_weight=sample_weight)
670
+ coef1 = glm.coef_.copy()
671
+ glm.fit(X[:-1], y[:-1])
672
+ assert_allclose(glm.coef_, coef1, rtol=1e-12)
673
+
674
+ # check that multiplying sample_weight by 2 is equivalent
675
+ # to repeating corresponding samples twice
676
+ X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
677
+ y2 = np.concatenate([y, y[: n_samples // 2]])
678
+ sample_weight_1 = np.ones(len(y))
679
+ sample_weight_1[: n_samples // 2] = 2
680
+
681
+ glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1)
682
+
683
+ glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None)
684
+ assert_allclose(glm1.coef_, glm2.coef_)
685
+
686
+
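The last block of the test above relies on the equivalence between integer sample weights and sample repetition. A standalone sketch of the same idea with PoissonRegressor (editorial illustration; toy data and tolerances are made up):

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.rand(10, 3)
y = rng.rand(10)  # non-negative targets are enough for the Poisson loss

# double the weight of the first sample ...
sw = np.ones(10)
sw[0] = 2
m1 = PoissonRegressor(alpha=1.0, tol=1e-8).fit(X, y, sample_weight=sw)

# ... versus literally repeating that sample once
m2 = PoissonRegressor(alpha=1.0, tol=1e-8).fit(np.r_[X, X[:1]], np.r_[y, y[:1]])

np.testing.assert_allclose(m1.coef_, m2.coef_, rtol=1e-5, atol=1e-7)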
687
+ @pytest.mark.parametrize("solver", SOLVERS)
688
+ @pytest.mark.parametrize("fit_intercept", [True, False])
689
+ @pytest.mark.parametrize(
690
+ "estimator",
691
+ [
692
+ PoissonRegressor(),
693
+ GammaRegressor(),
694
+ TweedieRegressor(power=3.0),
695
+ TweedieRegressor(power=0, link="log"),
696
+ TweedieRegressor(power=1.5),
697
+ TweedieRegressor(power=4.5),
698
+ ],
699
+ )
700
+ def test_glm_log_regression(solver, fit_intercept, estimator):
701
+ """Test GLM regression with log link on a simple dataset."""
702
+ coef = [0.2, -0.1]
703
+ X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T
704
+ y = np.exp(np.dot(X, coef))
705
+ glm = clone(estimator).set_params(
706
+ alpha=0,
707
+ fit_intercept=fit_intercept,
708
+ solver=solver,
709
+ tol=1e-8,
710
+ )
711
+ if fit_intercept:
712
+ res = glm.fit(X[:, :-1], y)
713
+ assert_allclose(res.coef_, coef[:-1], rtol=1e-6)
714
+ assert_allclose(res.intercept_, coef[-1], rtol=1e-6)
715
+ else:
716
+ res = glm.fit(X, y)
717
+ assert_allclose(res.coef_, coef, rtol=2e-6)
718
+
719
+
720
+ @pytest.mark.parametrize("solver", SOLVERS)
721
+ @pytest.mark.parametrize("fit_intercept", [True, False])
722
+ def test_warm_start(solver, fit_intercept, global_random_seed):
723
+ n_samples, n_features = 100, 10
724
+ X, y = make_regression(
725
+ n_samples=n_samples,
726
+ n_features=n_features,
727
+ n_informative=n_features - 2,
728
+ bias=fit_intercept * 1.0,
729
+ noise=1.0,
730
+ random_state=global_random_seed,
731
+ )
732
+ y = np.abs(y) # Poisson requires non-negative targets.
733
+ alpha = 1
734
+ params = {
735
+ "solver": solver,
736
+ "fit_intercept": fit_intercept,
737
+ "tol": 1e-10,
738
+ }
739
+
740
+ glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params)
741
+ glm1.fit(X, y)
742
+
743
+ glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
744
+ # We intentionally set max_iter=1 so that the solver raises a
745
+ # ConvergenceWarning.
746
+ with pytest.warns(ConvergenceWarning):
747
+ glm2.fit(X, y)
748
+
749
+ linear_loss = LinearModelLoss(
750
+ base_loss=glm1._get_loss(),
751
+ fit_intercept=fit_intercept,
752
+ )
753
+ sw = np.full_like(y, fill_value=1 / n_samples)
754
+
755
+ objective_glm1 = linear_loss.loss(
756
+ coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_,
757
+ X=X,
758
+ y=y,
759
+ sample_weight=sw,
760
+ l2_reg_strength=alpha,
761
+ )
762
+ objective_glm2 = linear_loss.loss(
763
+ coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_,
764
+ X=X,
765
+ y=y,
766
+ sample_weight=sw,
767
+ l2_reg_strength=alpha,
768
+ )
769
+ assert objective_glm1 < objective_glm2
770
+
771
+ glm2.set_params(max_iter=1000)
772
+ glm2.fit(X, y)
773
+ # The two models are not exactly identical since the lbfgs solver
774
+ # computes the approximate hessian from previous iterations, which
775
+ # will not be strictly identical in the case of a warm start.
776
+ rtol = 2e-4 if solver == "lbfgs" else 1e-9
777
+ assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol)
778
+ assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5)
779
+
780
+
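With warm_start=True, the second call to fit starts from the previously learned coef_ and intercept_ instead of from scratch, so refitting on unchanged data should need at most as many iterations as the first fit. A small editorial sketch of that behaviour (toy data and parameters are made up):

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.rand(50, 5)
y = rng.rand(50)

glm = PoissonRegressor(warm_start=True, alpha=1.0, tol=1e-8, max_iter=1000)
glm.fit(X, y)
n_iter_cold = glm.n_iter_

glm.fit(X, y)  # starts from the previous solution
assert glm.n_iter_ <= n_iter_cold  # typically converges almost immediately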
781
+ @pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)])
782
+ @pytest.mark.parametrize("fit_intercept", [True, False])
783
+ @pytest.mark.parametrize("sample_weight", [None, True])
784
+ def test_normal_ridge_comparison(
785
+ n_samples, n_features, fit_intercept, sample_weight, request
786
+ ):
787
+ """Compare with Ridge regression for Normal distributions."""
788
+ test_size = 10
789
+ X, y = make_regression(
790
+ n_samples=n_samples + test_size,
791
+ n_features=n_features,
792
+ n_informative=n_features - 2,
793
+ noise=0.5,
794
+ random_state=42,
795
+ )
796
+
797
+ if n_samples > n_features:
798
+ ridge_params = {"solver": "svd"}
799
+ else:
800
+ ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
801
+
802
+ (
803
+ X_train,
804
+ X_test,
805
+ y_train,
806
+ y_test,
807
+ ) = train_test_split(X, y, test_size=test_size, random_state=0)
808
+
809
+ alpha = 1.0
810
+ if sample_weight is None:
811
+ sw_train = None
812
+ alpha_ridge = alpha * n_samples
813
+ else:
814
+ sw_train = np.random.RandomState(0).rand(len(y_train))
815
+ alpha_ridge = alpha * sw_train.sum()
816
+
817
+ # GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2
818
+ ridge = Ridge(
819
+ alpha=alpha_ridge,
820
+ random_state=42,
821
+ fit_intercept=fit_intercept,
822
+ **ridge_params,
823
+ )
824
+ ridge.fit(X_train, y_train, sample_weight=sw_train)
825
+
826
+ glm = _GeneralizedLinearRegressor(
827
+ alpha=alpha,
828
+ fit_intercept=fit_intercept,
829
+ max_iter=300,
830
+ tol=1e-5,
831
+ )
832
+ glm.fit(X_train, y_train, sample_weight=sw_train)
833
+ assert glm.coef_.shape == (X.shape[1],)
834
+ assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
835
+ assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
836
+ assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
837
+ assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
838
+
839
+
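The comment above encodes the mapping between the two objectives: multiplying the GLM objective 1/(2n) * ||y - Xw||^2 + alpha/2 * ||w||^2 by 2n yields ||y - Xw||^2 + alpha * n * ||w||^2, i.e. Ridge with alpha_ridge = alpha * n_samples. An editorial sketch of the same comparison using the public TweedieRegressor(power=0) (normal deviance, identity link) instead of the private estimator; toy data are made up:

import numpy as np
from sklearn.linear_model import Ridge, TweedieRegressor

rng = np.random.RandomState(0)
n_samples, n_features = 100, 5
X = rng.normal(size=(n_samples, n_features))
y = X @ rng.normal(size=n_features) + 3.0 + 0.5 * rng.normal(size=n_samples)

alpha = 1.0
glm = TweedieRegressor(power=0, alpha=alpha, tol=1e-10, max_iter=1000).fit(X, y)
ridge = Ridge(alpha=alpha * n_samples, solver="svd").fit(X, y)

np.testing.assert_allclose(glm.coef_, ridge.coef_, rtol=1e-5)
np.testing.assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)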
840
+ @pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"])
841
+ def test_poisson_glmnet(solver):
842
+ """Compare Poisson regression with L2 regularization and LogLink to glmnet"""
843
+ # library("glmnet")
844
+ # options(digits=10)
845
+ # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
846
+ # x <- data.matrix(df[,c("a", "b")])
847
+ # y <- df$y
848
+ # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
849
+ # standardize=F, thresh=1e-10, nlambda=10000)
850
+ # coef(fit, s=1)
851
+ # (Intercept) -0.12889386979
852
+ # a 0.29019207995
853
+ # b 0.03741173122
854
+ X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
855
+ y = np.array([0, 1, 1, 2])
856
+ glm = PoissonRegressor(
857
+ alpha=1,
858
+ fit_intercept=True,
859
+ tol=1e-7,
860
+ max_iter=300,
861
+ solver=solver,
862
+ )
863
+ glm.fit(X, y)
864
+ assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
865
+ assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
866
+
867
+
868
+ def test_convergence_warning(regression_data):
869
+ X, y = regression_data
870
+
871
+ est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20)
872
+ with pytest.warns(ConvergenceWarning):
873
+ est.fit(X, y)
874
+
875
+
876
+ @pytest.mark.parametrize(
877
+ "name, link_class", [("identity", IdentityLink), ("log", LogLink)]
878
+ )
879
+ def test_tweedie_link_argument(name, link_class):
880
+ """Test GLM link argument set as string."""
881
+ y = np.array([0.1, 0.5]) # in range of all distributions
882
+ X = np.array([[1], [2]])
883
+ glm = TweedieRegressor(power=1, link=name).fit(X, y)
884
+ assert isinstance(glm._base_loss.link, link_class)
885
+
886
+
887
+ @pytest.mark.parametrize(
888
+ "power, expected_link_class",
889
+ [
890
+ (0, IdentityLink), # normal
891
+ (1, LogLink), # poisson
892
+ (2, LogLink), # gamma
893
+ (3, LogLink), # inverse-gaussian
894
+ ],
895
+ )
896
+ def test_tweedie_link_auto(power, expected_link_class):
897
+ """Test that link='auto' delivers the expected link function"""
898
+ y = np.array([0.1, 0.5]) # in range of all distributions
899
+ X = np.array([[1], [2]])
900
+ glm = TweedieRegressor(link="auto", power=power).fit(X, y)
901
+ assert isinstance(glm._base_loss.link, expected_link_class)
902
+
903
+
904
+ @pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3])
905
+ @pytest.mark.parametrize("link", ["log", "identity"])
906
+ def test_tweedie_score(regression_data, power, link):
907
+ """Test that GLM score equals d2_tweedie_score for Tweedie losses."""
908
+ X, y = regression_data
909
+ # make y positive
910
+ y = np.abs(y) + 1.0
911
+ glm = TweedieRegressor(power=power, link=link).fit(X, y)
912
+ assert glm.score(X, y) == pytest.approx(
913
+ d2_tweedie_score(y, glm.predict(X), power=power)
914
+ )
915
+
916
+
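The score compared here is D^2, the fraction of Tweedie deviance explained relative to a constant model predicting the mean of y. An editorial sketch spelling out that definition with mean_tweedie_deviance (toy data and alpha are made up):

import numpy as np
from sklearn.linear_model import TweedieRegressor
from sklearn.metrics import mean_tweedie_deviance

rng = np.random.RandomState(0)
X = rng.rand(50, 2)
y = np.exp(2.0 * X[:, 0] - X[:, 1])  # strictly positive targets
power = 1.5

glm = TweedieRegressor(power=power, link="log", alpha=0.01).fit(X, y)
dev_model = mean_tweedie_deviance(y, glm.predict(X), power=power)
dev_null = mean_tweedie_deviance(y, np.full_like(y, y.mean()), power=power)
d2 = 1 - dev_model / dev_null  # fraction of deviance explained

assert np.isclose(d2, glm.score(X, y), rtol=1e-6)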
917
+ @pytest.mark.parametrize(
918
+ "estimator, value",
919
+ [
920
+ (PoissonRegressor(), True),
921
+ (GammaRegressor(), True),
922
+ (TweedieRegressor(power=1.5), True),
923
+ (TweedieRegressor(power=0), False),
924
+ ],
925
+ )
926
+ def test_tags(estimator, value):
927
+ assert estimator._get_tags()["requires_positive_y"] is value
928
+
929
+
930
+ def test_linalg_warning_with_newton_solver(global_random_seed):
931
+ newton_solver = "newton-cholesky"
932
+ rng = np.random.RandomState(global_random_seed)
933
+ # Use at least 20 samples to reduce the likelihood of getting a degenerate
934
+ # dataset for any global_random_seed.
935
+ X_orig = rng.normal(size=(20, 3))
936
+ y = rng.poisson(
937
+ np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0]
938
+ ).astype(np.float64)
939
+
940
+ # Collinear variation of the same input features.
941
+ X_collinear = np.hstack([X_orig] * 10)
942
+
943
+ # Let's consider the deviance of a constant baseline on this problem.
944
+ baseline_pred = np.full_like(y, y.mean())
945
+ constant_model_deviance = mean_poisson_deviance(y, baseline_pred)
946
+ assert constant_model_deviance > 1.0
947
+
948
+ # No warning raised on well-conditioned design, even without regularization.
949
+ tol = 1e-10
950
+ with warnings.catch_warnings():
951
+ warnings.simplefilter("error")
952
+ reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y)
953
+ original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig))
954
+
955
+ # On this dataset, we should have enough data points to not make it
956
+ # possible to get a near zero deviance (for any of the admissible
958
+ # random seeds). This will make it easier to interpret the meaning of rtol in
958
+ # the subsequent assertions:
959
+ assert original_newton_deviance > 0.2
960
+
961
+ # We check that the model could successfully fit information in X_orig to
962
+ # improve upon the constant baseline by a large margin (when evaluated on
963
+ # the training set).
964
+ assert constant_model_deviance - original_newton_deviance > 0.1
965
+
966
+ # LBFGS is robust to a collinear design because its approximation of the
967
+ # Hessian is Symmetric Positive Definite by construction. Let's record its
968
+ # solution.
969
+ with warnings.catch_warnings():
970
+ warnings.simplefilter("error")
971
+ reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y)
972
+ collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
973
+
974
+ # The LBFGS solution on the collinear data is expected to reach a comparable
975
+ # solution to the Newton solution on the original data.
976
+ rtol = 1e-6
977
+ assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol)
978
+
979
+ # Fitting a Newton solver on the collinear version of the training data
980
+ # without regularization should raise an informative warning and fall back
981
+ # to the LBFGS solver.
982
+ msg = (
983
+ "The inner solver of .*Newton.*Solver stumbled upon a singular or very "
984
+ "ill-conditioned Hessian matrix"
985
+ )
986
+ with pytest.warns(scipy.linalg.LinAlgWarning, match=msg):
987
+ reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(
988
+ X_collinear, y
989
+ )
990
+ # As a result we should still automatically converge to a good solution.
991
+ collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
992
+ assert collinear_newton_deviance == pytest.approx(
993
+ original_newton_deviance, rel=rtol
994
+ )
995
+
996
+ # Increasing the regularization slightly should make the problem go away:
997
+ with warnings.catch_warnings():
998
+ warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
999
+ reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y)
1000
+
1001
+ # The slightly penalized model on the collinear data should be close enough
1002
+ # to the unpenalized model on the original data.
1003
+ penalized_collinear_newton_deviance = mean_poisson_deviance(
1004
+ y, reg.predict(X_collinear)
1005
+ )
1006
+ assert penalized_collinear_newton_deviance == pytest.approx(
1007
+ original_newton_deviance, rel=rtol
1008
+ )
1009
+
1010
+
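The warning exercised by this test comes from an exactly collinear design: tiling X ten times leaves its rank unchanged, so the Gram-type Hessian built from it is singular. A short editorial sketch making the ill-conditioning visible (same toy shapes as in the test):

import numpy as np

rng = np.random.RandomState(0)
X_orig = rng.normal(size=(20, 3))
X_collinear = np.hstack([X_orig] * 10)  # 30 columns, but still rank 3

print(np.linalg.matrix_rank(X_collinear))            # 3
print(np.linalg.cond(X_orig.T @ X_orig))             # moderate condition number
print(np.linalg.cond(X_collinear.T @ X_collinear))   # effectively infinite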
1011
+ @pytest.mark.parametrize("verbose", [0, 1, 2])
1012
+ def test_newton_solver_verbosity(capsys, verbose):
1013
+ """Test the std output of verbose newton solvers."""
1014
+ y = np.array([1, 2], dtype=float)
1015
+ X = np.array([[1.0, 0], [0, 1]], dtype=float)
1016
+ linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False)
1017
+ sol = NewtonCholeskySolver(
1018
+ coef=linear_loss.init_zero_coef(X),
1019
+ linear_loss=linear_loss,
1020
+ l2_reg_strength=0,
1021
+ verbose=verbose,
1022
+ )
1023
+ sol.solve(X, y, None) # returns array([0., 0.69314758])
1024
+ captured = capsys.readouterr()
1025
+
1026
+ if verbose == 0:
1027
+ assert captured.out == ""
1028
+ else:
1029
+ msg = [
1030
+ "Newton iter=1",
1031
+ "Check Convergence",
1032
+ "1. max |gradient|",
1033
+ "2. Newton decrement",
1034
+ "Solver did converge at loss = ",
1035
+ ]
1036
+ for m in msg:
1037
+ assert m in captured.out
1038
+
1039
+ if verbose >= 2:
1040
+ msg = ["Backtracking Line Search", "line search iteration="]
1041
+ for m in msg:
1042
+ assert m in captured.out
1043
+
1044
+ # Set the Newton solver to a state with a completely wrong Newton step.
1045
+ sol = NewtonCholeskySolver(
1046
+ coef=linear_loss.init_zero_coef(X),
1047
+ linear_loss=linear_loss,
1048
+ l2_reg_strength=0,
1049
+ verbose=verbose,
1050
+ )
1051
+ sol.setup(X=X, y=y, sample_weight=None)
1052
+ sol.iteration = 1
1053
+ sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
1054
+ sol.coef_newton = np.array([1.0, 0])
1055
+ sol.gradient_times_newton = sol.gradient @ sol.coef_newton
1056
+ with warnings.catch_warnings():
1057
+ warnings.simplefilter("ignore", ConvergenceWarning)
1058
+ sol.line_search(X=X, y=y, sample_weight=None)
1059
+ captured = capsys.readouterr()
1060
+ if verbose >= 1:
1061
+ assert (
1062
+ "Line search did not converge and resorts to lbfgs instead." in captured.out
1063
+ )
1064
+
1065
+ # Set the Newton solver to a state with a bad Newton step such that the loss
1066
+ # improvement in line search is tiny.
1067
+ sol = NewtonCholeskySolver(
1068
+ coef=np.array([1e-12, 0.69314758]),
1069
+ linear_loss=linear_loss,
1070
+ l2_reg_strength=0,
1071
+ verbose=verbose,
1072
+ )
1073
+ sol.setup(X=X, y=y, sample_weight=None)
1074
+ sol.iteration = 1
1075
+ sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
1076
+ sol.coef_newton = np.array([1e-6, 0])
1077
+ sol.gradient_times_newton = sol.gradient @ sol.coef_newton
1078
+ with warnings.catch_warnings():
1079
+ warnings.simplefilter("ignore", ConvergenceWarning)
1080
+ sol.line_search(X=X, y=y, sample_weight=None)
1081
+ captured = capsys.readouterr()
1082
+ if verbose >= 2:
1083
+ msg = [
1084
+ "line search iteration=",
1085
+ "check loss improvement <= armijo term:",
1086
+ "check loss |improvement| <= eps * |loss_old|:",
1087
+ "check sum(|gradient|) < sum(|gradient_old|):",
1088
+ ]
1089
+ for m in msg:
1090
+ assert m in captured.out
1091
+
1092
+ # Test for a case with negative hessian. We badly initialize coef for a Tweedie
1093
+ # loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link.
1094
+ linear_loss = LinearModelLoss(
1095
+ base_loss=HalfTweedieLoss(power=3), fit_intercept=False
1096
+ )
1097
+ sol = NewtonCholeskySolver(
1098
+ coef=linear_loss.init_zero_coef(X) + 1,
1099
+ linear_loss=linear_loss,
1100
+ l2_reg_strength=0,
1101
+ verbose=verbose,
1102
+ )
1103
+ with warnings.catch_warnings():
1104
+ warnings.simplefilter("ignore", ConvergenceWarning)
1105
+ sol.solve(X, y, None)
1106
+ captured = capsys.readouterr()
1107
+ if verbose >= 1:
1108
+ assert (
1109
+ "The inner solver detected a pointwise Hessian with many negative values"
1110
+ " and resorts to lbfgs instead."
1111
+ in captured.out
1112
+ )
venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
venv/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (16.7 kB). View file