diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..668b53052855173fb380476437cd87a780585833 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7514a4f267da66abc8ee485a7b387b79c03f758e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ebbf5c8e72e8d4803a1db638bc9f9d4371b7eae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c14bb4e14d0aea2d821635eea980e132c2ce8b7d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/test_pls.py b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/test_pls.py new file mode 100644 index 0000000000000000000000000000000000000000..b8b5cbaa0f2750d9b2b8eecb3b3f8c416292cd6c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/test_pls.py @@ -0,0 +1,646 @@ +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal + +from sklearn.cross_decomposition import CCA, PLSSVD, PLSCanonical, PLSRegression +from sklearn.cross_decomposition._pls import ( + _center_scale_xy, + _get_first_singular_vectors_power_method, + _get_first_singular_vectors_svd, + _svd_flip_1d, +) +from sklearn.datasets import load_linnerud, make_regression +from sklearn.ensemble import VotingRegressor +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import LinearRegression +from sklearn.utils import check_random_state +from sklearn.utils.extmath import svd_flip + + +def assert_matrix_orthogonal(M): + K = np.dot(M.T, M) + assert_array_almost_equal(K, np.diag(np.diag(K))) + + +def test_pls_canonical_basics(): + # Basic checks for PLSCanonical + d = 
load_linnerud() + X = d.data + Y = d.target + + pls = PLSCanonical(n_components=X.shape[1]) + pls.fit(X, Y) + + assert_matrix_orthogonal(pls.x_weights_) + assert_matrix_orthogonal(pls.y_weights_) + assert_matrix_orthogonal(pls._x_scores) + assert_matrix_orthogonal(pls._y_scores) + + # Check X = TP' and Y = UQ' + T = pls._x_scores + P = pls.x_loadings_ + U = pls._y_scores + Q = pls.y_loadings_ + # Need to scale first + Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy( + X.copy(), Y.copy(), scale=True + ) + assert_array_almost_equal(Xc, np.dot(T, P.T)) + assert_array_almost_equal(Yc, np.dot(U, Q.T)) + + # Check that rotations on training data lead to scores + Xt = pls.transform(X) + assert_array_almost_equal(Xt, pls._x_scores) + Xt, Yt = pls.transform(X, Y) + assert_array_almost_equal(Xt, pls._x_scores) + assert_array_almost_equal(Yt, pls._y_scores) + + # Check that inverse_transform works + X_back = pls.inverse_transform(Xt) + assert_array_almost_equal(X_back, X) + _, Y_back = pls.inverse_transform(Xt, Yt) + assert_array_almost_equal(Y_back, Y) + + +def test_sanity_check_pls_regression(): + # Sanity check for PLSRegression + # The results were checked against the R-packages plspm, misOmics and pls + + d = load_linnerud() + X = d.data + Y = d.target + + pls = PLSRegression(n_components=X.shape[1]) + X_trans, _ = pls.fit_transform(X, Y) + + # FIXME: one would expect y_trans == pls.y_scores_ but this is not + # the case. + # xref: https://github.com/scikit-learn/scikit-learn/issues/22420 + assert_allclose(X_trans, pls.x_scores_) + + expected_x_weights = np.array( + [ + [-0.61330704, -0.00443647, 0.78983213], + [-0.74697144, -0.32172099, -0.58183269], + [-0.25668686, 0.94682413, -0.19399983], + ] + ) + + expected_x_loadings = np.array( + [ + [-0.61470416, -0.24574278, 0.78983213], + [-0.65625755, -0.14396183, -0.58183269], + [-0.51733059, 1.00609417, -0.19399983], + ] + ) + + expected_y_weights = np.array( + [ + [+0.32456184, 0.29892183, 0.20316322], + [+0.42439636, 0.61970543, 0.19320542], + [-0.13143144, -0.26348971, -0.17092916], + ] + ) + + expected_y_loadings = np.array( + [ + [+0.32456184, 0.29892183, 0.20316322], + [+0.42439636, 0.61970543, 0.19320542], + [-0.13143144, -0.26348971, -0.17092916], + ] + ) + + assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings)) + assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights)) + assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings)) + assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights)) + + # The R / Python difference in the signs should be consistent across + # loadings, weights, etc. 
+ x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings) + x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights) + y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights) + y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings) + assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip) + assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip) + + +def test_sanity_check_pls_regression_constant_column_Y(): + # Check behavior when the first column of Y is constant + # The results are checked against a modified version of plsreg2 + # from the R-package plsdepot + d = load_linnerud() + X = d.data + Y = d.target + Y[:, 0] = 1 + pls = PLSRegression(n_components=X.shape[1]) + pls.fit(X, Y) + + expected_x_weights = np.array( + [ + [-0.6273573, 0.007081799, 0.7786994], + [-0.7493417, -0.277612681, -0.6011807], + [-0.2119194, 0.960666981, -0.1794690], + ] + ) + + expected_x_loadings = np.array( + [ + [-0.6273512, -0.22464538, 0.7786994], + [-0.6643156, -0.09871193, -0.6011807], + [-0.5125877, 1.01407380, -0.1794690], + ] + ) + + expected_y_loadings = np.array( + [ + [0.0000000, 0.0000000, 0.0000000], + [0.4357300, 0.5828479, 0.2174802], + [-0.1353739, -0.2486423, -0.1810386], + ] + ) + + assert_array_almost_equal(np.abs(expected_x_weights), np.abs(pls.x_weights_)) + assert_array_almost_equal(np.abs(expected_x_loadings), np.abs(pls.x_loadings_)) + # For the PLSRegression with default parameters, y_loadings == y_weights + assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings)) + assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_loadings)) + + x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_) + x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_) + # we ignore the first full-zeros row for y + y_loadings_sign_flip = np.sign(expected_y_loadings[1:] / pls.y_loadings_[1:]) + + assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip) + assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip) + + +def test_sanity_check_pls_canonical(): + # Sanity check for PLSCanonical + # The results were checked against the R-package plspm + + d = load_linnerud() + X = d.data + Y = d.target + + pls = PLSCanonical(n_components=X.shape[1]) + pls.fit(X, Y) + + expected_x_weights = np.array( + [ + [-0.61330704, 0.25616119, -0.74715187], + [-0.74697144, 0.11930791, 0.65406368], + [-0.25668686, -0.95924297, -0.11817271], + ] + ) + + expected_x_rotations = np.array( + [ + [-0.61330704, 0.41591889, -0.62297525], + [-0.74697144, 0.31388326, 0.77368233], + [-0.25668686, -0.89237972, -0.24121788], + ] + ) + + expected_y_weights = np.array( + [ + [+0.58989127, 0.7890047, 0.1717553], + [+0.77134053, -0.61351791, 0.16920272], + [-0.23887670, -0.03267062, 0.97050016], + ] + ) + + expected_y_rotations = np.array( + [ + [+0.58989127, 0.7168115, 0.30665872], + [+0.77134053, -0.70791757, 0.19786539], + [-0.23887670, -0.00343595, 0.94162826], + ] + ) + + assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations)) + assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights)) + assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations)) + assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights)) + + x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations) + x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights) + y_rotations_sign_flip = np.sign(pls.y_rotations_ / 
expected_y_rotations) + y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights) + assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip) + assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip) + + assert_matrix_orthogonal(pls.x_weights_) + assert_matrix_orthogonal(pls.y_weights_) + + assert_matrix_orthogonal(pls._x_scores) + assert_matrix_orthogonal(pls._y_scores) + + +def test_sanity_check_pls_canonical_random(): + # Sanity check for PLSCanonical on random data + # The results were checked against the R-package plspm + n = 500 + p_noise = 10 + q_noise = 5 + # 2 latents vars: + rng = check_random_state(11) + l1 = rng.normal(size=n) + l2 = rng.normal(size=n) + latents = np.array([l1, l1, l2, l2]).T + X = latents + rng.normal(size=4 * n).reshape((n, 4)) + Y = latents + rng.normal(size=4 * n).reshape((n, 4)) + X = np.concatenate((X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1) + Y = np.concatenate((Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1) + + pls = PLSCanonical(n_components=3) + pls.fit(X, Y) + + expected_x_weights = np.array( + [ + [0.65803719, 0.19197924, 0.21769083], + [0.7009113, 0.13303969, -0.15376699], + [0.13528197, -0.68636408, 0.13856546], + [0.16854574, -0.66788088, -0.12485304], + [-0.03232333, -0.04189855, 0.40690153], + [0.1148816, -0.09643158, 0.1613305], + [0.04792138, -0.02384992, 0.17175319], + [-0.06781, -0.01666137, -0.18556747], + [-0.00266945, -0.00160224, 0.11893098], + [-0.00849528, -0.07706095, 0.1570547], + [-0.00949471, -0.02964127, 0.34657036], + [-0.03572177, 0.0945091, 0.3414855], + [0.05584937, -0.02028961, -0.57682568], + [0.05744254, -0.01482333, -0.17431274], + ] + ) + + expected_x_loadings = np.array( + [ + [0.65649254, 0.1847647, 0.15270699], + [0.67554234, 0.15237508, -0.09182247], + [0.19219925, -0.67750975, 0.08673128], + [0.2133631, -0.67034809, -0.08835483], + [-0.03178912, -0.06668336, 0.43395268], + [0.15684588, -0.13350241, 0.20578984], + [0.03337736, -0.03807306, 0.09871553], + [-0.06199844, 0.01559854, -0.1881785], + [0.00406146, -0.00587025, 0.16413253], + [-0.00374239, -0.05848466, 0.19140336], + [0.00139214, -0.01033161, 0.32239136], + [-0.05292828, 0.0953533, 0.31916881], + [0.04031924, -0.01961045, -0.65174036], + [0.06172484, -0.06597366, -0.1244497], + ] + ) + + expected_y_weights = np.array( + [ + [0.66101097, 0.18672553, 0.22826092], + [0.69347861, 0.18463471, -0.23995597], + [0.14462724, -0.66504085, 0.17082434], + [0.22247955, -0.6932605, -0.09832993], + [0.07035859, 0.00714283, 0.67810124], + [0.07765351, -0.0105204, -0.44108074], + [-0.00917056, 0.04322147, 0.10062478], + [-0.01909512, 0.06182718, 0.28830475], + [0.01756709, 0.04797666, 0.32225745], + ] + ) + + expected_y_loadings = np.array( + [ + [0.68568625, 0.1674376, 0.0969508], + [0.68782064, 0.20375837, -0.1164448], + [0.11712173, -0.68046903, 0.12001505], + [0.17860457, -0.6798319, -0.05089681], + [0.06265739, -0.0277703, 0.74729584], + [0.0914178, 0.00403751, -0.5135078], + [-0.02196918, -0.01377169, 0.09564505], + [-0.03288952, 0.09039729, 0.31858973], + [0.04287624, 0.05254676, 0.27836841], + ] + ) + + assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings)) + assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights)) + assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings)) + assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights)) + + x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings) + 
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights) + y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights) + y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings) + assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip) + assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip) + + assert_matrix_orthogonal(pls.x_weights_) + assert_matrix_orthogonal(pls.y_weights_) + + assert_matrix_orthogonal(pls._x_scores) + assert_matrix_orthogonal(pls._y_scores) + + +def test_convergence_fail(): + # Make sure ConvergenceWarning is raised if max_iter is too small + d = load_linnerud() + X = d.data + Y = d.target + pls_nipals = PLSCanonical(n_components=X.shape[1], max_iter=2) + with pytest.warns(ConvergenceWarning): + pls_nipals.fit(X, Y) + + +@pytest.mark.parametrize("Est", (PLSSVD, PLSRegression, PLSCanonical)) +def test_attibutes_shapes(Est): + # Make sure attributes are of the correct shape depending on n_components + d = load_linnerud() + X = d.data + Y = d.target + n_components = 2 + pls = Est(n_components=n_components) + pls.fit(X, Y) + assert all( + attr.shape[1] == n_components for attr in (pls.x_weights_, pls.y_weights_) + ) + + +@pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA)) +def test_univariate_equivalence(Est): + # Ensure 2D Y with 1 column is equivalent to 1D Y + d = load_linnerud() + X = d.data + Y = d.target + + est = Est(n_components=1) + one_d_coeff = est.fit(X, Y[:, 0]).coef_ + two_d_coeff = est.fit(X, Y[:, :1]).coef_ + + assert one_d_coeff.shape == two_d_coeff.shape + assert_array_almost_equal(one_d_coeff, two_d_coeff) + + +@pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA, PLSSVD)) +def test_copy(Est): + # check that the "copy" keyword works + d = load_linnerud() + X = d.data + Y = d.target + X_orig = X.copy() + + # copy=True won't modify inplace + pls = Est(copy=True).fit(X, Y) + assert_array_equal(X, X_orig) + + # copy=False will modify inplace + with pytest.raises(AssertionError): + Est(copy=False).fit(X, Y) + assert_array_almost_equal(X, X_orig) + + if Est is PLSSVD: + return # PLSSVD does not support copy param in predict or transform + + X_orig = X.copy() + with pytest.raises(AssertionError): + pls.transform(X, Y, copy=False), + assert_array_almost_equal(X, X_orig) + + X_orig = X.copy() + with pytest.raises(AssertionError): + pls.predict(X, copy=False), + assert_array_almost_equal(X, X_orig) + + # Make sure copy=True gives same transform and predictions as predict=False + assert_array_almost_equal( + pls.transform(X, Y, copy=True), pls.transform(X.copy(), Y.copy(), copy=False) + ) + assert_array_almost_equal( + pls.predict(X, copy=True), pls.predict(X.copy(), copy=False) + ) + + +def _generate_test_scale_and_stability_datasets(): + """Generate dataset for test_scale_and_stability""" + # dataset for non-regression 7818 + rng = np.random.RandomState(0) + n_samples = 1000 + n_targets = 5 + n_features = 10 + Q = rng.randn(n_targets, n_features) + Y = rng.randn(n_samples, n_targets) + X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1 + X *= 1000 + yield X, Y + + # Data set where one of the features is constraint + X, Y = load_linnerud(return_X_y=True) + # causes X[:, -1].std() to be zero + X[:, -1] = 1.0 + yield X, Y + + X = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [3.0, 5.0, 4.0]]) + Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]) + yield X, Y + + # Seeds that provide a non-regression test for #18746, where CCA fails + seeds = 
[530, 741] + for seed in seeds: + rng = np.random.RandomState(seed) + X = rng.randn(4, 3) + Y = rng.randn(4, 2) + yield X, Y + + +@pytest.mark.parametrize("Est", (CCA, PLSCanonical, PLSRegression, PLSSVD)) +@pytest.mark.parametrize("X, Y", _generate_test_scale_and_stability_datasets()) +def test_scale_and_stability(Est, X, Y): + """scale=True is equivalent to scale=False on centered/scaled data + This allows to check numerical stability over platforms as well""" + + X_s, Y_s, *_ = _center_scale_xy(X, Y) + + X_score, Y_score = Est(scale=True).fit_transform(X, Y) + X_s_score, Y_s_score = Est(scale=False).fit_transform(X_s, Y_s) + + assert_allclose(X_s_score, X_score, atol=1e-4) + assert_allclose(Y_s_score, Y_score, atol=1e-4) + + +@pytest.mark.parametrize("Estimator", (PLSSVD, PLSRegression, PLSCanonical, CCA)) +def test_n_components_upper_bounds(Estimator): + """Check the validation of `n_components` upper bounds for `PLS` regressors.""" + rng = np.random.RandomState(0) + X = rng.randn(10, 5) + Y = rng.randn(10, 3) + est = Estimator(n_components=10) + err_msg = "`n_components` upper bound is .*. Got 10 instead. Reduce `n_components`." + with pytest.raises(ValueError, match=err_msg): + est.fit(X, Y) + + +@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (100, 200)]) +def test_singular_value_helpers(n_samples, n_features, global_random_seed): + # Make sure SVD and power method give approximately the same results + X, Y = make_regression( + n_samples, n_features, n_targets=5, random_state=global_random_seed + ) + u1, v1, _ = _get_first_singular_vectors_power_method(X, Y, norm_y_weights=True) + u2, v2 = _get_first_singular_vectors_svd(X, Y) + + _svd_flip_1d(u1, v1) + _svd_flip_1d(u2, v2) + + rtol = 1e-3 + # Setting atol because some coordinates are very close to zero + assert_allclose(u1, u2, atol=u2.max() * rtol) + assert_allclose(v1, v2, atol=v2.max() * rtol) + + +def test_one_component_equivalence(global_random_seed): + # PLSSVD, PLSRegression and PLSCanonical should all be equivalent when + # n_components is 1 + X, Y = make_regression(100, 10, n_targets=5, random_state=global_random_seed) + svd = PLSSVD(n_components=1).fit(X, Y).transform(X) + reg = PLSRegression(n_components=1).fit(X, Y).transform(X) + canonical = PLSCanonical(n_components=1).fit(X, Y).transform(X) + + rtol = 1e-3 + # Setting atol because some entries are very close to zero + assert_allclose(svd, reg, atol=reg.max() * rtol) + assert_allclose(svd, canonical, atol=canonical.max() * rtol) + + +def test_svd_flip_1d(): + # Make sure svd_flip_1d is equivalent to svd_flip + u = np.array([1, -4, 2]) + v = np.array([1, 2, 3]) + + u_expected, v_expected = svd_flip(u.reshape(-1, 1), v.reshape(1, -1)) + _svd_flip_1d(u, v) # inplace + + assert_allclose(u, u_expected.ravel()) + assert_allclose(u, [-1, 4, -2]) + + assert_allclose(v, v_expected.ravel()) + assert_allclose(v, [-1, -2, -3]) + + +def test_loadings_converges(global_random_seed): + """Test that CCA converges. Non-regression test for #19549.""" + X, y = make_regression( + n_samples=200, n_features=20, n_targets=20, random_state=global_random_seed + ) + + cca = CCA(n_components=10, max_iter=500) + + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + + cca.fit(X, y) + + # Loadings converges to reasonable values + assert np.all(np.abs(cca.x_loadings_) < 1) + + +def test_pls_constant_y(): + """Checks warning when y is constant. 
Non-regression test for #19831""" + rng = np.random.RandomState(42) + x = rng.rand(100, 3) + y = np.zeros(100) + + pls = PLSRegression() + + msg = "Y residual is constant at iteration" + with pytest.warns(UserWarning, match=msg): + pls.fit(x, y) + + assert_allclose(pls.x_rotations_, 0) + + +@pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA]) +def test_pls_coef_shape(PLSEstimator): + """Check the shape of `coef_` attribute. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12410 + """ + d = load_linnerud() + X = d.data + Y = d.target + + pls = PLSEstimator(copy=True).fit(X, Y) + + n_targets, n_features = Y.shape[1], X.shape[1] + assert pls.coef_.shape == (n_targets, n_features) + + +@pytest.mark.parametrize("scale", [True, False]) +@pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA]) +def test_pls_prediction(PLSEstimator, scale): + """Check the behaviour of the prediction function.""" + d = load_linnerud() + X = d.data + Y = d.target + + pls = PLSEstimator(copy=True, scale=scale).fit(X, Y) + Y_pred = pls.predict(X, copy=True) + + y_mean = Y.mean(axis=0) + X_trans = X - X.mean(axis=0) + if scale: + X_trans /= X.std(axis=0, ddof=1) + + assert_allclose(pls.intercept_, y_mean) + assert_allclose(Y_pred, X_trans @ pls.coef_.T + pls.intercept_) + + +@pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical]) +def test_pls_feature_names_out(Klass): + """Check `get_feature_names_out` cross_decomposition module.""" + X, Y = load_linnerud(return_X_y=True) + + est = Klass().fit(X, Y) + names_out = est.get_feature_names_out() + + class_name_lower = Klass.__name__.lower() + expected_names_out = np.array( + [f"{class_name_lower}{i}" for i in range(est.x_weights_.shape[1])], + dtype=object, + ) + assert_array_equal(names_out, expected_names_out) + + +@pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical]) +def test_pls_set_output(Klass): + """Check `set_output` in cross_decomposition module.""" + pd = pytest.importorskip("pandas") + X, Y = load_linnerud(return_X_y=True, as_frame=True) + + est = Klass().set_output(transform="pandas").fit(X, Y) + X_trans, y_trans = est.transform(X, Y) + assert isinstance(y_trans, np.ndarray) + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, est.get_feature_names_out()) + + +def test_pls_regression_fit_1d_y(): + """Check that when fitting with 1d `y`, prediction should also be 1d. + + Non-regression test for Issue #26549. + """ + X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]]) + y = np.array([2, 6, 12, 20, 30, 42]) + expected = y.copy() + + plsr = PLSRegression().fit(X, y) + y_pred = plsr.predict(X) + assert y_pred.shape == expected.shape + + # Check that it works in VotingRegressor + lr = LinearRegression().fit(X, y) + vr = VotingRegressor([("lr", lr), ("plsr", plsr)]) + y_pred = vr.fit(X, y).predict(X) + assert y_pred.shape == expected.shape + assert_allclose(y_pred, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f9cfe07dc0e88e6b692ec0c5450d44acfd5594a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__init__.py @@ -0,0 +1,52 @@ +""" +The :mod:`sklearn.decomposition` module includes matrix decomposition +algorithms, including among others PCA, NMF or ICA. 
Most of the algorithms of +this module can be regarded as dimensionality reduction techniques. +""" + + +from ..utils.extmath import randomized_svd +from ._dict_learning import ( + DictionaryLearning, + MiniBatchDictionaryLearning, + SparseCoder, + dict_learning, + dict_learning_online, + sparse_encode, +) +from ._factor_analysis import FactorAnalysis +from ._fastica import FastICA, fastica +from ._incremental_pca import IncrementalPCA +from ._kernel_pca import KernelPCA +from ._lda import LatentDirichletAllocation +from ._nmf import ( + NMF, + MiniBatchNMF, + non_negative_factorization, +) +from ._pca import PCA +from ._sparse_pca import MiniBatchSparsePCA, SparsePCA +from ._truncated_svd import TruncatedSVD + +__all__ = [ + "DictionaryLearning", + "FastICA", + "IncrementalPCA", + "KernelPCA", + "MiniBatchDictionaryLearning", + "MiniBatchNMF", + "MiniBatchSparsePCA", + "NMF", + "PCA", + "SparseCoder", + "SparsePCA", + "dict_learning", + "dict_learning_online", + "fastica", + "non_negative_factorization", + "randomized_svd", + "sparse_encode", + "FactorAnalysis", + "TruncatedSVD", + "LatentDirichletAllocation", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1932ff4fcf07626b4a01ddbdf00d669299b70d80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f101d556ea7af6fda7e1ba9715c36dfa3aa24478 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b743dff90ada9983e37346737f151ed25300a2a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61f54f1c103aaf6b236beaec17f6e4b700dc7974 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2470ea24bccbef6e1d63ac3d24475d3bba20865d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..062bde7721c3852e60ff0c7f8c68138617e04ac1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57e2e4337e615aa9df759050b912173c07b86e47 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f766406331fd8c0b47183630239a2a741e7c6b7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e982fecaacfa58c440c185356d99bf29d2da36f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd89bd638e7b8b10d32fdf8064f6d64a9f46ce43 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19091bdb83824041595c72251db0a4d07be91b07 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d6a1ca2dc62aab098ecb4cd3f0fb90afc5e3124 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa720751774f794dc92263c66034966cca3307a --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_base.py @@ -0,0 +1,193 @@ +"""Principal Component Analysis Base Classes""" + +# Author: Alexandre Gramfort +# Olivier Grisel +# Mathieu Blondel +# Denis A. Engemann +# Kyle Kastner +# +# License: BSD 3 clause + +from abc import ABCMeta, abstractmethod + +import numpy as np +from scipy import linalg +from scipy.sparse import issparse + +from ..base import BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin +from ..utils._array_api import _add_to_diagonal, device, get_namespace +from ..utils.sparsefuncs import _implicit_column_offset +from ..utils.validation import check_is_fitted + + +class _BasePCA( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta +): + """Base class for PCA methods. + + Warning: This class should not be used directly. + Use derived classes instead. + """ + + def get_covariance(self): + """Compute data covariance with the generative model. + + ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` + where S**2 contains the explained variances, and sigma2 contains the + noise variances. + + Returns + ------- + cov : array of shape=(n_features, n_features) + Estimated covariance of data. + """ + xp, _ = get_namespace(self.components_) + + components_ = self.components_ + exp_var = self.explained_variance_ + if self.whiten: + components_ = components_ * xp.sqrt(exp_var[:, np.newaxis]) + exp_var_diff = exp_var - self.noise_variance_ + exp_var_diff = xp.where( + exp_var > self.noise_variance_, + exp_var_diff, + xp.asarray(0.0, device=device(exp_var)), + ) + cov = (components_.T * exp_var_diff) @ components_ + _add_to_diagonal(cov, self.noise_variance_, xp) + return cov + + def get_precision(self): + """Compute data precision matrix with the generative model. + + Equals the inverse of the covariance but computed with + the matrix inversion lemma for efficiency. + + Returns + ------- + precision : array, shape=(n_features, n_features) + Estimated precision of data. + """ + xp, is_array_api_compliant = get_namespace(self.components_) + + n_features = self.components_.shape[1] + + # handle corner cases first + if self.n_components_ == 0: + return xp.eye(n_features) / self.noise_variance_ + + if is_array_api_compliant: + linalg_inv = xp.linalg.inv + else: + linalg_inv = linalg.inv + + if self.noise_variance_ == 0.0: + return linalg_inv(self.get_covariance()) + + # Get precision using matrix inversion lemma + components_ = self.components_ + exp_var = self.explained_variance_ + if self.whiten: + components_ = components_ * xp.sqrt(exp_var[:, np.newaxis]) + exp_var_diff = exp_var - self.noise_variance_ + exp_var_diff = xp.where( + exp_var > self.noise_variance_, + exp_var_diff, + xp.asarray(0.0, device=device(exp_var)), + ) + precision = components_ @ components_.T / self.noise_variance_ + _add_to_diagonal(precision, 1.0 / exp_var_diff, xp) + precision = components_.T @ linalg_inv(precision) @ components_ + precision /= -(self.noise_variance_**2) + _add_to_diagonal(precision, 1.0 / self.noise_variance_, xp) + return precision + + @abstractmethod + def fit(self, X, y=None): + """Placeholder for fit. Subclasses should implement this method! + + Fit the model with X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + self : object + Returns the instance itself. 
+ """ + + def transform(self, X): + """Apply dimensionality reduction to X. + + X is projected on the first principal components previously extracted + from a training set. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : array-like of shape (n_samples, n_components) + Projection of X in the first principal components, where `n_samples` + is the number of samples and `n_components` is the number of the components. + """ + xp, _ = get_namespace(X) + + check_is_fitted(self) + + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[xp.float64, xp.float32], reset=False + ) + if self.mean_ is not None: + if issparse(X): + X = _implicit_column_offset(X, self.mean_) + else: + X = X - self.mean_ + X_transformed = X @ self.components_.T + if self.whiten: + X_transformed /= xp.sqrt(self.explained_variance_) + return X_transformed + + def inverse_transform(self, X): + """Transform data back to its original space. + + In other words, return an input `X_original` whose transform would be X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + New data, where `n_samples` is the number of samples + and `n_components` is the number of components. + + Returns + ------- + X_original array-like of shape (n_samples, n_features) + Original data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Notes + ----- + If whitening is enabled, inverse_transform will compute the + exact inverse operation, which includes reversing whitening. + """ + xp, _ = get_namespace(X) + + if self.whiten: + scaled_components = ( + xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_ + ) + return X @ scaled_components + self.mean_ + else: + return X @ self.components_ + self.mean_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..af3498d5344836330d016b5c6fb24d0a6f9fd723 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_factor_analysis.py @@ -0,0 +1,458 @@ +"""Factor Analysis. + +A latent linear variable model. + +FactorAnalysis is similar to probabilistic PCA implemented by PCA.score +While PCA assumes Gaussian noise with the same variance for each +feature, the FactorAnalysis model assumes different variances for +each of them. + +This implementation is based on David Barber's Book, +Bayesian Reasoning and Machine Learning, +http://www.cs.ucl.ac.uk/staff/d.barber/brml, +Algorithm 21.1 +""" + +# Author: Christian Osendorfer +# Alexandre Gramfort +# Denis A. 
Engemann + +# License: BSD3 + +import warnings +from math import log, sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import fast_logdet, randomized_svd, squared_norm +from ..utils.validation import check_is_fitted + + +class FactorAnalysis(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Factor Analysis (FA). + + A simple linear generative model with Gaussian latent variables. + + The observations are assumed to be caused by a linear transformation of + lower dimensional latent factors and added Gaussian noise. + Without loss of generality the factors are distributed according to a + Gaussian with zero mean and unit covariance. The noise is also zero mean + and has an arbitrary diagonal covariance matrix. + + If we would restrict the model further, by assuming that the Gaussian + noise is even isotropic (all diagonal entries are the same) we would obtain + :class:`PCA`. + + FactorAnalysis performs a maximum likelihood estimate of the so-called + `loading` matrix, the transformation of the latent variables to the + observed ones, using SVD based approach. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_components : int, default=None + Dimensionality of latent space, the number of components + of ``X`` that are obtained after ``transform``. + If None, n_components is set to the number of features. + + tol : float, default=1e-2 + Stopping tolerance for log-likelihood increase. + + copy : bool, default=True + Whether to make a copy of X. If ``False``, the input X gets overwritten + during fitting. + + max_iter : int, default=1000 + Maximum number of iterations. + + noise_variance_init : array-like of shape (n_features,), default=None + The initial guess of the noise variance for each feature. + If None, it defaults to np.ones(n_features). + + svd_method : {'lapack', 'randomized'}, default='randomized' + Which SVD method to use. If 'lapack' use standard SVD from + scipy.linalg, if 'randomized' use fast ``randomized_svd`` function. + Defaults to 'randomized'. For most applications 'randomized' will + be sufficiently precise while providing significant speed gains. + Accuracy can also be improved by setting higher values for + `iterated_power`. If this is not sufficient, for maximum precision + you should choose 'lapack'. + + iterated_power : int, default=3 + Number of iterations for the power method. 3 by default. Only used + if ``svd_method`` equals 'randomized'. + + rotation : {'varimax', 'quartimax'}, default=None + If not None, apply the indicated rotation. Currently, varimax and + quartimax are implemented. See + `"The varimax criterion for analytic rotation in factor analysis" + `_ + H. F. Kaiser, 1958. + + .. versionadded:: 0.24 + + random_state : int or RandomState instance, default=0 + Only used when ``svd_method`` equals 'randomized'. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Components with maximum variance. + + loglike_ : list of shape (n_iterations,) + The log likelihood at each iteration. 
+ + noise_variance_ : ndarray of shape (n_features,) + The estimated noise variance for each feature. + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA: Principal component analysis is also a latent linear variable model + which however assumes equal noise variance for each feature. + This extra assumption makes probabilistic PCA faster as it can be + computed in closed form. + FastICA: Independent component analysis, a latent variable model with + non-Gaussian latent variables. + + References + ---------- + - David Barber, Bayesian Reasoning and Machine Learning, + Algorithm 21.1. + + - Christopher M. Bishop: Pattern Recognition and Machine Learning, + Chapter 12.2.4. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import FactorAnalysis + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = FactorAnalysis(n_components=7, random_state=0) + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 0, None, closed="left"), None], + "tol": [Interval(Real, 0.0, None, closed="left")], + "copy": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "noise_variance_init": ["array-like", None], + "svd_method": [StrOptions({"randomized", "lapack"})], + "iterated_power": [Interval(Integral, 0, None, closed="left")], + "rotation": [StrOptions({"varimax", "quartimax"}), None], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + tol=1e-2, + copy=True, + max_iter=1000, + noise_variance_init=None, + svd_method="randomized", + iterated_power=3, + rotation=None, + random_state=0, + ): + self.n_components = n_components + self.copy = copy + self.tol = tol + self.max_iter = max_iter + self.svd_method = svd_method + + self.noise_variance_init = noise_variance_init + self.iterated_power = iterated_power + self.random_state = random_state + self.rotation = rotation + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the FactorAnalysis model to X using SVD based approach. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Ignored parameter. + + Returns + ------- + self : object + FactorAnalysis class instance. 
+ """ + X = self._validate_data(X, copy=self.copy, dtype=np.float64) + + n_samples, n_features = X.shape + n_components = self.n_components + if n_components is None: + n_components = n_features + + self.mean_ = np.mean(X, axis=0) + X -= self.mean_ + + # some constant terms + nsqrt = sqrt(n_samples) + llconst = n_features * log(2.0 * np.pi) + n_components + var = np.var(X, axis=0) + + if self.noise_variance_init is None: + psi = np.ones(n_features, dtype=X.dtype) + else: + if len(self.noise_variance_init) != n_features: + raise ValueError( + "noise_variance_init dimension does not " + "with number of features : %d != %d" + % (len(self.noise_variance_init), n_features) + ) + psi = np.array(self.noise_variance_init) + + loglike = [] + old_ll = -np.inf + SMALL = 1e-12 + + # we'll modify svd outputs to return unexplained variance + # to allow for unified computation of loglikelihood + if self.svd_method == "lapack": + + def my_svd(X): + _, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False) + return ( + s[:n_components], + Vt[:n_components], + squared_norm(s[n_components:]), + ) + + else: # svd_method == "randomized" + random_state = check_random_state(self.random_state) + + def my_svd(X): + _, s, Vt = randomized_svd( + X, + n_components, + random_state=random_state, + n_iter=self.iterated_power, + ) + return s, Vt, squared_norm(X) - squared_norm(s) + + for i in range(self.max_iter): + # SMALL helps numerics + sqrt_psi = np.sqrt(psi) + SMALL + s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt)) + s **= 2 + # Use 'maximum' here to avoid sqrt problems. + W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt + del Vt + W *= sqrt_psi + + # loglikelihood + ll = llconst + np.sum(np.log(s)) + ll += unexp_var + np.sum(np.log(psi)) + ll *= -n_samples / 2.0 + loglike.append(ll) + if (ll - old_ll) < self.tol: + break + old_ll = ll + + psi = np.maximum(var - np.sum(W**2, axis=0), SMALL) + else: + warnings.warn( + "FactorAnalysis did not converge." + + " You might want" + + " to increase the number of iterations.", + ConvergenceWarning, + ) + + self.components_ = W + if self.rotation is not None: + self.components_ = self._rotate(W) + self.noise_variance_ = psi + self.loglike_ = loglike + self.n_iter_ = i + 1 + return self + + def transform(self, X): + """Apply dimensionality reduction to X using the model. + + Compute the expected mean of the latent variables. + See Barber, 21.2.33 (or Bishop, 12.66). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + The latent variables of X. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + Ih = np.eye(len(self.components_)) + + X_transformed = X - self.mean_ + + Wpsi = self.components_ / self.noise_variance_ + cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T)) + tmp = np.dot(X_transformed, Wpsi.T) + X_transformed = np.dot(tmp, cov_z) + + return X_transformed + + def get_covariance(self): + """Compute data covariance with the FactorAnalysis model. + + ``cov = components_.T * components_ + diag(noise_variance)`` + + Returns + ------- + cov : ndarray of shape (n_features, n_features) + Estimated covariance of data. + """ + check_is_fitted(self) + + cov = np.dot(self.components_.T, self.components_) + cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace + return cov + + def get_precision(self): + """Compute data precision matrix with the FactorAnalysis model. 
+ + Returns + ------- + precision : ndarray of shape (n_features, n_features) + Estimated precision of data. + """ + check_is_fitted(self) + + n_features = self.components_.shape[1] + + # handle corner cases first + if self.n_components == 0: + return np.diag(1.0 / self.noise_variance_) + if self.n_components == n_features: + return linalg.inv(self.get_covariance()) + + # Get precision using matrix inversion lemma + components_ = self.components_ + precision = np.dot(components_ / self.noise_variance_, components_.T) + precision.flat[:: len(precision) + 1] += 1.0 + precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) + precision /= self.noise_variance_[:, np.newaxis] + precision /= -self.noise_variance_[np.newaxis, :] + precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_ + return precision + + def score_samples(self, X): + """Compute the log-likelihood of each sample. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data. + + Returns + ------- + ll : ndarray of shape (n_samples,) + Log-likelihood of each sample under the current model. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + Xr = X - self.mean_ + precision = self.get_precision() + n_features = X.shape[1] + log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision)) + return log_like + + def score(self, X, y=None): + """Compute the average log-likelihood of the samples. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data. + + y : Ignored + Ignored parameter. + + Returns + ------- + ll : float + Average log-likelihood of the samples under the current model. + """ + return np.mean(self.score_samples(X)) + + def _rotate(self, components, n_components=None, tol=1e-6): + "Rotate the factor analysis solution." 
+ # note that tol is not exposed + return _ortho_rotation(components.T, method=self.rotation, tol=tol)[ + : self.n_components + ] + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + +def _ortho_rotation(components, method="varimax", tol=1e-6, max_iter=100): + """Return rotated components.""" + nrow, ncol = components.shape + rotation_matrix = np.eye(ncol) + var = 0 + + for _ in range(max_iter): + comp_rot = np.dot(components, rotation_matrix) + if method == "varimax": + tmp = comp_rot * np.transpose((comp_rot**2).sum(axis=0) / nrow) + elif method == "quartimax": + tmp = 0 + u, s, v = np.linalg.svd(np.dot(components.T, comp_rot**3 - tmp)) + rotation_matrix = np.dot(u, v) + var_new = np.sum(s) + if var != 0 and var_new < var * (1 + tol): + break + var = var_new + + return np.dot(components, rotation_matrix).T diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..1089b2c54e086a79314d59b63532ff4842d9ccc8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_incremental_pca.py @@ -0,0 +1,409 @@ +"""Incremental Principal Components Analysis.""" + +# Author: Kyle Kastner +# Giorgio Patrini +# License: BSD 3 clause + +from numbers import Integral + +import numpy as np +from scipy import linalg, sparse + +from ..base import _fit_context +from ..utils import gen_batches +from ..utils._param_validation import Interval +from ..utils.extmath import _incremental_mean_and_var, svd_flip +from ._base import _BasePCA + + +class IncrementalPCA(_BasePCA): + """Incremental principal components analysis (IPCA). + + Linear dimensionality reduction using Singular Value Decomposition of + the data, keeping only the most significant singular vectors to + project the data to a lower dimensional space. The input data is centered + but not scaled for each feature before applying the SVD. + + Depending on the size of the input data, this algorithm can be much more + memory efficient than a PCA, and allows sparse input. + + This algorithm has constant memory complexity, on the order + of ``batch_size * n_features``, enabling use of np.memmap files without + loading the entire file into memory. For sparse matrices, the input + is converted to dense in batches (in order to be able to subtract the + mean) which avoids storing the entire dense matrix at any one time. + + The computational overhead of each SVD is + ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples + remain in memory at a time. There will be ``n_samples / batch_size`` SVD + computations to get the principal components, versus 1 large SVD of + complexity ``O(n_samples * n_features ** 2)`` for PCA. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_incremental_pca.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.16 + + Parameters + ---------- + n_components : int, default=None + Number of components to keep. If ``n_components`` is ``None``, + then ``n_components`` is set to ``min(n_samples, n_features)``. + + whiten : bool, default=False + When True (False by default) the ``components_`` vectors are divided + by ``n_samples`` times ``components_`` to ensure uncorrelated outputs + with unit component-wise variances. 
+ + Whitening will remove some information from the transformed signal + (the relative variance scales of the components) but can sometimes + improve the predictive accuracy of the downstream estimators by + making data respect some hard-wired assumptions. + + copy : bool, default=True + If False, X will be overwritten. ``copy=False`` can be used to + save memory but is unsafe for general use. + + batch_size : int, default=None + The number of samples to use for each batch. Only used when calling + ``fit``. If ``batch_size`` is ``None``, then ``batch_size`` + is inferred from the data and set to ``5 * n_features``, to provide a + balance between approximation accuracy and memory consumption. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Principal axes in feature space, representing the directions of + maximum variance in the data. Equivalently, the right singular + vectors of the centered input data, parallel to its eigenvectors. + The components are sorted by decreasing ``explained_variance_``. + + explained_variance_ : ndarray of shape (n_components,) + Variance explained by each of the selected components. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + If all components are stored, the sum of explained variances is equal + to 1.0. + + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, aggregate over calls to ``partial_fit``. + + var_ : ndarray of shape (n_features,) + Per-feature empirical variance, aggregate over calls to + ``partial_fit``. + + noise_variance_ : float + The estimated noise covariance following the Probabilistic PCA model + from Tipping and Bishop 1999. See "Pattern Recognition and + Machine Learning" by C. Bishop, 12.2.1 p. 574 or + http://www.miketipping.com/papers/met-mppca.pdf. + + n_components_ : int + The estimated number of components. Relevant when + ``n_components=None``. + + n_samples_seen_ : int + The number of samples processed by the estimator. Will be reset on + new calls to fit, but increments across ``partial_fit`` calls. + + batch_size_ : int + Inferred batch size from ``batch_size``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA : Principal component analysis (PCA). + KernelPCA : Kernel Principal component analysis (KPCA). + SparsePCA : Sparse Principal Components Analysis (SparsePCA). + TruncatedSVD : Dimensionality reduction using truncated SVD. + + Notes + ----- + Implements the incremental PCA model from: + *D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual + Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3, + pp. 125-141, May 2008.* + See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf + + This model is an extension of the Sequential Karhunen-Loeve Transform from: + :doi:`A. Levy and M. 
Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and + its Application to Images, IEEE Transactions on Image Processing, Volume 9, + Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>` + + We have specifically abstained from an optimization used by authors of both + papers, a QR decomposition used in specific situations to reduce the + algorithmic complexity of the SVD. The source for this technique is + *Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5, + section 5.4.4, pp 252-253.*. This technique has been omitted because it is + advantageous only when decomposing a matrix with ``n_samples`` (rows) + >= 5/3 * ``n_features`` (columns), and hurts the readability of the + implemented algorithm. This would be a good opportunity for future + optimization, if it is deemed necessary. + + References + ---------- + D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual + Tracking, International Journal of Computer Vision, Volume 77, + Issue 1-3, pp. 125-141, May 2008. + + G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5, + Section 5.4.4, pp. 252-253. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import IncrementalPCA + >>> from scipy import sparse + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = IncrementalPCA(n_components=7, batch_size=200) + >>> # either partially fit on smaller batches of data + >>> transformer.partial_fit(X[:100, :]) + IncrementalPCA(batch_size=200, n_components=7) + >>> # or let the fit function itself divide the data into batches + >>> X_sparse = sparse.csr_matrix(X) + >>> X_transformed = transformer.fit_transform(X_sparse) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "whiten": ["boolean"], + "copy": ["boolean"], + "batch_size": [Interval(Integral, 1, None, closed="left"), None], + } + + def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None): + self.n_components = n_components + self.whiten = whiten + self.copy = copy + self.batch_size = batch_size + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X, using minibatches of size batch_size. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
+ """ + self.components_ = None + self.n_samples_seen_ = 0 + self.mean_ = 0.0 + self.var_ = 0.0 + self.singular_values_ = None + self.explained_variance_ = None + self.explained_variance_ratio_ = None + self.noise_variance_ = None + + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "lil"], + copy=self.copy, + dtype=[np.float64, np.float32], + ) + n_samples, n_features = X.shape + + if self.batch_size is None: + self.batch_size_ = 5 * n_features + else: + self.batch_size_ = self.batch_size + + for batch in gen_batches( + n_samples, self.batch_size_, min_batch_size=self.n_components or 0 + ): + X_batch = X[batch] + if sparse.issparse(X_batch): + X_batch = X_batch.toarray() + self.partial_fit(X_batch, check_input=False) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, check_input=True): + """Incremental fit with X. All of X is processed as a single batch. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + check_input : bool, default=True + Run check_array on X. + + Returns + ------- + self : object + Returns the instance itself. + """ + first_pass = not hasattr(self, "components_") + + if check_input: + if sparse.issparse(X): + raise TypeError( + "IncrementalPCA.partial_fit does not support " + "sparse input. Either convert data to dense " + "or use IncrementalPCA.fit to do so in batches." + ) + X = self._validate_data( + X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass + ) + n_samples, n_features = X.shape + if first_pass: + self.components_ = None + + if self.n_components is None: + if self.components_ is None: + self.n_components_ = min(n_samples, n_features) + else: + self.n_components_ = self.components_.shape[0] + elif not self.n_components <= n_features: + raise ValueError( + "n_components=%r invalid for n_features=%d, need " + "more rows than columns for IncrementalPCA " + "processing" % (self.n_components, n_features) + ) + elif not self.n_components <= n_samples: + raise ValueError( + "n_components=%r must be less or equal to " + "the batch number of samples " + "%d." % (self.n_components, n_samples) + ) + else: + self.n_components_ = self.n_components + + if (self.components_ is not None) and ( + self.components_.shape[0] != self.n_components_ + ): + raise ValueError( + "Number of input features has changed from %i " + "to %i between calls to partial_fit! Try " + "setting n_components to a fixed value." 
+ % (self.components_.shape[0], self.n_components_) + ) + + # This is the first partial_fit + if not hasattr(self, "n_samples_seen_"): + self.n_samples_seen_ = 0 + self.mean_ = 0.0 + self.var_ = 0.0 + + # Update stats - they are 0 if this is the first step + col_mean, col_var, n_total_samples = _incremental_mean_and_var( + X, + last_mean=self.mean_, + last_variance=self.var_, + last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]), + ) + n_total_samples = n_total_samples[0] + + # Whitening + if self.n_samples_seen_ == 0: + # If it is the first step, simply whiten X + X -= col_mean + else: + col_batch_mean = np.mean(X, axis=0) + X -= col_batch_mean + # Build matrix of combined previous basis and new data + mean_correction = np.sqrt( + (self.n_samples_seen_ / n_total_samples) * n_samples + ) * (self.mean_ - col_batch_mean) + X = np.vstack( + ( + self.singular_values_.reshape((-1, 1)) * self.components_, + X, + mean_correction, + ) + ) + + U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False) + U, Vt = svd_flip(U, Vt, u_based_decision=False) + explained_variance = S**2 / (n_total_samples - 1) + explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples) + + self.n_samples_seen_ = n_total_samples + self.components_ = Vt[: self.n_components_] + self.singular_values_ = S[: self.n_components_] + self.mean_ = col_mean + self.var_ = col_var + self.explained_variance_ = explained_variance[: self.n_components_] + self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_] + # we already checked `self.n_components <= n_samples` above + if self.n_components_ not in (n_samples, n_features): + self.noise_variance_ = explained_variance[self.n_components_ :].mean() + else: + self.noise_variance_ = 0.0 + return self + + def transform(self, X): + """Apply dimensionality reduction to X. + + X is projected on the first principal components previously extracted + from a training set, using minibatches of size batch_size if X is + sparse. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Projection of X in the first principal components. + + Examples + -------- + + >>> import numpy as np + >>> from sklearn.decomposition import IncrementalPCA + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], + ... [1, 1], [2, 1], [3, 2]]) + >>> ipca = IncrementalPCA(n_components=2, batch_size=3) + >>> ipca.fit(X) + IncrementalPCA(batch_size=3, n_components=2) + >>> ipca.transform(X) # doctest: +SKIP + """ + if sparse.issparse(X): + n_samples = X.shape[0] + output = [] + for batch in gen_batches( + n_samples, self.batch_size_, min_batch_size=self.n_components or 0 + ): + output.append(super().transform(X[batch].toarray())) + return np.vstack(output) + else: + return super().transform(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py new file mode 100644 index 0000000000000000000000000000000000000000..db46540e26708b897ed8389efa669c5312e729f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_nmf.py @@ -0,0 +1,2443 @@ +""" Non-negative matrix factorization. 
+""" +# Author: Vlad Niculae +# Lars Buitinck +# Mathieu Blondel +# Tom Dupre la Tour +# License: BSD 3 clause + +import itertools +import time +import warnings +from abc import ABC +from math import sqrt +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy import linalg + +from .._config import config_context +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import check_array, check_random_state, gen_batches, metadata_routing +from ..utils._param_validation import ( + Hidden, + Interval, + StrOptions, + validate_params, +) +from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm +from ..utils.validation import ( + check_is_fitted, + check_non_negative, +) +from ._cdnmf_fast import _update_cdnmf_fast + +EPSILON = np.finfo(np.float32).eps + + +def norm(x): + """Dot product-based Euclidean norm implementation. + + See: http://fa.bianp.net/blog/2011/computing-the-vector-norm/ + + Parameters + ---------- + x : array-like + Vector for which to compute the norm. + """ + return sqrt(squared_norm(x)) + + +def trace_dot(X, Y): + """Trace of np.dot(X, Y.T). + + Parameters + ---------- + X : array-like + First matrix. + Y : array-like + Second matrix. + """ + return np.dot(X.ravel(), Y.ravel()) + + +def _check_init(A, shape, whom): + A = check_array(A) + if shape[0] != "auto" and A.shape[0] != shape[0]: + raise ValueError( + f"Array with wrong first dimension passed to {whom}. Expected {shape[0]}, " + f"but got {A.shape[0]}." + ) + if shape[1] != "auto" and A.shape[1] != shape[1]: + raise ValueError( + f"Array with wrong second dimension passed to {whom}. Expected {shape[1]}, " + f"but got {A.shape[1]}." + ) + check_non_negative(A, whom) + if np.max(A) == 0: + raise ValueError(f"Array passed to {whom} is full of zeros.") + + +def _beta_divergence(X, W, H, beta, square_root=False): + """Compute the beta-divergence of X and dot(W, H). + + Parameters + ---------- + X : float or array-like of shape (n_samples, n_features) + + W : float or array-like of shape (n_samples, n_components) + + H : float or array-like of shape (n_components, n_features) + + beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'} + Parameter of the beta-divergence. + If beta == 2, this is half the Frobenius *squared* norm. + If beta == 1, this is the generalized Kullback-Leibler divergence. + If beta == 0, this is the Itakura-Saito divergence. + Else, this is the general beta-divergence. + + square_root : bool, default=False + If True, return np.sqrt(2 * res) + For beta == 2, it corresponds to the Frobenius norm. + + Returns + ------- + res : float + Beta divergence of X and np.dot(X, H). + """ + beta = _beta_loss_to_float(beta) + + # The method can be called with scalars + if not sp.issparse(X): + X = np.atleast_2d(X) + W = np.atleast_2d(W) + H = np.atleast_2d(H) + + # Frobenius norm + if beta == 2: + # Avoid the creation of the dense np.dot(W, H) if X is sparse. 
+ if sp.issparse(X): + norm_X = np.dot(X.data, X.data) + norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H) + cross_prod = trace_dot((X @ H.T), W) + res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0 + else: + res = squared_norm(X - np.dot(W, H)) / 2.0 + + if square_root: + return np.sqrt(res * 2) + else: + return res + + if sp.issparse(X): + # compute np.dot(W, H) only where X is nonzero + WH_data = _special_sparse_dot(W, H, X).data + X_data = X.data + else: + WH = np.dot(W, H) + WH_data = WH.ravel() + X_data = X.ravel() + + # do not affect the zeros: here 0 ** (-1) = 0 and not infinity + indices = X_data > EPSILON + WH_data = WH_data[indices] + X_data = X_data[indices] + + # used to avoid division by zero + WH_data[WH_data < EPSILON] = EPSILON + + # generalized Kullback-Leibler divergence + if beta == 1: + # fast and memory efficient computation of np.sum(np.dot(W, H)) + sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) + # computes np.sum(X * log(X / WH)) only where X is nonzero + div = X_data / WH_data + res = np.dot(X_data, np.log(div)) + # add full np.sum(np.dot(W, H)) - np.sum(X) + res += sum_WH - X_data.sum() + + # Itakura-Saito divergence + elif beta == 0: + div = X_data / WH_data + res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div)) + + # beta-divergence, beta not in (0, 1, 2) + else: + if sp.issparse(X): + # slow loop, but memory efficient computation of : + # np.sum(np.dot(W, H) ** beta) + sum_WH_beta = 0 + for i in range(X.shape[1]): + sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) + + else: + sum_WH_beta = np.sum(WH**beta) + + sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) + res = (X_data**beta).sum() - beta * sum_X_WH + res += sum_WH_beta * (beta - 1) + res /= beta * (beta - 1) + + if square_root: + res = max(res, 0) # avoid negative number due to rounding errors + return np.sqrt(2 * res) + else: + return res + + +def _special_sparse_dot(W, H, X): + """Computes np.dot(W, H), only where X is non zero.""" + if sp.issparse(X): + ii, jj = X.nonzero() + n_vals = ii.shape[0] + dot_vals = np.empty(n_vals) + n_components = W.shape[1] + + batch_size = max(n_components, n_vals // n_components) + for start in range(0, n_vals, batch_size): + batch = slice(start, start + batch_size) + dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum( + axis=1 + ) + + WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape) + return WH.tocsr() + else: + return np.dot(W, H) + + +def _beta_loss_to_float(beta_loss): + """Convert string beta_loss to float.""" + beta_loss_map = {"frobenius": 2, "kullback-leibler": 1, "itakura-saito": 0} + if isinstance(beta_loss, str): + beta_loss = beta_loss_map[beta_loss] + return beta_loss + + +def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None): + """Algorithms for NMF initialization. + + Computes an initial guess for the non-negative + rank k matrix approximation for X: X = WH. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix to be decomposed. + + n_components : int + The number of components desired in the approximation. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None + Method used to initialize the procedure. + Valid options: + + - None: 'nndsvda' if n_components <= min(n_samples, n_features), + otherwise 'random'. 
+ + - 'random': non-negative random matrices, scaled with: + sqrt(X.mean() / n_components) + + - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + + - 'nndsvda': NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + + - 'nndsvdar': NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + + - 'custom': use custom matrices W and H + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + eps : float, default=1e-6 + Truncate all values less then this in output to zero. + + random_state : int, RandomState instance or None, default=None + Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + W : array-like of shape (n_samples, n_components) + Initial guesses for solving X ~= WH. + + H : array-like of shape (n_components, n_features) + Initial guesses for solving X ~= WH. + + References + ---------- + C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for + nonnegative matrix factorization - Pattern Recognition, 2008 + http://tinyurl.com/nndsvd + """ + check_non_negative(X, "NMF initialization") + n_samples, n_features = X.shape + + if ( + init is not None + and init != "random" + and n_components > min(n_samples, n_features) + ): + raise ValueError( + "init = '{}' can only be used when " + "n_components <= min(n_samples, n_features)".format(init) + ) + + if init is None: + if n_components <= min(n_samples, n_features): + init = "nndsvda" + else: + init = "random" + + # Random initialization + if init == "random": + avg = np.sqrt(X.mean() / n_components) + rng = check_random_state(random_state) + H = avg * rng.standard_normal(size=(n_components, n_features)).astype( + X.dtype, copy=False + ) + W = avg * rng.standard_normal(size=(n_samples, n_components)).astype( + X.dtype, copy=False + ) + np.abs(H, out=H) + np.abs(W, out=W) + return W, H + + # NNDSVD initialization + U, S, V = randomized_svd(X, n_components, random_state=random_state) + W = np.zeros_like(U) + H = np.zeros_like(V) + + # The leading singular triplet is non-negative + # so it can be used as is for initialization. 
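+    # The singular value S[0] is split as sqrt(S[0]) on each factor so that
+    # np.outer(W[:, 0], H[0, :]) reproduces the leading rank-one SVD term
+    # S[0] * np.outer(U[:, 0], V[0, :]); np.abs only resolves the sign
+    # indeterminacy of the SVD.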
+ W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0]) + H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :]) + + for j in range(1, n_components): + x, y = U[:, j], V[j, :] + + # extract positive and negative parts of column vectors + x_p, y_p = np.maximum(x, 0), np.maximum(y, 0) + x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0)) + + # and their norms + x_p_nrm, y_p_nrm = norm(x_p), norm(y_p) + x_n_nrm, y_n_nrm = norm(x_n), norm(y_n) + + m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm + + # choose update + if m_p > m_n: + u = x_p / x_p_nrm + v = y_p / y_p_nrm + sigma = m_p + else: + u = x_n / x_n_nrm + v = y_n / y_n_nrm + sigma = m_n + + lbd = np.sqrt(S[j] * sigma) + W[:, j] = lbd * u + H[j, :] = lbd * v + + W[W < eps] = 0 + H[H < eps] = 0 + + if init == "nndsvd": + pass + elif init == "nndsvda": + avg = X.mean() + W[W == 0] = avg + H[H == 0] = avg + elif init == "nndsvdar": + rng = check_random_state(random_state) + avg = X.mean() + W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100) + H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100) + else: + raise ValueError( + "Invalid init parameter: got %r instead of one of %r" + % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar")) + ) + + return W, H + + +def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state): + """Helper function for _fit_coordinate_descent. + + Update W to minimize the objective function, iterating once over all + coordinates. By symmetry, to update H, one can call + _update_coordinate_descent(X.T, Ht, W, ...). + + """ + n_components = Ht.shape[1] + + HHt = np.dot(Ht.T, Ht) + XHt = safe_sparse_dot(X, Ht) + + # L2 regularization corresponds to increase of the diagonal of HHt + if l2_reg != 0.0: + # adds l2_reg only on the diagonal + HHt.flat[:: n_components + 1] += l2_reg + # L1 regularization corresponds to decrease of each element of XHt + if l1_reg != 0.0: + XHt -= l1_reg + + if shuffle: + permutation = random_state.permutation(n_components) + else: + permutation = np.arange(n_components) + # The following seems to be required on 64-bit Windows w/ Python 3.5. + permutation = np.asarray(permutation, dtype=np.intp) + return _update_cdnmf_fast(W, HHt, XHt, permutation) + + +def _fit_coordinate_descent( + X, + W, + H, + tol=1e-4, + max_iter=200, + l1_reg_W=0, + l1_reg_H=0, + l2_reg_W=0, + l2_reg_H=0, + update_H=True, + verbose=0, + shuffle=False, + random_state=None, +): + """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent + + The objective function is minimized with an alternating minimization of W + and H. Each minimization is done with a cyclic (up to a permutation of the + features) Coordinate Descent. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Constant matrix. + + W : array-like of shape (n_samples, n_components) + Initial guess for the solution. + + H : array-like of shape (n_components, n_features) + Initial guess for the solution. + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + l1_reg_W : float, default=0. + L1 regularization parameter for W. + + l1_reg_H : float, default=0. + L1 regularization parameter for H. + + l2_reg_W : float, default=0. + L2 regularization parameter for W. + + l2_reg_H : float, default=0. + L2 regularization parameter for H. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. 
+ + verbose : int, default=0 + The verbosity level. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + random_state : int, RandomState instance or None, default=None + Used to randomize the coordinates in the CD solver, when + ``shuffle`` is set to ``True``. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + The number of iterations done by the algorithm. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + """ + # so W and Ht are both in C order in memory + Ht = check_array(H.T, order="C") + X = check_array(X, accept_sparse="csr") + + rng = check_random_state(random_state) + + for n_iter in range(1, max_iter + 1): + violation = 0.0 + + # Update W + violation += _update_coordinate_descent( + X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng + ) + # Update H + if update_H: + violation += _update_coordinate_descent( + X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng + ) + + if n_iter == 1: + violation_init = violation + + if violation_init == 0: + break + + if verbose: + print("violation:", violation / violation_init) + + if violation / violation_init <= tol: + if verbose: + print("Converged at iteration", n_iter + 1) + break + + return W, Ht.T, n_iter + + +def _multiplicative_update_w( + X, + W, + H, + beta_loss, + l1_reg_W, + l2_reg_W, + gamma, + H_sum=None, + HHt=None, + XHt=None, + update_H=True, +): + """Update W in Multiplicative Update NMF.""" + if beta_loss == 2: + # Numerator + if XHt is None: + XHt = safe_sparse_dot(X, H.T) + if update_H: + # avoid a copy of XHt, which will be re-computed (update_H=True) + numerator = XHt + else: + # preserve the XHt, which is not re-computed (update_H=False) + numerator = XHt.copy() + + # Denominator + if HHt is None: + HHt = np.dot(H, H.T) + denominator = np.dot(W, HHt) + + else: + # Numerator + # if X is sparse, compute WH only where X is non zero + WH_safe_X = _special_sparse_dot(W, H, X) + if sp.issparse(X): + WH_safe_X_data = WH_safe_X.data + X_data = X.data + else: + WH_safe_X_data = WH_safe_X + X_data = X + # copy used in the Denominator + WH = WH_safe_X.copy() + if beta_loss - 1.0 < 0: + WH[WH < EPSILON] = EPSILON + + # to avoid taking a negative power of zero + if beta_loss - 2.0 < 0: + WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON + + if beta_loss == 1: + np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) + elif beta_loss == 0: + # speeds up computation time + # refer to /numpy/numpy/issues/9363 + WH_safe_X_data **= -1 + WH_safe_X_data **= 2 + # element-wise multiplication + WH_safe_X_data *= X_data + else: + WH_safe_X_data **= beta_loss - 2 + # element-wise multiplication + WH_safe_X_data *= X_data + + # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T) + numerator = safe_sparse_dot(WH_safe_X, H.T) + + # Denominator + if beta_loss == 1: + if H_sum is None: + H_sum = np.sum(H, axis=1) # shape(n_components, ) + denominator = H_sum[np.newaxis, :] + + else: + # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T) + if 
sp.issparse(X): + # memory efficient computation + # (compute row by row, avoiding the dense matrix WH) + WHHt = np.empty(W.shape) + for i in range(X.shape[0]): + WHi = np.dot(W[i, :], H) + if beta_loss - 1 < 0: + WHi[WHi < EPSILON] = EPSILON + WHi **= beta_loss - 1 + WHHt[i, :] = np.dot(WHi, H.T) + else: + WH **= beta_loss - 1 + WHHt = np.dot(WH, H.T) + denominator = WHHt + + # Add L1 and L2 regularization + if l1_reg_W > 0: + denominator += l1_reg_W + if l2_reg_W > 0: + denominator = denominator + l2_reg_W * W + denominator[denominator == 0] = EPSILON + + numerator /= denominator + delta_W = numerator + + # gamma is in ]0, 1] + if gamma != 1: + delta_W **= gamma + + W *= delta_W + + return W, H_sum, HHt, XHt + + +def _multiplicative_update_h( + X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None +): + """update H in Multiplicative Update NMF.""" + if beta_loss == 2: + numerator = safe_sparse_dot(W.T, X) + denominator = np.linalg.multi_dot([W.T, W, H]) + + else: + # Numerator + WH_safe_X = _special_sparse_dot(W, H, X) + if sp.issparse(X): + WH_safe_X_data = WH_safe_X.data + X_data = X.data + else: + WH_safe_X_data = WH_safe_X + X_data = X + # copy used in the Denominator + WH = WH_safe_X.copy() + if beta_loss - 1.0 < 0: + WH[WH < EPSILON] = EPSILON + + # to avoid division by zero + if beta_loss - 2.0 < 0: + WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON + + if beta_loss == 1: + np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) + elif beta_loss == 0: + # speeds up computation time + # refer to /numpy/numpy/issues/9363 + WH_safe_X_data **= -1 + WH_safe_X_data **= 2 + # element-wise multiplication + WH_safe_X_data *= X_data + else: + WH_safe_X_data **= beta_loss - 2 + # element-wise multiplication + WH_safe_X_data *= X_data + + # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X) + numerator = safe_sparse_dot(W.T, WH_safe_X) + + # Denominator + if beta_loss == 1: + W_sum = np.sum(W, axis=0) # shape(n_components, ) + W_sum[W_sum == 0] = 1.0 + denominator = W_sum[:, np.newaxis] + + # beta_loss not in (1, 2) + else: + # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1) + if sp.issparse(X): + # memory efficient computation + # (compute column by column, avoiding the dense matrix WH) + WtWH = np.empty(H.shape) + for i in range(X.shape[1]): + WHi = np.dot(W, H[:, i]) + if beta_loss - 1 < 0: + WHi[WHi < EPSILON] = EPSILON + WHi **= beta_loss - 1 + WtWH[:, i] = np.dot(W.T, WHi) + else: + WH **= beta_loss - 1 + WtWH = np.dot(W.T, WH) + denominator = WtWH + + # Add L1 and L2 regularization + if l1_reg_H > 0: + denominator += l1_reg_H + if l2_reg_H > 0: + denominator = denominator + l2_reg_H * H + denominator[denominator == 0] = EPSILON + + if A is not None and B is not None: + # Updates for the online nmf + if gamma != 1: + H **= 1 / gamma + numerator *= H + A *= rho + B *= rho + A += numerator + B += denominator + H = A / B + + if gamma != 1: + H **= gamma + else: + delta_H = numerator + delta_H /= denominator + if gamma != 1: + delta_H **= gamma + H *= delta_H + + return H + + +def _fit_multiplicative_update( + X, + W, + H, + beta_loss="frobenius", + max_iter=200, + tol=1e-4, + l1_reg_W=0, + l1_reg_H=0, + l2_reg_W=0, + l2_reg_H=0, + update_H=True, + verbose=0, +): + """Compute Non-negative Matrix Factorization with Multiplicative Update. + + The objective function is _beta_divergence(X, WH) and is minimized with an + alternating minimization of W and H. Each minimization is done with a + Multiplicative Update. 
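+
+    For beta_loss outside the interval [1, 2], the multiplicative update
+    factors are raised to a power gamma (1 / (2 - beta_loss) when
+    beta_loss < 1 and 1 / (beta_loss - 1) when beta_loss > 2), following the
+    Maximization-Minimization scheme of Fevotte and Idier (2011).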
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Constant input matrix. + + W : array-like of shape (n_samples, n_components) + Initial guess for the solution. + + H : array-like of shape (n_components, n_features) + Initial guess for the solution. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. + + max_iter : int, default=200 + Number of iterations. + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + l1_reg_W : float, default=0. + L1 regularization parameter for W. + + l1_reg_H : float, default=0. + L1 regularization parameter for H. + + l2_reg_W : float, default=0. + L2 regularization parameter for W. + + l2_reg_H : float, default=0. + L2 regularization parameter for H. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. + + verbose : int, default=0 + The verbosity level. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + The number of iterations done by the algorithm. + + References + ---------- + Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix + Factorization. Adv. Neural Inform. Process. Syst.. 13. + Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix + factorization with the beta-divergence. Neural Computation, 23(9). 
+ """ + start_time = time.time() + + beta_loss = _beta_loss_to_float(beta_loss) + + # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] + if beta_loss < 1: + gamma = 1.0 / (2.0 - beta_loss) + elif beta_loss > 2: + gamma = 1.0 / (beta_loss - 1.0) + else: + gamma = 1.0 + + # used for the convergence criterion + error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True) + previous_error = error_at_init + + H_sum, HHt, XHt = None, None, None + for n_iter in range(1, max_iter + 1): + # update W + # H_sum, HHt and XHt are saved and reused if not update_H + W, H_sum, HHt, XHt = _multiplicative_update_w( + X, + W, + H, + beta_loss=beta_loss, + l1_reg_W=l1_reg_W, + l2_reg_W=l2_reg_W, + gamma=gamma, + H_sum=H_sum, + HHt=HHt, + XHt=XHt, + update_H=update_H, + ) + + # necessary for stability with beta_loss < 1 + if beta_loss < 1: + W[W < np.finfo(np.float64).eps] = 0.0 + + # update H (only at fit or fit_transform) + if update_H: + H = _multiplicative_update_h( + X, + W, + H, + beta_loss=beta_loss, + l1_reg_H=l1_reg_H, + l2_reg_H=l2_reg_H, + gamma=gamma, + ) + + # These values will be recomputed since H changed + H_sum, HHt, XHt = None, None, None + + # necessary for stability with beta_loss < 1 + if beta_loss <= 1: + H[H < np.finfo(np.float64).eps] = 0.0 + + # test convergence criterion every 10 iterations + if tol > 0 and n_iter % 10 == 0: + error = _beta_divergence(X, W, H, beta_loss, square_root=True) + + if verbose: + iter_time = time.time() + print( + "Epoch %02d reached after %.3f seconds, error: %f" + % (n_iter, iter_time - start_time, error) + ) + + if (previous_error - error) / error_at_init < tol: + break + previous_error = error + + # do not print if we have already printed in the convergence test + if verbose and (tol == 0 or n_iter % 10 != 0): + end_time = time.time() + print( + "Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time) + ) + + return W, H, n_iter + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "W": ["array-like", None], + "H": ["array-like", None], + "update_H": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def non_negative_factorization( + X, + W=None, + H=None, + n_components="warn", + *, + init=None, + update_H=True, + solver="cd", + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + random_state=None, + verbose=0, + shuffle=False, +): + """Compute Non-negative Matrix Factorization (NMF). + + Find two non-negative matrices (W, H) whose product approximates the non- + negative matrix X. This factorization can be used for example for + dimensionality reduction, source separation or topic extraction. + + The objective function is: + + .. math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}^2` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. 
+ + The regularization terms are scaled by `n_features` for `W` and by `n_samples` for + `H` to keep their impact balanced with respect to one another and to the data fit + term as independent as possible of the size `n_samples` of the training set. + + The objective function is minimized with an alternating minimization of W + and H. If H is given and update_H=False, it solves for W only. + + Note that the transformed data is named W and the components matrix is named H. In + the NMF literature, the naming convention is usually the opposite since the data + matrix X is transposed. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Constant matrix. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + n_components : int or {'auto'} or None, default=None + Number of components, if n_components is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from `W` or `H` shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + + Valid options: + + - None: 'nndsvda' if n_components < n_features, otherwise 'random'. + - 'random': non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + - 'nndsvda': NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + - 'nndsvdar': NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + - 'custom': If `update_H=True`, use custom matrices W and H which must both + be provided. If `update_H=False`, then only custom matrix H is used. + + .. versionchanged:: 0.23 + The default value of `init` changed from 'random' to None in 0.23. + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + update_H : bool, default=True + Set to True, both W and H will be estimated from initial guesses. + Set to False, only W will be estimated. + + solver : {'cd', 'mu'}, default='cd' + Numerical solver to use: + + - 'cd' is a Coordinate Descent solver that uses Fast Hierarchical + Alternating Least Squares (Fast HALS). + - 'mu' is a Multiplicative Update solver. + + .. versionadded:: 0.17 + Coordinate Descent solver. + + .. versionadded:: 0.19 + Multiplicative Update solver. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. 
Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. Used only in 'mu' solver. + + .. versionadded:: 0.19 + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + .. versionadded:: 1.0 + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + .. versionadded:: 1.0 + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. + For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + random_state : int, RandomState instance or None, default=None + Used for NMF initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + verbose : int, default=0 + The verbosity level. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Solution to the non-negative least squares problem. + + H : ndarray of shape (n_components, n_features) + Solution to the non-negative least squares problem. + + n_iter : int + Actual number of iterations. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). + + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import non_negative_factorization + >>> W, H, n_iter = non_negative_factorization( + ... X, n_components=2, init='random', random_state=0) + """ + est = NMF( + n_components=n_components, + init=init, + solver=solver, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + shuffle=shuffle, + ) + est._validate_params() + + X = check_array(X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]) + + with config_context(assume_finite=True): + W, H, n_iter = est._fit_transform(X, W=W, H=H, update_H=update_H) + + return W, H, n_iter + + +class _BaseNMF(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, ABC): + """Base class for NMF and MiniBatchNMF.""" + + # This prevents ``set_split_inverse_transform`` to be generated for the + # non-standard ``W`` arg on ``inverse_transform``. 
+ # TODO: remove when W is removed in v1.5 for inverse_transform + __metadata_request__inverse_transform = {"W": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + StrOptions({"auto"}), + Hidden(StrOptions({"warn"})), + ], + "init": [ + StrOptions({"random", "nndsvd", "nndsvda", "nndsvdar", "custom"}), + None, + ], + "beta_loss": [ + StrOptions({"frobenius", "kullback-leibler", "itakura-saito"}), + Real, + ], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "alpha_W": [Interval(Real, 0, None, closed="left")], + "alpha_H": [Interval(Real, 0, None, closed="left"), StrOptions({"same"})], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "verbose": ["verbose"], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + random_state=None, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + verbose=0, + ): + self.n_components = n_components + self.init = init + self.beta_loss = beta_loss + self.tol = tol + self.max_iter = max_iter + self.random_state = random_state + self.alpha_W = alpha_W + self.alpha_H = alpha_H + self.l1_ratio = l1_ratio + self.verbose = verbose + + def _check_params(self, X): + # n_components + self._n_components = self.n_components + if self.n_components == "warn": + warnings.warn( + ( + "The default value of `n_components` will change from `None` to" + " `'auto'` in 1.6. Set the value of `n_components` to `None`" + " explicitly to suppress the warning." + ), + FutureWarning, + ) + self._n_components = None # Keeping the old default value + if self._n_components is None: + self._n_components = X.shape[1] + + # beta_loss + self._beta_loss = _beta_loss_to_float(self.beta_loss) + + def _check_w_h(self, X, W, H, update_H): + """Check W and H, or initialize them.""" + n_samples, n_features = X.shape + + if self.init == "custom" and update_H: + _check_init(H, (self._n_components, n_features), "NMF (input H)") + _check_init(W, (n_samples, self._n_components), "NMF (input W)") + if self._n_components == "auto": + self._n_components = H.shape[0] + + if H.dtype != X.dtype or W.dtype != X.dtype: + raise TypeError( + "H and W should have the same dtype as X. Got " + "H.dtype = {} and W.dtype = {}.".format(H.dtype, W.dtype) + ) + + elif not update_H: + if W is not None: + warnings.warn( + "When update_H=False, the provided initial W is not used.", + RuntimeWarning, + ) + + _check_init(H, (self._n_components, n_features), "NMF (input H)") + if self._n_components == "auto": + self._n_components = H.shape[0] + + if H.dtype != X.dtype: + raise TypeError( + "H should have the same dtype as X. Got H.dtype = {}.".format( + H.dtype + ) + ) + + # 'mu' solver should not be initialized by zeros + if self.solver == "mu": + avg = np.sqrt(X.mean() / self._n_components) + W = np.full((n_samples, self._n_components), avg, dtype=X.dtype) + else: + W = np.zeros((n_samples, self._n_components), dtype=X.dtype) + + else: + if W is not None or H is not None: + warnings.warn( + ( + "When init!='custom', provided W or H are ignored. Set " + " init='custom' to use them as initialization." 
+ ), + RuntimeWarning, + ) + + if self._n_components == "auto": + self._n_components = X.shape[1] + + W, H = _initialize_nmf( + X, self._n_components, init=self.init, random_state=self.random_state + ) + + return W, H + + def _compute_regularization(self, X): + """Compute scaled regularization terms.""" + n_samples, n_features = X.shape + alpha_W = self.alpha_W + alpha_H = self.alpha_W if self.alpha_H == "same" else self.alpha_H + + l1_reg_W = n_features * alpha_W * self.l1_ratio + l1_reg_H = n_samples * alpha_H * self.l1_ratio + l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio) + l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio) + + return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H + + def fit(self, X, y=None, **params): + """Learn a NMF model for the data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + **params : kwargs + Parameters (keyword arguments) and values passed to + the fit_transform instance. + + Returns + ------- + self : object + Returns the instance itself. + """ + # param validation is done in fit_transform + + self.fit_transform(X, **params) + return self + + def inverse_transform(self, Xt=None, W=None): + """Transform data back to its original space. + + .. versionadded:: 0.18 + + Parameters + ---------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_components) + Transformed data matrix. + + W : deprecated + Use `Xt` instead. + + .. deprecated:: 1.3 + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + Returns a data matrix of the original shape. + """ + if Xt is None and W is None: + raise TypeError("Missing required positional argument: Xt") + + if W is not None and Xt is not None: + raise ValueError("Please provide only `Xt`, and not `W`.") + + if W is not None: + warnings.warn( + ( + "Input argument `W` was renamed to `Xt` in v1.3 and will be removed" + " in v1.5." + ), + FutureWarning, + ) + Xt = W + + check_is_fitted(self) + return Xt @ self.components_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "requires_positive_X": True, + "preserves_dtype": [np.float64, np.float32], + } + + +class NMF(_BaseNMF): + """Non-Negative Matrix Factorization (NMF). + + Find two non-negative matrices, i.e. matrices with all non-negative elements, (W, H) + whose product approximates the non-negative matrix X. This factorization can be used + for example for dimensionality reduction, source separation or topic extraction. + + The objective function is: + + .. math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. 
+ + The regularization terms are scaled by `n_features` for `W` and by `n_samples` for + `H` to keep their impact balanced with respect to one another and to the data fit + term as independent as possible of the size `n_samples` of the training set. + + The objective function is minimized with an alternating minimization of W + and H. + + Note that the transformed data is named W and the components matrix is named H. In + the NMF literature, the naming convention is usually the opposite since the data + matrix X is transposed. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int or {'auto'} or None, default=None + Number of components, if n_components is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from W or H shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + Valid options: + + - `None`: 'nndsvda' if n_components <= min(n_samples, n_features), + otherwise random. + + - `'random'`: non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + + - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness) + + - `'nndsvda'`: NNDSVD with zeros filled with the average of X + (better when sparsity is not desired) + + - `'nndsvdar'` NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired) + + - `'custom'`: Use custom matrices `W` and `H` which must both be provided. + + .. versionchanged:: 1.1 + When `init=None` and n_components is less than n_samples and n_features + defaults to `nndsvda` instead of `nndsvd`. + + solver : {'cd', 'mu'}, default='cd' + Numerical solver to use: + + - 'cd' is a Coordinate Descent solver. + - 'mu' is a Multiplicative Update solver. + + .. versionadded:: 0.17 + Coordinate Descent solver. + + .. versionadded:: 0.19 + Multiplicative Update solver. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between X + and the dot product WH. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input + matrix X cannot contain zeros. Used only in 'mu' solver. + + .. versionadded:: 0.19 + + tol : float, default=1e-4 + Tolerance of the stopping condition. + + max_iter : int, default=200 + Maximum number of iterations before timing out. + + random_state : int, RandomState instance or None, default=None + Used for initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + .. versionadded:: 1.0 + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + .. versionadded:: 1.0 + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. 
+ For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + .. versionadded:: 0.17 + Regularization parameter *l1_ratio* used in the Coordinate Descent + solver. + + verbose : int, default=0 + Whether to be verbose. + + shuffle : bool, default=False + If true, randomize the order of coordinates in the CD solver. + + .. versionadded:: 0.17 + *shuffle* parameter used in the Coordinate Descent solver. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_components_ : int + The number of components. It is same as the `n_components` parameter + if it was given. Otherwise, it will be same as the number of + features. + + reconstruction_err_ : float + Frobenius norm of the matrix difference, or beta-divergence, between + the training data ``X`` and the reconstructed data ``WH`` from + the fitted model. + + n_iter_ : int + Actual number of iterations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + PCA : Principal component analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). 
+ + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import NMF + >>> model = NMF(n_components=2, init='random', random_state=0) + >>> W = model.fit_transform(X) + >>> H = model.components_ + """ + + _parameter_constraints: dict = { + **_BaseNMF._parameter_constraints, + "solver": [StrOptions({"mu", "cd"})], + "shuffle": ["boolean"], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + solver="cd", + beta_loss="frobenius", + tol=1e-4, + max_iter=200, + random_state=None, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + verbose=0, + shuffle=False, + ): + super().__init__( + n_components=n_components, + init=init, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + ) + + self.solver = solver + self.shuffle = shuffle + + def _check_params(self, X): + super()._check_params(X) + + # solver + if self.solver != "mu" and self.beta_loss not in (2, "frobenius"): + # 'mu' is the only solver that handles other beta losses than 'frobenius' + raise ValueError( + f"Invalid beta_loss parameter: solver {self.solver!r} does not handle " + f"beta_loss = {self.beta_loss!r}" + ) + if self.solver == "mu" and self.init == "nndsvd": + warnings.warn( + ( + "The multiplicative update ('mu') solver cannot update " + "zeros present in the initialization, and so leads to " + "poorer results when used jointly with init='nndsvd'. " + "You may try init='nndsvda' or init='nndsvdar' instead." + ), + UserWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, W=None, H=None): + """Learn a NMF model for the data X and returns the transformed data. + + This is more efficient than calling fit followed by transform. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32] + ) + + with config_context(assume_finite=True): + W, H, n_iter = self._fit_transform(X, W=W, H=H) + + self.reconstruction_err_ = _beta_divergence( + X, W, H, self._beta_loss, square_root=True + ) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_iter_ = n_iter + + return W + + def _fit_transform(self, X, y=None, W=None, H=None, update_H=True): + """Learn a NMF model for the data X and returns the transformed data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed + + y : Ignored + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. 
+ If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + update_H : bool, default=True + If True, both W and H will be estimated from initial guesses, + this corresponds to a call to the 'fit_transform' method. + If False, only W will be estimated, this corresponds to a call + to the 'transform' method. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + + H : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_iter_ : int + Actual number of iterations. + """ + check_non_negative(X, "NMF (input X)") + + # check parameters + self._check_params(X) + + if X.min() == 0 and self._beta_loss <= 0: + raise ValueError( + "When beta_loss <= 0 and X contains zeros, " + "the solver may diverge. Please add small values " + "to X, or use a positive beta_loss." + ) + + # initialize or check W and H + W, H = self._check_w_h(X, W, H, update_H) + + # scale the regularization terms + l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X) + + if self.solver == "cd": + W, H, n_iter = _fit_coordinate_descent( + X, + W, + H, + self.tol, + self.max_iter, + l1_reg_W, + l1_reg_H, + l2_reg_W, + l2_reg_H, + update_H=update_H, + verbose=self.verbose, + shuffle=self.shuffle, + random_state=self.random_state, + ) + elif self.solver == "mu": + W, H, n_iter, *_ = _fit_multiplicative_update( + X, + W, + H, + self._beta_loss, + self.max_iter, + self.tol, + l1_reg_W, + l1_reg_H, + l2_reg_W, + l2_reg_H, + update_H, + self.verbose, + ) + else: + raise ValueError("Invalid solver parameter '%s'." % self.solver) + + if n_iter == self.max_iter and self.tol > 0: + warnings.warn( + "Maximum number of iterations %d reached. Increase " + "it to improve convergence." + % self.max_iter, + ConvergenceWarning, + ) + + return W, H, n_iter + + def transform(self, X): + """Transform the data X according to the fitted NMF model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False + ) + + with config_context(assume_finite=True): + W, *_ = self._fit_transform(X, H=self.components_, update_H=False) + + return W + + +class MiniBatchNMF(_BaseNMF): + """Mini-Batch Non-Negative Matrix Factorization (NMF). + + .. versionadded:: 1.1 + + Find two non-negative matrices, i.e. matrices with all non-negative elements, + (`W`, `H`) whose product approximates the non-negative matrix `X`. This + factorization can be used for example for dimensionality reduction, source + separation or topic extraction. + + The objective function is: + + .. 
math:: + + L(W, H) &= 0.5 * ||X - WH||_{loss}^2 + + &+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1 + + &+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1 + + &+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2 + + &+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2 + + Where: + + :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) + + :math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm) + + The generic norm :math:`||X - WH||_{loss}^2` may represent + the Frobenius norm or another supported beta-divergence loss. + The choice between options is controlled by the `beta_loss` parameter. + + The objective function is minimized with an alternating minimization of `W` + and `H`. + + Note that the transformed data is named `W` and the components matrix is + named `H`. In the NMF literature, the naming convention is usually the opposite + since the data matrix `X` is transposed. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int or {'auto'} or None, default=None + Number of components, if `n_components` is not set all features + are kept. + If `n_components='auto'`, the number of components is automatically inferred + from W or H shapes. + + .. versionchanged:: 1.4 + Added `'auto'` value. + + init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None + Method used to initialize the procedure. + Valid options: + + - `None`: 'nndsvda' if `n_components <= min(n_samples, n_features)`, + otherwise random. + + - `'random'`: non-negative random matrices, scaled with: + `sqrt(X.mean() / n_components)` + + - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD) + initialization (better for sparseness). + + - `'nndsvda'`: NNDSVD with zeros filled with the average of X + (better when sparsity is not desired). + + - `'nndsvdar'` NNDSVD with zeros filled with small random values + (generally faster, less accurate alternative to NNDSVDa + for when sparsity is not desired). + + - `'custom'`: Use custom matrices `W` and `H` which must both be provided. + + batch_size : int, default=1024 + Number of samples in each mini-batch. Large batch sizes + give better long-term convergence at the cost of a slower start. + + beta_loss : float or {'frobenius', 'kullback-leibler', \ + 'itakura-saito'}, default='frobenius' + Beta divergence to be minimized, measuring the distance between `X` + and the dot product `WH`. Note that values different from 'frobenius' + (or 2) and 'kullback-leibler' (or 1) lead to significantly slower + fits. Note that for `beta_loss <= 0` (or 'itakura-saito'), the input + matrix `X` cannot contain zeros. + + tol : float, default=1e-4 + Control early stopping based on the norm of the differences in `H` + between 2 steps. To disable early stopping based on changes in `H`, set + `tol` to 0.0. + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + max_iter : int, default=200 + Maximum number of iterations over the complete dataset before + timing out. + + alpha_W : float, default=0.0 + Constant that multiplies the regularization terms of `W`. Set it to zero + (default) to have no regularization on `W`. + + alpha_H : float or "same", default="same" + Constant that multiplies the regularization terms of `H`. 
Set it to zero to + have no regularization on `H`. If "same" (default), it takes the same value as + `alpha_W`. + + l1_ratio : float, default=0.0 + The regularization mixing parameter, with 0 <= l1_ratio <= 1. + For l1_ratio = 0 the penalty is an elementwise L2 penalty + (aka Frobenius Norm). + For l1_ratio = 1 it is an elementwise L1 penalty. + For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. + + forget_factor : float, default=0.7 + Amount of rescaling of past information. Its value could be 1 with + finite datasets. Choosing values < 1 is recommended with online + learning as more recent batches will weight more than past batches. + + fresh_restarts : bool, default=False + Whether to completely solve for W at each step. Doing fresh restarts will likely + lead to a better solution for a same number of iterations but it is much slower. + + fresh_restarts_max_iter : int, default=30 + Maximum number of iterations when solving for W at each step. Only used when + doing fresh restarts. These iterations may be stopped early based on a small + change of W controlled by `tol`. + + transform_max_iter : int, default=None + Maximum number of iterations when solving for W at transform time. + If None, it defaults to `max_iter`. + + random_state : int, RandomState instance or None, default=None + Used for initialisation (when ``init`` == 'nndsvdar' or + 'random'), and in Coordinate Descent. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + verbose : bool, default=False + Whether to be verbose. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_components_ : int + The number of components. It is same as the `n_components` parameter + if it was given. Otherwise, it will be same as the number of + features. + + reconstruction_err_ : float + Frobenius norm of the matrix difference, or beta-divergence, between + the training data `X` and the reconstructed data `WH` from + the fitted model. + + n_iter_ : int + Actual number of started iterations over the whole dataset. + + n_steps_ : int + Number of mini-batches processed. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + See Also + -------- + NMF : Non-negative matrix factorization. + MiniBatchDictionaryLearning : Finds a dictionary that can best be used to represent + data using a sparse code. + + References + ---------- + .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor + factorizations" <10.1587/transfun.E92.A.708>` + Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals + of electronics, communications and computer sciences 92.3: 708-721, 2009. + + .. [2] :doi:`"Algorithms for nonnegative matrix factorization with the + beta-divergence" <10.1162/NECO_a_00168>` + Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9). + + .. [3] :doi:`"Online algorithms for nonnegative matrix factorization with the + Itakura-Saito divergence" <10.1109/ASPAA.2011.6082314>` + Lefevre, A., Bach, F., Fevotte, C. (2011). WASPA. 
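To make the scaled penalties in the objective above concrete, here is a small editorial sketch (not part of the sklearn source; the helper name is hypothetical) that evaluates the regularized objective for a given factorization, assuming the Frobenius loss:

import numpy as np

def regularized_frobenius_objective(X, W, H, alpha_W=0.0, alpha_H=0.0, l1_ratio=0.0):
    # 0.5 * ||X - WH||_Fro^2 plus the penalties exactly as written in the docstring:
    # the W terms are scaled by n_features, the H terms by n_samples.
    n_samples, n_features = X.shape
    obj = 0.5 * np.linalg.norm(X - W @ H, "fro") ** 2
    obj += alpha_W * l1_ratio * n_features * np.abs(W).sum()
    obj += alpha_H * l1_ratio * n_samples * np.abs(H).sum()
    obj += 0.5 * alpha_W * (1 - l1_ratio) * n_features * (W**2).sum()
    obj += 0.5 * alpha_H * (1 - l1_ratio) * n_samples * (H**2).sum()
    return obj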
+ + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) + >>> from sklearn.decomposition import MiniBatchNMF + >>> model = MiniBatchNMF(n_components=2, init='random', random_state=0) + >>> W = model.fit_transform(X) + >>> H = model.components_ + """ + + _parameter_constraints: dict = { + **_BaseNMF._parameter_constraints, + "max_no_improvement": [Interval(Integral, 1, None, closed="left"), None], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "forget_factor": [Interval(Real, 0, 1, closed="both")], + "fresh_restarts": ["boolean"], + "fresh_restarts_max_iter": [Interval(Integral, 1, None, closed="left")], + "transform_max_iter": [Interval(Integral, 1, None, closed="left"), None], + } + + def __init__( + self, + n_components="warn", + *, + init=None, + batch_size=1024, + beta_loss="frobenius", + tol=1e-4, + max_no_improvement=10, + max_iter=200, + alpha_W=0.0, + alpha_H="same", + l1_ratio=0.0, + forget_factor=0.7, + fresh_restarts=False, + fresh_restarts_max_iter=30, + transform_max_iter=None, + random_state=None, + verbose=0, + ): + super().__init__( + n_components=n_components, + init=init, + beta_loss=beta_loss, + tol=tol, + max_iter=max_iter, + random_state=random_state, + alpha_W=alpha_W, + alpha_H=alpha_H, + l1_ratio=l1_ratio, + verbose=verbose, + ) + + self.max_no_improvement = max_no_improvement + self.batch_size = batch_size + self.forget_factor = forget_factor + self.fresh_restarts = fresh_restarts + self.fresh_restarts_max_iter = fresh_restarts_max_iter + self.transform_max_iter = transform_max_iter + + def _check_params(self, X): + super()._check_params(X) + + # batch_size + self._batch_size = min(self.batch_size, X.shape[0]) + + # forget_factor + self._rho = self.forget_factor ** (self._batch_size / X.shape[0]) + + # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] + if self._beta_loss < 1: + self._gamma = 1.0 / (2.0 - self._beta_loss) + elif self._beta_loss > 2: + self._gamma = 1.0 / (self._beta_loss - 1.0) + else: + self._gamma = 1.0 + + # transform_max_iter + self._transform_max_iter = ( + self.max_iter + if self.transform_max_iter is None + else self.transform_max_iter + ) + + return self + + def _solve_W(self, X, H, max_iter): + """Minimize the objective function w.r.t W. + + Update W with H being fixed, until convergence. This is the heart + of `transform` but it's also used during `fit` when doing fresh restarts. + """ + avg = np.sqrt(X.mean() / self._n_components) + W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype) + W_buffer = W.copy() + + # Get scaled regularization terms. Done for each minibatch to take into account + # variable sizes of minibatches. + l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X) + + for _ in range(max_iter): + W, *_ = _multiplicative_update_w( + X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma + ) + + W_diff = linalg.norm(W - W_buffer) / linalg.norm(W) + if self.tol > 0 and W_diff <= self.tol: + break + + W_buffer[:] = W + + return W + + def _minibatch_step(self, X, W, H, update_H): + """Perform the update of W and H for one minibatch.""" + batch_size = X.shape[0] + + # get scaled regularization terms. Done for each minibatch to take into account + # variable sizes of minibatches. 
+ l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X) + + # update W + if self.fresh_restarts or W is None: + W = self._solve_W(X, H, self.fresh_restarts_max_iter) + else: + W, *_ = _multiplicative_update_w( + X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma + ) + + # necessary for stability with beta_loss < 1 + if self._beta_loss < 1: + W[W < np.finfo(np.float64).eps] = 0.0 + + batch_cost = ( + _beta_divergence(X, W, H, self._beta_loss) + + l1_reg_W * W.sum() + + l1_reg_H * H.sum() + + l2_reg_W * (W**2).sum() + + l2_reg_H * (H**2).sum() + ) / batch_size + + # update H (only at fit or fit_transform) + if update_H: + H[:] = _multiplicative_update_h( + X, + W, + H, + beta_loss=self._beta_loss, + l1_reg_H=l1_reg_H, + l2_reg_H=l2_reg_H, + gamma=self._gamma, + A=self._components_numerator, + B=self._components_denominator, + rho=self._rho, + ) + + # necessary for stability with beta_loss < 1 + if self._beta_loss <= 1: + H[H < np.finfo(np.float64).eps] = 0.0 + + return batch_cost + + def _minibatch_convergence( + self, X, batch_cost, H, H_buffer, n_samples, step, n_steps + ): + """Helper function to encapsulate the early stopping logic""" + batch_size = X.shape[0] + + # counts steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore first iteration because H is not updated yet. + if step == 1: + if self.verbose: + print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}") + return False + + # Compute an Exponentially Weighted Average of the cost function to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_cost is None: + self._ewa_cost = batch_cost + else: + alpha = batch_size / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha + + # Log progress to be able to monitor convergence + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch cost: " + f"{batch_cost}, ewa cost: {self._ewa_cost}" + ) + + # Early stopping based on change of H + H_diff = linalg.norm(H - H_buffer) / linalg.norm(H) + if self.tol > 0 and H_diff <= self.tol: + if self.verbose: + print(f"Converged (small H change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # cost function + if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min: + self._no_improvement = 0 + self._ewa_cost_min = self._ewa_cost + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in objective function) " + f"at step {step}/{n_steps}" + ) + return True + + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None, W=None, H=None): + """Learn a NMF model for the data X and returns the transformed data. + + This is more efficient than calling fit followed by transform. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + y : Ignored + Not used, present here for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. 
+ + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `None`, uses the initialisation method specified in `init`. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32] + ) + + with config_context(assume_finite=True): + W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H) + + self.reconstruction_err_ = _beta_divergence( + X, W, H, self._beta_loss, square_root=True + ) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_iter_ = n_iter + self.n_steps_ = n_steps + + return W + + def _fit_transform(self, X, W=None, H=None, update_H=True): + """Learn a NMF model for the data X and returns the transformed data. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is initialised as an array of zeros, unless + `solver='mu'`, then it is filled with values calculated by + `np.sqrt(X.mean() / self._n_components)`. + If `None`, uses the initialisation method specified in `init`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + If `update_H=False`, it is used as a constant, to solve for W only. + If `None`, uses the initialisation method specified in `init`. + + update_H : bool, default=True + If True, both W and H will be estimated from initial guesses, + this corresponds to a call to the `fit_transform` method. + If False, only W will be estimated, this corresponds to a call + to the `transform` method. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + + H : ndarray of shape (n_components, n_features) + Factorization matrix, sometimes called 'dictionary'. + + n_iter : int + Actual number of started iterations over the whole dataset. + + n_steps : int + Number of mini-batches processed. + """ + check_non_negative(X, "MiniBatchNMF (input X)") + self._check_params(X) + + if X.min() == 0 and self._beta_loss <= 0: + raise ValueError( + "When beta_loss <= 0 and X contains zeros, " + "the solver may diverge. Please add small values " + "to X, or use a positive beta_loss." 
+ ) + + n_samples = X.shape[0] + + # initialize or check W and H + W, H = self._check_w_h(X, W, H, update_H) + H_buffer = H.copy() + + # Initialize auxiliary matrices + self._components_numerator = H.copy() + self._components_denominator = np.ones(H.shape, dtype=H.dtype) + + # Attributes to monitor the convergence + self._ewa_cost = None + self._ewa_cost_min = None + self._no_improvement = 0 + + batches = gen_batches(n_samples, self._batch_size) + batches = itertools.cycle(batches) + n_steps_per_iter = int(np.ceil(n_samples / self._batch_size)) + n_steps = self.max_iter * n_steps_per_iter + + for i, batch in zip(range(n_steps), batches): + batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H) + + if update_H and self._minibatch_convergence( + X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps + ): + break + + H_buffer[:] = H + + if self.fresh_restarts: + W = self._solve_W(X, H, self._transform_max_iter) + + n_steps = i + 1 + n_iter = int(np.ceil(n_steps / n_steps_per_iter)) + + if n_iter == self.max_iter and self.tol > 0: + warnings.warn( + ( + f"Maximum number of iterations {self.max_iter} reached. " + "Increase it to improve convergence." + ), + ConvergenceWarning, + ) + + return W, H, n_iter, n_steps + + def transform(self, X): + """Transform the data X according to the fitted MiniBatchNMF model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be transformed by the model. + + Returns + ------- + W : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False + ) + + W = self._solve_W(X, self.components_, self._transform_max_iter) + + return W + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, W=None, H=None): + """Update the model using the data in `X` as a mini-batch. + + This method is expected to be called several times consecutively + on different chunks of a dataset so as to implement out-of-core + or online learning. + + This is especially useful when the whole dataset is too big to fit in + memory at once (see :ref:`scaling_strategies`). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data matrix to be decomposed. + + y : Ignored + Not used, present here for API consistency by convention. + + W : array-like of shape (n_samples, n_components), default=None + If `init='custom'`, it is used as initial guess for the solution. + Only used for the first call to `partial_fit`. + + H : array-like of shape (n_components, n_features), default=None + If `init='custom'`, it is used as initial guess for the solution. + Only used for the first call to `partial_fit`. + + Returns + ------- + self + Returns the instance itself. 
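The out-of-core pattern that `partial_fit` implements can be sketched as follows (an editorial illustration, not part of the shipped docstring; the chunking scheme, sizes, and synthetic data are assumptions):

import numpy as np
from sklearn.decomposition import MiniBatchNMF

rng = np.random.RandomState(0)
X = np.abs(rng.randn(1000, 20))            # non-negative data
est = MiniBatchNMF(n_components=5, random_state=0)
for chunk in np.array_split(X, 10):        # feed one mini-batch at a time
    est.partial_fit(chunk)
W = est.transform(X)                       # project the full data once fitting is done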
+ """ + has_components = hasattr(self, "components_") + + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + dtype=[np.float64, np.float32], + reset=not has_components, + ) + + if not has_components: + # This instance has not been fitted yet (fit or partial_fit) + self._check_params(X) + _, H = self._check_w_h(X, W=W, H=H, update_H=True) + + self._components_numerator = H.copy() + self._components_denominator = np.ones(H.shape, dtype=H.dtype) + self.n_steps_ = 0 + else: + H = self.components_ + + self._minibatch_step(X, None, H, update_H=True) + + self.n_components_ = H.shape[0] + self.components_ = H + self.n_steps_ += 1 + + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..d121c5e5c186fbfda337b203603ea69c7c2c1451 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_pca.py @@ -0,0 +1,747 @@ +""" Principal Component Analysis. +""" + +# Author: Alexandre Gramfort +# Olivier Grisel +# Mathieu Blondel +# Denis A. Engemann +# Michael Eickenberg +# Giorgio Patrini +# +# License: BSD 3 clause + +from math import log, sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.sparse import issparse +from scipy.sparse.linalg import svds +from scipy.special import gammaln + +from ..base import _fit_context +from ..utils import check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._array_api import _convert_to_numpy, get_namespace +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils.extmath import fast_logdet, randomized_svd, stable_cumsum, svd_flip +from ..utils.sparsefuncs import _implicit_column_offset, mean_variance_axis +from ..utils.validation import check_is_fitted +from ._base import _BasePCA + + +def _assess_dimension(spectrum, rank, n_samples): + """Compute the log-likelihood of a rank ``rank`` dataset. + + The dataset is assumed to be embedded in gaussian noise of shape(n, + dimf) having spectrum ``spectrum``. This implements the method of + T. P. Minka. + + Parameters + ---------- + spectrum : ndarray of shape (n_features,) + Data spectrum. + rank : int + Tested rank value. It should be strictly lower than n_features, + otherwise the method isn't specified (division by zero in equation + (31) from the paper). + n_samples : int + Number of samples. + + Returns + ------- + ll : float + The log-likelihood. + + References + ---------- + This implements the method of `Thomas P. Minka: + Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604 + `_ + """ + xp, _ = get_namespace(spectrum) + + n_features = spectrum.shape[0] + if not 1 <= rank < n_features: + raise ValueError("the tested rank should be in [1, n_features - 1]") + + eps = 1e-15 + + if spectrum[rank - 1] < eps: + # When the tested rank is associated with a small eigenvalue, there's + # no point in computing the log-likelihood: it's going to be very + # small and won't be the max anyway. Also, it can lead to numerical + # issues below when computing pa, in particular in log((spectrum[i] - + # spectrum[j]) because this will take the log of something very small. 
+ return -xp.inf + + pu = -rank * log(2.0) + for i in range(1, rank + 1): + pu += ( + gammaln((n_features - i + 1) / 2.0) + - log(xp.pi) * (n_features - i + 1) / 2.0 + ) + + pl = xp.sum(xp.log(spectrum[:rank])) + pl = -pl * n_samples / 2.0 + + v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank)) + pv = -log(v) * n_samples * (n_features - rank) / 2.0 + + m = n_features * rank - rank * (rank + 1.0) / 2.0 + pp = log(2.0 * xp.pi) * (m + rank) / 2.0 + + pa = 0.0 + spectrum_ = xp.asarray(spectrum, copy=True) + spectrum_[rank:n_features] = v + for i in range(rank): + for j in range(i + 1, spectrum.shape[0]): + pa += log( + (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i]) + ) + log(n_samples) + + ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0 + + return ll + + +def _infer_dimension(spectrum, n_samples): + """Infers the dimension of a dataset with a given spectrum. + + The returned value will be in [1, n_features - 1]. + """ + xp, _ = get_namespace(spectrum) + + ll = xp.empty_like(spectrum) + ll[0] = -xp.inf # we don't want to return n_components = 0 + for rank in range(1, spectrum.shape[0]): + ll[rank] = _assess_dimension(spectrum, rank, n_samples) + return xp.argmax(ll) + + +class PCA(_BasePCA): + """Principal component analysis (PCA). + + Linear dimensionality reduction using Singular Value Decomposition of the + data to project it to a lower dimensional space. The input data is centered + but not scaled for each feature before applying the SVD. + + It uses the LAPACK implementation of the full SVD or a randomized truncated + SVD by the method of Halko et al. 2009, depending on the shape of the input + data and the number of components to extract. + + It can also use the scipy.sparse.linalg ARPACK implementation of the + truncated SVD. + + Notice that this class does not support sparse input. See + :class:`TruncatedSVD` for an alternative with sparse data. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py` + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, float or 'mle', default=None + Number of components to keep. + if n_components is not set all components are kept:: + + n_components == min(n_samples, n_features) + + If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's + MLE is used to guess the dimension. Use of ``n_components == 'mle'`` + will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``. + + If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the + number of components such that the amount of variance that needs to be + explained is greater than the percentage specified by n_components. + + If ``svd_solver == 'arpack'``, the number of components must be + strictly less than the minimum of n_features and n_samples. + + Hence, the None case results in:: + + n_components == min(n_samples, n_features) - 1 + + copy : bool, default=True + If False, data passed to fit are overwritten and running + fit(X).transform(X) will not yield the expected results, + use fit_transform(X) instead. + + whiten : bool, default=False + When True (False by default) the `components_` vectors are multiplied + by the square root of n_samples and then divided by the singular values + to ensure uncorrelated outputs with unit component-wise variances. 
+ + Whitening will remove some information from the transformed signal + (the relative variance scales of the components) but can sometime + improve the predictive accuracy of the downstream estimators by + making their data respect some hard-wired assumptions. + + svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto' + If auto : + The solver is selected by a default policy based on `X.shape` and + `n_components`: if the input data is larger than 500x500 and the + number of components to extract is lower than 80% of the smallest + dimension of the data, then the more efficient 'randomized' + method is enabled. Otherwise the exact full SVD is computed and + optionally truncated afterwards. + If full : + run exact full SVD calling the standard LAPACK solver via + `scipy.linalg.svd` and select the components by postprocessing + If arpack : + run SVD truncated to n_components calling ARPACK solver via + `scipy.sparse.linalg.svds`. It requires strictly + 0 < n_components < min(X.shape) + If randomized : + run randomized SVD by the method of Halko et al. + + .. versionadded:: 0.18.0 + + tol : float, default=0.0 + Tolerance for singular values computed by svd_solver == 'arpack'. + Must be of range [0.0, infinity). + + .. versionadded:: 0.18.0 + + iterated_power : int or 'auto', default='auto' + Number of iterations for the power method computed by + svd_solver == 'randomized'. + Must be of range [0, infinity). + + .. versionadded:: 0.18.0 + + n_oversamples : int, default=10 + This parameter is only relevant when `svd_solver="randomized"`. + It corresponds to the additional number of random vectors to sample the + range of `X` so as to ensure proper conditioning. See + :func:`~sklearn.utils.extmath.randomized_svd` for more details. + + .. versionadded:: 1.1 + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Power iteration normalizer for randomized SVD solver. + Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` + for more details. + + .. versionadded:: 1.1 + + random_state : int, RandomState instance or None, default=None + Used when the 'arpack' or 'randomized' solvers are used. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + .. versionadded:: 0.18.0 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Principal axes in feature space, representing the directions of + maximum variance in the data. Equivalently, the right singular + vectors of the centered input data, parallel to its eigenvectors. + The components are sorted by decreasing ``explained_variance_``. + + explained_variance_ : ndarray of shape (n_components,) + The amount of variance explained by each of the selected components. + The variance estimation uses `n_samples - 1` degrees of freedom. + + Equal to n_components largest eigenvalues + of the covariance matrix of X. + + .. versionadded:: 0.18 + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + + If ``n_components`` is not set then all components are stored and the + sum of the ratios is equal to 1.0. + + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + .. 
versionadded:: 0.19 + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + + Equal to `X.mean(axis=0)`. + + n_components_ : int + The estimated number of components. When n_components is set + to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this + number is estimated from input data. Otherwise it equals the parameter + n_components, or the lesser value of n_features and n_samples + if n_components is None. + + n_samples_ : int + Number of samples in the training data. + + noise_variance_ : float + The estimated noise covariance following the Probabilistic PCA model + from Tipping and Bishop 1999. See "Pattern Recognition and + Machine Learning" by C. Bishop, 12.2.1 p. 574 or + http://www.miketipping.com/papers/met-mppca.pdf. It is required to + compute the estimated data covariance and score samples. + + Equal to the average of (min(n_features, n_samples) - n_components) + smallest eigenvalues of the covariance matrix of X. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + KernelPCA : Kernel Principal Component Analysis. + SparsePCA : Sparse Principal Component Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + IncrementalPCA : Incremental Principal Component Analysis. + + References + ---------- + For n_components == 'mle', this class uses the method from: + `Minka, T. P.. "Automatic choice of dimensionality for PCA". + In NIPS, pp. 598-604 `_ + + Implements the probabilistic PCA model from: + `Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal + component analysis". Journal of the Royal Statistical Society: + Series B (Statistical Methodology), 61(3), 611-622. + `_ + via the score and score_samples methods. + + For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`. + + For svd_solver == 'randomized', see: + :doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011). + "Finding structure with randomness: Probabilistic algorithms for + constructing approximate matrix decompositions". + SIAM review, 53(2), 217-288. + <10.1137/090771806>` + and also + :doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011). + "A randomized algorithm for the decomposition of matrices". + Applied and Computational Harmonic Analysis, 30(1), 47-68. + <10.1016/j.acha.2010.02.003>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import PCA + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> pca = PCA(n_components=2) + >>> pca.fit(X) + PCA(n_components=2) + >>> print(pca.explained_variance_ratio_) + [0.9924... 0.0075...] + >>> print(pca.singular_values_) + [6.30061... 0.54980...] + + >>> pca = PCA(n_components=2, svd_solver='full') + >>> pca.fit(X) + PCA(n_components=2, svd_solver='full') + >>> print(pca.explained_variance_ratio_) + [0.9924... 0.00755...] + >>> print(pca.singular_values_) + [6.30061... 0.54980...] + + >>> pca = PCA(n_components=1, svd_solver='arpack') + >>> pca.fit(X) + PCA(n_components=1, svd_solver='arpack') + >>> print(pca.explained_variance_ratio_) + [0.99244...] + >>> print(pca.singular_values_) + [6.30061...] 
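The effect of `whiten=True` described in the parameters above can be verified directly; a small editorial sketch (not part of the shipped examples), reusing the toy `X` from this docstring:

import numpy as np
from sklearn.decomposition import PCA

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
Xw = PCA(n_components=2, whiten=True).fit_transform(X)
print(Xw.std(axis=0, ddof=1))   # ~[1. 1.]: unit variance per component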
+ """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 0, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="neither"), + StrOptions({"mle"}), + None, + ], + "copy": ["boolean"], + "whiten": ["boolean"], + "svd_solver": [StrOptions({"auto", "full", "arpack", "randomized"})], + "tol": [Interval(Real, 0, None, closed="left")], + "iterated_power": [ + StrOptions({"auto"}), + Interval(Integral, 0, None, closed="left"), + ], + "n_oversamples": [Interval(Integral, 1, None, closed="left")], + "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + copy=True, + whiten=False, + svd_solver="auto", + tol=0.0, + iterated_power="auto", + n_oversamples=10, + power_iteration_normalizer="auto", + random_state=None, + ): + self.n_components = n_components + self.copy = copy + self.whiten = whiten + self.svd_solver = svd_solver + self.tol = tol + self.iterated_power = iterated_power + self.n_oversamples = n_oversamples + self.power_iteration_normalizer = power_iteration_normalizer + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Ignored. + + Returns + ------- + self : object + Returns the instance itself. + """ + self._fit(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model with X and apply the dimensionality reduction on X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Ignored. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed values. + + Notes + ----- + This method returns a Fortran-ordered array. To convert it to a + C-ordered array, use 'np.ascontiguousarray'. + """ + U, S, Vt = self._fit(X) + U = U[:, : self.n_components_] + + if self.whiten: + # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) + U *= sqrt(X.shape[0] - 1) + else: + # X_new = X * V = U * S * Vt * V = U * S + U *= S[: self.n_components_] + + return U + + def _fit(self, X): + """Dispatch to the right submethod depending on the chosen solver.""" + xp, is_array_api_compliant = get_namespace(X) + + # Raise an error for sparse input and unsupported svd_solver + if issparse(X) and self.svd_solver != "arpack": + raise TypeError( + 'PCA only support sparse inputs with the "arpack" solver, while ' + f'"{self.svd_solver}" was passed. See TruncatedSVD for a possible' + " alternative." + ) + # Raise an error for non-Numpy input and arpack solver. + if self.svd_solver == "arpack" and is_array_api_compliant: + raise ValueError( + "PCA with svd_solver='arpack' is not supported for Array API inputs." 
+ ) + + X = self._validate_data( + X, + dtype=[xp.float64, xp.float32], + accept_sparse=("csr", "csc"), + ensure_2d=True, + copy=self.copy, + ) + + # Handle n_components==None + if self.n_components is None: + if self.svd_solver != "arpack": + n_components = min(X.shape) + else: + n_components = min(X.shape) - 1 + else: + n_components = self.n_components + + # Handle svd_solver + self._fit_svd_solver = self.svd_solver + if self._fit_svd_solver == "auto": + # Small problem or n_components == 'mle', just call full PCA + if max(X.shape) <= 500 or n_components == "mle": + self._fit_svd_solver = "full" + elif 1 <= n_components < 0.8 * min(X.shape): + self._fit_svd_solver = "randomized" + # This is also the case of n_components in (0,1) + else: + self._fit_svd_solver = "full" + + # Call different fits for either full or truncated SVD + if self._fit_svd_solver == "full": + return self._fit_full(X, n_components) + elif self._fit_svd_solver in ["arpack", "randomized"]: + return self._fit_truncated(X, n_components, self._fit_svd_solver) + + def _fit_full(self, X, n_components): + """Fit the model by computing full SVD on X.""" + xp, is_array_api_compliant = get_namespace(X) + + n_samples, n_features = X.shape + + if n_components == "mle": + if n_samples < n_features: + raise ValueError( + "n_components='mle' is only supported if n_samples >= n_features" + ) + elif not 0 <= n_components <= min(n_samples, n_features): + raise ValueError( + "n_components=%r must be between 0 and " + "min(n_samples, n_features)=%r with " + "svd_solver='full'" % (n_components, min(n_samples, n_features)) + ) + + # Center data + self.mean_ = xp.mean(X, axis=0) + X -= self.mean_ + + if not is_array_api_compliant: + # Use scipy.linalg with NumPy/SciPy inputs for the sake of not + # introducing unanticipated behavior changes. In the long run we + # could instead decide to always use xp.linalg.svd for all inputs, + # but that would make this code rely on numpy's SVD instead of + # scipy's. It's not 100% clear whether they use the same LAPACK + # solver by default though (assuming both are built against the + # same BLAS). + U, S, Vt = linalg.svd(X, full_matrices=False) + else: + U, S, Vt = xp.linalg.svd(X, full_matrices=False) + # flip eigenvectors' sign to enforce deterministic output + U, Vt = svd_flip(U, Vt) + + components_ = Vt + + # Get variance explained by singular values + explained_variance_ = (S**2) / (n_samples - 1) + total_var = xp.sum(explained_variance_) + explained_variance_ratio_ = explained_variance_ / total_var + singular_values_ = xp.asarray(S, copy=True) # Store the singular values. + + # Postprocess the number of components required + if n_components == "mle": + n_components = _infer_dimension(explained_variance_, n_samples) + elif 0 < n_components < 1.0: + # number of components for which the cumulated explained + # variance percentage is superior to the desired threshold + # side='right' ensures that number of features selected + # their variance is always greater than n_components float + # passed. More discussion in issue: #15669 + if is_array_api_compliant: + # Convert to numpy as xp.cumsum and xp.searchsorted are not + # part of the Array API standard yet: + # + # https://github.com/data-apis/array-api/issues/597 + # https://github.com/data-apis/array-api/issues/688 + # + # Furthermore, it's not always safe to call them for namespaces + # that already implement them: for instance as + # cupy.searchsorted does not accept a float as second argument. 
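# (Editorial sketch in comment form, not part of the sklearn source.) The
# selection performed just below picks the smallest number of components whose
# cumulative explained-variance ratio reaches the requested fraction, e.g.:
#
#   ratios = np.array([0.7, 0.2, 0.08, 0.02])
#   np.searchsorted(np.cumsum(ratios), 0.95, side="right") + 1   # -> 3
#
# side="right" guarantees the retained variance is strictly greater than the
# threshold when the threshold falls exactly on a cumulative value.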
+ explained_variance_ratio_np = _convert_to_numpy( + explained_variance_ratio_, xp=xp + ) + else: + explained_variance_ratio_np = explained_variance_ratio_ + ratio_cumsum = stable_cumsum(explained_variance_ratio_np) + n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1 + + # Compute noise covariance using Probabilistic PCA model + # The sigma2 maximum likelihood (cf. eq. 12.46) + if n_components < min(n_features, n_samples): + self.noise_variance_ = xp.mean(explained_variance_[n_components:]) + else: + self.noise_variance_ = 0.0 + + self.n_samples_ = n_samples + self.components_ = components_[:n_components, :] + self.n_components_ = n_components + self.explained_variance_ = explained_variance_[:n_components] + self.explained_variance_ratio_ = explained_variance_ratio_[:n_components] + self.singular_values_ = singular_values_[:n_components] + + return U, S, Vt + + def _fit_truncated(self, X, n_components, svd_solver): + """Fit the model by computing truncated SVD (by ARPACK or randomized) + on X. + """ + xp, _ = get_namespace(X) + + n_samples, n_features = X.shape + + if isinstance(n_components, str): + raise ValueError( + "n_components=%r cannot be a string with svd_solver='%s'" + % (n_components, svd_solver) + ) + elif not 1 <= n_components <= min(n_samples, n_features): + raise ValueError( + "n_components=%r must be between 1 and " + "min(n_samples, n_features)=%r with " + "svd_solver='%s'" + % (n_components, min(n_samples, n_features), svd_solver) + ) + elif svd_solver == "arpack" and n_components == min(n_samples, n_features): + raise ValueError( + "n_components=%r must be strictly less than " + "min(n_samples, n_features)=%r with " + "svd_solver='%s'" + % (n_components, min(n_samples, n_features), svd_solver) + ) + + random_state = check_random_state(self.random_state) + + # Center data + total_var = None + if issparse(X): + self.mean_, var = mean_variance_axis(X, axis=0) + total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1 + X = _implicit_column_offset(X, self.mean_) + else: + self.mean_ = xp.mean(X, axis=0) + X -= self.mean_ + + if svd_solver == "arpack": + v0 = _init_arpack_v0(min(X.shape), random_state) + U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0) + # svds doesn't abide by scipy.linalg.svd/randomized_svd + # conventions, so reverse its outputs. + S = S[::-1] + # flip eigenvectors' sign to enforce deterministic output + U, Vt = svd_flip(U[:, ::-1], Vt[::-1]) + + elif svd_solver == "randomized": + # sign flipping is done inside + U, S, Vt = randomized_svd( + X, + n_components=n_components, + n_oversamples=self.n_oversamples, + n_iter=self.iterated_power, + power_iteration_normalizer=self.power_iteration_normalizer, + flip_sign=True, + random_state=random_state, + ) + + self.n_samples_ = n_samples + self.components_ = Vt + self.n_components_ = n_components + + # Get variance explained by singular values + self.explained_variance_ = (S**2) / (n_samples - 1) + + # Workaround in-place variance calculation since at the time numpy + # did not have a way to calculate variance in-place. 
+ # + # TODO: update this code to either: + # * Use the array-api variance calculation, unless memory usage suffers + # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api + # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991 + if total_var is None: + N = X.shape[0] - 1 + X **= 2 + total_var = xp.sum(X) / N + + self.explained_variance_ratio_ = self.explained_variance_ / total_var + self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values. + + if self.n_components_ < min(n_features, n_samples): + self.noise_variance_ = total_var - xp.sum(self.explained_variance_) + self.noise_variance_ /= min(n_features, n_samples) - n_components + else: + self.noise_variance_ = 0.0 + + return U, S, Vt + + def score_samples(self, X): + """Return the log-likelihood of each sample. + + See. "Pattern Recognition and Machine Learning" + by C. Bishop, 12.2.1 p. 574 + or http://www.miketipping.com/papers/met-mppca.pdf + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + Returns + ------- + ll : ndarray of shape (n_samples,) + Log-likelihood of each sample under the current model. + """ + check_is_fitted(self) + xp, _ = get_namespace(X) + X = self._validate_data(X, dtype=[xp.float64, xp.float32], reset=False) + Xr = X - self.mean_ + n_features = X.shape[1] + precision = self.get_precision() + log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision)) + return log_like + + def score(self, X, y=None): + """Return the average log-likelihood of all samples. + + See. "Pattern Recognition and Machine Learning" + by C. Bishop, 12.2.1 p. 574 + or http://www.miketipping.com/papers/met-mppca.pdf + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + y : Ignored + Ignored. + + Returns + ------- + ll : float + Average log-likelihood of the samples under the current model. 
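A common use of this likelihood, sketched here as an editorial illustration (the cross-validation setup and synthetic data are assumptions, not part of this file), is to pick `n_components` by held-out log-likelihood:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score

rng = np.random.RandomState(0)
X = rng.randn(200, 10) @ rng.randn(10, 10)       # synthetic correlated data
scores = [cross_val_score(PCA(n_components=k), X).mean() for k in range(1, 6)]
best_k = int(np.argmax(scores)) + 1              # n_components with the highest average log-likelihood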
+ """ + xp, _ = get_namespace(X) + return float(xp.mean(self.score_samples(X))) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32], "array_api_support": True} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d29e29ba19b815566fdaf30a5260b993fc26656 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4744a4e90ebefadf341a0ffe7ed24d19779a687b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6420a5c13ff1efb3e4e9b054cdabba3136623cd5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56e4fc131d777886846bb42c44de2179a523948a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09c3ea0cddb164b722142984fdfb5c354dd79419 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d5694e0703060e12f11728338c32e5781854f80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e333545ba80022576fab8d257929d80667b3ec3e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f68a5846f67dae3cd14b0fd58ea8b3f1b8078ea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de0ce38ceac5c2f7be5c777a68f821d103627495 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a41a26d6864748e98d544d0dc23ef8776578c05 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08478720ed4baddc5465444f4d823e319aae8a0b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py new file mode 100644 index 0000000000000000000000000000000000000000..b79df4db8cd74a70452ca7212b36b7ddc305caa3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py @@ -0,0 +1,983 @@ +import itertools +import warnings +from functools import partial + +import numpy as np +import pytest + +import sklearn +from sklearn.base import clone +from sklearn.decomposition import ( + DictionaryLearning, + MiniBatchDictionaryLearning, + SparseCoder, + dict_learning, + dict_learning_online, + sparse_encode, +) +from sklearn.decomposition._dict_learning import _update_dict +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils import check_array +from sklearn.utils._testing import ( + TempMemmap, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.estimator_checks import ( 
+ check_transformer_data_not_an_array, + check_transformer_general, + check_transformers_unfitted, +) +from sklearn.utils.parallel import Parallel + +rng_global = np.random.RandomState(0) +n_samples, n_features = 10, 8 +X = rng_global.randn(n_samples, n_features) + + +def test_sparse_encode_shapes_omp(): + rng = np.random.RandomState(0) + algorithms = ["omp", "lasso_lars", "lasso_cd", "lars", "threshold"] + for n_components, n_samples in itertools.product([1, 5], [1, 9]): + X_ = rng.randn(n_samples, n_features) + dictionary = rng.randn(n_components, n_features) + for algorithm, n_jobs in itertools.product(algorithms, [1, 2]): + code = sparse_encode(X_, dictionary, algorithm=algorithm, n_jobs=n_jobs) + assert code.shape == (n_samples, n_components) + + +def test_dict_learning_shapes(): + n_components = 5 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + + n_components = 1 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + assert dico.transform(X).shape == (X.shape[0], n_components) + + +def test_dict_learning_overcomplete(): + n_components = 12 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + + +def test_max_iter(): + def ricker_function(resolution, center, width): + """Discrete sub-sampled Ricker (Mexican hat) wavelet""" + x = np.linspace(0, resolution - 1, resolution) + x = ( + (2 / (np.sqrt(3 * width) * np.pi**0.25)) + * (1 - (x - center) ** 2 / width**2) + * np.exp(-((x - center) ** 2) / (2 * width**2)) + ) + return x + + def ricker_matrix(width, resolution, n_components): + """Dictionary of Ricker (Mexican hat) wavelets""" + centers = np.linspace(0, resolution - 1, n_components) + D = np.empty((n_components, resolution)) + for i, center in enumerate(centers): + D[i] = ricker_function(resolution, center, width) + D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis] + return D + + transform_algorithm = "lasso_cd" + resolution = 1024 + subsampling = 3 # subsampling factor + n_components = resolution // subsampling + + # Compute a wavelet dictionary + D_multi = np.r_[ + tuple( + ricker_matrix( + width=w, resolution=resolution, n_components=n_components // 5 + ) + for w in (10, 50, 100, 500, 1000) + ) + ] + + X = np.linspace(0, resolution - 1, resolution) + first_quarter = X < resolution / 4 + X[first_quarter] = 3.0 + X[np.logical_not(first_quarter)] = -1.0 + X = X.reshape(1, -1) + + # check that the underlying model fails to converge + with pytest.warns(ConvergenceWarning): + model = SparseCoder( + D_multi, transform_algorithm=transform_algorithm, transform_max_iter=1 + ) + model.fit_transform(X) + + # check that the underlying model converges w/o warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + model = SparseCoder( + D_multi, transform_algorithm=transform_algorithm, transform_max_iter=2000 + ) + model.fit_transform(X) + + +def test_dict_learning_lars_positive_parameter(): + n_components = 5 + alpha = 1 + err_msg = "Positive constraint not supported for 'lars' coding method." 
+ with pytest.raises(ValueError, match=err_msg): + dict_learning(X, n_components, alpha=alpha, positive_code=True) + + +@pytest.mark.parametrize( + "transform_algorithm", + [ + "lasso_lars", + "lasso_cd", + "threshold", + ], +) +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_positivity(transform_algorithm, positive_code, positive_dict): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm=transform_algorithm, + random_state=0, + positive_code=positive_code, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + code = dico.transform(X) + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_lars_dict_positivity(positive_dict): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + random_state=0, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + + +def test_dict_learning_lars_code_positivity(): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + random_state=0, + positive_code=True, + fit_algorithm="cd", + ).fit(X) + + err_msg = "Positive constraint not supported for '{}' coding method." + err_msg = err_msg.format("lars") + with pytest.raises(ValueError, match=err_msg): + dico.transform(X) + + +def test_dict_learning_reconstruction(): + n_components = 12 + dico = DictionaryLearning( + n_components, transform_algorithm="omp", transform_alpha=0.001, random_state=0 + ) + code = dico.fit(X).transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X) + + dico.set_params(transform_algorithm="lasso_lars") + code = dico.transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) + + # used to test lars here too, but there's no guarantee the number of + # nonzero atoms is right. 
+ + +def test_dict_learning_reconstruction_parallel(): + # regression test that parallel reconstruction works with n_jobs>1 + n_components = 12 + dico = DictionaryLearning( + n_components, + transform_algorithm="omp", + transform_alpha=0.001, + random_state=0, + n_jobs=4, + ) + code = dico.fit(X).transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X) + + dico.set_params(transform_algorithm="lasso_lars") + code = dico.transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) + + +def test_dict_learning_lassocd_readonly_data(): + n_components = 12 + with TempMemmap(X) as X_read_only: + dico = DictionaryLearning( + n_components, + transform_algorithm="lasso_cd", + transform_alpha=0.001, + random_state=0, + n_jobs=4, + ) + with ignore_warnings(category=ConvergenceWarning): + code = dico.fit(X_read_only).transform(X_read_only) + assert_array_almost_equal( + np.dot(code, dico.components_), X_read_only, decimal=2 + ) + + +def test_dict_learning_nonzero_coefs(): + n_components = 4 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + transform_n_nonzero_coefs=3, + random_state=0, + ) + code = dico.fit(X).transform(X[np.newaxis, 1]) + assert len(np.flatnonzero(code)) == 3 + + dico.set_params(transform_algorithm="omp") + code = dico.transform(X[np.newaxis, 1]) + assert len(np.flatnonzero(code)) == 3 + + +def test_dict_learning_split(): + n_components = 5 + dico = DictionaryLearning( + n_components, transform_algorithm="threshold", random_state=0 + ) + code = dico.fit(X).transform(X) + dico.split_sign = True + split_code = dico.transform(X) + + assert_array_almost_equal( + split_code[:, :n_components] - split_code[:, n_components:], code + ) + + +def test_dict_learning_online_shapes(): + rng = np.random.RandomState(0) + n_components = 8 + + code, dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + max_iter=10, + method="cd", + random_state=rng, + return_code=True, + ) + assert code.shape == (n_samples, n_components) + assert dictionary.shape == (n_components, n_features) + assert np.dot(code, dictionary).shape == X.shape + + dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + max_iter=10, + method="cd", + random_state=rng, + return_code=False, + ) + assert dictionary.shape == (n_components, n_features) + + +def test_dict_learning_online_lars_positive_parameter(): + err_msg = "Positive constraint not supported for 'lars' coding method." 
+ with pytest.raises(ValueError, match=err_msg): + dict_learning_online(X, batch_size=4, max_iter=10, positive_code=True) + + +@pytest.mark.parametrize( + "transform_algorithm", + [ + "lasso_lars", + "lasso_cd", + "threshold", + ], +) +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_minibatch_dictionary_learning_positivity( + transform_algorithm, positive_code, positive_dict +): + n_components = 8 + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=10, + transform_algorithm=transform_algorithm, + random_state=0, + positive_code=positive_code, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + code = dico.transform(X) + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_minibatch_dictionary_learning_lars(positive_dict): + n_components = 8 + + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=10, + transform_algorithm="lars", + random_state=0, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + + +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_online_positivity(positive_code, positive_dict): + rng = np.random.RandomState(0) + n_components = 8 + + code, dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + method="cd", + alpha=1, + random_state=rng, + positive_dict=positive_dict, + positive_code=positive_code, + ) + if positive_dict: + assert (dictionary >= 0).all() + else: + assert (dictionary < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +def test_dict_learning_online_verbosity(): + # test verbosity for better coverage + n_components = 5 + import sys + from io import StringIO + + old_stdout = sys.stdout + try: + sys.stdout = StringIO() + + # convergence monitoring verbosity + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, verbose=1, tol=0.1, random_state=0 + ) + dico.fit(X) + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=5, + verbose=1, + max_no_improvement=2, + random_state=0, + ) + dico.fit(X) + # higher verbosity level + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, verbose=2, random_state=0 + ) + dico.fit(X) + + # function API verbosity + dict_learning_online( + X, + n_components=n_components, + batch_size=4, + alpha=1, + verbose=1, + random_state=0, + ) + dict_learning_online( + X, + n_components=n_components, + batch_size=4, + alpha=1, + verbose=2, + random_state=0, + ) + finally: + sys.stdout = old_stdout + + assert dico.components_.shape == (n_components, n_features) + + +def test_dict_learning_online_estimator_shapes(): + n_components = 5 + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, random_state=0 + ) + dico.fit(X) + assert dico.components_.shape == (n_components, n_features) + + +def test_dict_learning_online_overcomplete(): + n_components = 12 + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, random_state=0 + ).fit(X) + assert dico.components_.shape == (n_components, n_features) + 
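A minimal sketch (not part of the diffed test file, toy data hypothetical) of the online/minibatch API that the surrounding tests exercise: the estimator can be fitted in one call, which internally iterates over small batches, or fed incrementally via partial_fit, which the tests below compare against the batch fit:

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
X_toy = rng.randn(30, 8)  # hypothetical toy data: 30 samples, 8 features

# One-shot fit: the data is consumed in minibatches of `batch_size` samples.
mbdl = MiniBatchDictionaryLearning(
    n_components=12, batch_size=4, max_iter=10, random_state=0
)
mbdl.fit(X_toy)
assert mbdl.components_.shape == (12, 8)  # (n_components, n_features)

# Incremental fitting: the same estimator can be updated batch by batch.
mbdl2 = MiniBatchDictionaryLearning(n_components=12, batch_size=4, random_state=0)
for batch in np.array_split(X_toy, 10):
    mbdl2.partial_fit(batch)
assert mbdl2.components_.shape == (12, 8)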
+ +def test_dict_learning_online_initialization(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=0, dict_init=V, random_state=0 + ).fit(X) + assert_array_equal(dico.components_, V) + + +def test_dict_learning_online_readonly_initialization(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) + V.setflags(write=False) + MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=1, + dict_init=V, + random_state=0, + shuffle=False, + ).fit(X) + + +def test_dict_learning_online_partial_fit(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + dict1 = MiniBatchDictionaryLearning( + n_components, + max_iter=10, + batch_size=1, + alpha=1, + shuffle=False, + dict_init=V, + max_no_improvement=None, + tol=0.0, + random_state=0, + ).fit(X) + dict2 = MiniBatchDictionaryLearning( + n_components, alpha=1, dict_init=V, random_state=0 + ) + for i in range(10): + for sample in X: + dict2.partial_fit(sample[np.newaxis, :]) + + assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0) + assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) + + # partial_fit should ignore max_iter (#17433) + assert dict1.n_steps_ == dict2.n_steps_ == 100 + + +def test_sparse_encode_shapes(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"): + code = sparse_encode(X, V, algorithm=algo) + assert code.shape == (n_samples, n_components) + + +@pytest.mark.parametrize("algo", ["lasso_lars", "lasso_cd", "threshold"]) +@pytest.mark.parametrize("positive", [False, True]) +def test_sparse_encode_positivity(algo, positive): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + code = sparse_encode(X, V, algorithm=algo, positive=positive) + if positive: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("algo", ["lars", "omp"]) +def test_sparse_encode_unavailable_positivity(algo): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + err_msg = "Positive constraint not supported for '{}' coding method." 
+ err_msg = err_msg.format(algo) + with pytest.raises(ValueError, match=err_msg): + sparse_encode(X, V, algorithm=algo, positive=True) + + +def test_sparse_encode_input(): + n_components = 100 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + Xf = check_array(X, order="F") + for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"): + a = sparse_encode(X, V, algorithm=algo) + b = sparse_encode(Xf, V, algorithm=algo) + assert_array_almost_equal(a, b) + + +def test_sparse_encode_error(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + code = sparse_encode(X, V, alpha=0.001) + assert not np.all(code == 0) + assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1 + + +def test_sparse_encode_error_default_sparsity(): + rng = np.random.RandomState(0) + X = rng.randn(100, 64) + D = rng.randn(2, 64) + code = ignore_warnings(sparse_encode)(X, D, algorithm="omp", n_nonzero_coefs=None) + assert code.shape == (100, 2) + + +def test_sparse_coder_estimator(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + coder = SparseCoder( + dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001 + ).transform(X) + assert not np.all(coder == 0) + assert np.sqrt(np.sum((np.dot(coder, V) - X) ** 2)) < 0.1 + + +def test_sparse_coder_estimator_clone(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + coder = SparseCoder( + dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001 + ) + cloned = clone(coder) + assert id(cloned) != id(coder) + np.testing.assert_allclose(cloned.dictionary, coder.dictionary) + assert id(cloned.dictionary) != id(coder.dictionary) + assert cloned.n_components_ == coder.n_components_ + assert cloned.n_features_in_ == coder.n_features_in_ + data = np.random.rand(n_samples, n_features).astype(np.float32) + np.testing.assert_allclose(cloned.transform(data), coder.transform(data)) + + +def test_sparse_coder_parallel_mmap(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/5956 + # Test that SparseCoder does not error by passing reading only + # arrays to child processes + + rng = np.random.RandomState(777) + n_components, n_features = 40, 64 + init_dict = rng.rand(n_components, n_features) + # Ensure that `data` is >2M. Joblib memory maps arrays + # if they are larger than 1MB. 
The 4 accounts for float32 + # data type + n_samples = int(2e6) // (4 * n_features) + data = np.random.rand(n_samples, n_features).astype(np.float32) + + sc = SparseCoder(init_dict, transform_algorithm="omp", n_jobs=2) + sc.fit_transform(data) + + +def test_sparse_coder_common_transformer(): + rng = np.random.RandomState(777) + n_components, n_features = 40, 3 + init_dict = rng.rand(n_components, n_features) + + sc = SparseCoder(init_dict) + + check_transformer_data_not_an_array(sc.__class__.__name__, sc) + check_transformer_general(sc.__class__.__name__, sc) + check_transformer_general_memmap = partial( + check_transformer_general, readonly_memmap=True + ) + check_transformer_general_memmap(sc.__class__.__name__, sc) + check_transformers_unfitted(sc.__class__.__name__, sc) + + +def test_sparse_coder_n_features_in(): + d = np.array([[1, 2, 3], [1, 2, 3]]) + sc = SparseCoder(d) + assert sc.n_features_in_ == d.shape[1] + + +def test_update_dict(): + # Check the dict update in batch mode vs online mode + # Non-regression test for #4866 + rng = np.random.RandomState(0) + + code = np.array([[0.5, -0.5], [0.1, 0.9]]) + dictionary = np.array([[1.0, 0.0], [0.6, 0.8]]) + + X = np.dot(code, dictionary) + rng.randn(2, 2) + + # full batch update + newd_batch = dictionary.copy() + _update_dict(newd_batch, X, code) + + # online update + A = np.dot(code.T, code) + B = np.dot(X.T, code) + newd_online = dictionary.copy() + _update_dict(newd_online, X, code, A, B) + + assert_allclose(newd_batch, newd_online) + + +@pytest.mark.parametrize( + "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +# Note: do not check integer input because `lasso_lars` and `lars` fail with +# `ValueError` in `_lars_path_solver` +def test_sparse_encode_dtype_match(data_type, algorithm): + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + code = sparse_encode( + X.astype(data_type), dictionary.astype(data_type), algorithm=algorithm + ) + assert code.dtype == data_type + + +@pytest.mark.parametrize( + "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +def test_sparse_encode_numerical_consistency(algorithm): + # verify numerical consistency among np.float32 and np.float64 + rtol = 1e-4 + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + code_32 = sparse_encode( + X.astype(np.float32), dictionary.astype(np.float32), algorithm=algorithm + ) + code_64 = sparse_encode( + X.astype(np.float64), dictionary.astype(np.float64), algorithm=algorithm + ) + assert_allclose(code_32, code_64, rtol=rtol) + + +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +# Note: do not check integer input because `lasso_lars` and `lars` fail with +# `ValueError` in `_lars_path_solver` +def test_sparse_coder_dtype_match(data_type, transform_algorithm): + # Verify preserving dtype for transform in sparse coder + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + coder = SparseCoder( + dictionary.astype(data_type), transform_algorithm=transform_algorithm + ) + code = coder.transform(X.astype(data_type)) + assert code.dtype == data_type + + +@pytest.mark.parametrize("fit_algorithm", ("lars", "cd")) +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", 
"threshold", "omp") +) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dictionary_learning_dtype_match( + data_type, + expected_type, + fit_algorithm, + transform_algorithm, +): + # Verify preserving dtype for fit and transform in dictionary learning class + dict_learner = DictionaryLearning( + n_components=8, + fit_algorithm=fit_algorithm, + transform_algorithm=transform_algorithm, + random_state=0, + ) + dict_learner.fit(X.astype(data_type)) + assert dict_learner.components_.dtype == expected_type + assert dict_learner.transform(X.astype(data_type)).dtype == expected_type + + +@pytest.mark.parametrize("fit_algorithm", ("lars", "cd")) +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_minibatch_dictionary_learning_dtype_match( + data_type, + expected_type, + fit_algorithm, + transform_algorithm, +): + # Verify preserving dtype for fit and transform in minibatch dictionary learning + dict_learner = MiniBatchDictionaryLearning( + n_components=8, + batch_size=10, + fit_algorithm=fit_algorithm, + transform_algorithm=transform_algorithm, + max_iter=100, + tol=1e-1, + random_state=0, + ) + dict_learner.fit(X.astype(data_type)) + + assert dict_learner.components_.dtype == expected_type + assert dict_learner.transform(X.astype(data_type)).dtype == expected_type + assert dict_learner._A.dtype == expected_type + assert dict_learner._B.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dict_learning_dtype_match(data_type, expected_type, method): + # Verify output matrix dtype + rng = np.random.RandomState(0) + n_components = 8 + code, dictionary, _ = dict_learning( + X.astype(data_type), + n_components=n_components, + alpha=1, + random_state=rng, + method=method, + ) + assert code.dtype == expected_type + assert dictionary.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_dict_learning_numerical_consistency(method): + # verify numerically consistent among np.float32 and np.float64 + rtol = 1e-6 + n_components = 4 + alpha = 2 + + U_64, V_64, _ = dict_learning( + X.astype(np.float64), + n_components=n_components, + alpha=alpha, + random_state=0, + method=method, + ) + U_32, V_32, _ = dict_learning( + X.astype(np.float32), + n_components=n_components, + alpha=alpha, + random_state=0, + method=method, + ) + + # Optimal solution (U*, V*) is not unique. + # If (U*, V*) is optimal solution, (-U*,-V*) is also optimal, + # and (column permutated U*, row permutated V*) are also optional + # as long as holding UV. + # So here UV, ||U||_1,1 and sum(||V_k||_2^2) are verified + # instead of comparing directly U and V. 
+    assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
+    assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
+    assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
+    # verify that the obtained solution is not degenerate
+    assert np.mean(U_64 != 0.0) > 0.05
+    assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
+
+
+@pytest.mark.parametrize("method", ("lars", "cd"))
+@pytest.mark.parametrize(
+    "data_type, expected_type",
+    (
+        (np.float32, np.float32),
+        (np.float64, np.float64),
+        (np.int32, np.float64),
+        (np.int64, np.float64),
+    ),
+)
+def test_dict_learning_online_dtype_match(data_type, expected_type, method):
+    # Verify output matrix dtype
+    rng = np.random.RandomState(0)
+    n_components = 8
+    code, dictionary = dict_learning_online(
+        X.astype(data_type),
+        n_components=n_components,
+        alpha=1,
+        batch_size=10,
+        random_state=rng,
+        method=method,
+    )
+    assert code.dtype == expected_type
+    assert dictionary.dtype == expected_type
+
+
+@pytest.mark.parametrize("method", ("lars", "cd"))
+def test_dict_learning_online_numerical_consistency(method):
+    # verify numerical consistency between np.float32 and np.float64
+    rtol = 1e-4
+    n_components = 4
+    alpha = 1
+
+    U_64, V_64 = dict_learning_online(
+        X.astype(np.float64),
+        n_components=n_components,
+        max_iter=1_000,
+        alpha=alpha,
+        batch_size=10,
+        random_state=0,
+        method=method,
+        tol=0.0,
+        max_no_improvement=None,
+    )
+    U_32, V_32 = dict_learning_online(
+        X.astype(np.float32),
+        n_components=n_components,
+        max_iter=1_000,
+        alpha=alpha,
+        batch_size=10,
+        random_state=0,
+        method=method,
+        tol=0.0,
+        max_no_improvement=None,
+    )
+
+    # The optimal solution (U*, V*) is not unique.
+    # If (U*, V*) is an optimal solution, (-U*, -V*) is also optimal,
+    # and (column-permuted U*, row-permuted V*) is also optimal
+    # as long as the product UV is unchanged.
+    # So UV, ||U||_1,1 and sum(||V_k||_2^2) are verified here
+    # instead of comparing U and V directly.
+    assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
+    assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
+    assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
+    # verify that the obtained solution is not degenerate
+    assert np.mean(U_64 != 0.0) > 0.05
+    assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
+
+
+@pytest.mark.parametrize(
+    "estimator",
+    [
+        SparseCoder(X.T),
+        DictionaryLearning(),
+        MiniBatchDictionaryLearning(batch_size=4, max_iter=10),
+    ],
+    ids=lambda x: x.__class__.__name__,
+)
+def test_get_feature_names_out(estimator):
+    """Check feature names for dict learning estimators."""
+    estimator.fit(X)
+    n_components = X.shape[1]
+
+    feature_names_out = estimator.get_feature_names_out()
+    estimator_name = estimator.__class__.__name__.lower()
+    assert_array_equal(
+        feature_names_out,
+        [f"{estimator_name}{i}" for i in range(n_components)],
+    )
+
+
+def test_cd_work_on_joblib_memmapped_data(monkeypatch):
+    monkeypatch.setattr(
+        sklearn.decomposition._dict_learning,
+        "Parallel",
+        partial(Parallel, max_nbytes=100),
+    )
+
+    rng = np.random.RandomState(0)
+    X_train = rng.randn(10, 10)
+
+    dict_learner = DictionaryLearning(
+        n_components=5,
+        random_state=0,
+        n_jobs=2,
+        fit_algorithm="cd",
+        max_iter=50,
+        verbose=True,
+    )
+
+    # This must run and complete without error.
+    dict_learner.fit(X_train)
+
+
+# TODO(1.6): remove in 1.6
+def test_max_iter_none_deprecation_warning():
+    warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed"
+    with pytest.warns(FutureWarning, match=warn_msg):
+        MiniBatchDictionaryLearning(max_iter=None, random_state=0).fit(X)
+    with pytest.warns(FutureWarning, match=warn_msg):
+        dict_learning_online(X, max_iter=None, random_state=0)
diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ff14f8d71722463e4cd4f8c815c957ffd7ba9f0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py
@@ -0,0 +1,116 @@
+# Author: Christian Osendorfer
+#         Alexandre Gramfort
+# License: BSD3
+
+from itertools import combinations
+
+import numpy as np
+import pytest
+
+from sklearn.decomposition import FactorAnalysis
+from sklearn.decomposition._factor_analysis import _ortho_rotation
+from sklearn.exceptions import ConvergenceWarning
+from sklearn.utils._testing import (
+    assert_almost_equal,
+    assert_array_almost_equal,
+    ignore_warnings,
+)
+
+
+# Ignore warnings from switching to more power iterations in randomized_svd
+@ignore_warnings
+def test_factor_analysis():
+    # Test the ability of FactorAnalysis to recover the data covariance structure
+    rng = np.random.RandomState(0)
+    n_samples, n_features, n_components = 20, 5, 3
+
+    # Some random settings for the generative model
+    W = rng.randn(n_components, n_features)
+    # 20 latent variables of dimension 3
+    h = rng.randn(n_samples, n_components)
+    # using gamma to model a different noise variance
+    # per component
+    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
+
+    # generate observations
+    # wlog, mean is 0
+    X = np.dot(h, W) + noise
+
+    fas = []
+    for method in ["randomized", "lapack"]:
+        fa = FactorAnalysis(n_components=n_components, svd_method=method)
+        fa.fit(X)
+        fas.append(fa)
+
+        X_t = fa.transform(X)
+        assert X_t.shape == (n_samples, n_components)
+
+        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
+        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
+
+        # the log-likelihood should increase at each iteration
+        assert np.all(np.diff(fa.loglike_) > 0.0), "Log likelihood did not increase"
+
+        # Sample Covariance
+        scov = np.cov(X, rowvar=0.0, bias=1.0)
+
+        # Model Covariance
+        mcov = fa.get_covariance()
+        diff = np.sum(np.abs(scov - mcov)) / W.size
+        assert diff < 0.1, "Mean absolute difference is %f" % diff
+        fa = FactorAnalysis(
+            n_components=n_components, noise_variance_init=np.ones(n_features)
+        )
+        with pytest.raises(ValueError):
+            fa.fit(X[:, :2])
+
+    def f(x, y):
+        return np.abs(getattr(x, y))  # sign will not be equal
+
+    fa1, fa2 = fas
+    for attr in ["loglike_", "components_", "noise_variance_"]:
+        assert_almost_equal(f(fa1, attr), f(fa2, attr))
+
+    fa1.max_iter = 1
+    fa1.verbose = True
+    with pytest.warns(ConvergenceWarning):
+        fa1.fit(X)
+
+    # Test get_covariance and get_precision with n_components == n_features,
+    # with n_components < n_features and with n_components == 0
+    for n_components in [0, 2, X.shape[1]]:
+        fa.n_components = n_components
+        fa.fit(X)
+        cov = fa.get_covariance()
+        precision = fa.get_precision()
+        assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
+
+    # test rotation
+    n_components = 2
+
+    results, projections = {}, {}
+    for method in (None, "varimax", "quartimax"):
+        fa_var = 
FactorAnalysis(n_components=n_components, rotation=method) + results[method] = fa_var.fit_transform(X) + projections[method] = fa_var.get_covariance() + for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2): + assert not np.allclose(results[rot1], results[rot2]) + assert np.allclose(projections[rot1], projections[rot2], atol=3) + + # test against R's psych::principal with rotate="varimax" + # (i.e., the values below stem from rotating the components in R) + # R's factor analysis returns quite different values; therefore, we only + # test the rotation itself + factors = np.array( + [ + [0.89421016, -0.35854928, -0.27770122, 0.03773647], + [-0.45081822, -0.89132754, 0.0932195, -0.01787973], + [0.99500666, -0.02031465, 0.05426497, -0.11539407], + [0.96822861, -0.06299656, 0.24411001, 0.07540887], + ] + ) + r_solution = np.array( + [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]] + ) + rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T + assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py new file mode 100644 index 0000000000000000000000000000000000000000..6a376b01ecb19ab531729307229161eaca34946e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_fastica.py @@ -0,0 +1,451 @@ +""" +Test the fastica algorithm. +""" +import itertools +import os +import warnings + +import numpy as np +import pytest +from scipy import stats + +from sklearn.decomposition import PCA, FastICA, fastica +from sklearn.decomposition._fastica import _gs_decorrelation +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import assert_allclose + + +def center_and_norm(x, axis=-1): + """Centers and norms x **in place** + + Parameters + ----------- + x: ndarray + Array with an axis of observations (statistical units) measured on + random variables. + axis: int, optional + Axis along which the mean and variance are calculated. 
+ """ + x = np.rollaxis(x, axis) + x -= x.mean(axis=0) + x /= x.std(axis=0) + + +def test_gs(): + # Test gram schmidt orthonormalization + # generate a random orthogonal matrix + rng = np.random.RandomState(0) + W, _, _ = np.linalg.svd(rng.randn(10, 10)) + w = rng.randn(10) + _gs_decorrelation(w, W, 10) + assert (w**2).sum() < 1.0e-10 + w = rng.randn(10) + u = _gs_decorrelation(w, W, 5) + tmp = np.dot(u, W.T) + assert (tmp[:5] ** 2).sum() < 1.0e-10 + + +def test_fastica_attributes_dtypes(global_dtype): + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + fica = FastICA( + n_components=5, max_iter=1000, whiten="unit-variance", random_state=0 + ).fit(X) + assert fica.components_.dtype == global_dtype + assert fica.mixing_.dtype == global_dtype + assert fica.mean_.dtype == global_dtype + assert fica.whitening_.dtype == global_dtype + + +def test_fastica_return_dtypes(global_dtype): + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + k_, mixing_, s_ = fastica( + X, max_iter=1000, whiten="unit-variance", random_state=rng + ) + assert k_.dtype == global_dtype + assert mixing_.dtype == global_dtype + assert s_.dtype == global_dtype + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_fastica_simple(add_noise, global_random_seed, global_dtype): + if ( + global_random_seed == 20 + and global_dtype == np.float32 + and not add_noise + and os.getenv("DISTRIB") == "ubuntu" + ): + pytest.xfail( + "FastICA instability with Ubuntu Atlas build with float32 " + "global_dtype. For more details, see " + "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119" # noqa + ) + + # Test the FastICA algorithm on very simple data. + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + # Generate two sources: + s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 + s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed) + s = np.c_[s1, s2].T + center_and_norm(s) + s = s.astype(global_dtype) + s1, s2 = s + + # Mixing angle + phi = 0.6 + mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) + mixing = mixing.astype(global_dtype) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(2, 1000) + + center_and_norm(m) + + # function as fun arg + def g_test(x): + return x**3, (3 * x**2).mean(axis=-1) + + algos = ["parallel", "deflation"] + nls = ["logcosh", "exp", "cube", g_test] + whitening = ["arbitrary-variance", "unit-variance", False] + for algo, nl, whiten in itertools.product(algos, nls, whitening): + if whiten: + k_, mixing_, s_ = fastica( + m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng + ) + with pytest.raises(ValueError): + fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo) + else: + pca = PCA(n_components=2, whiten=True, random_state=rng) + X = pca.fit_transform(m.T) + k_, mixing_, s_ = fastica( + X, fun=nl, algorithm=algo, whiten=False, random_state=rng + ) + with pytest.raises(ValueError): + fastica(X, fun=np.tanh, algorithm=algo) + s_ = s_.T + # Check that the mixing model described in the docstring holds: + if whiten: + # XXX: exact reconstruction to standard relative tolerance is not + # possible. This is probably expected when add_noise is True but we + # also need a non-trivial atol in float32 when add_noise is False. + # + # Note that the 2 sources are non-Gaussian in this test. 
+ atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol) + + center_and_norm(s_) + s1_, s2_ = s_ + # Check to see if the sources have been estimated + # in the wrong order + if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): + s2_, s1_ = s_ + s1_ *= np.sign(np.dot(s1_, s1)) + s2_ *= np.sign(np.dot(s2_, s2)) + + # Check that we have estimated the original sources + if not add_noise: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2) + else: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1) + + # Test FastICA class + _, _, sources_fun = fastica( + m.T, fun=nl, algorithm=algo, random_state=global_random_seed + ) + ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed) + sources = ica.fit_transform(m.T) + assert ica.components_.shape == (2, 2) + assert sources.shape == (1000, 2) + + assert_allclose(sources_fun, sources) + # Set atol to account for the different magnitudes of the elements in sources + # (from 1e-4 to 1e1). + atol = np.max(np.abs(sources)) * (1e-5 if global_dtype == np.float32 else 1e-7) + assert_allclose(sources, ica.transform(m.T), atol=atol) + + assert ica.mixing_.shape == (2, 2) + + ica = FastICA(fun=np.tanh, algorithm=algo) + with pytest.raises(ValueError): + ica.fit(m.T) + + +def test_fastica_nowhiten(): + m = [[0, 1], [1, 0]] + + # test for issue #697 + ica = FastICA(n_components=1, whiten=False, random_state=0) + warn_msg = "Ignoring n_components with whiten=False." + with pytest.warns(UserWarning, match=warn_msg): + ica.fit(m) + assert hasattr(ica, "mixing_") + + +def test_fastica_convergence_fail(): + # Test the FastICA algorithm on very simple data + # (see test_non_square_fastica). + # Ensure a ConvergenceWarning raised if the tolerance is sufficiently low. + rng = np.random.RandomState(0) + + n_samples = 1000 + # Generate two sources: + t = np.linspace(0, 100, n_samples) + s1 = np.sin(t) + s2 = np.ceil(np.sin(np.pi * t)) + s = np.c_[s1, s2].T + center_and_norm(s) + + # Mixing matrix + mixing = rng.randn(6, 2) + m = np.dot(mixing, s) + + # Do fastICA with tolerance 0. to ensure failing convergence + warn_msg = ( + "FastICA did not converge. Consider increasing tolerance " + "or the maximum number of iterations." + ) + with pytest.warns(ConvergenceWarning, match=warn_msg): + ica = FastICA( + algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.0 + ) + ica.fit(m.T) + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_non_square_fastica(add_noise): + # Test the FastICA algorithm on very simple data. 
+ rng = np.random.RandomState(0) + + n_samples = 1000 + # Generate two sources: + t = np.linspace(0, 100, n_samples) + s1 = np.sin(t) + s2 = np.ceil(np.sin(np.pi * t)) + s = np.c_[s1, s2].T + center_and_norm(s) + s1, s2 = s + + # Mixing matrix + mixing = rng.randn(6, 2) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(6, n_samples) + + center_and_norm(m) + + k_, mixing_, s_ = fastica( + m.T, n_components=2, whiten="unit-variance", random_state=rng + ) + s_ = s_.T + + # Check that the mixing model described in the docstring holds: + assert_allclose(s_, np.dot(np.dot(mixing_, k_), m)) + + center_and_norm(s_) + s1_, s2_ = s_ + # Check to see if the sources have been estimated + # in the wrong order + if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): + s2_, s1_ = s_ + s1_ *= np.sign(np.dot(s1_, s1)) + s2_ *= np.sign(np.dot(s2_, s2)) + + # Check that we have estimated the original sources + if not add_noise: + assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3) + assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3) + + +def test_fit_transform(global_random_seed, global_dtype): + """Test unit variance of transformed data using FastICA algorithm. + + Check that `fit_transform` gives the same result as applying + `fit` and then `transform`. + + Bug #13056 + """ + # multivariate uniform data in [0, 1] + rng = np.random.RandomState(global_random_seed) + X = rng.random_sample((100, 10)).astype(global_dtype) + max_iter = 300 + for whiten, n_components in [["unit-variance", 5], [False, None]]: + n_components_ = n_components if n_components is not None else X.shape[1] + + ica = FastICA( + n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0 + ) + with warnings.catch_warnings(): + # make sure that numerical errors do not cause sqrt of negative + # values + warnings.simplefilter("error", RuntimeWarning) + # XXX: for some seeds, the model does not converge. + # However this is not what we test here. + warnings.simplefilter("ignore", ConvergenceWarning) + Xt = ica.fit_transform(X) + assert ica.components_.shape == (n_components_, 10) + assert Xt.shape == (X.shape[0], n_components_) + + ica2 = FastICA( + n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0 + ) + with warnings.catch_warnings(): + # make sure that numerical errors do not cause sqrt of negative + # values + warnings.simplefilter("error", RuntimeWarning) + warnings.simplefilter("ignore", ConvergenceWarning) + ica2.fit(X) + assert ica2.components_.shape == (n_components_, 10) + Xt2 = ica2.transform(X) + + # XXX: we have to set atol for this test to pass for all seeds when + # fitting with float32 data. Is this revealing a bug? 
+ if global_dtype: + atol = np.abs(Xt2).mean() / 1e6 + else: + atol = 0.0 # the default rtol is enough for float64 data + assert_allclose(Xt, Xt2, atol=atol) + + +@pytest.mark.filterwarnings("ignore:Ignoring n_components with whiten=False.") +@pytest.mark.parametrize( + "whiten, n_components, expected_mixing_shape", + [ + ("arbitrary-variance", 5, (10, 5)), + ("arbitrary-variance", 10, (10, 10)), + ("unit-variance", 5, (10, 5)), + ("unit-variance", 10, (10, 10)), + (False, 5, (10, 10)), + (False, 10, (10, 10)), + ], +) +def test_inverse_transform( + whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype +): + # Test FastICA.inverse_transform + n_samples = 100 + rng = np.random.RandomState(global_random_seed) + X = rng.random_sample((n_samples, 10)).astype(global_dtype) + + ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten) + with warnings.catch_warnings(): + # For some dataset (depending on the value of global_dtype) the model + # can fail to converge but this should not impact the definition of + # a valid inverse transform. + warnings.simplefilter("ignore", ConvergenceWarning) + Xt = ica.fit_transform(X) + assert ica.mixing_.shape == expected_mixing_shape + X2 = ica.inverse_transform(Xt) + assert X.shape == X2.shape + + # reversibility test in non-reduction case + if n_components == X.shape[1]: + # XXX: we have to set atol for this test to pass for all seeds when + # fitting with float32 data. Is this revealing a bug? + if global_dtype: + # XXX: dividing by a smaller number makes + # tests fail for some seeds. + atol = np.abs(X2).mean() / 1e5 + else: + atol = 0.0 # the default rtol is enough for float64 data + assert_allclose(X, X2, atol=atol) + + +def test_fastica_errors(): + n_features = 3 + n_samples = 10 + rng = np.random.RandomState(0) + X = rng.random_sample((n_samples, n_features)) + w_init = rng.randn(n_features + 1, n_features + 1) + with pytest.raises(ValueError, match=r"alpha must be in \[1,2\]"): + fastica(X, fun_args={"alpha": 0}) + with pytest.raises( + ValueError, match="w_init has invalid shape.+" r"should be \(3L?, 3L?\)" + ): + fastica(X, w_init=w_init) + + +def test_fastica_whiten_unit_variance(): + """Test unit variance of transformed data using FastICA algorithm. 
+ + Bug #13056 + """ + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)) + n_components = X.shape[1] + ica = FastICA(n_components=n_components, whiten="unit-variance", random_state=0) + Xt = ica.fit_transform(X) + + assert np.var(Xt) == pytest.approx(1.0) + + +@pytest.mark.parametrize("whiten", ["arbitrary-variance", "unit-variance", False]) +@pytest.mark.parametrize("return_X_mean", [True, False]) +@pytest.mark.parametrize("return_n_iter", [True, False]) +def test_fastica_output_shape(whiten, return_X_mean, return_n_iter): + n_features = 3 + n_samples = 10 + rng = np.random.RandomState(0) + X = rng.random_sample((n_samples, n_features)) + + expected_len = 3 + return_X_mean + return_n_iter + + out = fastica( + X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean + ) + + assert len(out) == expected_len + if not whiten: + assert out[0] is None + + +@pytest.mark.parametrize("add_noise", [True, False]) +def test_fastica_simple_different_solvers(add_noise, global_random_seed): + """Test FastICA is consistent between whiten_solvers.""" + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + # Generate two sources: + s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 + s2 = stats.t.rvs(1, size=n_samples, random_state=rng) + s = np.c_[s1, s2].T + center_and_norm(s) + s1, s2 = s + + # Mixing angle + phi = rng.rand() * 2 * np.pi + mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) + m = np.dot(mixing, s) + + if add_noise: + m += 0.1 * rng.randn(2, 1000) + + center_and_norm(m) + + outs = {} + for solver in ("svd", "eigh"): + ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver=solver) + sources = ica.fit_transform(m.T) + outs[solver] = sources + assert ica.components_.shape == (2, 2) + assert sources.shape == (1000, 2) + + # compared numbers are not all on the same magnitude. Using a small atol to + # make the test less brittle + assert_allclose(outs["eigh"], outs["svd"], atol=1e-12) + + +def test_fastica_eigh_low_rank_warning(global_random_seed): + """Test FastICA eigh solver raises warning for low-rank data.""" + rng = np.random.RandomState(global_random_seed) + A = rng.randn(10, 2) + X = A @ A.T + ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh") + msg = "There are some small singular values" + with pytest.warns(UserWarning, match=msg): + ica.fit(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..5d7c8aa03f174ca6b372cd6c42de15cb4a43d15a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py @@ -0,0 +1,452 @@ +"""Tests for Incremental PCA.""" +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn import datasets +from sklearn.decomposition import PCA, IncrementalPCA +from sklearn.utils._testing import ( + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_almost_equal, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS + +iris = datasets.load_iris() + + +def test_incremental_pca(): + # Incremental PCA on dense arrays. 
+ X = iris.data + batch_size = X.shape[0] // 3 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size) + pca = PCA(n_components=2) + pca.fit_transform(X) + + X_transformed = ipca.fit_transform(X) + + assert X_transformed.shape == (X.shape[0], 2) + np.testing.assert_allclose( + ipca.explained_variance_ratio_.sum(), + pca.explained_variance_ratio_.sum(), + rtol=1e-3, + ) + + for n_components in [1, 2, X.shape[1]]: + ipca = IncrementalPCA(n_components, batch_size=batch_size) + ipca.fit(X) + cov = ipca.get_covariance() + precision = ipca.get_precision() + np.testing.assert_allclose( + np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13 + ) + + +@pytest.mark.parametrize( + "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS +) +def test_incremental_pca_sparse(sparse_container): + # Incremental PCA on sparse arrays. + X = iris.data + pca = PCA(n_components=2) + pca.fit_transform(X) + X_sparse = sparse_container(X) + batch_size = X_sparse.shape[0] // 3 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size) + + X_transformed = ipca.fit_transform(X_sparse) + + assert X_transformed.shape == (X_sparse.shape[0], 2) + np.testing.assert_allclose( + ipca.explained_variance_ratio_.sum(), + pca.explained_variance_ratio_.sum(), + rtol=1e-3, + ) + + for n_components in [1, 2, X.shape[1]]: + ipca = IncrementalPCA(n_components, batch_size=batch_size) + ipca.fit(X_sparse) + cov = ipca.get_covariance() + precision = ipca.get_precision() + np.testing.assert_allclose( + np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13 + ) + + with pytest.raises( + TypeError, + match=( + "IncrementalPCA.partial_fit does not support " + "sparse input. Either convert data to dense " + "or use IncrementalPCA.fit to do so in batches." + ), + ): + ipca.partial_fit(X_sparse) + + +def test_incremental_pca_check_projection(): + # Test that the projection of data is correct. + rng = np.random.RandomState(1999) + n, p = 100, 3 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5]) + Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) + + # Get the reconstruction of the generated data X + # Note that Xt has the same "components" as X, just separated + # This is what we want to ensure is recreated correctly + Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt) + + # Normalize + Yt /= np.sqrt((Yt**2).sum()) + + # Make sure that the first element of Yt is ~1, this means + # the reconstruction worked as expected + assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1) + + +def test_incremental_pca_inverse(): + # Test that the projection of data can be inverted. + rng = np.random.RandomState(1999) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X) + Y = ipca.transform(X) + Y_inverse = ipca.inverse_transform(Y) + assert_almost_equal(X, Y_inverse, decimal=3) + + +def test_incremental_pca_validation(): + # Test that n_components is <= n_features. 
+ X = np.array([[0, 1, 0], [1, 0, 0]]) + n_samples, n_features = X.shape + n_components = 4 + with pytest.raises( + ValueError, + match=( + "n_components={} invalid" + " for n_features={}, need more rows than" + " columns for IncrementalPCA" + " processing".format(n_components, n_features) + ), + ): + IncrementalPCA(n_components, batch_size=10).fit(X) + + # Tests that n_components is also <= n_samples. + n_components = 3 + with pytest.raises( + ValueError, + match=( + "n_components={} must be" + " less or equal to the batch number of" + " samples {}".format(n_components, n_samples) + ), + ): + IncrementalPCA(n_components=n_components).partial_fit(X) + + +def test_n_samples_equal_n_components(): + # Ensures no warning is raised when n_samples==n_components + # Non-regression test for gh-19050 + ipca = IncrementalPCA(n_components=5) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + ipca.partial_fit(np.random.randn(5, 7)) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + ipca.fit(np.random.randn(5, 7)) + + +def test_n_components_none(): + # Ensures that n_components == None is handled correctly + rng = np.random.RandomState(1999) + for n_samples, n_features in [(50, 10), (10, 50)]: + X = rng.rand(n_samples, n_features) + ipca = IncrementalPCA(n_components=None) + + # First partial_fit call, ipca.n_components_ is inferred from + # min(X.shape) + ipca.partial_fit(X) + assert ipca.n_components_ == min(X.shape) + + # Second partial_fit call, ipca.n_components_ is inferred from + # ipca.components_ computed from the first partial_fit call + ipca.partial_fit(X) + assert ipca.n_components_ == ipca.components_.shape[0] + + +def test_incremental_pca_set_params(): + # Test that components_ sign is stable over batch sizes. + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 20 + X = rng.randn(n_samples, n_features) + X2 = rng.randn(n_samples, n_features) + X3 = rng.randn(n_samples, n_features) + ipca = IncrementalPCA(n_components=20) + ipca.fit(X) + # Decreasing number of components + ipca.set_params(n_components=10) + with pytest.raises(ValueError): + ipca.partial_fit(X2) + # Increasing number of components + ipca.set_params(n_components=15) + with pytest.raises(ValueError): + ipca.partial_fit(X3) + # Returning to original setting + ipca.set_params(n_components=20) + ipca.partial_fit(X) + + +def test_incremental_pca_num_features_change(): + # Test that changing n_components will raise an error. + rng = np.random.RandomState(1999) + n_samples = 100 + X = rng.randn(n_samples, 20) + X2 = rng.randn(n_samples, 50) + ipca = IncrementalPCA(n_components=None) + ipca.fit(X) + with pytest.raises(ValueError): + ipca.partial_fit(X2) + + +def test_incremental_pca_batch_signs(): + # Test that components_ sign is stable over batch sizes. + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(10, 20) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for i, j in zip(all_components[:-1], all_components[1:]): + assert_almost_equal(np.sign(i), np.sign(j), decimal=6) + + +def test_incremental_pca_batch_values(): + # Test that components_ values are stable over batch sizes. 
+ rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(20, 40, 3) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for i, j in zip(all_components[:-1], all_components[1:]): + assert_almost_equal(i, j, decimal=1) + + +def test_incremental_pca_batch_rank(): + # Test sample size in each batch is always larger or equal to n_components + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 20 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(20, 90, 3) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for components_i, components_j in zip(all_components[:-1], all_components[1:]): + assert_allclose_dense_sparse(components_i, components_j) + + +def test_incremental_pca_partial_fit(): + # Test that fit and partial_fit get equivalent results. + rng = np.random.RandomState(1999) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + batch_size = 10 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X) + pipca = IncrementalPCA(n_components=2, batch_size=batch_size) + # Add one to make sure endpoint is included + batch_itr = np.arange(0, n + 1, batch_size) + for i, j in zip(batch_itr[:-1], batch_itr[1:]): + pipca.partial_fit(X[i:j, :]) + assert_almost_equal(ipca.components_, pipca.components_, decimal=3) + + +def test_incremental_pca_against_pca_iris(): + # Test that IncrementalPCA and PCA are approximate (to a sign flip). + X = iris.data + + Y_pca = PCA(n_components=2).fit_transform(X) + Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X) + + assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) + + +def test_incremental_pca_against_pca_random_data(): + # Test that IncrementalPCA and PCA are approximate (to a sign flip). 
+ rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features) + + Y_pca = PCA(n_components=3).fit_transform(X) + Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X) + + assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) + + +def test_explained_variances(): + # Test that PCA and IncrementalPCA calculations match + X = datasets.make_low_rank_matrix( + 1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999 + ) + prec = 3 + n_samples, n_features = X.shape + for nc in [None, 99]: + pca = PCA(n_components=nc).fit(X) + ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X) + assert_almost_equal( + pca.explained_variance_, ipca.explained_variance_, decimal=prec + ) + assert_almost_equal( + pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec + ) + assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec) + + +def test_singular_values(): + # Check that the IncrementalPCA output has the correct singular values + + rng = np.random.RandomState(0) + n_samples = 1000 + n_features = 100 + + X = datasets.make_low_rank_matrix( + n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng + ) + + pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X) + ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X) + assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2) + + # Compare to the Frobenius norm + X_pca = pca.transform(X) + X_ipca = ipca.transform(X) + assert_array_almost_equal( + np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12 + ) + assert_array_almost_equal( + np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2 + ) + + # Compare to the 2-norms of the score vectors + assert_array_almost_equal( + pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12 + ) + assert_array_almost_equal( + ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2 + ) + + # Set the singular values and see what we get back + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 110 + + X = datasets.make_low_rank_matrix( + n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng + ) + + pca = PCA(n_components=3, svd_solver="full", random_state=rng) + ipca = IncrementalPCA(n_components=3, batch_size=100) + + X_pca = pca.fit_transform(X) + X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) + X_pca[:, 0] *= 3.142 + X_pca[:, 1] *= 2.718 + + X_hat = np.dot(X_pca, pca.components_) + pca.fit(X_hat) + ipca.fit(X_hat) + assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14) + assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14) + + +def test_whitening(): + # Test that PCA and IncrementalPCA transforms match to sign flip. 
+ X = datasets.make_low_rank_matrix( + 1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999 + ) + prec = 3 + n_samples, n_features = X.shape + for nc in [None, 9]: + pca = PCA(whiten=True, n_components=nc).fit(X) + ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X) + + Xt_pca = pca.transform(X) + Xt_ipca = ipca.transform(X) + assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec) + Xinv_ipca = ipca.inverse_transform(Xt_ipca) + Xinv_pca = pca.inverse_transform(Xt_pca) + assert_almost_equal(X, Xinv_ipca, decimal=prec) + assert_almost_equal(X, Xinv_pca, decimal=prec) + assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec) + + +def test_incremental_pca_partial_fit_float_division(): + # Test to ensure float division is used in all versions of Python + # (non-regression test for issue #9489) + + rng = np.random.RandomState(0) + A = rng.randn(5, 3) + 2 + B = rng.randn(7, 3) + 5 + + pca = IncrementalPCA(n_components=2) + pca.partial_fit(A) + # Set n_samples_seen_ to be a floating point number instead of an int + pca.n_samples_seen_ = float(pca.n_samples_seen_) + pca.partial_fit(B) + singular_vals_float_samples_seen = pca.singular_values_ + + pca2 = IncrementalPCA(n_components=2) + pca2.partial_fit(A) + pca2.partial_fit(B) + singular_vals_int_samples_seen = pca2.singular_values_ + + np.testing.assert_allclose( + singular_vals_float_samples_seen, singular_vals_int_samples_seen + ) + + +def test_incremental_pca_fit_overflow_error(): + # Test for overflow error on Windows OS + # (non-regression test for issue #17693) + rng = np.random.RandomState(0) + A = rng.rand(500000, 2) + + ipca = IncrementalPCA(n_components=2, batch_size=10000) + ipca.fit(A) + + pca = PCA(n_components=2) + pca.fit(A) + + np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_) + + +def test_incremental_pca_feature_names_out(): + """Check feature names out for IncrementalPCA.""" + ipca = IncrementalPCA(n_components=2).fit(iris.data) + + names = ipca.get_feature_names_out() + assert_array_equal([f"incrementalpca{i}" for i in range(2)], names) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..b222cf4e158ff7059c6e0c43fff678d907b82fea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py @@ -0,0 +1,566 @@ +import warnings + +import numpy as np +import pytest + +import sklearn +from sklearn.datasets import load_iris, make_blobs, make_circles +from sklearn.decomposition import PCA, KernelPCA +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import Perceptron +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import _check_psd_eigenvalues + + +def test_kernel_pca(): + """Nominal test for all solvers and all known kernels + a custom one + + It tests + - that fit_transform is equivalent to fit+transform + - that the shapes of transforms and inverse transforms are correct + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = 
rng.random_sample((2, 4)) + + def histogram(x, y, **kwargs): + # Histogram kernel implemented as a callable. + assert kwargs == {} # no kernel_params that we didn't ask for + return np.minimum(x, y).sum() + + for eigen_solver in ("auto", "dense", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly", histogram): + # histogram kernel produces singular matrix inside linalg.solve + # XXX use a least-squares approximation? + inv = not callable(kernel) + + # transform fit data + kpca = KernelPCA( + 4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # non-regression test: previously, gamma would be 0 by default, + # forcing all eigenvalues to 0 under the poly kernel + assert X_fit_transformed.size != 0 + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform + if inv: + X_pred2 = kpca.inverse_transform(X_pred_transformed) + assert X_pred2.shape == X_pred.shape + + +def test_kernel_pca_invalid_parameters(): + """Check that kPCA raises an error if the parameters are invalid + + Tests fitting inverse transform with a precomputed kernel raises a + ValueError. + """ + estimator = KernelPCA( + n_components=10, fit_inverse_transform=True, kernel="precomputed" + ) + err_ms = "Cannot fit_inverse_transform with a precomputed kernel" + with pytest.raises(ValueError, match=err_ms): + estimator.fit(np.random.randn(10, 10)) + + +def test_kernel_pca_consistent_transform(): + """Check robustness to mutations in the original training array + + Test that after fitting a kPCA model, it stays independent of any + mutation of the values of the original data object by relying on an + internal copy. + """ + # X_fit_ needs to retain the old, unmodified copy of X + state = np.random.RandomState(0) + X = state.rand(10, 10) + kpca = KernelPCA(random_state=state).fit(X) + transformed1 = kpca.transform(X) + + X_copy = X.copy() + X[:, 0] = 666 + transformed2 = kpca.transform(X_copy) + assert_array_almost_equal(transformed1, transformed2) + + +def test_kernel_pca_deterministic_output(): + """Test that Kernel PCA produces deterministic output + + Tests that the same inputs and random state produce the same output. + """ + rng = np.random.RandomState(0) + X = rng.rand(10, 10) + eigen_solver = ("arpack", "dense") + + for solver in eigen_solver: + transformed_X = np.zeros((20, 2)) + for i in range(20): + kpca = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng) + transformed_X[i, :] = kpca.fit_transform(X)[0] + assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_kernel_pca_sparse(csr_container): + """Test that kPCA works on a sparse data input. + + Same test as ``test_kernel_pca except inverse_transform`` since it's not + implemented for sparse matrices. 
+ """ + rng = np.random.RandomState(0) + X_fit = csr_container(rng.random_sample((5, 4))) + X_pred = csr_container(rng.random_sample((2, 4))) + + for eigen_solver in ("auto", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly"): + # transform fit data + kpca = KernelPCA( + 4, + kernel=kernel, + eigen_solver=eigen_solver, + fit_inverse_transform=False, + random_state=0, + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform: not available for sparse matrices + # XXX: should we raise another exception type here? For instance: + # NotImplementedError. + with pytest.raises(NotFittedError): + kpca.inverse_transform(X_pred_transformed) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +@pytest.mark.parametrize("n_features", [4, 10]) +def test_kernel_pca_linear_kernel(solver, n_features): + """Test that kPCA with linear kernel is equivalent to PCA for all solvers. + + KernelPCA with linear kernel should produce the same output as PCA. + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, n_features)) + X_pred = rng.random_sample((2, n_features)) + + # for a linear kernel, kernel PCA should find the same projection as PCA + # modulo the sign (direction) + # fit only the first four components: fifth is near zero eigenvalue, so + # can be trimmed due to roundoff error + n_comps = 3 if solver == "arpack" else 4 + assert_array_almost_equal( + np.abs(KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)), + np.abs( + PCA(n_comps, svd_solver=solver if solver != "dense" else "full") + .fit(X_fit) + .transform(X_pred) + ), + ) + + +def test_kernel_pca_n_components(): + """Test that `n_components` is correctly taken into account for projections + + For all solvers this tests that the output has the correct shape depending + on the selected number of components. + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + for c in [1, 2, 4]: + kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver) + shape = kpca.fit(X_fit).transform(X_pred).shape + + assert shape == (2, c) + + +def test_remove_zero_eig(): + """Check that the ``remove_zero_eig`` parameter works correctly. + + Tests that the null-space (Zero) eigenvalues are removed when + remove_zero_eig=True, whereas they are not by default. + """ + X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]]) + + # n_components=None (default) => remove_zero_eig is True + kpca = KernelPCA() + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + kpca = KernelPCA(n_components=2) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 2) + + kpca = KernelPCA(n_components=2, remove_zero_eig=True) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + +def test_leave_zero_eig(): + """Non-regression test for issue #12141 (PR #12143) + + This test checks that fit().transform() returns the same result as + fit_transform() in case of non-removed zero eigenvalue. 
+ """ + X_fit = np.array([[1, 1], [0, 0]]) + + # Assert that even with all np warnings on, there is no div by zero warning + with warnings.catch_warnings(): + # There might be warnings about the kernel being badly conditioned, + # but there should not be warnings about division by zero. + # (Numpy division by zero warning can have many message variants, but + # at least we know that it is a RuntimeWarning so lets check only this) + warnings.simplefilter("error", RuntimeWarning) + with np.errstate(all="warn"): + k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense") + # Fit, then transform + A = k.fit(X_fit).transform(X_fit) + # Do both at once + B = k.fit_transform(X_fit) + # Compare + assert_array_almost_equal(np.abs(A), np.abs(B)) + + +def test_kernel_pca_precomputed(): + """Test that kPCA works with a precomputed kernel, for all solvers""" + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + X_kpca = ( + KernelPCA(4, eigen_solver=eigen_solver, random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + X_kpca2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_pred, X_fit.T)) + ) + + X_kpca_train = KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ).fit_transform(np.dot(X_fit, X_fit.T)) + + X_kpca_train2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_fit, X_fit.T)) + ) + + assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2)) + + assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_kernel_pca_precomputed_non_symmetric(solver): + """Check that the kernel centerer works. + + Tests that a non symmetric precomputed kernel is actually accepted + because the kernel centerer does its job correctly. + """ + + # a non symmetric gram matrix + K = [[1, 2], [3, 40]] + kpca = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca.fit(K) # no error + + # same test with centered kernel + Kc = [[9, -9], [-9, 9]] + kpca_c = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca_c.fit(Kc) + + # comparison between the non-centered and centered versions + assert_array_equal(kpca.eigenvectors_, kpca_c.eigenvectors_) + assert_array_equal(kpca.eigenvalues_, kpca_c.eigenvalues_) + + +def test_gridsearch_pipeline(): + """Check that kPCA works as expected in a grid search pipeline + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="rbf", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(kernel_pca__gamma=2.0 ** np.arange(-2, 2)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + grid_search.fit(X, y) + assert grid_search.best_score_ == 1 + + +def test_gridsearch_pipeline_precomputed(): + """Check that kPCA works as expected in a grid search pipeline (2) + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. This test uses a precomputed kernel. 
+ """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="precomputed", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(Perceptron__max_iter=np.arange(1, 5)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + X_kernel = rbf_kernel(X, gamma=2.0) + grid_search.fit(X_kernel, y) + assert grid_search.best_score_ == 1 + + +def test_nested_circles(): + """Check that kPCA projects in a space where nested circles are separable + + Tests that 2D nested circles become separable with a perceptron when + projected in the first 2 kPCA using an RBF kernel, while raw samples + are not directly separable in the original space. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + + # 2D nested circles are not linearly separable + train_score = Perceptron(max_iter=5).fit(X, y).score(X, y) + assert train_score < 0.8 + + # Project the circles data into the first 2 components of a RBF Kernel + # PCA model. + # Note that the gamma value is data dependent. If this test breaks + # and the gamma value has to be updated, the Kernel PCA example will + # have to be updated too. + kpca = KernelPCA( + kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0 + ) + X_kpca = kpca.fit_transform(X) + + # The data is perfectly linearly separable in that space + train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y) + assert train_score == 1.0 + + +def test_kernel_conditioning(): + """Check that ``_check_psd_eigenvalues`` is correctly called in kPCA + + Non-regression test for issue #12140 (PR #12145). + """ + + # create a pathological X leading to small non-zero eigenvalue + X = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]] + kpca = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True) + kpca.fit(X) + + # check that the small non-zero eigenvalue was correctly set to zero + assert kpca.eigenvalues_.min() == 0 + assert np.all(kpca.eigenvalues_ == _check_psd_eigenvalues(kpca.eigenvalues_)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_precomputed_kernel_not_psd(solver): + """Check how KernelPCA works with non-PSD kernels depending on n_components + + Tests for all methods what happens with a non PSD gram matrix (this + can happen in an isomap scenario, or with custom kernel functions, or + maybe with ill-posed datasets). + + When ``n_component`` is large enough to capture a negative eigenvalue, an + error should be raised. Otherwise, KernelPCA should run without error + since the negative eigenvalues are not selected. + """ + + # a non PSD kernel with large eigenvalues, already centered + # it was captured from an isomap call and multiplied by 100 for compacity + K = [ + [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78], + [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49], + [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23], + [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9], + [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75], + [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46], + ] + # this gram matrix has 5 positive eigenvalues and 3 negative ones + # [ 52.72, 7.65, 7.65, 5.02, 0. , -0. , -6.13, -15.11] + + # 1. 
ask for enough components to get a significant negative one + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7) + # make sure that the appropriate error is raised + with pytest.raises(ValueError, match="There are significant negative eigenvalues"): + kpca.fit(K) + + # 2. ask for a small enough n_components to get only positive ones + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2) + if solver == "randomized": + # the randomized method is still inconsistent with the others on this + # since it selects the eigenvalues based on the largest 2 modules, not + # on the largest 2 values. + # + # At least we can ensure that we return an error instead of returning + # the wrong eigenvalues + with pytest.raises( + ValueError, match="There are significant negative eigenvalues" + ): + kpca.fit(K) + else: + # general case: make sure that it works + kpca.fit(K) + + +@pytest.mark.parametrize("n_components", [4, 10, 20]) +def test_kernel_pca_solvers_equivalence(n_components): + """Check that 'dense' 'arpack' & 'randomized' solvers give similar results""" + + # Generate random data + n_train, n_test = 1_000, 100 + X, _ = make_circles( + n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0 + ) + X_fit, X_pred = X[:n_train, :], X[n_train:, :] + + # reference (full) + ref_pred = ( + KernelPCA(n_components, eigen_solver="dense", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + # arpack + a_pred = ( + KernelPCA(n_components, eigen_solver="arpack", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approx + assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred)) + + # randomized + r_pred = ( + KernelPCA(n_components, eigen_solver="randomized", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approximation + assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred)) + + +def test_kernel_pca_inverse_transform_reconstruction(): + """Test if the reconstruction is a good approximation. + + Note that in general it is not possible to get an arbitrarily good + reconstruction because of kernel centering that does not + preserve all the information of the original data. 
+ """ + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + + kpca = KernelPCA( + n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3 + ) + X_trans = kpca.fit_transform(X) + X_reconst = kpca.inverse_transform(X_trans) + assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1 + + +def test_kernel_pca_raise_not_fitted_error(): + X = np.random.randn(15).reshape(5, 3) + kpca = KernelPCA() + kpca.fit(X) + with pytest.raises(NotFittedError): + kpca.inverse_transform(X) + + +def test_32_64_decomposition_shape(): + """Test that the decomposition is similar for 32 and 64 bits data + + Non regression test for + https://github.com/scikit-learn/scikit-learn/issues/18146 + """ + X, y = make_blobs( + n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1 + ) + X = StandardScaler().fit_transform(X) + X -= X.min() + + # Compare the shapes (corresponds to the number of non-zero eigenvalues) + kpca = KernelPCA() + assert kpca.fit_transform(X).shape == kpca.fit_transform(X.astype(np.float32)).shape + + +def test_kernel_pca_feature_names_out(): + """Check feature names out for KernelPCA.""" + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + kpca = KernelPCA(n_components=2).fit(X) + + names = kpca.get_feature_names_out() + assert_array_equal([f"kernelpca{i}" for i in range(2)], names) + + +def test_kernel_pca_inverse_correct_gamma(): + """Check that gamma is set correctly when not provided. + + Non-regression test for #26280 + """ + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + + kwargs = { + "n_components": 2, + "random_state": rng, + "fit_inverse_transform": True, + "kernel": "rbf", + } + + expected_gamma = 1 / X.shape[1] + kpca1 = KernelPCA(gamma=None, **kwargs).fit(X) + kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X) + + assert kpca1.gamma_ == expected_gamma + assert kpca2.gamma_ == expected_gamma + + X1_recon = kpca1.inverse_transform(kpca1.transform(X)) + X2_recon = kpca2.inverse_transform(kpca1.transform(X)) + + assert_allclose(X1_recon, X2_recon) + + +def test_kernel_pca_pandas_output(): + """Check that KernelPCA works with pandas output when the solver is arpack. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27579 + """ + pytest.importorskip("pandas") + X, _ = load_iris(as_frame=True, return_X_y=True) + with sklearn.config_context(transform_output="pandas"): + KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py new file mode 100644 index 0000000000000000000000000000000000000000..2112b59129e254eea3fffcec23ce08bb974cacd7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_nmf.py @@ -0,0 +1,1062 @@ +import re +import sys +import warnings +from io import StringIO + +import numpy as np +import pytest +from scipy import linalg + +from sklearn.base import clone +from sklearn.decomposition import NMF, MiniBatchNMF, non_negative_factorization +from sklearn.decomposition import _nmf as nmf # For testing internals +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.extmath import squared_norm +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_convergence_warning(Estimator, solver): + convergence_warning = ( + "Maximum number of iterations 1 reached. Increase it to improve convergence." + ) + A = np.ones((2, 2)) + with pytest.warns(ConvergenceWarning, match=convergence_warning): + Estimator(max_iter=1, n_components="auto", **solver).fit(A) + + +def test_initialize_nn_output(): + # Test that initialization does not return negative values + rng = np.random.mtrand.RandomState(42) + data = np.abs(rng.randn(10, 10)) + for init in ("random", "nndsvd", "nndsvda", "nndsvdar"): + W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0) + assert not ((W < 0).any() or (H < 0).any()) + + +# TODO(1.6): remove the warning filter for `n_components` +@pytest.mark.filterwarnings( + r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in" + r" the initialization", + "ignore:The default value of `n_components` will change", +) +def test_parameter_checking(): + # Here we only check for invalid parameter values that are not already + # automatically tested in the common tests. 
+ + A = np.ones((2, 2)) + + msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0" + with pytest.raises(ValueError, match=msg): + NMF(solver="cd", beta_loss=1.0).fit(A) + msg = "Negative values in data passed to" + with pytest.raises(ValueError, match=msg): + NMF().fit(-A) + clf = NMF(2, tol=0.1).fit(A) + with pytest.raises(ValueError, match=msg): + clf.transform(-A) + with pytest.raises(ValueError, match=msg): + nmf._initialize_nmf(-A, 2, "nndsvd") + + for init in ["nndsvd", "nndsvda", "nndsvdar"]: + msg = re.escape( + "init = '{}' can only be used when " + "n_components <= min(n_samples, n_features)".format(init) + ) + with pytest.raises(ValueError, match=msg): + NMF(3, init=init).fit(A) + with pytest.raises(ValueError, match=msg): + MiniBatchNMF(3, init=init).fit(A) + with pytest.raises(ValueError, match=msg): + nmf._initialize_nmf(A, 3, init) + + +def test_initialize_close(): + # Test NNDSVD error + # Test that _initialize_nmf error is less than the standard deviation of + # the entries in the matrix. + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + W, H = nmf._initialize_nmf(A, 10, init="nndsvd") + error = linalg.norm(np.dot(W, H) - A) + sdev = linalg.norm(A - A.mean()) + assert error <= sdev + + +def test_initialize_variants(): + # Test NNDSVD variants correctness + # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic + # 'nndsvd' only where the basic version has zeros. + rng = np.random.mtrand.RandomState(42) + data = np.abs(rng.randn(10, 10)) + W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd") + Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda") + War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0) + + for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)): + assert_almost_equal(evl[ref != 0], ref[ref != 0]) + + +# ignore UserWarning raised when both solver='mu' and init='nndsvd' +@ignore_warnings(category=UserWarning) +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random")) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H): + # Test that the decomposition does not contain negative values + A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)] + model = Estimator( + n_components=2, + init=init, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=0, + **solver, + ) + transf = model.fit_transform(A) + assert not ((model.components_ < 0).any() or (transf < 0).any()) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_fit_close(Estimator, solver): + rng = np.random.mtrand.RandomState(42) + # Test that the fit is not too far away + pnmf = Estimator( + 5, + init="nndsvdar", + random_state=0, + max_iter=600, + **solver, + ) + X = np.abs(rng.randn(6, 5)) + assert pnmf.fit(X).reconstruction_err_ < 0.1 + + +def test_nmf_true_reconstruction(): + # Test that the fit is not too far away from an exact solution + # (by construction) + n_samples = 15 + n_features = 10 + n_components = 5 + beta_loss = 1 + batch_size = 3 + max_iter = 1000 + + rng = np.random.mtrand.RandomState(42) + W_true = np.zeros([n_samples, n_components]) + W_array = np.abs(rng.randn(n_samples)) + for j in range(n_components): + W_true[j % n_samples, 
j] = W_array[j % n_samples] + H_true = np.zeros([n_components, n_features]) + H_array = np.abs(rng.randn(n_components)) + for j in range(n_features): + H_true[j % n_components, j] = H_array[j % n_components] + X = np.dot(W_true, H_true) + + model = NMF( + n_components=n_components, + solver="mu", + beta_loss=beta_loss, + max_iter=max_iter, + random_state=0, + ) + transf = model.fit_transform(X) + X_calc = np.dot(transf, model.components_) + + assert model.reconstruction_err_ < 0.1 + assert_allclose(X, X_calc) + + mbmodel = MiniBatchNMF( + n_components=n_components, + beta_loss=beta_loss, + batch_size=batch_size, + random_state=0, + max_iter=max_iter, + ) + transf = mbmodel.fit_transform(X) + X_calc = np.dot(transf, mbmodel.components_) + + assert mbmodel.reconstruction_err_ < 0.1 + assert_allclose(X, X_calc, atol=1) + + +@pytest.mark.parametrize("solver", ["cd", "mu"]) +def test_nmf_transform(solver): + # Test that fit_transform is equivalent to fit.transform for NMF + # Test that NMF.transform returns close values + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + m = NMF( + solver=solver, + n_components=3, + init="random", + random_state=0, + tol=1e-6, + ) + ft = m.fit_transform(A) + t = m.transform(A) + assert_allclose(ft, t, atol=1e-1) + + +def test_minibatch_nmf_transform(): + # Test that fit_transform is equivalent to fit.transform for MiniBatchNMF + # Only guaranteed with fresh restarts + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + m = MiniBatchNMF( + n_components=3, + random_state=0, + tol=1e-3, + fresh_restarts=True, + ) + ft = m.fit_transform(A) + t = m.transform(A) + assert_allclose(ft, t) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_transform_custom_init(Estimator, solver): + # Smoke test that checks if NMF.transform works with custom initialization + random_state = np.random.RandomState(0) + A = np.abs(random_state.randn(6, 5)) + n_components = 4 + avg = np.sqrt(A.mean() / n_components) + H_init = np.abs(avg * random_state.randn(n_components, 5)) + W_init = np.abs(avg * random_state.randn(6, n_components)) + + m = Estimator( + n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver + ) + m.fit_transform(A, W=W_init, H=H_init) + m.transform(A) + + +@pytest.mark.parametrize("solver", ("cd", "mu")) +def test_nmf_inverse_transform(solver): + # Test that NMF.inverse_transform returns close values + random_state = np.random.RandomState(0) + A = np.abs(random_state.randn(6, 4)) + m = NMF( + solver=solver, + n_components=4, + init="random", + random_state=0, + max_iter=1000, + ) + ft = m.fit_transform(A) + A_new = m.inverse_transform(ft) + assert_array_almost_equal(A, A_new, decimal=2) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +def test_mbnmf_inverse_transform(): + # Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform + # is close to the identity + rng = np.random.RandomState(0) + A = np.abs(rng.randn(6, 4)) + nmf = MiniBatchNMF( + random_state=rng, + max_iter=500, + init="nndsvdar", + fresh_restarts=True, + ) + ft = nmf.fit_transform(A) + A_new = nmf.inverse_transform(ft) + assert_allclose(A, A_new, rtol=1e-3, atol=1e-2) + + +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_n_components_greater_n_features(Estimator): + # Smoke test for the case of more components than features. 
+ rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(30, 10)) + Estimator(n_components=15, random_state=0, tol=1e-2).fit(A) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_nmf_sparse_input(Estimator, solver, sparse_container, alpha_W, alpha_H): + # Test that sparse matrices are accepted as input + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + A[:, 2 * np.arange(5)] = 0 + A_sparse = sparse_container(A) + + est1 = Estimator( + n_components=5, + init="random", + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=0, + tol=0, + max_iter=100, + **solver, + ) + est2 = clone(est1) + + W1 = est1.fit_transform(A) + W2 = est2.fit_transform(A_sparse) + H1 = est1.components_ + H2 = est2.components_ + + assert_allclose(W1, W2) + assert_allclose(H1, H2) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_nmf_sparse_transform(Estimator, solver, csc_container): + # Test that transform works on sparse data. Issue #2124 + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(3, 2)) + A[1, 1] = 0 + A = csc_container(A) + + model = Estimator(random_state=0, n_components=2, max_iter=400, **solver) + A_fit_tr = model.fit_transform(A) + A_tr = model.transform(A) + assert_allclose(A_fit_tr, A_tr, atol=1e-1) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("init", ["random", "nndsvd"]) +@pytest.mark.parametrize("solver", ("cd", "mu")) +@pytest.mark.parametrize("alpha_W", (0.0, 1.0)) +@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) +def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H): + # Test that the function is called in the same way, either directly + # or through the NMF class + max_iter = 500 + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(10, 10)) + A[:, 2 * np.arange(5)] = 0 + + W_nmf, H, _ = non_negative_factorization( + A, + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + W_nmf_2, H, _ = non_negative_factorization( + A, + H=H, + update_H=False, + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + + model_class = NMF( + init=init, + solver=solver, + max_iter=max_iter, + alpha_W=alpha_W, + alpha_H=alpha_H, + random_state=1, + tol=1e-2, + ) + W_cls = model_class.fit_transform(A) + W_cls_2 = model_class.transform(A) + + assert_allclose(W_nmf, W_cls) + assert_allclose(W_nmf_2, W_cls_2) + + +def test_non_negative_factorization_checking(): + # Note that the validity of parameter types and range of possible values + # for scalar numerical or str parameters is already checked in the common + # tests. Here we only check for problems that cannot be captured by simple + # declarative constraints on the valid parameter values. 
+ + A = np.ones((2, 2)) + # Test parameters checking in public function + nnmf = non_negative_factorization + msg = re.escape("Negative values in data passed to NMF (input H)") + with pytest.raises(ValueError, match=msg): + nnmf(A, A, -A, 2, init="custom") + msg = re.escape("Negative values in data passed to NMF (input W)") + with pytest.raises(ValueError, match=msg): + nnmf(A, -A, A, 2, init="custom") + msg = re.escape("Array passed to NMF (input H) is full of zeros") + with pytest.raises(ValueError, match=msg): + nnmf(A, A, 0 * A, 2, init="custom") + + +def _beta_divergence_dense(X, W, H, beta): + """Compute the beta-divergence of X and W.H for dense array only. + + Used as a reference for testing nmf._beta_divergence. + """ + WH = np.dot(W, H) + + if beta == 2: + return squared_norm(X - WH) / 2 + + WH_Xnonzero = WH[X != 0] + X_nonzero = X[X != 0] + np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero) + + if beta == 1: + res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero)) + res += WH.sum() - X.sum() + + elif beta == 0: + div = X_nonzero / WH_Xnonzero + res = np.sum(div) - X.size - np.sum(np.log(div)) + else: + res = (X_nonzero**beta).sum() + res += (beta - 1) * (WH**beta).sum() + res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum() + res /= beta * (beta - 1) + + return res + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_beta_divergence(csr_container): + # Compare _beta_divergence with the reference _beta_divergence_dense + n_samples = 20 + n_features = 10 + n_components = 5 + beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0] + + # initialization + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta in beta_losses: + ref = _beta_divergence_dense(X, W, H, beta) + loss = nmf._beta_divergence(X, W, H, beta) + loss_csr = nmf._beta_divergence(X_csr, W, H, beta) + + assert_almost_equal(ref, loss, decimal=7) + assert_almost_equal(ref, loss_csr, decimal=7) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_special_sparse_dot(csr_container): + # Test the function that computes np.dot(W, H), only where X is non zero. 
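
# An illustrative sketch of the quantity the comment above refers to: the
# product np.dot(W, H) evaluated only at the nonzero positions of a sparse X.
# This is editorial illustration only; the helper name `masked_dot` is an
# assumption of this sketch, not sklearn API, and it is not part of the
# patched test file.
import numpy as np
from scipy.sparse import csr_matrix

def masked_dot(W, H, X_csr):
    # Return a CSR matrix holding (W @ H)[i, j] only where X_csr[i, j] != 0.
    ii, jj = X_csr.nonzero()
    data = np.einsum("ik,ki->i", W[ii], H[:, jj])
    return csr_matrix((data, (ii, jj)), shape=X_csr.shape)

rng = np.random.RandomState(0)
X = np.clip(rng.randn(10, 5), 0, None)
W = np.abs(rng.randn(10, 3))
H = np.abs(rng.randn(3, 5))
ref = masked_dot(W, H, csr_matrix(X))
# At every kept entry the masked product agrees with the full dense product.
assert np.allclose(ref.toarray()[X != 0], (W @ H)[X != 0])
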
+ n_samples = 10 + n_features = 5 + n_components = 3 + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + + W = np.abs(rng.randn(n_samples, n_components)) + H = np.abs(rng.randn(n_components, n_features)) + + WH_safe = nmf._special_sparse_dot(W, H, X_csr) + WH = nmf._special_sparse_dot(W, H, X) + + # test that both results have same values, in X_csr nonzero elements + ii, jj = X_csr.nonzero() + WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel() + assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10) + + # test that WH_safe and X_csr have the same sparse structure + assert_array_equal(WH_safe.indices, X_csr.indices) + assert_array_equal(WH_safe.indptr, X_csr.indptr) + assert_array_equal(WH_safe.shape, X_csr.shape) + + +@ignore_warnings(category=ConvergenceWarning) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_nmf_multiplicative_update_sparse(csr_container): + # Compare sparse and dense input in multiplicative update NMF + # Also test continuity of the results with respect to beta_loss parameter + n_samples = 20 + n_features = 10 + n_components = 5 + alpha = 0.1 + l1_ratio = 0.5 + n_iter = 20 + + # initialization + rng = np.random.mtrand.RandomState(1337) + X = rng.randn(n_samples, n_features) + X = np.abs(X) + X_csr = csr_container(X) + W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5): + # Reference with dense array X + W, H = W0.copy(), H0.copy() + W1, H1, _ = non_negative_factorization( + X, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + # Compare with sparse X + W, H = W0.copy(), H0.copy() + W2, H2, _ = non_negative_factorization( + X_csr, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + assert_allclose(W1, W2, atol=1e-7) + assert_allclose(H1, H2, atol=1e-7) + + # Compare with almost same beta_loss, since some values have a specific + # behavior, but the results should be continuous w.r.t beta_loss + beta_loss -= 1.0e-5 + W, H = W0.copy(), H0.copy() + W3, H3, _ = non_negative_factorization( + X_csr, + W, + H, + n_components, + init="custom", + update_H=True, + solver="mu", + beta_loss=beta_loss, + max_iter=n_iter, + alpha_W=alpha, + l1_ratio=l1_ratio, + random_state=42, + ) + + assert_allclose(W1, W3, atol=1e-4) + assert_allclose(H1, H3, atol=1e-4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_nmf_negative_beta_loss(csr_container): + # Test that an error is raised if beta_loss < 0 and X contains zeros. + # Test that the output has not NaN values when the input contains zeros. + n_samples = 6 + n_features = 5 + n_components = 3 + + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.clip(X, 0, None, out=X) + X_csr = csr_container(X) + + def _assert_nmf_no_nan(X, beta_loss): + W, H, _ = non_negative_factorization( + X, + init="random", + n_components=n_components, + solver="mu", + beta_loss=beta_loss, + random_state=0, + max_iter=1000, + ) + assert not np.any(np.isnan(W)) + assert not np.any(np.isnan(H)) + + msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge." 
+ for beta_loss in (-0.6, 0.0): + with pytest.raises(ValueError, match=msg): + _assert_nmf_no_nan(X, beta_loss) + _assert_nmf_no_nan(X + 1e-9, beta_loss) + + for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5): + _assert_nmf_no_nan(X, beta_loss) + _assert_nmf_no_nan(X_csr, beta_loss) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("beta_loss", [-0.5, 0.0]) +def test_minibatch_nmf_negative_beta_loss(beta_loss): + """Check that an error is raised if beta_loss < 0 and X contains zeros.""" + rng = np.random.RandomState(0) + X = rng.normal(size=(6, 5)) + X[X < 0] = 0 + + nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0) + + msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge." + with pytest.raises(ValueError, match=msg): + nmf.fit(X) + + +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_regularization(Estimator, solver): + # Test the effect of L1 and L2 regularizations + n_samples = 6 + n_features = 5 + n_components = 3 + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(n_samples, n_features)) + + # L1 regularization should increase the number of zeros + l1_ratio = 1.0 + regul = Estimator( + n_components=n_components, + alpha_W=0.5, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + model = Estimator( + n_components=n_components, + alpha_W=0.0, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + + W_regul = regul.fit_transform(X) + W_model = model.fit_transform(X) + + H_regul = regul.components_ + H_model = model.components_ + + eps = np.finfo(np.float64).eps + W_regul_n_zeros = W_regul[W_regul <= eps].size + W_model_n_zeros = W_model[W_model <= eps].size + H_regul_n_zeros = H_regul[H_regul <= eps].size + H_model_n_zeros = H_model[H_model <= eps].size + + assert W_regul_n_zeros > W_model_n_zeros + assert H_regul_n_zeros > H_model_n_zeros + + # L2 regularization should decrease the sum of the squared norm + # of the matrices W and H + l1_ratio = 0.0 + regul = Estimator( + n_components=n_components, + alpha_W=0.5, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + model = Estimator( + n_components=n_components, + alpha_W=0.0, + l1_ratio=l1_ratio, + random_state=42, + **solver, + ) + + W_regul = regul.fit_transform(X) + W_model = model.fit_transform(X) + + H_regul = regul.components_ + H_model = model.components_ + + assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > ( + linalg.norm(W_regul) + ) ** 2.0 + (linalg.norm(H_regul)) ** 2.0 + + +@ignore_warnings(category=ConvergenceWarning) +@pytest.mark.parametrize("solver", ("cd", "mu")) +def test_nmf_decreasing(solver): + # test that the objective function is decreasing at each iteration + n_samples = 20 + n_features = 15 + n_components = 10 + alpha = 0.1 + l1_ratio = 0.5 + tol = 0.0 + + # initialization + rng = np.random.mtrand.RandomState(42) + X = rng.randn(n_samples, n_features) + np.abs(X, X) + W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42) + + for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5): + if solver != "mu" and beta_loss != 2: + # not implemented + continue + W, H = W0.copy(), H0.copy() + previous_loss = None + for _ in range(30): + # one more iteration starting from the previous results + W, H, _ = non_negative_factorization( + X, + W, + H, + beta_loss=beta_loss, + init="custom", + n_components=n_components, + max_iter=1, + alpha_W=alpha, + 
solver=solver, + tol=tol, + l1_ratio=l1_ratio, + verbose=0, + random_state=0, + update_H=True, + ) + + loss = ( + nmf._beta_divergence(X, W, H, beta_loss) + + alpha * l1_ratio * n_features * W.sum() + + alpha * l1_ratio * n_samples * H.sum() + + alpha * (1 - l1_ratio) * n_features * (W**2).sum() + + alpha * (1 - l1_ratio) * n_samples * (H**2).sum() + ) + if previous_loss is not None: + assert previous_loss > loss + previous_loss = loss + + +def test_nmf_underflow(): + # Regression test for an underflow issue in _beta_divergence + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 10, 2, 2 + X = np.abs(rng.randn(n_samples, n_features)) * 10 + W = np.abs(rng.randn(n_samples, n_components)) * 10 + H = np.abs(rng.randn(n_components, n_features)) + + X[0, 0] = 0 + ref = nmf._beta_divergence(X, W, H, beta=1.0) + X[0, 0] = 1e-323 + res = nmf._beta_divergence(X, W, H, beta=1.0) + assert_almost_equal(res, ref) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize( + "dtype_in, dtype_out", + [ + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ], +) +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out): + # Check that NMF preserves dtype (float32 and float64) + X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False) + np.abs(X, out=X) + + nmf = Estimator( + alpha_W=1.0, + alpha_H=1.0, + tol=1e-2, + random_state=0, + **solver, + ) + + assert nmf.fit(X).transform(X).dtype == dtype_out + assert nmf.fit_transform(X).dtype == dtype_out + assert nmf.components_.dtype == dtype_out + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize( + ["Estimator", "solver"], + [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], +) +def test_nmf_float32_float64_consistency(Estimator, solver): + # Check that the result of NMF is the same between float32 and float64 + X = np.random.RandomState(0).randn(50, 7) + np.abs(X, out=X) + nmf32 = Estimator(random_state=0, tol=1e-3, **solver) + W32 = nmf32.fit_transform(X.astype(np.float32)) + nmf64 = Estimator(random_state=0, tol=1e-3, **solver) + W64 = nmf64.fit_transform(X) + + assert_allclose(W32, W64, atol=1e-5) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_nmf_custom_init_dtype_error(Estimator): + # Check that an error is raise if custom H and/or W don't have the same + # dtype as X. 
+ rng = np.random.RandomState(0) + X = rng.random_sample((20, 15)) + H = rng.random_sample((15, 15)).astype(np.float32) + W = rng.random_sample((20, 15)) + + with pytest.raises(TypeError, match="should have the same dtype as X"): + Estimator(init="custom").fit(X, H=H, W=W) + + with pytest.raises(TypeError, match="should have the same dtype as X"): + non_negative_factorization(X, H=H, update_H=False) + + +@pytest.mark.parametrize("beta_loss", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5]) +def test_nmf_minibatchnmf_equivalence(beta_loss): + # Test that MiniBatchNMF is equivalent to NMF when batch_size = n_samples and + # forget_factor 0.0 (stopping criterion put aside) + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(48, 5)) + + nmf = NMF( + n_components=5, + beta_loss=beta_loss, + solver="mu", + random_state=0, + tol=0, + ) + mbnmf = MiniBatchNMF( + n_components=5, + beta_loss=beta_loss, + random_state=0, + tol=0, + max_no_improvement=None, + batch_size=X.shape[0], + forget_factor=0.0, + ) + W = nmf.fit_transform(X) + mbW = mbnmf.fit_transform(X) + assert_allclose(W, mbW) + + +def test_minibatch_nmf_partial_fit(): + # Check fit / partial_fit equivalence. Applicable only with fresh restarts. + rng = np.random.mtrand.RandomState(42) + X = np.abs(rng.randn(100, 5)) + + n_components = 5 + batch_size = 10 + max_iter = 2 + + mbnmf1 = MiniBatchNMF( + n_components=n_components, + init="custom", + random_state=0, + max_iter=max_iter, + batch_size=batch_size, + tol=0, + max_no_improvement=None, + fresh_restarts=False, + ) + mbnmf2 = MiniBatchNMF(n_components=n_components, init="custom", random_state=0) + + # Force the same init of H (W is recomputed anyway) to be able to compare results. + W, H = nmf._initialize_nmf( + X, n_components=n_components, init="random", random_state=0 + ) + + mbnmf1.fit(X, W=W, H=H) + for i in range(max_iter): + for j in range(batch_size): + mbnmf2.partial_fit(X[j : j + batch_size], W=W[:batch_size], H=H) + + assert mbnmf1.n_steps_ == mbnmf2.n_steps_ + assert_allclose(mbnmf1.components_, mbnmf2.components_) + + +def test_feature_names_out(): + """Check feature names out for NMF.""" + random_state = np.random.RandomState(0) + X = np.abs(random_state.randn(10, 4)) + nmf = NMF(n_components=3).fit(X) + + names = nmf.get_feature_names_out() + assert_array_equal([f"nmf{i}" for i in range(3)], names) + + +# TODO(1.6): remove the warning filter +@pytest.mark.filterwarnings("ignore:The default value of `n_components` will change") +def test_minibatch_nmf_verbose(): + # Check verbose mode of MiniBatchNMF for better coverage. 
+ A = np.random.RandomState(0).random_sample((100, 10)) + nmf = MiniBatchNMF(tol=1e-2, random_state=0, verbose=1) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + nmf.fit(A) + finally: + sys.stdout = old_stdout + + +# TODO(1.5): remove this test +def test_NMF_inverse_transform_W_deprecation(): + rng = np.random.mtrand.RandomState(42) + A = np.abs(rng.randn(6, 5)) + est = NMF( + n_components=3, + init="random", + random_state=0, + tol=1e-6, + ) + Xt = est.fit_transform(A) + + with pytest.raises(TypeError, match="Missing required positional argument"): + est.inverse_transform() + + with pytest.raises(ValueError, match="Please provide only"): + est.inverse_transform(Xt=Xt, W=Xt) + + with warnings.catch_warnings(record=True): + warnings.simplefilter("error") + est.inverse_transform(Xt) + + with pytest.warns(FutureWarning, match="Input argument `W` was renamed to `Xt`"): + est.inverse_transform(W=Xt) + + +@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF]) +def test_nmf_n_components_auto(Estimator): + # Check that n_components is correctly inferred + # from the provided custom initialization. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W = rng.random_sample((6, 2)) + H = rng.random_sample((2, 5)) + est = Estimator( + n_components="auto", + init="custom", + random_state=0, + tol=1e-6, + ) + est.fit_transform(X, W=W, H=H) + assert est._n_components == H.shape[0] + + +def test_nmf_non_negative_factorization_n_components_auto(): + # Check that n_components is correctly inferred from the provided + # custom initialization. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W_init = rng.random_sample((6, 2)) + H_init = rng.random_sample((2, 5)) + W, H, _ = non_negative_factorization( + X, W=W_init, H=H_init, init="custom", n_components="auto" + ) + assert H.shape == H_init.shape + assert W.shape == W_init.shape + + +# TODO(1.6): remove +def test_nmf_n_components_default_value_warning(): + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H = rng.random_sample((2, 5)) + with pytest.warns( + FutureWarning, match="The default value of `n_components` will change from" + ): + non_negative_factorization(X, H=H) + + +def test_nmf_n_components_auto_no_h_update(): + # Tests that non_negative_factorization does not fail when setting + # n_components="auto" also tests that the inferred n_component + # value is the right one. + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H_true = rng.random_sample((2, 5)) + W, H, _ = non_negative_factorization( + X, H=H_true, n_components="auto", update_H=False + ) # should not fail + assert_allclose(H, H_true) + assert W.shape == (X.shape[0], H_true.shape[0]) + + +def test_nmf_w_h_not_used_warning(): + # Check that warnings are raised if user provided W and H are not used + # and initialization overrides value of W or H + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + W_init = rng.random_sample((6, 2)) + H_init = rng.random_sample((2, 5)) + with pytest.warns( + RuntimeWarning, + match="When init!='custom', provided W or H are ignored", + ): + non_negative_factorization(X, H=H_init, update_H=True, n_components="auto") + + with pytest.warns( + RuntimeWarning, + match="When init!='custom', provided W or H are ignored", + ): + non_negative_factorization( + X, W=W_init, H=H_init, update_H=True, n_components="auto" + ) + + with pytest.warns( + RuntimeWarning, match="When update_H=False, the provided initial W is not used." 
+ ): + # When update_H is False, W is ignored regardless of init + # TODO: use the provided W when init="custom". + non_negative_factorization( + X, W=W_init, H=H_init, update_H=False, n_components="auto" + ) + + +def test_nmf_custom_init_shape_error(): + # Check that an informative error is raised when custom initialization does not + # have the right shape + rng = np.random.RandomState(0) + X = rng.random_sample((6, 5)) + H = rng.random_sample((2, 5)) + nmf = NMF(n_components=2, init="custom", random_state=0) + + with pytest.raises(ValueError, match="Array with wrong first dimension passed"): + nmf.fit(X, H=H, W=rng.random_sample((5, 2))) + + with pytest.raises(ValueError, match="Array with wrong second dimension passed"): + nmf.fit(X, H=H, W=rng.random_sample((6, 3))) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py new file mode 100644 index 0000000000000000000000000000000000000000..d442d0beeb57394b276ae4de9f683886d982f29e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py @@ -0,0 +1,477 @@ +import sys +from io import StringIO + +import numpy as np +import pytest +from numpy.testing import assert_array_equal +from scipy.linalg import block_diag +from scipy.special import psi + +from sklearn.decomposition import LatentDirichletAllocation +from sklearn.decomposition._online_lda_fast import ( + _dirichlet_expectation_1d, + _dirichlet_expectation_2d, +) +from sklearn.exceptions import NotFittedError +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + if_safe_multiprocessing_with_blas, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def _build_sparse_array(csr_container): + # Create 3 topics and each topic has 3 distinct words. + # (Each word only belongs to a single topic.) 
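
# An illustrative sketch (editorial, not part of the patched test file) of the
# block-diagonal document-term matrix the helper below builds: three groups of
# three documents, each group using only the three words owned by one topic.
import numpy as np
from scipy.linalg import block_diag

block = np.full((3, 3), 3, dtype=int)    # 3 documents x 3 words, count 3 each
X_toy = block_diag(block, block, block)  # shape (9, 9)
# X_toy:
# [[3 3 3 0 0 0 0 0 0]
#  [3 3 3 0 0 0 0 0 0]
#  [3 3 3 0 0 0 0 0 0]
#  [0 0 0 3 3 3 0 0 0]
#  ...
#  [0 0 0 0 0 0 3 3 3]]
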
+ n_components = 3 + block = np.full((3, 3), n_components, dtype=int) + blocks = [block] * n_components + X = block_diag(*blocks) + X = csr_container(X) + return (n_components, X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_default_prior_params(csr_container): + # default prior parameter should be `1 / topics` + # and verbose params should not affect result + n_components, X = _build_sparse_array(csr_container) + prior = 1.0 / n_components + lda_1 = LatentDirichletAllocation( + n_components=n_components, + doc_topic_prior=prior, + topic_word_prior=prior, + random_state=0, + ) + lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0) + topic_distr_1 = lda_1.fit_transform(X) + topic_distr_2 = lda_2.fit_transform(X) + assert_almost_equal(topic_distr_1, topic_distr_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_batch(csr_container): + # Test LDA batch learning_offset (`fit` method with 'batch' learning) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + evaluate_every=1, + learning_method="batch", + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_online(csr_container): + # Test LDA online learning (`fit` method with 'online' learning) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=10.0, + evaluate_every=1, + learning_method="online", + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_partial_fit(csr_container): + # Test LDA online learning (`partial_fit` method) + # (same as test_lda_batch) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=10.0, + total_samples=100, + random_state=rng, + ) + for i in range(3): + lda.partial_fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_dense_input(csr_container): + # Test LDA with dense input. + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, learning_method="batch", random_state=rng + ) + lda.fit(X.toarray()) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +def test_lda_transform(): + # Test LDA transform. 
+ # Transform result cannot be negative and should be normalized + rng = np.random.RandomState(0) + X = rng.randint(5, size=(20, 10)) + n_components = 3 + lda = LatentDirichletAllocation(n_components=n_components, random_state=rng) + X_trans = lda.fit_transform(X) + assert (X_trans > 0.0).any() + assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0])) + + +@pytest.mark.parametrize("method", ("online", "batch")) +def test_lda_fit_transform(method): + # Test LDA fit_transform & transform + # fit_transform and transform result should be the same + rng = np.random.RandomState(0) + X = rng.randint(10, size=(50, 20)) + lda = LatentDirichletAllocation( + n_components=5, learning_method=method, random_state=rng + ) + X_fit = lda.fit_transform(X) + X_trans = lda.transform(X) + assert_array_almost_equal(X_fit, X_trans, 4) + + +def test_lda_negative_input(): + # test pass dense matrix with sparse negative input. + X = np.full((5, 10), -1.0) + lda = LatentDirichletAllocation() + regex = r"^Negative values in data passed" + with pytest.raises(ValueError, match=regex): + lda.fit(X) + + +def test_lda_no_component_error(): + # test `perplexity` before `fit` + rng = np.random.RandomState(0) + X = rng.randint(4, size=(20, 10)) + lda = LatentDirichletAllocation() + regex = ( + "This LatentDirichletAllocation instance is not fitted yet. " + "Call 'fit' with appropriate arguments before using this " + "estimator." + ) + with pytest.raises(NotFittedError, match=regex): + lda.perplexity(X) + + +@if_safe_multiprocessing_with_blas +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("method", ("online", "batch")) +def test_lda_multi_jobs(method, csr_container): + n_components, X = _build_sparse_array(csr_container) + # Test LDA batch training with multi CPU + rng = np.random.RandomState(0) + lda = LatentDirichletAllocation( + n_components=n_components, + n_jobs=2, + learning_method=method, + evaluate_every=1, + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@if_safe_multiprocessing_with_blas +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_partial_fit_multi_jobs(csr_container): + # Test LDA online training with multi CPU + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + n_jobs=2, + learning_offset=5.0, + total_samples=30, + random_state=rng, + ) + for i in range(2): + lda.partial_fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +def test_lda_preplexity_mismatch(): + # test dimension mismatch in `perplexity` method + rng = np.random.RandomState(0) + n_components = rng.randint(3, 6) + n_samples = rng.randint(6, 10) + X = np.random.randint(4, size=(n_samples, 10)) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=5.0, + total_samples=20, + random_state=rng, + ) + lda.fit(X) + # invalid samples + invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components)) + with pytest.raises(ValueError, match=r"Number of samples"): + lda._perplexity_precomp_distr(X, invalid_n_samples) + # invalid topic number + invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1)) + with 
pytest.raises(ValueError, match=r"Number of topics"): + lda._perplexity_precomp_distr(X, invalid_n_components) + + +@pytest.mark.parametrize("method", ("online", "batch")) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_perplexity(method, csr_container): + # Test LDA perplexity for batch training + # perplexity should be lower after each iteration + n_components, X = _build_sparse_array(csr_container) + lda_1 = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_2 = LatentDirichletAllocation( + n_components=n_components, + max_iter=10, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_1.fit(X) + perp_1 = lda_1.perplexity(X, sub_sampling=False) + + lda_2.fit(X) + perp_2 = lda_2.perplexity(X, sub_sampling=False) + assert perp_1 >= perp_2 + + perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True) + perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True) + assert perp_1_subsampling >= perp_2_subsampling + + +@pytest.mark.parametrize("method", ("online", "batch")) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_score(method, csr_container): + # Test LDA score for batch training + # score should be higher after each iteration + n_components, X = _build_sparse_array(csr_container) + lda_1 = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_2 = LatentDirichletAllocation( + n_components=n_components, + max_iter=10, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_1.fit_transform(X) + score_1 = lda_1.score(X) + + lda_2.fit_transform(X) + score_2 = lda_2.score(X) + assert score_2 >= score_1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_perplexity_input_format(csr_container): + # Test LDA perplexity for sparse and dense input + # score should be the same for both dense and sparse input + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method="batch", + total_samples=100, + random_state=0, + ) + lda.fit(X) + perp_1 = lda.perplexity(X) + perp_2 = lda.perplexity(X.toarray()) + assert_almost_equal(perp_1, perp_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_score_perplexity(csr_container): + # Test the relationship between LDA score and perplexity + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, max_iter=10, random_state=0 + ) + lda.fit(X) + perplexity_1 = lda.perplexity(X, sub_sampling=False) + + score = lda.score(X) + perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data))) + assert_almost_equal(perplexity_1, perplexity_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_perplexity(csr_container): + # Test that the perplexity computed during fit is consistent with what is + # returned by the perplexity method + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method="batch", + random_state=0, + evaluate_every=1, + ) + lda.fit(X) + + # Perplexity computed at end of fit method + perplexity1 = lda.bound_ + + # Result of perplexity method on the train set + perplexity2 = lda.perplexity(X) + + assert_almost_equal(perplexity1, perplexity2) + + 
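
# An illustrative sketch (editorial, not part of the patched test file) of the
# score/perplexity relationship the tests above rely on: perplexity is the
# exponentiated negative per-word log-likelihood bound returned by score().
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

rng = np.random.RandomState(0)
X_toy = rng.randint(5, size=(20, 10))  # toy document-term counts
lda_toy = LatentDirichletAllocation(n_components=3, random_state=0).fit(X_toy)

bound = lda_toy.score(X_toy)           # approximate log-likelihood bound
perplexity = lda_toy.perplexity(X_toy)
# Mirrors the assertion in test_lda_score_perplexity above (X_toy.sum() is the
# total word count of the toy corpus).
assert np.isclose(perplexity, np.exp(-bound / X_toy.sum()))
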
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_empty_docs(csr_container): + """Test LDA on empty document (all-zero rows).""" + Z = np.zeros((5, 4)) + for X in [Z, csr_container(Z)]: + lda = LatentDirichletAllocation(max_iter=750).fit(X) + assert_almost_equal( + lda.components_.sum(axis=0), np.ones(lda.components_.shape[1]) + ) + + +def test_dirichlet_expectation(): + """Test Cython version of Dirichlet expectation calculation.""" + x = np.logspace(-100, 10, 10000) + expectation = np.empty_like(x) + _dirichlet_expectation_1d(x, 0, expectation) + assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19) + + x = x.reshape(100, 100) + assert_allclose( + _dirichlet_expectation_2d(x), + psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]), + rtol=1e-11, + atol=3e-9, + ) + + +def check_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container +): + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=3, + learning_method="batch", + verbose=verbose, + evaluate_every=evaluate_every, + random_state=0, + ) + out = StringIO() + old_out, sys.stdout = sys.stdout, out + try: + lda.fit(X) + finally: + sys.stdout = old_out + + n_lines = out.getvalue().count("\n") + n_perplexity = out.getvalue().count("perplexity") + assert expected_lines == n_lines + assert expected_perplexities == n_perplexity + + +@pytest.mark.parametrize( + "verbose,evaluate_every,expected_lines,expected_perplexities", + [ + (False, 1, 0, 0), + (False, 0, 0, 0), + (True, 0, 3, 0), + (True, 1, 3, 3), + (True, 2, 3, 1), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container +): + check_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_feature_names_out(csr_container): + """Check feature names out for LatentDirichletAllocation.""" + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation(n_components=n_components).fit(X) + + names = lda.get_feature_names_out() + assert_array_equal( + [f"latentdirichletallocation{i}" for i in range(n_components)], names + ) + + +@pytest.mark.parametrize("learning_method", ("batch", "online")) +def test_lda_dtype_match(learning_method, global_dtype): + """Check data type preservation of fitted attributes.""" + rng = np.random.RandomState(0) + X = rng.uniform(size=(20, 10)).astype(global_dtype, copy=False) + + lda = LatentDirichletAllocation( + n_components=5, random_state=0, learning_method=learning_method + ) + lda.fit(X) + assert lda.components_.dtype == global_dtype + assert lda.exp_dirichlet_component_.dtype == global_dtype + + +@pytest.mark.parametrize("learning_method", ("batch", "online")) +def test_lda_numerical_consistency(learning_method, global_random_seed): + """Check numerical consistency between np.float32 and np.float64.""" + rng = np.random.RandomState(global_random_seed) + X64 = rng.uniform(size=(20, 10)) + X32 = X64.astype(np.float32) + + lda_64 = LatentDirichletAllocation( + n_components=5, random_state=global_random_seed, learning_method=learning_method + ).fit(X64) + lda_32 = LatentDirichletAllocation( + n_components=5, random_state=global_random_seed, learning_method=learning_method + ).fit(X32) + + assert_allclose(lda_32.components_, lda_64.components_) + 
assert_allclose(lda_32.transform(X32), lda_64.transform(X64)) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..44281b9038697e56228a8e0584f7c8ba81d8b969 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_pca.py @@ -0,0 +1,987 @@ +import re +import warnings + +import numpy as np +import pytest +import scipy as sp +from numpy.testing import assert_array_equal + +from sklearn import config_context, datasets +from sklearn.base import clone +from sklearn.datasets import load_iris, make_classification +from sklearn.decomposition import PCA +from sklearn.decomposition._pca import _assess_dimension, _infer_dimension +from sklearn.utils._array_api import ( + _atol_for_type, + _convert_to_numpy, + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._array_api import device as array_device +from sklearn.utils._testing import _array_api_for_tests, assert_allclose +from sklearn.utils.estimator_checks import ( + _get_check_estimator_ids, + check_array_api_input_and_values, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +iris = datasets.load_iris() +PCA_SOLVERS = ["full", "arpack", "randomized", "auto"] + +# `SPARSE_M` and `SPARSE_N` could be larger, but be aware: +# * SciPy's generation of random sparse matrix can be costly +# * A (SPARSE_M, SPARSE_N) dense array is allocated to compare against +SPARSE_M, SPARSE_N = 1000, 300 # arbitrary +SPARSE_MAX_COMPONENTS = min(SPARSE_M, SPARSE_N) + + +def _check_fitted_pca_close(pca1, pca2, rtol): + assert_allclose(pca1.components_, pca2.components_, rtol=rtol) + assert_allclose(pca1.explained_variance_, pca2.explained_variance_, rtol=rtol) + assert_allclose(pca1.singular_values_, pca2.singular_values_, rtol=rtol) + assert_allclose(pca1.mean_, pca2.mean_, rtol=rtol) + assert_allclose(pca1.n_components_, pca2.n_components_, rtol=rtol) + assert_allclose(pca1.n_samples_, pca2.n_samples_, rtol=rtol) + assert_allclose(pca1.noise_variance_, pca2.noise_variance_, rtol=rtol) + assert_allclose(pca1.n_features_in_, pca2.n_features_in_, rtol=rtol) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +@pytest.mark.parametrize("n_components", range(1, iris.data.shape[1])) +def test_pca(svd_solver, n_components): + X = iris.data + pca = PCA(n_components=n_components, svd_solver=svd_solver) + + # check the shape of fit.transform + X_r = pca.fit(X).transform(X) + assert X_r.shape[1] == n_components + + # check the equivalence of fit.transform and fit_transform + X_r2 = pca.fit_transform(X) + assert_allclose(X_r, X_r2) + X_r = pca.transform(X) + assert_allclose(X_r, X_r2) + + # Test get_covariance and get_precision + cov = pca.get_covariance() + precision = pca.get_precision() + assert_allclose(np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-12) + + +@pytest.mark.parametrize("density", [0.01, 0.1, 0.30]) +@pytest.mark.parametrize("n_components", [1, 2, 10]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +@pytest.mark.parametrize("svd_solver", ["arpack"]) +@pytest.mark.parametrize("scale", [1, 10, 100]) +def test_pca_sparse( + global_random_seed, svd_solver, sparse_container, n_components, density, scale +): + # Make sure any tolerance changes pass with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all" + rtol = 5e-07 + transform_rtol = 3e-05 + + random_state = 
np.random.default_rng(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=density, + ) + ) + # Scale the data + vary the column means + scale_vector = random_state.random(X.shape[1]) * scale + X = X.multiply(scale_vector) + + pca = PCA( + n_components=n_components, + svd_solver=svd_solver, + random_state=global_random_seed, + ) + pca.fit(X) + + Xd = X.toarray() + pcad = PCA( + n_components=n_components, + svd_solver=svd_solver, + random_state=global_random_seed, + ) + pcad.fit(Xd) + + # Fitted attributes equality + _check_fitted_pca_close(pca, pcad, rtol=rtol) + + # Test transform + X2 = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=density, + ) + ) + X2d = X2.toarray() + + assert_allclose(pca.transform(X2), pca.transform(X2d), rtol=transform_rtol) + assert_allclose(pca.transform(X2), pcad.transform(X2d), rtol=transform_rtol) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_pca_sparse_fit_transform(global_random_seed, sparse_container): + random_state = np.random.default_rng(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=0.01, + ) + ) + X2 = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + density=0.01, + ) + ) + + pca_fit = PCA(n_components=10, svd_solver="arpack", random_state=global_random_seed) + pca_fit_transform = PCA( + n_components=10, svd_solver="arpack", random_state=global_random_seed + ) + + pca_fit.fit(X) + transformed_X = pca_fit_transform.fit_transform(X) + + _check_fitted_pca_close(pca_fit, pca_fit_transform, rtol=1e-10) + assert_allclose(transformed_X, pca_fit_transform.transform(X), rtol=2e-9) + assert_allclose(transformed_X, pca_fit.transform(X), rtol=2e-9) + assert_allclose(pca_fit.transform(X2), pca_fit_transform.transform(X2), rtol=2e-9) + + +@pytest.mark.parametrize("svd_solver", ["randomized", "full", "auto"]) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_sparse_pca_solver_error(global_random_seed, svd_solver, sparse_container): + random_state = np.random.RandomState(global_random_seed) + X = sparse_container( + sp.sparse.random( + SPARSE_M, + SPARSE_N, + random_state=random_state, + ) + ) + pca = PCA(n_components=30, svd_solver=svd_solver) + error_msg_pattern = ( + f'PCA only support sparse inputs with the "arpack" solver, while "{svd_solver}"' + " was passed" + ) + with pytest.raises(TypeError, match=error_msg_pattern): + pca.fit(X) + + +def test_no_empty_slice_warning(): + # test if we avoid numpy warnings for computing over empty arrays + n_components = 10 + n_features = n_components + 2 # anything > n_comps triggered it in 0.16 + X = np.random.uniform(-1, 1, size=(n_components, n_features)) + pca = PCA(n_components=n_components) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + pca.fit(X) + + +@pytest.mark.parametrize("copy", [True, False]) +@pytest.mark.parametrize("solver", PCA_SOLVERS) +def test_whitening(solver, copy): + # Check that PCA output has unit-variance + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 80 + n_components = 30 + rank = 50 + + # some low rank data with correlated features + X = np.dot( + rng.randn(n_samples, rank), + np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features)), + ) + # the component-wise variance of the first 50 
features is 3 times the + # mean component-wise variance of the remaining 30 features + X[:, :50] *= 3 + + assert X.shape == (n_samples, n_features) + + # the component-wise variance is thus highly varying: + assert X.std(axis=0).std() > 43.8 + + # whiten the data while projecting to the lower dim subspace + X_ = X.copy() # make sure we keep an original across iterations. + pca = PCA( + n_components=n_components, + whiten=True, + copy=copy, + svd_solver=solver, + random_state=0, + iterated_power=7, + ) + # test fit_transform + X_whitened = pca.fit_transform(X_.copy()) + assert X_whitened.shape == (n_samples, n_components) + X_whitened2 = pca.transform(X_) + assert_allclose(X_whitened, X_whitened2, rtol=5e-4) + + assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(n_components)) + assert_allclose(X_whitened.mean(axis=0), np.zeros(n_components), atol=1e-12) + + X_ = X.copy() + pca = PCA( + n_components=n_components, whiten=False, copy=copy, svd_solver=solver + ).fit(X_.copy()) + X_unwhitened = pca.transform(X_) + assert X_unwhitened.shape == (n_samples, n_components) + + # in that case the output components still have varying variances + assert X_unwhitened.std(axis=0).std() == pytest.approx(74.1, rel=1e-1) + # we always center, so no test for non-centering. + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_explained_variance_equivalence_solver(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca_full = PCA(n_components=2, svd_solver="full") + pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=0) + + pca_full.fit(X) + pca_other.fit(X) + + assert_allclose( + pca_full.explained_variance_, pca_other.explained_variance_, rtol=5e-2 + ) + assert_allclose( + pca_full.explained_variance_ratio_, + pca_other.explained_variance_ratio_, + rtol=5e-2, + ) + + +@pytest.mark.parametrize( + "X", + [ + np.random.RandomState(0).randn(100, 80), + datasets.make_classification(100, 80, n_informative=78, random_state=0)[0], + ], + ids=["random-data", "correlated-data"], +) +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_explained_variance_empirical(X, svd_solver): + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=0) + X_pca = pca.fit_transform(X) + assert_allclose(pca.explained_variance_, np.var(X_pca, ddof=1, axis=0)) + + expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0] + expected_result = sorted(expected_result, reverse=True)[:2] + assert_allclose(pca.explained_variance_, expected_result, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_singular_values_consistency(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca_full = PCA(n_components=2, svd_solver="full", random_state=rng) + pca_other = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + + pca_full.fit(X) + pca_other.fit(X) + + assert_allclose(pca_full.singular_values_, pca_other.singular_values_, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_singular_values(svd_solver): + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + X_trans = pca.fit_transform(X) + + # compare to the Frobenius norm + assert_allclose( + np.sum(pca.singular_values_**2), np.linalg.norm(X_trans, "fro") ** 2 + ) + # Compare to the 
2-norms of the score vectors + assert_allclose(pca.singular_values_, np.sqrt(np.sum(X_trans**2, axis=0))) + + # set the singular values and see what er get back + n_samples, n_features = 100, 110 + X = rng.randn(n_samples, n_features) + + pca = PCA(n_components=3, svd_solver=svd_solver, random_state=rng) + X_trans = pca.fit_transform(X) + X_trans /= np.sqrt(np.sum(X_trans**2, axis=0)) + X_trans[:, 0] *= 3.142 + X_trans[:, 1] *= 2.718 + X_hat = np.dot(X_trans, pca.components_) + pca.fit(X_hat) + assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0]) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_check_projection(svd_solver): + # Test that the projection of data is correct + rng = np.random.RandomState(0) + n, p = 100, 3 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5]) + Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) + + Yt = PCA(n_components=2, svd_solver=svd_solver).fit(X).transform(Xt) + Yt /= np.sqrt((Yt**2).sum()) + + assert_allclose(np.abs(Yt[0][0]), 1.0, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_check_projection_list(svd_solver): + # Test that the projection of data is correct + X = [[1.0, 0.0], [0.0, 1.0]] + pca = PCA(n_components=1, svd_solver=svd_solver, random_state=0) + X_trans = pca.fit_transform(X) + assert X_trans.shape, (2, 1) + assert_allclose(X_trans.mean(), 0.00, atol=1e-12) + assert_allclose(X_trans.std(), 0.71, rtol=5e-3) + + +@pytest.mark.parametrize("svd_solver", ["full", "arpack", "randomized"]) +@pytest.mark.parametrize("whiten", [False, True]) +def test_pca_inverse(svd_solver, whiten): + # Test that the projection of data can be inverted + rng = np.random.RandomState(0) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + pca = PCA(n_components=2, svd_solver=svd_solver, whiten=whiten).fit(X) + Y = pca.transform(X) + Y_inverse = pca.inverse_transform(Y) + assert_allclose(X, Y_inverse, rtol=5e-6) + + +@pytest.mark.parametrize( + "data", [np.array([[0, 1, 0], [1, 0, 0]]), np.array([[0, 1, 0], [1, 0, 0]]).T] +) +@pytest.mark.parametrize( + "svd_solver, n_components, err_msg", + [ + ("arpack", 0, r"must be between 1 and min\(n_samples, n_features\)"), + ("randomized", 0, r"must be between 1 and min\(n_samples, n_features\)"), + ("arpack", 2, r"must be strictly less than min"), + ( + "auto", + 3, + ( + r"n_components=3 must be between 0 and min\(n_samples, " + r"n_features\)=2 with svd_solver='full'" + ), + ), + ], +) +def test_pca_validation(svd_solver, data, n_components, err_msg): + # Ensures that solver-specific extreme inputs for the n_components + # parameter raise errors + smallest_d = 2 # The smallest dimension + pca_fitted = PCA(n_components, svd_solver=svd_solver) + + with pytest.raises(ValueError, match=err_msg): + pca_fitted.fit(data) + + # Additional case for arpack + if svd_solver == "arpack": + n_components = smallest_d + + err_msg = ( + "n_components={}L? must be strictly less than " + r"min\(n_samples, n_features\)={}L? 
with " + "svd_solver='arpack'".format(n_components, smallest_d) + ) + with pytest.raises(ValueError, match=err_msg): + PCA(n_components, svd_solver=svd_solver).fit(data) + + +@pytest.mark.parametrize( + "solver, n_components_", + [ + ("full", min(iris.data.shape)), + ("arpack", min(iris.data.shape) - 1), + ("randomized", min(iris.data.shape)), + ], +) +@pytest.mark.parametrize("data", [iris.data, iris.data.T]) +def test_n_components_none(data, solver, n_components_): + pca = PCA(svd_solver=solver) + pca.fit(data) + assert pca.n_components_ == n_components_ + + +@pytest.mark.parametrize("svd_solver", ["auto", "full"]) +def test_n_components_mle(svd_solver): + # Ensure that n_components == 'mle' doesn't raise error for auto/full + rng = np.random.RandomState(0) + n_samples, n_features = 600, 10 + X = rng.randn(n_samples, n_features) + pca = PCA(n_components="mle", svd_solver=svd_solver) + pca.fit(X) + assert pca.n_components_ == 1 + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_n_components_mle_error(svd_solver): + # Ensure that n_components == 'mle' will raise an error for unsupported + # solvers + rng = np.random.RandomState(0) + n_samples, n_features = 600, 10 + X = rng.randn(n_samples, n_features) + pca = PCA(n_components="mle", svd_solver=svd_solver) + err_msg = "n_components='mle' cannot be a string with svd_solver='{}'".format( + svd_solver + ) + with pytest.raises(ValueError, match=err_msg): + pca.fit(X) + + +def test_pca_dim(): + # Check automated dimensionality setting + rng = np.random.RandomState(0) + n, p = 100, 5 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + pca = PCA(n_components="mle", svd_solver="full").fit(X) + assert pca.n_components == "mle" + assert pca.n_components_ == 1 + + +def test_infer_dim_1(): + # TODO: explain what this is testing + # Or at least use explicit variable names... + n, p = 1000, 5 + rng = np.random.RandomState(0) + X = ( + rng.randn(n, p) * 0.1 + + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) + + np.array([1, 0, 7, 4, 6]) + ) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + ll = np.array([_assess_dimension(spect, k, n) for k in range(1, p)]) + assert ll[1] > ll.max() - 0.01 * n + + +def test_infer_dim_2(): + # TODO: explain what this is testing + # Or at least use explicit variable names... 
+ n, p = 1000, 5 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + X[10:20] += np.array([6, 0, 7, 2, -1]) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + assert _infer_dimension(spect, n) > 1 + + +def test_infer_dim_3(): + n, p = 100, 5 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5, 1, 2]) + X[10:20] += np.array([6, 0, 7, 2, -1]) + X[30:40] += 2 * np.array([-1, 1, -1, 1, -1]) + pca = PCA(n_components=p, svd_solver="full") + pca.fit(X) + spect = pca.explained_variance_ + assert _infer_dimension(spect, n) > 2 + + +@pytest.mark.parametrize( + "X, n_components, n_components_validated", + [ + (iris.data, 0.95, 2), # row > col + (iris.data, 0.01, 1), # row > col + (np.random.RandomState(0).rand(5, 20), 0.5, 2), + ], # row < col +) +def test_infer_dim_by_explained_variance(X, n_components, n_components_validated): + pca = PCA(n_components=n_components, svd_solver="full") + pca.fit(X) + assert pca.n_components == pytest.approx(n_components) + assert pca.n_components_ == n_components_validated + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_score(svd_solver): + # Test that probabilistic PCA scoring yields a reasonable score + n, p = 1000, 3 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5]) + pca = PCA(n_components=2, svd_solver=svd_solver) + pca.fit(X) + + ll1 = pca.score(X) + h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1**2) * p + assert_allclose(ll1 / h, 1, rtol=5e-2) + + ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5])) + assert ll1 > ll2 + + pca = PCA(n_components=2, whiten=True, svd_solver=svd_solver) + pca.fit(X) + ll2 = pca.score(X) + assert ll1 > ll2 + + +def test_pca_score3(): + # Check that probabilistic PCA selects the right model + n, p = 200, 3 + rng = np.random.RandomState(0) + Xl = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7]) + Xt = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7]) + ll = np.zeros(p) + for k in range(p): + pca = PCA(n_components=k, svd_solver="full") + pca.fit(Xl) + ll[k] = pca.score(Xt) + + assert ll.argmax() == 1 + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_sanity_noise_variance(svd_solver): + # Sanity check for the noise_variance_. 
For more details see + # https://github.com/scikit-learn/scikit-learn/issues/7568 + # https://github.com/scikit-learn/scikit-learn/issues/8541 + # https://github.com/scikit-learn/scikit-learn/issues/8544 + X, _ = datasets.load_digits(return_X_y=True) + pca = PCA(n_components=30, svd_solver=svd_solver, random_state=0) + pca.fit(X) + assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0) + + +@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"]) +def test_pca_score_consistency_solvers(svd_solver): + # Check the consistency of score between solvers + X, _ = datasets.load_digits(return_X_y=True) + pca_full = PCA(n_components=30, svd_solver="full", random_state=0) + pca_other = PCA(n_components=30, svd_solver=svd_solver, random_state=0) + pca_full.fit(X) + pca_other.fit(X) + assert_allclose(pca_full.score(X), pca_other.score(X), rtol=5e-6) + + +# arpack raises ValueError for n_components == min(n_samples, n_features) +@pytest.mark.parametrize("svd_solver", ["full", "randomized"]) +def test_pca_zero_noise_variance_edge_cases(svd_solver): + # ensure that noise_variance_ is 0 in edge cases + # when n_components == min(n_samples, n_features) + n, p = 100, 3 + rng = np.random.RandomState(0) + X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5]) + + pca = PCA(n_components=p, svd_solver=svd_solver) + pca.fit(X) + assert pca.noise_variance_ == 0 + # Non-regression test for gh-12489 + # ensure no divide-by-zero error for n_components == n_features < n_samples + pca.score(X) + + pca.fit(X.T) + assert pca.noise_variance_ == 0 + # Non-regression test for gh-12489 + # ensure no divide-by-zero error for n_components == n_samples < n_features + pca.score(X.T) + + +@pytest.mark.parametrize( + "data, n_components, expected_solver", + [ # case: n_components in (0,1) => 'full' + (np.random.RandomState(0).uniform(size=(1000, 50)), 0.5, "full"), + # case: max(X.shape) <= 500 => 'full' + (np.random.RandomState(0).uniform(size=(10, 50)), 5, "full"), + # case: n_components >= .8 * min(X.shape) => 'full' + (np.random.RandomState(0).uniform(size=(1000, 50)), 50, "full"), + # n_components >= 1 and n_components < .8*min(X.shape) => 'randomized' + (np.random.RandomState(0).uniform(size=(1000, 50)), 10, "randomized"), + ], +) +def test_pca_svd_solver_auto(data, n_components, expected_solver): + pca_auto = PCA(n_components=n_components, random_state=0) + pca_test = PCA( + n_components=n_components, svd_solver=expected_solver, random_state=0 + ) + pca_auto.fit(data) + pca_test.fit(data) + assert_allclose(pca_auto.components_, pca_test.components_) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_deterministic_output(svd_solver): + rng = np.random.RandomState(0) + X = rng.rand(10, 10) + + transformed_X = np.zeros((20, 2)) + for i in range(20): + pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng) + transformed_X[i, :] = pca.fit_transform(X)[0] + assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2)) + + +@pytest.mark.parametrize("svd_solver", PCA_SOLVERS) +def test_pca_dtype_preservation(svd_solver): + check_pca_float_dtype_preservation(svd_solver) + check_pca_int_dtype_upcast_to_double(svd_solver) + + +def check_pca_float_dtype_preservation(svd_solver): + # Ensure that PCA does not upscale the dtype when input is float32 + X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False) + X_32 = X_64.astype(np.float32) + + pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64) + pca_32 = PCA(n_components=3, 
svd_solver=svd_solver, random_state=0).fit(X_32) + + assert pca_64.components_.dtype == np.float64 + assert pca_32.components_.dtype == np.float32 + assert pca_64.transform(X_64).dtype == np.float64 + assert pca_32.transform(X_32).dtype == np.float32 + + # the rtol is set such that the test passes on all platforms tested on + # conda-forge: PR#15775 + # see: https://github.com/conda-forge/scikit-learn-feedstock/pull/113 + assert_allclose(pca_64.components_, pca_32.components_, rtol=2e-4) + + +def check_pca_int_dtype_upcast_to_double(svd_solver): + # Ensure that all int types will be upcast to float64 + X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4)) + X_i64 = X_i64.astype(np.int64, copy=False) + X_i32 = X_i64.astype(np.int32, copy=False) + + pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64) + pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32) + + assert pca_64.components_.dtype == np.float64 + assert pca_32.components_.dtype == np.float64 + assert pca_64.transform(X_i64).dtype == np.float64 + assert pca_32.transform(X_i32).dtype == np.float64 + + assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-4) + + +def test_pca_n_components_mostly_explained_variance_ratio(): + # when n_components is the second highest cumulative sum of the + # explained_variance_ratio_, then n_components_ should equal the + # number of features in the dataset #15669 + X, y = load_iris(return_X_y=True) + pca1 = PCA().fit(X, y) + + n_components = pca1.explained_variance_ratio_.cumsum()[-2] + pca2 = PCA(n_components=n_components).fit(X, y) + assert pca2.n_components_ == X.shape[1] + + +def test_assess_dimension_bad_rank(): + # Test error when tested rank not in [1, n_features - 1] + spectrum = np.array([1, 1e-30, 1e-30, 1e-30]) + n_samples = 10 + for rank in (0, 5): + with pytest.raises(ValueError, match=r"should be in \[1, n_features - 1\]"): + _assess_dimension(spectrum, rank, n_samples) + + +def test_small_eigenvalues_mle(): + # Test rank associated with tiny eigenvalues are given a log-likelihood of + # -inf. 
The inferred rank will be 1 + spectrum = np.array([1, 1e-30, 1e-30, 1e-30]) + + assert _assess_dimension(spectrum, rank=1, n_samples=10) > -np.inf + + for rank in (2, 3): + assert _assess_dimension(spectrum, rank, 10) == -np.inf + + assert _infer_dimension(spectrum, 10) == 1 + + +def test_mle_redundant_data(): + # Test 'mle' with pathological X: only one relevant feature should give a + # rank of 1 + X, _ = datasets.make_classification( + n_features=20, + n_informative=1, + n_repeated=18, + n_redundant=1, + n_clusters_per_class=1, + random_state=42, + ) + pca = PCA(n_components="mle").fit(X) + assert pca.n_components_ == 1 + + +def test_fit_mle_too_few_samples(): + # Tests that an error is raised when the number of samples is smaller + # than the number of features during an mle fit + X, _ = datasets.make_classification(n_samples=20, n_features=21, random_state=42) + + pca = PCA(n_components="mle", svd_solver="full") + with pytest.raises( + ValueError, + match="n_components='mle' is only supported if n_samples >= n_features", + ): + pca.fit(X) + + +def test_mle_simple_case(): + # non-regression test for issue + # https://github.com/scikit-learn/scikit-learn/issues/16730 + n_samples, n_dim = 1000, 10 + X = np.random.RandomState(0).randn(n_samples, n_dim) + X[:, -1] = np.mean(X[:, :-1], axis=-1) # true X dim is ndim - 1 + pca_skl = PCA("mle", svd_solver="full") + pca_skl.fit(X) + assert pca_skl.n_components_ == n_dim - 1 + + +def test_assess_dimesion_rank_one(): + # Make sure assess_dimension works properly on a matrix of rank 1 + n_samples, n_features = 9, 6 + X = np.ones((n_samples, n_features)) # rank 1 matrix + _, s, _ = np.linalg.svd(X, full_matrices=True) + # except for rank 1, all eigenvalues are 0 resp. close to 0 (FP) + assert_allclose(s[1:], np.zeros(n_features - 1), atol=1e-12) + + assert np.isfinite(_assess_dimension(s, rank=1, n_samples=n_samples)) + for rank in range(2, n_features): + assert _assess_dimension(s, rank, n_samples) == -np.inf + + +def test_pca_randomized_svd_n_oversamples(): + """Check that exposing and setting `n_oversamples` will provide accurate results + even when `X` as a large number of features. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20589 + """ + rng = np.random.RandomState(0) + n_features = 100 + X = rng.randn(1_000, n_features) + + # The default value of `n_oversamples` will lead to inaccurate results + # We force it to the number of features. 
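+    # (Descriptive note: the randomized solver approximates the range of X
+    # with n_components + n_oversamples random projections; with a single
+    # requested component and many directions of comparable variance, a small
+    # sketch can miss the leading subspace, hence n_oversamples is raised to
+    # n_features here.)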
+ pca_randomized = PCA( + n_components=1, + svd_solver="randomized", + n_oversamples=n_features, + random_state=0, + ).fit(X) + pca_full = PCA(n_components=1, svd_solver="full").fit(X) + pca_arpack = PCA(n_components=1, svd_solver="arpack", random_state=0).fit(X) + + assert_allclose(np.abs(pca_full.components_), np.abs(pca_arpack.components_)) + assert_allclose(np.abs(pca_randomized.components_), np.abs(pca_arpack.components_)) + + +def test_feature_names_out(): + """Check feature names out for PCA.""" + pca = PCA(n_components=2).fit(iris.data) + + names = pca.get_feature_names_out() + assert_array_equal([f"pca{i}" for i in range(2)], names) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_variance_correctness(copy): + """Check the accuracy of PCA's internal variance calculation""" + rng = np.random.RandomState(0) + X = rng.randn(1000, 200) + pca = PCA().fit(X) + pca_var = pca.explained_variance_ / pca.explained_variance_ratio_ + true_var = np.var(X, ddof=1, axis=0).sum() + np.testing.assert_allclose(pca_var, true_var) + + +def check_array_api_get_precision(name, estimator, array_namespace, device, dtype_name): + xp = _array_api_for_tests(array_namespace, device) + iris_np = iris.data.astype(dtype_name) + iris_xp = xp.asarray(iris_np, device=device) + + estimator.fit(iris_np) + precision_np = estimator.get_precision() + covariance_np = estimator.get_covariance() + + with config_context(array_api_dispatch=True): + estimator_xp = clone(estimator).fit(iris_xp) + precision_xp = estimator_xp.get_precision() + assert precision_xp.shape == (4, 4) + assert precision_xp.dtype == iris_xp.dtype + + assert_allclose( + _convert_to_numpy(precision_xp, xp=xp), + precision_np, + atol=_atol_for_type(dtype_name), + ) + covariance_xp = estimator_xp.get_covariance() + assert covariance_xp.shape == (4, 4) + assert covariance_xp.dtype == iris_xp.dtype + + assert_allclose( + _convert_to_numpy(covariance_xp, xp=xp), + covariance_np, + atol=_atol_for_type(dtype_name), + ) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_input_and_values, check_array_api_get_precision], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + PCA(n_components=2, svd_solver="full"), + PCA(n_components=0.1, svd_solver="full", whiten=True), + PCA( + n_components=2, + svd_solver="randomized", + power_iteration_normalizer="QR", + random_state=0, # how to use global_random_seed here? + ), + ], + ids=_get_check_estimator_ids, +) +def test_pca_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_get_precision], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + # PCA with mle cannot use check_array_api_input_and_values because of + # rounding errors in the noisy (low variance) components. Even checking + # the shape of the `components_` is problematic because the number of + # components depends on trimming threshold of the mle algorithm which + # can depend on device-specific rounding errors. 
+ PCA(n_components="mle", svd_solver="full"), + ], + ids=_get_check_estimator_ids, +) +def test_pca_mle_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + # Simpler variant of the generic check_array_api_input checker tailored for + # the specific case of PCA with mle-trimmed components. + xp = _array_api_for_tests(array_namespace, device) + + X, y = make_classification(random_state=42) + X = X.astype(dtype_name, copy=False) + atol = _atol_for_type(X.dtype) + + est = clone(estimator) + + X_xp = xp.asarray(X, device=device) + y_xp = xp.asarray(y, device=device) + + est.fit(X, y) + + components_np = est.components_ + explained_variance_np = est.explained_variance_ + + est_xp = clone(est) + with config_context(array_api_dispatch=True): + est_xp.fit(X_xp, y_xp) + components_xp = est_xp.components_ + assert array_device(components_xp) == array_device(X_xp) + components_xp_np = _convert_to_numpy(components_xp, xp=xp) + + explained_variance_xp = est_xp.explained_variance_ + assert array_device(explained_variance_xp) == array_device(X_xp) + explained_variance_xp_np = _convert_to_numpy(explained_variance_xp, xp=xp) + + assert components_xp_np.dtype == components_np.dtype + assert components_xp_np.shape[1] == components_np.shape[1] + assert explained_variance_xp_np.dtype == explained_variance_np.dtype + + # Check that the explained variance values match for the + # common components: + min_components = min(components_xp_np.shape[0], components_np.shape[0]) + assert_allclose( + explained_variance_xp_np[:min_components], + explained_variance_np[:min_components], + atol=atol, + ) + + # If the number of components differ, check that the explained variance of + # the trimmed components is very small. + if components_xp_np.shape[0] != components_np.shape[0]: + reference_variance = explained_variance_np[-1] + extra_variance_np = explained_variance_np[min_components:] + extra_variance_xp_np = explained_variance_xp_np[min_components:] + assert all(np.abs(extra_variance_np - reference_variance) < atol) + assert all(np.abs(extra_variance_xp_np - reference_variance) < atol) + + +def test_array_api_error_and_warnings_on_unsupported_params(): + pytest.importorskip("array_api_compat") + xp = pytest.importorskip("numpy.array_api") + iris_xp = xp.asarray(iris.data) + + pca = PCA(n_components=2, svd_solver="arpack", random_state=0) + expected_msg = re.escape( + "PCA with svd_solver='arpack' is not supported for Array API inputs." + ) + with pytest.raises(ValueError, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) + + pca.set_params(svd_solver="randomized", power_iteration_normalizer="LU") + expected_msg = re.escape( + "Array API does not support LU factorization. Set" + " `power_iteration_normalizer='QR'` instead." + ) + with pytest.raises(ValueError, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) + + pca.set_params(svd_solver="randomized", power_iteration_normalizer="auto") + expected_msg = re.escape( + "Array API does not support LU factorization, falling back to QR instead. Set" + " `power_iteration_normalizer='QR'` explicitly to silence this warning." 
+ ) + with pytest.warns(UserWarning, match=expected_msg): + with config_context(array_api_dispatch=True): + pca.fit(iris_xp) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..3797970e3d6badc0a9537f410ae04cb24958bcf7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py @@ -0,0 +1,367 @@ +# Author: Vlad Niculae +# License: BSD 3 clause + +import sys + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.decomposition import PCA, MiniBatchSparsePCA, SparsePCA +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + if_safe_multiprocessing_with_blas, +) + + +def generate_toy_data(n_components, n_samples, image_size, random_state=None): + n_features = image_size[0] * image_size[1] + + rng = check_random_state(random_state) + U = rng.randn(n_samples, n_components) + V = rng.randn(n_components, n_features) + + centers = [(3, 3), (6, 7), (8, 1)] + sz = [1, 2, 1] + for k in range(n_components): + img = np.zeros(image_size) + xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k] + ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k] + img[xmin:xmax][:, ymin:ymax] = 1.0 + V[k, :] = img.ravel() + + # Y is defined by : Y = UV + noise + Y = np.dot(U, V) + Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise + return Y, U, V + + +# SparsePCA can be a bit slow. To avoid having test times go up, we +# test different aspects of the code in the same test + + +def test_correct_shapes(): + rng = np.random.RandomState(0) + X = rng.randn(12, 10) + spca = SparsePCA(n_components=8, random_state=rng) + U = spca.fit_transform(X) + assert spca.components_.shape == (8, 10) + assert U.shape == (12, 8) + # test overcomplete decomposition + spca = SparsePCA(n_components=13, random_state=rng) + U = spca.fit_transform(X) + assert spca.components_.shape == (13, 10) + assert U.shape == (12, 13) + + +def test_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0) + spca_lars.fit(Y) + + # Test that CD gives similar results + spca_lasso = SparsePCA(n_components=3, method="cd", random_state=0, alpha=alpha) + spca_lasso.fit(Y) + assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) + + +@if_safe_multiprocessing_with_blas +def test_fit_transform_parallel(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0) + spca_lars.fit(Y) + U1 = spca_lars.transform(Y) + # Test multiple CPUs + spca = SparsePCA( + n_components=3, n_jobs=2, method="lars", alpha=alpha, random_state=0 + ).fit(Y) + U2 = spca.transform(Y) + assert not np.all(spca_lars.components_ == 0) + assert_array_almost_equal(U1, U2) + + +def test_transform_nan(): + # Test that SparsePCA won't return NaN when there is 0 feature in all + # samples. 
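+    # (Descriptive note: the first feature is zeroed out below, giving it zero
+    # variance; the check guards against that degenerate column producing NaN
+    # in the ridge-regression-based transform.)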
+ rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + Y[:, 0] = 0 + estimator = SparsePCA(n_components=8) + assert not np.any(np.isnan(estimator.fit_transform(Y))) + + +def test_fit_transform_tall(): + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array + spca_lars = SparsePCA(n_components=3, method="lars", random_state=rng) + U1 = spca_lars.fit_transform(Y) + spca_lasso = SparsePCA(n_components=3, method="cd", random_state=rng) + U2 = spca_lasso.fit(Y).transform(Y) + assert_array_almost_equal(U1, U2) + + +def test_initialization(): + rng = np.random.RandomState(0) + U_init = rng.randn(5, 3) + V_init = rng.randn(3, 4) + model = SparsePCA( + n_components=3, U_init=U_init, V_init=V_init, max_iter=0, random_state=rng + ) + model.fit(rng.randn(5, 4)) + assert_allclose(model.components_, V_init / np.linalg.norm(V_init, axis=1)[:, None]) + + +def test_mini_batch_correct_shapes(): + rng = np.random.RandomState(0) + X = rng.randn(12, 10) + pca = MiniBatchSparsePCA(n_components=8, max_iter=1, random_state=rng) + U = pca.fit_transform(X) + assert pca.components_.shape == (8, 10) + assert U.shape == (12, 8) + # test overcomplete decomposition + pca = MiniBatchSparsePCA(n_components=13, max_iter=1, random_state=rng) + U = pca.fit_transform(X) + assert pca.components_.shape == (13, 10) + assert U.shape == (12, 13) + + +# XXX: test always skipped +@pytest.mark.skipif(True, reason="skipping mini_batch_fit_transform.") +def test_mini_batch_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y) + U1 = spca_lars.transform(Y) + # Test multiple CPUs + if sys.platform == "win32": # fake parallelism for win32 + import joblib + + _mp = joblib.parallel.multiprocessing + joblib.parallel.multiprocessing = None + try: + spca = MiniBatchSparsePCA( + n_components=3, n_jobs=2, alpha=alpha, random_state=0 + ) + U2 = spca.fit(Y).transform(Y) + finally: + joblib.parallel.multiprocessing = _mp + else: # we can efficiently use parallelism + spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0) + U2 = spca.fit(Y).transform(Y) + assert not np.all(spca_lars.components_ == 0) + assert_array_almost_equal(U1, U2) + # Test that CD gives similar results + spca_lasso = MiniBatchSparsePCA( + n_components=3, method="cd", alpha=alpha, random_state=0 + ).fit(Y) + assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) + + +def test_scaling_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng) + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=rng) + results_train = spca_lars.fit_transform(Y) + results_test = spca_lars.transform(Y[:10]) + assert_allclose(results_train[0], results_test[0]) + + +def test_pca_vs_spca(): + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng) + Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) + spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2) + pca = PCA(n_components=2) + pca.fit(Y) + spca.fit(Y) + results_test_pca = pca.transform(Z) + results_test_spca = spca.transform(Z) + assert_allclose( + np.abs(spca.components_.dot(pca.components_.T)), np.eye(2), atol=1e-5 + ) + results_test_pca *= np.sign(results_test_pca[0, :]) + 
results_test_spca *= np.sign(results_test_spca[0, :]) + assert_allclose(results_test_pca, results_test_spca) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +@pytest.mark.parametrize("n_components", [None, 3]) +def test_spca_n_components_(SPCA, n_components): + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + model = SPCA(n_components=n_components).fit(X) + + if n_components is not None: + assert model.n_components_ == n_components + else: + assert model.n_components_ == n_features + + +@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA)) +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_sparse_pca_dtype_match(SPCA, method, data_type, expected_type): + # Verify output matrix dtype + n_samples, n_features, n_components = 12, 10, 3 + rng = np.random.RandomState(0) + input_array = rng.randn(n_samples, n_features).astype(data_type) + model = SPCA(n_components=n_components, method=method) + transformed = model.fit_transform(input_array) + + assert transformed.dtype == expected_type + assert model.components_.dtype == expected_type + + +@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA)) +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_sparse_pca_numerical_consistency(SPCA, method): + # Verify numericall consistentency among np.float32 and np.float64 + rtol = 1e-3 + alpha = 2 + n_samples, n_features, n_components = 12, 10, 3 + rng = np.random.RandomState(0) + input_array = rng.randn(n_samples, n_features) + + model_32 = SPCA( + n_components=n_components, alpha=alpha, method=method, random_state=0 + ) + transformed_32 = model_32.fit_transform(input_array.astype(np.float32)) + + model_64 = SPCA( + n_components=n_components, alpha=alpha, method=method, random_state=0 + ) + transformed_64 = model_64.fit_transform(input_array.astype(np.float64)) + + assert_allclose(transformed_64, transformed_32, rtol=rtol) + assert_allclose(model_64.components_, model_32.components_, rtol=rtol) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +def test_spca_feature_names_out(SPCA): + """Check feature names out for *SparsePCA.""" + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + model = SPCA(n_components=4).fit(X) + names = model.get_feature_names_out() + + estimator_name = SPCA.__name__.lower() + assert_array_equal([f"{estimator_name}{i}" for i in range(4)], names) + + +# TODO(1.6): remove in 1.6 +def test_spca_max_iter_None_deprecation(): + """Check that we raise a warning for the deprecation of `max_iter=None`.""" + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed" + with pytest.warns(FutureWarning, match=warn_msg): + MiniBatchSparsePCA(max_iter=None).fit(X) + + +def test_spca_early_stopping(global_random_seed): + """Check that `tol` and `max_no_improvement` act as early stopping.""" + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 50, 10 + X = rng.randn(n_samples, n_features) + + # vary the tolerance to force the early stopping of one of the model + model_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=0.5, random_state=global_random_seed + ).fit(X) + 
model_not_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-3, random_state=global_random_seed + ).fit(X) + assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ + + # force the max number of no improvement to a large value to check that + # it does help to early stop + model_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed + ).fit(X) + model_not_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed + ).fit(X) + assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ + + +def test_equivalence_components_pca_spca(global_random_seed): + """Check the equivalence of the components found by PCA and SparsePCA. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23932 + """ + rng = np.random.RandomState(global_random_seed) + X = rng.randn(50, 4) + + n_components = 2 + pca = PCA( + n_components=n_components, + svd_solver="randomized", + random_state=0, + ).fit(X) + spca = SparsePCA( + n_components=n_components, + method="lars", + ridge_alpha=0, + alpha=0, + random_state=0, + ).fit(X) + + assert_allclose(pca.components_, spca.components_) + + +def test_sparse_pca_inverse_transform(): + """Check that `inverse_transform` in `SparsePCA` and `PCA` are similar.""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + X = rng.randn(n_samples, n_features) + + n_components = 2 + spca = SparsePCA( + n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0 + ) + pca = PCA(n_components=n_components, random_state=0) + X_trans_spca = spca.fit_transform(X) + X_trans_pca = pca.fit_transform(X) + assert_allclose( + spca.inverse_transform(X_trans_spca), pca.inverse_transform(X_trans_pca) + ) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +def test_transform_inverse_transform_round_trip(SPCA): + """Check the `transform` and `inverse_transform` round trip with no loss of + information. + """ + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + X = rng.randn(n_samples, n_features) + + n_components = n_features + spca = SPCA( + n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0 + ) + X_trans_spca = spca.fit_transform(X) + assert_allclose(spca.inverse_transform(X_trans_spca), X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py new file mode 100644 index 0000000000000000000000000000000000000000..4edb7d4a111094ce7c3ddafb8ef7a3024d76a964 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py @@ -0,0 +1,212 @@ +"""Test truncated SVD transformer.""" + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn.decomposition import PCA, TruncatedSVD +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose, assert_array_less + +SVD_SOLVERS = ["arpack", "randomized"] + + +@pytest.fixture(scope="module") +def X_sparse(): + # Make an X that looks somewhat like a small tf-idf matrix. 
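+    # (Descriptive note: taking 1 + log of the nonzero entries mimics the
+    # sublinear term-frequency scaling used in tf-idf, so this sparse,
+    # nonnegative matrix stands in for a small document-term matrix.)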
+ rng = check_random_state(42) + X = sp.random(60, 55, density=0.2, format="csr", random_state=rng) + X.data[:] = 1 + np.log(X.data) + return X + + +@pytest.mark.parametrize("solver", ["randomized"]) +@pytest.mark.parametrize("kind", ("dense", "sparse")) +def test_solvers(X_sparse, solver, kind): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd_a = TruncatedSVD(30, algorithm="arpack") + svd = TruncatedSVD(30, algorithm=solver, random_state=42, n_oversamples=100) + + Xa = svd_a.fit_transform(X)[:, :6] + Xr = svd.fit_transform(X)[:, :6] + assert_allclose(Xa, Xr, rtol=2e-3) + + comp_a = np.abs(svd_a.components_) + comp = np.abs(svd.components_) + # All elements are equal, but some elements are more equal than others. + assert_allclose(comp_a[:9], comp[:9], rtol=1e-3) + assert_allclose(comp_a[9:], comp[9:], atol=1e-2) + + +@pytest.mark.parametrize("n_components", (10, 25, 41, 55)) +def test_attributes(n_components, X_sparse): + n_features = X_sparse.shape[1] + tsvd = TruncatedSVD(n_components).fit(X_sparse) + assert tsvd.n_components == n_components + assert tsvd.components_.shape == (n_components, n_features) + + +@pytest.mark.parametrize( + "algorithm, n_components", + [ + ("arpack", 55), + ("arpack", 56), + ("randomized", 56), + ], +) +def test_too_many_components(X_sparse, algorithm, n_components): + tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm) + with pytest.raises(ValueError): + tsvd.fit(X_sparse) + + +@pytest.mark.parametrize("fmt", ("array", "csr", "csc", "coo", "lil")) +def test_sparse_formats(fmt, X_sparse): + n_samples = X_sparse.shape[0] + Xfmt = X_sparse.toarray() if fmt == "dense" else getattr(X_sparse, "to" + fmt)() + tsvd = TruncatedSVD(n_components=11) + Xtrans = tsvd.fit_transform(Xfmt) + assert Xtrans.shape == (n_samples, 11) + Xtrans = tsvd.transform(Xfmt) + assert Xtrans.shape == (n_samples, 11) + + +@pytest.mark.parametrize("algo", SVD_SOLVERS) +def test_inverse_transform(algo, X_sparse): + # We need a lot of components for the reconstruction to be "almost + # equal" in all positions. XXX Test means or sums instead? 
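+    # (Descriptive note: the reconstruction error of a rank-k truncated SVD is
+    # governed by the discarded singular values, so keeping 52 of the at most
+    # 55 possible components leaves a residual small enough for an
+    # element-wise comparison with loose tolerances.)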
+ tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo) + Xt = tsvd.fit_transform(X_sparse) + Xinv = tsvd.inverse_transform(Xt) + assert_allclose(Xinv, X_sparse.toarray(), rtol=1e-1, atol=2e-1) + + +def test_integers(X_sparse): + n_samples = X_sparse.shape[0] + Xint = X_sparse.astype(np.int64) + tsvd = TruncatedSVD(n_components=6) + Xtrans = tsvd.fit_transform(Xint) + assert Xtrans.shape == (n_samples, tsvd.n_components) + + +@pytest.mark.parametrize("kind", ("dense", "sparse")) +@pytest.mark.parametrize("n_components", [10, 20]) +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_explained_variance(X_sparse, kind, n_components, solver): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd = TruncatedSVD(n_components, algorithm=solver) + X_tr = svd.fit_transform(X) + # Assert that all the values are greater than 0 + assert_array_less(0.0, svd.explained_variance_ratio_) + + # Assert that total explained variance is less than 1 + assert_array_less(svd.explained_variance_ratio_.sum(), 1.0) + + # Test that explained_variance is correct + total_variance = np.var(X_sparse.toarray(), axis=0).sum() + variances = np.var(X_tr, axis=0) + true_explained_variance_ratio = variances / total_variance + + assert_allclose( + svd.explained_variance_ratio_, + true_explained_variance_ratio, + ) + + +@pytest.mark.parametrize("kind", ("dense", "sparse")) +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_explained_variance_components_10_20(X_sparse, kind, solver): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd_10 = TruncatedSVD(10, algorithm=solver, n_iter=10).fit(X) + svd_20 = TruncatedSVD(20, algorithm=solver, n_iter=10).fit(X) + + # Assert the 1st component is equal + assert_allclose( + svd_10.explained_variance_ratio_, + svd_20.explained_variance_ratio_[:10], + rtol=5e-3, + ) + + # Assert that 20 components has higher explained variance than 10 + assert ( + svd_20.explained_variance_ratio_.sum() > svd_10.explained_variance_ratio_.sum() + ) + + +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_singular_values_consistency(solver): + # Check that the TruncatedSVD output has the correct singular values + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca = TruncatedSVD(n_components=2, algorithm=solver, random_state=rng).fit(X) + + # Compare to the Frobenius norm + X_pca = pca.transform(X) + assert_allclose( + np.sum(pca.singular_values_**2.0), + np.linalg.norm(X_pca, "fro") ** 2.0, + rtol=1e-2, + ) + + # Compare to the 2-norms of the score vectors + assert_allclose( + pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), rtol=1e-2 + ) + + +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_singular_values_expected(solver): + # Set the singular values and see what we get back + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 110 + + X = rng.randn(n_samples, n_features) + + pca = TruncatedSVD(n_components=3, algorithm=solver, random_state=rng) + X_pca = pca.fit_transform(X) + + X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) + X_pca[:, 0] *= 3.142 + X_pca[:, 1] *= 2.718 + + X_hat_pca = np.dot(X_pca, pca.components_) + pca.fit(X_hat_pca) + assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0], rtol=1e-14) + + +def test_truncated_svd_eq_pca(X_sparse): + # TruncatedSVD should be equal to PCA on centered data + + X_dense = X_sparse.toarray() + + X_c = X_dense - X_dense.mean(axis=0) + + params = dict(n_components=10, random_state=42) + + svd = 
TruncatedSVD(algorithm="arpack", **params) + pca = PCA(svd_solver="arpack", **params) + + Xt_svd = svd.fit_transform(X_c) + Xt_pca = pca.fit_transform(X_c) + + assert_allclose(Xt_svd, Xt_pca, rtol=1e-9) + assert_allclose(pca.mean_, 0, atol=1e-9) + assert_allclose(svd.components_, pca.components_) + + +@pytest.mark.parametrize( + "algorithm, tol", [("randomized", 0.0), ("arpack", 1e-6), ("arpack", 0.0)] +) +@pytest.mark.parametrize("kind", ("dense", "sparse")) +def test_fit_transform(X_sparse, algorithm, tol, kind): + # fit_transform(X) should equal fit(X).transform(X) + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd = TruncatedSVD( + n_components=5, n_iter=7, random_state=42, algorithm=algorithm, tol=tol + ) + X_transformed_1 = svd.fit_transform(X) + X_transformed_2 = svd.fit(X).transform(X) + assert_allclose(X_transformed_1, X_transformed_2) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7d316d95ada4dcc0534851b08b221ae3270174d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__init__.py @@ -0,0 +1,88 @@ +import typing + +from ._plot import LearningCurveDisplay, ValidationCurveDisplay +from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV +from ._split import ( + BaseCrossValidator, + BaseShuffleSplit, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + LeavePOut, + PredefinedSplit, + RepeatedKFold, + RepeatedStratifiedKFold, + ShuffleSplit, + StratifiedGroupKFold, + StratifiedKFold, + StratifiedShuffleSplit, + TimeSeriesSplit, + check_cv, + train_test_split, +) +from ._validation import ( + cross_val_predict, + cross_val_score, + cross_validate, + learning_curve, + permutation_test_score, + validation_curve, +) + +if typing.TYPE_CHECKING: + # Avoid errors in type checkers (e.g. mypy) for experimental estimators. + # TODO: remove this check once the estimator is no longer experimental. + from ._search_successive_halving import ( # noqa + HalvingGridSearchCV, + HalvingRandomSearchCV, + ) + + +__all__ = [ + "BaseCrossValidator", + "BaseShuffleSplit", + "GridSearchCV", + "TimeSeriesSplit", + "KFold", + "GroupKFold", + "GroupShuffleSplit", + "LeaveOneGroupOut", + "LeaveOneOut", + "LeavePGroupsOut", + "LeavePOut", + "RepeatedKFold", + "RepeatedStratifiedKFold", + "ParameterGrid", + "ParameterSampler", + "PredefinedSplit", + "RandomizedSearchCV", + "ShuffleSplit", + "StratifiedKFold", + "StratifiedGroupKFold", + "StratifiedShuffleSplit", + "check_cv", + "cross_val_predict", + "cross_val_score", + "cross_validate", + "learning_curve", + "LearningCurveDisplay", + "permutation_test_score", + "train_test_split", + "validation_curve", + "ValidationCurveDisplay", +] + + +# TODO: remove this check once the estimator is no longer experimental. +def __getattr__(name): + if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}: + raise ImportError( + f"{name} is experimental and the API might change without any " + "deprecation cycle. 
To use it, you need to explicitly import " + "enable_halving_search_cv:\n" + "from sklearn.experimental import enable_halving_search_cv" + ) + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bd30254479b90a720bb180591dcb7840d798ee4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09581dac510e8a0032c86e0a77dca6e6955d63c0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b89ed072a627251fbcc4fe1c95e8927ec30a597 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75b8b581325610ea8c72b570d0ff2eb7c5126f7b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..506c9a474f2cccebc9ea98f3ac76bb99d88200fe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c267c86881134518b8d60b9744cff96bb102459 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_plot.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..741c893ae2ea96db7dedc015afeb0b8d7cc9178a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_plot.py @@ -0,0 +1,907 @@ +import warnings + +import numpy as np + +from ..utils import check_matplotlib_support +from ..utils._plotting 
import _interval_max_min_ratio, _validate_score_name +from ._validation import learning_curve, validation_curve + + +class _BaseCurveDisplay: + def _plot_curve( + self, + x_data, + *, + ax=None, + negate_score=False, + score_name=None, + score_type="test", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + check_matplotlib_support(f"{self.__class__.__name__}.plot") + + import matplotlib.pyplot as plt + + if ax is None: + _, ax = plt.subplots() + + if negate_score: + train_scores, test_scores = -self.train_scores, -self.test_scores + else: + train_scores, test_scores = self.train_scores, self.test_scores + + if std_display_style not in ("errorbar", "fill_between", None): + raise ValueError( + f"Unknown std_display_style: {std_display_style}. Should be one of" + " 'errorbar', 'fill_between', or None." + ) + + if score_type not in ("test", "train", "both"): + raise ValueError( + f"Unknown score_type: {score_type}. Should be one of 'test', " + "'train', or 'both'." + ) + + if score_type == "train": + scores = {"Train": train_scores} + elif score_type == "test": + scores = {"Test": test_scores} + else: # score_type == "both" + scores = {"Train": train_scores, "Test": test_scores} + + if std_display_style in ("fill_between", None): + # plot the mean score + if line_kw is None: + line_kw = {} + + self.lines_ = [] + for line_label, score in scores.items(): + self.lines_.append( + *ax.plot( + x_data, + score.mean(axis=1), + label=line_label, + **line_kw, + ) + ) + self.errorbar_ = None + self.fill_between_ = None # overwritten below by fill_between + + if std_display_style == "errorbar": + if errorbar_kw is None: + errorbar_kw = {} + + self.errorbar_ = [] + for line_label, score in scores.items(): + self.errorbar_.append( + ax.errorbar( + x_data, + score.mean(axis=1), + score.std(axis=1), + label=line_label, + **errorbar_kw, + ) + ) + self.lines_, self.fill_between_ = None, None + elif std_display_style == "fill_between": + if fill_between_kw is None: + fill_between_kw = {} + default_fill_between_kw = {"alpha": 0.5} + fill_between_kw = {**default_fill_between_kw, **fill_between_kw} + + self.fill_between_ = [] + for line_label, score in scores.items(): + self.fill_between_.append( + ax.fill_between( + x_data, + score.mean(axis=1) - score.std(axis=1), + score.mean(axis=1) + score.std(axis=1), + **fill_between_kw, + ) + ) + + score_name = self.score_name if score_name is None else score_name + + ax.legend() + + # TODO(1.5): to be removed + if log_scale != "deprecated": + warnings.warn( + ( + "The `log_scale` parameter is deprecated as of version 1.3 " + "and will be removed in 1.5. You can use display.ax_.set_xscale " + "and display.ax_.set_yscale instead." + ), + FutureWarning, + ) + xscale = "log" if log_scale else "linear" + else: + # We found that a ratio, smaller or bigger than 5, between the largest and + # smallest gap of the x values is a good indicator to choose between linear + # and log scale. + if _interval_max_min_ratio(x_data) > 5: + xscale = "symlog" if x_data.min() <= 0 else "log" + else: + xscale = "linear" + ax.set_xscale(xscale) + ax.set_ylabel(f"{score_name}") + + self.ax_ = ax + self.figure_ = ax.figure + + +class LearningCurveDisplay(_BaseCurveDisplay): + """Learning Curve visualization. + + It is recommended to use + :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to + create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance. 
+ All parameters are stored as attributes.
+
+ Read more in the :ref:`User Guide ` for general information
+ about the visualization API and
+ :ref:`detailed documentation ` regarding the learning
+ curve visualization.
+
+ .. versionadded:: 1.2
+
+ Parameters
+ ----------
+ train_sizes : ndarray of shape (n_unique_ticks,)
+ Numbers of training examples that have been used to generate the
+ learning curve.
+
+ train_scores : ndarray of shape (n_ticks, n_cv_folds)
+ Scores on training sets.
+
+ test_scores : ndarray of shape (n_ticks, n_cv_folds)
+ Scores on test sets.
+
+ score_name : str, default=None
+ The name of the score used in `learning_curve`. It will override the name
+ inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
+ `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
+ string or a callable, we infer the name. We replace `_` by spaces and capitalize
+ the first letter. We remove `neg_` and replace it by `"Negative"` if
+ `negate_score` is `False` or just remove it otherwise.
+
+ Attributes
+ ----------
+ ax_ : matplotlib Axes
+ Axes with the learning curve.
+
+ figure_ : matplotlib Figure
+ Figure containing the learning curve.
+
+ errorbar_ : list of matplotlib Artist or None
+ When the `std_display_style` is `"errorbar"`, this is a list of
+ `matplotlib.container.ErrorbarContainer` objects. If another style is
+ used, `errorbar_` is `None`.
+
+ lines_ : list of matplotlib Artist or None
+ When the `std_display_style` is `"fill_between"`, this is a list of
+ `matplotlib.lines.Line2D` objects corresponding to the mean train and
+ test scores. If another style is used, `lines_` is `None`.
+
+ fill_between_ : list of matplotlib Artist or None
+ When the `std_display_style` is `"fill_between"`, this is a list of
+ `matplotlib.collections.PolyCollection` objects. If another style is
+ used, `fill_between_` is `None`.
+
+ See Also
+ --------
+ sklearn.model_selection.learning_curve : Compute the learning curve.
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> from sklearn.datasets import load_iris
+ >>> from sklearn.model_selection import LearningCurveDisplay, learning_curve
+ >>> from sklearn.tree import DecisionTreeClassifier
+ >>> X, y = load_iris(return_X_y=True)
+ >>> tree = DecisionTreeClassifier(random_state=0)
+ >>> train_sizes, train_scores, test_scores = learning_curve(
+ ... tree, X, y)
+ >>> display = LearningCurveDisplay(train_sizes=train_sizes,
+ ... train_scores=train_scores, test_scores=test_scores, score_name="Score")
+ >>> display.plot()
+ <...>
+ >>> plt.show()
+ """
+
+ def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None):
+ self.train_sizes = train_sizes
+ self.train_scores = train_scores
+ self.test_scores = test_scores
+ self.score_name = score_name
+
+ def plot(
+ self,
+ ax=None,
+ *,
+ negate_score=False,
+ score_name=None,
+ score_type="both",
+ log_scale="deprecated",
+ std_display_style="fill_between",
+ line_kw=None,
+ fill_between_kw=None,
+ errorbar_kw=None,
+ ):
+ """Plot visualization.
+
+ Parameters
+ ----------
+ ax : matplotlib Axes, default=None
+ Axes object to plot on. If `None`, a new figure and axes is
+ created.
+
+ negate_score : bool, default=False
+ Whether or not to negate the scores obtained through
+ :func:`~sklearn.model_selection.learning_curve`. This is
+ particularly useful when using the error denoted by `neg_*` in
+ `scikit-learn`.
+
+ score_name : str, default=None
+ The name of the score used to decorate the y-axis of the plot.
It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + log_scale : bool, default="deprecated" + Whether or not to use a logarithmic scale for the x-axis. + + .. deprecated:: 1.3 + `log_scale` is deprecated in 1.3 and will be removed in 1.5. + Use `display.ax_.set_xscale` and `display.ax_.set_yscale` instead. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If None, no standard deviation representation is + displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.LearningCurveDisplay` + Object that stores computed values. + """ + self._plot_curve( + self.train_sizes, + ax=ax, + negate_score=negate_score, + score_name=score_name, + score_type=score_type, + log_scale=log_scale, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + self.ax_.set_xlabel("Number of samples in the training set") + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + groups=None, + train_sizes=np.linspace(0.1, 1.0, 5), + cv=None, + scoring=None, + exploit_incremental_learning=False, + n_jobs=None, + pre_dispatch="all", + verbose=0, + shuffle=False, + random_state=None, + error_score=np.nan, + fit_params=None, + ax=None, + negate_score=False, + score_name=None, + score_type="both", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Create a learning curve display from an estimator. + + Read more in the :ref:`User Guide ` for general + information about the visualization API and :ref:`detailed + documentation ` regarding the learning curve + visualization. + + Parameters + ---------- + estimator : object type that implements the "fit" and "predict" methods + An object of that type which is cloned for each validation. + + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). 
+ + train_sizes : array-like of shape (n_ticks,), \ + default=np.linspace(0.1, 1.0, 5) + Relative or absolute numbers of training examples that will be used + to generate the learning curve. If the dtype is float, it is + regarded as a fraction of the maximum size of the training set + (that is determined by the selected validation method), i.e. it has + to be within (0, 1]. Otherwise it is interpreted as absolute sizes + of the training sets. Note that for classification the number of + samples usually have to be big enough to contain at least one + sample from each class. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and `y` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. In all + other cases, :class:`~sklearn.model_selection.KFold` is used. These + splitters are instantiated with `shuffle=False` so the splits will + be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + scoring : str or callable, default=None + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + `scorer(estimator, X, y)` (see :ref:`scoring`). + + exploit_incremental_learning : bool, default=False + If the estimator supports incremental learning, this will be + used to speed up fitting for different training set sizes. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + computing the score are parallelized over the different training + and test sets. `None` means 1 unless in a + :obj:`joblib.parallel_backend` context. `-1` means using all + processors. See :term:`Glossary ` for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + shuffle : bool, default=False + Whether to shuffle training data before taking prefixes of it + based on`train_sizes`. + + random_state : int, RandomState instance or None, default=None + Used when `shuffle` is True. Pass an int for reproducible + output across multiple function calls. + See :term:`Glossary `. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator + fitting. If set to 'raise', the error is raised. If a numeric value + is given, FitFailedWarning is raised. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.learning_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. 
If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + log_scale : bool, default="deprecated" + Whether or not to use a logarithmic scale for the x-axis. + + .. deprecated:: 1.3 + `log_scale` is deprecated in 1.3 and will be removed in 1.5. + Use `display.ax_.xscale` and `display.ax_.yscale` instead. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If `None`, no representation of the standard deviation + is displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.LearningCurveDisplay` + Object that stores computed values. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import LearningCurveDisplay + >>> from sklearn.tree import DecisionTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> tree = DecisionTreeClassifier(random_state=0) + >>> LearningCurveDisplay.from_estimator(tree, X, y) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + score_name = _validate_score_name(score_name, scoring, negate_score) + + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y, + groups=groups, + train_sizes=train_sizes, + cv=cv, + scoring=scoring, + exploit_incremental_learning=exploit_incremental_learning, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + verbose=verbose, + shuffle=shuffle, + random_state=random_state, + error_score=error_score, + return_times=False, + fit_params=fit_params, + ) + + viz = cls( + train_sizes=train_sizes, + train_scores=train_scores, + test_scores=test_scores, + score_name=score_name, + ) + return viz.plot( + ax=ax, + negate_score=negate_score, + score_type=score_type, + log_scale=log_scale, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + + +class ValidationCurveDisplay(_BaseCurveDisplay): + """Validation Curve visualization. + + It is recommended to use + :meth:`~sklearn.model_selection.ValidationCurveDisplay.from_estimator` to + create a :class:`~sklearn.model_selection.ValidationCurveDisplay` instance. + All parameters are stored as attributes. + + Read more in the :ref:`User Guide ` for general information + about the visualization API and :ref:`detailed documentation + ` regarding the validation curve visualization. + + .. versionadded:: 1.3 + + Parameters + ---------- + param_name : str + Name of the parameter that has been varied. + + param_range : array-like of shape (n_ticks,) + The values of the parameter that have been evaluated. 
+ + train_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on test set. + + score_name : str, default=None + The name of the score used in `validation_curve`. It will override the name + inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if + `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a + string or a callable, we infer the name. We replace `_` by spaces and capitalize + the first letter. We remove `neg_` and replace it by `"Negative"` if + `negate_score` is `False` or just remove it otherwise. + + Attributes + ---------- + ax_ : matplotlib Axes + Axes with the validation curve. + + figure_ : matplotlib Figure + Figure containing the validation curve. + + errorbar_ : list of matplotlib Artist or None + When the `std_display_style` is `"errorbar"`, this is a list of + `matplotlib.container.ErrorbarContainer` objects. If another style is + used, `errorbar_` is `None`. + + lines_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.lines.Line2D` objects corresponding to the mean train and + test scores. If another style is used, `line_` is `None`. + + fill_between_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.collections.PolyCollection` objects. If another style is + used, `fill_between_` is `None`. + + See Also + -------- + sklearn.model_selection.validation_curve : Compute the validation curve. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import ValidationCurveDisplay, validation_curve + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> train_scores, test_scores = validation_curve( + ... logistic_regression, X, y, param_name=param_name, param_range=param_range + ... ) + >>> display = ValidationCurveDisplay( + ... param_name=param_name, param_range=param_range, + ... train_scores=train_scores, test_scores=test_scores, score_name="Score" + ... ) + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__( + self, *, param_name, param_range, train_scores, test_scores, score_name=None + ): + self.param_name = param_name + self.param_range = param_range + self.train_scores = train_scores + self.test_scores = test_scores + self.score_name = score_name + + def plot( + self, + ax=None, + *, + negate_score=False, + score_name=None, + score_type="both", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Plot visualization. + + Parameters + ---------- + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.validation_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. 
If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If None, no standard deviation representation is + displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.ValidationCurveDisplay` + Object that stores computed values. + """ + self._plot_curve( + self.param_range, + ax=ax, + negate_score=negate_score, + score_name=score_name, + score_type=score_type, + log_scale="deprecated", + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + self.ax_.set_xlabel(f"{self.param_name}") + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + param_name, + param_range, + groups=None, + cv=None, + scoring=None, + n_jobs=None, + pre_dispatch="all", + verbose=0, + error_score=np.nan, + fit_params=None, + ax=None, + negate_score=False, + score_name=None, + score_type="both", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Create a validation curve display from an estimator. + + Read more in the :ref:`User Guide ` for general + information about the visualization API and :ref:`detailed + documentation ` regarding the validation curve + visualization. + + Parameters + ---------- + estimator : object type that implements the "fit" and "predict" methods + An object of that type which is cloned for each validation. + + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + param_name : str + Name of the parameter that will be varied. + + param_range : array-like of shape (n_values,) + The values of the parameter that will be evaluated. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. 
+ + For int/None inputs, if the estimator is a classifier and `y` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. In all + other cases, :class:`~sklearn.model_selection.KFold` is used. These + splitters are instantiated with `shuffle=False` so the splits will + be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + scoring : str or callable, default=None + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + `scorer(estimator, X, y)` (see :ref:`scoring`). + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + computing the score are parallelized over the different training + and test sets. `None` means 1 unless in a + :obj:`joblib.parallel_backend` context. `-1` means using all + processors. See :term:`Glossary ` for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator + fitting. If set to 'raise', the error is raised. If a numeric value + is given, FitFailedWarning is raised. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.validation_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If `None`, no representation of the standard deviation + is displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.ValidationCurveDisplay` + Object that stores computed values. 
+ + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import ValidationCurveDisplay + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> ValidationCurveDisplay.from_estimator( + ... logistic_regression, X, y, param_name=param_name, + ... param_range=param_range, + ... ) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + score_name = _validate_score_name(score_name, scoring, negate_score) + + train_scores, test_scores = validation_curve( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + groups=groups, + cv=cv, + scoring=scoring, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + verbose=verbose, + error_score=error_score, + fit_params=fit_params, + ) + + viz = cls( + param_name=param_name, + param_range=np.asarray(param_range), + train_scores=train_scores, + test_scores=test_scores, + score_name=score_name, + ) + return viz.plot( + ax=ax, + negate_score=negate_score, + score_type=score_type, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_search.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_search.py new file mode 100644 index 0000000000000000000000000000000000000000..9de03c2c663ec0b8165f2c42f9183d2da7164815 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_search.py @@ -0,0 +1,1918 @@ +""" +The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the +parameters of an estimator. 
+""" + +# Author: Alexandre Gramfort , +# Gael Varoquaux +# Andreas Mueller +# Olivier Grisel +# Raghav RV +# License: BSD 3 clause + +import numbers +import operator +import time +import warnings +from abc import ABCMeta, abstractmethod +from collections import defaultdict +from collections.abc import Iterable, Mapping, Sequence +from functools import partial, reduce +from itertools import product + +import numpy as np +from numpy.ma import MaskedArray +from scipy.stats import rankdata + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..exceptions import NotFittedError +from ..metrics import check_scoring +from ..metrics._scorer import ( + _check_multimetric_scoring, + _MultimetricScorer, + get_scorer_names, +) +from ..utils import Bunch, check_random_state +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import available_if +from ..utils.parallel import Parallel, delayed +from ..utils.random import sample_without_replacement +from ..utils.validation import _check_method_params, check_is_fitted, indexable +from ._split import check_cv +from ._validation import ( + _aggregate_score_dicts, + _fit_and_score, + _insert_error_scores, + _normalize_score_results, + _warn_or_raise_about_fit_failures, +) + +__all__ = ["GridSearchCV", "ParameterGrid", "ParameterSampler", "RandomizedSearchCV"] + + +class ParameterGrid: + """Grid of parameters with a discrete number of values for each. + + Can be used to iterate over parameter value combinations with the + Python built-in function iter. + The order of the generated parameter combinations is deterministic. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + param_grid : dict of str to sequence, or sequence of such + The parameter grid to explore, as a dictionary mapping estimator + parameters to sequences of allowed values. + + An empty dict signifies default parameters. + + A sequence of dicts signifies a sequence of grids to search, and is + useful to avoid exploring parameter combinations that make no sense + or have no effect. See the examples below. + + Examples + -------- + >>> from sklearn.model_selection import ParameterGrid + >>> param_grid = {'a': [1, 2], 'b': [True, False]} + >>> list(ParameterGrid(param_grid)) == ( + ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, + ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) + True + + >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] + >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, + ... {'kernel': 'rbf', 'gamma': 1}, + ... {'kernel': 'rbf', 'gamma': 10}] + True + >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} + True + + See Also + -------- + GridSearchCV : Uses :class:`ParameterGrid` to perform a full parallelized + parameter search. 
+ """ + + def __init__(self, param_grid): + if not isinstance(param_grid, (Mapping, Iterable)): + raise TypeError( + f"Parameter grid should be a dict or a list, got: {param_grid!r} of" + f" type {type(param_grid).__name__}" + ) + + if isinstance(param_grid, Mapping): + # wrap dictionary in a singleton list to support either dict + # or list of dicts + param_grid = [param_grid] + + # check if all entries are dictionaries of lists + for grid in param_grid: + if not isinstance(grid, dict): + raise TypeError(f"Parameter grid is not a dict ({grid!r})") + for key, value in grid.items(): + if isinstance(value, np.ndarray) and value.ndim > 1: + raise ValueError( + f"Parameter array for {key!r} should be one-dimensional, got:" + f" {value!r} with shape {value.shape}" + ) + if isinstance(value, str) or not isinstance( + value, (np.ndarray, Sequence) + ): + raise TypeError( + f"Parameter grid for parameter {key!r} needs to be a list or a" + f" numpy array, but got {value!r} (of type " + f"{type(value).__name__}) instead. Single values " + "need to be wrapped in a list with one element." + ) + if len(value) == 0: + raise ValueError( + f"Parameter grid for parameter {key!r} need " + f"to be a non-empty sequence, got: {value!r}" + ) + + self.param_grid = param_grid + + def __iter__(self): + """Iterate over the points in the grid. + + Returns + ------- + params : iterator over dict of str to any + Yields dictionaries mapping each estimator parameter to one of its + allowed values. + """ + for p in self.param_grid: + # Always sort the keys of a dictionary, for reproducibility + items = sorted(p.items()) + if not items: + yield {} + else: + keys, values = zip(*items) + for v in product(*values): + params = dict(zip(keys, v)) + yield params + + def __len__(self): + """Number of points on the grid.""" + # Product function that can handle iterables (np.prod can't). + product = partial(reduce, operator.mul) + return sum( + product(len(v) for v in p.values()) if p else 1 for p in self.param_grid + ) + + def __getitem__(self, ind): + """Get the parameters that would be ``ind``th in iteration + + Parameters + ---------- + ind : int + The iteration index + + Returns + ------- + params : dict of str to any + Equal to list(self)[ind] + """ + # This is used to make discrete sampling without replacement memory + # efficient. + for sub_grid in self.param_grid: + # XXX: could memoize information used here + if not sub_grid: + if ind == 0: + return {} + else: + ind -= 1 + continue + + # Reverse so most frequent cycling parameter comes first + keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) + sizes = [len(v_list) for v_list in values_lists] + total = np.prod(sizes) + + if ind >= total: + # Try the next grid + ind -= total + else: + out = {} + for key, v_list, n in zip(keys, values_lists, sizes): + ind, offset = divmod(ind, n) + out[key] = v_list[offset] + return out + + raise IndexError("ParameterGrid index out of range") + + +class ParameterSampler: + """Generator on parameters sampled from given distributions. + + Non-deterministic iterable over random candidate combinations for hyper- + parameter search. If all parameters are presented as a list, + sampling without replacement is performed. If at least one parameter + is given as a distribution, sampling with replacement is used. + It is highly recommended to use continuous distributions for continuous + parameters. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + param_distributions : dict + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_iter : int + Number of parameter settings that are produced. + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for random uniform sampling + from lists of possible values instead of scipy.stats distributions. + Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + Returns + ------- + params : dict of str to any + **Yields** dictionaries mapping each estimator parameter to + as sampled value. + + Examples + -------- + >>> from sklearn.model_selection import ParameterSampler + >>> from scipy.stats.distributions import expon + >>> import numpy as np + >>> rng = np.random.RandomState(0) + >>> param_grid = {'a':[1, 2], 'b': expon()} + >>> param_list = list(ParameterSampler(param_grid, n_iter=4, + ... random_state=rng)) + >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) + ... for d in param_list] + >>> rounded_list == [{'b': 0.89856, 'a': 1}, + ... {'b': 0.923223, 'a': 1}, + ... {'b': 1.878964, 'a': 2}, + ... {'b': 1.038159, 'a': 2}] + True + """ + + def __init__(self, param_distributions, n_iter, *, random_state=None): + if not isinstance(param_distributions, (Mapping, Iterable)): + raise TypeError( + "Parameter distribution is not a dict or a list," + f" got: {param_distributions!r} of type " + f"{type(param_distributions).__name__}" + ) + + if isinstance(param_distributions, Mapping): + # wrap dictionary in a singleton list to support either dict + # or list of dicts + param_distributions = [param_distributions] + + for dist in param_distributions: + if not isinstance(dist, dict): + raise TypeError( + "Parameter distribution is not a dict ({!r})".format(dist) + ) + for key in dist: + if not isinstance(dist[key], Iterable) and not hasattr( + dist[key], "rvs" + ): + raise TypeError( + f"Parameter grid for parameter {key!r} is not iterable " + f"or a distribution (value={dist[key]})" + ) + self.n_iter = n_iter + self.random_state = random_state + self.param_distributions = param_distributions + + def _is_all_lists(self): + return all( + all(not hasattr(v, "rvs") for v in dist.values()) + for dist in self.param_distributions + ) + + def __iter__(self): + rng = check_random_state(self.random_state) + + # if all distributions are given as lists, we want to sample without + # replacement + if self._is_all_lists(): + # look up sampled parameter settings in parameter grid + param_grid = ParameterGrid(self.param_distributions) + grid_size = len(param_grid) + n_iter = self.n_iter + + if grid_size < n_iter: + warnings.warn( + "The total space of parameters %d is smaller " + "than n_iter=%d. Running %d iterations. For exhaustive " + "searches, use GridSearchCV." 
% (grid_size, self.n_iter, grid_size), + UserWarning, + ) + n_iter = grid_size + for i in sample_without_replacement(grid_size, n_iter, random_state=rng): + yield param_grid[i] + + else: + for _ in range(self.n_iter): + dist = rng.choice(self.param_distributions) + # Always sort the keys of a dictionary, for reproducibility + items = sorted(dist.items()) + params = dict() + for k, v in items: + if hasattr(v, "rvs"): + params[k] = v.rvs(random_state=rng) + else: + params[k] = v[rng.randint(len(v))] + yield params + + def __len__(self): + """Number of points that will be sampled.""" + if self._is_all_lists(): + grid_size = len(ParameterGrid(self.param_distributions)) + return min(self.n_iter, grid_size) + else: + return self.n_iter + + +def _check_refit(search_cv, attr): + if not search_cv.refit: + raise AttributeError( + f"This {type(search_cv).__name__} instance was initialized with " + f"`refit=False`. {attr} is available only after refitting on the best " + "parameters. You can refit an estimator manually using the " + "`best_params_` attribute" + ) + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + Calling a prediction method will only be available if `refit=True`. In + such case, we check first the fitted best estimator. If it is not + fitted, we check the unfitted estimator. + + Checking the unfitted estimator allows to use `hasattr` on the `SearchCV` + instance even before calling `fit`. + """ + + def check(self): + _check_refit(self, attr) + if hasattr(self, "best_estimator_"): + # raise an AttributeError if `attr` does not exist + getattr(self.best_estimator_, attr) + return True + # raise an AttributeError if `attr` does not exist + getattr(self.estimator, attr) + return True + + return check + + +class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): + """Abstract base class for hyper parameter search with cross-validation.""" + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "n_jobs": [numbers.Integral, None], + "refit": ["boolean", str, callable], + "cv": ["cv_object"], + "verbose": ["verbose"], + "pre_dispatch": [numbers.Integral, str], + "error_score": [StrOptions({"raise"}), numbers.Real], + "return_train_score": ["boolean"], + } + + @abstractmethod + def __init__( + self, + estimator, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + error_score=np.nan, + return_train_score=True, + ): + self.scoring = scoring + self.estimator = estimator + self.n_jobs = n_jobs + self.refit = refit + self.cv = cv + self.verbose = verbose + self.pre_dispatch = pre_dispatch + self.error_score = error_score + self.return_train_score = return_train_score + + @property + def _estimator_type(self): + return self.estimator._estimator_type + + def _more_tags(self): + # allows cross-validation to see 'precomputed' metrics + return { + "pairwise": _safe_tags(self.estimator, "pairwise"), + "_xfail_checks": { + "check_supervised_y_2d": "DataConversionWarning not caught" + }, + } + + def score(self, X, y=None, **params): + """Return the score on the given data, if the estimator has been refit. + + This uses the score defined by ``scoring`` where provided, and the + ``best_estimator_.score`` method otherwise. 
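(Editor's sketch, not part of the diffed file; the dataset, split, and parameter grid below are illustrative.) In practice this means ``score`` reports the metric requested via ``scoring`` on the refit best estimator rather than the estimator's default ``score`` method::

    from sklearn.datasets import make_classification
    from sklearn.model_selection import GridSearchCV, train_test_split
    from sklearn.svm import SVC

    X, y = make_classification(random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    search = GridSearchCV(SVC(), {"C": [0.1, 1.0]}, scoring="balanced_accuracy")
    search.fit(X_train, y_train)
    # balanced accuracy of the refit best estimator, not SVC's plain accuracy
    print(search.score(X_test, y_test))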
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples, n_output) \ + or (n_samples,), default=None + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict + Parameters to be passed to the underlying scorer(s). + + ..versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + score : float + The score defined by ``scoring`` if provided, and the + ``best_estimator_.score`` method otherwise. + """ + _check_refit(self, "score") + check_is_fitted(self) + + _raise_for_params(params, self, "score") + + if _routing_enabled(): + score_params = process_routing(self, "score", **params).scorer["score"] + else: + score_params = dict() + + if self.scorer_ is None: + raise ValueError( + "No score function explicitly defined, " + "and the estimator doesn't provide one %s" + % self.best_estimator_ + ) + if isinstance(self.scorer_, dict): + if self.multimetric_: + scorer = self.scorer_[self.refit] + else: + scorer = self.scorer_ + return scorer(self.best_estimator_, X, y, **score_params) + + # callable + score = self.scorer_(self.best_estimator_, X, y, **score_params) + if self.multimetric_: + score = score[self.refit] + return score + + @available_if(_estimator_has("score_samples")) + def score_samples(self, X): + """Call score_samples on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``score_samples``. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements + of the underlying estimator. + + Returns + ------- + y_score : ndarray of shape (n_samples,) + The ``best_estimator_.score_samples`` method. + """ + check_is_fitted(self) + return self.best_estimator_.score_samples(X) + + @available_if(_estimator_has("predict")) + def predict(self, X): + """Call predict on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``predict``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The predicted labels or values for `X` based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.predict(X) + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Call predict_proba on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``predict_proba``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes) + Predicted class probabilities for `X` based on the estimator with + the best found parameters. The order of the classes corresponds + to that in the fitted attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.best_estimator_.predict_proba(X) + + @available_if(_estimator_has("predict_log_proba")) + def predict_log_proba(self, X): + """Call predict_log_proba on the estimator with the best found parameters. 
+ + Only available if ``refit=True`` and the underlying estimator supports + ``predict_log_proba``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes) + Predicted class log-probabilities for `X` based on the estimator + with the best found parameters. The order of the classes + corresponds to that in the fitted attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.best_estimator_.predict_log_proba(X) + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Call decision_function on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``decision_function``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_score : ndarray of shape (n_samples,) or (n_samples, n_classes) \ + or (n_samples, n_classes * (n_classes-1) / 2) + Result of the decision function for `X` based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.decision_function(X) + + @available_if(_estimator_has("transform")) + def transform(self, X): + """Call transform on the estimator with the best found parameters. + + Only available if the underlying estimator supports ``transform`` and + ``refit=True``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) + `X` transformed in the new space based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.transform(X) + + @available_if(_estimator_has("inverse_transform")) + def inverse_transform(self, Xt): + """Call inverse_transform on the estimator with the best found params. + + Only available if the underlying estimator implements + ``inverse_transform`` and ``refit=True``. + + Parameters + ---------- + Xt : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Result of the `inverse_transform` function for `Xt` based on the + estimator with the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.inverse_transform(Xt) + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`. + + Only available when `refit=True`. + """ + # For consistency with other estimators we raise a AttributeError so + # that hasattr() fails if the search estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.best_estimator_.n_features_in_ + + @property + def classes_(self): + """Class labels. + + Only available when `refit=True` and the estimator is a classifier. + """ + _estimator_has("classes_")(self) + return self.best_estimator_.classes_ + + def _run_search(self, evaluate_candidates): + """Repeatedly calls `evaluate_candidates` to conduct a search. 
+
+ This method, implemented in sub-classes, makes it possible to
+ customize the scheduling of evaluations: GridSearchCV and
+ RandomizedSearchCV schedule evaluations for their whole parameter
+ search space at once but other more sequential approaches are also
+ possible: for instance, it is possible to iteratively schedule evaluations
+ for new regions of the parameter search space based on previously
+ collected evaluation results. This makes it possible to implement
+ Bayesian optimization or more generally sequential model-based
+ optimization by deriving from the BaseSearchCV abstract base class.
+ For example, Successive Halving is implemented by calling
+ `evaluate_candidates` multiple times (once per iteration of the SH
+ process), each time passing a different set of candidates with `X`
+ and `y` of increasing sizes.
+
+ Parameters
+ ----------
+ evaluate_candidates : callable
+ This callback accepts:
+ - a list of candidates, where each candidate is a dict of
+ parameter settings.
+ - an optional `cv` parameter which can be used to e.g.
+ evaluate candidates on different dataset splits, or
+ evaluate candidates on subsampled data (as done in the
+ SuccessiveHalving estimators). By default, the original `cv`
+ parameter is used, and it is available as a private
+ `_checked_cv_orig` attribute.
+ - an optional `more_results` dict. Each key will be added to
+ the `cv_results_` attribute. Values should be lists of
+ length `n_candidates`.
+
+ It returns a dict of all results so far, formatted like
+ ``cv_results_``.
+
+ Important note (relevant whether the default cv is used or not):
+ in randomized splitters, and unless the random_state parameter of
+ cv was set to an int, calling cv.split() multiple times will
+ yield different splits. Since cv.split() is called in
+ evaluate_candidates, this means that candidates will be evaluated
+ on different splits each time evaluate_candidates is called. This
+ might be a methodological issue depending on the search strategy
+ that you're implementing. To prevent randomized splitters from
+ being used, you may use _split._yields_constant_splits()
+
+ Examples
+ --------
+
+ ::
+
+ def _run_search(self, evaluate_candidates):
+ 'Try C=0.1 only if C=1 is better than C=10'
+ all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
+ score = all_results['mean_test_score']
+ if score[0] < score[1]:
+ evaluate_candidates([{'C': 0.1}])
+ """
+ raise NotImplementedError("_run_search not implemented.")
+
+ def _check_refit_for_multimetric(self, scores):
+ """Check that `refit` is compatible with `scores`."""
+ multimetric_refit_msg = (
+ "For multi-metric scoring, the parameter refit must be set to a "
+ "scorer key or a callable to refit an estimator with the best "
+ "parameter setting on the whole data and make the best_* "
+ "attributes available for that metric. If this is not needed, "
+ f"refit should be set to False explicitly. {self.refit!r} was "
+ "passed."
+ )
+
+ valid_refit_dict = isinstance(self.refit, str) and self.refit in scores
+
+ if (
+ self.refit is not False
+ and not valid_refit_dict
+ and not callable(self.refit)
+ ):
+ raise ValueError(multimetric_refit_msg)
+
+ @staticmethod
+ def _select_best_index(refit, refit_metric, results):
+ """Select index of the best combination of hyperparameters."""
+ if callable(refit):
+ # If callable, refit is expected to return the index of the best
+ # parameter set.
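    # Editor's illustration (comments only, not part of the diffed file): such a
    # callable receives the full ``cv_results_`` dict and must return an integer
    # index, e.g. the fastest candidate within one standard deviation of the best
    # mean test score:
    #
    #     def refit_fast_near_best(cv_results):
    #         import numpy as np
    #         mean = np.asarray(cv_results["mean_test_score"])
    #         std = np.asarray(cv_results["std_test_score"])
    #         ok = np.flatnonzero(mean >= mean.max() - std[mean.argmax()])
    #         fit_time = np.asarray(cv_results["mean_fit_time"])
    #         return int(ok[fit_time[ok].argmin()])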
+ best_index = refit(results) + if not isinstance(best_index, numbers.Integral): + raise TypeError("best_index_ returned is not an integer") + if best_index < 0 or best_index >= len(results["params"]): + raise IndexError("best_index_ index out of range") + else: + best_index = results[f"rank_test_{refit_metric}"].argmin() + return best_index + + def _get_scorers(self, convert_multimetric): + """Get the scorer(s) to be used. + + This is used in ``fit`` and ``get_metadata_routing``. + + Parameters + ---------- + convert_multimetric : bool + Whether to convert a dict of scorers to a _MultimetricScorer. This + is used in ``get_metadata_routing`` to include the routing info for + multiple scorers. + + Returns + ------- + scorers, refit_metric + """ + refit_metric = "score" + + if callable(self.scoring): + scorers = self.scoring + elif self.scoring is None or isinstance(self.scoring, str): + scorers = check_scoring(self.estimator, self.scoring) + else: + scorers = _check_multimetric_scoring(self.estimator, self.scoring) + self._check_refit_for_multimetric(scorers) + refit_metric = self.refit + if convert_multimetric and isinstance(scorers, dict): + scorers = _MultimetricScorer( + scorers=scorers, raise_exc=(self.error_score == "raise") + ) + + return scorers, refit_metric + + def _get_routed_params_for_fit(self, params): + """Get the parameters to be used for routing. + + This is a method instead of a snippet in ``fit`` since it's used twice, + here in ``fit``, and in ``HalvingRandomSearchCV.fit``. + """ + if _routing_enabled(): + routed_params = process_routing(self, "fit", **params) + else: + params = params.copy() + groups = params.pop("groups", None) + routed_params = Bunch( + estimator=Bunch(fit=params), + splitter=Bunch(split={"groups": groups}), + scorer=Bunch(score={}), + ) + return routed_params + + @_fit_context( + # *SearchCV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **params): + """Run fit with all sets of parameters. + + Parameters + ---------- + + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples, n_output) \ + or (n_samples,), default=None + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict of str -> object + Parameters passed to the ``fit`` method of the estimator, the scorer, + and the CV splitter. + + If a fit parameter is an array-like whose length is equal to + `num_samples` then it will be split across CV groups along with `X` + and `y`. For example, the :term:`sample_weight` parameter is split + because `len(sample_weights) = len(X)`. + + Returns + ------- + self : object + Instance of fitted estimator. + """ + estimator = self.estimator + # Here we keep a dict of scorers as is, and only convert to a + # _MultimetricScorer at a later stage. 
Issue: + # https://github.com/scikit-learn/scikit-learn/issues/27001 + scorers, refit_metric = self._get_scorers(convert_multimetric=False) + + X, y = indexable(X, y) + params = _check_method_params(X, params=params) + + routed_params = self._get_routed_params_for_fit(params) + + cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator)) + n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split) + + base_estimator = clone(self.estimator) + + parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch) + + fit_and_score_kwargs = dict( + scorer=scorers, + fit_params=routed_params.estimator.fit, + score_params=routed_params.scorer.score, + return_train_score=self.return_train_score, + return_n_test_samples=True, + return_times=True, + return_parameters=False, + error_score=self.error_score, + verbose=self.verbose, + ) + results = {} + with parallel: + all_candidate_params = [] + all_out = [] + all_more_results = defaultdict(list) + + def evaluate_candidates(candidate_params, cv=None, more_results=None): + cv = cv or cv_orig + candidate_params = list(candidate_params) + n_candidates = len(candidate_params) + + if self.verbose > 0: + print( + "Fitting {0} folds for each of {1} candidates," + " totalling {2} fits".format( + n_splits, n_candidates, n_candidates * n_splits + ) + ) + + out = parallel( + delayed(_fit_and_score)( + clone(base_estimator), + X, + y, + train=train, + test=test, + parameters=parameters, + split_progress=(split_idx, n_splits), + candidate_progress=(cand_idx, n_candidates), + **fit_and_score_kwargs, + ) + for (cand_idx, parameters), (split_idx, (train, test)) in product( + enumerate(candidate_params), + enumerate(cv.split(X, y, **routed_params.splitter.split)), + ) + ) + + if len(out) < 1: + raise ValueError( + "No fits were performed. " + "Was the CV iterator empty? " + "Were there no candidates?" + ) + elif len(out) != n_candidates * n_splits: + raise ValueError( + "cv.split and cv.get_n_splits returned " + "inconsistent results. Expected {} " + "splits, got {}".format(n_splits, len(out) // n_candidates) + ) + + _warn_or_raise_about_fit_failures(out, self.error_score) + + # For callable self.scoring, the return type is only know after + # calling. If the return type is a dictionary, the error scores + # can now be inserted with the correct key. The type checking + # of out will be done in `_insert_error_scores`. 
+ if callable(self.scoring): + _insert_error_scores(out, self.error_score) + + all_candidate_params.extend(candidate_params) + all_out.extend(out) + + if more_results is not None: + for key, value in more_results.items(): + all_more_results[key].extend(value) + + nonlocal results + results = self._format_results( + all_candidate_params, n_splits, all_out, all_more_results + ) + + return results + + self._run_search(evaluate_candidates) + + # multimetric is determined here because in the case of a callable + # self.scoring the return type is only known after calling + first_test_score = all_out[0]["test_scores"] + self.multimetric_ = isinstance(first_test_score, dict) + + # check refit_metric now for a callabe scorer that is multimetric + if callable(self.scoring) and self.multimetric_: + self._check_refit_for_multimetric(first_test_score) + refit_metric = self.refit + + # For multi-metric evaluation, store the best_index_, best_params_ and + # best_score_ iff refit is one of the scorer names + # In single metric evaluation, refit_metric is "score" + if self.refit or not self.multimetric_: + self.best_index_ = self._select_best_index( + self.refit, refit_metric, results + ) + if not callable(self.refit): + # With a non-custom callable, we can select the best score + # based on the best index + self.best_score_ = results[f"mean_test_{refit_metric}"][ + self.best_index_ + ] + self.best_params_ = results["params"][self.best_index_] + + if self.refit: + # here we clone the estimator as well as the parameters, since + # sometimes the parameters themselves might be estimators, e.g. + # when we search over different estimators in a pipeline. + # ref: https://github.com/scikit-learn/scikit-learn/pull/26786 + self.best_estimator_ = clone(base_estimator).set_params( + **clone(self.best_params_, safe=False) + ) + + refit_start_time = time.time() + if y is not None: + self.best_estimator_.fit(X, y, **routed_params.estimator.fit) + else: + self.best_estimator_.fit(X, **routed_params.estimator.fit) + refit_end_time = time.time() + self.refit_time_ = refit_end_time - refit_start_time + + if hasattr(self.best_estimator_, "feature_names_in_"): + self.feature_names_in_ = self.best_estimator_.feature_names_in_ + + # Store the only scorer not as a dict for single metric evaluation + self.scorer_ = scorers + + self.cv_results_ = results + self.n_splits_ = n_splits + + return self + + def _format_results(self, candidate_params, n_splits, out, more_results=None): + n_candidates = len(candidate_params) + out = _aggregate_score_dicts(out) + + results = dict(more_results or {}) + for key, val in results.items(): + # each value is a list (as per evaluate_candidate's convention) + # we convert it to an array for consistency with the other keys + results[key] = np.asarray(val) + + def _store(key_name, array, weights=None, splits=False, rank=False): + """A small helper to store the scores/times to the cv_results_""" + # When iterated first by splits, then by parameters + # We want `array` to have `n_candidates` rows and `n_splits` cols. 
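+            # Worked example (illustrative only): with 2 candidates A, B and
+            # 3 splits, `array` arrives flattened in candidate-major order
+            # [A_split0, A_split1, A_split2, B_split0, B_split1, B_split2],
+            # so the reshape below yields one row per candidate and one
+            # column per split.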
+ array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits) + if splits: + for split_idx in range(n_splits): + # Uses closure to alter the results + results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx] + + array_means = np.average(array, axis=1, weights=weights) + results["mean_%s" % key_name] = array_means + + if key_name.startswith(("train_", "test_")) and np.any( + ~np.isfinite(array_means) + ): + warnings.warn( + ( + f"One or more of the {key_name.split('_')[0]} scores " + f"are non-finite: {array_means}" + ), + category=UserWarning, + ) + + # Weighted std is not directly available in numpy + array_stds = np.sqrt( + np.average( + (array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights + ) + ) + results["std_%s" % key_name] = array_stds + + if rank: + # When the fit/scoring fails `array_means` contains NaNs, we + # will exclude them from the ranking process and consider them + # as tied with the worst performers. + if np.isnan(array_means).all(): + # All fit/scoring routines failed. + rank_result = np.ones_like(array_means, dtype=np.int32) + else: + min_array_means = np.nanmin(array_means) - 1 + array_means = np.nan_to_num(array_means, nan=min_array_means) + rank_result = rankdata(-array_means, method="min").astype( + np.int32, copy=False + ) + results["rank_%s" % key_name] = rank_result + + _store("fit_time", out["fit_time"]) + _store("score_time", out["score_time"]) + # Use one MaskedArray and mask all the places where the param is not + # applicable for that candidate. Use defaultdict as each candidate may + # not contain all the params + param_results = defaultdict( + partial( + MaskedArray, + np.empty( + n_candidates, + ), + mask=True, + dtype=object, + ) + ) + for cand_idx, params in enumerate(candidate_params): + for name, value in params.items(): + # An all masked empty array gets created for the key + # `"param_%s" % name` at the first occurrence of `name`. + # Setting the value at an index also unmasks that index + param_results["param_%s" % name][cand_idx] = value + + results.update(param_results) + # Store a list of param dicts at the key 'params' + results["params"] = candidate_params + + test_scores_dict = _normalize_score_results(out["test_scores"]) + if self.return_train_score: + train_scores_dict = _normalize_score_results(out["train_scores"]) + + for scorer_name in test_scores_dict: + # Computed the (weighted) mean and std for test scores alone + _store( + "test_%s" % scorer_name, + test_scores_dict[scorer_name], + splits=True, + rank=True, + weights=None, + ) + if self.return_train_score: + _store( + "train_%s" % scorer_name, + train_scores_dict[scorer_name], + splits=True, + ) + + return results + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + router = MetadataRouter(owner=self.__class__.__name__) + router.add( + estimator=self.estimator, + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + + scorer, _ = self._get_scorers(convert_multimetric=True) + router.add( + scorer=scorer, + method_mapping=MethodMapping() + .add(caller="score", callee="score") + .add(caller="fit", callee="score"), + ) + router.add( + splitter=self.cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + return router + + +class GridSearchCV(BaseSearchCV): + """Exhaustive search over specified parameter values for an estimator. + + Important members are fit, predict. + + GridSearchCV implements a "fit" and a "score" method. + It also implements "score_samples", "predict", "predict_proba", + "decision_function", "transform" and "inverse_transform" if they are + implemented in the estimator used. + + The parameters of the estimator used to apply these methods are optimized + by cross-validated grid-search over a parameter grid. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_grid : dict or list of dictionaries + Dictionary with parameters names (`str`) as keys and lists of + parameter settings to try as values, or a list of such + dictionaries, in which case the grids spanned by each dictionary + in the list are explored. This enables searching over any sequence + of parameter settings. + + scoring : str, callable, list, tuple or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + refit : bool, str, or callable, default=True + Refit an estimator using the best found parameters on the whole + dataset. + + For multiple metric evaluation, this needs to be a `str` denoting the + scorer that would be used to find the best parameters for refitting + the estimator at the end. + + Where there are considerations other than maximum score in + choosing a best estimator, ``refit`` can be set to a function which + returns the selected ``best_index_`` given ``cv_results_``. In that + case, the ``best_estimator_`` and ``best_params_`` will be set + according to the returned ``best_index_`` while the ``best_score_`` + attribute will not be available. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``GridSearchCV`` instance. 
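+
+        As an illustrative sketch (``SVC`` and ``param_grid`` are placeholder
+        names here), with multiple metrics the scorer used for refitting is
+        named explicitly::
+
+            scoring = {"acc": "accuracy", "bal_acc": "balanced_accuracy"}
+            search = GridSearchCV(SVC(), param_grid, scoring=scoring,
+                                  refit="bal_acc")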
+ + Also for multiple metric evaluation, the attributes ``best_index_``, + ``best_score_`` and ``best_params_`` will only be available if + ``refit`` is set and all of them will be determined w.r.t this specific + scorer. + + See ``scoring`` parameter to know more about multiple metric + evaluation. + + See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py` + to see how to design a custom selection strategy using a callable + via `refit`. + + .. versionchanged:: 0.20 + Support for callable added. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : int + Controls the verbosity: the higher, the more messages. + + - >1 : the computation time for each fold and parameter candidate is + displayed; + - >2 : the score is also displayed; + - >3 : the fold and candidate parameter indexes are also displayed + together with the starting time of the computation. + + pre_dispatch : int, or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + Attributes + ---------- + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. 
+ + For instance the below given table + + +------------+-----------+------------+-----------------+---+---------+ + |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...| + +============+===========+============+=================+===+=========+ + | 'poly' | -- | 2 | 0.80 |...| 2 | + +------------+-----------+------------+-----------------+---+---------+ + | 'poly' | -- | 3 | 0.70 |...| 4 | + +------------+-----------+------------+-----------------+---+---------+ + | 'rbf' | 0.1 | -- | 0.80 |...| 3 | + +------------+-----------+------------+-----------------+---+---------+ + | 'rbf' | 0.2 | -- | 0.93 |...| 1 | + +------------+-----------+------------+-----------------+---+---------+ + + will be represented by a ``cv_results_`` dict of:: + + { + 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], + mask = [False False False False]...) + 'param_gamma': masked_array(data = [-- -- 0.1 0.2], + mask = [ True True False False]...), + 'param_degree': masked_array(data = [2.0 3.0 -- --], + mask = [False False True True]...), + 'split0_test_score' : [0.80, 0.70, 0.80, 0.93], + 'split1_test_score' : [0.82, 0.50, 0.70, 0.78], + 'mean_test_score' : [0.81, 0.60, 0.75, 0.85], + 'std_test_score' : [0.01, 0.10, 0.05, 0.08], + 'rank_test_score' : [2, 4, 3, 1], + 'split0_train_score' : [0.80, 0.92, 0.70, 0.93], + 'split1_train_score' : [0.82, 0.55, 0.70, 0.87], + 'mean_train_score' : [0.81, 0.74, 0.70, 0.90], + 'std_train_score' : [0.01, 0.19, 0.00, 0.03], + 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], + 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], + 'mean_score_time' : [0.01, 0.06, 0.04, 0.04], + 'std_score_time' : [0.00, 0.00, 0.00, 0.01], + 'params' : [{'kernel': 'poly', 'degree': 2}, ...], + } + + NOTE + + The key ``'params'`` is used to store a list of parameter + settings dicts for all the parameter candidates. + + The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and + ``std_score_time`` are all in seconds. + + For multi-metric evaluation, the scores for all the scorers are + available in the ``cv_results_`` dict at the keys ending with that + scorer's name (``'_'``) instead of ``'_score'`` shown + above. ('split0_test_precision', 'mean_train_precision' etc.) + + best_estimator_ : estimator + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. + + See ``refit`` parameter for more information on allowed values. + + best_score_ : float + Mean cross-validated score of the best_estimator + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + This attribute is not available if ``refit`` is a function. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + For multi-metric evaluation, this attribute holds the validated + ``scoring`` dict which maps the scorer key to the scorer callable. 
+ + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + .. versionadded:: 0.20 + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + ParameterGrid : Generates all the combinations of a hyperparameter grid. + train_test_split : Utility function to split the data into a development + set usable for fitting a GridSearchCV instance and an evaluation set + for its final evaluation. + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. + + Notes + ----- + The parameters selected are those that maximize the score of the left out + data, unless an explicit score is passed in which case it is used instead. + + If `n_jobs` was set to a value higher than one, the data is copied for each + point in the grid (and not `n_jobs` times). This is done for efficiency + reasons if individual jobs take very little time, but may raise errors if + the dataset is large and not enough memory is available. A workaround in + this case is to set `pre_dispatch`. Then, the memory is copied only + `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * + n_jobs`. + + Examples + -------- + >>> from sklearn import svm, datasets + >>> from sklearn.model_selection import GridSearchCV + >>> iris = datasets.load_iris() + >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} + >>> svc = svm.SVC() + >>> clf = GridSearchCV(svc, parameters) + >>> clf.fit(iris.data, iris.target) + GridSearchCV(estimator=SVC(), + param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')}) + >>> sorted(clf.cv_results_.keys()) + ['mean_fit_time', 'mean_score_time', 'mean_test_score',... + 'param_C', 'param_kernel', 'params',... + 'rank_test_score', 'split0_test_score',... + 'split2_test_score', ... 
+ 'std_fit_time', 'std_score_time', 'std_test_score'] + """ + + _required_parameters = ["estimator", "param_grid"] + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + "param_grid": [dict, list], + } + + def __init__( + self, + estimator, + param_grid, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + error_score=np.nan, + return_train_score=False, + ): + super().__init__( + estimator=estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + pre_dispatch=pre_dispatch, + error_score=error_score, + return_train_score=return_train_score, + ) + self.param_grid = param_grid + + def _run_search(self, evaluate_candidates): + """Search all candidates in param_grid""" + evaluate_candidates(ParameterGrid(self.param_grid)) + + +class RandomizedSearchCV(BaseSearchCV): + """Randomized search on hyper parameters. + + RandomizedSearchCV implements a "fit" and a "score" method. + It also implements "score_samples", "predict", "predict_proba", + "decision_function", "transform" and "inverse_transform" if they are + implemented in the estimator used. + + The parameters of the estimator used to apply these methods are optimized + by cross-validated search over parameter settings. + + In contrast to GridSearchCV, not all parameter values are tried out, but + rather a fixed number of parameter settings is sampled from the specified + distributions. The number of parameter settings that are tried is + given by n_iter. + + If all parameters are presented as a list, + sampling without replacement is performed. If at least one parameter + is given as a distribution, sampling with replacement is used. + It is highly recommended to use continuous distributions for continuous + parameters. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : estimator object + An object of that type is instantiated for each grid point. + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_distributions : dict or list of dicts + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_iter : int, default=10 + Number of parameter settings that are sampled. n_iter trades + off runtime vs quality of the solution. + + scoring : str, callable, list, tuple or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + If None, the estimator's score method is used. + + n_jobs : int, default=None + Number of jobs to run in parallel. 
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + refit : bool, str, or callable, default=True + Refit an estimator using the best found parameters on the whole + dataset. + + For multiple metric evaluation, this needs to be a `str` denoting the + scorer that would be used to find the best parameters for refitting + the estimator at the end. + + Where there are considerations other than maximum score in + choosing a best estimator, ``refit`` can be set to a function which + returns the selected ``best_index_`` given the ``cv_results``. In that + case, the ``best_estimator_`` and ``best_params_`` will be set + according to the returned ``best_index_`` while the ``best_score_`` + attribute will not be available. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``RandomizedSearchCV`` instance. + + Also for multiple metric evaluation, the attributes ``best_index_``, + ``best_score_`` and ``best_params_`` will only be available if + ``refit`` is set and all of them will be determined w.r.t this specific + scorer. + + See ``scoring`` parameter to know more about multiple metric + evaluation. + + .. versionchanged:: 0.20 + Support for callable added. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : int + Controls the verbosity: the higher, the more messages. + + - >1 : the computation time for each fold and parameter candidate is + displayed; + - >2 : the score is also displayed; + - >3 : the fold and candidate parameter indexes are also displayed + together with the starting time of the computation. + + pre_dispatch : int, or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for random uniform sampling + from lists of possible values instead of scipy.stats distributions. + Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. 
+ + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + Attributes + ---------- + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. + + For instance the below given table + + +--------------+-------------+-------------------+---+---------------+ + | param_kernel | param_gamma | split0_test_score |...|rank_test_score| + +==============+=============+===================+===+===============+ + | 'rbf' | 0.1 | 0.80 |...| 1 | + +--------------+-------------+-------------------+---+---------------+ + | 'rbf' | 0.2 | 0.84 |...| 3 | + +--------------+-------------+-------------------+---+---------------+ + | 'rbf' | 0.3 | 0.70 |...| 2 | + +--------------+-------------+-------------------+---+---------------+ + + will be represented by a ``cv_results_`` dict of:: + + { + 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], + mask = False), + 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), + 'split0_test_score' : [0.80, 0.84, 0.70], + 'split1_test_score' : [0.82, 0.50, 0.70], + 'mean_test_score' : [0.81, 0.67, 0.70], + 'std_test_score' : [0.01, 0.24, 0.00], + 'rank_test_score' : [1, 3, 2], + 'split0_train_score' : [0.80, 0.92, 0.70], + 'split1_train_score' : [0.82, 0.55, 0.70], + 'mean_train_score' : [0.81, 0.74, 0.70], + 'std_train_score' : [0.01, 0.19, 0.00], + 'mean_fit_time' : [0.73, 0.63, 0.43], + 'std_fit_time' : [0.01, 0.02, 0.01], + 'mean_score_time' : [0.01, 0.06, 0.04], + 'std_score_time' : [0.00, 0.00, 0.00], + 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], + } + + NOTE + + The key ``'params'`` is used to store a list of parameter + settings dicts for all the parameter candidates. + + The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and + ``std_score_time`` are all in seconds. + + For multi-metric evaluation, the scores for all the scorers are + available in the ``cv_results_`` dict at the keys ending with that + scorer's name (``'_'``) instead of ``'_score'`` shown + above. ('split0_test_precision', 'mean_train_precision' etc.) + + best_estimator_ : estimator + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. + + For multi-metric evaluation, this attribute is present only if + ``refit`` is specified. + + See ``refit`` parameter for more information on allowed values. + + best_score_ : float + Mean cross-validated score of the best_estimator. + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. 
+ + This attribute is not available if ``refit`` is a function. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + For multi-metric evaluation, this attribute holds the validated + ``scoring`` dict which maps the scorer key to the scorer callable. + + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + .. versionadded:: 0.20 + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + GridSearchCV : Does exhaustive search over a grid of parameters. + ParameterSampler : A generator over parameter settings, constructed from + param_distributions. + + Notes + ----- + The parameters selected are those that maximize the score of the held-out + data, according to the scoring parameter. + + If `n_jobs` was set to a value higher than one, the data is copied for each + parameter setting(and not `n_jobs` times). This is done for efficiency + reasons if individual jobs take very little time, but may raise errors if + the dataset is large and not enough memory is available. A workaround in + this case is to set `pre_dispatch`. Then, the memory is copied only + `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * + n_jobs`. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import RandomizedSearchCV + >>> from scipy.stats import uniform + >>> iris = load_iris() + >>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200, + ... random_state=0) + >>> distributions = dict(C=uniform(loc=0, scale=4), + ... 
penalty=['l2', 'l1']) + >>> clf = RandomizedSearchCV(logistic, distributions, random_state=0) + >>> search = clf.fit(iris.data, iris.target) + >>> search.best_params_ + {'C': 2..., 'penalty': 'l1'} + """ + + _required_parameters = ["estimator", "param_distributions"] + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + "param_distributions": [dict, list], + "n_iter": [Interval(numbers.Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator, + param_distributions, + *, + n_iter=10, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + random_state=None, + error_score=np.nan, + return_train_score=False, + ): + self.param_distributions = param_distributions + self.n_iter = n_iter + self.random_state = random_state + super().__init__( + estimator=estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + pre_dispatch=pre_dispatch, + error_score=error_score, + return_train_score=return_train_score, + ) + + def _run_search(self, evaluate_candidates): + """Search n_iter candidates from param_distributions""" + evaluate_candidates( + ParameterSampler( + self.param_distributions, self.n_iter, random_state=self.random_state + ) + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py new file mode 100644 index 0000000000000000000000000000000000000000..b1cf5ee50965cee9623516d002522ff0d2a502eb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py @@ -0,0 +1,1079 @@ +from abc import abstractmethod +from copy import deepcopy +from math import ceil, floor, log +from numbers import Integral, Real + +import numpy as np + +from ..base import _fit_context, is_classifier +from ..metrics._scorer import get_scorer_names +from ..utils import resample +from ..utils._param_validation import Interval, StrOptions +from ..utils.multiclass import check_classification_targets +from ..utils.validation import _num_samples +from . 
import ParameterGrid, ParameterSampler +from ._search import BaseSearchCV +from ._split import _yields_constant_splits, check_cv + +__all__ = ["HalvingGridSearchCV", "HalvingRandomSearchCV"] + + +class _SubsampleMetaSplitter: + """Splitter that subsamples a given fraction of the dataset""" + + def __init__(self, *, base_cv, fraction, subsample_test, random_state): + self.base_cv = base_cv + self.fraction = fraction + self.subsample_test = subsample_test + self.random_state = random_state + + def split(self, X, y, **kwargs): + for train_idx, test_idx in self.base_cv.split(X, y, **kwargs): + train_idx = resample( + train_idx, + replace=False, + random_state=self.random_state, + n_samples=int(self.fraction * len(train_idx)), + ) + if self.subsample_test: + test_idx = resample( + test_idx, + replace=False, + random_state=self.random_state, + n_samples=int(self.fraction * len(test_idx)), + ) + yield train_idx, test_idx + + +def _top_k(results, k, itr): + # Return the best candidates of a given iteration + iteration, mean_test_score, params = ( + np.asarray(a) + for a in (results["iter"], results["mean_test_score"], results["params"]) + ) + iter_indices = np.flatnonzero(iteration == itr) + scores = mean_test_score[iter_indices] + # argsort() places NaNs at the end of the array so we move NaNs to the + # front of the array so the last `k` items are the those with the + # highest scores. + sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores))) + return np.array(params[iter_indices][sorted_indices[-k:]]) + + +class BaseSuccessiveHalving(BaseSearchCV): + """Implements successive halving. + + Ref: + Almost optimal exploration in multi-armed bandits, ICML 13 + Zohar Karnin, Tomer Koren, Oren Somekh + """ + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + # overwrite `scoring` since multi-metrics are not supported + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "random_state": ["random_state"], + "max_resources": [ + Interval(Integral, 0, None, closed="neither"), + StrOptions({"auto"}), + ], + "min_resources": [ + Interval(Integral, 0, None, closed="neither"), + StrOptions({"exhaust", "smallest"}), + ], + "resource": [str], + "factor": [Interval(Real, 0, None, closed="neither")], + "aggressive_elimination": ["boolean"], + } + _parameter_constraints.pop("pre_dispatch") # not used in this class + + def __init__( + self, + estimator, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=5, + verbose=0, + random_state=None, + error_score=np.nan, + return_train_score=True, + max_resources="auto", + min_resources="exhaust", + resource="n_samples", + factor=3, + aggressive_elimination=False, + ): + super().__init__( + estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + error_score=error_score, + return_train_score=return_train_score, + ) + + self.random_state = random_state + self.max_resources = max_resources + self.resource = resource + self.factor = factor + self.min_resources = min_resources + self.aggressive_elimination = aggressive_elimination + + def _check_input_parameters(self, X, y, split_params): + # We need to enforce that successive calls to cv.split() yield the same + # splits: see https://github.com/scikit-learn/scikit-learn/issues/15149 + if not _yields_constant_splits(self._checked_cv_orig): + raise ValueError( + "The cv parameter must yield consistent folds across " + "calls to split(). Set its random_state to an int, or set " + "shuffle=False." 
+ ) + + if ( + self.resource != "n_samples" + and self.resource not in self.estimator.get_params() + ): + raise ValueError( + f"Cannot use resource={self.resource} which is not supported " + f"by estimator {self.estimator.__class__.__name__}" + ) + + if isinstance(self, HalvingRandomSearchCV): + if self.min_resources == self.n_candidates == "exhaust": + # for n_candidates=exhaust to work, we need to know what + # min_resources is. Similarly min_resources=exhaust needs to + # know the actual number of candidates. + raise ValueError( + "n_candidates and min_resources cannot be both set to 'exhaust'." + ) + + self.min_resources_ = self.min_resources + if self.min_resources_ in ("smallest", "exhaust"): + if self.resource == "n_samples": + n_splits = self._checked_cv_orig.get_n_splits(X, y, **split_params) + # please see https://gph.is/1KjihQe for a justification + magic_factor = 2 + self.min_resources_ = n_splits * magic_factor + if is_classifier(self.estimator): + y = self._validate_data(X="no_validation", y=y) + check_classification_targets(y) + n_classes = np.unique(y).shape[0] + self.min_resources_ *= n_classes + else: + self.min_resources_ = 1 + # if 'exhaust', min_resources_ might be set to a higher value later + # in _run_search + + self.max_resources_ = self.max_resources + if self.max_resources_ == "auto": + if not self.resource == "n_samples": + raise ValueError( + "resource can only be 'n_samples' when max_resources='auto'" + ) + self.max_resources_ = _num_samples(X) + + if self.min_resources_ > self.max_resources_: + raise ValueError( + f"min_resources_={self.min_resources_} is greater " + f"than max_resources_={self.max_resources_}." + ) + + if self.min_resources_ == 0: + raise ValueError( + f"min_resources_={self.min_resources_}: you might have passed " + "an empty dataset X." + ) + + @staticmethod + def _select_best_index(refit, refit_metric, results): + """Custom refit callable to return the index of the best candidate. + + We want the best candidate out of the last iteration. By default + BaseSearchCV would return the best candidate out of all iterations. + + Currently, we only support for a single metric thus `refit` and + `refit_metric` are not required. + """ + last_iter = np.max(results["iter"]) + last_iter_indices = np.flatnonzero(results["iter"] == last_iter) + + test_scores = results["mean_test_score"][last_iter_indices] + # If all scores are NaNs there is no way to pick between them, + # so we (arbitrarily) declare the zero'th entry the best one + if np.isnan(test_scores).all(): + best_idx = 0 + else: + best_idx = np.nanargmax(test_scores) + + return last_iter_indices[best_idx] + + @_fit_context( + # Halving*SearchCV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **params): + """Run fit with all sets of parameters. + + Parameters + ---------- + + X : array-like, shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_output), optional + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict of string -> object + Parameters passed to the ``fit`` method of the estimator. + + Returns + ------- + self : object + Instance of fitted estimator. 
+ """ + self._checked_cv_orig = check_cv( + self.cv, y, classifier=is_classifier(self.estimator) + ) + + routed_params = self._get_routed_params_for_fit(params) + self._check_input_parameters( + X=X, y=y, split_params=routed_params.splitter.split + ) + + self._n_samples_orig = _num_samples(X) + + super().fit(X, y=y, **params) + + # Set best_score_: BaseSearchCV does not set it, as refit is a callable + self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_] + + return self + + def _run_search(self, evaluate_candidates): + candidate_params = self._generate_candidate_params() + + if self.resource != "n_samples" and any( + self.resource in candidate for candidate in candidate_params + ): + # Can only check this now since we need the candidates list + raise ValueError( + f"Cannot use parameter {self.resource} as the resource since " + "it is part of the searched parameters." + ) + + # n_required_iterations is the number of iterations needed so that the + # last iterations evaluates less than `factor` candidates. + n_required_iterations = 1 + floor(log(len(candidate_params), self.factor)) + + if self.min_resources == "exhaust": + # To exhaust the resources, we want to start with the biggest + # min_resources possible so that the last (required) iteration + # uses as many resources as possible + last_iteration = n_required_iterations - 1 + self.min_resources_ = max( + self.min_resources_, + self.max_resources_ // self.factor**last_iteration, + ) + + # n_possible_iterations is the number of iterations that we can + # actually do starting from min_resources and without exceeding + # max_resources. Depending on max_resources and the number of + # candidates, this may be higher or smaller than + # n_required_iterations. + n_possible_iterations = 1 + floor( + log(self.max_resources_ // self.min_resources_, self.factor) + ) + + if self.aggressive_elimination: + n_iterations = n_required_iterations + else: + n_iterations = min(n_possible_iterations, n_required_iterations) + + if self.verbose: + print(f"n_iterations: {n_iterations}") + print(f"n_required_iterations: {n_required_iterations}") + print(f"n_possible_iterations: {n_possible_iterations}") + print(f"min_resources_: {self.min_resources_}") + print(f"max_resources_: {self.max_resources_}") + print(f"aggressive_elimination: {self.aggressive_elimination}") + print(f"factor: {self.factor}") + + self.n_resources_ = [] + self.n_candidates_ = [] + + for itr in range(n_iterations): + power = itr # default + if self.aggressive_elimination: + # this will set n_resources to the initial value (i.e. the + # value of n_resources at the first iteration) for as many + # iterations as needed (while candidates are being + # eliminated), and then go on as usual. 
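+                # Worked example (illustrative numbers only): with factor=3,
+                # 100 candidates and resources allowing
+                # n_possible_iterations=3, n_required_iterations is
+                # 1 + floor(log(100, 3)) = 5, so power = max(0, itr - 2):
+                # iterations 0, 1 and 2 all run at min_resources_, after
+                # which resources grow by `factor` each iteration.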
+ power = max(0, itr - n_required_iterations + n_possible_iterations) + + n_resources = int(self.factor**power * self.min_resources_) + # guard, probably not needed + n_resources = min(n_resources, self.max_resources_) + self.n_resources_.append(n_resources) + + n_candidates = len(candidate_params) + self.n_candidates_.append(n_candidates) + + if self.verbose: + print("-" * 10) + print(f"iter: {itr}") + print(f"n_candidates: {n_candidates}") + print(f"n_resources: {n_resources}") + + if self.resource == "n_samples": + # subsampling will be done in cv.split() + cv = _SubsampleMetaSplitter( + base_cv=self._checked_cv_orig, + fraction=n_resources / self._n_samples_orig, + subsample_test=True, + random_state=self.random_state, + ) + + else: + # Need copy so that the n_resources of next iteration does + # not overwrite + candidate_params = [c.copy() for c in candidate_params] + for candidate in candidate_params: + candidate[self.resource] = n_resources + cv = self._checked_cv_orig + + more_results = { + "iter": [itr] * n_candidates, + "n_resources": [n_resources] * n_candidates, + } + + results = evaluate_candidates( + candidate_params, cv, more_results=more_results + ) + + n_candidates_to_keep = ceil(n_candidates / self.factor) + candidate_params = _top_k(results, n_candidates_to_keep, itr) + + self.n_remaining_candidates_ = len(candidate_params) + self.n_required_iterations_ = n_required_iterations + self.n_possible_iterations_ = n_possible_iterations + self.n_iterations_ = n_iterations + + @abstractmethod + def _generate_candidate_params(self): + pass + + def _more_tags(self): + tags = deepcopy(super()._more_tags()) + tags["_xfail_checks"].update( + { + "check_fit2d_1sample": ( + "Fail during parameter check since min/max resources requires" + " more samples" + ), + } + ) + return tags + + +class HalvingGridSearchCV(BaseSuccessiveHalving): + """Search over specified parameter values with successive halving. + + The search strategy starts evaluating all the candidates with a small + amount of resources and iteratively selects the best candidates, using + more and more resources. + + Read more in the :ref:`User guide `. + + .. note:: + + This estimator is still **experimental** for now: the predictions + and the API might change without any deprecation cycle. To use it, + you need to explicitly import ``enable_halving_search_cv``:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> # now you can import normally from model_selection + >>> from sklearn.model_selection import HalvingGridSearchCV + + Parameters + ---------- + estimator : estimator object + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_grid : dict or list of dictionaries + Dictionary with parameters names (string) as keys and lists of + parameter settings to try as values, or a list of such + dictionaries, in which case the grids spanned by each dictionary + in the list are explored. This enables searching over any sequence + of parameter settings. + + factor : int or float, default=3 + The 'halving' parameter, which determines the proportion of candidates + that are selected for each subsequent iteration. For example, + ``factor=3`` means that only one third of the candidates are selected. + + resource : ``'n_samples'`` or str, default='n_samples' + Defines the resource that increases with each iteration. 
By default, + the resource is the number of samples. It can also be set to any + parameter of the base estimator that accepts positive integer + values, e.g. 'n_iterations' or 'n_estimators' for a gradient + boosting estimator. In this case ``max_resources`` cannot be 'auto' + and must be set explicitly. + + max_resources : int, default='auto' + The maximum amount of resource that any candidate is allowed to use + for a given iteration. By default, this is set to ``n_samples`` when + ``resource='n_samples'`` (default), else an error is raised. + + min_resources : {'exhaust', 'smallest'} or int, default='exhaust' + The minimum amount of resource that any candidate is allowed to use + for a given iteration. Equivalently, this defines the amount of + resources `r0` that are allocated for each candidate at the first + iteration. + + - 'smallest' is a heuristic that sets `r0` to a small value: + + - ``n_splits * 2`` when ``resource='n_samples'`` for a regression + problem + - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a + classification problem + - ``1`` when ``resource != 'n_samples'`` + + - 'exhaust' will set `r0` such that the **last** iteration uses as + much resources as possible. Namely, the last iteration will use the + highest value smaller than ``max_resources`` that is a multiple of + both ``min_resources`` and ``factor``. In general, using 'exhaust' + leads to a more accurate estimator, but is slightly more time + consuming. + + Note that the amount of resources used at each iteration is always a + multiple of ``min_resources``. + + aggressive_elimination : bool, default=False + This is only relevant in cases where there isn't enough resources to + reduce the remaining candidates to at most `factor` after the last + iteration. If ``True``, then the search process will 'replay' the + first iteration for as long as needed until the number of candidates + is small enough. This is ``False`` by default, which means that the + last iteration may evaluate more than ``factor`` candidates. See + :ref:`aggressive_elimination` for more details. + + cv : int, cross-validation generator or iterable, default=5 + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. note:: + Due to implementation details, the folds produced by `cv` must be + the same across multiple calls to `cv.split()`. For + built-in `scikit-learn` iterators, this can be achieved by + deactivating shuffling (`shuffle=False`), or by setting the + `cv`'s `random_state` parameter to an integer. + + scoring : str, callable, or None, default=None + A single string (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + If None, the estimator's score method is used. + + refit : bool, default=True + If True, refit an estimator using the best found parameters on the + whole dataset. 
+ + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``HalvingGridSearchCV`` instance. + + error_score : 'raise' or numeric + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. Default is ``np.nan``. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for subsampling the dataset + when `resources != 'n_samples'`. Ignored otherwise. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int or None, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int + Controls the verbosity: the higher, the more messages. + + Attributes + ---------- + n_resources_ : list of int + The amount of resources used at each iteration. + + n_candidates_ : list of int + The number of candidate parameters that were evaluated at each + iteration. + + n_remaining_candidates_ : int + The number of candidate parameters that are left after the last + iteration. It corresponds to `ceil(n_candidates[-1] / factor)` + + max_resources_ : int + The maximum number of resources that any candidate is allowed to use + for a given iteration. Note that since the number of resources used + at each iteration must be a multiple of ``min_resources_``, the + actual number of resources used at the last iteration may be smaller + than ``max_resources_``. + + min_resources_ : int + The amount of resources that are allocated for each candidate at the + first iteration. + + n_iterations_ : int + The actual number of iterations that were run. This is equal to + ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``. + Else, this is equal to ``min(n_possible_iterations_, + n_required_iterations_)``. + + n_possible_iterations_ : int + The number of iterations that are possible starting with + ``min_resources_`` resources and without exceeding + ``max_resources_``. + + n_required_iterations_ : int + The number of iterations that are required to end up with less than + ``factor`` candidates at the last iteration, starting with + ``min_resources_`` resources. This will be smaller than + ``n_possible_iterations_`` when there isn't enough resources. + + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. It contains lots of information + for analysing the results of a search. + Please refer to the :ref:`User guide` + for details. + + best_estimator_ : estimator or dict + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. 
Not available if ``refit=False``. + + best_score_ : float + Mean cross-validated score of the best_estimator. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + :class:`HalvingRandomSearchCV`: + Random search over a set of parameters using successive halving. + + Notes + ----- + The parameters selected are those that maximize the score of the held-out + data, according to the scoring parameter. + + All parameter combinations scored with a NaN will share the lowest rank. + + Examples + -------- + + >>> from sklearn.datasets import load_iris + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> from sklearn.model_selection import HalvingGridSearchCV + ... + >>> X, y = load_iris(return_X_y=True) + >>> clf = RandomForestClassifier(random_state=0) + ... + >>> param_grid = {"max_depth": [3, None], + ... "min_samples_split": [5, 10]} + >>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators', + ... max_resources=10, + ... 
random_state=0).fit(X, y) + >>> search.best_params_ # doctest: +SKIP + {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9} + """ + + _required_parameters = ["estimator", "param_grid"] + + _parameter_constraints: dict = { + **BaseSuccessiveHalving._parameter_constraints, + "param_grid": [dict, list], + } + + def __init__( + self, + estimator, + param_grid, + *, + factor=3, + resource="n_samples", + max_resources="auto", + min_resources="exhaust", + aggressive_elimination=False, + cv=5, + scoring=None, + refit=True, + error_score=np.nan, + return_train_score=True, + random_state=None, + n_jobs=None, + verbose=0, + ): + super().__init__( + estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + verbose=verbose, + cv=cv, + random_state=random_state, + error_score=error_score, + return_train_score=return_train_score, + max_resources=max_resources, + resource=resource, + factor=factor, + min_resources=min_resources, + aggressive_elimination=aggressive_elimination, + ) + self.param_grid = param_grid + + def _generate_candidate_params(self): + return ParameterGrid(self.param_grid) + + +class HalvingRandomSearchCV(BaseSuccessiveHalving): + """Randomized search on hyper parameters. + + The search strategy starts evaluating all the candidates with a small + amount of resources and iteratively selects the best candidates, using more + and more resources. + + The candidates are sampled at random from the parameter space and the + number of sampled candidates is determined by ``n_candidates``. + + Read more in the :ref:`User guide`. + + .. note:: + + This estimator is still **experimental** for now: the predictions + and the API might change without any deprecation cycle. To use it, + you need to explicitly import ``enable_halving_search_cv``:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> # now you can import normally from model_selection + >>> from sklearn.model_selection import HalvingRandomSearchCV + + Parameters + ---------- + estimator : estimator object + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_distributions : dict or list of dicts + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_candidates : "exhaust" or int, default="exhaust" + The number of candidate parameters to sample, at the first + iteration. Using 'exhaust' will sample enough candidates so that the + last iteration uses as many resources as possible, based on + `min_resources`, `max_resources` and `factor`. In this case, + `min_resources` cannot be 'exhaust'. + + factor : int or float, default=3 + The 'halving' parameter, which determines the proportion of candidates + that are selected for each subsequent iteration. For example, + ``factor=3`` means that only one third of the candidates are selected. + + resource : ``'n_samples'`` or str, default='n_samples' + Defines the resource that increases with each iteration. By default, + the resource is the number of samples. It can also be set to any + parameter of the base estimator that accepts positive integer + values, e.g. 
'n_iterations' or 'n_estimators' for a gradient + boosting estimator. In this case ``max_resources`` cannot be 'auto' + and must be set explicitly. + + max_resources : int, default='auto' + The maximum number of resources that any candidate is allowed to use + for a given iteration. By default, this is set ``n_samples`` when + ``resource='n_samples'`` (default), else an error is raised. + + min_resources : {'exhaust', 'smallest'} or int, default='smallest' + The minimum amount of resource that any candidate is allowed to use + for a given iteration. Equivalently, this defines the amount of + resources `r0` that are allocated for each candidate at the first + iteration. + + - 'smallest' is a heuristic that sets `r0` to a small value: + + - ``n_splits * 2`` when ``resource='n_samples'`` for a regression + problem + - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a + classification problem + - ``1`` when ``resource != 'n_samples'`` + + - 'exhaust' will set `r0` such that the **last** iteration uses as + much resources as possible. Namely, the last iteration will use the + highest value smaller than ``max_resources`` that is a multiple of + both ``min_resources`` and ``factor``. In general, using 'exhaust' + leads to a more accurate estimator, but is slightly more time + consuming. 'exhaust' isn't available when `n_candidates='exhaust'`. + + Note that the amount of resources used at each iteration is always a + multiple of ``min_resources``. + + aggressive_elimination : bool, default=False + This is only relevant in cases where there isn't enough resources to + reduce the remaining candidates to at most `factor` after the last + iteration. If ``True``, then the search process will 'replay' the + first iteration for as long as needed until the number of candidates + is small enough. This is ``False`` by default, which means that the + last iteration may evaluate more than ``factor`` candidates. See + :ref:`aggressive_elimination` for more details. + + cv : int, cross-validation generator or an iterable, default=5 + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. note:: + Due to implementation details, the folds produced by `cv` must be + the same across multiple calls to `cv.split()`. For + built-in `scikit-learn` iterators, this can be achieved by + deactivating shuffling (`shuffle=False`), or by setting the + `cv`'s `random_state` parameter to an integer. + + scoring : str, callable, or None, default=None + A single string (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + If None, the estimator's score method is used. + + refit : bool, default=True + If True, refit an estimator using the best found parameters on the + whole dataset. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``HalvingRandomSearchCV`` instance. 
+ + error_score : 'raise' or numeric + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. Default is ``np.nan``. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for subsampling the dataset + when `resources != 'n_samples'`. Also used for random uniform + sampling from lists of possible values instead of scipy.stats + distributions. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int or None, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int + Controls the verbosity: the higher, the more messages. + + Attributes + ---------- + n_resources_ : list of int + The amount of resources used at each iteration. + + n_candidates_ : list of int + The number of candidate parameters that were evaluated at each + iteration. + + n_remaining_candidates_ : int + The number of candidate parameters that are left after the last + iteration. It corresponds to `ceil(n_candidates[-1] / factor)` + + max_resources_ : int + The maximum number of resources that any candidate is allowed to use + for a given iteration. Note that since the number of resources used at + each iteration must be a multiple of ``min_resources_``, the actual + number of resources used at the last iteration may be smaller than + ``max_resources_``. + + min_resources_ : int + The amount of resources that are allocated for each candidate at the + first iteration. + + n_iterations_ : int + The actual number of iterations that were run. This is equal to + ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``. + Else, this is equal to ``min(n_possible_iterations_, + n_required_iterations_)``. + + n_possible_iterations_ : int + The number of iterations that are possible starting with + ``min_resources_`` resources and without exceeding + ``max_resources_``. + + n_required_iterations_ : int + The number of iterations that are required to end up with less than + ``factor`` candidates at the last iteration, starting with + ``min_resources_`` resources. This will be smaller than + ``n_possible_iterations_`` when there isn't enough resources. + + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. It contains lots of information + for analysing the results of a search. + Please refer to the :ref:`User guide` + for details. + + best_estimator_ : estimator or dict + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. 
+ + best_score_ : float + Mean cross-validated score of the best_estimator. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + :class:`HalvingGridSearchCV`: + Search over a grid of parameters using successive halving. + + Notes + ----- + The parameters selected are those that maximize the score of the held-out + data, according to the scoring parameter. + + All parameter combinations scored with a NaN will share the lowest rank. + + Examples + -------- + + >>> from sklearn.datasets import load_iris + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> from sklearn.model_selection import HalvingRandomSearchCV + >>> from scipy.stats import randint + >>> import numpy as np + ... + >>> X, y = load_iris(return_X_y=True) + >>> clf = RandomForestClassifier(random_state=0) + >>> np.random.seed(0) + ... + >>> param_distributions = {"max_depth": [3, None], + ... "min_samples_split": randint(2, 11)} + >>> search = HalvingRandomSearchCV(clf, param_distributions, + ... resource='n_estimators', + ... max_resources=10, + ... 
random_state=0).fit(X, y) + >>> search.best_params_ # doctest: +SKIP + {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9} + """ + + _required_parameters = ["estimator", "param_distributions"] + + _parameter_constraints: dict = { + **BaseSuccessiveHalving._parameter_constraints, + "param_distributions": [dict, list], + "n_candidates": [ + Interval(Integral, 0, None, closed="neither"), + StrOptions({"exhaust"}), + ], + } + + def __init__( + self, + estimator, + param_distributions, + *, + n_candidates="exhaust", + factor=3, + resource="n_samples", + max_resources="auto", + min_resources="smallest", + aggressive_elimination=False, + cv=5, + scoring=None, + refit=True, + error_score=np.nan, + return_train_score=True, + random_state=None, + n_jobs=None, + verbose=0, + ): + super().__init__( + estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + verbose=verbose, + cv=cv, + random_state=random_state, + error_score=error_score, + return_train_score=return_train_score, + max_resources=max_resources, + resource=resource, + factor=factor, + min_resources=min_resources, + aggressive_elimination=aggressive_elimination, + ) + self.param_distributions = param_distributions + self.n_candidates = n_candidates + + def _generate_candidate_params(self): + n_candidates_first_iter = self.n_candidates + if n_candidates_first_iter == "exhaust": + # This will generate enough candidate so that the last iteration + # uses as much resources as possible + n_candidates_first_iter = self.max_resources_ // self.min_resources_ + return ParameterSampler( + self.param_distributions, + n_candidates_first_iter, + random_state=self.random_state, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_split.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_split.py new file mode 100644 index 0000000000000000000000000000000000000000..1f89832daba227163f0639268deeebd7a26cae62 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_split.py @@ -0,0 +1,2794 @@ +""" +The :mod:`sklearn.model_selection._split` module includes classes and +functions to split the data based on a preset strategy. +""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Olivier Grisel +# Raghav RV +# Leandro Hermida +# Rodion Martynov +# License: BSD 3 clause + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from collections import defaultdict +from collections.abc import Iterable +from inspect import signature +from itertools import chain, combinations +from math import ceil, floor + +import numpy as np +from scipy.special import comb + +from ..utils import ( + _approximate_mode, + _safe_indexing, + check_random_state, + indexable, + metadata_routing, +) +from ..utils._param_validation import Interval, RealNotInt, validate_params +from ..utils.metadata_routing import _MetadataRequester +from ..utils.multiclass import type_of_target +from ..utils.validation import _num_samples, check_array, column_or_1d + +__all__ = [ + "BaseCrossValidator", + "KFold", + "GroupKFold", + "LeaveOneGroupOut", + "LeaveOneOut", + "LeavePGroupsOut", + "LeavePOut", + "RepeatedStratifiedKFold", + "RepeatedKFold", + "ShuffleSplit", + "GroupShuffleSplit", + "StratifiedKFold", + "StratifiedGroupKFold", + "StratifiedShuffleSplit", + "PredefinedSplit", + "train_test_split", + "check_cv", +] + + +class GroupsConsumerMixin(_MetadataRequester): + """A Mixin to ``groups`` by default. + + This Mixin makes the object to request ``groups`` by default as ``True``. 
+ + .. versionadded:: 1.3 + """ + + __metadata_request__split = {"groups": True} + + +class BaseCrossValidator(_MetadataRequester, metaclass=ABCMeta): + """Base class for all cross-validators. + + Implementations must define `_iter_test_masks` or `_iter_test_indices`. + """ + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. + __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + indices = np.arange(_num_samples(X)) + for test_index in self._iter_test_masks(X, y, groups): + train_index = indices[np.logical_not(test_index)] + test_index = indices[test_index] + yield train_index, test_index + + # Since subclasses must implement either _iter_test_masks or + # _iter_test_indices, neither can be abstract. + def _iter_test_masks(self, X=None, y=None, groups=None): + """Generates boolean masks corresponding to test sets. + + By default, delegates to _iter_test_indices(X, y, groups) + """ + for test_index in self._iter_test_indices(X, y, groups): + test_mask = np.zeros(_num_samples(X), dtype=bool) + test_mask[test_index] = True + yield test_mask + + def _iter_test_indices(self, X=None, y=None, groups=None): + """Generates integer indices corresponding to test sets.""" + raise NotImplementedError + + @abstractmethod + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator.""" + + def __repr__(self): + return _build_repr(self) + + +class LeaveOneOut(BaseCrossValidator): + """Leave-One-Out cross-validator. + + Provides train/test indices to split data in train/test sets. Each + sample is used once as a test set (singleton) while the remaining + samples form the training set. + + Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and + ``LeavePOut(p=1)`` where ``n`` is the number of samples. + + Due to the high number of test sets (which is the same as the + number of samples) this cross-validation method can be very costly. + For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit` + or :class:`StratifiedKFold`. + + Read more in the :ref:`User Guide `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeaveOneOut + >>> X = np.array([[1, 2], [3, 4]]) + >>> y = np.array([1, 2]) + >>> loo = LeaveOneOut() + >>> loo.get_n_splits(X) + 2 + >>> print(loo) + LeaveOneOut() + >>> for i, (train_index, test_index) in enumerate(loo.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + Fold 0: + Train: index=[1] + Test: index=[0] + Fold 1: + Train: index=[0] + Test: index=[1] + + See Also + -------- + LeaveOneGroupOut : For splitting the data according to explicit, + domain-specific stratification of the dataset. + GroupKFold : K-fold iterator variant with non-overlapping groups. + """ + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + if n_samples <= 1: + raise ValueError( + "Cannot perform LeaveOneOut with n_samples={}.".format(n_samples) + ) + return range(n_samples) + + def get_n_splits(self, X, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + if X is None: + raise ValueError("The 'X' parameter should not be None.") + return _num_samples(X) + + +class LeavePOut(BaseCrossValidator): + """Leave-P-Out cross-validator. + + Provides train/test indices to split data in train/test sets. This results + in testing on all distinct samples of size p, while the remaining n - p + samples form the training set in each iteration. + + Note: ``LeavePOut(p)`` is NOT equivalent to + ``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets. + + Due to the high number of iterations which grows combinatorically with the + number of samples this cross-validation method can be very costly. For + large datasets one should favor :class:`KFold`, :class:`StratifiedKFold` + or :class:`ShuffleSplit`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + p : int + Size of the test sets. Must be strictly less than the number of + samples. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeavePOut + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + >>> y = np.array([1, 2, 3, 4]) + >>> lpo = LeavePOut(2) + >>> lpo.get_n_splits(X) + 6 + >>> print(lpo) + LeavePOut(p=2) + >>> for i, (train_index, test_index) in enumerate(lpo.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[2 3] + Test: index=[0 1] + Fold 1: + Train: index=[1 3] + Test: index=[0 2] + Fold 2: + Train: index=[1 2] + Test: index=[0 3] + Fold 3: + Train: index=[0 3] + Test: index=[1 2] + Fold 4: + Train: index=[0 2] + Test: index=[1 3] + Fold 5: + Train: index=[0 1] + Test: index=[2 3] + """ + + def __init__(self, p): + self.p = p + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + if n_samples <= self.p: + raise ValueError( + "p={} must be strictly less than the number of samples={}".format( + self.p, n_samples + ) + ) + for combination in combinations(range(n_samples), self.p): + yield np.array(combination) + + def get_n_splits(self, X, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : object + Always ignored, exists for compatibility. 
+ + groups : object + Always ignored, exists for compatibility. + """ + if X is None: + raise ValueError("The 'X' parameter should not be None.") + return int(comb(_num_samples(X), self.p, exact=True)) + + +class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta): + """Base class for K-Fold cross-validators and TimeSeriesSplit.""" + + @abstractmethod + def __init__(self, n_splits, *, shuffle, random_state): + if not isinstance(n_splits, numbers.Integral): + raise ValueError( + "The number of folds must be of Integral type. " + "%s of type %s was passed." % (n_splits, type(n_splits)) + ) + n_splits = int(n_splits) + + if n_splits <= 1: + raise ValueError( + "k-fold cross-validation requires at least one" + " train/test split by setting n_splits=2 or more," + " got n_splits={0}.".format(n_splits) + ) + + if not isinstance(shuffle, bool): + raise TypeError("shuffle must be True or False; got {0}".format(shuffle)) + + if not shuffle and random_state is not None: # None is the default + raise ValueError( + ( + "Setting a random_state has no effect since shuffle is " + "False. You should leave " + "random_state to its default (None), or set shuffle=True." + ), + ) + + self.n_splits = n_splits + self.shuffle = shuffle + self.random_state = random_state + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + n_samples = _num_samples(X) + if self.n_splits > n_samples: + raise ValueError( + ( + "Cannot have number of splits n_splits={0} greater" + " than the number of samples: n_samples={1}." + ).format(self.n_splits, n_samples) + ) + + for train, test in super().split(X, y, groups): + yield train, test + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return self.n_splits + + +class KFold(_BaseKFold): + """K-Fold cross-validator. + + Provides train/test indices to split data in train/test sets. Split + dataset into k consecutive folds (without shuffling by default). + + Each fold is then used once as a validation while the k - 1 remaining + folds form the training set. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. 
+ + shuffle : bool, default=False + Whether to shuffle the data before splitting into batches. + Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold. Otherwise, this + parameter has no effect. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import KFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([1, 2, 3, 4]) + >>> kf = KFold(n_splits=2) + >>> kf.get_n_splits(X) + 2 + >>> print(kf) + KFold(n_splits=2, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(kf.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[2 3] + Test: index=[0 1] + Fold 1: + Train: index=[0 1] + Test: index=[2 3] + + Notes + ----- + The first ``n_samples % n_splits`` folds have size + ``n_samples // n_splits + 1``, other folds have size + ``n_samples // n_splits``, where ``n_samples`` is the number of samples. + + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + StratifiedKFold : Takes class information into account to avoid building + folds with imbalanced class distributions (for binary or multiclass + classification tasks). + + GroupKFold : K-fold iterator variant with non-overlapping groups. + + RepeatedKFold : Repeats K-Fold n times. + """ + + def __init__(self, n_splits=5, *, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + indices = np.arange(n_samples) + if self.shuffle: + check_random_state(self.random_state).shuffle(indices) + + n_splits = self.n_splits + fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int) + fold_sizes[: n_samples % n_splits] += 1 + current = 0 + for fold_size in fold_sizes: + start, stop = current, current + fold_size + yield indices[start:stop] + current = stop + + +class GroupKFold(GroupsConsumerMixin, _BaseKFold): + """K-fold iterator variant with non-overlapping groups. + + Each group will appear exactly once in the test set across all folds (the + number of distinct groups has to be at least equal to the number of folds). + + The folds are approximately balanced in the sense that the number of + distinct groups is approximately the same in each fold. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + Notes + ----- + Groups appear in an arbitrary order throughout the folds. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import GroupKFold + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> groups = np.array([0, 0, 2, 2, 3, 3]) + >>> group_kfold = GroupKFold(n_splits=2) + >>> group_kfold.get_n_splits(X, y, groups) + 2 + >>> print(group_kfold) + GroupKFold(n_splits=2) + >>> for i, (train_index, test_index) in enumerate(group_kfold.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3], group=[2 2] + Test: index=[0 1 4 5], group=[0 0 3 3] + Fold 1: + Train: index=[0 1 4 5], group=[0 0 3 3] + Test: index=[2 3], group=[2 2] + + See Also + -------- + LeaveOneGroupOut : For splitting the data according to explicit + domain-specific stratification of the dataset. + + StratifiedKFold : Takes class information into account to avoid building + folds with imbalanced class proportions (for binary or multiclass + classification tasks). + """ + + def __init__(self, n_splits=5): + super().__init__(n_splits, shuffle=False, random_state=None) + + def _iter_test_indices(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + + unique_groups, groups = np.unique(groups, return_inverse=True) + n_groups = len(unique_groups) + + if self.n_splits > n_groups: + raise ValueError( + "Cannot have number of splits n_splits=%d greater" + " than the number of groups: %d." % (self.n_splits, n_groups) + ) + + # Weight groups by their number of occurrences + n_samples_per_group = np.bincount(groups) + + # Distribute the most frequent groups first + indices = np.argsort(n_samples_per_group)[::-1] + n_samples_per_group = n_samples_per_group[indices] + + # Total weight of each fold + n_samples_per_fold = np.zeros(self.n_splits) + + # Mapping from group index to fold index + group_to_fold = np.zeros(len(unique_groups)) + + # Distribute samples by adding the largest weight to the lightest fold + for group_index, weight in enumerate(n_samples_per_group): + lightest_fold = np.argmin(n_samples_per_fold) + n_samples_per_fold[lightest_fold] += weight + group_to_fold[indices[group_index]] = lightest_fold + + indices = group_to_fold[groups] + + for f in range(self.n_splits): + yield np.where(indices == f)[0] + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class StratifiedKFold(_BaseKFold): + """Stratified K-Fold cross-validator. + + Provides train/test indices to split data in train/test sets. + + This cross-validation object is a variation of KFold that returns + stratified folds. 
The folds are made by preserving the percentage of + samples for each class. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + shuffle : bool, default=False + Whether to shuffle each class's samples before splitting into batches. + Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold for each class. + Otherwise, leave `random_state` as `None`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> skf = StratifiedKFold(n_splits=2) + >>> skf.get_n_splits(X, y) + 2 + >>> print(skf) + StratifiedKFold(n_splits=2, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(skf.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3] + Test: index=[0 2] + Fold 1: + Train: index=[0 2] + Test: index=[1 3] + + Notes + ----- + The implementation is designed to: + + * Generate test sets such that all contain the same distribution of + classes, or as close as possible. + * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to + ``y = [1, 0]`` should not change the indices generated. + * Preserve order dependencies in the dataset ordering, when + ``shuffle=False``: all samples from class k in some test set were + contiguous in y, or separated in y by samples from classes other than k. + * Generate test sets where the smallest and largest differ by at most one + sample. + + .. versionchanged:: 0.22 + The previous implementation did not follow the last constraint. + + See Also + -------- + RepeatedStratifiedKFold : Repeats Stratified K-Fold n times. + """ + + def __init__(self, n_splits=5, *, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _make_test_folds(self, X, y=None): + rng = check_random_state(self.random_state) + y = np.asarray(y) + type_of_target_y = type_of_target(y) + allowed_target_types = ("binary", "multiclass") + if type_of_target_y not in allowed_target_types: + raise ValueError( + "Supported target types are: {}. Got {!r} instead.".format( + allowed_target_types, type_of_target_y + ) + ) + + y = column_or_1d(y) + + _, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True) + # y_inv encodes y according to lexicographic order. We invert y_idx to + # map the classes so that they are encoded by order of appearance: + # 0 represents the first label appearing in y, 1 the second, etc. 
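+        # For example, with y = ["b", "a", "b", "c"]: np.unique gives
+        # y_idx = [1, 0, 3] and y_inv = [1, 0, 1, 2], and the remapping below
+        # yields y_encoded = [0, 1, 0, 2], i.e. "b" -> 0, "a" -> 1, "c" -> 2
+        # (labels are numbered by order of first appearance in y).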
+ _, class_perm = np.unique(y_idx, return_inverse=True) + y_encoded = class_perm[y_inv] + + n_classes = len(y_idx) + y_counts = np.bincount(y_encoded) + min_groups = np.min(y_counts) + if np.all(self.n_splits > y_counts): + raise ValueError( + "n_splits=%d cannot be greater than the" + " number of members in each class." % (self.n_splits) + ) + if self.n_splits > min_groups: + warnings.warn( + "The least populated class in y has only %d" + " members, which is less than n_splits=%d." + % (min_groups, self.n_splits), + UserWarning, + ) + + # Determine the optimal number of samples from each class in each fold, + # using round robin over the sorted y. (This can be done direct from + # counts, but that code is unreadable.) + y_order = np.sort(y_encoded) + allocation = np.asarray( + [ + np.bincount(y_order[i :: self.n_splits], minlength=n_classes) + for i in range(self.n_splits) + ] + ) + + # To maintain the data order dependencies as best as possible within + # the stratification constraint, we assign samples from each class in + # blocks (and then mess that up when shuffle=True). + test_folds = np.empty(len(y), dtype="i") + for k in range(n_classes): + # since the kth column of allocation stores the number of samples + # of class k in each test set, this generates blocks of fold + # indices corresponding to the allocation for class k. + folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k]) + if self.shuffle: + rng.shuffle(folds_for_class) + test_folds[y_encoded == k] = folds_for_class + return test_folds + + def _iter_test_masks(self, X, y=None, groups=None): + test_folds = self._make_test_folds(X, y) + for i in range(self.n_splits): + yield test_folds == i + + def split(self, X, y, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Note that providing ``y`` is sufficient to generate the splits and + hence ``np.zeros(n_samples)`` may be used as a placeholder for + ``X`` instead of actual training data. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + Stratification is done based on the y labels. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + return super().split(X, y, groups) + + +class StratifiedGroupKFold(GroupsConsumerMixin, _BaseKFold): + """Stratified K-Fold iterator variant with non-overlapping groups. + + This cross-validation object is a variation of StratifiedKFold attempts to + return stratified folds with non-overlapping groups. The folds are made by + preserving the percentage of samples for each class. + + Each group will appear exactly once in the test set across all folds (the + number of distinct groups has to be at least equal to the number of folds). 
+ + The difference between :class:`~sklearn.model_selection.GroupKFold` + and :class:`~sklearn.model_selection.StratifiedGroupKFold` is that + the former attempts to create balanced folds such that the number of + distinct groups is approximately the same in each fold, whereas + StratifiedGroupKFold attempts to create folds which preserve the + percentage of samples for each class as much as possible given the + constraint of non-overlapping groups between splits. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + shuffle : bool, default=False + Whether to shuffle each class's samples before splitting into batches. + Note that the samples within each split will not be shuffled. + This implementation can only shuffle groups that have approximately the + same y distribution, no global shuffle will be performed. + + random_state : int or RandomState instance, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold for each class. + Otherwise, leave `random_state` as `None`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedGroupKFold + >>> X = np.ones((17, 2)) + >>> y = np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + >>> groups = np.array([1, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8]) + >>> sgkf = StratifiedGroupKFold(n_splits=3) + >>> sgkf.get_n_splits(X, y) + 3 + >>> print(sgkf) + StratifiedGroupKFold(n_splits=3, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(sgkf.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" group={groups[train_index]}") + ... print(f" Test: index={test_index}") + ... print(f" group={groups[test_index]}") + Fold 0: + Train: index=[ 0 1 2 3 7 8 9 10 11 15 16] + group=[1 1 2 2 4 5 5 5 5 8 8] + Test: index=[ 4 5 6 12 13 14] + group=[3 3 3 6 6 7] + Fold 1: + Train: index=[ 4 5 6 7 8 9 10 11 12 13 14] + group=[3 3 3 4 5 5 5 5 6 6 7] + Test: index=[ 0 1 2 3 15 16] + group=[1 1 2 2 8 8] + Fold 2: + Train: index=[ 0 1 2 3 4 5 6 12 13 14 15 16] + group=[1 1 2 2 3 3 3 6 6 7 8 8] + Test: index=[ 7 8 9 10 11] + group=[4 5 5 5 5] + + Notes + ----- + The implementation is designed to: + + * Mimic the behavior of StratifiedKFold as much as possible for trivial + groups (e.g. when each group contains only one sample). + * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to + ``y = [1, 0]`` should not change the indices generated. + * Stratify based on samples as much as possible while keeping + non-overlapping groups constraint. That means that in some cases when + there is a small number of groups containing a large number of samples + the stratification will not be possible and the behavior will be close + to GroupKFold. + + See also + -------- + StratifiedKFold: Takes class information into account to build folds which + retain class distributions (for binary or multiclass classification + tasks). + + GroupKFold: K-fold iterator variant with non-overlapping groups. 
+ """ + + def __init__(self, n_splits=5, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _iter_test_indices(self, X, y, groups): + # Implementation is based on this kaggle kernel: + # https://www.kaggle.com/jakubwasikowski/stratified-group-k-fold-cross-validation + # and is a subject to Apache 2.0 License. You may obtain a copy of the + # License at http://www.apache.org/licenses/LICENSE-2.0 + # Changelist: + # - Refactored function to a class following scikit-learn KFold + # interface. + # - Added heuristic for assigning group to the least populated fold in + # cases when all other criteria are equal + # - Swtch from using python ``Counter`` to ``np.unique`` to get class + # distribution + # - Added scikit-learn checks for input: checking that target is binary + # or multiclass, checking passed random state, checking that number + # of splits is less than number of members in each class, checking + # that least populated class has more members than there are splits. + rng = check_random_state(self.random_state) + y = np.asarray(y) + type_of_target_y = type_of_target(y) + allowed_target_types = ("binary", "multiclass") + if type_of_target_y not in allowed_target_types: + raise ValueError( + "Supported target types are: {}. Got {!r} instead.".format( + allowed_target_types, type_of_target_y + ) + ) + + y = column_or_1d(y) + _, y_inv, y_cnt = np.unique(y, return_inverse=True, return_counts=True) + if np.all(self.n_splits > y_cnt): + raise ValueError( + "n_splits=%d cannot be greater than the" + " number of members in each class." % (self.n_splits) + ) + n_smallest_class = np.min(y_cnt) + if self.n_splits > n_smallest_class: + warnings.warn( + "The least populated class in y has only %d" + " members, which is less than n_splits=%d." 
+ % (n_smallest_class, self.n_splits), + UserWarning, + ) + n_classes = len(y_cnt) + + _, groups_inv, groups_cnt = np.unique( + groups, return_inverse=True, return_counts=True + ) + y_counts_per_group = np.zeros((len(groups_cnt), n_classes)) + for class_idx, group_idx in zip(y_inv, groups_inv): + y_counts_per_group[group_idx, class_idx] += 1 + + y_counts_per_fold = np.zeros((self.n_splits, n_classes)) + groups_per_fold = defaultdict(set) + + if self.shuffle: + rng.shuffle(y_counts_per_group) + + # Stable sort to keep shuffled order for groups with the same + # class distribution variance + sorted_groups_idx = np.argsort( + -np.std(y_counts_per_group, axis=1), kind="mergesort" + ) + + for group_idx in sorted_groups_idx: + group_y_counts = y_counts_per_group[group_idx] + best_fold = self._find_best_fold( + y_counts_per_fold=y_counts_per_fold, + y_cnt=y_cnt, + group_y_counts=group_y_counts, + ) + y_counts_per_fold[best_fold] += group_y_counts + groups_per_fold[best_fold].add(group_idx) + + for i in range(self.n_splits): + test_indices = [ + idx + for idx, group_idx in enumerate(groups_inv) + if group_idx in groups_per_fold[i] + ] + yield test_indices + + def _find_best_fold(self, y_counts_per_fold, y_cnt, group_y_counts): + best_fold = None + min_eval = np.inf + min_samples_in_fold = np.inf + for i in range(self.n_splits): + y_counts_per_fold[i] += group_y_counts + # Summarise the distribution over classes in each proposed fold + std_per_class = np.std(y_counts_per_fold / y_cnt.reshape(1, -1), axis=0) + y_counts_per_fold[i] -= group_y_counts + fold_eval = np.mean(std_per_class) + samples_in_fold = np.sum(y_counts_per_fold[i]) + is_current_fold_better = ( + fold_eval < min_eval + or np.isclose(fold_eval, min_eval) + and samples_in_fold < min_samples_in_fold + ) + if is_current_fold_better: + min_eval = fold_eval + min_samples_in_fold = samples_in_fold + best_fold = i + return best_fold + + +class TimeSeriesSplit(_BaseKFold): + """Time Series cross-validator. + + Provides train/test indices to split time series data samples + that are observed at fixed time intervals, in train/test sets. + In each split, test indices must be higher than before, and thus shuffling + in cross validator is inappropriate. + + This cross-validation object is a variation of :class:`KFold`. + In the kth split, it returns first k folds as train set and the + (k+1)th fold as test set. + + Note that unlike standard cross-validation methods, successive + training sets are supersets of those that come before them. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + .. versionadded:: 0.18 + + Parameters + ---------- + n_splits : int, default=5 + Number of splits. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + max_train_size : int, default=None + Maximum size for a single training set. + + test_size : int, default=None + Used to limit the size of the test set. Defaults to + ``n_samples // (n_splits + 1)``, which is the maximum allowed value + with ``gap=0``. + + .. versionadded:: 0.24 + + gap : int, default=0 + Number of samples to exclude from the end of each train set before + the test set. + + .. 
versionadded:: 0.24 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import TimeSeriesSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> tscv = TimeSeriesSplit() + >>> print(tscv) + TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0] + Test: index=[1] + Fold 1: + Train: index=[0 1] + Test: index=[2] + Fold 2: + Train: index=[0 1 2] + Test: index=[3] + Fold 3: + Train: index=[0 1 2 3] + Test: index=[4] + Fold 4: + Train: index=[0 1 2 3 4] + Test: index=[5] + >>> # Fix test_size to 2 with 12 samples + >>> X = np.random.randn(12, 2) + >>> y = np.random.randint(0, 2, 12) + >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0 1 2 3 4 5] + Test: index=[6 7] + Fold 1: + Train: index=[0 1 2 3 4 5 6 7] + Test: index=[8 9] + Fold 2: + Train: index=[0 1 2 3 4 5 6 7 8 9] + Test: index=[10 11] + >>> # Add in a 2 period gap + >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=2) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0 1 2 3] + Test: index=[6 7] + Fold 1: + Train: index=[0 1 2 3 4 5] + Test: index=[8 9] + Fold 2: + Train: index=[0 1 2 3 4 5 6 7] + Test: index=[10 11] + + For a more extended example see + :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py`. + + Notes + ----- + The training set has size ``i * n_samples // (n_splits + 1) + + n_samples % (n_splits + 1)`` in the ``i`` th split, + with a test set of size ``n_samples//(n_splits + 1)`` by default, + where ``n_samples`` is the number of samples. + """ + + def __init__(self, n_splits=5, *, max_train_size=None, test_size=None, gap=0): + super().__init__(n_splits, shuffle=False, random_state=None) + self.max_train_size = max_train_size + self.test_size = test_size + self.gap = gap + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + n_samples = _num_samples(X) + n_splits = self.n_splits + n_folds = n_splits + 1 + gap = self.gap + test_size = ( + self.test_size if self.test_size is not None else n_samples // n_folds + ) + + # Make sure we have enough samples for the given split parameters + if n_folds > n_samples: + raise ValueError( + f"Cannot have number of folds={n_folds} greater" + f" than the number of samples={n_samples}." 
+ ) + if n_samples - gap - (test_size * n_splits) <= 0: + raise ValueError( + f"Too many splits={n_splits} for number of samples" + f"={n_samples} with test_size={test_size} and gap={gap}." + ) + + indices = np.arange(n_samples) + test_starts = range(n_samples - n_splits * test_size, n_samples, test_size) + + for test_start in test_starts: + train_end = test_start - gap + if self.max_train_size and self.max_train_size < train_end: + yield ( + indices[train_end - self.max_train_size : train_end], + indices[test_start : test_start + test_size], + ) + else: + yield ( + indices[:train_end], + indices[test_start : test_start + test_size], + ) + + +class LeaveOneGroupOut(GroupsConsumerMixin, BaseCrossValidator): + """Leave One Group Out cross-validator. + + Provides train/test indices to split data such that each training set is + comprised of all samples except ones belonging to one specific group. + Arbitrary domain specific group information is provided an array integers + that encodes the group of each sample. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + Read more in the :ref:`User Guide `. + + Notes + ----- + Splits are ordered according to the index of the group left out. The first + split has testing set consisting of the group whose index in `groups` is + lowest, and so on. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeaveOneGroupOut + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + >>> y = np.array([1, 2, 1, 2]) + >>> groups = np.array([1, 1, 2, 2]) + >>> logo = LeaveOneGroupOut() + >>> logo.get_n_splits(X, y, groups) + 2 + >>> logo.get_n_splits(groups=groups) # 'groups' is always required + 2 + >>> print(logo) + LeaveOneGroupOut() + >>> for i, (train_index, test_index) in enumerate(logo.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3], group=[2 2] + Test: index=[0 1], group=[1 1] + Fold 1: + Train: index=[0 1], group=[1 1] + Test: index=[2 3], group=[2 2] + + See also + -------- + GroupKFold: K-fold iterator variant with non-overlapping groups. + """ + + def _iter_test_masks(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + # We make a copy of groups to avoid side-effects during iteration + groups = check_array( + groups, input_name="groups", copy=True, ensure_2d=False, dtype=None + ) + unique_groups = np.unique(groups) + if len(unique_groups) <= 1: + raise ValueError( + "The groups parameter contains fewer than 2 unique groups " + "(%s). LeaveOneGroupOut expects at least 2." % unique_groups + ) + for i in unique_groups: + yield groups == i + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. This 'groups' parameter must always be specified to + calculate the number of splits, though the other parameters can be + omitted. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. 
+ """ + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + return len(np.unique(groups)) + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class LeavePGroupsOut(GroupsConsumerMixin, BaseCrossValidator): + """Leave P Group(s) Out cross-validator. + + Provides train/test indices to split data according to a third-party + provided group. This group information can be used to encode arbitrary + domain specific stratifications of the samples as integers. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + The difference between LeavePGroupsOut and LeaveOneGroupOut is that + the former builds the test sets with all the samples assigned to + ``p`` different values of the groups while the latter uses samples + all assigned the same groups. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_groups : int + Number of groups (``p``) to leave out in the test split. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeavePGroupsOut + >>> X = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([1, 2, 1]) + >>> groups = np.array([1, 2, 3]) + >>> lpgo = LeavePGroupsOut(n_groups=2) + >>> lpgo.get_n_splits(X, y, groups) + 3 + >>> lpgo.get_n_splits(groups=groups) # 'groups' is always required + 3 + >>> print(lpgo) + LeavePGroupsOut(n_groups=2) + >>> for i, (train_index, test_index) in enumerate(lpgo.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2], group=[3] + Test: index=[0 1], group=[1 2] + Fold 1: + Train: index=[1], group=[2] + Test: index=[0 2], group=[1 3] + Fold 2: + Train: index=[0], group=[1] + Test: index=[1 2], group=[2 3] + + See Also + -------- + GroupKFold : K-fold iterator variant with non-overlapping groups. + """ + + def __init__(self, n_groups): + self.n_groups = n_groups + + def _iter_test_masks(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array( + groups, input_name="groups", copy=True, ensure_2d=False, dtype=None + ) + unique_groups = np.unique(groups) + if self.n_groups >= len(unique_groups): + raise ValueError( + "The groups parameter contains fewer than (or equal to) " + "n_groups (%d) numbers of unique groups (%s). 
LeavePGroupsOut " + "expects that at least n_groups + 1 (%d) unique groups be " + "present" % (self.n_groups, unique_groups, self.n_groups + 1) + ) + combi = combinations(range(len(unique_groups)), self.n_groups) + for indices in combi: + test_index = np.zeros(_num_samples(X), dtype=bool) + for l in unique_groups[np.array(indices)]: + test_index[groups == l] = True + yield test_index + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. This 'groups' parameter must always be specified to + calculate the number of splits, though the other parameters can be + omitted. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + return int(comb(len(np.unique(groups)), self.n_groups, exact=True)) + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class _RepeatedSplits(_MetadataRequester, metaclass=ABCMeta): + """Repeated splits for an arbitrary randomized CV splitter. + + Repeats splits for cross-validators n times with different randomization + in each repetition. + + Parameters + ---------- + cv : callable + Cross-validator class. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Passes `random_state` to the arbitrary repeating cross validator. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + **cvargs : additional params + Constructor parameters for cv. Must not contain random_state + and shuffle. + """ + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. 
+ __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def __init__(self, cv, *, n_repeats=10, random_state=None, **cvargs): + if not isinstance(n_repeats, numbers.Integral): + raise ValueError("Number of repetitions must be of Integral type.") + + if n_repeats <= 0: + raise ValueError("Number of repetitions must be greater than 0.") + + if any(key in cvargs for key in ("random_state", "shuffle")): + raise ValueError("cvargs must not contain random_state or shuffle.") + + self.cv = cv + self.n_repeats = n_repeats + self.random_state = random_state + self.cvargs = cvargs + + def split(self, X, y=None, groups=None): + """Generates indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + n_repeats = self.n_repeats + rng = check_random_state(self.random_state) + + for idx in range(n_repeats): + cv = self.cv(random_state=rng, shuffle=True, **self.cvargs) + for train_index, test_index in cv.split(X, y, groups): + yield train_index, test_index + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + ``np.zeros(n_samples)`` may be used as a placeholder. + + y : object + Always ignored, exists for compatibility. + ``np.zeros(n_samples)`` may be used as a placeholder. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + rng = check_random_state(self.random_state) + cv = self.cv(random_state=rng, shuffle=True, **self.cvargs) + return cv.get_n_splits(X, y, groups) * self.n_repeats + + def __repr__(self): + return _build_repr(self) + + +class RepeatedKFold(_RepeatedSplits): + """Repeated K-Fold cross validator. + + Repeats K-Fold n times with different randomization in each repetition. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of each repeated cross-validation instance. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import RepeatedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124) + >>> rkf.get_n_splits(X, y) + 4 + >>> print(rkf) + RepeatedKFold(n_repeats=2, n_splits=2, random_state=2652124) + >>> for i, (train_index, test_index) in enumerate(rkf.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + ... + Fold 0: + Train: index=[0 1] + Test: index=[2 3] + Fold 1: + Train: index=[2 3] + Test: index=[0 1] + Fold 2: + Train: index=[1 2] + Test: index=[0 3] + Fold 3: + Train: index=[0 3] + Test: index=[1 2] + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + RepeatedStratifiedKFold : Repeats Stratified K-Fold n times. + """ + + def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): + super().__init__( + KFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits + ) + + +class RepeatedStratifiedKFold(_RepeatedSplits): + """Repeated Stratified K-Fold cross validator. + + Repeats Stratified K-Fold n times with different randomization in each + repetition. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Controls the generation of the random states for each repetition. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import RepeatedStratifiedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, + ... random_state=36851234) + >>> rskf.get_n_splits(X, y) + 4 + >>> print(rskf) + RepeatedStratifiedKFold(n_repeats=2, n_splits=2, random_state=36851234) + >>> for i, (train_index, test_index) in enumerate(rskf.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + ... + Fold 0: + Train: index=[1 2] + Test: index=[0 3] + Fold 1: + Train: index=[0 3] + Test: index=[1 2] + Fold 2: + Train: index=[1 3] + Test: index=[0 2] + Fold 3: + Train: index=[0 2] + Test: index=[1 3] + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + RepeatedKFold : Repeats K-Fold n times. + """ + + def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): + super().__init__( + StratifiedKFold, + n_repeats=n_repeats, + random_state=random_state, + n_splits=n_splits, + ) + + +class BaseShuffleSplit(_MetadataRequester, metaclass=ABCMeta): + """Base class for ShuffleSplit and StratifiedShuffleSplit.""" + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. + __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + self.n_splits = n_splits + self.test_size = test_size + self.train_size = train_size + self.random_state = random_state + self._default_test_size = 0.1 + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + X, y, groups = indexable(X, y, groups) + for train, test in self._iter_indices(X, y, groups): + yield train, test + + @abstractmethod + def _iter_indices(self, X, y=None, groups=None): + """Generate (train, test) indices""" + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return self.n_splits + + def __repr__(self): + return _build_repr(self) + + +class ShuffleSplit(BaseShuffleSplit): + """Random permutation cross-validator. + + Yields indices to split data into training and test sets. + + Note: contrary to other cross-validation strategies, random splits + do not guarantee that all folds will be different, although this is + still very likely for sizeable datasets. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=10 + Number of re-shuffling & splitting iterations. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.1. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import ShuffleSplit + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]]) + >>> y = np.array([1, 2, 1, 2, 1, 2]) + >>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0) + >>> rs.get_n_splits(X) + 5 + >>> print(rs) + ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None) + >>> for i, (train_index, test_index) in enumerate(rs.split(X)): + ... print(f"Fold {i}:") + ... 
print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3 0 4] + Test: index=[5 2] + Fold 1: + Train: index=[4 0 2 5] + Test: index=[1 3] + Fold 2: + Train: index=[1 2 4 0] + Test: index=[3 5] + Fold 3: + Train: index=[3 4 1 0] + Test: index=[5 2] + Fold 4: + Train: index=[3 5 1 0] + Test: index=[2 4] + >>> # Specify train and test size + >>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25, + ... random_state=0) + >>> for i, (train_index, test_index) in enumerate(rs.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3 0] + Test: index=[5 2] + Fold 1: + Train: index=[4 0 2] + Test: index=[1 3] + Fold 2: + Train: index=[1 2 4] + Test: index=[3 5] + Fold 3: + Train: index=[3 4 1] + Test: index=[5 2] + Fold 4: + Train: index=[3 5 1] + Test: index=[2 4] + """ + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.1 + + def _iter_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + n_train, n_test = _validate_shuffle_split( + n_samples, + self.test_size, + self.train_size, + default_test_size=self._default_test_size, + ) + + rng = check_random_state(self.random_state) + for i in range(self.n_splits): + # random partition + permutation = rng.permutation(n_samples) + ind_test = permutation[:n_test] + ind_train = permutation[n_test : (n_test + n_train)] + yield ind_train, ind_test + + +class GroupShuffleSplit(GroupsConsumerMixin, ShuffleSplit): + """Shuffle-Group(s)-Out cross-validation iterator. + + Provides randomized train/test indices to split data according to a + third-party provided group. This group information can be used to encode + arbitrary domain specific stratifications of the samples as integers. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + The difference between LeavePGroupsOut and GroupShuffleSplit is that + the former generates splits using all subsets of size ``p`` unique groups, + whereas GroupShuffleSplit generates a user-determined number of random + test splits, each with a user-determined fraction of unique groups. + + For example, a less computationally intensive alternative to + ``LeavePGroupsOut(p=10)`` would be + ``GroupShuffleSplit(test_size=10, n_splits=100)``. + + Note: The parameters ``test_size`` and ``train_size`` refer to groups, and + not to samples, as in ShuffleSplit. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of re-shuffling & splitting iterations. + + test_size : float, int, default=0.2 + If float, should be between 0.0 and 1.0 and represent the proportion + of groups to include in the test split (rounded up). If int, + represents the absolute number of test groups. If None, the value is + set to the complement of the train size. + The default will change in version 0.21. It will remain 0.2 only + if ``train_size`` is unspecified, otherwise it will complement + the specified ``train_size``. 
+ + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the groups to include in the train split. If + int, represents the absolute number of train groups. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import GroupShuffleSplit + >>> X = np.ones(shape=(8, 2)) + >>> y = np.ones(shape=(8, 1)) + >>> groups = np.array([1, 1, 2, 2, 2, 3, 3, 3]) + >>> print(groups.shape) + (8,) + >>> gss = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42) + >>> gss.get_n_splits() + 2 + >>> print(gss) + GroupShuffleSplit(n_splits=2, random_state=42, test_size=None, train_size=0.7) + >>> for i, (train_index, test_index) in enumerate(gss.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3 4 5 6 7], group=[2 2 2 3 3 3] + Test: index=[0 1], group=[1 1] + Fold 1: + Train: index=[0 1 5 6 7], group=[1 1 3 3 3] + Test: index=[2 3 4], group=[2 2 2] + + See Also + -------- + ShuffleSplit : Shuffles samples to create independent test/train sets. + + LeavePGroupsOut : Train set leaves out all possible subsets of `p` groups. + """ + + def __init__( + self, n_splits=5, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.2 + + def _iter_indices(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + classes, group_indices = np.unique(groups, return_inverse=True) + for group_train, group_test in super()._iter_indices(X=classes): + # these are the indices of classes in the partition + # invert them into data indices + + train = np.flatnonzero(np.isin(group_indices, group_train)) + test = np.flatnonzero(np.isin(group_indices, group_test)) + + yield train, test + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + return super().split(X, y, groups) + + +class StratifiedShuffleSplit(BaseShuffleSplit): + """Stratified ShuffleSplit cross-validator. + + Provides train/test indices to split data in train/test sets. 
+ + This cross-validation object is a merge of StratifiedKFold and + ShuffleSplit, which returns stratified randomized folds. The folds + are made by preserving the percentage of samples for each class. + + Note: like the ShuffleSplit strategy, stratified random splits + do not guarantee that all folds will be different, although this is + still very likely for sizeable datasets. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=10 + Number of re-shuffling & splitting iterations. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.1. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedShuffleSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 0, 1, 1, 1]) + >>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0) + >>> sss.get_n_splits(X, y) + 5 + >>> print(sss) + StratifiedShuffleSplit(n_splits=5, random_state=0, ...) + >>> for i, (train_index, test_index) in enumerate(sss.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[5 2 3] + Test: index=[4 1 0] + Fold 1: + Train: index=[5 1 4] + Test: index=[0 2 3] + Fold 2: + Train: index=[5 0 2] + Test: index=[4 3 1] + Fold 3: + Train: index=[4 1 0] + Test: index=[2 3 5] + Fold 4: + Train: index=[0 5 1] + Test: index=[3 4 2] + """ + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.1 + + def _iter_indices(self, X, y, groups=None): + n_samples = _num_samples(X) + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + n_train, n_test = _validate_shuffle_split( + n_samples, + self.test_size, + self.train_size, + default_test_size=self._default_test_size, + ) + + if y.ndim == 2: + # for multi-label y, map each distinct row to a string repr + # using join because str(row) uses an ellipsis if len(row) > 1000 + y = np.array([" ".join(row.astype("str")) for row in y]) + + classes, y_indices = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + + class_counts = np.bincount(y_indices) + if np.min(class_counts) < 2: + raise ValueError( + "The least populated class in y has only 1" + " member, which is too few. The minimum" + " number of groups for any class cannot" + " be less than 2." 
+ ) + + if n_train < n_classes: + raise ValueError( + "The train_size = %d should be greater or " + "equal to the number of classes = %d" % (n_train, n_classes) + ) + if n_test < n_classes: + raise ValueError( + "The test_size = %d should be greater or " + "equal to the number of classes = %d" % (n_test, n_classes) + ) + + # Find the sorted list of instances for each class: + # (np.unique above performs a sort, so code is O(n log n) already) + class_indices = np.split( + np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1] + ) + + rng = check_random_state(self.random_state) + + for _ in range(self.n_splits): + # if there are ties in the class-counts, we want + # to make sure to break them anew in each iteration + n_i = _approximate_mode(class_counts, n_train, rng) + class_counts_remaining = class_counts - n_i + t_i = _approximate_mode(class_counts_remaining, n_test, rng) + + train = [] + test = [] + + for i in range(n_classes): + permutation = rng.permutation(class_counts[i]) + perm_indices_class_i = class_indices[i].take(permutation, mode="clip") + + train.extend(perm_indices_class_i[: n_i[i]]) + test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]) + + train = rng.permutation(train) + test = rng.permutation(test) + + yield train, test + + def split(self, X, y, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Note that providing ``y`` is sufficient to generate the splits and + hence ``np.zeros(n_samples)`` may be used as a placeholder for + ``X`` instead of actual training data. + + y : array-like of shape (n_samples,) or (n_samples, n_labels) + The target variable for supervised learning problems. + Stratification is done based on the y labels. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + return super().split(X, y, groups) + + +def _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=None): + """ + Validation helper to check if the train/test sizes are meaningful w.r.t. the + size of the data (n_samples).
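+
+    For illustration (a sketch, not an upstream doctest), fractional sizes are
+    resolved into integer sample counts, e.g.
+    ``_validate_shuffle_split(10, test_size=0.2, train_size=None)`` returns
+    ``(8, 2)``.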
+ """ + if test_size is None and train_size is None: + test_size = default_test_size + + test_size_type = np.asarray(test_size).dtype.kind + train_size_type = np.asarray(train_size).dtype.kind + + if ( + test_size_type == "i" + and (test_size >= n_samples or test_size <= 0) + or test_size_type == "f" + and (test_size <= 0 or test_size >= 1) + ): + raise ValueError( + "test_size={0} should be either positive and smaller" + " than the number of samples {1} or a float in the " + "(0, 1) range".format(test_size, n_samples) + ) + + if ( + train_size_type == "i" + and (train_size >= n_samples or train_size <= 0) + or train_size_type == "f" + and (train_size <= 0 or train_size >= 1) + ): + raise ValueError( + "train_size={0} should be either positive and smaller" + " than the number of samples {1} or a float in the " + "(0, 1) range".format(train_size, n_samples) + ) + + if train_size is not None and train_size_type not in ("i", "f"): + raise ValueError("Invalid value for train_size: {}".format(train_size)) + if test_size is not None and test_size_type not in ("i", "f"): + raise ValueError("Invalid value for test_size: {}".format(test_size)) + + if train_size_type == "f" and test_size_type == "f" and train_size + test_size > 1: + raise ValueError( + "The sum of test_size and train_size = {}, should be in the (0, 1)" + " range. Reduce test_size and/or train_size.".format(train_size + test_size) + ) + + if test_size_type == "f": + n_test = ceil(test_size * n_samples) + elif test_size_type == "i": + n_test = float(test_size) + + if train_size_type == "f": + n_train = floor(train_size * n_samples) + elif train_size_type == "i": + n_train = float(train_size) + + if train_size is None: + n_train = n_samples - n_test + elif test_size is None: + n_test = n_samples - n_train + + if n_train + n_test > n_samples: + raise ValueError( + "The sum of train_size and test_size = %d, " + "should be smaller than the number of " + "samples %d. Reduce test_size and/or " + "train_size." % (n_train + n_test, n_samples) + ) + + n_train, n_test = int(n_train), int(n_test) + + if n_train == 0: + raise ValueError( + "With n_samples={}, test_size={} and train_size={}, the " + "resulting train set will be empty. Adjust any of the " + "aforementioned parameters.".format(n_samples, test_size, train_size) + ) + + return n_train, n_test + + +class PredefinedSplit(BaseCrossValidator): + """Predefined split cross-validator. + + Provides train/test indices to split data into train/test sets using a + predefined scheme specified by the user with the ``test_fold`` parameter. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.16 + + Parameters + ---------- + test_fold : array-like of shape (n_samples,) + The entry ``test_fold[i]`` represents the index of the test set that + sample ``i`` belongs to. It is possible to exclude sample ``i`` from + any test set (i.e. include sample ``i`` in every training set) by + setting ``test_fold[i]`` equal to -1. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import PredefinedSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> test_fold = [0, 1, -1, 1] + >>> ps = PredefinedSplit(test_fold) + >>> ps.get_n_splits() + 2 + >>> print(ps) + PredefinedSplit(test_fold=array([ 0, 1, -1, 1])) + >>> for i, (train_index, test_index) in enumerate(ps.split()): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 2 3] + Test: index=[0] + Fold 1: + Train: index=[0 2] + Test: index=[1 3] + """ + + def __init__(self, test_fold): + self.test_fold = np.array(test_fold, dtype=int) + self.test_fold = column_or_1d(self.test_fold) + self.unique_folds = np.unique(self.test_fold) + self.unique_folds = self.unique_folds[self.unique_folds != -1] + + def split(self, X=None, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + ind = np.arange(len(self.test_fold)) + for test_index in self._iter_test_masks(): + train_index = ind[np.logical_not(test_index)] + test_index = ind[test_index] + yield train_index, test_index + + def _iter_test_masks(self): + """Generates boolean masks corresponding to test sets.""" + for f in self.unique_folds: + test_index = np.where(self.test_fold == f)[0] + test_mask = np.zeros(len(self.test_fold), dtype=bool) + test_mask[test_index] = True + yield test_mask + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return len(self.unique_folds) + + +class _CVIterableWrapper(BaseCrossValidator): + """Wrapper class for old style cv objects and iterables.""" + + def __init__(self, cv): + self.cv = list(cv) + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return len(self.cv) + + def split(self, X=None, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + for train, test in self.cv: + yield train, test + + +def check_cv(cv=5, y=None, *, classifier=False): + """Input checker utility for building a cross-validator. + + Parameters + ---------- + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. 
+ + For integer/None inputs, if classifier is True and ``y`` is either + binary or multiclass, :class:`StratifiedKFold` is used. In all other + cases, :class:`KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value changed from 3-fold to 5-fold. + + y : array-like, default=None + The target variable for supervised learning problems. + + classifier : bool, default=False + Whether the task is a classification task, in which case + stratified KFold will be used. + + Returns + ------- + checked_cv : a cross-validator instance. + The return value is a cross-validator which generates the train/test + splits via the ``split`` method. + + Examples + -------- + >>> from sklearn.model_selection import check_cv + >>> check_cv(cv=5, y=None, classifier=False) + KFold(...) + >>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True) + StratifiedKFold(...) + """ + cv = 5 if cv is None else cv + if isinstance(cv, numbers.Integral): + if ( + classifier + and (y is not None) + and (type_of_target(y, input_name="y") in ("binary", "multiclass")) + ): + return StratifiedKFold(cv) + else: + return KFold(cv) + + if not hasattr(cv, "split") or isinstance(cv, str): + if not isinstance(cv, Iterable) or isinstance(cv, str): + raise ValueError( + "Expected cv as an integer, cross-validation " + "object (from sklearn.model_selection) " + "or an iterable. Got %s." % cv + ) + return _CVIterableWrapper(cv) + + return cv # New style cv objects are passed without any modification + + +@validate_params( + { + "test_size": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(numbers.Integral, 1, None, closed="left"), + None, + ], + "train_size": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(numbers.Integral, 1, None, closed="left"), + None, + ], + "random_state": ["random_state"], + "shuffle": ["boolean"], + "stratify": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def train_test_split( + *arrays, + test_size=None, + train_size=None, + random_state=None, + shuffle=True, + stratify=None, +): + """Split arrays or matrices into random train and test subsets. + + Quick utility that wraps input validation, + ``next(ShuffleSplit().split(X, y))``, and application to input data + into a single call for splitting (and optionally subsampling) data into a + one-liner. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *arrays : sequence of indexables with same length / shape[0] + Allowed inputs are lists, numpy arrays, scipy-sparse + matrices or pandas dataframes. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.25. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the shuffling applied to the data before applying the split. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
+ + shuffle : bool, default=True + Whether or not to shuffle the data before splitting. If shuffle=False + then stratify must be None. + + stratify : array-like, default=None + If not None, data is split in a stratified fashion, using this as + the class labels. + Read more in the :ref:`User Guide `. + + Returns + ------- + splitting : list, length=2 * len(arrays) + List containing train-test split of inputs. + + .. versionadded:: 0.16 + If the input is sparse, the output will be a + ``scipy.sparse.csr_matrix``. Else, output type is the same as the + input type. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import train_test_split + >>> X, y = np.arange(10).reshape((5, 2)), range(5) + >>> X + array([[0, 1], + [2, 3], + [4, 5], + [6, 7], + [8, 9]]) + >>> list(y) + [0, 1, 2, 3, 4] + + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, test_size=0.33, random_state=42) + ... + >>> X_train + array([[4, 5], + [0, 1], + [6, 7]]) + >>> y_train + [2, 0, 3] + >>> X_test + array([[2, 3], + [8, 9]]) + >>> y_test + [1, 4] + + >>> train_test_split(y, shuffle=False) + [[0, 1, 2], [3, 4]] + """ + n_arrays = len(arrays) + if n_arrays == 0: + raise ValueError("At least one array required as input") + + arrays = indexable(*arrays) + + n_samples = _num_samples(arrays[0]) + n_train, n_test = _validate_shuffle_split( + n_samples, test_size, train_size, default_test_size=0.25 + ) + + if shuffle is False: + if stratify is not None: + raise ValueError( + "Stratified train/test split is not implemented for shuffle=False" + ) + + train = np.arange(n_train) + test = np.arange(n_train, n_train + n_test) + + else: + if stratify is not None: + CVClass = StratifiedShuffleSplit + else: + CVClass = ShuffleSplit + + cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state) + + train, test = next(cv.split(X=arrays[0], y=stratify)) + + return list( + chain.from_iterable( + (_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays + ) + ) + + +# Tell nose that train_test_split is not a test. +# (Needed for external libraries that may use nose.) +# Use setattr to avoid mypy errors when monkeypatching. +setattr(train_test_split, "__test__", False) + + +def _pprint(params, offset=0, printer=repr): + """Pretty print the dictionary 'params' + + Parameters + ---------- + params : dict + The dictionary to pretty print + + offset : int, default=0 + The offset in characters to add at the begin of each line. + + printer : callable, default=repr + The function to convert entries to strings, typically + the builtin str or repr + + """ + # Do a multi-line justified repr: + options = np.get_printoptions() + np.set_printoptions(precision=5, threshold=64, edgeitems=2) + params_list = list() + this_line_length = offset + line_sep = ",\n" + (1 + offset // 2) * " " + for i, (k, v) in enumerate(sorted(params.items())): + if isinstance(v, float): + # use str for representing floating point numbers + # this way we get consistent representation across + # architectures and versions. + this_repr = "%s=%s" % (k, str(v)) + else: + # use repr of the rest + this_repr = "%s=%s" % (k, printer(v)) + if len(this_repr) > 500: + this_repr = this_repr[:300] + "..." 
+ this_repr[-100:] + if i > 0: + if this_line_length + len(this_repr) >= 75 or "\n" in this_repr: + params_list.append(line_sep) + this_line_length = len(line_sep) + else: + params_list.append(", ") + this_line_length += 2 + params_list.append(this_repr) + this_line_length += len(this_repr) + + np.set_printoptions(**options) + lines = "".join(params_list) + # Strip trailing space to avoid nightmare in doctests + lines = "\n".join(l.rstrip(" ") for l in lines.split("\n")) + return lines + + +def _build_repr(self): + # XXX This is copied from BaseEstimator's get_params + cls = self.__class__ + init = getattr(cls.__init__, "deprecated_original", cls.__init__) + # Ignore varargs, kw and default values and pop self + init_signature = signature(init) + # Consider the constructor parameters excluding 'self' + if init is object.__init__: + args = [] + else: + args = sorted( + [ + p.name + for p in init_signature.parameters.values() + if p.name != "self" and p.kind != p.VAR_KEYWORD + ] + ) + class_name = self.__class__.__name__ + params = dict() + for key in args: + # We need deprecation warnings to always be on in order to + # catch deprecated param values. + # This is set in utils/__init__.py but it gets overwritten + # when running under python3 somehow. + warnings.simplefilter("always", FutureWarning) + try: + with warnings.catch_warnings(record=True) as w: + value = getattr(self, key, None) + if value is None and hasattr(self, "cvargs"): + value = self.cvargs.get(key, None) + if len(w) and w[0].category == FutureWarning: + # if the parameter is deprecated, don't show it + continue + finally: + warnings.filters.pop(0) + params[key] = value + + return "%s(%s)" % (class_name, _pprint(params, offset=len(class_name))) + + +def _yields_constant_splits(cv): + # Return True if calling cv.split() always returns the same splits + # We assume that if a cv doesn't have a shuffle parameter, it shuffles by + # default (e.g. ShuffleSplit). If it actually doesn't shuffle (e.g. + # LeaveOneOut), then it won't have a random_state parameter anyway, in + # which case it will default to 0, leading to output=True + shuffle = getattr(cv, "shuffle", True) + random_state = getattr(cv, "random_state", 0) + return isinstance(random_state, numbers.Integral) or not shuffle diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_validation.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..75c956f2d38a73229b8607d83c53913ab782e231 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/_validation.py @@ -0,0 +1,2360 @@ +""" +The :mod:`sklearn.model_selection._validation` module includes classes and +functions to validate the model. 
+""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Olivier Grisel +# Raghav RV +# Michal Karbownik +# License: BSD 3 clause + + +import numbers +import time +import warnings +from collections import Counter +from contextlib import suppress +from functools import partial +from numbers import Real +from traceback import format_exc + +import numpy as np +import scipy.sparse as sp +from joblib import logger + +from ..base import clone, is_classifier +from ..exceptions import FitFailedWarning, UnsetMetadataPassedError +from ..metrics import check_scoring, get_scorer_names +from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer +from ..preprocessing import LabelEncoder +from ..utils import Bunch, _safe_indexing, check_random_state, indexable +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + StrOptions, + validate_params, +) +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import _safe_split +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _check_method_params, _num_samples +from ._split import check_cv + +__all__ = [ + "cross_validate", + "cross_val_score", + "cross_val_predict", + "permutation_test_score", + "learning_curve", + "validation_curve", +] + + +def _check_params_groups_deprecation(fit_params, params, groups): + """A helper function to check deprecations on `groups` and `fit_params`. + + To be removed when set_config(enable_metadata_routing=False) is not possible. + """ + if params is not None and fit_params is not None: + raise ValueError( + "`params` and `fit_params` cannot both be provided. Pass parameters " + "via `params`. `fit_params` is deprecated and will be removed in " + "version 1.6." + ) + elif fit_params is not None: + warnings.warn( + ( + "`fit_params` is deprecated and will be removed in version 1.6. " + "Pass parameters via `params` instead." + ), + FutureWarning, + ) + params = fit_params + + params = {} if params is None else params + + if groups is not None and _routing_enabled(): + raise ValueError( + "`groups` can only be passed if metadata routing is not enabled via" + " `sklearn.set_config(enable_metadata_routing=True)`. When routing is" + " enabled, pass `groups` alongside other metadata via the `params` argument" + " instead." + ) + + return params + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str], + "return_train_score": ["boolean"], + "return_estimator": ["boolean"], + "return_indices": ["boolean"], + "error_score": [StrOptions({"raise"}), Real], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_validate( + estimator, + X, + y=None, + *, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + return_train_score=False, + return_estimator=False, + return_indices=False, + error_score=np.nan, +): + """Evaluate metric(s) by cross-validation and also record fit/score times. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be for example a list, or an array. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. E.g.: + ``cross_validate(..., params={'groups': groups})``. + + scoring : str, callable, list, tuple, or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables as values. + + See :ref:`multimetric_grid_search` for an example. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit``, the scorer, + and the CV splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process.
This parameter can be: + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + return_train_score : bool, default=False + Whether to include train scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + return_estimator : bool, default=False + Whether to return the estimators fitted on each split. + + .. versionadded:: 0.20 + + return_indices : bool, default=False + Whether to return the train-test indices selected for each split. + + .. versionadded:: 1.3 + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + Returns + ------- + scores : dict of float arrays of shape (n_splits,) + Array of scores of the estimator for each run of the cross validation. + + A dict of arrays containing the score/time arrays for each scorer is + returned. The possible keys for this ``dict`` are: + + ``test_score`` + The score array for test scores on each cv split. + Suffix ``_score`` in ``test_score`` changes to a specific + metric like ``test_r2`` or ``test_auc`` if there are + multiple scoring metrics in the scoring parameter. + ``train_score`` + The score array for train scores on each cv split. + Suffix ``_score`` in ``train_score`` changes to a specific + metric like ``train_r2`` or ``train_auc`` if there are + multiple scoring metrics in the scoring parameter. + This is available only if ``return_train_score`` parameter + is ``True``. + ``fit_time`` + The time for fitting the estimator on the train + set for each cv split. + ``score_time`` + The time for scoring the estimator on the test set for each + cv split. (Note time for scoring on the train set is not + included even if ``return_train_score`` is set to ``True`` + ``estimator`` + The estimator objects for each cv split. + This is available only if ``return_estimator`` parameter + is set to ``True``. + ``indices`` + The train/test positional indices for each cv split. A dictionary + is returned where the keys are either `"train"` or `"test"` + and the associated values are a list of integer-dtyped NumPy + arrays with the indices. Available only if `return_indices=True`. + + See Also + -------- + cross_val_score : Run cross-validation for single metric evaluation. + + cross_val_predict : Get predictions from each split of cross-validation for + diagnostic purposes. + + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. 
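+
+    A quick sketch of the ``return_indices`` layout described above
+    (illustrative only, not an upstream doctest; ``estimator``, ``X`` and ``y``
+    are placeholders)::
+
+        cv_results = cross_validate(estimator, X, y, cv=3, return_indices=True)
+        fold0_train = cv_results["indices"]["train"][0]  # ndarray of row indices
+        fold0_test = cv_results["indices"]["test"][0]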
+ + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_validate + >>> from sklearn.metrics import make_scorer + >>> from sklearn.metrics import confusion_matrix + >>> from sklearn.svm import LinearSVC + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + + Single metric evaluation using ``cross_validate`` + + >>> cv_results = cross_validate(lasso, X, y, cv=3) + >>> sorted(cv_results.keys()) + ['fit_time', 'score_time', 'test_score'] + >>> cv_results['test_score'] + array([0.3315057 , 0.08022103, 0.03531816]) + + Multiple metric evaluation using ``cross_validate`` + (please refer the ``scoring`` parameter doc for more information) + + >>> scores = cross_validate(lasso, X, y, cv=3, + ... scoring=('r2', 'neg_mean_squared_error'), + ... return_train_score=True) + >>> print(scores['test_neg_mean_squared_error']) + [-3635.5... -3573.3... -6114.7...] + >>> print(scores['train_r2']) + [0.28009951 0.3908844 0.22784907] + """ + params = _check_params_groups_deprecation(fit_params, params, groups) + + X, y = indexable(X, y) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + + if callable(scoring): + scorers = scoring + elif scoring is None or isinstance(scoring, str): + scorers = check_scoring(estimator, scoring) + else: + scorers = _check_multimetric_scoring(estimator, scoring) + + if _routing_enabled(): + # `cross_validate` will create a `_MultiMetricScorer` if `scoring` is a + # dict at a later stage. We need the same object for the purpose of + # routing. However, creating it here and passing it around would create + # a much larger diff since the dict is used in many places. + if isinstance(scorers, dict): + _scorer = _MultimetricScorer( + scorers=scorers, raise_exc=(error_score == "raise") + ) + else: + _scorer = scorers + # For estimators, a MetadataRouter is created in get_metadata_routing + # methods. For these router methods, we create the router to use + # `process_routing` on it. + router = ( + MetadataRouter(owner="cross_validate") + .add( + splitter=cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + .add( + estimator=estimator, + # TODO(SLEP6): also pass metadata to the predict method for + # scoring? + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + .add( + scorer=_scorer, + method_mapping=MethodMapping().add(caller="fit", callee="score"), + ) + ) + try: + routed_params = process_routing(router, "fit", **params) + except UnsetMetadataPassedError as e: + # The default exception would mention `fit` since in the above + # `process_routing` code, we pass `fit` as the caller. However, + # the user is not calling `fit` directly, so we change the message + # to make it more suitable for this case. + unrequested_params = sorted(e.unrequested_params) + raise UnsetMetadataPassedError( + message=( + f"{unrequested_params} are passed to cross validation but are not" + " explicitly set as requested or not requested for cross_validate's" + f" estimator: {estimator.__class__.__name__}. Call" + " `.set_fit_request({{metadata}}=True)` on the estimator for" + f" each metadata in {unrequested_params} that you" + " want to use and `metadata=False` for not using it. See the" + " Metadata Routing User guide" + " for more" + " information." 
+ ), + unrequested_params=e.unrequested_params, + routed_params=e.routed_params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={"groups": groups}) + routed_params.estimator = Bunch(fit=params) + routed_params.scorer = Bunch(score={}) + + indices = cv.split(X, y, **routed_params.splitter.split) + if return_indices: + # materialize the indices since we need to store them in the returned dict + indices = list(indices) + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. + parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorers, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=routed_params.estimator.fit, + score_params=routed_params.scorer.score, + return_train_score=return_train_score, + return_times=True, + return_estimator=return_estimator, + error_score=error_score, + ) + for train, test in indices + ) + + _warn_or_raise_about_fit_failures(results, error_score) + + # For callable scoring, the return type is only known after calling. If the + return type is a dictionary, the error scores can now be inserted with + the correct key. + if callable(scoring): + _insert_error_scores(results, error_score) + + results = _aggregate_score_dicts(results) + + ret = {} + ret["fit_time"] = results["fit_time"] + ret["score_time"] = results["score_time"] + + if return_estimator: + ret["estimator"] = results["estimator"] + + if return_indices: + ret["indices"] = {} + ret["indices"]["train"], ret["indices"]["test"] = zip(*indices) + + test_scores_dict = _normalize_score_results(results["test_scores"]) + if return_train_score: + train_scores_dict = _normalize_score_results(results["train_scores"]) + + for name in test_scores_dict: + ret["test_%s" % name] = test_scores_dict[name] + if return_train_score: + key = "train_%s" % name + ret[key] = train_scores_dict[name] + + return ret + + +def _insert_error_scores(results, error_score): + """Insert error in `results` by replacing them inplace with `error_score`. + + This only applies to multimetric scores because `_fit_and_score` will + handle the single metric case.
+ """ + successful_score = None + failed_indices = [] + for i, result in enumerate(results): + if result["fit_error"] is not None: + failed_indices.append(i) + elif successful_score is None: + successful_score = result["test_scores"] + + if isinstance(successful_score, dict): + formatted_error = {name: error_score for name in successful_score} + for i in failed_indices: + results[i]["test_scores"] = formatted_error.copy() + if "train_scores" in results[i]: + results[i]["train_scores"] = formatted_error.copy() + + +def _normalize_score_results(scores, scaler_score_key="score"): + """Creates a scoring dictionary based on the type of `scores`""" + if isinstance(scores[0], dict): + # multimetric scoring + return _aggregate_score_dicts(scores) + # scaler + return {scaler_score_key: scores} + + +def _warn_or_raise_about_fit_failures(results, error_score): + fit_errors = [ + result["fit_error"] for result in results if result["fit_error"] is not None + ] + if fit_errors: + num_failed_fits = len(fit_errors) + num_fits = len(results) + fit_errors_counter = Counter(fit_errors) + delimiter = "-" * 80 + "\n" + fit_errors_summary = "\n".join( + f"{delimiter}{n} fits failed with the following error:\n{error}" + for error, n in fit_errors_counter.items() + ) + + if num_failed_fits == num_fits: + all_fits_failed_message = ( + f"\nAll the {num_fits} fits failed.\n" + "It is very likely that your model is misconfigured.\n" + "You can try to debug the error by setting error_score='raise'.\n\n" + f"Below are more details about the failures:\n{fit_errors_summary}" + ) + raise ValueError(all_fits_failed_message) + + else: + some_fits_failed_message = ( + f"\n{num_failed_fits} fits failed out of a total of {num_fits}.\n" + "The score on these train-test partitions for these parameters" + f" will be set to {error_score}.\n" + "If these failures are not expected, you can try to debug them " + "by setting error_score='raise'.\n\n" + f"Below are more details about the failures:\n{fit_errors_summary}" + ) + warnings.warn(some_fits_failed_message, FitFailedWarning) + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str, None], + "error_score": [StrOptions({"raise"}), Real], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_val_score( + estimator, + X, + y=None, + *, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + error_score=np.nan, +): + """Evaluate a score by cross-validation. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be for example a list, or an array. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). 
+ + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. E.g.: + ``cross_val_score(..., params={'groups': groups})``. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)`` which should return only + a single value. + + Similar to :func:`cross_validate` + but only a single metric is permitted. + + If `None`, the estimator's default scorer (if available) is used. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - `None`, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. + + For `int`/`None` inputs, if the estimator is a classifier and `y` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + `cv` default value if `None` changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit``, the scorer, + and the CV splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - ``None``, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + Returns + ------- + scores : ndarray of float of shape=(len(list(cv)),) + Array of scores of the estimator for each run of the cross validation. + + See Also + -------- + cross_validate : To run cross-validation on multiple metrics and also to + return train scores, fit times and score times. + + cross_val_predict : Get predictions from each split of cross-validation for + diagnostic purposes. 
+ + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. + + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_val_score + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + >>> print(cross_val_score(lasso, X, y, cv=3)) + [0.3315057 0.08022103 0.03531816] + """ + # To ensure multimetric format is not supported + scorer = check_scoring(estimator, scoring=scoring) + + cv_results = cross_validate( + estimator=estimator, + X=X, + y=y, + groups=groups, + scoring={"score": scorer}, + cv=cv, + n_jobs=n_jobs, + verbose=verbose, + fit_params=fit_params, + params=params, + pre_dispatch=pre_dispatch, + error_score=error_score, + ) + return cv_results["test_score"] + + +def _fit_and_score( + estimator, + X, + y, + *, + scorer, + train, + test, + verbose, + parameters, + fit_params, + score_params, + return_train_score=False, + return_parameters=False, + return_n_test_samples=False, + return_times=False, + return_estimator=False, + split_progress=None, + candidate_progress=None, + error_score=np.nan, +): + """Fit estimator and compute scores for a given dataset split. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : array-like of shape (n_samples, n_features) + The data to fit. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + scorer : A single callable or dict mapping scorer name to the callable + If it is a single callable, the return value for ``train_scores`` and + ``test_scores`` is a single float. + + For a dict, it should be one mapping the scorer name to the scorer + callable object / function. + + The callable object / fn should have signature + ``scorer(estimator, X, y)``. + + train : array-like of shape (n_train_samples,) + Indices of training samples. + + test : array-like of shape (n_test_samples,) + Indices of test samples. + + verbose : int + The verbosity level. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + parameters : dict or None + Parameters to be set on the estimator. + + fit_params : dict or None + Parameters that will be passed to ``estimator.fit``. + + score_params : dict or None + Parameters that will be passed to the scorer. + + return_train_score : bool, default=False + Compute and return score on training set. + + return_parameters : bool, default=False + Return parameters that has been used for the estimator. + + split_progress : {list, tuple} of int, default=None + A list or tuple of format (, ). + + candidate_progress : {list, tuple} of int, default=None + A list or tuple of format + (, ). + + return_n_test_samples : bool, default=False + Whether to return the ``n_test_samples``. + + return_times : bool, default=False + Whether to return the fit/score times. + + return_estimator : bool, default=False + Whether to return the fitted estimator. + + Returns + ------- + result : dict with the following attributes + train_scores : dict of scorer name -> float + Score on training set (for all the scorers), + returned only if `return_train_score` is `True`. 
+ test_scores : dict of scorer name -> float + Score on testing set (for all the scorers). + n_test_samples : int + Number of test samples. + fit_time : float + Time spent for fitting in seconds. + score_time : float + Time spent for scoring in seconds. + parameters : dict or None + The parameters that have been evaluated. + estimator : estimator object + The fitted estimator. + fit_error : str or None + Traceback str if the fit failed, None if the fit succeeded. + """ + if not isinstance(error_score, numbers.Number) and error_score != "raise": + raise ValueError( + "error_score must be the string 'raise' or a numeric value. " + "(Hint: if using 'raise', please make sure that it has been " + "spelled correctly.)" + ) + + progress_msg = "" + if verbose > 2: + if split_progress is not None: + progress_msg = f" {split_progress[0]+1}/{split_progress[1]}" + if candidate_progress and verbose > 9: + progress_msg += f"; {candidate_progress[0]+1}/{candidate_progress[1]}" + + if verbose > 1: + if parameters is None: + params_msg = "" + else: + sorted_keys = sorted(parameters) # Ensure deterministic o/p + params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys) + if verbose > 9: + start_msg = f"[CV{progress_msg}] START {params_msg}" + print(f"{start_msg}{(80 - len(start_msg)) * '.'}") + + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + fit_params = _check_method_params(X, params=fit_params, indices=train) + score_params = score_params if score_params is not None else {} + score_params_train = _check_method_params(X, params=score_params, indices=train) + score_params_test = _check_method_params(X, params=score_params, indices=test) + + if parameters is not None: + # here we clone the parameters, since sometimes the parameters + # themselves might be estimators, e.g. when we search over different + # estimators in a pipeline. 
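A brief aside on the `clone(parameters, safe=False)` call in the statement that follows (toy objects assumed; this sketch is not part of the module): for a non-estimator container such as a parameter dict, `clone` with `safe=False` falls back to a deep copy, so any estimator nested in the candidate parameters is copied rather than shared between folds.

from sklearn.base import clone
from sklearn.linear_model import LinearRegression

# Hypothetical candidate parameters containing a nested estimator.
params = {"estimator": LinearRegression(), "n_jobs": 2}
cloned = clone(params, safe=False)
print(cloned["estimator"] is params["estimator"])  # False: the nested estimator was deep-copied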
+ # ref: https://github.com/scikit-learn/scikit-learn/pull/26786 + estimator = estimator.set_params(**clone(parameters, safe=False)) + + start_time = time.time() + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + + result = {} + try: + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + + except Exception: + # Note fit time as time until error + fit_time = time.time() - start_time + score_time = 0.0 + if error_score == "raise": + raise + elif isinstance(error_score, numbers.Number): + if isinstance(scorer, dict): + test_scores = {name: error_score for name in scorer} + if return_train_score: + train_scores = test_scores.copy() + else: + test_scores = error_score + if return_train_score: + train_scores = error_score + result["fit_error"] = format_exc() + else: + result["fit_error"] = None + + fit_time = time.time() - start_time + test_scores = _score( + estimator, X_test, y_test, scorer, score_params_test, error_score + ) + score_time = time.time() - start_time - fit_time + if return_train_score: + train_scores = _score( + estimator, X_train, y_train, scorer, score_params_train, error_score + ) + + if verbose > 1: + total_time = score_time + fit_time + end_msg = f"[CV{progress_msg}] END " + result_msg = params_msg + (";" if params_msg else "") + if verbose > 2: + if isinstance(test_scores, dict): + for scorer_name in sorted(test_scores): + result_msg += f" {scorer_name}: (" + if return_train_score: + scorer_scores = train_scores[scorer_name] + result_msg += f"train={scorer_scores:.3f}, " + result_msg += f"test={test_scores[scorer_name]:.3f})" + else: + result_msg += ", score=" + if return_train_score: + result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})" + else: + result_msg += f"{test_scores:.3f}" + result_msg += f" total time={logger.short_format_time(total_time)}" + + # Right align the result_msg + end_msg += "." * (80 - len(end_msg) - len(result_msg)) + end_msg += result_msg + print(end_msg) + + result["test_scores"] = test_scores + if return_train_score: + result["train_scores"] = train_scores + if return_n_test_samples: + result["n_test_samples"] = _num_samples(X_test) + if return_times: + result["fit_time"] = fit_time + result["score_time"] = score_time + if return_parameters: + result["parameters"] = parameters + if return_estimator: + result["estimator"] = estimator + return result + + +def _score(estimator, X_test, y_test, scorer, score_params, error_score="raise"): + """Compute the score(s) of an estimator on a given test set. + + Will return a dict of floats if `scorer` is a dict, otherwise a single + float is returned. + """ + if isinstance(scorer, dict): + # will cache method calls if needed. scorer() returns a dict + scorer = _MultimetricScorer(scorers=scorer, raise_exc=(error_score == "raise")) + + score_params = {} if score_params is None else score_params + + try: + if y_test is None: + scores = scorer(estimator, X_test, **score_params) + else: + scores = scorer(estimator, X_test, y_test, **score_params) + except Exception: + if isinstance(scorer, _MultimetricScorer): + # If `_MultimetricScorer` raises exception, the `error_score` + # parameter is equal to "raise". + raise + else: + if error_score == "raise": + raise + else: + scores = error_score + warnings.warn( + ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. 
Details: \n" + f"{format_exc()}" + ), + UserWarning, + ) + + # Check non-raised error messages in `_MultimetricScorer` + if isinstance(scorer, _MultimetricScorer): + exception_messages = [ + (name, str_e) for name, str_e in scores.items() if isinstance(str_e, str) + ] + if exception_messages: + # error_score != "raise" + for name, str_e in exception_messages: + scores[name] = error_score + warnings.warn( + ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. Details: \n" + f"{str_e}" + ), + UserWarning, + ) + + error_msg = "scoring must return a number, got %s (%s) instead. (scorer=%s)" + if isinstance(scores, dict): + for name, score in scores.items(): + if hasattr(score, "item"): + with suppress(ValueError): + # e.g. unwrap memmapped scalars + score = score.item() + if not isinstance(score, numbers.Number): + raise ValueError(error_msg % (score, type(score), name)) + scores[name] = score + else: # scalar + if hasattr(scores, "item"): + with suppress(ValueError): + # e.g. unwrap memmapped scalars + scores = scores.item() + if not isinstance(scores, numbers.Number): + raise ValueError(error_msg % (scores, type(scores), scorer)) + return scores + + +@validate_params( + { + "estimator": [HasMethods(["fit", "predict"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str, None], + "method": [ + StrOptions( + { + "predict", + "predict_proba", + "predict_log_proba", + "decision_function", + } + ) + ], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_val_predict( + estimator, + X, + y=None, + *, + groups=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + method="predict", +): + """Generate cross-validated estimates for each input data point. + + The data is split according to the cv parameter. Each sample belongs + to exactly one test set, and its prediction is computed with an + estimator fitted on the corresponding training set. + + Passing these predictions into an evaluation metric may not be a valid + way to measure generalization performance. Results can differ from + :func:`cross_validate` and :func:`cross_val_score` unless all tests sets + have equal size and the metric decomposes over samples. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator + The estimator instance to use to fit the data. It must implement a `fit` + method and the method given by the `method` parameter. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be, for example a list, or an array at least 2d. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. 
E.g.: + ``cross_val_predict(..., params={'groups': groups})``. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + predicting are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit`` and the CV + splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + method : {'predict', 'predict_proba', 'predict_log_proba', \ + 'decision_function'}, default='predict' + The method to be invoked by `estimator`. + + Returns + ------- + predictions : ndarray + This is the result of calling `method`. Shape: + + - When `method` is 'predict' and in special case where `method` is + 'decision_function' and the target is binary: (n_samples,) + - When `method` is one of {'predict_proba', 'predict_log_proba', + 'decision_function'} (unless special case above): + (n_samples, n_classes) + - If `estimator` is :term:`multioutput`, an extra dimension + 'n_outputs' is added to the end of each shape above. + + See Also + -------- + cross_val_score : Calculate score for each CV split. + cross_validate : Calculate one or more scores and timings for each CV + split. + + Notes + ----- + In the case that one or more classes are absent in a training portion, a + default score needs to be assigned to all instances for that class if + ``method`` produces columns per class, as in {'decision_function', + 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is + 0. In order to ensure finite output, we approximate negative infinity by + the minimum finite float value for the dtype in other cases. 
+ + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_val_predict + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + >>> y_pred = cross_val_predict(lasso, X, y, cv=3) + """ + params = _check_params_groups_deprecation(fit_params, params, groups) + X, y = indexable(X, y) + + if _routing_enabled(): + # For estimators, a MetadataRouter is created in get_metadata_routing + # methods. For these router methods, we create the router to use + # `process_routing` on it. + router = ( + MetadataRouter(owner="cross_validate") + .add( + splitter=cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + .add( + estimator=estimator, + # TODO(SLEP6): also pass metadata for the predict method. + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + ) + try: + routed_params = process_routing(router, "fit", **params) + except UnsetMetadataPassedError as e: + # The default exception would mention `fit` since in the above + # `process_routing` code, we pass `fit` as the caller. However, + # the user is not calling `fit` directly, so we change the message + # to make it more suitable for this case. + unrequested_params = sorted(e.unrequested_params) + raise UnsetMetadataPassedError( + message=( + f"{unrequested_params} are passed to `cross_val_predict` but are" + " not explicitly set as requested or not requested for" + f" cross_validate's estimator: {estimator.__class__.__name__} Call" + " `.set_fit_request({{metadata}}=True)` on the estimator for" + f" each metadata in {unrequested_params} that you want to use and" + " `metadata=False` for not using it. See the Metadata Routing User" + " guide " + " for more information." + ), + unrequested_params=e.unrequested_params, + routed_params=e.routed_params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={"groups": groups}) + routed_params.estimator = Bunch(fit=params) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + splits = list(cv.split(X, y, **routed_params.splitter.split)) + + test_indices = np.concatenate([test for _, test in splits]) + if not _check_is_permutation(test_indices, _num_samples(X)): + raise ValueError("cross_val_predict only works for partitions") + + # If classification methods produce multiple columns of output, + # we need to manually encode classes to ensure consistent column ordering. + encode = ( + method in ["decision_function", "predict_proba", "predict_log_proba"] + and y is not None + ) + if encode: + y = np.asarray(y) + if y.ndim == 1: + le = LabelEncoder() + y = le.fit_transform(y) + elif y.ndim == 2: + y_enc = np.zeros_like(y, dtype=int) + for i_label in range(y.shape[1]): + y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label]) + y = y_enc + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. 
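The out-of-fold predictions gathered by the `Parallel` call just below come back ordered by fold; a minimal sketch of the inverse-permutation step that later restores the original sample order (toy values assumed, not taken from the diff):

import numpy as np

# Three samples whose test folds were visited in the order 2, 0, 1.
test_indices = np.array([2, 0, 1])                   # concatenation of the test folds
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
fold_ordered_predictions = np.array([20.0, 0.0, 10.0])
print(fold_ordered_predictions[inv_test_indices])    # [ 0. 10. 20.] -- original sample order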
+ parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) + predictions = parallel( + delayed(_fit_and_predict)( + clone(estimator), + X, + y, + train, + test, + routed_params.estimator.fit, + method, + ) + for train, test in splits + ) + + inv_test_indices = np.empty(len(test_indices), dtype=int) + inv_test_indices[test_indices] = np.arange(len(test_indices)) + + if sp.issparse(predictions[0]): + predictions = sp.vstack(predictions, format=predictions[0].format) + elif encode and isinstance(predictions[0], list): + # `predictions` is a list of method outputs from each fold. + # If each of those is also a list, then treat this as a + # multioutput-multiclass task. We need to separately concatenate + # the method outputs for each label into an `n_labels` long list. + n_labels = y.shape[1] + concat_pred = [] + for i_label in range(n_labels): + label_preds = np.concatenate([p[i_label] for p in predictions]) + concat_pred.append(label_preds) + predictions = concat_pred + else: + predictions = np.concatenate(predictions) + + if isinstance(predictions, list): + return [p[inv_test_indices] for p in predictions] + else: + return predictions[inv_test_indices] + + +def _fit_and_predict(estimator, X, y, train, test, fit_params, method): + """Fit estimator and predict values for a given dataset split. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' and 'predict' + The object to use to fit the data. + + X : array-like of shape (n_samples, n_features) + The data to fit. + + .. versionchanged:: 0.20 + X is only required to be an object with finite length or shape now + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + train : array-like of shape (n_train_samples,) + Indices of training samples. + + test : array-like of shape (n_test_samples,) + Indices of test samples. + + fit_params : dict or None + Parameters that will be passed to ``estimator.fit``. + + method : str + Invokes the passed method name of the passed estimator. 
+ + Returns + ------- + predictions : sequence + Result of calling 'estimator.method' + """ + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + fit_params = _check_method_params(X, params=fit_params, indices=train) + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, _ = _safe_split(estimator, X, y, test, train) + + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + func = getattr(estimator, method) + predictions = func(X_test) + + encode = ( + method in ["decision_function", "predict_proba", "predict_log_proba"] + and y is not None + ) + + if encode: + if isinstance(predictions, list): + predictions = [ + _enforce_prediction_order( + estimator.classes_[i_label], + predictions[i_label], + n_classes=len(set(y[:, i_label])), + method=method, + ) + for i_label in range(len(predictions)) + ] + else: + # A 2D y array should be a binary label indicator matrix + n_classes = len(set(y)) if y.ndim == 1 else y.shape[1] + predictions = _enforce_prediction_order( + estimator.classes_, predictions, n_classes, method + ) + return predictions + + +def _enforce_prediction_order(classes, predictions, n_classes, method): + """Ensure that prediction arrays have correct column order + + When doing cross-validation, if one or more classes are + not present in the subset of data used for training, + then the output prediction array might not have the same + columns as other folds. Use the list of class names + (assumed to be ints) to enforce the correct column order. + + Note that `classes` is the list of classes in this fold + (a subset of the classes in the full training set) + and `n_classes` is the number of classes in the full training set. + """ + if n_classes != len(classes): + recommendation = ( + "To fix this, use a cross-validation " + "technique resulting in properly " + "stratified folds" + ) + warnings.warn( + "Number of classes in training fold ({}) does " + "not match total number of classes ({}). " + "Results may not be appropriate for your use case. " + "{}".format(len(classes), n_classes, recommendation), + RuntimeWarning, + ) + if method == "decision_function": + if predictions.ndim == 2 and predictions.shape[1] != len(classes): + # This handles the case when the shape of predictions + # does not match the number of classes used to train + # it with. This case is found when sklearn.svm.SVC is + # set to `decision_function_shape='ovo'`. + raise ValueError( + "Output shape {} of {} does not match " + "number of classes ({}) in fold. " + "Irregular decision_function outputs " + "are not currently supported by " + "cross_val_predict".format(predictions.shape, method, len(classes)) + ) + if len(classes) <= 2: + # In this special case, `predictions` contains a 1D array. + raise ValueError( + "Only {} class/es in training fold, but {} " + "in overall dataset. This " + "is not supported for decision_function " + "with imbalanced folds. 
{}".format( + len(classes), n_classes, recommendation + ) + ) + + float_min = np.finfo(predictions.dtype).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + predictions_for_all_classes = np.full( + (_num_samples(predictions), n_classes), + default_values[method], + dtype=predictions.dtype, + ) + predictions_for_all_classes[:, classes] = predictions + predictions = predictions_for_all_classes + return predictions + + +def _check_is_permutation(indices, n_samples): + """Check whether indices is a reordering of the array np.arange(n_samples) + + Parameters + ---------- + indices : ndarray + int array to test + n_samples : int + number of expected elements + + Returns + ------- + is_partition : bool + True iff sorted(indices) is np.arange(n) + """ + if len(indices) != n_samples: + return False + hit = np.zeros(n_samples, dtype=bool) + hit[indices] = True + if not np.all(hit): + return False + return True + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "cv": ["cv_object"], + "n_permutations": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "verbose": ["verbose"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def permutation_test_score( + estimator, + X, + y, + *, + groups=None, + cv=None, + n_permutations=100, + n_jobs=None, + random_state=0, + verbose=0, + scoring=None, + fit_params=None, +): + """Evaluate the significance of a cross-validated score with permutations. + + Permutes targets to generate 'randomized data' and compute the empirical + p-value against the null hypothesis that features and targets are + independent. + + The p-value represents the fraction of randomized data sets where the + estimator performed as well or better than in the original data. A small + p-value suggests that there is a real dependency between features and + targets which has been used by the estimator to give good predictions. + A large p-value may be due to lack of real dependency between features + and targets or the estimator was not able to use the dependency to + give good predictions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : array-like of shape at least 2D + The data to fit. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Labels to constrain permutation within groups, i.e. ``y`` values + are permuted among samples with the same group identifier. + When not specified, ``y`` values are permuted among all samples. + + When a grouped cross-validator is used, the group labels are + also passed on to the ``split`` method of the cross-validator. The + cross-validator uses them for grouping the samples while splitting + the dataset into train/test set. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. 
+ Possible inputs for cv are: + + - `None`, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For `int`/`None` inputs, if the estimator is a classifier and `y` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + `cv` default value if `None` changed from 3-fold to 5-fold. + + n_permutations : int, default=100 + Number of times to permute ``y``. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the cross-validated score are parallelized over the permutations. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance or None, default=0 + Pass an int for reproducible output for permutation of + ``y`` values among samples. See :term:`Glossary `. + + verbose : int, default=0 + The verbosity level. + + scoring : str or callable, default=None + A single str (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + + If `None` the estimator's score method is used. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. versionadded:: 0.24 + + Returns + ------- + score : float + The true score without permuting targets. + + permutation_scores : array of shape (n_permutations,) + The scores obtained for each permutations. + + pvalue : float + The p-value, which approximates the probability that the score would + be obtained by chance. This is calculated as: + + `(C + 1) / (n_permutations + 1)` + + Where C is the number of permutations whose score >= the true score. + + The best possible p-value is 1/(n_permutations + 1), the worst is 1.0. + + Notes + ----- + This function implements Test 1 in: + + Ojala and Garriga. `Permutation Tests for Studying Classifier + Performance + `_. The + Journal of Machine Learning Research (2010) vol. 11 + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import permutation_test_score + >>> X, y = make_classification(random_state=0) + >>> estimator = LogisticRegression() + >>> score, permutation_scores, pvalue = permutation_test_score( + ... estimator, X, y, random_state=0 + ... ) + >>> print(f"Original Score: {score:.3f}") + Original Score: 0.810 + >>> print( + ... f"Permutation Scores: {permutation_scores.mean():.3f} +/- " + ... f"{permutation_scores.std():.3f}" + ... ) + Permutation Scores: 0.505 +/- 0.057 + >>> print(f"P-value: {pvalue:.3f}") + P-value: 0.010 + """ + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + scorer = check_scoring(estimator, scoring=scoring) + random_state = check_random_state(random_state) + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. 
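Before the scores are computed below, a quick numeric check of the `(C + 1) / (n_permutations + 1)` p-value formula described in the docstring, on assumed toy numbers (a sketch only, not part of the module):

import numpy as np

score = 0.81                              # assumed true score
permutation_scores = np.full(100, 0.50)   # assumed permutation scores, all well below it
n_permutations = 100
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
print(pvalue)  # 1/101, about 0.0099: the smallest p-value reachable with 100 permutations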
+ score = _permutation_test_score( + clone(estimator), X, y, groups, cv, scorer, fit_params=fit_params + ) + permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(_permutation_test_score)( + clone(estimator), + X, + _shuffle(y, groups, random_state), + groups, + cv, + scorer, + fit_params=fit_params, + ) + for _ in range(n_permutations) + ) + permutation_scores = np.array(permutation_scores) + pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) + return score, permutation_scores, pvalue + + +def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params): + """Auxiliary function for permutation_test_score""" + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + avg_score = [] + for train, test in cv.split(X, y, groups): + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + fit_params = _check_method_params(X, params=fit_params, indices=train) + estimator.fit(X_train, y_train, **fit_params) + avg_score.append(scorer(estimator, X_test, y_test)) + return np.mean(avg_score) + + +def _shuffle(y, groups, random_state): + """Return a shuffled copy of y eventually shuffle among same groups.""" + if groups is None: + indices = random_state.permutation(len(y)) + else: + indices = np.arange(len(groups)) + for group in np.unique(groups): + this_mask = groups == group + indices[this_mask] = random_state.permutation(indices[this_mask]) + return _safe_indexing(y, indices) + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "train_sizes": ["array-like"], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "exploit_incremental_learning": ["boolean"], + "n_jobs": [Integral, None], + "pre_dispatch": [Integral, str], + "verbose": ["verbose"], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "error_score": [StrOptions({"raise"}), Real], + "return_times": ["boolean"], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def learning_curve( + estimator, + X, + y, + *, + groups=None, + train_sizes=np.linspace(0.1, 1.0, 5), + cv=None, + scoring=None, + exploit_incremental_learning=False, + n_jobs=None, + pre_dispatch="all", + verbose=0, + shuffle=False, + random_state=None, + error_score=np.nan, + return_times=False, + fit_params=None, +): + """Learning curve. + + Determines cross-validated training and test scores for different training + set sizes. + + A cross-validation generator splits the whole dataset k times in training + and test data. Subsets of the training set with varying sizes will be used + to train the estimator and a score for each training subset size and the + test set will be computed. Afterwards, the scores will be averaged over + all k runs for each training subset size. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object type that implements the "fit" method + An object of that type which is cloned for each validation. It must + also implement "predict" unless `scoring` is a callable that doesn't + rely on "predict" to compute a score. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. 
+ + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + train_sizes : array-like of shape (n_ticks,), \ + default=np.linspace(0.1, 1.0, 5) + Relative or absolute numbers of training examples that will be used to + generate the learning curve. If the dtype is float, it is regarded as a + fraction of the maximum size of the training set (that is determined + by the selected validation method), i.e. it has to be within (0, 1]. + Otherwise it is interpreted as absolute sizes of the training sets. + Note that for classification the number of samples usually have to + be big enough to contain at least one sample from each class. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + exploit_incremental_learning : bool, default=False + If the estimator supports incremental learning, this will be + used to speed up fitting for different training set sizes. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the different training and test sets. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + shuffle : bool, default=False + Whether to shuffle training data before taking prefixes of it + based on``train_sizes``. + + random_state : int, RandomState instance or None, default=None + Used when ``shuffle`` is True. Pass an int for reproducible + output across multiple function calls. + See :term:`Glossary `. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + return_times : bool, default=False + Whether to return the fit and score times. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. 
+ + .. versionadded:: 0.24 + + Returns + ------- + train_sizes_abs : array of shape (n_unique_ticks,) + Numbers of training examples that has been used to generate the + learning curve. Note that the number of ticks might be less + than n_ticks because duplicate entries will be removed. + + train_scores : array of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : array of shape (n_ticks, n_cv_folds) + Scores on test set. + + fit_times : array of shape (n_ticks, n_cv_folds) + Times spent for fitting in seconds. Only present if ``return_times`` + is True. + + score_times : array of shape (n_ticks, n_cv_folds) + Times spent for scoring in seconds. Only present if ``return_times`` + is True. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.tree import DecisionTreeClassifier + >>> from sklearn.model_selection import learning_curve + >>> X, y = make_classification(n_samples=100, n_features=10, random_state=42) + >>> tree = DecisionTreeClassifier(max_depth=4, random_state=42) + >>> train_size_abs, train_scores, test_scores = learning_curve( + ... tree, X, y, train_sizes=[0.3, 0.6, 0.9] + ... ) + >>> for train_size, cv_train_scores, cv_test_scores in zip( + ... train_size_abs, train_scores, test_scores + ... ): + ... print(f"{train_size} samples were used to train the model") + ... print(f"The average train accuracy is {cv_train_scores.mean():.2f}") + ... print(f"The average test accuracy is {cv_test_scores.mean():.2f}") + 24 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.85 + 48 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.90 + 72 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.93 + """ + if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): + raise ValueError( + "An estimator must support the partial_fit interface " + "to exploit incremental learning" + ) + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + # Store it as list as we will be iterating over the list multiple times + cv_iter = list(cv.split(X, y, groups)) + + scorer = check_scoring(estimator, scoring=scoring) + + n_max_training_samples = len(cv_iter[0][0]) + # Because the lengths of folds can be significantly different, it is + # not guaranteed that we use all of the available training data when we + # use the first 'n_max_training_samples' samples. 
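A small sketch (assumed values, separate from the module) of the fraction-to-absolute conversion that `_translate_train_sizes` performs just below: with 5-fold CV on 100 samples the largest training split holds 80 samples, so fractional `train_sizes` scale against that maximum.

import numpy as np

train_sizes = np.array([0.3, 0.6, 0.9])  # fractions, as in the docstring example above
n_max_training_samples = 80              # e.g. 100 samples under 5-fold CV
print((train_sizes * n_max_training_samples).astype(int))  # [24 48 72]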
+ train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) + n_unique_ticks = train_sizes_abs.shape[0] + if verbose > 0: + print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) + + parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) + + if shuffle: + rng = check_random_state(random_state) + cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) + + if exploit_incremental_learning: + classes = np.unique(y) if is_classifier(estimator) else None + out = parallel( + delayed(_incremental_fit_estimator)( + clone(estimator), + X, + y, + classes, + train, + test, + train_sizes_abs, + scorer, + return_times, + error_score=error_score, + fit_params=fit_params, + ) + for train, test in cv_iter + ) + out = np.asarray(out).transpose((2, 1, 0)) + else: + train_test_proportions = [] + for train, test in cv_iter: + for n_train_samples in train_sizes_abs: + train_test_proportions.append((train[:n_train_samples], test)) + + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=fit_params, + # TODO(SLEP6): support score params here + score_params=None, + return_train_score=True, + error_score=error_score, + return_times=return_times, + ) + for train, test in train_test_proportions + ) + _warn_or_raise_about_fit_failures(results, error_score) + results = _aggregate_score_dicts(results) + train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T + test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T + out = [train_scores, test_scores] + + if return_times: + fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T + score_times = results["score_time"].reshape(-1, n_unique_ticks).T + out.extend([fit_times, score_times]) + + ret = train_sizes_abs, out[0], out[1] + + if return_times: + ret = ret + (out[2], out[3]) + + return ret + + +def _translate_train_sizes(train_sizes, n_max_training_samples): + """Determine absolute sizes of training subsets and validate 'train_sizes'. + + Examples: + _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] + _translate_train_sizes([5, 10], 10) -> [5, 10] + + Parameters + ---------- + train_sizes : array-like of shape (n_ticks,) + Numbers of training examples that will be used to generate the + learning curve. If the dtype is float, it is regarded as a + fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. + + n_max_training_samples : int + Maximum number of training samples (upper bound of 'train_sizes'). + + Returns + ------- + train_sizes_abs : array of shape (n_unique_ticks,) + Numbers of training examples that will be used to generate the + learning curve. Note that the number of ticks might be less + than n_ticks because duplicate entries will be removed. + """ + train_sizes_abs = np.asarray(train_sizes) + n_ticks = train_sizes_abs.shape[0] + n_min_required_samples = np.min(train_sizes_abs) + n_max_required_samples = np.max(train_sizes_abs) + if np.issubdtype(train_sizes_abs.dtype, np.floating): + if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: + raise ValueError( + "train_sizes has been interpreted as fractions " + "of the maximum number of training samples and " + "must be within (0, 1], but is within [%f, %f]." 
+ % (n_min_required_samples, n_max_required_samples) + ) + train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype( + dtype=int, copy=False + ) + train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) + else: + if ( + n_min_required_samples <= 0 + or n_max_required_samples > n_max_training_samples + ): + raise ValueError( + "train_sizes has been interpreted as absolute " + "numbers of training samples and must be within " + "(0, %d], but is within [%d, %d]." + % ( + n_max_training_samples, + n_min_required_samples, + n_max_required_samples, + ) + ) + + train_sizes_abs = np.unique(train_sizes_abs) + if n_ticks > train_sizes_abs.shape[0]: + warnings.warn( + "Removed duplicate entries from 'train_sizes'. Number " + "of ticks will be less than the size of " + "'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks), + RuntimeWarning, + ) + + return train_sizes_abs + + +def _incremental_fit_estimator( + estimator, + X, + y, + classes, + train, + test, + train_sizes, + scorer, + return_times, + error_score, + fit_params, +): + """Train estimator on training subsets incrementally and compute scores.""" + train_scores, test_scores, fit_times, score_times = [], [], [], [] + partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) + if fit_params is None: + fit_params = {} + if classes is None: + partial_fit_func = partial(estimator.partial_fit, **fit_params) + else: + partial_fit_func = partial(estimator.partial_fit, classes=classes, **fit_params) + + for n_train_samples, partial_train in partitions: + train_subset = train[:n_train_samples] + X_train, y_train = _safe_split(estimator, X, y, train_subset) + X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) + X_test, y_test = _safe_split(estimator, X, y, test, train_subset) + start_fit = time.time() + if y_partial_train is None: + partial_fit_func(X_partial_train) + else: + partial_fit_func(X_partial_train, y_partial_train) + fit_time = time.time() - start_fit + fit_times.append(fit_time) + + start_score = time.time() + + # TODO(SLEP6): support score params in the following two calls + test_scores.append( + _score( + estimator, + X_test, + y_test, + scorer, + score_params=None, + error_score=error_score, + ) + ) + train_scores.append( + _score( + estimator, + X_train, + y_train, + scorer, + score_params=None, + error_score=error_score, + ) + ) + score_time = time.time() - start_score + score_times.append(score_time) + + ret = ( + (train_scores, test_scores, fit_times, score_times) + if return_times + else (train_scores, test_scores) + ) + + return np.array(ret).T + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "param_name": [str], + "param_range": ["array-like"], + "groups": ["array-like", None], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "n_jobs": [Integral, None], + "pre_dispatch": [Integral, str], + "verbose": ["verbose"], + "error_score": [StrOptions({"raise"}), Real], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def validation_curve( + estimator, + X, + y, + *, + param_name, + param_range, + groups=None, + cv=None, + scoring=None, + n_jobs=None, + pre_dispatch="all", + verbose=0, + error_score=np.nan, + fit_params=None, +): + """Validation curve. + + Determine training and test scores for varying parameter values. 
+ + Compute scores for an estimator with different values of a specified + parameter. This is similar to grid search with one parameter. However, this + will also compute training scores and is merely a utility for plotting the + results. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object type that implements the "fit" method + An object of that type which is cloned for each validation. It must + also implement "predict" unless `scoring` is a callable that doesn't + rely on "predict" to compute a score. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + param_name : str + Name of the parameter that will be varied. + + param_range : array-like of shape (n_values,) + The values of the parameter that will be evaluated. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the combinations of each parameter + value and each cross-validation split. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. versionadded:: 0.24 + + Returns + ------- + train_scores : array of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : array of shape (n_ticks, n_cv_folds) + Scores on test set. 
+ + Notes + ----- + See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import validation_curve + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> train_scores, test_scores = validation_curve( + ... logistic_regression, X, y, param_name=param_name, param_range=param_range + ... ) + >>> print(f"The average train accuracy is {train_scores.mean():.2f}") + The average train accuracy is 0.81 + >>> print(f"The average test accuracy is {test_scores.mean():.2f}") + The average test accuracy is 0.81 + """ + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + scorer = check_scoring(estimator, scoring=scoring) + + parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters={param_name: v}, + fit_params=fit_params, + # TODO(SLEP6): support score params here + score_params=None, + return_train_score=True, + error_score=error_score, + ) + # NOTE do not change order of iteration to allow one time cv splitters + for train, test in cv.split(X, y, groups) + for v in param_range + ) + n_params = len(param_range) + + results = _aggregate_score_dicts(results) + train_scores = results["train_scores"].reshape(-1, n_params).T + test_scores = results["test_scores"].reshape(-1, n_params).T + + return train_scores, test_scores + + +def _aggregate_score_dicts(scores): + """Aggregate the list of dict to dict of np ndarray + + The aggregated output of _aggregate_score_dicts will be a list of dict + of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...] + Convert it to a dict of array {'prec': np.array([0.1 ...]), ...} + + Parameters + ---------- + + scores : list of dict + List of dicts of the scores for all scorers. This is a flat list, + assumed originally to be of row major order. + + Example + ------- + + >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3}, + ... 
{'a': 10, 'b': 10}] # doctest: +SKIP + >>> _aggregate_score_dicts(scores) # doctest: +SKIP + {'a': array([1, 2, 3, 10]), + 'b': array([10, 2, 3, 10])} + """ + return { + key: ( + np.asarray([score[key] for score in scores]) + if isinstance(scores[0][key], numbers.Number) + else [score[key] for score in scores] + ) + for key in scores[0] + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19798c9b50e4a981d09ece93f00dd02d488d8282 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64af48cded84f3299bafa8d726d4f1463568dc70 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc79ebb441fdb847369eacb13870a8b6f4c17f66 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c201b1eb85d10f277fa9186f16ef947073dd4176 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb5bea4b5859202f8d4a77fe93130a88a5654b84 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f901d8793217fd6b0653dce33e4681d7154aabe2 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3eeae5f1ca2aa2ebb22111c55888d1ffa442ad9f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py new file mode 100644 index 0000000000000000000000000000000000000000..54a993db76933a5e710f0ddd20a4efd0118ecf95 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py @@ -0,0 +1,24 @@ +""" +Common utilities for testing model selection. +""" + +import numpy as np + +from sklearn.model_selection import KFold + + +class OneTimeSplitter: + """A wrapper to make KFold single entry cv iterator""" + + def __init__(self, n_splits=4, n_samples=99): + self.n_splits = n_splits + self.n_samples = n_samples + self.indices = iter(KFold(n_splits=n_splits).split(np.ones(n_samples))) + + def split(self, X=None, y=None, groups=None): + """Split can be called only once""" + for index in self.indices: + yield index + + def get_n_splits(self, X=None, y=None, groups=None): + return self.n_splits diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..1a7268150fd90ceaa67d18a8455e85941e0c016e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py @@ -0,0 +1,595 @@ +import numpy as np +import pytest + +from sklearn.datasets import load_iris +from sklearn.model_selection import ( + LearningCurveDisplay, + ValidationCurveDisplay, + learning_curve, + validation_curve, +) +from sklearn.tree import DecisionTreeClassifier +from sklearn.utils import shuffle +from sklearn.utils._testing import assert_allclose, assert_array_equal + + +@pytest.fixture +def data(): + return shuffle(*load_iris(return_X_y=True), random_state=0) + + +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + ({"std_display_style": "invalid"}, ValueError, "Unknown std_display_style:"), + ({"score_type": "invalid"}, ValueError, "Unknown score_type:"), + ], +) +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_parameters_validation( + pyplot, data, params, err_type, err_msg, CurveDisplay, specific_params +): + """Check that we raise a proper error when passing invalid parameters.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + with pytest.raises(err_type, match=err_msg): + CurveDisplay.from_estimator(estimator, X, y, **specific_params, **params) + + +def test_learning_curve_display_default_usage(pyplot, data): + """Check the default usage of the LearningCurveDisplay class.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + 
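For reference, the call pattern this test exercises can be sketched standalone; the dataset, estimator, and train sizes below are illustrative choices, not taken from the diff.

import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import LearningCurveDisplay
from sklearn.tree import DecisionTreeClassifier

# Fit on growing fractions of the training set and plot train/test score curves.
X, y = load_iris(return_X_y=True)
display = LearningCurveDisplay.from_estimator(
    DecisionTreeClassifier(random_state=0),
    X,
    y,
    train_sizes=[0.3, 0.6, 0.9],  # fractions of the maximum training set size
    std_display_style="fill_between",
)
display.ax_.set_title("Learning curve (illustrative)")
plt.show()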
train_sizes = [0.3, 0.6, 0.9] + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=train_sizes + ) + + import matplotlib as mpl + + assert display.errorbar_ is None + + assert isinstance(display.lines_, list) + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + + assert isinstance(display.fill_between_, list) + for fill in display.fill_between_: + assert isinstance(fill, mpl.collections.PolyCollection) + assert fill.get_alpha() == 0.5 + + assert display.score_name == "Score" + assert display.ax_.get_xlabel() == "Number of samples in the training set" + assert display.ax_.get_ylabel() == "Score" + + _, legend_labels = display.ax_.get_legend_handles_labels() + assert legend_labels == ["Train", "Test"] + + train_sizes_abs, train_scores, test_scores = learning_curve( + estimator, X, y, train_sizes=train_sizes + ) + + assert_array_equal(display.train_sizes, train_sizes_abs) + assert_allclose(display.train_scores, train_scores) + assert_allclose(display.test_scores, test_scores) + + +def test_validation_curve_display_default_usage(pyplot, data): + """Check the default usage of the ValidationCurveDisplay class.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name, param_range = "max_depth", [1, 3, 5] + display = ValidationCurveDisplay.from_estimator( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + import matplotlib as mpl + + assert display.errorbar_ is None + + assert isinstance(display.lines_, list) + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + + assert isinstance(display.fill_between_, list) + for fill in display.fill_between_: + assert isinstance(fill, mpl.collections.PolyCollection) + assert fill.get_alpha() == 0.5 + + assert display.score_name == "Score" + assert display.ax_.get_xlabel() == f"{param_name}" + assert display.ax_.get_ylabel() == "Score" + + _, legend_labels = display.ax_.get_legend_handles_labels() + assert legend_labels == ["Train", "Test"] + + train_scores, test_scores = validation_curve( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + assert_array_equal(display.param_range, param_range) + assert_allclose(display.train_scores, train_scores) + assert_allclose(display.test_scores, test_scores) + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_negate_score(pyplot, data, CurveDisplay, specific_params): + """Check the behaviour of the `negate_score` parameter calling `from_estimator` and + `plot`. 
+ """ + X, y = data + estimator = DecisionTreeClassifier(max_depth=1, random_state=0) + + negate_score = False + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + + positive_scores = display.lines_[0].get_data()[1] + assert (positive_scores >= 0).all() + assert display.ax_.get_ylabel() == "Score" + + negate_score = True + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + + negative_scores = display.lines_[0].get_data()[1] + assert (negative_scores <= 0).all() + assert_allclose(negative_scores, -positive_scores) + assert display.ax_.get_ylabel() == "Negative score" + + negate_score = False + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + assert display.ax_.get_ylabel() == "Score" + display.plot(negate_score=not negate_score) + assert display.ax_.get_ylabel() == "Score" + assert (display.lines_[0].get_data()[1] < 0).all() + + +@pytest.mark.parametrize( + "score_name, ylabel", [(None, "Score"), ("Accuracy", "Accuracy")] +) +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_score_name( + pyplot, data, score_name, ylabel, CurveDisplay, specific_params +): + """Check that we can overwrite the default score name shown on the y-axis.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, score_name=score_name + ) + + assert display.ax_.get_ylabel() == ylabel + X, y = data + estimator = DecisionTreeClassifier(max_depth=1, random_state=0) + + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, score_name=score_name + ) + + assert display.score_name == ylabel + + +@pytest.mark.parametrize("std_display_style", (None, "errorbar")) +def test_learning_curve_display_score_type(pyplot, data, std_display_style): + """Check the behaviour of setting the `score_type` parameter.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + train_sizes = [0.3, 0.6, 0.9] + train_sizes_abs, train_scores, test_scores = learning_curve( + estimator, X, y, train_sizes=train_sizes + ) + + score_type = "train" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, train_sizes_abs) + assert_allclose(y_data, train_scores.mean(axis=1)) + + score_type = "test" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Test"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + 
assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, train_sizes_abs) + assert_allclose(y_data, test_scores.mean(axis=1)) + + score_type = "both" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train", "Test"] + + if std_display_style is None: + assert len(display.lines_) == 2 + assert display.errorbar_ is None + x_data_train, y_data_train = display.lines_[0].get_data() + x_data_test, y_data_test = display.lines_[1].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 2 + x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data() + x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data() + + assert_array_equal(x_data_train, train_sizes_abs) + assert_allclose(y_data_train, train_scores.mean(axis=1)) + assert_array_equal(x_data_test, train_sizes_abs) + assert_allclose(y_data_test, test_scores.mean(axis=1)) + + +@pytest.mark.parametrize("std_display_style", (None, "errorbar")) +def test_validation_curve_display_score_type(pyplot, data, std_display_style): + """Check the behaviour of setting the `score_type` parameter.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name, param_range = "max_depth", [1, 3, 5] + train_scores, test_scores = validation_curve( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + score_type = "train" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, param_range) + assert_allclose(y_data, train_scores.mean(axis=1)) + + score_type = "test" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Test"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, param_range) + assert_allclose(y_data, test_scores.mean(axis=1)) + + score_type = "both" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train", "Test"] + + if std_display_style is None: + assert len(display.lines_) == 2 + assert display.errorbar_ is None + x_data_train, y_data_train = display.lines_[0].get_data() + x_data_test, y_data_test = display.lines_[1].get_data() + 
else: + assert display.lines_ is None + assert len(display.errorbar_) == 2 + x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data() + x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data() + + assert_array_equal(x_data_train, param_range) + assert_allclose(y_data_train, train_scores.mean(axis=1)) + assert_array_equal(x_data_test, param_range) + assert_allclose(y_data_test, test_scores.mean(axis=1)) + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params, expected_xscale", + [ + ( + ValidationCurveDisplay, + {"param_name": "max_depth", "param_range": np.arange(1, 5)}, + "linear", + ), + (LearningCurveDisplay, {"train_sizes": np.linspace(0.1, 0.9, num=5)}, "linear"), + ( + ValidationCurveDisplay, + { + "param_name": "max_depth", + "param_range": np.round(np.logspace(0, 2, num=5)).astype(np.int64), + }, + "log", + ), + (LearningCurveDisplay, {"train_sizes": np.logspace(-1, 0, num=5)}, "log"), + ], +) +def test_curve_display_xscale_auto( + pyplot, data, CurveDisplay, specific_params, expected_xscale +): + """Check the behaviour of the x-axis scaling depending on the data provided.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + display = CurveDisplay.from_estimator(estimator, X, y, **specific_params) + assert display.ax_.get_xscale() == expected_xscale + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_std_display_style(pyplot, data, CurveDisplay, specific_params): + """Check the behaviour of the parameter `std_display_style`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + import matplotlib as mpl + + std_display_style = None + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert len(display.lines_) == 2 + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + assert display.errorbar_ is None + assert display.fill_between_ is None + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + std_display_style = "fill_between" + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert len(display.lines_) == 2 + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + assert display.errorbar_ is None + assert len(display.fill_between_) == 2 + for fill_between in display.fill_between_: + assert isinstance(fill_between, mpl.collections.PolyCollection) + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + std_display_style = "errorbar" + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert display.lines_ is None + assert len(display.errorbar_) == 2 + for errorbar in display.errorbar_: + assert isinstance(errorbar, mpl.container.ErrorbarContainer) + assert display.fill_between_ is None + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_plot_kwargs(pyplot, data, CurveDisplay, 
specific_params): + """Check the behaviour of the different plotting keyword arguments: `line_kw`, + `fill_between_kw`, and `errorbar_kw`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + std_display_style = "fill_between" + line_kw = {"color": "red"} + fill_between_kw = {"color": "red", "alpha": 1.0} + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + ) + + assert display.lines_[0].get_color() == "red" + assert_allclose( + display.fill_between_[0].get_facecolor(), + [[1.0, 0.0, 0.0, 1.0]], # trust me, it's red + ) + + std_display_style = "errorbar" + errorbar_kw = {"color": "red"} + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + errorbar_kw=errorbar_kw, + ) + + assert display.errorbar_[0].lines[0].get_color() == "red" + + +# TODO(1.5): to be removed +def test_learning_curve_display_deprecate_log_scale(data, pyplot): + """Check that we warn for the deprecated parameter `log_scale`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"): + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=True + ) + + assert display.ax_.get_xscale() == "log" + assert display.ax_.get_yscale() == "linear" + + with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"): + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=False + ) + + assert display.ax_.get_xscale() == "linear" + assert display.ax_.get_yscale() == "linear" + + +@pytest.mark.parametrize( + "param_range, xscale", + [([5, 10, 15], "linear"), ([-50, 5, 50, 500], "symlog"), ([5, 50, 500], "log")], +) +def test_validation_curve_xscale_from_param_range_provided_as_a_list( + pyplot, data, param_range, xscale +): + """Check the induced xscale from the provided param_range values.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name = "max_depth" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + ) + + assert display.ax_.get_xscale() == xscale + + +@pytest.mark.parametrize( + "Display, params", + [ + (LearningCurveDisplay, {}), + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + ], +) +def test_subclassing_displays(pyplot, data, Display, params): + """Check that named constructors return the correct type when subclassed. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + class SubclassOfDisplay(Display): + pass + + display = SubclassOfDisplay.from_estimator(estimator, X, y, **params) + assert isinstance(display, SubclassOfDisplay) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py new file mode 100644 index 0000000000000000000000000000000000000000..c0db76c5c6ef654340742eeaf3f744637a06fd8b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py @@ -0,0 +1,2537 @@ +"""Test the search module""" + +import pickle +import re +import sys +from collections.abc import Iterable, Sized +from functools import partial +from io import StringIO +from itertools import chain, product +from types import GeneratorType + +import numpy as np +import pytest +from scipy.stats import bernoulli, expon, uniform + +from sklearn.base import BaseEstimator, ClassifierMixin, is_classifier +from sklearn.cluster import KMeans +from sklearn.datasets import ( + make_blobs, + make_classification, + make_multilabel_classification, +) +from sklearn.ensemble import HistGradientBoostingClassifier +from sklearn.exceptions import FitFailedWarning +from sklearn.experimental import enable_halving_search_cv # noqa +from sklearn.impute import SimpleImputer +from sklearn.linear_model import ( + LinearRegression, + Ridge, + SGDClassifier, +) +from sklearn.metrics import ( + accuracy_score, + confusion_matrix, + f1_score, + make_scorer, + r2_score, + recall_score, + roc_auc_score, +) +from sklearn.metrics.pairwise import euclidean_distances +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + HalvingGridSearchCV, + KFold, + LeaveOneGroupOut, + LeavePGroupsOut, + ParameterGrid, + ParameterSampler, + RandomizedSearchCV, + StratifiedKFold, + StratifiedShuffleSplit, + train_test_split, +) +from sklearn.model_selection._search import BaseSearchCV +from sklearn.model_selection.tests.common import OneTimeSplitter +from sklearn.neighbors import KernelDensity, KNeighborsClassifier, LocalOutlierFactor +from sklearn.pipeline import Pipeline +from sklearn.svm import SVC, LinearSVC +from sklearn.tests.metadata_routing_common import ( + ConsumingScorer, + _Registry, + check_recorded_metadata, +) +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._mocking import CheckingClassifier, MockDataFrame +from sklearn.utils._testing import ( + MinimalClassifier, + MinimalRegressor, + MinimalTransformer, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + + +# Neither of the following two estimators inherit from BaseEstimator, +# to test hyperparameter search on user-defined classifiers. 
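As the comment above notes, the search estimators only rely on a duck-typed interface (a cloneable constructor, get_params/set_params, fit, and a score method when no scoring is given). A minimal sketch of such a user-defined classifier, with entirely hypothetical names and data:

import numpy as np
from sklearn.model_selection import GridSearchCV

class TinyClassifier:
    # Smallest interface GridSearchCV needs without inheriting from BaseEstimator.
    def __init__(self, threshold=0.0):
        self.threshold = threshold

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        return self

    def predict(self, X):
        # Classify by thresholding the first feature.
        return (np.asarray(X)[:, 0] > self.threshold).astype(int)

    def score(self, X, y):
        return float(np.mean(self.predict(X) == y))

    def get_params(self, deep=False):
        return {"threshold": self.threshold}

    def set_params(self, **params):
        self.threshold = params["threshold"]
        return self

X = np.array([[-2.0], [-1.0], [1.0], [2.0]] * 3)
y = np.array([0, 0, 1, 1] * 3)
search = GridSearchCV(TinyClassifier(), {"threshold": [-0.5, 0.0, 0.5]}, cv=3)
search.fit(X, y)
print(search.best_params_)  # {'threshold': -0.5}; all candidates tie here, the first wins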
+class MockClassifier: + """Dummy classifier to test the parameter search algorithms""" + + def __init__(self, foo_param=0): + self.foo_param = foo_param + + def fit(self, X, Y): + assert len(X) == len(Y) + self.classes_ = np.unique(Y) + return self + + def predict(self, T): + return T.shape[0] + + def transform(self, X): + return X + self.foo_param + + def inverse_transform(self, X): + return X - self.foo_param + + predict_proba = predict + predict_log_proba = predict + decision_function = predict + + def score(self, X=None, Y=None): + if self.foo_param > 1: + score = 1.0 + else: + score = 0.0 + return score + + def get_params(self, deep=False): + return {"foo_param": self.foo_param} + + def set_params(self, **params): + self.foo_param = params["foo_param"] + return self + + +class LinearSVCNoScore(LinearSVC): + """A LinearSVC classifier that has no score method.""" + + @property + def score(self): + raise AttributeError + + +X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) +y = np.array([1, 1, 2, 2]) + + +def assert_grid_iter_equals_getitem(grid): + assert list(grid) == [grid[i] for i in range(len(grid))] + + +@pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) +@pytest.mark.parametrize( + "input, error_type, error_message", + [ + (0, TypeError, r"Parameter .* a dict or a list, got: 0 of type int"), + ([{"foo": [0]}, 0], TypeError, r"Parameter .* is not a dict \(0\)"), + ( + {"foo": 0}, + TypeError, + r"Parameter (grid|distribution) for parameter 'foo' (is not|needs to be) " + r"(a list or a numpy array|iterable or a distribution).*", + ), + ], +) +def test_validate_parameter_input(klass, input, error_type, error_message): + with pytest.raises(error_type, match=error_message): + klass(input) + + +def test_parameter_grid(): + # Test basic properties of ParameterGrid. 
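As a standalone illustration of those properties before the assertions that follow (parameter names and values here are made up):

from sklearn.model_selection import ParameterGrid

# A dict of lists expands to the cross-product of candidate settings.
grid = ParameterGrid({"kernel": ["linear", "rbf"], "C": [1, 10]})
print(len(grid))   # 4
print(list(grid))  # [{'C': 1, 'kernel': 'linear'}, {'C': 1, 'kernel': 'rbf'}, ...]

# A list of dicts expands to the concatenation of the sub-grids.
grid = ParameterGrid([{"C": [1, 10]}, {"gamma": [0.1]}])
print(list(grid))  # [{'C': 1}, {'C': 10}, {'gamma': 0.1}]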
+ params1 = {"foo": [1, 2, 3]} + grid1 = ParameterGrid(params1) + assert isinstance(grid1, Iterable) + assert isinstance(grid1, Sized) + assert len(grid1) == 3 + assert_grid_iter_equals_getitem(grid1) + + params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} + grid2 = ParameterGrid(params2) + assert len(grid2) == 6 + + # loop to assert we can iterate over the grid multiple times + for i in range(2): + # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) + points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) + assert points == set( + ("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]) + ) + assert_grid_iter_equals_getitem(grid2) + + # Special case: empty grid (useful to get default estimator settings) + empty = ParameterGrid({}) + assert len(empty) == 1 + assert list(empty) == [{}] + assert_grid_iter_equals_getitem(empty) + with pytest.raises(IndexError): + empty[1] + + has_empty = ParameterGrid([{"C": [1, 10]}, {}, {"C": [0.5]}]) + assert len(has_empty) == 4 + assert list(has_empty) == [{"C": 1}, {"C": 10}, {}, {"C": 0.5}] + assert_grid_iter_equals_getitem(has_empty) + + +def test_grid_search(): + # Test that the best estimator contains the right value for foo_param + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3) + # make sure it selects the smallest parameter in case of ties + old_stdout = sys.stdout + sys.stdout = StringIO() + grid_search.fit(X, y) + sys.stdout = old_stdout + assert grid_search.best_estimator_.foo_param == 2 + + assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) + + # Smoke test the score etc: + grid_search.score(X, y) + grid_search.predict_proba(X) + grid_search.decision_function(X) + grid_search.transform(X) + + # Test exception handling on scoring + grid_search.scoring = "sklearn" + with pytest.raises(ValueError): + grid_search.fit(X, y) + + +def test_grid_search_pipeline_steps(): + # check that parameters that are estimators are cloned before fitting + pipe = Pipeline([("regressor", LinearRegression())]) + param_grid = {"regressor": [LinearRegression(), Ridge()]} + grid_search = GridSearchCV(pipe, param_grid, cv=2) + grid_search.fit(X, y) + regressor_results = grid_search.cv_results_["param_regressor"] + assert isinstance(regressor_results[0], LinearRegression) + assert isinstance(regressor_results[1], Ridge) + assert not hasattr(regressor_results[0], "coef_") + assert not hasattr(regressor_results[1], "coef_") + assert regressor_results[0] is not grid_search.best_estimator_ + assert regressor_results[1] is not grid_search.best_estimator_ + # check that we didn't modify the parameter grid that was passed + assert not hasattr(param_grid["regressor"][0], "coef_") + assert not hasattr(param_grid["regressor"][1], "coef_") + + +@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) +def test_SearchCV_with_fit_params(SearchCV): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_fit_params=["spam", "eggs"]) + searcher = SearchCV(clf, {"foo_param": [1, 2, 3]}, cv=2, error_score="raise") + + # The CheckingClassifier generates an assertion error if + # a parameter is missing or has length != len(X). + err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." 
+ with pytest.raises(AssertionError, match=err_msg): + searcher.fit(X, y, spam=np.ones(10)) + + err_msg = "Fit parameter spam has length 1; expected" + with pytest.raises(AssertionError, match=err_msg): + searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) + searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) + + +@ignore_warnings +def test_grid_search_no_score(): + # Test grid-search on classifier that has no score function. + clf = LinearSVC(dual="auto", random_state=0) + X, y = make_blobs(random_state=0, centers=2) + Cs = [0.1, 1, 10] + clf_no_score = LinearSVCNoScore(dual="auto", random_state=0) + grid_search = GridSearchCV(clf, {"C": Cs}, scoring="accuracy") + grid_search.fit(X, y) + + grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}, scoring="accuracy") + # smoketest grid search + grid_search_no_score.fit(X, y) + + # check that best params are equal + assert grid_search_no_score.best_params_ == grid_search.best_params_ + # check that we can call score and that it gives the correct result + assert grid_search.score(X, y) == grid_search_no_score.score(X, y) + + # giving no scoring function raises an error + grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}) + with pytest.raises(TypeError, match="no scoring"): + grid_search_no_score.fit([[1]]) + + +def test_grid_search_score_method(): + X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [0.1]} + + search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) + search_accuracy = GridSearchCV(clf, grid, scoring="accuracy").fit(X, y) + search_no_score_method_auc = GridSearchCV( + LinearSVCNoScore(dual="auto"), grid, scoring="roc_auc" + ).fit(X, y) + search_auc = GridSearchCV(clf, grid, scoring="roc_auc").fit(X, y) + + # Check warning only occurs in situation where behavior changed: + # estimator requires score method to compete with scoring parameter + score_no_scoring = search_no_scoring.score(X, y) + score_accuracy = search_accuracy.score(X, y) + score_no_score_auc = search_no_score_method_auc.score(X, y) + score_auc = search_auc.score(X, y) + + # ensure the test is sane + assert score_auc < 1.0 + assert score_accuracy < 1.0 + assert score_auc != score_accuracy + + assert_almost_equal(score_accuracy, score_no_scoring) + assert_almost_equal(score_auc, score_no_score_auc) + + +def test_grid_search_groups(): + # Check if ValueError (when groups is None) propagates to GridSearchCV + # And also check if groups is correctly passed to the cv object + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=15, n_classes=2, random_state=0) + groups = rng.randint(0, 3, 15) + + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [1]} + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(n_splits=3), + GroupShuffleSplit(), + ] + error_msg = "The 'groups' parameter should not be None." 
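As a standalone sketch of the behaviour asserted in the loop that follows (the data mirrors this test; the rest is illustrative): group-aware splitters receive the group labels through the `groups` argument of `fit`, and omitting them raises the error above.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, GroupKFold
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = np.random.RandomState(0).randint(0, 3, 15)

gs = GridSearchCV(LinearSVC(dual="auto"), {"C": [1]}, cv=GroupKFold(n_splits=3))
gs.fit(X, y, groups=groups)  # group labels are routed to the splitter
# gs.fit(X, y) without groups raises:
#   ValueError: The 'groups' parameter should not be None.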
+ for cv in group_cvs: + gs = GridSearchCV(clf, grid, cv=cv) + with pytest.raises(ValueError, match=error_msg): + gs.fit(X, y) + gs.fit(X, y, groups=groups) + + non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()] + for cv in non_group_cvs: + gs = GridSearchCV(clf, grid, cv=cv) + # Should not raise an error + gs.fit(X, y) + + +def test_classes__property(): + # Test that classes_ property matches best_estimator_.classes_ + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + Cs = [0.1, 1, 10] + + grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs}) + grid_search.fit(X, y) + assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) + + # Test that regressors do not have a classes_ attribute + grid_search = GridSearchCV(Ridge(), {"alpha": [1.0, 2.0]}) + grid_search.fit(X, y) + assert not hasattr(grid_search, "classes_") + + # Test that the grid searcher has no classes_ attribute before it's fit + grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs}) + assert not hasattr(grid_search, "classes_") + + # Test that the grid searcher has no classes_ attribute without a refit + grid_search = GridSearchCV( + LinearSVC(dual="auto", random_state=0), {"C": Cs}, refit=False + ) + grid_search.fit(X, y) + assert not hasattr(grid_search, "classes_") + + +def test_trivial_cv_results_attr(): + # Test search over a "grid" with only one point. + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1]}, cv=3) + grid_search.fit(X, y) + assert hasattr(grid_search, "cv_results_") + + random_search = RandomizedSearchCV(clf, {"foo_param": [0]}, n_iter=1, cv=3) + random_search.fit(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_no_refit(): + # Test that GSCV can be used for model selection alone without refitting + clf = MockClassifier() + for scoring in [None, ["accuracy", "precision"]]: + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=False, cv=3) + grid_search.fit(X, y) + assert ( + not hasattr(grid_search, "best_estimator_") + and hasattr(grid_search, "best_index_") + and hasattr(grid_search, "best_params_") + ) + + # Make sure the functions predict/transform etc. raise meaningful + # error messages + for fn_name in ( + "predict", + "predict_proba", + "predict_log_proba", + "transform", + "inverse_transform", + ): + outer_msg = f"has no attribute '{fn_name}'" + inner_msg = ( + f"`refit=False`. 
{fn_name} is available only after " + "refitting on the best parameters" + ) + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + getattr(grid_search, fn_name)(X) + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + # Test that an invalid refit param raises appropriate error messages + error_msg = ( + "For multi-metric scoring, the parameter refit must be set to a scorer key" + ) + for refit in [True, "recall", "accuracy"]: + with pytest.raises(ValueError, match=error_msg): + GridSearchCV( + clf, {}, refit=refit, scoring={"acc": "accuracy", "prec": "precision"} + ).fit(X, y) + + +def test_grid_search_error(): + # Test that grid search will capture errors on data with different length + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + with pytest.raises(ValueError): + cv.fit(X_[:180], y_) + + +def test_grid_search_one_grid_point(): + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} + + clf = SVC(gamma="auto") + cv = GridSearchCV(clf, param_dict) + cv.fit(X_, y_) + + clf = SVC(C=1.0, kernel="rbf", gamma=0.1) + clf.fit(X_, y_) + + assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) + + +def test_grid_search_when_param_grid_includes_range(): + # Test that the best estimator contains the right value for foo_param + clf = MockClassifier() + grid_search = None + grid_search = GridSearchCV(clf, {"foo_param": range(1, 4)}, cv=3) + grid_search.fit(X, y) + assert grid_search.best_estimator_.foo_param == 2 + + +def test_grid_search_bad_param_grid(): + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + param_dict = {"C": 1} + clf = SVC(gamma="auto") + error_msg = re.escape( + "Parameter grid for parameter 'C' needs to be a list or " + "a numpy array, but got 1 (of type int) instead. Single " + "values need to be wrapped in a list with one element." + ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(TypeError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": []} + clf = SVC() + error_msg = re.escape( + "Parameter grid for parameter 'C' need to be a non-empty sequence, got: []" + ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(ValueError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": "1,2,3"} + clf = SVC(gamma="auto") + error_msg = re.escape( + "Parameter grid for parameter 'C' needs to be a list or a numpy array, " + "but got '1,2,3' (of type str) instead. Single values need to be " + "wrapped in a list with one element." 
+ ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(TypeError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": np.ones((3, 2))} + clf = SVC() + search = GridSearchCV(clf, param_dict) + with pytest.raises(ValueError): + search.fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_grid_search_sparse(csr_container): + # Test that grid search works with both dense and sparse matrices + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(X_[:180], y_[:180]) + y_pred = cv.predict(X_[180:]) + C = cv.best_estimator_.C + + X_ = csr_container(X_) + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(X_[:180].tocoo(), y_[:180]) + y_pred2 = cv.predict(X_[180:]) + C2 = cv.best_estimator_.C + + assert np.mean(y_pred == y_pred2) >= 0.9 + assert C == C2 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_grid_search_sparse_scoring(csr_container): + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1") + cv.fit(X_[:180], y_[:180]) + y_pred = cv.predict(X_[180:]) + C = cv.best_estimator_.C + + X_ = csr_container(X_) + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1") + cv.fit(X_[:180], y_[:180]) + y_pred2 = cv.predict(X_[180:]) + C2 = cv.best_estimator_.C + + assert_array_equal(y_pred, y_pred2) + assert C == C2 + # Smoke test the score + # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), + # cv.score(X_[:180], y[:180])) + + # test loss where greater is worse + def f1_loss(y_true_, y_pred_): + return -f1_score(y_true_, y_pred_) + + F1Loss = make_scorer(f1_loss, greater_is_better=False) + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring=F1Loss) + cv.fit(X_[:180], y_[:180]) + y_pred3 = cv.predict(X_[180:]) + C3 = cv.best_estimator_.C + + assert C == C3 + assert_array_equal(y_pred, y_pred3) + + +def test_grid_search_precomputed_kernel(): + # Test that grid search works when the input features are given in the + # form of a precomputed kernel matrix + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + # compute the training kernel matrix corresponding to the linear kernel + K_train = np.dot(X_[:180], X_[:180].T) + y_train = y_[:180] + + clf = SVC(kernel="precomputed") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(K_train, y_train) + + assert cv.best_score_ >= 0 + + # compute the test kernel matrix + K_test = np.dot(X_[180:], X_[:180].T) + y_test = y_[180:] + + y_pred = cv.predict(K_test) + + assert np.mean(y_pred == y_test) >= 0 + + # test error is raised when the precomputed kernel is not array-like + # or sparse + with pytest.raises(ValueError): + cv.fit(K_train.tolist(), y_train) + + +def test_grid_search_precomputed_kernel_error_nonsquare(): + # Test that grid search returns an error with a non-square precomputed + # training kernel matrix + K_train = np.zeros((10, 20)) + y_train = np.ones((10,)) + clf = SVC(kernel="precomputed") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + with pytest.raises(ValueError): + cv.fit(K_train, y_train) + + +class BrokenClassifier(BaseEstimator): + """Broken classifier that cannot be fit twice""" + + def __init__(self, parameter=None): + self.parameter = parameter + + def fit(self, X, y): + assert not hasattr(self, "has_been_fit_") + self.has_been_fit_ = True + + 
def predict(self, X): + return np.zeros(X.shape[0]) + + +@ignore_warnings +def test_refit(): + # Regression test for bug in refitting + # Simulates re-fitting a broken estimator; this used to break with + # sparse SVMs. + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = GridSearchCV( + BrokenClassifier(), [{"parameter": [0, 1]}], scoring="precision", refit=True + ) + clf.fit(X, y) + + +def test_refit_callable(): + """ + Test refit=callable, which adds flexibility in identifying the + "best" estimator. + """ + + def refit_callable(cv_results): + """ + A dummy function tests `refit=callable` interface. + Return the index of a model that has the least + `mean_test_score`. + """ + # Fit a dummy clf with `refit=True` to get a list of keys in + # clf.cv_results_. + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring="precision", + refit=True, + ) + clf.fit(X, y) + # Ensure that `best_index_ != 0` for this dummy clf + assert clf.best_index_ != 0 + + # Assert every key matches those in `cv_results` + for key in clf.cv_results_.keys(): + assert key in cv_results + + return cv_results["mean_test_score"].argmin() + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring="precision", + refit=refit_callable, + ) + clf.fit(X, y) + + assert clf.best_index_ == 0 + # Ensure `best_score_` is disabled when using `refit=callable` + assert not hasattr(clf, "best_score_") + + +def test_refit_callable_invalid_type(): + """ + Test implementation catches the errors when 'best_index_' returns an + invalid result. + """ + + def refit_callable_invalid_type(cv_results): + """ + A dummy function tests when returned 'best_index_' is not integer. + """ + return None + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring="precision", + refit=refit_callable_invalid_type, + ) + with pytest.raises(TypeError, match="best_index_ returned is not an integer"): + clf.fit(X, y) + + +@pytest.mark.parametrize("out_bound_value", [-1, 2]) +@pytest.mark.parametrize("search_cv", [RandomizedSearchCV, GridSearchCV]) +def test_refit_callable_out_bound(out_bound_value, search_cv): + """ + Test implementation catches the errors when 'best_index_' returns an + out of bound result. + """ + + def refit_callable_out_bound(cv_results): + """ + A dummy function tests when returned 'best_index_' is out of bounds. + """ + return out_bound_value + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + + clf = search_cv( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring="precision", + refit=refit_callable_out_bound, + ) + with pytest.raises(IndexError, match="best_index_ index out of range"): + clf.fit(X, y) + + +def test_refit_callable_multi_metric(): + """ + Test refit=callable in multiple metric evaluation setting + """ + + def refit_callable(cv_results): + """ + A dummy function tests `refit=callable` interface. + Return the index of a model that has the least + `mean_test_prec`. 
+ """ + assert "mean_test_prec" in cv_results + return cv_results["mean_test_prec"].argmin() + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + scoring = {"Accuracy": make_scorer(accuracy_score), "prec": "precision"} + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring=scoring, + refit=refit_callable, + ) + clf.fit(X, y) + + assert clf.best_index_ == 0 + # Ensure `best_score_` is disabled when using `refit=callable` + assert not hasattr(clf, "best_score_") + + +def test_gridsearch_nd(): + # Pass X as list in GridSearchCV + X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) + y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) + + def check_X(x): + return x.shape[1:] == (5, 3, 2) + + def check_y(x): + return x.shape[1:] == (7, 11) + + clf = CheckingClassifier( + check_X=check_X, + check_y=check_y, + methods_to_check=["fit"], + ) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}) + grid_search.fit(X_4d, y_3d).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_X_as_list(): + # Pass X as list in GridSearchCV + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = CheckingClassifier( + check_X=lambda x: isinstance(x, list), + methods_to_check=["fit"], + ) + cv = KFold(n_splits=3) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv) + grid_search.fit(X.tolist(), y).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_y_as_list(): + # Pass y as list in GridSearchCV + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = CheckingClassifier( + check_y=lambda x: isinstance(x, list), + methods_to_check=["fit"], + ) + cv = KFold(n_splits=3) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv) + grid_search.fit(X, y.tolist()).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +@ignore_warnings +def test_pandas_input(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((DataFrame, Series)) + except ImportError: + pass + + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + for InputFeatureType, TargetType in types: + # X dataframe, y series + X_df, y_ser = InputFeatureType(X), TargetType(y) + + def check_df(x): + return isinstance(x, InputFeatureType) + + def check_series(x): + return isinstance(x, TargetType) + + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}) + grid_search.fit(X_df, y_ser).score(X_df, y_ser) + grid_search.predict(X_df) + assert hasattr(grid_search, "cv_results_") + + +def test_unsupervised_grid_search(): + # test grid-search with unsupervised estimator + X, y = make_blobs(n_samples=50, random_state=0) + km = KMeans(random_state=0, init="random", n_init=1) + + # Multi-metric evaluation unsupervised + scoring = ["adjusted_rand_score", "fowlkes_mallows_score"] + for refit in ["adjusted_rand_score", "fowlkes_mallows_score"]: + grid_search = GridSearchCV( + km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit + ) + grid_search.fit(X, y) + # Both ARI and FMS can find the right number :) + assert grid_search.best_params_["n_clusters"] == 3 + + # Single metric evaluation unsupervised + grid_search = GridSearchCV( + km, param_grid=dict(n_clusters=[2, 3, 4]), scoring="fowlkes_mallows_score" + ) + grid_search.fit(X, y) + assert 
grid_search.best_params_["n_clusters"] == 3 + + # Now without a score, and without y + grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) + grid_search.fit(X) + assert grid_search.best_params_["n_clusters"] == 4 + + +def test_gridsearch_no_predict(): + # test grid-search with an estimator without predict. + # slight duplication of a test from KDE + def custom_scoring(estimator, X): + return 42 if estimator.bandwidth == 0.1 else 0 + + X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) + search = GridSearchCV( + KernelDensity(), + param_grid=dict(bandwidth=[0.01, 0.1, 1]), + scoring=custom_scoring, + ) + search.fit(X) + assert search.best_params_["bandwidth"] == 0.1 + assert search.best_score_ == 42 + + +def test_param_sampler(): + # test basic properties of param sampler + param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=10, random_state=0 + ) + samples = [x for x in sampler] + assert len(samples) == 10 + for sample in samples: + assert sample["kernel"] in ["rbf", "linear"] + assert 0 <= sample["C"] <= 1 + + # test that repeated calls yield identical parameters + param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=3, random_state=0 + ) + assert [x for x in sampler] == [x for x in sampler] + + param_distributions = {"C": uniform(0, 1)} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=10, random_state=0 + ) + assert [x for x in sampler] == [x for x in sampler] + + +def check_cv_results_array_types(search, param_keys, score_keys): + # Check if the search `cv_results`'s array are of correct types + cv_results = search.cv_results_ + assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) + assert all(cv_results[key].dtype == object for key in param_keys) + assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) + assert all( + cv_results[key].dtype == np.float64 + for key in score_keys + if not key.startswith("rank") + ) + + scorer_keys = search.scorer_.keys() if search.multimetric_ else ["score"] + + for key in scorer_keys: + assert cv_results["rank_test_%s" % key].dtype == np.int32 + + +def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand, extra_keys=()): + # Test the search.cv_results_ contains all the required results + all_keys = param_keys + score_keys + extra_keys + assert_array_equal(sorted(cv_results.keys()), sorted(all_keys + ("params",))) + assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) + + +def test_grid_search_cv_results(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_grid_points = 6 + params = [ + dict( + kernel=[ + "rbf", + ], + C=[1, 10], + gamma=[0.1, 1], + ), + dict( + kernel=[ + "poly", + ], + degree=[1, 2], + ), + ] + + param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel") + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + n_candidates = n_grid_points + + search = GridSearchCV(SVC(), cv=3, param_grid=params, return_train_score=True) + search.fit(X, y) + 
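Before the key-by-key assertions below, a standalone sketch of how the same `cv_results_` dict is usually inspected; pandas is an assumed optional dependency here, not something the test requires.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=50, n_features=4, random_state=42)
search = GridSearchCV(SVC(), {"C": [1, 10], "gamma": [0.1, 1]}, cv=3).fit(X, y)

# cv_results_ is a flat dict of equal-length arrays, one entry per candidate.
df = pd.DataFrame(search.cv_results_)
print(df[["param_C", "param_gamma", "mean_test_score", "rank_test_score"]])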
cv_results = search.cv_results_ + # Check if score and timing are reasonable + assert all(cv_results["rank_test_score"] >= 1) + assert (all(cv_results[k] >= 0) for k in score_keys if k != "rank_test_score") + assert ( + all(cv_results[k] <= 1) + for k in score_keys + if "time" not in k and k != "rank_test_score" + ) + # Check cv_results structure + check_cv_results_array_types(search, param_keys, score_keys) + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) + # Check masking + cv_results = search.cv_results_ + + poly_results = [ + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ] + assert all(poly_results) + assert len(poly_results) == 2 + + rbf_results = [ + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ] + assert all(rbf_results) + assert len(rbf_results) == 4 + + +def test_random_search_cv_results(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_search_iter = 30 + + params = [ + {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)}, + {"kernel": ["poly"], "degree": [2, 3]}, + ] + param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel") + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + n_candidates = n_search_iter + + search = RandomizedSearchCV( + SVC(), + n_iter=n_search_iter, + cv=3, + param_distributions=params, + return_train_score=True, + ) + search.fit(X, y) + cv_results = search.cv_results_ + # Check results structure + check_cv_results_array_types(search, param_keys, score_keys) + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) + assert all( + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ) + assert all( + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ) + + +@pytest.mark.parametrize( + "SearchCV, specialized_params", + [ + (GridSearchCV, {"param_grid": {"C": [1, 10]}}), + (RandomizedSearchCV, {"param_distributions": {"C": [1, 10]}, "n_iter": 2}), + ], +) +def test_search_default_iid(SearchCV, specialized_params): + # Test the IID parameter TODO: Clearly this test does something else??? + # noise-free simple 2d-data + X, y = make_blobs( + centers=[[0, 0], [1, 0], [0, 1], [1, 1]], + random_state=0, + cluster_std=0.1, + shuffle=False, + n_samples=80, + ) + # split dataset into two folds that are not iid + # first one contains data of all 4 blobs, second only from two. 
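The boolean masks built next are one way to hand-craft folds; more generally, `cv` accepts any iterable of (train, test) index pairs. A small sketch with made-up integer-index folds (names and data are illustrative):

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = make_blobs(centers=[[0, 0], [1, 1]], cluster_std=0.1, n_samples=20, random_state=0)
idx = np.arange(len(X))
custom_cv = [(idx[:10], idx[10:]), (idx[10:], idx[:10])]  # two hand-made folds

search = GridSearchCV(SVC(), {"C": [1, 10]}, cv=custom_cv).fit(X, y)
print(search.n_splits_)  # 2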
+ mask = np.ones(X.shape[0], dtype=bool) + mask[np.where(y == 1)[0][::2]] = 0 + mask[np.where(y == 2)[0][::2]] = 0 + # this leads to perfect classification on one fold and a score of 1/3 on + # the other + # create "cv" for splits + cv = [[mask, ~mask], [~mask, mask]] + + common_params = {"estimator": SVC(), "cv": cv, "return_train_score": True} + search = SearchCV(**common_params, **specialized_params) + search.fit(X, y) + + test_cv_scores = np.array( + [ + search.cv_results_["split%d_test_score" % s][0] + for s in range(search.n_splits_) + ] + ) + test_mean = search.cv_results_["mean_test_score"][0] + test_std = search.cv_results_["std_test_score"][0] + + train_cv_scores = np.array( + [ + search.cv_results_["split%d_train_score" % s][0] + for s in range(search.n_splits_) + ] + ) + train_mean = search.cv_results_["mean_train_score"][0] + train_std = search.cv_results_["std_train_score"][0] + + assert search.cv_results_["param_C"][0] == 1 + # scores are the same as above + assert_allclose(test_cv_scores, [1, 1.0 / 3.0]) + assert_allclose(train_cv_scores, [1, 1]) + # Unweighted mean/std is used + assert test_mean == pytest.approx(np.mean(test_cv_scores)) + assert test_std == pytest.approx(np.std(test_cv_scores)) + + # For the train scores, we do not take a weighted mean irrespective of + # i.i.d. or not + assert train_mean == pytest.approx(1) + assert train_std == pytest.approx(0) + + +def test_grid_search_cv_results_multimetric(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_splits = 3 + params = [ + dict( + kernel=[ + "rbf", + ], + C=[1, 10], + gamma=[0.1, 1], + ), + dict( + kernel=[ + "poly", + ], + degree=[1, 2], + ), + ] + + grid_searches = [] + for scoring in ( + {"accuracy": make_scorer(accuracy_score), "recall": make_scorer(recall_score)}, + "accuracy", + "recall", + ): + grid_search = GridSearchCV( + SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False + ) + grid_search.fit(X, y) + grid_searches.append(grid_search) + + compare_cv_results_multimetric_with_single(*grid_searches) + + +def test_random_search_cv_results_multimetric(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_splits = 3 + n_search_iter = 30 + + # Scipy 0.12's stats dists do not accept seed, hence we use param grid + params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) + for refit in (True, False): + random_searches = [] + for scoring in (("accuracy", "recall"), "accuracy", "recall"): + # If True, for multi-metric pass refit='accuracy' + if refit: + probability = True + refit = "accuracy" if isinstance(scoring, tuple) else refit + else: + probability = False + clf = SVC(probability=probability, random_state=42) + random_search = RandomizedSearchCV( + clf, + n_iter=n_search_iter, + cv=n_splits, + param_distributions=params, + scoring=scoring, + refit=refit, + random_state=0, + ) + random_search.fit(X, y) + random_searches.append(random_search) + + compare_cv_results_multimetric_with_single(*random_searches) + compare_refit_methods_when_refit_with_acc( + random_searches[0], random_searches[1], refit + ) + + +def compare_cv_results_multimetric_with_single(search_multi, search_acc, search_rec): + """Compare multi-metric cv_results with the ensemble of multiple + single metric cv_results from single metric grid/random search""" + + assert search_multi.multimetric_ + assert_array_equal(sorted(search_multi.scorer_), ("accuracy", "recall")) + + cv_results_multi = search_multi.cv_results_ + cv_results_acc_rec = { + 
re.sub("_score$", "_accuracy", k): v for k, v in search_acc.cv_results_.items() + } + cv_results_acc_rec.update( + {re.sub("_score$", "_recall", k): v for k, v in search_rec.cv_results_.items()} + ) + + # Check if score and timing are reasonable, also checks if the keys + # are present + assert all( + ( + np.all(cv_results_multi[k] <= 1) + for k in ( + "mean_score_time", + "std_score_time", + "mean_fit_time", + "std_fit_time", + ) + ) + ) + + # Compare the keys, other than time keys, among multi-metric and + # single metric grid search results. np.testing.assert_equal performs a + # deep nested comparison of the two cv_results dicts + np.testing.assert_equal( + {k: v for k, v in cv_results_multi.items() if not k.endswith("_time")}, + {k: v for k, v in cv_results_acc_rec.items() if not k.endswith("_time")}, + ) + + +def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit): + """Compare refit multi-metric search methods with single metric methods""" + assert search_acc.refit == refit + if refit: + assert search_multi.refit == "accuracy" + else: + assert not search_multi.refit + return # search cannot predict/score without refit + + X, y = make_blobs(n_samples=100, n_features=4, random_state=42) + for method in ("predict", "predict_proba", "predict_log_proba"): + assert_almost_equal( + getattr(search_multi, method)(X), getattr(search_acc, method)(X) + ) + assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y)) + for key in ("best_index_", "best_score_", "best_params_"): + assert getattr(search_multi, key) == getattr(search_acc, key) + + +@pytest.mark.parametrize( + "search_cv", + [ + RandomizedSearchCV( + estimator=DecisionTreeClassifier(), + param_distributions={"max_depth": [5, 10]}, + ), + GridSearchCV( + estimator=DecisionTreeClassifier(), param_grid={"max_depth": [5, 10]} + ), + ], +) +def test_search_cv_score_samples_error(search_cv): + X, y = make_blobs(n_samples=100, n_features=4, random_state=42) + search_cv.fit(X, y) + + # Make sure to error out when underlying estimator does not implement + # the method `score_samples` + outer_msg = f"'{search_cv.__class__.__name__}' has no attribute 'score_samples'" + inner_msg = "'DecisionTreeClassifier' object has no attribute 'score_samples'" + + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + search_cv.score_samples(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg == str(exec_info.value.__cause__) + + +@pytest.mark.parametrize( + "search_cv", + [ + RandomizedSearchCV( + estimator=LocalOutlierFactor(novelty=True), + param_distributions={"n_neighbors": [5, 10]}, + scoring="precision", + ), + GridSearchCV( + estimator=LocalOutlierFactor(novelty=True), + param_grid={"n_neighbors": [5, 10]}, + scoring="precision", + ), + ], +) +def test_search_cv_score_samples_method(search_cv): + # Set parameters + rng = np.random.RandomState(42) + n_samples = 300 + outliers_fraction = 0.15 + n_outliers = int(outliers_fraction * n_samples) + n_inliers = n_samples - n_outliers + + # Create dataset + X = make_blobs( + n_samples=n_inliers, + n_features=2, + centers=[[0, 0], [0, 0]], + cluster_std=0.5, + random_state=0, + )[0] + # Add some noisy points + X = np.concatenate([X, rng.uniform(low=-6, high=6, size=(n_outliers, 2))], axis=0) + + # Define labels to be able to score the estimator with `search_cv` + y_true = np.array([1] * n_samples) + y_true[-n_outliers:] = -1 + + # Fit on data + search_cv.fit(X, y_true) + + # Verify that the stand alone estimator yields the same 
results + # as the ones obtained with *SearchCV + assert_allclose( + search_cv.score_samples(X), search_cv.best_estimator_.score_samples(X) + ) + + +def test_search_cv_results_rank_tie_breaking(): + X, y = make_blobs(n_samples=50, random_state=42) + + # The two C values are close enough to give similar models + # which would result in a tie of their mean cv-scores + param_grid = {"C": [1, 1.001, 0.001]} + + grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True) + random_search = RandomizedSearchCV( + SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True + ) + + for search in (grid_search, random_search): + search.fit(X, y) + cv_results = search.cv_results_ + # Check tie breaking strategy - + # Check that there is a tie in the mean scores between + # candidates 1 and 2 alone + assert_almost_equal( + cv_results["mean_test_score"][0], cv_results["mean_test_score"][1] + ) + assert_almost_equal( + cv_results["mean_train_score"][0], cv_results["mean_train_score"][1] + ) + assert not np.allclose( + cv_results["mean_test_score"][1], cv_results["mean_test_score"][2] + ) + assert not np.allclose( + cv_results["mean_train_score"][1], cv_results["mean_train_score"][2] + ) + # 'min' rank should be assigned to the tied candidates + assert_almost_equal(search.cv_results_["rank_test_score"], [1, 1, 3]) + + +def test_search_cv_results_none_param(): + X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1] + estimators = (DecisionTreeRegressor(), DecisionTreeClassifier()) + est_parameters = {"random_state": [0, None]} + cv = KFold() + + for est in estimators: + grid_search = GridSearchCV( + est, + est_parameters, + cv=cv, + ).fit(X, y) + assert_array_equal(grid_search.cv_results_["param_random_state"], [0, None]) + + +@ignore_warnings() +def test_search_cv_timing(): + svc = LinearSVC(dual="auto", random_state=0) + + X = [ + [ + 1, + ], + [ + 2, + ], + [ + 3, + ], + [ + 4, + ], + ] + y = [0, 1, 1, 0] + + gs = GridSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0) + rs = RandomizedSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0, n_iter=2) + + for search in (gs, rs): + search.fit(X, y) + for key in ["mean_fit_time", "std_fit_time"]: + # NOTE The precision of time.time in windows is not high + # enough for the fit/score times to be non-zero for trivial X and y + assert np.all(search.cv_results_[key] >= 0) + assert np.all(search.cv_results_[key] < 1) + + for key in ["mean_score_time", "std_score_time"]: + assert search.cv_results_[key][1] >= 0 + assert search.cv_results_[key][0] == 0.0 + assert np.all(search.cv_results_[key] < 1) + + assert hasattr(search, "refit_time_") + assert isinstance(search.refit_time_, float) + assert search.refit_time_ >= 0 + + +def test_grid_search_correct_score_results(): + # test that correct scores are used + n_splits = 3 + clf = LinearSVC(dual="auto", random_state=0) + X, y = make_blobs(random_state=0, centers=2) + Cs = [0.1, 1, 10] + for score in ["f1", "roc_auc"]: + grid_search = GridSearchCV(clf, {"C": Cs}, scoring=score, cv=n_splits) + cv_results = grid_search.fit(X, y).cv_results_ + + # Test scorer names + result_keys = list(cv_results.keys()) + expected_keys = ("mean_test_score", "rank_test_score") + tuple( + "split%d_test_score" % cv_i for cv_i in range(n_splits) + ) + assert all(np.isin(expected_keys, result_keys)) + + cv = StratifiedKFold(n_splits=n_splits) + n_splits = grid_search.n_splits_ + for candidate_i, C in enumerate(Cs): + clf.set_params(C=C) + cv_scores = np.array( + [ + grid_search.cv_results_["split%d_test_score" % 
s][candidate_i] + for s in range(n_splits) + ] + ) + for i, (train, test) in enumerate(cv.split(X, y)): + clf.fit(X[train], y[train]) + if score == "f1": + correct_score = f1_score(y[test], clf.predict(X[test])) + elif score == "roc_auc": + dec = clf.decision_function(X[test]) + correct_score = roc_auc_score(y[test], dec) + assert_almost_equal(correct_score, cv_scores[i]) + + +def test_pickle(): + # Test that a fit search can be pickled + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=True, cv=3) + grid_search.fit(X, y) + grid_search_pickled = pickle.loads(pickle.dumps(grid_search)) + assert_array_almost_equal(grid_search.predict(X), grid_search_pickled.predict(X)) + + random_search = RandomizedSearchCV( + clf, {"foo_param": [1, 2, 3]}, refit=True, n_iter=3, cv=3 + ) + random_search.fit(X, y) + random_search_pickled = pickle.loads(pickle.dumps(random_search)) + assert_array_almost_equal( + random_search.predict(X), random_search_pickled.predict(X) + ) + + +def test_grid_search_with_multioutput_data(): + # Test search with multi-output estimator + + X, y = make_multilabel_classification(return_indicator=True, random_state=0) + + est_parameters = {"max_depth": [1, 2, 3, 4]} + cv = KFold() + + estimators = [ + DecisionTreeRegressor(random_state=0), + DecisionTreeClassifier(random_state=0), + ] + + # Test with grid search cv + for est in estimators: + grid_search = GridSearchCV(est, est_parameters, cv=cv) + grid_search.fit(X, y) + res_params = grid_search.cv_results_["params"] + for cand_i in range(len(res_params)): + est.set_params(**res_params[cand_i]) + + for i, (train, test) in enumerate(cv.split(X, y)): + est.fit(X[train], y[train]) + correct_score = est.score(X[test], y[test]) + assert_almost_equal( + correct_score, + grid_search.cv_results_["split%d_test_score" % i][cand_i], + ) + + # Test with a randomized search + for est in estimators: + random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) + random_search.fit(X, y) + res_params = random_search.cv_results_["params"] + for cand_i in range(len(res_params)): + est.set_params(**res_params[cand_i]) + + for i, (train, test) in enumerate(cv.split(X, y)): + est.fit(X[train], y[train]) + correct_score = est.score(X[test], y[test]) + assert_almost_equal( + correct_score, + random_search.cv_results_["split%d_test_score" % i][cand_i], + ) + + +def test_predict_proba_disabled(): + # Test predict_proba when disabled on estimator. 
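+    # SVC(probability=False) does not expose predict_proba, so the fitted search
+    # object should not expose it either.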
+ X = np.arange(20).reshape(5, -1) + y = [0, 0, 1, 1, 1] + clf = SVC(probability=False) + gs = GridSearchCV(clf, {}, cv=2).fit(X, y) + assert not hasattr(gs, "predict_proba") + + +def test_grid_search_allows_nans(): + # Test GridSearchCV with SimpleImputer + X = np.arange(20, dtype=np.float64).reshape(5, -1) + X[2, :] = np.nan + y = [0, 0, 1, 1, 1] + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + GridSearchCV(p, {"classifier__foo_param": [1, 2, 3]}, cv=2).fit(X, y) + + +class FailingClassifier(BaseEstimator): + """Classifier that raises a ValueError on fit()""" + + FAILING_PARAMETER = 2 + + def __init__(self, parameter=None): + self.parameter = parameter + + def fit(self, X, y=None): + if self.parameter == FailingClassifier.FAILING_PARAMETER: + raise ValueError("Failing classifier failed as required") + + def predict(self, X): + return np.zeros(X.shape[0]) + + def score(self, X=None, Y=None): + return 0.0 + + +def test_grid_search_failing_classifier(): + # GridSearchCV with on_error != 'raise' + # Ensures that a warning is raised and score reset where appropriate. + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + # refit=False because we only want to check that errors caused by fits + # to individual folds will be caught and warnings raised instead. If + # refit was done, then an exception would be raised on refit and not + # caught by grid_search (expected behavior), and this would cause an + # error in this test. + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score=0.0, + ) + + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to 0\.0.+" + "5 fits failed with the following error.+ValueError.+Failing classifier failed" + " as required", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + n_candidates = len(gs.cv_results_["params"]) + + # Ensure that grid scores were set to zero as required for those fits + # that are expected to fail. 
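+    # Helper: gather the per-split test scores of candidate i across all CV folds.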
+ def get_cand_scores(i): + return np.array( + [gs.cv_results_["split%d_test_score" % s][i] for s in range(gs.n_splits_)] + ) + + assert all( + ( + np.all(get_cand_scores(cand_i) == 0.0) + for cand_i in range(n_candidates) + if gs.cv_results_["param_parameter"][cand_i] + == FailingClassifier.FAILING_PARAMETER + ) + ) + + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score=float("nan"), + ) + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to nan.+" + "5 fits failed with the following error.+ValueError.+Failing classifier failed" + " as required", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + n_candidates = len(gs.cv_results_["params"]) + assert all( + np.all(np.isnan(get_cand_scores(cand_i))) + for cand_i in range(n_candidates) + if gs.cv_results_["param_parameter"][cand_i] + == FailingClassifier.FAILING_PARAMETER + ) + + ranks = gs.cv_results_["rank_test_score"] + + # Check that succeeded estimators have lower ranks + assert ranks[0] <= 2 and ranks[1] <= 2 + # Check that failed estimator has the highest rank + assert ranks[clf.FAILING_PARAMETER] == 3 + assert gs.best_index_ != clf.FAILING_PARAMETER + + +def test_grid_search_classifier_all_fits_fail(): + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + gs = GridSearchCV( + clf, + [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}], + error_score=0.0, + ) + + warning_message = re.compile( + ( + "All the 15 fits failed.+15 fits failed with the following" + " error.+ValueError.+Failing classifier failed as required" + ), + flags=re.DOTALL, + ) + with pytest.raises(ValueError, match=warning_message): + gs.fit(X, y) + + +def test_grid_search_failing_classifier_raise(): + # GridSearchCV with on_error == 'raise' raises the error + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + # refit=False because we want to test the behaviour of the grid search part + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score="raise", + ) + + # FailingClassifier issues a ValueError so this is what we look for. + with pytest.raises(ValueError): + gs.fit(X, y) + + +def test_parameters_sampler_replacement(): + # raise warning if n_iter is bigger than total parameter space + params = [ + {"first": [0, 1], "second": ["a", "b", "c"]}, + {"third": ["two", "values"]}, + ] + sampler = ParameterSampler(params, n_iter=9) + n_iter = 9 + grid_size = 8 + expected_warning = ( + "The total space of parameters %d is smaller " + "than n_iter=%d. Running %d iterations. For " + "exhaustive searches, use GridSearchCV." 
% (grid_size, n_iter, grid_size) + ) + with pytest.warns(UserWarning, match=expected_warning): + list(sampler) + + # degenerates to GridSearchCV if n_iter the same as grid_size + sampler = ParameterSampler(params, n_iter=8) + samples = list(sampler) + assert len(samples) == 8 + for values in ParameterGrid(params): + assert values in samples + assert len(ParameterSampler(params, n_iter=1000)) == 8 + + # test sampling without replacement in a large grid + params = {"a": range(10), "b": range(10), "c": range(10)} + sampler = ParameterSampler(params, n_iter=99, random_state=42) + samples = list(sampler) + assert len(samples) == 99 + hashable_samples = ["a%db%dc%d" % (p["a"], p["b"], p["c"]) for p in samples] + assert len(set(hashable_samples)) == 99 + + # doesn't go into infinite loops + params_distribution = {"first": bernoulli(0.5), "second": ["a", "b", "c"]} + sampler = ParameterSampler(params_distribution, n_iter=7) + samples = list(sampler) + assert len(samples) == 7 + + +def test_stochastic_gradient_loss_param(): + # Make sure the predict_proba works when loss is specified + # as one of the parameters in the param_grid. + param_grid = { + "loss": ["log_loss"], + } + X = np.arange(24).reshape(6, -1) + y = [0, 0, 0, 1, 1, 1] + clf = GridSearchCV( + estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3 + ) + + # When the estimator is not fitted, `predict_proba` is not available as the + # loss is 'hinge'. + assert not hasattr(clf, "predict_proba") + clf.fit(X, y) + clf.predict_proba(X) + clf.predict_log_proba(X) + + # Make sure `predict_proba` is not available when setting loss=['hinge'] + # in param_grid + param_grid = { + "loss": ["hinge"], + } + clf = GridSearchCV( + estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3 + ) + assert not hasattr(clf, "predict_proba") + clf.fit(X, y) + assert not hasattr(clf, "predict_proba") + + +def test_search_train_scores_set_to_false(): + X = np.arange(6).reshape(6, -1) + y = [0, 0, 0, 1, 1, 1] + clf = LinearSVC(dual="auto", random_state=0) + + gs = GridSearchCV(clf, param_grid={"C": [0.1, 0.2]}, cv=3) + gs.fit(X, y) + + +def test_grid_search_cv_splits_consistency(): + # Check if a one time iterable is accepted as a cv parameter. 
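+    # Strategy: fit the same parameter grid once with a single-use splitter and
+    # once with a reusable KFold, then compare the resulting cv_results_.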
+ n_samples = 100 + n_splits = 5 + X, y = make_classification(n_samples=n_samples, random_state=0) + + gs = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + return_train_score=True, + ) + gs.fit(X, y) + + gs2 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits), + return_train_score=True, + ) + gs2.fit(X, y) + + # Give generator as a cv parameter + assert isinstance( + KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), + GeneratorType, + ) + gs3 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), + return_train_score=True, + ) + gs3.fit(X, y) + + gs4 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits, shuffle=True, random_state=0), + return_train_score=True, + ) + gs4.fit(X, y) + + def _pop_time_keys(cv_results): + for key in ( + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ): + cv_results.pop(key) + return cv_results + + # Check if generators are supported as cv and + # that the splits are consistent + np.testing.assert_equal( + _pop_time_keys(gs3.cv_results_), _pop_time_keys(gs4.cv_results_) + ) + + # OneTimeSplitter is a non-re-entrant cv where split can be called only + # once if ``cv.split`` is called once per param setting in GridSearchCV.fit + # the 2nd and 3rd parameter will not be evaluated as no train/test indices + # will be generated for the 2nd and subsequent cv.split calls. + # This is a check to make sure cv.split is not called once per param + # setting. 
+ np.testing.assert_equal( + {k: v for k, v in gs.cv_results_.items() if not k.endswith("_time")}, + {k: v for k, v in gs2.cv_results_.items() if not k.endswith("_time")}, + ) + + # Check consistency of folds across the parameters + gs = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.1, 0.2, 0.2]}, + cv=KFold(n_splits=n_splits, shuffle=True), + return_train_score=True, + ) + gs.fit(X, y) + + # As the first two param settings (C=0.1) and the next two param + # settings (C=0.2) are same, the test and train scores must also be + # same as long as the same train/test indices are generated for all + # the cv splits, for both param setting + for score_type in ("train", "test"): + per_param_scores = {} + for param_i in range(4): + per_param_scores[param_i] = [ + gs.cv_results_["split%d_%s_score" % (s, score_type)][param_i] + for s in range(5) + ] + + assert_array_almost_equal(per_param_scores[0], per_param_scores[1]) + assert_array_almost_equal(per_param_scores[2], per_param_scores[3]) + + +def test_transform_inverse_transform_round_trip(): + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3) + + grid_search.fit(X, y) + X_round_trip = grid_search.inverse_transform(grid_search.transform(X)) + assert_array_equal(X, X_round_trip) + + +def test_custom_run_search(): + def check_results(results, gscv): + exp_results = gscv.cv_results_ + assert sorted(results.keys()) == sorted(exp_results) + for k in results: + if not k.endswith("_time"): + # XXX: results['params'] is a list :| + results[k] = np.asanyarray(results[k]) + if results[k].dtype.kind == "O": + assert_array_equal( + exp_results[k], results[k], err_msg="Checking " + k + ) + else: + assert_allclose(exp_results[k], results[k], err_msg="Checking " + k) + + def fit_grid(param_grid): + return GridSearchCV(clf, param_grid, return_train_score=True).fit(X, y) + + class CustomSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + def _run_search(self, evaluate): + results = evaluate([{"max_depth": 1}, {"max_depth": 2}]) + check_results(results, fit_grid({"max_depth": [1, 2]})) + results = evaluate([{"min_samples_split": 5}, {"min_samples_split": 10}]) + check_results( + results, + fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}]), + ) + + # Using regressor to make sure each score differs + clf = DecisionTreeRegressor(random_state=0) + X, y = make_classification(n_samples=100, n_informative=4, random_state=0) + mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y) + gscv = fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}]) + + results = mycv.cv_results_ + check_results(results, gscv) + for attr in dir(gscv): + if ( + attr[0].islower() + and attr[-1:] == "_" + and attr + not in { + "cv_results_", + "best_estimator_", + "refit_time_", + "classes_", + "scorer_", + } + ): + assert getattr(gscv, attr) == getattr(mycv, attr), ( + "Attribute %s not equal" % attr + ) + + +def test__custom_fit_no_run_search(): + class NoRunSearchSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + def fit(self, X, y=None, groups=None, **fit_params): + return self + + # this should not raise any exceptions + NoRunSearchSearchCV(SVC()).fit(X, y) + + class BadSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + with pytest.raises(NotImplementedError, match="_run_search not implemented."): + # 
this should raise a NotImplementedError + BadSearchCV(SVC()).fit(X, y) + + +def test_empty_cv_iterator_error(): + # Use global X, y + + # create cv + cv = KFold(n_splits=3).split(X) + + # pop all of it, this should cause the expected ValueError + [u for u in cv] + # cv is empty now + + train_size = 100 + ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) + + # assert that this raises an error + with pytest.raises( + ValueError, + match=( + "No fits were performed. " + "Was the CV iterator empty\\? " + "Were there no candidates\\?" + ), + ): + ridge.fit(X[:train_size], y[:train_size]) + + +def test_random_search_bad_cv(): + # Use global X, y + + class BrokenKFold(KFold): + def get_n_splits(self, *args, **kw): + return 1 + + # create bad cv + cv = BrokenKFold(n_splits=3) + + train_size = 100 + ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) + + # assert that this raises an error + with pytest.raises( + ValueError, + match=( + "cv.split and cv.get_n_splits returned " + "inconsistent results. Expected \\d+ " + "splits, got \\d+" + ), + ): + ridge.fit(X[:train_size], y[:train_size]) + + +@pytest.mark.parametrize("return_train_score", [False, True]) +@pytest.mark.parametrize( + "SearchCV, specialized_params", + [ + (GridSearchCV, {"param_grid": {"max_depth": [2, 3, 5, 8]}}), + ( + RandomizedSearchCV, + {"param_distributions": {"max_depth": [2, 3, 5, 8]}, "n_iter": 4}, + ), + ], +) +def test_searchcv_raise_warning_with_non_finite_score( + SearchCV, specialized_params, return_train_score +): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/10529 + # Check that we raise a UserWarning when a non-finite score is + # computed in the SearchCV + X, y = make_classification(n_classes=2, random_state=0) + + class FailingScorer: + """Scorer that will fail for some split but not all.""" + + def __init__(self): + self.n_counts = 0 + + def __call__(self, estimator, X, y): + self.n_counts += 1 + if self.n_counts % 5 == 0: + return np.nan + return 1 + + grid = SearchCV( + DecisionTreeClassifier(), + scoring=FailingScorer(), + cv=3, + return_train_score=return_train_score, + **specialized_params, + ) + + with pytest.warns(UserWarning) as warn_msg: + grid.fit(X, y) + + set_with_warning = ["test", "train"] if return_train_score else ["test"] + assert len(warn_msg) == len(set_with_warning) + for msg, dataset in zip(warn_msg, set_with_warning): + assert f"One or more of the {dataset} scores are non-finite" in str(msg.message) + + # all non-finite scores should be equally ranked last + last_rank = grid.cv_results_["rank_test_score"].max() + non_finite_mask = np.isnan(grid.cv_results_["mean_test_score"]) + assert_array_equal(grid.cv_results_["rank_test_score"][non_finite_mask], last_rank) + # all finite scores should be better ranked than the non-finite scores + assert np.all(grid.cv_results_["rank_test_score"][~non_finite_mask] < last_rank) + + +def test_callable_multimetric_confusion_matrix(): + # Test callable with many metrics inserts the correct names and metrics + # into the search cv object + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + cm = confusion_matrix(y, y_pred) + return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search = GridSearchCV(est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="fp") + + search.fit(X, y) + + score_names = ["tn", "fp", 
"fn", "tp"] + for name in score_names: + assert "mean_test_{}".format(name) in search.cv_results_ + + y_pred = search.predict(X) + cm = confusion_matrix(y, y_pred) + assert search.score(X, y) == pytest.approx(cm[0, 1]) + + +def test_callable_multimetric_same_as_list_of_strings(): + # Test callable multimetric is the same as a list of strings + def custom_scorer(est, X, y): + y_pred = est.predict(X) + return { + "recall": recall_score(y, y_pred), + "accuracy": accuracy_score(y, y_pred), + } + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search_callable = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="recall" + ) + search_str = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=["recall", "accuracy"], refit="recall" + ) + + search_callable.fit(X, y) + search_str.fit(X, y) + + assert search_callable.best_score_ == pytest.approx(search_str.best_score_) + assert search_callable.best_index_ == search_str.best_index_ + assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y)) + + +def test_callable_single_metric_same_as_single_string(): + # Tests callable scorer is the same as scoring with a single string + def custom_scorer(est, X, y): + y_pred = est.predict(X) + return recall_score(y, y_pred) + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search_callable = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=custom_scorer, refit=True + ) + search_str = GridSearchCV(est, {"C": [0.1, 1]}, scoring="recall", refit="recall") + search_list_str = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=["recall"], refit="recall" + ) + search_callable.fit(X, y) + search_str.fit(X, y) + search_list_str.fit(X, y) + + assert search_callable.best_score_ == pytest.approx(search_str.best_score_) + assert search_callable.best_index_ == search_str.best_index_ + assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y)) + + assert search_list_str.best_score_ == pytest.approx(search_str.best_score_) + assert search_list_str.best_index_ == search_str.best_index_ + assert search_list_str.score(X, y) == pytest.approx(search_str.score(X, y)) + + +def test_callable_multimetric_error_on_invalid_key(): + # Raises when the callable scorer does not return a dict with `refit` key. 
+ def bad_scorer(est, X, y): + return {"bad_name": 1} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring=bad_scorer, + refit="good_name", + ) + + msg = ( + "For multi-metric scoring, the parameter refit must be set to a " + "scorer key or a callable to refit" + ) + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_callable_multimetric_error_failing_clf(): + # Warns when there is an estimator the fails to fit with a float + # error_score + def custom_scorer(est, X, y): + return {"acc": 1} + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring=custom_scorer, + refit=False, + error_score=0.1, + ) + + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to 0\.1", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + + assert_allclose(gs.cv_results_["mean_test_acc"], [1, 1, 0.1]) + + +def test_callable_multimetric_clf_all_fits_fail(): + # Warns and raises when all estimator fails to fit. + def custom_scorer(est, X, y): + return {"acc": 1} + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + gs = GridSearchCV( + clf, + [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}], + scoring=custom_scorer, + refit=False, + error_score=0.1, + ) + + individual_fit_error_message = "ValueError: Failing classifier failed as required" + error_message = re.compile( + ( + "All the 15 fits failed.+your model is misconfigured.+" + f"{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.raises(ValueError, match=error_message): + gs.fit(X, y) + + +def test_n_features_in(): + # make sure grid search and random search delegate n_features_in to the + # best estimator + n_features = 4 + X, y = make_classification(n_features=n_features) + gbdt = HistGradientBoostingClassifier() + param_grid = {"max_iter": [3, 4]} + gs = GridSearchCV(gbdt, param_grid) + rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1) + assert not hasattr(gs, "n_features_in_") + assert not hasattr(rs, "n_features_in_") + gs.fit(X, y) + rs.fit(X, y) + assert gs.n_features_in_ == n_features + assert rs.n_features_in_ == n_features + + +@pytest.mark.parametrize("pairwise", [True, False]) +def test_search_cv_pairwise_property_delegated_to_base_estimator(pairwise): + """ + Test implementation of BaseSearchCV has the pairwise tag + which matches the pairwise tag of its estimator. + This test make sure pairwise tag is delegated to the base estimator. + + Non-regression test for issue #13920. + """ + + class TestEstimator(BaseEstimator): + def _more_tags(self): + return {"pairwise": pairwise} + + est = TestEstimator() + attr_message = "BaseSearchCV pairwise tag must match estimator" + cv = GridSearchCV(est, {"n_neighbors": [10]}) + assert pairwise == cv._get_tags()["pairwise"], attr_message + + +def test_search_cv__pairwise_property_delegated_to_base_estimator(): + """ + Test implementation of BaseSearchCV has the pairwise property + which matches the pairwise tag of its estimator. + This test make sure pairwise tag is delegated to the base estimator. + + Non-regression test for issue #13920. 
+ """ + + class EstimatorPairwise(BaseEstimator): + def __init__(self, pairwise=True): + self.pairwise = pairwise + + def _more_tags(self): + return {"pairwise": self.pairwise} + + est = EstimatorPairwise() + attr_message = "BaseSearchCV _pairwise property must match estimator" + + for _pairwise_setting in [True, False]: + est.set_params(pairwise=_pairwise_setting) + cv = GridSearchCV(est, {"n_neighbors": [10]}) + assert _pairwise_setting == cv._get_tags()["pairwise"], attr_message + + +def test_search_cv_pairwise_property_equivalence_of_precomputed(): + """ + Test implementation of BaseSearchCV has the pairwise tag + which matches the pairwise tag of its estimator. + This test ensures the equivalence of 'precomputed'. + + Non-regression test for issue #13920. + """ + n_samples = 50 + n_splits = 2 + X, y = make_classification(n_samples=n_samples, random_state=0) + grid_params = {"n_neighbors": [10]} + + # defaults to euclidean metric (minkowski p = 2) + clf = KNeighborsClassifier() + cv = GridSearchCV(clf, grid_params, cv=n_splits) + cv.fit(X, y) + preds_original = cv.predict(X) + + # precompute euclidean metric to validate pairwise is working + X_precomputed = euclidean_distances(X) + clf = KNeighborsClassifier(metric="precomputed") + cv = GridSearchCV(clf, grid_params, cv=n_splits) + cv.fit(X_precomputed, y) + preds_precomputed = cv.predict(X_precomputed) + + attr_message = "GridSearchCV not identical with precomputed metric" + assert (preds_original == preds_precomputed).all(), attr_message + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [(GridSearchCV, {"a": [0.1, 0.01]}), (RandomizedSearchCV, {"a": uniform(1, 3)})], +) +def test_scalar_fit_param(SearchCV, param_search): + # unofficially sanctioned tolerance for scalar values in fit_params + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15805 + class TestEstimator(ClassifierMixin, BaseEstimator): + def __init__(self, a=None): + self.a = a + + def fit(self, X, y, r=None): + self.r_ = r + + def predict(self, X): + return np.zeros(shape=(len(X))) + + model = SearchCV(TestEstimator(), param_search) + X, y = make_classification(random_state=42) + model.fit(X, y, r=42) + assert model.best_estimator_.r_ == 42 + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, {"alpha": [0.1, 0.01]}), + (RandomizedSearchCV, {"alpha": uniform(0.01, 0.1)}), + ], +) +def test_scalar_fit_param_compat(SearchCV, param_search): + # check support for scalar values in fit_params, for instance in LightGBM + # that do not exactly respect the scikit-learn API contract but that we do + # not want to break without an explicit deprecation cycle and API + # recommendations for implementing early stopping with a user provided + # validation set. non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15805 + X_train, X_valid, y_train, y_valid = train_test_split( + *make_classification(random_state=42), random_state=42 + ) + + class _FitParamClassifier(SGDClassifier): + def fit( + self, + X, + y, + sample_weight=None, + tuple_of_arrays=None, + scalar_param=None, + callable_param=None, + ): + super().fit(X, y, sample_weight=sample_weight) + assert scalar_param > 0 + assert callable(callable_param) + + # The tuple of arrays should be preserved as tuple. 
+ assert isinstance(tuple_of_arrays, tuple) + assert tuple_of_arrays[0].ndim == 2 + assert tuple_of_arrays[1].ndim == 1 + return self + + def _fit_param_callable(): + pass + + model = SearchCV(_FitParamClassifier(), param_search) + + # NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which + # is not the case for the following parameters. But this abuse is common in + # popular third-party libraries and we should tolerate this behavior for + # now and be careful not to break support for those without following + # proper deprecation cycle. + fit_params = { + "tuple_of_arrays": (X_valid, y_valid), + "callable_param": _fit_param_callable, + "scalar_param": 42, + } + model.fit(X_train, y_train, **fit_params) + + +# FIXME: Replace this test with a full `check_estimator` once we have API only +# checks. +@pytest.mark.filterwarnings("ignore:The total space of parameters 4 is") +@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) +@pytest.mark.parametrize("Predictor", [MinimalRegressor, MinimalClassifier]) +def test_search_cv_using_minimal_compatible_estimator(SearchCV, Predictor): + # Check that third-party library can run tests without inheriting from + # BaseEstimator. + rng = np.random.RandomState(0) + X, y = rng.randn(25, 2), np.array([0] * 5 + [1] * 20) + + model = Pipeline( + [("transformer", MinimalTransformer()), ("predictor", Predictor())] + ) + + params = { + "transformer__param": [1, 10], + "predictor__parama": [1, 10], + } + search = SearchCV(model, params, error_score="raise") + search.fit(X, y) + + assert search.best_params_.keys() == params.keys() + + y_pred = search.predict(X) + if is_classifier(search): + assert_array_equal(y_pred, 1) + assert search.score(X, y) == pytest.approx(accuracy_score(y, y_pred)) + else: + assert_allclose(y_pred, y.mean()) + assert search.score(X, y) == pytest.approx(r2_score(y, y_pred)) + + +@pytest.mark.parametrize("return_train_score", [True, False]) +def test_search_cv_verbose_3(capsys, return_train_score): + """Check that search cv with verbose>2 shows the score for single + metrics. 
non-regression test for #19658.""" + X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [0.1]} + + GridSearchCV( + clf, + grid, + scoring="accuracy", + verbose=3, + cv=3, + return_train_score=return_train_score, + ).fit(X, y) + captured = capsys.readouterr().out + if return_train_score: + match = re.findall(r"score=\(train=[\d\.]+, test=[\d.]+\)", captured) + else: + match = re.findall(r"score=[\d\.]+", captured) + assert len(match) == 3 + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + (HalvingGridSearchCV, "param_grid"), + ], +) +def test_search_estimator_param(SearchCV, param_search): + # test that SearchCV object doesn't change the object given in the parameter grid + X, y = make_classification(random_state=42) + + params = {"clf": [LinearSVC(dual="auto")], "clf__C": [0.01]} + orig_C = params["clf"][0].C + + pipe = Pipeline([("trs", MinimalTransformer()), ("clf", None)]) + + param_grid_search = {param_search: params} + gs = SearchCV(pipe, refit=True, cv=2, scoring="accuracy", **param_grid_search).fit( + X, y + ) + + # testing that the original object in params is not changed + assert params["clf"][0].C == orig_C + # testing that the GS is setting the parameter of the step correctly + assert gs.best_estimator_.named_steps["clf"].C == 0.01 + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + ], +) +def test_multi_metric_search_forwards_metadata(SearchCV, param_search): + """Test that *SearchCV forwards metadata correctly when passed multiple metrics.""" + X, y = make_classification(random_state=42) + n_samples = _num_samples(X) + rng = np.random.RandomState(0) + score_weights = rng.rand(n_samples) + score_metadata = rng.rand(n_samples) + + est = LinearSVC(dual="auto") + param_grid_search = {param_search: {"C": [1]}} + + scorer_registry = _Registry() + scorer = ConsumingScorer(registry=scorer_registry).set_score_request( + sample_weight="score_weights", metadata="score_metadata" + ) + scoring = dict(my_scorer=scorer, accuracy="accuracy") + SearchCV(est, refit="accuracy", cv=2, scoring=scoring, **param_grid_search).fit( + X, y, score_weights=score_weights, score_metadata=score_metadata + ) + assert len(scorer_registry) + for _scorer in scorer_registry: + check_recorded_metadata( + obj=_scorer, + method="score", + split_params=("sample_weight", "metadata"), + sample_weight=score_weights, + metadata=score_metadata, + ) + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + (HalvingGridSearchCV, "param_grid"), + ], +) +def test_score_rejects_params_with_no_routing_enabled(SearchCV, param_search): + """*SearchCV should reject **params when metadata routing is not enabled + since this is added only when routing is enabled.""" + X, y = make_classification(random_state=42) + est = LinearSVC(dual="auto") + param_grid_search = {param_search: {"C": [1]}} + + gs = SearchCV(est, cv=2, **param_grid_search).fit(X, y) + + with pytest.raises(ValueError, match="is only supported if"): + gs.score(X, y, metadata=1) + + +# End of Metadata Routing Tests +# ============================= diff --git 
a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py new file mode 100644 index 0000000000000000000000000000000000000000..57bc6b22351b9403c1939da22c31b6da3241886f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py @@ -0,0 +1,2025 @@ +"""Test the split module""" +import re +import warnings +from itertools import combinations, combinations_with_replacement, permutations + +import numpy as np +import pytest +from scipy import stats +from scipy.sparse import issparse +from scipy.special import comb + +from sklearn import config_context +from sklearn.datasets import load_digits, make_classification +from sklearn.dummy import DummyClassifier +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + LeavePOut, + PredefinedSplit, + RepeatedKFold, + RepeatedStratifiedKFold, + ShuffleSplit, + StratifiedGroupKFold, + StratifiedKFold, + StratifiedShuffleSplit, + TimeSeriesSplit, + check_cv, + cross_val_score, + train_test_split, +) +from sklearn.model_selection._split import ( + _build_repr, + _validate_shuffle_split, + _yields_constant_splits, +) +from sklearn.svm import SVC +from sklearn.tests.metadata_routing_common import assert_request_is_empty +from sklearn.utils._array_api import ( + _convert_to_numpy, + get_namespace, + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._array_api import ( + device as array_api_device, +) +from sklearn.utils._mocking import MockDataFrame +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.estimator_checks import ( + _array_api_for_tests, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + +NO_GROUP_SPLITTERS = [ + KFold(), + StratifiedKFold(), + TimeSeriesSplit(), + LeaveOneOut(), + LeavePOut(p=2), + ShuffleSplit(), + StratifiedShuffleSplit(test_size=0.5), + PredefinedSplit([1, 1, 2, 2]), + RepeatedKFold(), + RepeatedStratifiedKFold(), +] + +GROUP_SPLITTERS = [ + GroupKFold(), + LeavePGroupsOut(n_groups=1), + StratifiedGroupKFold(), + LeaveOneGroupOut(), + GroupShuffleSplit(), +] + +ALL_SPLITTERS = NO_GROUP_SPLITTERS + GROUP_SPLITTERS # type: ignore + +X = np.ones(10) +y = np.arange(10) // 2 +test_groups = ( + np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), + np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), + np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), + [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], + ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"], +) +digits = load_digits() + + +@ignore_warnings +def test_cross_validator_with_default_params(): + n_samples = 4 + n_unique_groups = 4 + n_splits = 2 + p = 2 + n_shuffle_splits = 10 # (the default value) + + X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + X_1d = np.array([1, 2, 3, 4]) + y = np.array([1, 1, 2, 2]) + groups = np.array([1, 2, 3, 4]) + loo = LeaveOneOut() + lpo = LeavePOut(p) + kf = KFold(n_splits) + skf = StratifiedKFold(n_splits) + lolo = LeaveOneGroupOut() + lopo = LeavePGroupsOut(p) + ss = ShuffleSplit(random_state=0) + ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = np of unique folds = 2 + sgkf = StratifiedGroupKFold(n_splits) + + loo_repr = "LeaveOneOut()" 
+ lpo_repr = "LeavePOut(p=2)" + kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)" + skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)" + lolo_repr = "LeaveOneGroupOut()" + lopo_repr = "LeavePGroupsOut(n_groups=2)" + ss_repr = ( + "ShuffleSplit(n_splits=10, random_state=0, test_size=None, train_size=None)" + ) + ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))" + sgkf_repr = "StratifiedGroupKFold(n_splits=2, random_state=None, shuffle=False)" + + n_splits_expected = [ + n_samples, + comb(n_samples, p), + n_splits, + n_splits, + n_unique_groups, + comb(n_unique_groups, p), + n_shuffle_splits, + 2, + n_splits, + ] + + for i, (cv, cv_repr) in enumerate( + zip( + [loo, lpo, kf, skf, lolo, lopo, ss, ps, sgkf], + [ + loo_repr, + lpo_repr, + kf_repr, + skf_repr, + lolo_repr, + lopo_repr, + ss_repr, + ps_repr, + sgkf_repr, + ], + ) + ): + # Test if get_n_splits works correctly + assert n_splits_expected[i] == cv.get_n_splits(X, y, groups) + + # Test if the cross-validator works as expected even if + # the data is 1d + np.testing.assert_equal( + list(cv.split(X, y, groups)), list(cv.split(X_1d, y, groups)) + ) + # Test that train, test indices returned are integers + for train, test in cv.split(X, y, groups): + assert np.asarray(train).dtype.kind == "i" + assert np.asarray(test).dtype.kind == "i" + + # Test if the repr works without any errors + assert cv_repr == repr(cv) + + # ValueError for get_n_splits methods + msg = "The 'X' parameter should not be None." + with pytest.raises(ValueError, match=msg): + loo.get_n_splits(None, y, groups) + with pytest.raises(ValueError, match=msg): + lpo.get_n_splits(None, y, groups) + + +def test_2d_y(): + # smoke test for 2d y and multi-label + n_samples = 30 + rng = np.random.RandomState(1) + X = rng.randint(0, 3, size=(n_samples, 2)) + y = rng.randint(0, 3, size=(n_samples,)) + y_2d = y.reshape(-1, 1) + y_multilabel = rng.randint(0, 2, size=(n_samples, 3)) + groups = rng.randint(0, 3, size=(n_samples,)) + splitters = [ + LeaveOneOut(), + LeavePOut(p=2), + KFold(), + StratifiedKFold(), + RepeatedKFold(), + RepeatedStratifiedKFold(), + StratifiedGroupKFold(), + ShuffleSplit(), + StratifiedShuffleSplit(test_size=0.5), + GroupShuffleSplit(), + LeaveOneGroupOut(), + LeavePGroupsOut(n_groups=2), + GroupKFold(n_splits=3), + TimeSeriesSplit(), + PredefinedSplit(test_fold=groups), + ] + for splitter in splitters: + list(splitter.split(X, y, groups)) + list(splitter.split(X, y_2d, groups)) + try: + list(splitter.split(X, y_multilabel, groups)) + except ValueError as e: + allowed_target_types = ("binary", "multiclass") + msg = "Supported target types are: {}. 
Got 'multilabel".format( + allowed_target_types + ) + assert msg in str(e) + + +def check_valid_split(train, test, n_samples=None): + # Use python sets to get more informative assertion failure messages + train, test = set(train), set(test) + + # Train and test split should not overlap + assert train.intersection(test) == set() + + if n_samples is not None: + # Check that the union of train an test split cover all the indices + assert train.union(test) == set(range(n_samples)) + + +def check_cv_coverage(cv, X, y, groups, expected_n_splits): + n_samples = _num_samples(X) + # Check that a all the samples appear at least once in a test fold + assert cv.get_n_splits(X, y, groups) == expected_n_splits + + collected_test_samples = set() + iterations = 0 + for train, test in cv.split(X, y, groups): + check_valid_split(train, test, n_samples=n_samples) + iterations += 1 + collected_test_samples.update(test) + + # Check that the accumulated test samples cover the whole dataset + assert iterations == expected_n_splits + if n_samples is not None: + assert collected_test_samples == set(range(n_samples)) + + +def test_kfold_valueerrors(): + X1 = np.array([[1, 2], [3, 4], [5, 6]]) + X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) + # Check that errors are raised if there is not enough samples + (ValueError, next, KFold(4).split(X1)) + + # Check that a warning is raised if the least populated class has too few + # members. + y = np.array([3, 3, -1, -1, 3]) + + skf_3 = StratifiedKFold(3) + with pytest.warns(Warning, match="The least populated class"): + next(skf_3.split(X2, y)) + + sgkf_3 = StratifiedGroupKFold(3) + naive_groups = np.arange(len(y)) + with pytest.warns(Warning, match="The least populated class"): + next(sgkf_3.split(X2, y, naive_groups)) + + # Check that despite the warning the folds are still computed even + # though all the classes are not necessarily represented at on each + # side of the split at each split + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + check_cv_coverage(sgkf_3, X2, y, groups=naive_groups, expected_n_splits=3) + + # Check that errors are raised if all n_groups for individual + # classes are less than n_splits. 
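+    # Each class below has at most 2 members, fewer than n_splits=3, so both
+    # stratified splitters must raise.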
+ y = np.array([3, 3, -1, -1, 2]) + + with pytest.raises(ValueError): + next(skf_3.split(X2, y)) + with pytest.raises(ValueError): + next(sgkf_3.split(X2, y)) + + # Error when number of folds is <= 1 + with pytest.raises(ValueError): + KFold(0) + with pytest.raises(ValueError): + KFold(1) + error_string = "k-fold cross-validation requires at least one train/test split" + with pytest.raises(ValueError, match=error_string): + StratifiedKFold(0) + with pytest.raises(ValueError, match=error_string): + StratifiedKFold(1) + with pytest.raises(ValueError, match=error_string): + StratifiedGroupKFold(0) + with pytest.raises(ValueError, match=error_string): + StratifiedGroupKFold(1) + + # When n_splits is not integer: + with pytest.raises(ValueError): + KFold(1.5) + with pytest.raises(ValueError): + KFold(2.0) + with pytest.raises(ValueError): + StratifiedKFold(1.5) + with pytest.raises(ValueError): + StratifiedKFold(2.0) + with pytest.raises(ValueError): + StratifiedGroupKFold(1.5) + with pytest.raises(ValueError): + StratifiedGroupKFold(2.0) + + # When shuffle is not a bool: + with pytest.raises(TypeError): + KFold(n_splits=4, shuffle=None) + + +def test_kfold_indices(): + # Check all indices are returned in the test folds + X1 = np.ones(18) + kf = KFold(3) + check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3) + + # Check all indices are returned in the test folds even when equal-sized + # folds are not possible + X2 = np.ones(17) + kf = KFold(3) + check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3) + + # Check if get_n_splits returns the number of folds + assert 5 == KFold(5).get_n_splits(X2) + + +def test_kfold_no_shuffle(): + # Manually check that KFold preserves the data ordering on toy datasets + X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + + splits = KFold(2).split(X2[:-1]) + train, test = next(splits) + assert_array_equal(test, [0, 1]) + assert_array_equal(train, [2, 3]) + + train, test = next(splits) + assert_array_equal(test, [2, 3]) + assert_array_equal(train, [0, 1]) + + splits = KFold(2).split(X2) + train, test = next(splits) + assert_array_equal(test, [0, 1, 2]) + assert_array_equal(train, [3, 4]) + + train, test = next(splits) + assert_array_equal(test, [3, 4]) + assert_array_equal(train, [0, 1, 2]) + + +def test_stratified_kfold_no_shuffle(): + # Manually check that StratifiedKFold preserves the data ordering as much + # as possible on toy datasets in order to avoid hiding sample dependencies + # when possible + X, y = np.ones(4), [1, 1, 0, 0] + splits = StratifiedKFold(2).split(X, y) + train, test = next(splits) + assert_array_equal(test, [0, 2]) + assert_array_equal(train, [1, 3]) + + train, test = next(splits) + assert_array_equal(test, [1, 3]) + assert_array_equal(train, [0, 2]) + + X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0] + splits = StratifiedKFold(2).split(X, y) + train, test = next(splits) + assert_array_equal(test, [0, 1, 3, 4]) + assert_array_equal(train, [2, 5, 6]) + + train, test = next(splits) + assert_array_equal(test, [2, 5, 6]) + assert_array_equal(train, [0, 1, 3, 4]) + + # Check if get_n_splits returns the number of folds + assert 5 == StratifiedKFold(5).get_n_splits(X, y) + + # Make sure string labels are also supported + X = np.ones(7) + y1 = ["1", "1", "1", "0", "0", "0", "0"] + y2 = [1, 1, 1, 0, 0, 0, 0] + np.testing.assert_equal( + list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2)) + ) + + # Check equivalence to KFold + y = [0, 1, 0, 1, 0, 1, 0, 1] + X = np.ones_like(y) + np.testing.assert_equal( + 
list(StratifiedKFold(3).split(X, y)), list(KFold(3).split(X, y)) + ) + + +@pytest.mark.parametrize("shuffle", [False, True]) +@pytest.mark.parametrize("k", [4, 5, 6, 7, 8, 9, 10]) +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratified_kfold_ratios(k, shuffle, kfold): + # Check that stratified kfold preserves class ratios in individual splits + # Repeat with shuffling turned off and on + n_samples = 1000 + X = np.ones(n_samples) + y = np.array( + [4] * int(0.10 * n_samples) + + [0] * int(0.89 * n_samples) + + [1] * int(0.01 * n_samples) + ) + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + distr = np.bincount(y) / len(y) + + test_sizes = [] + random_state = None if not shuffle else 0 + skf = kfold(k, random_state=random_state, shuffle=shuffle) + for train, test in skf.split(X, y, groups=groups): + assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02) + assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +@pytest.mark.parametrize("shuffle", [False, True]) +@pytest.mark.parametrize("k", [4, 6, 7]) +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratified_kfold_label_invariance(k, shuffle, kfold): + # Check that stratified kfold gives the same indices regardless of labels + n_samples = 100 + y = np.array( + [2] * int(0.10 * n_samples) + + [0] * int(0.89 * n_samples) + + [1] * int(0.01 * n_samples) + ) + X = np.ones(len(y)) + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + + def get_splits(y): + random_state = None if not shuffle else 0 + return [ + (list(train), list(test)) + for train, test in kfold( + k, random_state=random_state, shuffle=shuffle + ).split(X, y, groups=groups) + ] + + splits_base = get_splits(y) + for perm in permutations([0, 1, 2]): + y_perm = np.take(perm, y) + splits_perm = get_splits(y_perm) + assert splits_perm == splits_base + + +def test_kfold_balance(): + # Check that KFold returns folds with balanced sizes + for i in range(11, 17): + kf = KFold(5).split(X=np.ones(i)) + sizes = [len(test) for _, test in kf] + + assert (np.max(sizes) - np.min(sizes)) <= 1 + assert np.sum(sizes) == i + + +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratifiedkfold_balance(kfold): + # Check that KFold returns folds with balanced sizes (only when + # stratification is possible) + # Repeat with shuffling turned off and on + X = np.ones(17) + y = [0] * 3 + [1] * 14 + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + + for shuffle in (True, False): + cv = kfold(3, shuffle=shuffle) + for i in range(11, 17): + skf = cv.split(X[:i], y[:i], groups[:i]) + sizes = [len(test) for _, test in skf] + + assert (np.max(sizes) - np.min(sizes)) <= 1 + assert np.sum(sizes) == i + + +def test_shuffle_kfold(): + # Check the indices are shuffled properly + kf = KFold(3) + kf2 = KFold(3, shuffle=True, random_state=0) + kf3 = KFold(3, shuffle=True, random_state=1) + + X = np.ones(300) + + all_folds = np.zeros(300) + for (tr1, te1), (tr2, te2), (tr3, te3) in zip( + kf.split(X), kf2.split(X), kf3.split(X) + ): + for tr_a, tr_b in combinations((tr1, tr2, tr3), 2): + # Assert that there is no complete overlap + assert len(np.intersect1d(tr_a, tr_b)) != len(tr1) + + # Set all test indices in successive iterations of kf2 to 1 + all_folds[te2] = 1 + + # Check that all indices are 
returned in the different test folds + assert sum(all_folds) == 300 + + +@pytest.mark.parametrize("kfold", [KFold, StratifiedKFold, StratifiedGroupKFold]) +def test_shuffle_kfold_stratifiedkfold_reproducibility(kfold): + X = np.ones(15) # Divisible by 3 + y = [0] * 7 + [1] * 8 + groups_1 = np.arange(len(y)) + X2 = np.ones(16) # Not divisible by 3 + y2 = [0] * 8 + [1] * 8 + groups_2 = np.arange(len(y2)) + + # Check that when the shuffle is True, multiple split calls produce the + # same split when random_state is int + kf = kfold(3, shuffle=True, random_state=0) + + np.testing.assert_equal( + list(kf.split(X, y, groups_1)), list(kf.split(X, y, groups_1)) + ) + + # Check that when the shuffle is True, multiple split calls often + # (not always) produce different splits when random_state is + # RandomState instance or None + kf = kfold(3, shuffle=True, random_state=np.random.RandomState(0)) + for data in zip((X, X2), (y, y2), (groups_1, groups_2)): + # Test if the two splits are different cv + for (_, test_a), (_, test_b) in zip(kf.split(*data), kf.split(*data)): + # cv.split(...) returns an array of tuples, each tuple + # consisting of an array with train indices and test indices + # Ensure that the splits for data are not same + # when random state is not set + with pytest.raises(AssertionError): + np.testing.assert_array_equal(test_a, test_b) + + +def test_shuffle_stratifiedkfold(): + # Check that shuffling is happening when requested, and for proper + # sample coverage + X_40 = np.ones(40) + y = [0] * 20 + [1] * 20 + kf0 = StratifiedKFold(5, shuffle=True, random_state=0) + kf1 = StratifiedKFold(5, shuffle=True, random_state=1) + for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)): + assert set(test0) != set(test1) + check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5) + + # Ensure that we shuffle each class's samples with different + # random_state in StratifiedKFold + # See https://github.com/scikit-learn/scikit-learn/pull/13124 + X = np.arange(10) + y = [0] * 5 + [1] * 5 + kf1 = StratifiedKFold(5, shuffle=True, random_state=0) + kf2 = StratifiedKFold(5, shuffle=True, random_state=1) + test_set1 = sorted([tuple(s[1]) for s in kf1.split(X, y)]) + test_set2 = sorted([tuple(s[1]) for s in kf2.split(X, y)]) + assert test_set1 != test_set2 + + +def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 + # The digits samples are dependent: they are apparently grouped by authors + # although we don't have any information on the groups segment locations + # for this data. We can highlight this fact by computing k-fold cross- + # validation with and without shuffling: we observe that the shuffling case + # wrongly makes the IID assumption and is therefore too optimistic: it + # estimates a much higher accuracy (around 0.93) than that the non + # shuffling variant (around 0.81). 
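+    # A minimal sketch of the pattern being tested (assuming a classifier
+    # `model` and data `X`, `y` whose neighbouring samples are dependent):
+    #
+    #   cross_val_score(model, X, y, cv=KFold(3, shuffle=False)).mean()   # honest estimate
+    #   cross_val_score(model, X, y, cv=KFold(3, shuffle=True, random_state=0)).mean()  # optimistic
+    #
+    # Shuffling mixes samples from the same writer into both train and test
+    # folds, which is why the shuffled estimates below come out much higher.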
+ + X, y = digits.data[:600], digits.target[:600] + model = SVC(C=10, gamma=0.005) + + n_splits = 3 + + cv = KFold(n_splits=n_splits, shuffle=False) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert 0.92 > mean_score + assert mean_score > 0.80 + + # Shuffling the data artificially breaks the dependency and hides the + # overfitting of the model with regards to the writing style of the authors + # by yielding a seriously overestimated score: + + cv = KFold(n_splits, shuffle=True, random_state=0) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert mean_score > 0.92 + + cv = KFold(n_splits, shuffle=True, random_state=1) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert mean_score > 0.92 + + # Similarly, StratifiedKFold should try to shuffle the data as little + # as possible (while respecting the balanced class constraints) + # and thus be able to detect the dependency by not overestimating + # the CV score either. As the digits dataset is approximately balanced + # the estimated mean score is close to the score measured with + # non-shuffled KFold + + cv = StratifiedKFold(n_splits) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert 0.94 > mean_score + assert mean_score > 0.80 + + +def test_stratified_group_kfold_trivial(): + sgkf = StratifiedGroupKFold(n_splits=3) + # Trivial example - groups with the same distribution + y = np.array([1] * 6 + [0] * 12) + X = np.ones_like(y).reshape(-1, 1) + groups = np.asarray((1, 2, 3, 4, 5, 6, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6)) + distr = np.bincount(y) / len(y) + test_sizes = [] + for train, test in sgkf.split(X, y, groups): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + # check y distribution + assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02) + assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +def test_stratified_group_kfold_approximate(): + # Not perfect stratification (even though it is possible) because of + # iteration over groups + sgkf = StratifiedGroupKFold(n_splits=3) + y = np.array([1] * 6 + [0] * 12) + X = np.ones_like(y).reshape(-1, 1) + groups = np.array([1, 2, 3, 3, 4, 4, 1, 1, 2, 2, 3, 4, 5, 5, 5, 6, 6, 6]) + expected = np.asarray([[0.833, 0.166], [0.666, 0.333], [0.5, 0.5]]) + test_sizes = [] + for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + split_dist = np.bincount(y[test]) / len(test) + assert_allclose(split_dist, expect_dist, atol=0.001) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +@pytest.mark.parametrize( + "y, groups, expected", + [ + ( + np.array([0] * 6 + [1] * 6), + np.array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]), + np.asarray([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]), + ), + ( + np.array([0] * 9 + [1] * 3), + np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6]), + np.asarray([[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]), + ), + ], +) +def test_stratified_group_kfold_homogeneous_groups(y, groups, expected): + sgkf = StratifiedGroupKFold(n_splits=3) + X = np.ones_like(y).reshape(-1, 1) + for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + split_dist = np.bincount(y[test]) / len(test) + assert_allclose(split_dist, expect_dist, atol=0.001) + + 
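+# A minimal usage sketch of the splitter exercised above (hedged illustration;
+# it relies only on the module-level `np` and `StratifiedGroupKFold` imports
+# and is intentionally not named `test_*` so pytest does not collect it):
+def _example_stratified_group_kfold_usage():
+    y = np.array([0] * 6 + [1] * 6)
+    X = np.zeros((len(y), 1))
+    groups = np.array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
+    for train, test in StratifiedGroupKFold(n_splits=3).split(X, y, groups):
+        # whole groups stay on one side of the split...
+        assert np.intersect1d(groups[train], groups[test]).size == 0
+        # ...while each test fold keeps the global 50/50 class balance here
+        assert np.bincount(y[test]).tolist() == [2, 2]
+
+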
+@pytest.mark.parametrize("cls_distr", [(0.4, 0.6), (0.3, 0.7), (0.2, 0.8), (0.8, 0.2)]) +@pytest.mark.parametrize("n_groups", [5, 30, 70]) +def test_stratified_group_kfold_against_group_kfold(cls_distr, n_groups): + # Check that given sufficient amount of samples StratifiedGroupKFold + # produces better stratified folds than regular GroupKFold + n_splits = 5 + sgkf = StratifiedGroupKFold(n_splits=n_splits) + gkf = GroupKFold(n_splits=n_splits) + rng = np.random.RandomState(0) + n_points = 1000 + y = rng.choice(2, size=n_points, p=cls_distr) + X = np.ones_like(y).reshape(-1, 1) + g = rng.choice(n_groups, n_points) + sgkf_folds = sgkf.split(X, y, groups=g) + gkf_folds = gkf.split(X, y, groups=g) + sgkf_entr = 0 + gkf_entr = 0 + for (sgkf_train, sgkf_test), (_, gkf_test) in zip(sgkf_folds, gkf_folds): + # check group constraint + assert np.intersect1d(g[sgkf_train], g[sgkf_test]).size == 0 + sgkf_distr = np.bincount(y[sgkf_test]) / len(sgkf_test) + gkf_distr = np.bincount(y[gkf_test]) / len(gkf_test) + sgkf_entr += stats.entropy(sgkf_distr, qk=cls_distr) + gkf_entr += stats.entropy(gkf_distr, qk=cls_distr) + sgkf_entr /= n_splits + gkf_entr /= n_splits + assert sgkf_entr <= gkf_entr + + +def test_shuffle_split(): + ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X) + ss2 = ShuffleSplit(test_size=2, random_state=0).split(X) + ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X) + ss4 = ShuffleSplit(test_size=int(2), random_state=0).split(X) + for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): + assert_array_equal(t1[0], t2[0]) + assert_array_equal(t2[0], t3[0]) + assert_array_equal(t3[0], t4[0]) + assert_array_equal(t1[1], t2[1]) + assert_array_equal(t2[1], t3[1]) + assert_array_equal(t3[1], t4[1]) + + +@pytest.mark.parametrize("split_class", [ShuffleSplit, StratifiedShuffleSplit]) +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 9, 1), (8, 8, 2), (0.8, 8, 2)] +) +def test_shuffle_split_default_test_size(split_class, train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. 0.1 if both + # unspecified or complement train_size unless both are specified. + X = np.ones(10) + y = np.ones(10) + + X_train, X_test = next(split_class(train_size=train_size).split(X, y)) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 8, 2), (7, 7, 3), (0.7, 7, 3)] +) +def test_group_shuffle_split_default_test_size(train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. 0.2 if both + # unspecified or complement train_size unless both are specified. 
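+    # Concretely (sketch): with 10 singleton groups and both sizes left unset,
+    #   next(GroupShuffleSplit().split(np.ones(10), groups=range(10)))
+    # is expected to yield 8 train and 2 test indices (test_size defaults to 0.2).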
+ X = np.ones(10) + y = np.ones(10) + groups = range(10) + + X_train, X_test = next(GroupShuffleSplit(train_size=train_size).split(X, y, groups)) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@ignore_warnings +def test_stratified_shuffle_split_init(): + X = np.arange(7) + y = np.asarray([0, 1, 1, 1, 2, 2, 2]) + # Check that error is raised if there is a class with only one sample + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=0.2).split(X, y)) + + # Check that error is raised if the test set size is smaller than n_classes + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=2).split(X, y)) + # Check that error is raised if the train set size is smaller than + # n_classes + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=3, train_size=2).split(X, y)) + + X = np.arange(9) + y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) + + # Train size or test size too small + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(train_size=2).split(X, y)) + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(test_size=2).split(X, y)) + + +def test_stratified_shuffle_split_respects_test_size(): + y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]) + test_size = 5 + train_size = 10 + sss = StratifiedShuffleSplit( + 6, test_size=test_size, train_size=train_size, random_state=0 + ).split(np.ones(len(y)), y) + for train, test in sss: + assert len(train) == train_size + assert len(test) == test_size + + +def test_stratified_shuffle_split_iter(): + ys = [ + np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), + np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), + np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), + np.array([-1] * 800 + [1] * 50), + np.concatenate([[i] * (100 + i) for i in range(11)]), + [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], + ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"], + ] + + for y in ys: + sss = StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split( + np.ones(len(y)), y + ) + y = np.asanyarray(y) # To make it indexable for y[train] + # this is how test-size is computed internally + # in _validate_shuffle_split + test_size = np.ceil(0.33 * len(y)) + train_size = len(y) - test_size + for train, test in sss: + assert_array_equal(np.unique(y[train]), np.unique(y[test])) + # Checks if folds keep classes proportions + p_train = np.bincount(np.unique(y[train], return_inverse=True)[1]) / float( + len(y[train]) + ) + p_test = np.bincount(np.unique(y[test], return_inverse=True)[1]) / float( + len(y[test]) + ) + assert_array_almost_equal(p_train, p_test, 1) + assert len(train) + len(test) == y.size + assert len(train) == train_size + assert len(test) == test_size + assert_array_equal(np.intersect1d(train, test), []) + + +def test_stratified_shuffle_split_even(): + # Test the StratifiedShuffleSplit, indices are drawn with a + # equal chance + n_folds = 5 + n_splits = 1000 + + def assert_counts_are_ok(idx_counts, p): + # Here we test that the distribution of the counts + # per index is close enough to a binomial + threshold = 0.05 / n_splits + bf = stats.binom(n_splits, p) + for count in idx_counts: + prob = bf.pmf(count) + assert ( + prob > threshold + ), "An index is not drawn with chance corresponding to even draws" + + for n_samples in (6, 22): + groups = np.array((n_samples // 2) * [0, 1]) + splits = StratifiedShuffleSplit( + n_splits=n_splits, test_size=1.0 / n_folds, 
random_state=0 + ) + + train_counts = [0] * n_samples + test_counts = [0] * n_samples + n_splits_actual = 0 + for train, test in splits.split(X=np.ones(n_samples), y=groups): + n_splits_actual += 1 + for counter, ids in [(train_counts, train), (test_counts, test)]: + for id in ids: + counter[id] += 1 + assert n_splits_actual == n_splits + + n_train, n_test = _validate_shuffle_split( + n_samples, test_size=1.0 / n_folds, train_size=1.0 - (1.0 / n_folds) + ) + + assert len(train) == n_train + assert len(test) == n_test + assert len(set(train).intersection(test)) == 0 + + group_counts = np.unique(groups) + assert splits.test_size == 1.0 / n_folds + assert n_train + n_test == len(groups) + assert len(group_counts) == 2 + ex_test_p = float(n_test) / n_samples + ex_train_p = float(n_train) / n_samples + + assert_counts_are_ok(train_counts, ex_train_p) + assert_counts_are_ok(test_counts, ex_test_p) + + +def test_stratified_shuffle_split_overlap_train_test_bug(): + # See https://github.com/scikit-learn/scikit-learn/issues/6121 for + # the original bug report + y = [0, 1, 2, 3] * 3 + [4, 5] * 5 + X = np.ones_like(y) + + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + + train, test = next(sss.split(X=X, y=y)) + + # no overlap + assert_array_equal(np.intersect1d(train, test), []) + + # complete partition + assert_array_equal(np.union1d(train, test), np.arange(len(y))) + + +def test_stratified_shuffle_split_multilabel(): + # fix for issue 9037 + for y in [ + np.array([[0, 1], [1, 0], [1, 0], [0, 1]]), + np.array([[0, 1], [1, 1], [1, 1], [0, 1]]), + ]: + X = np.ones_like(y) + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + train, test = next(sss.split(X=X, y=y)) + y_train = y[train] + y_test = y[test] + + # no overlap + assert_array_equal(np.intersect1d(train, test), []) + + # complete partition + assert_array_equal(np.union1d(train, test), np.arange(len(y))) + + # correct stratification of entire rows + # (by design, here y[:, 0] uniquely determines the entire row of y) + expected_ratio = np.mean(y[:, 0]) + assert expected_ratio == np.mean(y_train[:, 0]) + assert expected_ratio == np.mean(y_test[:, 0]) + + +def test_stratified_shuffle_split_multilabel_many_labels(): + # fix in PR #9922: for multilabel data with > 1000 labels, str(row) + # truncates with an ellipsis for elements in positions 4 through + # len(row) - 4, so labels were not being correctly split using the powerset + # method for transforming a multilabel problem to a multiclass one; this + # test checks that this problem is fixed. + row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1] + row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1] + y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100) + X = np.ones_like(y) + + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + train, test = next(sss.split(X=X, y=y)) + y_train = y[train] + y_test = y[test] + + # correct stratification of entire rows + # (by design, here y[:, 4] uniquely determines the entire row of y) + expected_ratio = np.mean(y[:, 4]) + assert expected_ratio == np.mean(y_train[:, 4]) + assert expected_ratio == np.mean(y_test[:, 4]) + + +def test_predefinedsplit_with_kfold_split(): + # Check that PredefinedSplit can reproduce a split generated by Kfold. 
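+    # PredefinedSplit takes a `test_fold` array where entry i is the index of
+    # the test fold that sample i belongs to, and -1 keeps sample i in every
+    # training set.  A minimal sketch:
+    #   PredefinedSplit(test_fold=[0, 1, -1, 0, 1])  # 2 splits; sample 2 is never tested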
+ folds = np.full(10, -1.0) + kf_train = [] + kf_test = [] + for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)): + kf_train.append(train_ind) + kf_test.append(test_ind) + folds[test_ind] = i + ps = PredefinedSplit(folds) + # n_splits is simply the no of unique folds + assert len(np.unique(folds)) == ps.get_n_splits() + ps_train, ps_test = zip(*ps.split()) + assert_array_equal(ps_train, kf_train) + assert_array_equal(ps_test, kf_test) + + +def test_group_shuffle_split(): + for groups_i in test_groups: + X = y = np.ones(len(groups_i)) + n_splits = 6 + test_size = 1.0 / 3 + slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0) + + # Make sure the repr works + repr(slo) + + # Test that the length is correct + assert slo.get_n_splits(X, y, groups=groups_i) == n_splits + + l_unique = np.unique(groups_i) + l = np.asarray(groups_i) + + for train, test in slo.split(X, y, groups=groups_i): + # First test: no train group is in the test set and vice versa + l_train_unique = np.unique(l[train]) + l_test_unique = np.unique(l[test]) + assert not np.any(np.isin(l[train], l_test_unique)) + assert not np.any(np.isin(l[test], l_train_unique)) + + # Second test: train and test add up to all the data + assert l[train].size + l[test].size == l.size + + # Third test: train and test are disjoint + assert_array_equal(np.intersect1d(train, test), []) + + # Fourth test: + # unique train and test groups are correct, +- 1 for rounding error + assert abs(len(l_test_unique) - round(test_size * len(l_unique))) <= 1 + assert ( + abs(len(l_train_unique) - round((1.0 - test_size) * len(l_unique))) <= 1 + ) + + +def test_leave_one_p_group_out(): + logo = LeaveOneGroupOut() + lpgo_1 = LeavePGroupsOut(n_groups=1) + lpgo_2 = LeavePGroupsOut(n_groups=2) + + # Make sure the repr works + assert repr(logo) == "LeaveOneGroupOut()" + assert repr(lpgo_1) == "LeavePGroupsOut(n_groups=1)" + assert repr(lpgo_2) == "LeavePGroupsOut(n_groups=2)" + assert repr(LeavePGroupsOut(n_groups=3)) == "LeavePGroupsOut(n_groups=3)" + + for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1), (lpgo_2, 2))): + for i, groups_i in enumerate(test_groups): + n_groups = len(np.unique(groups_i)) + n_splits = n_groups if p_groups_out == 1 else n_groups * (n_groups - 1) / 2 + X = y = np.ones(len(groups_i)) + + # Test that the length is correct + assert cv.get_n_splits(X, y, groups=groups_i) == n_splits + + groups_arr = np.asarray(groups_i) + + # Split using the original list / array / list of string groups_i + for train, test in cv.split(X, y, groups=groups_i): + # First test: no train group is in the test set and vice versa + assert_array_equal( + np.intersect1d(groups_arr[train], groups_arr[test]).tolist(), [] + ) + + # Second test: train and test add up to all the data + assert len(train) + len(test) == len(groups_i) + + # Third test: + # The number of groups in test must be equal to p_groups_out + assert np.unique(groups_arr[test]).shape[0], p_groups_out + + # check get_n_splits() with dummy parameters + assert logo.get_n_splits(None, None, ["a", "b", "c", "b", "c"]) == 3 + assert logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]) == 3 + assert lpgo_2.get_n_splits(None, None, np.arange(4)) == 6 + assert lpgo_1.get_n_splits(groups=np.arange(4)) == 4 + + # raise ValueError if a `groups` parameter is illegal + with pytest.raises(ValueError): + logo.get_n_splits(None, None, [0.0, np.nan, 0.0]) + with pytest.raises(ValueError): + lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0]) + + msg = "The 'groups' parameter 
should not be None." + with pytest.raises(ValueError, match=msg): + logo.get_n_splits(None, None, None) + with pytest.raises(ValueError, match=msg): + lpgo_1.get_n_splits(None, None, None) + + +def test_leave_group_out_changing_groups(): + # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if + # the groups variable is changed before calling split + groups = np.array([0, 1, 2, 1, 1, 2, 0, 0]) + X = np.ones(len(groups)) + groups_changing = np.array(groups, copy=True) + lolo = LeaveOneGroupOut().split(X, groups=groups) + lolo_changing = LeaveOneGroupOut().split(X, groups=groups) + lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups) + lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups) + groups_changing[:] = 0 + for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: + for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): + assert_array_equal(train, train_chan) + assert_array_equal(test, test_chan) + + # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3 + assert 3 == LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X, groups=groups) + # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups) + assert 3 == LeaveOneGroupOut().get_n_splits(X, y=X, groups=groups) + + +def test_leave_group_out_order_dependence(): + # Check that LeaveOneGroupOut orders the splits according to the index + # of the group left out. + groups = np.array([2, 2, 0, 0, 1, 1]) + X = np.ones(len(groups)) + + splits = iter(LeaveOneGroupOut().split(X, groups=groups)) + + expected_indices = [ + ([0, 1, 4, 5], [2, 3]), + ([0, 1, 2, 3], [4, 5]), + ([2, 3, 4, 5], [0, 1]), + ] + + for expected_train, expected_test in expected_indices: + train, test = next(splits) + assert_array_equal(train, expected_train) + assert_array_equal(test, expected_test) + + +def test_leave_one_p_group_out_error_on_fewer_number_of_groups(): + X = y = groups = np.ones(0) + msg = re.escape("Found array with 0 sample(s)") + with pytest.raises(ValueError, match=msg): + next(LeaveOneGroupOut().split(X, y, groups)) + + X = y = groups = np.ones(1) + msg = re.escape( + f"The groups parameter contains fewer than 2 unique groups ({groups})." + " LeaveOneGroupOut expects at least 2." + ) + with pytest.raises(ValueError, match=msg): + next(LeaveOneGroupOut().split(X, y, groups)) + + X = y = groups = np.ones(1) + msg = re.escape( + "The groups parameter contains fewer than (or equal to) n_groups " + f"(3) numbers of unique groups ({groups}). LeavePGroupsOut expects " + "that at least n_groups + 1 (4) unique groups " + "be present" + ) + with pytest.raises(ValueError, match=msg): + next(LeavePGroupsOut(n_groups=3).split(X, y, groups)) + + X = y = groups = np.arange(3) + msg = re.escape( + "The groups parameter contains fewer than (or equal to) n_groups " + f"(3) numbers of unique groups ({groups}). 
LeavePGroupsOut expects " + "that at least n_groups + 1 (4) unique groups " + "be present" + ) + with pytest.raises(ValueError, match=msg): + next(LeavePGroupsOut(n_groups=3).split(X, y, groups)) + + +@ignore_warnings +def test_repeated_cv_value_errors(): + # n_repeats is not integer or <= 0 + for cv in (RepeatedKFold, RepeatedStratifiedKFold): + with pytest.raises(ValueError): + cv(n_repeats=0) + with pytest.raises(ValueError): + cv(n_repeats=1.5) + + +@pytest.mark.parametrize("RepeatedCV", [RepeatedKFold, RepeatedStratifiedKFold]) +def test_repeated_cv_repr(RepeatedCV): + n_splits, n_repeats = 2, 6 + repeated_cv = RepeatedCV(n_splits=n_splits, n_repeats=n_repeats) + repeated_cv_repr = "{}(n_repeats=6, n_splits=2, random_state=None)".format( + repeated_cv.__class__.__name__ + ) + assert repeated_cv_repr == repr(repeated_cv) + + +def test_repeated_kfold_determinstic_split(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + random_state = 258173307 + rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=random_state) + + # split should produce same and deterministic splits on + # each call + for _ in range(3): + splits = rkf.split(X) + train, test = next(splits) + assert_array_equal(train, [2, 4]) + assert_array_equal(test, [0, 1, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 3]) + assert_array_equal(test, [2, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [2, 3, 4]) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4]) + assert_array_equal(test, [0, 1]) + + with pytest.raises(StopIteration): + next(splits) + + +def test_get_n_splits_for_repeated_kfold(): + n_splits = 3 + n_repeats = 4 + rkf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats) + expected_n_splits = n_splits * n_repeats + assert expected_n_splits == rkf.get_n_splits() + + +def test_get_n_splits_for_repeated_stratified_kfold(): + n_splits = 3 + n_repeats = 4 + rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats) + expected_n_splits = n_splits * n_repeats + assert expected_n_splits == rskf.get_n_splits() + + +def test_repeated_stratified_kfold_determinstic_split(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + y = [1, 1, 1, 0, 0] + random_state = 1944695409 + rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, random_state=random_state) + + # split should produce same and deterministic splits on + # each call + for _ in range(3): + splits = rskf.split(X, y) + train, test = next(splits) + assert_array_equal(train, [1, 4]) + assert_array_equal(test, [0, 2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 2, 3]) + assert_array_equal(test, [1, 4]) + + train, test = next(splits) + assert_array_equal(train, [2, 3]) + assert_array_equal(test, [0, 1, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 4]) + assert_array_equal(test, [2, 3]) + + with pytest.raises(StopIteration): + next(splits) + + +def test_train_test_split_errors(): + pytest.raises(ValueError, train_test_split) + + pytest.raises(ValueError, train_test_split, range(3), train_size=1.1) + + pytest.raises(ValueError, train_test_split, range(3), test_size=0.6, train_size=0.6) + pytest.raises( + ValueError, + train_test_split, + range(3), + test_size=np.float32(0.6), + train_size=np.float32(0.6), + ) + pytest.raises(ValueError, train_test_split, range(3), test_size="wrong_type") + pytest.raises(ValueError, train_test_split, range(3), test_size=2, train_size=4) + pytest.raises(TypeError, train_test_split, range(3), 
some_argument=1.1) + pytest.raises(ValueError, train_test_split, range(3), range(42)) + pytest.raises(ValueError, train_test_split, range(10), shuffle=False, stratify=True) + + with pytest.raises( + ValueError, + match=r"train_size=11 should be either positive and " + r"smaller than the number of samples 10 or a " + r"float in the \(0, 1\) range", + ): + train_test_split(range(10), train_size=11, test_size=1) + + +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 7, 3), (8, 8, 2), (0.8, 8, 2)] +) +def test_train_test_split_default_test_size(train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. complement + # train_size unless both are specified. + X_train, X_test = train_test_split(X, train_size=train_size) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "shuffle,stratify", + ( + (True, None), + (True, np.hstack((np.ones(6), np.zeros(4)))), + # stratification only works with shuffling + (False, None), + ), +) +def test_array_api_train_test_split( + shuffle, stratify, array_namespace, device, dtype_name +): + xp = _array_api_for_tests(array_namespace, device) + + X = np.arange(100).reshape((10, 10)) + y = np.arange(10) + + X_np = X.astype(dtype_name) + X_xp = xp.asarray(X_np, device=device) + + y_np = y.astype(dtype_name) + y_xp = xp.asarray(y_np, device=device) + + X_train_np, X_test_np, y_train_np, y_test_np = train_test_split( + X_np, y, random_state=0, shuffle=shuffle, stratify=stratify + ) + with config_context(array_api_dispatch=True): + if stratify is not None: + stratify_xp = xp.asarray(stratify) + else: + stratify_xp = stratify + X_train_xp, X_test_xp, y_train_xp, y_test_xp = train_test_split( + X_xp, y_xp, shuffle=shuffle, stratify=stratify_xp, random_state=0 + ) + + # Check that namespace is preserved, has to happen with + # array_api_dispatch enabled. 
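+        # (`get_namespace` returns a tuple whose first element is the array-API
+        # namespace of its inputs, so the comparisons below fail if
+        # train_test_split silently converted the arrays back to plain numpy.)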
+ assert get_namespace(X_train_xp)[0] == get_namespace(X_xp)[0] + assert get_namespace(X_test_xp)[0] == get_namespace(X_xp)[0] + assert get_namespace(y_train_xp)[0] == get_namespace(y_xp)[0] + assert get_namespace(y_test_xp)[0] == get_namespace(y_xp)[0] + + # Check device and dtype is preserved on output + assert array_api_device(X_train_xp) == array_api_device(X_xp) + assert array_api_device(y_train_xp) == array_api_device(y_xp) + assert array_api_device(X_test_xp) == array_api_device(X_xp) + assert array_api_device(y_test_xp) == array_api_device(y_xp) + + assert X_train_xp.dtype == X_xp.dtype + assert y_train_xp.dtype == y_xp.dtype + assert X_test_xp.dtype == X_xp.dtype + assert y_test_xp.dtype == y_xp.dtype + + assert_allclose( + _convert_to_numpy(X_train_xp, xp=xp), + X_train_np, + ) + assert_allclose( + _convert_to_numpy(X_test_xp, xp=xp), + X_test_np, + ) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_train_test_split(coo_container): + X = np.arange(100).reshape((10, 10)) + X_s = coo_container(X) + y = np.arange(10) + + # simple test + split = train_test_split(X, y, test_size=None, train_size=0.5) + X_train, X_test, y_train, y_test = split + assert len(y_test) == len(y_train) + # test correspondence of X and y + assert_array_equal(X_train[:, 0], y_train * 10) + assert_array_equal(X_test[:, 0], y_test * 10) + + # don't convert lists to anything else by default + split = train_test_split(X, X_s, y.tolist()) + X_train, X_test, X_s_train, X_s_test, y_train, y_test = split + assert isinstance(y_train, list) + assert isinstance(y_test, list) + + # allow nd-arrays + X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) + y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) + split = train_test_split(X_4d, y_3d) + assert split[0].shape == (7, 5, 3, 2) + assert split[1].shape == (3, 5, 3, 2) + assert split[2].shape == (7, 7, 11) + assert split[3].shape == (3, 7, 11) + + # test stratification option + y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) + for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): + train, test = train_test_split( + y, test_size=test_size, stratify=y, random_state=0 + ) + assert len(test) == exp_test_size + assert len(test) + len(train) == len(y) + # check the 1:1 ratio of ones and twos in the data is preserved + assert np.sum(train == 1) == np.sum(train == 2) + + # test unshuffled split + y = np.arange(10) + for test_size in [2, 0.2]: + train, test = train_test_split(y, shuffle=False, test_size=test_size) + assert_array_equal(test, [8, 9]) + assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7]) + + +def test_train_test_split_32bit_overflow(): + """Check for integer overflow on 32-bit platforms. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20774 + """ + + # A number 'n' big enough for expression 'n * n * train_size' to cause + # an overflow for signed 32-bit integer + big_number = 100000 + + # Definition of 'y' is a part of reproduction - population for at least + # one class should be in the same order of magnitude as size of X + X = np.arange(big_number) + y = X > (0.99 * big_number) + + split = train_test_split(X, y, stratify=y, train_size=0.25) + X_train, X_test, y_train, y_test = split + + assert X_train.size + X_test.size == big_number + assert y_train.size + y_test.size == big_number + + +@ignore_warnings +def test_train_test_split_pandas(): + # check train_test_split doesn't destroy pandas dataframe + types = [MockDataFrame] + try: + from pandas import DataFrame + + types.append(DataFrame) + except ImportError: + pass + for InputFeatureType in types: + # X dataframe + X_df = InputFeatureType(X) + X_train, X_test = train_test_split(X_df) + assert isinstance(X_train, InputFeatureType) + assert isinstance(X_test, InputFeatureType) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_train_test_split_sparse(sparse_container): + # check that train_test_split converts scipy sparse matrices + # to csr, as stated in the documentation + X = np.arange(100).reshape((10, 10)) + X_s = sparse_container(X) + X_train, X_test = train_test_split(X_s) + assert issparse(X_train) and X_train.format == "csr" + assert issparse(X_test) and X_test.format == "csr" + + +def test_train_test_split_mock_pandas(): + # X mock dataframe + X_df = MockDataFrame(X) + X_train, X_test = train_test_split(X_df) + assert isinstance(X_train, MockDataFrame) + assert isinstance(X_test, MockDataFrame) + X_train_arr, X_test_arr = train_test_split(X_df) + + +def test_train_test_split_list_input(): + # Check that when y is a list / list of string labels, it works. + X = np.ones(7) + y1 = ["1"] * 4 + ["0"] * 3 + y2 = np.hstack((np.ones(4), np.zeros(3))) + y3 = y2.tolist() + + for stratify in (True, False): + X_train1, X_test1, y_train1, y_test1 = train_test_split( + X, y1, stratify=y1 if stratify else None, random_state=0 + ) + X_train2, X_test2, y_train2, y_test2 = train_test_split( + X, y2, stratify=y2 if stratify else None, random_state=0 + ) + X_train3, X_test3, y_train3, y_test3 = train_test_split( + X, y3, stratify=y3 if stratify else None, random_state=0 + ) + + np.testing.assert_equal(X_train1, X_train2) + np.testing.assert_equal(y_train2, y_train3) + np.testing.assert_equal(X_test1, X_test3) + np.testing.assert_equal(y_test3, y_test2) + + +@pytest.mark.parametrize( + "test_size, train_size", + [(2.0, None), (1.0, None), (0.1, 0.95), (None, 1j), (11, None), (10, None), (8, 3)], +) +def test_shufflesplit_errors(test_size, train_size): + with pytest.raises(ValueError): + next(ShuffleSplit(test_size=test_size, train_size=train_size).split(X)) + + +def test_shufflesplit_reproducible(): + # Check that iterating twice on the ShuffleSplit gives the same + # sequence of train-test when the random_state is given + ss = ShuffleSplit(random_state=21) + assert_array_equal([a for a, b in ss.split(X)], [a for a, b in ss.split(X)]) + + +def test_stratifiedshufflesplit_list_input(): + # Check that when y is a list / list of string labels, it works. 
+ sss = StratifiedShuffleSplit(test_size=2, random_state=42) + X = np.ones(7) + y1 = ["1"] * 4 + ["0"] * 3 + y2 = np.hstack((np.ones(4), np.zeros(3))) + y3 = y2.tolist() + + np.testing.assert_equal(list(sss.split(X, y1)), list(sss.split(X, y2))) + np.testing.assert_equal(list(sss.split(X, y3)), list(sss.split(X, y2))) + + +def test_train_test_split_allow_nans(): + # Check that train_test_split allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + train_test_split(X, y, test_size=0.2, random_state=42) + + +def test_check_cv(): + X = np.ones(9) + cv = check_cv(3, classifier=False) + # Use numpy.testing.assert_equal which recursively compares + # lists of lists + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) + cv = check_cv(3, y_binary, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_binary)), list(cv.split(X, y_binary)) + ) + + y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) + cv = check_cv(3, y_multiclass, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_multiclass)), list(cv.split(X, y_multiclass)) + ) + # also works with 2d multiclass + y_multiclass_2d = y_multiclass.reshape(-1, 1) + cv = check_cv(3, y_multiclass_2d, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_multiclass_2d)), + list(cv.split(X, y_multiclass_2d)), + ) + + assert not np.all( + next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] + == next(KFold(3).split(X, y_multiclass_2d))[0] + ) + + X = np.ones(5) + y_multilabel = np.array( + [[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 1], [0, 0, 1, 0]] + ) + cv = check_cv(3, y_multilabel, classifier=True) + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) + cv = check_cv(3, y_multioutput, classifier=True) + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + with pytest.raises(ValueError): + check_cv(cv="lolo") + + +def test_cv_iterable_wrapper(): + kf_iter = KFold().split(X, y) + kf_iter_wrapped = check_cv(kf_iter) + # Since the wrapped iterable is enlisted and stored, + # split can be called any number of times to produce + # consistent results. 
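+    # (check_cv wraps a bare iterable of (train, test) splits in a helper that
+    # stores `list(iterable)`, so the generator above is consumed once and the
+    # recorded folds are replayed on every later call to split().)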
+ np.testing.assert_equal( + list(kf_iter_wrapped.split(X, y)), list(kf_iter_wrapped.split(X, y)) + ) + # If the splits are randomized, successive calls to split yields different + # results + kf_randomized_iter = KFold(shuffle=True, random_state=0).split(X, y) + kf_randomized_iter_wrapped = check_cv(kf_randomized_iter) + # numpy's assert_array_equal properly compares nested lists + np.testing.assert_equal( + list(kf_randomized_iter_wrapped.split(X, y)), + list(kf_randomized_iter_wrapped.split(X, y)), + ) + + try: + splits_are_equal = True + np.testing.assert_equal( + list(kf_iter_wrapped.split(X, y)), + list(kf_randomized_iter_wrapped.split(X, y)), + ) + except AssertionError: + splits_are_equal = False + assert not splits_are_equal, ( + "If the splits are randomized, " + "successive calls to split should yield different results" + ) + + +@pytest.mark.parametrize("kfold", [GroupKFold, StratifiedGroupKFold]) +def test_group_kfold(kfold): + rng = np.random.RandomState(0) + + # Parameters of the test + n_groups = 15 + n_samples = 1000 + n_splits = 5 + + X = y = np.ones(n_samples) + + # Construct the test data + tolerance = 0.05 * n_samples # 5 percent error allowed + groups = rng.randint(0, n_groups, n_samples) + + ideal_n_groups_per_fold = n_samples // n_splits + + len(np.unique(groups)) + # Get the test fold indices from the test set indices of each fold + folds = np.zeros(n_samples) + lkf = kfold(n_splits=n_splits) + for i, (_, test) in enumerate(lkf.split(X, y, groups)): + folds[test] = i + + # Check that folds have approximately the same size + assert len(folds) == len(groups) + for i in np.unique(folds): + assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold) + + # Check that each group appears only in 1 fold + for group in np.unique(groups): + assert len(np.unique(folds[groups == group])) == 1 + + # Check that no group is on both sides of the split + groups = np.asarray(groups, dtype=object) + for train, test in lkf.split(X, y, groups): + assert len(np.intersect1d(groups[train], groups[test])) == 0 + + # Construct the test data + groups = np.array( + [ + "Albert", + "Jean", + "Bertrand", + "Michel", + "Jean", + "Francis", + "Robert", + "Michel", + "Rachel", + "Lois", + "Michelle", + "Bernard", + "Marion", + "Laura", + "Jean", + "Rachel", + "Franck", + "John", + "Gael", + "Anna", + "Alix", + "Robert", + "Marion", + "David", + "Tony", + "Abel", + "Becky", + "Madmood", + "Cary", + "Mary", + "Alexandre", + "David", + "Francis", + "Barack", + "Abdoul", + "Rasha", + "Xi", + "Silvia", + ] + ) + + n_groups = len(np.unique(groups)) + n_samples = len(groups) + n_splits = 5 + tolerance = 0.05 * n_samples # 5 percent error allowed + ideal_n_groups_per_fold = n_samples // n_splits + + X = y = np.ones(n_samples) + + # Get the test fold indices from the test set indices of each fold + folds = np.zeros(n_samples) + for i, (_, test) in enumerate(lkf.split(X, y, groups)): + folds[test] = i + + # Check that folds have approximately the same size + assert len(folds) == len(groups) + for i in np.unique(folds): + assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold) + + # Check that each group appears only in 1 fold + with warnings.catch_warnings(): + warnings.simplefilter("ignore", FutureWarning) + for group in np.unique(groups): + assert len(np.unique(folds[groups == group])) == 1 + + # Check that no group is on both sides of the split + groups = np.asarray(groups, dtype=object) + for train, test in lkf.split(X, y, groups): + assert len(np.intersect1d(groups[train], 
groups[test])) == 0 + + # groups can also be a list + cv_iter = list(lkf.split(X, y, groups.tolist())) + for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups), cv_iter): + assert_array_equal(train1, train2) + assert_array_equal(test1, test2) + + # Should fail if there are more folds than groups + groups = np.array([1, 1, 1, 2, 2]) + X = y = np.ones(len(groups)) + with pytest.raises(ValueError, match="Cannot have number of splits.*greater"): + next(GroupKFold(n_splits=3).split(X, y, groups)) + + +def test_time_series_cv(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]] + + # Should fail if there are more folds than samples + with pytest.raises(ValueError, match="Cannot have number of folds.*greater"): + next(TimeSeriesSplit(n_splits=7).split(X)) + + tscv = TimeSeriesSplit(2) + + # Manually check that Time Series CV preserves the data + # ordering on toy datasets + splits = tscv.split(X[:-1]) + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [4, 5]) + + splits = TimeSeriesSplit(2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2]) + assert_array_equal(test, [3, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [5, 6]) + + # Check get_n_splits returns the correct number of splits + splits = TimeSeriesSplit(2).split(X) + n_splits_actual = len(list(splits)) + assert n_splits_actual == tscv.get_n_splits() + assert n_splits_actual == 2 + + +def _check_time_series_max_train_size(splits, check_splits, max_train_size): + for (train, test), (check_train, check_test) in zip(splits, check_splits): + assert_array_equal(test, check_test) + assert len(check_train) <= max_train_size + suffix_start = max(len(train) - max_train_size, 0) + assert_array_equal(check_train, train[suffix_start:]) + + +def test_time_series_max_train_size(): + X = np.zeros((6, 1)) + splits = TimeSeriesSplit(n_splits=3).split(X) + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=3) + + # Test for the case where the size of a fold is greater than max_train_size + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=2) + + # Test for the case where the size of each fold is less than max_train_size + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=2) + + +def test_time_series_test_size(): + X = np.zeros((10, 1)) + + # Test alone + splits = TimeSeriesSplit(n_splits=3, test_size=3).split(X) + + train, test = next(splits) + assert_array_equal(train, [0]) + assert_array_equal(test, [1, 2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6]) + assert_array_equal(test, [7, 8, 9]) + + # Test with max_train_size + splits = TimeSeriesSplit(n_splits=2, test_size=2, max_train_size=4).split(X) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4, 5]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [4, 5, 6, 7]) + assert_array_equal(test, [8, 9]) + + # Should fail with not enough data points for configuration 
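+    # (5 splits with test_size=2 would need 10 test samples out of 10 total,
+    # leaving an empty training set for the first split.)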
+ with pytest.raises(ValueError, match="Too many splits.*with test_size"): + splits = TimeSeriesSplit(n_splits=5, test_size=2).split(X) + next(splits) + + +def test_time_series_gap(): + X = np.zeros((10, 1)) + + # Test alone + splits = TimeSeriesSplit(n_splits=2, gap=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [7, 8, 9]) + + # Test with max_train_size + splits = TimeSeriesSplit(n_splits=3, gap=2, max_train_size=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5]) + + train, test = next(splits) + assert_array_equal(train, [2, 3]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [4, 5]) + assert_array_equal(test, [8, 9]) + + # Test with test_size + splits = TimeSeriesSplit(n_splits=2, gap=2, max_train_size=4, test_size=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4, 5]) + assert_array_equal(test, [8, 9]) + + # Test with additional test_size + splits = TimeSeriesSplit(n_splits=2, gap=2, test_size=3).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [7, 8, 9]) + + # Verify proper error is thrown + with pytest.raises(ValueError, match="Too many splits.*and gap"): + splits = TimeSeriesSplit(n_splits=4, gap=2).split(X) + next(splits) + + +def test_nested_cv(): + # Test if nested cross validation works with different combinations of cv + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=15, n_classes=2, random_state=0) + groups = rng.randint(0, 5, 15) + + cvs = [ + LeaveOneGroupOut(), + StratifiedKFold(n_splits=2), + LeaveOneOut(), + GroupKFold(n_splits=3), + StratifiedKFold(), + StratifiedGroupKFold(), + StratifiedShuffleSplit(n_splits=3, random_state=0), + ] + + for inner_cv, outer_cv in combinations_with_replacement(cvs, 2): + gs = GridSearchCV( + DummyClassifier(), + param_grid={"strategy": ["stratified", "most_frequent"]}, + cv=inner_cv, + error_score="raise", + ) + cross_val_score( + gs, X=X, y=y, groups=groups, cv=outer_cv, params={"groups": groups} + ) + + +def test_build_repr(): + class MockSplitter: + def __init__(self, a, b=0, c=None): + self.a = a + self.b = b + self.c = c + + def __repr__(self): + return _build_repr(self) + + assert repr(MockSplitter(5, 6)) == "MockSplitter(a=5, b=6, c=None)" + + +@pytest.mark.parametrize( + "CVSplitter", (ShuffleSplit, GroupShuffleSplit, StratifiedShuffleSplit) +) +def test_shuffle_split_empty_trainset(CVSplitter): + cv = CVSplitter(test_size=0.99) + X, y = [[1]], [0] # 1 sample + with pytest.raises( + ValueError, + match=( + "With n_samples=1, test_size=0.99 and train_size=None, " + "the resulting train set will be empty" + ), + ): + next(cv.split(X, y, groups=[1])) + + +def test_train_test_split_empty_trainset(): + (X,) = [[1]] # 1 sample + with pytest.raises( + ValueError, + match=( + "With n_samples=1, test_size=0.99 and train_size=None, " + "the resulting train set will be empty" + ), + ): + train_test_split(X, test_size=0.99) + + X = [[1], [1], [1]] # 3 samples, ask for more than 2 thirds + with pytest.raises( + ValueError, + match=( + "With 
n_samples=3, test_size=0.67 and train_size=None, " + "the resulting train set will be empty" + ), + ): + train_test_split(X, test_size=0.67) + + +def test_leave_one_out_empty_trainset(): + # LeaveOneGroup out expect at least 2 groups so no need to check + cv = LeaveOneOut() + X, y = [[1]], [0] # 1 sample + with pytest.raises(ValueError, match="Cannot perform LeaveOneOut with n_samples=1"): + next(cv.split(X, y)) + + +def test_leave_p_out_empty_trainset(): + # No need to check LeavePGroupsOut + cv = LeavePOut(p=2) + X, y = [[1], [2]], [0, 3] # 2 samples + with pytest.raises( + ValueError, match="p=2 must be strictly less than the number of samples=2" + ): + next(cv.split(X, y, groups=[1, 2])) + + +@pytest.mark.parametrize("Klass", (KFold, StratifiedKFold, StratifiedGroupKFold)) +def test_random_state_shuffle_false(Klass): + # passing a non-default random_state when shuffle=False makes no sense + with pytest.raises(ValueError, match="has no effect since shuffle is False"): + Klass(3, shuffle=False, random_state=0) + + +@pytest.mark.parametrize( + "cv, expected", + [ + (KFold(), True), + (KFold(shuffle=True, random_state=123), True), + (StratifiedKFold(), True), + (StratifiedKFold(shuffle=True, random_state=123), True), + (StratifiedGroupKFold(shuffle=True, random_state=123), True), + (StratifiedGroupKFold(), True), + (RepeatedKFold(random_state=123), True), + (RepeatedStratifiedKFold(random_state=123), True), + (ShuffleSplit(random_state=123), True), + (GroupShuffleSplit(random_state=123), True), + (StratifiedShuffleSplit(random_state=123), True), + (GroupKFold(), True), + (TimeSeriesSplit(), True), + (LeaveOneOut(), True), + (LeaveOneGroupOut(), True), + (LeavePGroupsOut(n_groups=2), True), + (LeavePOut(p=2), True), + (KFold(shuffle=True, random_state=None), False), + (KFold(shuffle=True, random_state=None), False), + (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), + (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), + (RepeatedKFold(random_state=None), False), + (RepeatedKFold(random_state=np.random.RandomState(0)), False), + (RepeatedStratifiedKFold(random_state=None), False), + (RepeatedStratifiedKFold(random_state=np.random.RandomState(0)), False), + (ShuffleSplit(random_state=None), False), + (ShuffleSplit(random_state=np.random.RandomState(0)), False), + (GroupShuffleSplit(random_state=None), False), + (GroupShuffleSplit(random_state=np.random.RandomState(0)), False), + (StratifiedShuffleSplit(random_state=None), False), + (StratifiedShuffleSplit(random_state=np.random.RandomState(0)), False), + ], +) +def test_yields_constant_splits(cv, expected): + assert _yields_constant_splits(cv) == expected + + +@pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS]) +def test_splitter_get_metadata_routing(cv): + """Check get_metadata_routing returns the correct MetadataRouter.""" + assert hasattr(cv, "get_metadata_routing") + metadata = cv.get_metadata_routing() + if cv in GROUP_SPLITTERS: + assert metadata.split.requests["groups"] is True + elif cv in NO_GROUP_SPLITTERS: + assert not metadata.split.requests + + assert_request_is_empty(metadata, exclude=["split"]) + + +@pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS]) +def test_splitter_set_split_request(cv): + """Check set_split_request is defined for group splitters and not for others.""" + if cv in GROUP_SPLITTERS: + assert hasattr(cv, "set_split_request") + elif cv in NO_GROUP_SPLITTERS: + assert not hasattr(cv, 
"set_split_request") diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py new file mode 100644 index 0000000000000000000000000000000000000000..6c89f89afa68481c761e700bb231a7dafb452c65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py @@ -0,0 +1,848 @@ +from math import ceil + +import numpy as np +import pytest +from scipy.stats import expon, norm, randint + +from sklearn.datasets import make_classification +from sklearn.dummy import DummyClassifier +from sklearn.experimental import enable_halving_search_cv # noqa +from sklearn.model_selection import ( + GroupKFold, + GroupShuffleSplit, + HalvingGridSearchCV, + HalvingRandomSearchCV, + KFold, + LeaveOneGroupOut, + LeavePGroupsOut, + ShuffleSplit, + StratifiedKFold, + StratifiedShuffleSplit, +) +from sklearn.model_selection._search_successive_halving import ( + _SubsampleMetaSplitter, + _top_k, +) +from sklearn.model_selection.tests.test_search import ( + check_cv_results_array_types, + check_cv_results_keys, +) +from sklearn.svm import SVC, LinearSVC + + +class FastClassifier(DummyClassifier): + """Dummy classifier that accepts parameters a, b, ... z. + + These parameter don't affect the predictions and are useful for fast + grid searching.""" + + # update the constraints such that we accept all parameters from a to z + _parameter_constraints: dict = { + **DummyClassifier._parameter_constraints, + **{ + chr(key): "no_validation" # type: ignore + for key in range(ord("a"), ord("z") + 1) + }, + } + + def __init__( + self, strategy="stratified", random_state=None, constant=None, **kwargs + ): + super().__init__( + strategy=strategy, random_state=random_state, constant=constant + ) + + def get_params(self, deep=False): + params = super().get_params(deep=deep) + for char in range(ord("a"), ord("z") + 1): + params[chr(char)] = "whatever" + return params + + +class SometimesFailClassifier(DummyClassifier): + def __init__( + self, + strategy="stratified", + random_state=None, + constant=None, + n_estimators=10, + fail_fit=False, + fail_predict=False, + a=0, + ): + self.fail_fit = fail_fit + self.fail_predict = fail_predict + self.n_estimators = n_estimators + self.a = a + + super().__init__( + strategy=strategy, random_state=random_state, constant=constant + ) + + def fit(self, X, y): + if self.fail_fit: + raise Exception("fitting failed") + return super().fit(X, y) + + def predict(self, X): + if self.fail_predict: + raise Exception("predict failed") + return super().predict(X) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.FitFailedWarning") +@pytest.mark.filterwarnings("ignore:Scoring failed:UserWarning") +@pytest.mark.filterwarnings("ignore:One or more of the:UserWarning") +@pytest.mark.parametrize("HalvingSearch", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize("fail_at", ("fit", "predict")) +def test_nan_handling(HalvingSearch, fail_at): + """Check the selection of the best scores in presence of failure represented by + NaN values.""" + n_samples = 1_000 + X, y = make_classification(n_samples=n_samples, random_state=0) + + search = HalvingSearch( + SometimesFailClassifier(), + {f"fail_{fail_at}": [False, True], "a": range(3)}, + resource="n_estimators", + max_resources=6, + min_resources=1, + factor=2, + ) + + search.fit(X, y) + + # estimators that failed during fit/predict should 
always rank lower + # than ones where the fit/predict succeeded + assert not search.best_params_[f"fail_{fail_at}"] + scores = search.cv_results_["mean_test_score"] + ranks = search.cv_results_["rank_test_score"] + + # some scores should be NaN + assert np.isnan(scores).any() + + unique_nan_ranks = np.unique(ranks[np.isnan(scores)]) + # all NaN scores should have the same rank + assert unique_nan_ranks.shape[0] == 1 + # NaNs should have the lowest rank + assert (unique_nan_ranks[0] >= ranks).all() + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + ( + "aggressive_elimination," + "max_resources," + "expected_n_iterations," + "expected_n_required_iterations," + "expected_n_possible_iterations," + "expected_n_remaining_candidates," + "expected_n_candidates," + "expected_n_resources," + ), + [ + # notice how it loops at the beginning + # also, the number of candidates evaluated at the last iteration is + # <= factor + (True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]), + # no aggressive elimination: we end up with less iterations, and + # the number of candidates at the last iter is > factor, which isn't + # ideal + (False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]), + # # When the amount of resource isn't limited, aggressive_elimination + # # has no effect. Here the default min_resources='exhaust' will take + # # over. + (True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), + (False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), + ], +) +def test_aggressive_elimination( + Est, + aggressive_elimination, + max_resources, + expected_n_iterations, + expected_n_required_iterations, + expected_n_possible_iterations, + expected_n_remaining_candidates, + expected_n_candidates, + expected_n_resources, +): + # Test the aggressive_elimination parameter. 
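+    # Rough arithmetic behind the expectations above: with factor=3 and 60
+    # candidates, 4 elimination rounds are needed to get below `factor`
+    # survivors (60 -> 20 -> 7 -> 3), but with max_resources=180 only 3
+    # increasing budgets fit (20, 60, 180).  aggressive_elimination=True
+    # re-runs the earliest round(s) at the smallest budget (hence the repeated
+    # 20 in [20, 20, 60, 180]) so the last round sees at most `factor`
+    # candidates; with False the search stops after 3 rounds and the last
+    # round still evaluates 7 (> factor) candidates.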
+ + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifier() + + if max_resources == "limited": + max_resources = 180 + else: + max_resources = n_samples + + sh = Est( + base_estimator, + param_grid, + aggressive_elimination=aggressive_elimination, + max_resources=max_resources, + factor=3, + ) + sh.set_params(verbose=True) # just for test coverage + + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + assert sh.n_iterations_ == expected_n_iterations + assert sh.n_required_iterations_ == expected_n_required_iterations + assert sh.n_possible_iterations_ == expected_n_possible_iterations + assert sh.n_resources_ == expected_n_resources + assert sh.n_candidates_ == expected_n_candidates + assert sh.n_remaining_candidates_ == expected_n_remaining_candidates + assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_ + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + ( + "min_resources," + "max_resources," + "expected_n_iterations," + "expected_n_possible_iterations," + "expected_n_resources," + ), + [ + # with enough resources + ("smallest", "auto", 2, 4, [20, 60]), + # with enough resources but min_resources set manually + (50, "auto", 2, 3, [50, 150]), + # without enough resources, only one iteration can be done + ("smallest", 30, 1, 1, [20]), + # with exhaust: use as much resources as possible at the last iter + ("exhaust", "auto", 2, 2, [333, 999]), + ("exhaust", 1000, 2, 2, [333, 999]), + ("exhaust", 999, 2, 2, [333, 999]), + ("exhaust", 600, 2, 2, [200, 600]), + ("exhaust", 599, 2, 2, [199, 597]), + ("exhaust", 300, 2, 2, [100, 300]), + ("exhaust", 60, 2, 2, [20, 60]), + ("exhaust", 50, 1, 1, [20]), + ("exhaust", 20, 1, 1, [20]), + ], +) +def test_min_max_resources( + Est, + min_resources, + max_resources, + expected_n_iterations, + expected_n_possible_iterations, + expected_n_resources, +): + # Test the min_resources and max_resources parameters, and how they affect + # the number of resources used at each iteration + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": [1, 2], "b": [1, 2, 3]} + base_estimator = FastClassifier() + + sh = Est( + base_estimator, + param_grid, + factor=3, + min_resources=min_resources, + max_resources=max_resources, + ) + if Est is HalvingRandomSearchCV: + sh.set_params(n_candidates=6) # same number as with the grid + + sh.fit(X, y) + + expected_n_required_iterations = 2 # given 6 combinations and factor = 3 + assert sh.n_iterations_ == expected_n_iterations + assert sh.n_required_iterations_ == expected_n_required_iterations + assert sh.n_possible_iterations_ == expected_n_possible_iterations + assert sh.n_resources_ == expected_n_resources + if min_resources == "exhaust": + assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_) + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +@pytest.mark.parametrize( + "max_resources, n_iterations, n_possible_iterations", + [ + ("auto", 5, 9), # all resources are used + (1024, 5, 9), + (700, 5, 8), + (512, 5, 8), + (511, 5, 7), + (32, 4, 4), + (31, 3, 3), + (16, 3, 3), + (4, 1, 1), # max_resources == min_resources, only one iteration is + # possible + ], +) +def test_n_iterations(Est, max_resources, 
n_iterations, n_possible_iterations): + # test the number of actual iterations that were run depending on + # max_resources + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=1) + param_grid = {"a": [1, 2], "b": list(range(10))} + base_estimator = FastClassifier() + factor = 2 + + sh = Est( + base_estimator, + param_grid, + cv=2, + factor=factor, + max_resources=max_resources, + min_resources=4, + ) + if Est is HalvingRandomSearchCV: + sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV + sh.fit(X, y) + assert sh.n_required_iterations_ == 5 + assert sh.n_iterations_ == n_iterations + assert sh.n_possible_iterations_ == n_possible_iterations + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +def test_resource_parameter(Est): + # Test the resource parameter + + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": [1, 2], "b": list(range(10))} + base_estimator = FastClassifier() + sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3) + sh.fit(X, y) + assert set(sh.n_resources_) == set([1, 3, 9]) + for r_i, params, param_c in zip( + sh.cv_results_["n_resources"], + sh.cv_results_["params"], + sh.cv_results_["param_c"], + ): + assert r_i == params["c"] == param_c + + with pytest.raises( + ValueError, match="Cannot use resource=1234 which is not supported " + ): + sh = HalvingGridSearchCV( + base_estimator, param_grid, cv=2, resource="1234", max_resources=10 + ) + sh.fit(X, y) + + with pytest.raises( + ValueError, + match=( + "Cannot use parameter c as the resource since it is part " + "of the searched parameters." + ), + ): + param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]} + sh = HalvingGridSearchCV( + base_estimator, param_grid, cv=2, resource="c", max_resources=10 + ) + sh.fit(X, y) + + +@pytest.mark.parametrize( + "max_resources, n_candidates, expected_n_candidates", + [ + (512, "exhaust", 128), # generate exactly as much as needed + (32, "exhaust", 8), + (32, 8, 8), + (32, 7, 7), # ask for less than what we could + (32, 9, 9), # ask for more than 'reasonable' + ], +) +def test_random_search(max_resources, n_candidates, expected_n_candidates): + # Test random search and make sure the number of generated candidates is + # as expected + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": norm, "b": norm} + base_estimator = FastClassifier() + sh = HalvingRandomSearchCV( + base_estimator, + param_grid, + n_candidates=n_candidates, + cv=2, + max_resources=max_resources, + factor=2, + min_resources=4, + ) + sh.fit(X, y) + assert sh.n_candidates_[0] == expected_n_candidates + if n_candidates == "exhaust": + # Make sure 'exhaust' makes the last iteration use as much resources as + # we can + assert sh.n_resources_[-1] == max_resources + + +@pytest.mark.parametrize( + "param_distributions, expected_n_candidates", + [ + ({"a": [1, 2]}, 2), # all lists, sample less than n_candidates + ({"a": randint(1, 3)}, 10), # not all list, respect n_candidates + ], +) +def test_random_search_discrete_distributions( + param_distributions, expected_n_candidates +): + # Make sure random search samples the appropriate number of candidates when + # we ask for more than what's possible. How many parameters are sampled + # depends whether the distributions are 'all lists' or not (see + # ParameterSampler for details). 
This is somewhat redundant with the checks + # in ParameterSampler but interaction bugs were discovered during + # development of SH + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=0) + base_estimator = FastClassifier() + sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10) + sh.fit(X, y) + assert sh.n_candidates_[0] == expected_n_candidates + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + "params, expected_error_message", + [ + ( + {"resource": "not_a_parameter"}, + "Cannot use resource=not_a_parameter which is not supported", + ), + ( + {"resource": "a", "max_resources": 100}, + "Cannot use parameter a as the resource since it is part of", + ), + ( + {"max_resources": "auto", "resource": "b"}, + "resource can only be 'n_samples' when max_resources='auto'", + ), + ( + {"min_resources": 15, "max_resources": 14}, + "min_resources_=15 is greater than max_resources_=14", + ), + ({"cv": KFold(shuffle=True)}, "must yield consistent folds"), + ({"cv": ShuffleSplit()}, "must yield consistent folds"), + ], +) +def test_input_errors(Est, params, expected_error_message): + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X, y = make_classification(100) + + sh = Est(base_estimator, param_grid, **params) + + with pytest.raises(ValueError, match=expected_error_message): + sh.fit(X, y) + + +@pytest.mark.parametrize( + "params, expected_error_message", + [ + ( + {"n_candidates": "exhaust", "min_resources": "exhaust"}, + "cannot be both set to 'exhaust'", + ), + ], +) +def test_input_errors_randomized(params, expected_error_message): + # tests specific to HalvingRandomSearchCV + + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X, y = make_classification(100) + + sh = HalvingRandomSearchCV(base_estimator, param_grid, **params) + + with pytest.raises(ValueError, match=expected_error_message): + sh.fit(X, y) + + +@pytest.mark.parametrize( + "fraction, subsample_test, expected_train_size, expected_test_size", + [ + (0.5, True, 40, 10), + (0.5, False, 40, 20), + (0.2, True, 16, 4), + (0.2, False, 16, 20), + ], +) +def test_subsample_splitter_shapes( + fraction, subsample_test, expected_train_size, expected_test_size +): + # Make sure splits returned by SubsampleMetaSplitter are of appropriate + # size + + n_samples = 100 + X, y = make_classification(n_samples) + cv = _SubsampleMetaSplitter( + base_cv=KFold(5), + fraction=fraction, + subsample_test=subsample_test, + random_state=None, + ) + + for train, test in cv.split(X, y): + assert train.shape[0] == expected_train_size + assert test.shape[0] == expected_test_size + if subsample_test: + assert train.shape[0] + test.shape[0] == int(n_samples * fraction) + else: + assert test.shape[0] == n_samples // cv.base_cv.get_n_splits() + + +@pytest.mark.parametrize("subsample_test", (True, False)) +def test_subsample_splitter_determinism(subsample_test): + # Make sure _SubsampleMetaSplitter is consistent across calls to split(): + # - we're OK having training sets differ (they're always sampled with a + # different fraction anyway) + # - when we don't subsample the test set, we want it to be always the same. + # This check is the most important. This is ensured by the determinism + # of the base_cv. 
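+    # Informal recap of the behaviour exercised here and in the shape test
+    # above (not an exact restatement of the implementation): for each
+    # (train, test) split of base_cv the meta-splitter draws a random subset
+    # of the training indices of size ``fraction * len(train)``; the test
+    # indices are subsampled with the same fraction when subsample_test=True
+    # and returned unchanged otherwise.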
+ + # Note: we could force both train and test splits to be always the same if + # we drew an int seed in _SubsampleMetaSplitter.__init__ + + n_samples = 100 + X, y = make_classification(n_samples) + cv = _SubsampleMetaSplitter( + base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None + ) + + folds_a = list(cv.split(X, y, groups=None)) + folds_b = list(cv.split(X, y, groups=None)) + + for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b): + assert not np.all(train_a == train_b) + + if subsample_test: + assert not np.all(test_a == test_b) + else: + assert np.all(test_a == test_b) + assert np.all(X[test_a] == X[test_b]) + + +@pytest.mark.parametrize( + "k, itr, expected", + [ + (1, 0, ["c"]), + (2, 0, ["a", "c"]), + (4, 0, ["d", "b", "a", "c"]), + (10, 0, ["d", "b", "a", "c"]), + (1, 1, ["e"]), + (2, 1, ["f", "e"]), + (10, 1, ["f", "e"]), + (1, 2, ["i"]), + (10, 2, ["g", "h", "i"]), + ], +) +def test_top_k(k, itr, expected): + results = { # this isn't a 'real world' result dict + "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2], + "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9], + "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"], + } + got = _top_k(results, k=k, itr=itr) + assert np.all(got == expected) + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +def test_cv_results(Est): + # test that the cv_results_ matches correctly the logic of the + # tournament: in particular that the candidates continued in each + # successive iteration are those that were best in the previous iteration + pd = pytest.importorskip("pandas") + + rng = np.random.RandomState(0) + + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifier() + + # generate random scores: we want to avoid ties, which would otherwise + # mess with the ordering and make testing harder + def scorer(est, X, y): + return rng.rand() + + sh = Est(base_estimator, param_grid, factor=2, scoring=scorer) + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + # non-regression check for + # https://github.com/scikit-learn/scikit-learn/issues/19203 + assert isinstance(sh.cv_results_["iter"], np.ndarray) + assert isinstance(sh.cv_results_["n_resources"], np.ndarray) + + cv_results_df = pd.DataFrame(sh.cv_results_) + + # just make sure we don't have ties + assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df) + + cv_results_df["params_str"] = cv_results_df["params"].apply(str) + table = cv_results_df.pivot( + index="params_str", columns="iter", values="mean_test_score" + ) + + # table looks like something like this: + # iter 0 1 2 3 4 5 + # params_str + # {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN + # {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN + # {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN + # {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN + # {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN + # ... + + # where a NaN indicates that the candidate wasn't evaluated at a given + # iteration, because it wasn't part of the top-K at some previous + # iteration. We here make sure that candidates that aren't in the top-k at + # any given iteration are indeed not evaluated at the subsequent + # iterations. 
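+    # The loop below walks the pivot table column by column: once a candidate
+    # turns NaN it must stay NaN in every later iteration, the number of
+    # surviving (non-NaN) entries at iteration it + 1 must match
+    # sh.n_candidates_[it + 1], and every candidate discarded at iteration it
+    # must have scored strictly below every survivor.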
+ nan_mask = pd.isna(table) + n_iter = sh.n_iterations_ + for it in range(n_iter - 1): + already_discarded_mask = nan_mask[it] + + # make sure that if a candidate is already discarded, we don't evaluate + # it later + assert ( + already_discarded_mask & nan_mask[it + 1] == already_discarded_mask + ).all() + + # make sure that the number of discarded candidate is correct + discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1] + kept_mask = ~already_discarded_mask & ~discarded_now_mask + assert kept_mask.sum() == sh.n_candidates_[it + 1] + + # make sure that all discarded candidates have a lower score than the + # kept candidates + discarded_max_score = table[it].where(discarded_now_mask).max() + kept_min_score = table[it].where(kept_mask).min() + assert discarded_max_score < kept_min_score + + # We now make sure that the best candidate is chosen only from the last + # iteration. + # We also make sure this is true even if there were higher scores in + # earlier rounds (this isn't generally the case, but worth ensuring it's + # possible). + + last_iter = cv_results_df["iter"].max() + idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][ + "mean_test_score" + ].idxmax() + idx_best_all_iters = cv_results_df["mean_test_score"].idxmax() + + assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"] + assert ( + cv_results_df.iloc[idx_best_last_iter]["mean_test_score"] + < cv_results_df.iloc[idx_best_all_iters]["mean_test_score"] + ) + assert ( + cv_results_df.iloc[idx_best_last_iter]["params"] + != cv_results_df.iloc[idx_best_all_iters]["params"] + ) + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +def test_base_estimator_inputs(Est): + # make sure that the base estimators are passed the correct parameters and + # number of samples at each iteration. + pd = pytest.importorskip("pandas") + + passed_n_samples_fit = [] + passed_n_samples_predict = [] + passed_params = [] + + class FastClassifierBookKeeping(FastClassifier): + def fit(self, X, y): + passed_n_samples_fit.append(X.shape[0]) + return super().fit(X, y) + + def predict(self, X): + passed_n_samples_predict.append(X.shape[0]) + return super().predict(X) + + def set_params(self, **params): + passed_params.append(params) + return super().set_params(**params) + + n_samples = 1024 + n_splits = 2 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifierBookKeeping() + + sh = Est( + base_estimator, + param_grid, + factor=2, + cv=n_splits, + return_train_score=False, + refit=False, + ) + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + assert len(passed_n_samples_fit) == len(passed_n_samples_predict) + passed_n_samples = [ + x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict) + ] + + # Lists are of length n_splits * n_iter * n_candidates_at_i. + # Each chunk of size n_splits corresponds to the n_splits folds for the + # same candidate at the same iteration, so they contain equal values. 
We + # subsample such that the lists are of length n_iter * n_candidates_at_it + passed_n_samples = passed_n_samples[::n_splits] + passed_params = passed_params[::n_splits] + + cv_results_df = pd.DataFrame(sh.cv_results_) + + assert len(passed_params) == len(passed_n_samples) == len(cv_results_df) + + uniques, counts = np.unique(passed_n_samples, return_counts=True) + assert (sh.n_resources_ == uniques).all() + assert (sh.n_candidates_ == counts).all() + + assert (cv_results_df["params"] == passed_params).all() + assert (cv_results_df["n_resources"] == passed_n_samples).all() + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +def test_groups_support(Est): + # Check if ValueError (when groups is None) propagates to + # HalvingGridSearchCV and HalvingRandomSearchCV + # And also check if groups is correctly passed to the cv object + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=50, n_classes=2, random_state=0) + groups = rng.randint(0, 3, 50) + + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [1]} + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(n_splits=3), + GroupShuffleSplit(random_state=0), + ] + error_msg = "The 'groups' parameter should not be None." + for cv in group_cvs: + gs = Est(clf, grid, cv=cv, random_state=0) + with pytest.raises(ValueError, match=error_msg): + gs.fit(X, y) + gs.fit(X, y, groups=groups) + + non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)] + for cv in non_group_cvs: + gs = Est(clf, grid, cv=cv) + # Should not raise an error + gs.fit(X, y) + + +@pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV]) +def test_min_resources_null(SearchCV): + """Check that we raise an error if the minimum resources is set to 0.""" + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X = np.empty(0).reshape(0, 3) + + search = SearchCV(base_estimator, param_grid, min_resources="smallest") + + err_msg = "min_resources_=0: you might have passed an empty dataset X." + with pytest.raises(ValueError, match=err_msg): + search.fit(X, []) + + +@pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV]) +def test_select_best_index(SearchCV): + """Check the selection strategy of the halving search.""" + results = { # this isn't a 'real world' result dict + "iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]), + "mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]), + "params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]), + } + + # we expect the index of 'i' + best_index = SearchCV._select_best_index(None, None, results) + assert best_index == 8 + + +def test_halving_random_search_list_of_dicts(): + """Check the behaviour of the `HalvingRandomSearchCV` with `param_distribution` + being a list of dictionary. 
+ """ + X, y = make_classification(n_samples=150, n_features=4, random_state=42) + + params = [ + {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)}, + {"kernel": ["poly"], "degree": [2, 3]}, + ] + param_keys = ( + "param_C", + "param_degree", + "param_gamma", + "param_kernel", + ) + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + extra_keys = ("n_resources", "iter") + + search = HalvingRandomSearchCV( + SVC(), cv=3, param_distributions=params, return_train_score=True, random_state=0 + ) + search.fit(X, y) + n_candidates = sum(search.n_candidates_) + cv_results = search.cv_results_ + # Check results structure + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates, extra_keys) + check_cv_results_array_types(search, param_keys, score_keys) + + assert all( + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ) + assert all( + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ecfd14f45a3a75f24a9d0258bcf768a9365704 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py @@ -0,0 +1,2621 @@ +"""Test the validation module""" +import os +import re +import sys +import tempfile +import warnings +from functools import partial +from io import StringIO +from time import sleep + +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn.base import BaseEstimator, clone +from sklearn.cluster import KMeans +from sklearn.datasets import ( + load_diabetes, + load_digits, + load_iris, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.ensemble import RandomForestClassifier +from sklearn.exceptions import FitFailedWarning +from sklearn.impute import SimpleImputer +from sklearn.linear_model import ( + LogisticRegression, + PassiveAggressiveClassifier, + Ridge, + RidgeClassifier, + SGDClassifier, +) +from sklearn.metrics import ( + accuracy_score, + check_scoring, + confusion_matrix, + explained_variance_score, + make_scorer, + mean_squared_error, + precision_recall_fscore_support, + precision_score, + r2_score, +) +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + ShuffleSplit, + StratifiedKFold, + cross_val_predict, + cross_val_score, + cross_validate, + learning_curve, + permutation_test_score, + validation_curve, +) +from sklearn.model_selection._validation import ( + _check_is_permutation, + _fit_and_score, + _score, +) +from sklearn.model_selection.tests.common import OneTimeSplitter +from sklearn.model_selection.tests.test_search import FailingClassifier +from sklearn.multiclass import 
OneVsRestClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.neural_network import MLPRegressor +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import LabelEncoder, scale +from sklearn.svm import SVC, LinearSVC +from sklearn.tests.metadata_routing_common import ( + ConsumingClassifier, + ConsumingScorer, + ConsumingSplitter, + _Registry, + check_recorded_metadata, +) +from sklearn.utils import shuffle +from sklearn.utils._mocking import CheckingClassifier, MockDataFrame +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + + +class MockImprovingEstimator(BaseEstimator): + """Dummy classifier to test the learning curve""" + + def __init__(self, n_max_train_sizes): + self.n_max_train_sizes = n_max_train_sizes + self.train_sizes = 0 + self.X_subset = None + + def fit(self, X_subset, y_subset=None): + self.X_subset = X_subset + self.train_sizes = X_subset.shape[0] + return self + + def predict(self, X): + raise NotImplementedError + + def score(self, X=None, Y=None): + # training score becomes worse (2 -> 1), test error better (0 -> 1) + if self._is_training_data(X): + return 2.0 - float(self.train_sizes) / self.n_max_train_sizes + else: + return float(self.train_sizes) / self.n_max_train_sizes + + def _is_training_data(self, X): + return X is self.X_subset + + +class MockIncrementalImprovingEstimator(MockImprovingEstimator): + """Dummy classifier that provides partial_fit""" + + def __init__(self, n_max_train_sizes, expected_fit_params=None): + super().__init__(n_max_train_sizes) + self.x = None + self.expected_fit_params = expected_fit_params + + def _is_training_data(self, X): + return self.x in X + + def partial_fit(self, X, y=None, **params): + self.train_sizes += X.shape[0] + self.x = X[0] + if self.expected_fit_params: + missing = set(self.expected_fit_params) - set(params) + if missing: + raise AssertionError( + f"Expected fit parameter(s) {list(missing)} not seen." + ) + for key, value in params.items(): + if key in self.expected_fit_params and _num_samples( + value + ) != _num_samples(X): + raise AssertionError( + f"Fit parameter {key} has length {_num_samples(value)}" + f"; expected {_num_samples(X)}." 
+ ) + + +class MockEstimatorWithParameter(BaseEstimator): + """Dummy classifier to test the validation curve""" + + def __init__(self, param=0.5): + self.X_subset = None + self.param = param + + def fit(self, X_subset, y_subset): + self.X_subset = X_subset + self.train_sizes = X_subset.shape[0] + return self + + def predict(self, X): + raise NotImplementedError + + def score(self, X=None, y=None): + return self.param if self._is_training_data(X) else 1 - self.param + + def _is_training_data(self, X): + return X is self.X_subset + + +class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter): + """Dummy classifier that disallows repeated calls of fit method""" + + def fit(self, X_subset, y_subset): + assert not hasattr(self, "fit_called_"), "fit is called the second time" + self.fit_called_ = True + return super().fit(X_subset, y_subset) + + def predict(self, X): + raise NotImplementedError + + +class MockClassifier: + """Dummy classifier to test the cross-validation""" + + def __init__(self, a=0, allow_nd=False): + self.a = a + self.allow_nd = allow_nd + + def fit( + self, + X, + Y=None, + sample_weight=None, + class_prior=None, + sparse_sample_weight=None, + sparse_param=None, + dummy_int=None, + dummy_str=None, + dummy_obj=None, + callback=None, + ): + """The dummy arguments are to test that this fit function can + accept non-array arguments through cross-validation, such as: + - int + - str (this is actually array-like) + - object + - function + """ + self.dummy_int = dummy_int + self.dummy_str = dummy_str + self.dummy_obj = dummy_obj + if callback is not None: + callback(self) + + if self.allow_nd: + X = X.reshape(len(X), -1) + if X.ndim >= 3 and not self.allow_nd: + raise ValueError("X cannot be d") + if sample_weight is not None: + assert sample_weight.shape[0] == X.shape[0], ( + "MockClassifier extra fit_param " + "sample_weight.shape[0] is {0}, should be {1}".format( + sample_weight.shape[0], X.shape[0] + ) + ) + if class_prior is not None: + assert class_prior.shape[0] == len(np.unique(y)), ( + "MockClassifier extra fit_param class_prior.shape[0]" + " is {0}, should be {1}".format(class_prior.shape[0], len(np.unique(y))) + ) + if sparse_sample_weight is not None: + fmt = ( + "MockClassifier extra fit_param sparse_sample_weight" + ".shape[0] is {0}, should be {1}" + ) + assert sparse_sample_weight.shape[0] == X.shape[0], fmt.format( + sparse_sample_weight.shape[0], X.shape[0] + ) + if sparse_param is not None: + fmt = ( + "MockClassifier extra fit_param sparse_param.shape " + "is ({0}, {1}), should be ({2}, {3})" + ) + assert sparse_param.shape == P.shape, fmt.format( + sparse_param.shape[0], + sparse_param.shape[1], + P.shape[0], + P.shape[1], + ) + return self + + def predict(self, T): + if self.allow_nd: + T = T.reshape(len(T), -1) + return T[:, 0] + + def predict_proba(self, T): + return T + + def score(self, X=None, Y=None): + return 1.0 / (1 + np.abs(self.a)) + + def get_params(self, deep=False): + return {"a": self.a, "allow_nd": self.allow_nd} + + +# XXX: use 2D array, since 1D X is being detected as a single sample in +# check_consistent_length +X = np.ones((10, 2)) +y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]) +# The number of samples per class needs to be > n_splits, +# for StratifiedKFold(n_splits=3) +y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3]) +P = np.eye(5) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score(coo_container): + clf = MockClassifier() + X_sparse = coo_container(X) + + for a in range(-10, 10): + clf.a = 
a + # Smoke test + scores = cross_val_score(clf, X, y2) + assert_array_equal(scores, clf.score(X, y2)) + + # test with multioutput y + multioutput_y = np.column_stack([y2, y2[::-1]]) + scores = cross_val_score(clf, X_sparse, multioutput_y) + assert_array_equal(scores, clf.score(X_sparse, multioutput_y)) + + scores = cross_val_score(clf, X_sparse, y2) + assert_array_equal(scores, clf.score(X_sparse, y2)) + + # test with multioutput y + scores = cross_val_score(clf, X_sparse, multioutput_y) + assert_array_equal(scores, clf.score(X_sparse, multioutput_y)) + + # test with X and y as list + list_check = lambda x: isinstance(x, list) + clf = CheckingClassifier(check_X=list_check) + scores = cross_val_score(clf, X.tolist(), y2.tolist(), cv=3) + + clf = CheckingClassifier(check_y=list_check) + scores = cross_val_score(clf, X, y2.tolist(), cv=3) + + # test with 3d X and + X_3d = X[:, :, np.newaxis] + clf = MockClassifier(allow_nd=True) + scores = cross_val_score(clf, X_3d, y2) + + clf = MockClassifier(allow_nd=False) + with pytest.raises(ValueError): + cross_val_score(clf, X_3d, y2, error_score="raise") + + +def test_cross_validate_many_jobs(): + # regression test for #12154: cv='warn' with n_jobs>1 trigger a copy of + # the parameters leading to a failure in check_cv due to cv is 'warn' + # instead of cv == 'warn'. + X, y = load_iris(return_X_y=True) + clf = SVC(gamma="auto") + grid = GridSearchCV(clf, param_grid={"C": [1, 10]}) + cross_validate(grid, X, y, n_jobs=2) + + +def test_cross_validate_invalid_scoring_param(): + X, y = make_classification(random_state=0) + estimator = MockClassifier() + + # Test the errors + error_message_regexp = ".*must be unique strings.*" + + # List/tuple of callables should raise a message advising users to use + # dict of names to callables mapping + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate( + estimator, + X, + y, + scoring=(make_scorer(precision_score), make_scorer(accuracy_score)), + ) + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate(estimator, X, y, scoring=(make_scorer(precision_score),)) + + # So should empty lists/tuples + with pytest.raises(ValueError, match=error_message_regexp + "Empty list.*"): + cross_validate(estimator, X, y, scoring=()) + + # So should duplicated entries + with pytest.raises(ValueError, match=error_message_regexp + "Duplicate.*"): + cross_validate(estimator, X, y, scoring=("f1_micro", "f1_micro")) + + # Nested Lists should raise a generic error message + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate(estimator, X, y, scoring=[[make_scorer(precision_score)]]) + + # Empty dict should raise invalid scoring error + with pytest.raises(ValueError, match="An empty dict"): + cross_validate(estimator, X, y, scoring=(dict())) + + multiclass_scorer = make_scorer(precision_recall_fscore_support) + + # Multiclass Scorers that return multiple values are not supported yet + # the warning message we're expecting to see + warning_message = ( + "Scoring failed. The score on this train-test " + f"partition for these parameters will be set to {np.nan}. 
" + "Details: \n" + ) + + with pytest.warns(UserWarning, match=warning_message): + cross_validate(estimator, X, y, scoring=multiclass_scorer) + + with pytest.warns(UserWarning, match=warning_message): + cross_validate(estimator, X, y, scoring={"foo": multiclass_scorer}) + + +def test_cross_validate_nested_estimator(): + # Non-regression test to ensure that nested + # estimators are properly returned in a list + # https://github.com/scikit-learn/scikit-learn/pull/17745 + (X, y) = load_iris(return_X_y=True) + pipeline = Pipeline( + [ + ("imputer", SimpleImputer()), + ("classifier", MockClassifier()), + ] + ) + + results = cross_validate(pipeline, X, y, return_estimator=True) + estimators = results["estimator"] + + assert isinstance(estimators, list) + assert all(isinstance(estimator, Pipeline) for estimator in estimators) + + +@pytest.mark.parametrize("use_sparse", [False, True]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cross_validate(use_sparse: bool, csr_container): + # Compute train and test mse/r2 scores + cv = KFold() + + # Regression + X_reg, y_reg = make_regression(n_samples=30, random_state=0) + reg = Ridge(random_state=0) + + # Classification + X_clf, y_clf = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + + if use_sparse: + X_reg = csr_container(X_reg) + X_clf = csr_container(X_clf) + + for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)): + # It's okay to evaluate regression metrics on classification too + mse_scorer = check_scoring(est, scoring="neg_mean_squared_error") + r2_scorer = check_scoring(est, scoring="r2") + train_mse_scores = [] + test_mse_scores = [] + train_r2_scores = [] + test_r2_scores = [] + fitted_estimators = [] + + for train, test in cv.split(X, y): + est = clone(est).fit(X[train], y[train]) + train_mse_scores.append(mse_scorer(est, X[train], y[train])) + train_r2_scores.append(r2_scorer(est, X[train], y[train])) + test_mse_scores.append(mse_scorer(est, X[test], y[test])) + test_r2_scores.append(r2_scorer(est, X[test], y[test])) + fitted_estimators.append(est) + + train_mse_scores = np.array(train_mse_scores) + test_mse_scores = np.array(test_mse_scores) + train_r2_scores = np.array(train_r2_scores) + test_r2_scores = np.array(test_r2_scores) + fitted_estimators = np.array(fitted_estimators) + + scores = ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) + + # To ensure that the test does not suffer from + # large statistical fluctuations due to slicing small datasets, + # we pass the cross-validation instance + check_cross_validate_single_metric(est, X, y, scores, cv) + check_cross_validate_multi_metric(est, X, y, scores, cv) + + +def check_cross_validate_single_metric(clf, X, y, scores, cv): + ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) = scores + # Test single metric evaluation when scoring is string or singleton list + for return_train_score, dict_len in ((True, 4), (False, 3)): + # Single metric passed as a string + if return_train_score: + mse_scores_dict = cross_validate( + clf, + X, + y, + scoring="neg_mean_squared_error", + return_train_score=True, + cv=cv, + ) + assert_array_almost_equal(mse_scores_dict["train_score"], train_mse_scores) + else: + mse_scores_dict = cross_validate( + clf, + X, + y, + scoring="neg_mean_squared_error", + return_train_score=False, + cv=cv, + ) + assert isinstance(mse_scores_dict, dict) + assert len(mse_scores_dict) == dict_len + 
assert_array_almost_equal(mse_scores_dict["test_score"], test_mse_scores) + + # Single metric passed as a list + if return_train_score: + # It must be True by default - deprecated + r2_scores_dict = cross_validate( + clf, X, y, scoring=["r2"], return_train_score=True, cv=cv + ) + assert_array_almost_equal(r2_scores_dict["train_r2"], train_r2_scores, True) + else: + r2_scores_dict = cross_validate( + clf, X, y, scoring=["r2"], return_train_score=False, cv=cv + ) + assert isinstance(r2_scores_dict, dict) + assert len(r2_scores_dict) == dict_len + assert_array_almost_equal(r2_scores_dict["test_r2"], test_r2_scores) + + # Test return_estimator option + mse_scores_dict = cross_validate( + clf, X, y, scoring="neg_mean_squared_error", return_estimator=True, cv=cv + ) + for k, est in enumerate(mse_scores_dict["estimator"]): + est_coef = est.coef_.copy() + if issparse(est_coef): + est_coef = est_coef.toarray() + + fitted_est_coef = fitted_estimators[k].coef_.copy() + if issparse(fitted_est_coef): + fitted_est_coef = fitted_est_coef.toarray() + + assert_almost_equal(est_coef, fitted_est_coef) + assert_almost_equal(est.intercept_, fitted_estimators[k].intercept_) + + +def check_cross_validate_multi_metric(clf, X, y, scores, cv): + # Test multimetric evaluation when scoring is a list / dict + ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) = scores + + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + return { + "r2": r2_score(y, y_pred), + "neg_mean_squared_error": -mean_squared_error(y, y_pred), + } + + all_scoring = ( + ("r2", "neg_mean_squared_error"), + { + "r2": make_scorer(r2_score), + "neg_mean_squared_error": "neg_mean_squared_error", + }, + custom_scorer, + ) + + keys_sans_train = { + "test_r2", + "test_neg_mean_squared_error", + "fit_time", + "score_time", + } + keys_with_train = keys_sans_train.union( + {"train_r2", "train_neg_mean_squared_error"} + ) + + for return_train_score in (True, False): + for scoring in all_scoring: + if return_train_score: + # return_train_score must be True by default - deprecated + cv_results = cross_validate( + clf, X, y, scoring=scoring, return_train_score=True, cv=cv + ) + assert_array_almost_equal(cv_results["train_r2"], train_r2_scores) + assert_array_almost_equal( + cv_results["train_neg_mean_squared_error"], train_mse_scores + ) + else: + cv_results = cross_validate( + clf, X, y, scoring=scoring, return_train_score=False, cv=cv + ) + assert isinstance(cv_results, dict) + assert set(cv_results.keys()) == ( + keys_with_train if return_train_score else keys_sans_train + ) + assert_array_almost_equal(cv_results["test_r2"], test_r2_scores) + assert_array_almost_equal( + cv_results["test_neg_mean_squared_error"], test_mse_scores + ) + + # Make sure all the arrays are of np.ndarray type + assert type(cv_results["test_r2"]) == np.ndarray + assert type(cv_results["test_neg_mean_squared_error"]) == np.ndarray + assert type(cv_results["fit_time"]) == np.ndarray + assert type(cv_results["score_time"]) == np.ndarray + + # Ensure all the times are within sane limits + assert np.all(cv_results["fit_time"] >= 0) + assert np.all(cv_results["fit_time"] < 10) + assert np.all(cv_results["score_time"] >= 0) + assert np.all(cv_results["score_time"] < 10) + + +def test_cross_val_score_predict_groups(): + # Check if ValueError (when groups is None) propagates to cross_val_score + # and cross_val_predict + # And also check if groups is correctly passed to the cv object + X, y = make_classification(n_samples=20, 
n_classes=2, random_state=0) + + clf = SVC(kernel="linear") + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(), + GroupShuffleSplit(), + ] + error_message = "The 'groups' parameter should not be None." + for cv in group_cvs: + with pytest.raises(ValueError, match=error_message): + cross_val_score(estimator=clf, X=X, y=y, cv=cv) + with pytest.raises(ValueError, match=error_message): + cross_val_predict(estimator=clf, X=X, y=y, cv=cv) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +def test_cross_val_score_pandas(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + # 3 fold cross val is used so we need at least 3 samples per class + X_df, y_ser = InputFeatureType(X), TargetType(y2) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + cross_val_score(clf, X_df, y_ser, cv=3) + + +def test_cross_val_score_mask(): + # test that cross_val_score works with boolean masks + svm = SVC(kernel="linear") + iris = load_iris() + X, y = iris.data, iris.target + kfold = KFold(5) + scores_indices = cross_val_score(svm, X, y, cv=kfold) + kfold = KFold(5) + cv_masks = [] + for train, test in kfold.split(X, y): + mask_train = np.zeros(len(y), dtype=bool) + mask_test = np.zeros(len(y), dtype=bool) + mask_train[train] = 1 + mask_test[test] = 1 + cv_masks.append((train, test)) + scores_masks = cross_val_score(svm, X, y, cv=cv_masks) + assert_array_equal(scores_indices, scores_masks) + + +def test_cross_val_score_precomputed(): + # test for svm with precomputed kernel + svm = SVC(kernel="precomputed") + iris = load_iris() + X, y = iris.data, iris.target + linear_kernel = np.dot(X, X.T) + score_precomputed = cross_val_score(svm, linear_kernel, y) + svm = SVC(kernel="linear") + score_linear = cross_val_score(svm, X, y) + assert_array_almost_equal(score_precomputed, score_linear) + + # test with callable + svm = SVC(kernel=lambda x, y: np.dot(x, y.T)) + score_callable = cross_val_score(svm, X, y) + assert_array_almost_equal(score_precomputed, score_callable) + + # Error raised for non-square X + svm = SVC(kernel="precomputed") + with pytest.raises(ValueError): + cross_val_score(svm, X, y) + + # test error is raised when the precomputed kernel is not array-like + # or sparse + with pytest.raises(ValueError): + cross_val_score(svm, linear_kernel.tolist(), y) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score_fit_params(coo_container): + clf = MockClassifier() + n_samples = X.shape[0] + n_classes = len(np.unique(y)) + + W_sparse = coo_container( + (np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1) + ) + P_sparse = coo_container(np.eye(5)) + + DUMMY_INT = 42 + DUMMY_STR = "42" + DUMMY_OBJ = object() + + def assert_fit_params(clf): + # Function to test that the values are passed correctly to the + # classifier arguments for non-array type + + assert clf.dummy_int == DUMMY_INT + assert clf.dummy_str == DUMMY_STR + assert clf.dummy_obj == DUMMY_OBJ + + fit_params = { + "sample_weight": np.ones(n_samples), + "class_prior": np.full(n_classes, 1.0 / n_classes), + "sparse_sample_weight": W_sparse, + "sparse_param": P_sparse, + "dummy_int": DUMMY_INT, + "dummy_str": 
DUMMY_STR, + "dummy_obj": DUMMY_OBJ, + "callback": assert_fit_params, + } + cross_val_score(clf, X, y, params=fit_params) + + +def test_cross_val_score_score_func(): + clf = MockClassifier() + _score_func_args = [] + + def score_func(y_test, y_predict): + _score_func_args.append((y_test, y_predict)) + return 1.0 + + with warnings.catch_warnings(record=True): + scoring = make_scorer(score_func) + score = cross_val_score(clf, X, y, scoring=scoring, cv=3) + assert_array_equal(score, [1.0, 1.0, 1.0]) + # Test that score function is called only 3 times (for cv=3) + assert len(_score_func_args) == 3 + + +def test_cross_val_score_with_score_func_classification(): + iris = load_iris() + clf = SVC(kernel="linear") + + # Default score (should be the accuracy score) + scores = cross_val_score(clf, iris.data, iris.target) + assert_array_almost_equal(scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + # Correct classification score (aka. zero / one score) - should be the + # same as the default estimator score + zo_scores = cross_val_score(clf, iris.data, iris.target, scoring="accuracy") + assert_array_almost_equal(zo_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + # F1 score (class are balanced so f1_score should be equal to zero/one + # score + f1_scores = cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted") + assert_array_almost_equal(f1_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + +def test_cross_val_score_with_score_func_regression(): + X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) + reg = Ridge() + + # Default score of the Ridge regression estimator + scores = cross_val_score(reg, X, y) + assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + # R2 score (aka. determination coefficient) - should be the + # same as the default estimator score + r2_scores = cross_val_score(reg, X, y, scoring="r2") + assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + # Mean squared error; this is a loss function, so "scores" are negative + neg_mse_scores = cross_val_score(reg, X, y, scoring="neg_mean_squared_error") + expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) + assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2) + + # Explained variance + scoring = make_scorer(explained_variance_score) + ev_scores = cross_val_score(reg, X, y, scoring=scoring) + assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_permutation_score(coo_container): + iris = load_iris() + X = iris.data + X_sparse = coo_container(X) + y = iris.target + svm = SVC(kernel="linear") + cv = StratifiedKFold(2) + + score, scores, pvalue = permutation_test_score( + svm, X, y, n_permutations=30, cv=cv, scoring="accuracy" + ) + assert score > 0.9 + assert_almost_equal(pvalue, 0.0, 1) + + score_group, _, pvalue_group = permutation_test_score( + svm, + X, + y, + n_permutations=30, + cv=cv, + scoring="accuracy", + groups=np.ones(y.size), + random_state=0, + ) + assert score_group == score + assert pvalue_group == pvalue + + # check that we obtain the same results with a sparse representation + svm_sparse = SVC(kernel="linear") + cv_sparse = StratifiedKFold(2) + score_group, _, pvalue_group = permutation_test_score( + svm_sparse, + X_sparse, + y, + n_permutations=30, + cv=cv_sparse, + scoring="accuracy", + groups=np.ones(y.size), + random_state=0, + ) + + assert score_group == score + assert pvalue_group == pvalue + + # test with custom scoring 
object + def custom_score(y_true, y_pred): + return ((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0] + + scorer = make_scorer(custom_score) + score, _, pvalue = permutation_test_score( + svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0 + ) + assert_almost_equal(score, 0.93, 2) + assert_almost_equal(pvalue, 0.01, 3) + + # set random y + y = np.mod(np.arange(len(y)), 3) + + score, scores, pvalue = permutation_test_score( + svm, X, y, n_permutations=30, cv=cv, scoring="accuracy" + ) + + assert score < 0.5 + assert pvalue > 0.2 + + +def test_permutation_test_score_allow_nans(): + # Check that permutation_test_score allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + permutation_test_score(p, X, y) + + +def test_permutation_test_score_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + permutation_test_score(clf, X, y) + + err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!" + with pytest.raises(ValueError, match=err_msg): + permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(1)}) + permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(10)}) + + +def test_cross_val_score_allow_nans(): + # Check that cross_val_score allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + cross_val_score(p, X, y) + + +def test_cross_val_score_multilabel(): + X = np.array( + [ + [-3, 4], + [2, 4], + [3, 3], + [0, 2], + [-3, 1], + [-2, 1], + [0, 0], + [-2, -1], + [-1, -2], + [1, -2], + ] + ) + y = np.array( + [[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]] + ) + clf = KNeighborsClassifier(n_neighbors=1) + scoring_micro = make_scorer(precision_score, average="micro") + scoring_macro = make_scorer(precision_score, average="macro") + scoring_samples = make_scorer(precision_score, average="samples") + score_micro = cross_val_score(clf, X, y, scoring=scoring_micro) + score_macro = cross_val_score(clf, X, y, scoring=scoring_macro) + score_samples = cross_val_score(clf, X, y, scoring=scoring_samples) + assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) + assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) + assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_predict(coo_container): + X, y = load_diabetes(return_X_y=True) + cv = KFold() + + est = Ridge() + + # Naive loop (should be same as cross_val_predict): + preds2 = np.zeros_like(y) + for train, test in cv.split(X, y): + est.fit(X[train], y[train]) + preds2[test] = est.predict(X[test]) + + preds = cross_val_predict(est, X, y, cv=cv) + assert_array_almost_equal(preds, preds2) + + preds = cross_val_predict(est, X, y) + assert len(preds) == len(y) + + cv = LeaveOneOut() + preds = cross_val_predict(est, X, y, cv=cv) + assert len(preds) == len(y) + + Xsp = X.copy() + Xsp *= Xsp > np.median(Xsp) + 
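+    # The statement above zeroes out every entry at or below the median, so
+    # the COO conversion below yields a genuinely sparse matrix.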
Xsp = coo_container(Xsp) + preds = cross_val_predict(est, Xsp, y) + assert_array_almost_equal(len(preds), len(y)) + + preds = cross_val_predict(KMeans(n_init="auto"), X) + assert len(preds) == len(y) + + class BadCV: + def split(self, X, y=None, groups=None): + for i in range(4): + yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) + + with pytest.raises(ValueError): + cross_val_predict(est, X, y, cv=BadCV()) + + X, y = load_iris(return_X_y=True) + + warning_message = ( + r"Number of classes in training fold \(2\) does " + r"not match total number of classes \(3\). " + "Results may not be appropriate for your use case." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + cross_val_predict( + LogisticRegression(solver="liblinear"), + X, + y, + method="predict_proba", + cv=KFold(2), + ) + + +def test_cross_val_predict_decision_function_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="decision_function" + ) + assert preds.shape == (50,) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="decision_function" + ) + assert preds.shape == (150, 3) + + # This specifically tests imbalanced splits for binary + # classification with decision_function. This is only + # applicable to classifiers that can be fit on a single + # class. + X = X[:100] + y = y[:100] + error_message = ( + "Only 1 class/es in training fold," + " but 2 in overall dataset. This" + " is not supported for decision_function" + " with imbalanced folds. To fix " + "this, use a cross-validation technique " + "resulting in properly stratified folds" + ) + with pytest.raises(ValueError, match=error_message): + cross_val_predict( + RidgeClassifier(), X, y, method="decision_function", cv=KFold(2) + ) + + X, y = load_digits(return_X_y=True) + est = SVC(kernel="linear", decision_function_shape="ovo") + + preds = cross_val_predict(est, X, y, method="decision_function") + assert preds.shape == (1797, 45) + + ind = np.argsort(y) + X, y = X[ind], y[ind] + error_message_regexp = ( + r"Output shape \(599L?, 21L?\) of " + "decision_function does not match number of " + r"classes \(7\) in fold. 
Irregular " + "decision_function .*" + ) + with pytest.raises(ValueError, match=error_message_regexp): + cross_val_predict(est, X, y, cv=KFold(n_splits=3), method="decision_function") + + +def test_cross_val_predict_predict_proba_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_proba" + ) + assert preds.shape == (50, 2) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_proba" + ) + assert preds.shape == (150, 3) + + +def test_cross_val_predict_predict_log_proba_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba" + ) + assert preds.shape == (50, 2) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba" + ) + assert preds.shape == (150, 3) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_predict_input_types(coo_container): + iris = load_iris() + X, y = iris.data, iris.target + X_sparse = coo_container(X) + multioutput_y = np.column_stack([y, y[::-1]]) + + clf = Ridge(fit_intercept=False, random_state=0) + # 3 fold cv is used --> at least 3 samples per class + # Smoke test + predictions = cross_val_predict(clf, X, y) + assert predictions.shape == (150,) + + # test with multioutput y + predictions = cross_val_predict(clf, X_sparse, multioutput_y) + assert predictions.shape == (150, 2) + + predictions = cross_val_predict(clf, X_sparse, y) + assert_array_equal(predictions.shape, (150,)) + + # test with multioutput y + predictions = cross_val_predict(clf, X_sparse, multioutput_y) + assert_array_equal(predictions.shape, (150, 2)) + + # test with X and y as list + list_check = lambda x: isinstance(x, list) + clf = CheckingClassifier(check_X=list_check) + predictions = cross_val_predict(clf, X.tolist(), y.tolist()) + + clf = CheckingClassifier(check_y=list_check) + predictions = cross_val_predict(clf, X, y.tolist()) + + # test with X and y as list and non empty method + predictions = cross_val_predict( + LogisticRegression(solver="liblinear"), + X.tolist(), + y.tolist(), + method="decision_function", + ) + predictions = cross_val_predict( + LogisticRegression(solver="liblinear"), + X, + y.tolist(), + method="decision_function", + ) + + # test with 3d X and + X_3d = X[:, :, np.newaxis] + check_3d = lambda x: x.ndim == 3 + clf = CheckingClassifier(check_X=check_3d) + predictions = cross_val_predict(clf, X_3d, y) + assert_array_equal(predictions.shape, (150,)) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +# python3.7 deprecation warnings in pandas via matplotlib :-/ +def test_cross_val_predict_pandas(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + X_df, y_ser = InputFeatureType(X), TargetType(y2) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + cross_val_predict(clf, X_df, y_ser, cv=3) + + +def test_cross_val_predict_unbalanced(): + X, y = 
make_classification( + n_samples=100, + n_features=2, + n_redundant=0, + n_informative=2, + n_clusters_per_class=1, + random_state=1, + ) + # Change the first sample to a new class + y[0] = 2 + clf = LogisticRegression(random_state=1, solver="liblinear") + cv = StratifiedKFold(n_splits=2) + train, test = list(cv.split(X, y)) + yhat_proba = cross_val_predict(clf, X, y, cv=cv, method="predict_proba") + assert y[test[0]][0] == 2 # sanity check for further assertions + assert np.all(yhat_proba[test[0]][:, 2] == 0) + assert np.all(yhat_proba[test[0]][:, 0:1] > 0) + assert np.all(yhat_proba[test[1]] > 0) + assert_array_almost_equal(yhat_proba.sum(axis=1), np.ones(y.shape), decimal=12) + + +def test_cross_val_predict_y_none(): + # ensure that cross_val_predict works when y is None + mock_classifier = MockClassifier() + rng = np.random.RandomState(42) + X = rng.rand(100, 10) + y_hat = cross_val_predict(mock_classifier, X, y=None, cv=5, method="predict") + assert_allclose(X[:, 0], y_hat) + y_hat_proba = cross_val_predict( + mock_classifier, X, y=None, cv=5, method="predict_proba" + ) + assert_allclose(X, y_hat_proba) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score_sparse_fit_params(coo_container): + iris = load_iris() + X, y = iris.data, iris.target + clf = MockClassifier() + fit_params = {"sparse_sample_weight": coo_container(np.eye(X.shape[0]))} + a = cross_val_score(clf, X, y, params=fit_params, cv=3) + assert_array_equal(a, np.ones(3)) + + +def test_learning_curve(): + n_samples = 30 + n_splits = 3 + X, y = make_classification( + n_samples=n_samples, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits)) + for shuffle_train in [False, True]: + with warnings.catch_warnings(record=True) as w: + ( + train_sizes, + train_scores, + test_scores, + fit_times, + score_times, + ) = learning_curve( + estimator, + X, + y, + cv=KFold(n_splits=n_splits), + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + return_times=True, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + assert train_scores.shape == (10, 3) + assert test_scores.shape == (10, 3) + assert fit_times.shape == (10, 3) + assert score_times.shape == (10, 3) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + # Cannot use assert_array_almost_equal for fit and score times because + # the values are hardware-dependant + assert fit_times.dtype == "float64" + assert score_times.dtype == "float64" + + # Test a custom cv splitter that can iterate only once + with warnings.catch_warnings(record=True) as w: + train_sizes2, train_scores2, test_scores2 = learning_curve( + estimator, + X, + y, + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + assert_array_almost_equal(train_scores2, train_scores) + assert_array_almost_equal(test_scores2, test_scores) + + +def test_learning_curve_unsupervised(): + X, _ = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + 
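+    # y=None: learning_curve should also work in the unsupervised setting;
+    # the mock estimator simply ignores the labels.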
train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10) + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_verbose(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y, cv=3, verbose=1 + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + assert "[learning_curve]" in out + + +def test_learning_curve_incremental_learning_not_possible(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + # The mockup does not have partial_fit() + estimator = MockImprovingEstimator(1) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, exploit_incremental_learning=True) + + +def test_learning_curve_incremental_learning(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20) + for shuffle_train in [False, True]: + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_incremental_learning_unsupervised(): + X, _ = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20) + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y=None, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_batch_and_incremental_learning_are_equal(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + train_sizes = np.linspace(0.2, 1.0, 5) + estimator = PassiveAggressiveClassifier(max_iter=1, tol=None, shuffle=False) + + train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve( + estimator, + X, + y, + train_sizes=train_sizes, + cv=3, + exploit_incremental_learning=True, + ) + train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve( + estimator, + X, + y, + cv=3, + train_sizes=train_sizes, + exploit_incremental_learning=False, + ) + + assert_array_equal(train_sizes_inc, train_sizes_batch) + assert_array_almost_equal( + train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1) + ) + 
assert_array_almost_equal( + test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1) + ) + + +def test_learning_curve_n_sample_range_out_of_bounds(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0, 1]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0.0, 1.0]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0.1, 1.1]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0, 20]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[1, 21]) + + +def test_learning_curve_remove_duplicate_sample_sizes(): + X, y = make_classification( + n_samples=3, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(2) + warning_message = ( + "Removed duplicate entries from 'train_sizes'. Number of ticks " + "will be less than the size of 'train_sizes': 2 instead of 3." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + train_sizes, _, _ = learning_curve( + estimator, X, y, cv=3, train_sizes=np.linspace(0.33, 1.0, 3) + ) + assert_array_equal(train_sizes, [1, 2]) + + +def test_learning_curve_with_boolean_indices(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + cv = KFold(n_splits=3) + train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10) + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_with_shuffle(): + # Following test case was designed this way to verify the code + # changes made in pull request: #7506. + X = np.array( + [ + [1, 2], + [3, 4], + [5, 6], + [7, 8], + [11, 12], + [13, 14], + [15, 16], + [17, 18], + [19, 20], + [7, 8], + [9, 10], + [11, 12], + [13, 14], + [15, 16], + [17, 18], + ] + ) + y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4]) + groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4]) + # Splits on these groups fail without shuffle as the first iteration + # of the learning curve doesn't contain label 4 in the training set. 
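+ # As a rough illustration (not part of the original test), the unshuffled
+ # prefix used for the smallest train size can be inspected directly; with
+ # train_sizes starting at 0.3 of the training fold, label 4 can be absent:
+ #
+ #     train_idx, _ = next(GroupKFold(n_splits=2).split(X, y, groups))
+ #     np.unique(y[train_idx[: int(0.3 * len(train_idx))]])  # may lack 4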
+ estimator = PassiveAggressiveClassifier(max_iter=5, tol=None, shuffle=False) + + cv = GroupKFold(n_splits=2) + train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + shuffle=True, + random_state=2, + ) + assert_array_almost_equal( + train_scores_batch.mean(axis=1), np.array([0.75, 0.3, 0.36111111]) + ) + assert_array_almost_equal( + test_scores_batch.mean(axis=1), np.array([0.36111111, 0.25, 0.25]) + ) + with pytest.raises(ValueError): + learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + error_score="raise", + ) + + train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + shuffle=True, + random_state=2, + exploit_incremental_learning=True, + ) + assert_array_almost_equal( + train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1) + ) + assert_array_almost_equal( + test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1) + ) + + +def test_learning_curve_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + learning_curve(clf, X, y, error_score="raise") + + err_msg = r"sample_weight.shape == \(1,\), expected \(2,\)!" + with pytest.raises(ValueError, match=err_msg): + learning_curve( + clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(1)} + ) + learning_curve( + clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(10)} + ) + + +def test_learning_curve_incremental_learning_fit_params(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20, ["sample_weight"]) + err_msg = r"Expected fit parameter\(s\) \['sample_weight'\] not seen." 
+ with pytest.raises(AssertionError, match=err_msg): + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + ) + + err_msg = "Fit parameter sample_weight has length 3; expected" + with pytest.raises(AssertionError, match=err_msg): + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + fit_params={"sample_weight": np.ones(3)}, + ) + + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + fit_params={"sample_weight": np.ones(2)}, + ) + + +def test_validation_curve(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + param_range = np.linspace(0, 1, 10) + with warnings.catch_warnings(record=True) as w: + train_scores, test_scores = validation_curve( + MockEstimatorWithParameter(), + X, + y, + param_name="param", + param_range=param_range, + cv=2, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + + assert_array_almost_equal(train_scores.mean(axis=1), param_range) + assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range) + + +def test_validation_curve_clone_estimator(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + + param_range = np.linspace(1, 0, 10) + _, _ = validation_curve( + MockEstimatorWithSingleFitCallAllowed(), + X, + y, + param_name="param", + param_range=param_range, + cv=2, + ) + + +def test_validation_curve_cv_splits_consistency(): + n_samples = 100 + n_splits = 5 + X, y = make_classification(n_samples=100, random_state=0) + + scores1 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + ) + # The OneTimeSplitter is a non-re-entrant cv splitter. Unless, the + # `split` is called for each parameter, the following should produce + # identical results for param setting 1 and param setting 2 as both have + # the same C value. + assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :], 2)) + + scores2 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=KFold(n_splits=n_splits, shuffle=True), + ) + + # For scores2, compare the 1st and 2nd parameter's scores + # (Since the C value for 1st two param setting is 0.1, they must be + # consistent unless the train test folds differ between the param settings) + assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :], 2)) + + scores3 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=KFold(n_splits=n_splits), + ) + + # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check. 
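+ # (scores1 above was computed with OneTimeSplitter and scores3 with an
+ # unshuffled KFold; both iterate over identical train/test folds, so the
+ # full score arrays are expected to match, not just the repeated-C rows.)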
+ assert_array_almost_equal(np.array(scores3), np.array(scores1)) + + +def test_validation_curve_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + ) + + err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!" + with pytest.raises(ValueError, match=err_msg): + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + fit_params={"sample_weight": np.ones(1)}, + ) + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + fit_params={"sample_weight": np.ones(10)}, + ) + + +def test_check_is_permutation(): + rng = np.random.RandomState(0) + p = np.arange(100) + rng.shuffle(p) + assert _check_is_permutation(p, 100) + assert not _check_is_permutation(np.delete(p, 23), 100) + + p[0] = 23 + assert not _check_is_permutation(p, 100) + + # Check if the additional duplicate indices are caught + assert not _check_is_permutation(np.hstack((p, 0)), 100) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cross_val_predict_sparse_prediction(csr_container): + # check that cross_val_predict gives same result for sparse and dense input + X, y = make_multilabel_classification( + n_classes=2, + n_labels=1, + allow_unlabeled=False, + return_indicator=True, + random_state=1, + ) + X_sparse = csr_container(X) + y_sparse = csr_container(y) + classif = OneVsRestClassifier(SVC(kernel="linear")) + preds = cross_val_predict(classif, X, y, cv=10) + preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10) + preds_sparse = preds_sparse.toarray() + assert_array_almost_equal(preds_sparse, preds) + + +def check_cross_val_predict_binary(est, X, y, method): + """Helper for tests of cross_val_predict with binary classification""" + cv = KFold(n_splits=3, shuffle=False) + + # Generate expected outputs + if y.ndim == 1: + exp_shape = (len(X),) if method == "decision_function" else (len(X), 2) + else: + exp_shape = y.shape + expected_predictions = np.zeros(exp_shape) + for train, test in cv.split(X, y): + est = clone(est).fit(X[train], y[train]) + expected_predictions[test] = getattr(est, method)(X[test]) + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + assert_allclose( + cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions + ) + + +def check_cross_val_predict_multiclass(est, X, y, method): + """Helper for tests of cross_val_predict with multiclass classification""" + cv = KFold(n_splits=3, shuffle=False) + + # Generate expected outputs + float_min = np.finfo(np.float64).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + expected_predictions = np.full( + (len(X), len(set(y))), default_values[method], dtype=np.float64 + ) + _, y_enc = np.unique(y, return_inverse=True) + for train, test in cv.split(X, y_enc): + est = clone(est).fit(X[train], y_enc[train]) + fold_preds = getattr(est, method)(X[test]) + i_cols_fit = np.unique(y_enc[train]) + expected_predictions[np.ix_(test, i_cols_fit)] = fold_preds + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + assert_allclose( + 
cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions + ) + + +def check_cross_val_predict_multilabel(est, X, y, method): + """Check the output of cross_val_predict for 2D targets using + Estimators which provide a predictions as a list with one + element per class. + """ + cv = KFold(n_splits=3, shuffle=False) + + # Create empty arrays of the correct size to hold outputs + float_min = np.finfo(np.float64).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + n_targets = y.shape[1] + expected_preds = [] + for i_col in range(n_targets): + n_classes_in_label = len(set(y[:, i_col])) + if n_classes_in_label == 2 and method == "decision_function": + exp_shape = (len(X),) + else: + exp_shape = (len(X), n_classes_in_label) + expected_preds.append( + np.full(exp_shape, default_values[method], dtype=np.float64) + ) + + # Generate expected outputs + y_enc_cols = [ + np.unique(y[:, i], return_inverse=True)[1][:, np.newaxis] + for i in range(y.shape[1]) + ] + y_enc = np.concatenate(y_enc_cols, axis=1) + for train, test in cv.split(X, y_enc): + est = clone(est).fit(X[train], y_enc[train]) + fold_preds = getattr(est, method)(X[test]) + for i_col in range(n_targets): + fold_cols = np.unique(y_enc[train][:, i_col]) + if expected_preds[i_col].ndim == 1: + # Decision function with <=2 classes + expected_preds[i_col][test] = fold_preds[i_col] + else: + idx = np.ix_(test, fold_cols) + expected_preds[i_col][idx] = fold_preds[i_col] + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + cv_predict_output = cross_val_predict(est, X, tg, method=method, cv=cv) + assert len(cv_predict_output) == len(expected_preds) + for i in range(len(cv_predict_output)): + assert_allclose(cv_predict_output[i], expected_preds[i]) + + +def check_cross_val_predict_with_method_binary(est): + # This test includes the decision_function with two classes. + # This is a special case: it has only one column of output. + X, y = make_classification(n_classes=2, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_binary(est, X, y, method) + + +def check_cross_val_predict_with_method_multiclass(est): + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method(): + check_cross_val_predict_with_method_binary(LogisticRegression(solver="liblinear")) + check_cross_val_predict_with_method_multiclass( + LogisticRegression(solver="liblinear") + ) + + +def test_cross_val_predict_method_checking(): + # Regression test for issue #9639. Tests that cross_val_predict does not + # check estimator methods (e.g. 
predict_proba) before fitting + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + est = SGDClassifier(loss="log_loss", random_state=2) + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_gridsearchcv_cross_val_predict_with_method(): + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + est = GridSearchCV( + LogisticRegression(random_state=42, solver="liblinear"), {"C": [0.1, 1]}, cv=2 + ) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method_multilabel_ovr(): + # OVR does multilabel predictions, but only arrays of + # binary indicator columns. The output of predict_proba + # is a 2D array with shape (n_samples, n_classes). + n_samp = 100 + n_classes = 4 + X, y = make_multilabel_classification( + n_samples=n_samp, n_labels=3, n_classes=n_classes, n_features=5, random_state=42 + ) + est = OneVsRestClassifier(LogisticRegression(solver="liblinear", random_state=0)) + for method in ["predict_proba", "decision_function"]: + check_cross_val_predict_binary(est, X, y, method=method) + + +class RFWithDecisionFunction(RandomForestClassifier): + # None of the current multioutput-multiclass estimators have + # decision function methods. Create a mock decision function + # to test the cross_val_predict function's handling of this case. + def decision_function(self, X): + probs = self.predict_proba(X) + msg = "This helper should only be used on multioutput-multiclass tasks" + assert isinstance(probs, list), msg + probs = [p[:, -1] if p.shape[1] == 2 else p for p in probs] + return probs + + +def test_cross_val_predict_with_method_multilabel_rf(): + # The RandomForest allows multiple classes in each label. + # Output of predict_proba is a list of outputs of predict_proba + # for each individual label. + n_classes = 4 + X, y = make_multilabel_classification( + n_samples=100, n_labels=3, n_classes=n_classes, n_features=5, random_state=42 + ) + y[:, 0] += y[:, 1] # Put three classes in the first column + for method in ["predict_proba", "predict_log_proba", "decision_function"]: + est = RFWithDecisionFunction(n_estimators=5, random_state=0) + with warnings.catch_warnings(): + # Suppress "RuntimeWarning: divide by zero encountered in log" + warnings.simplefilter("ignore") + check_cross_val_predict_multilabel(est, X, y, method=method) + + +def test_cross_val_predict_with_method_rare_class(): + # Test a multiclass problem where one class will be missing from + # one of the CV training sets. + rng = np.random.RandomState(0) + X = rng.normal(0, 1, size=(14, 10)) + y = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 3]) + est = LogisticRegression(solver="liblinear") + for method in ["predict_proba", "predict_log_proba", "decision_function"]: + with warnings.catch_warnings(): + # Suppress warning about too few examples of a class + warnings.simplefilter("ignore") + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method_multilabel_rf_rare_class(): + # The RandomForest allows anything for the contents of the labels. + # Output of predict_proba is a list of outputs of predict_proba + # for each individual label. + # In this test, the first label has a class with a single example. + # We'll have one CV fold where the training data don't include it. 
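+ # (Concretely: in the y defined below, class 2 occurs exactly once in the
+ # first column, so with KFold(n_splits=3) exactly one training fold misses
+ # it and the corresponding probability column keeps its default value.)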
+ rng = np.random.RandomState(0) + X = rng.normal(0, 1, size=(5, 10)) + y = np.array([[0, 0], [1, 1], [2, 1], [0, 1], [1, 0]]) + for method in ["predict_proba", "predict_log_proba"]: + est = RFWithDecisionFunction(n_estimators=5, random_state=0) + with warnings.catch_warnings(): + # Suppress "RuntimeWarning: divide by zero encountered in log" + warnings.simplefilter("ignore") + check_cross_val_predict_multilabel(est, X, y, method=method) + + +def get_expected_predictions(X, y, cv, classes, est, method): + expected_predictions = np.zeros([len(y), classes]) + func = getattr(est, method) + + for train, test in cv.split(X, y): + est.fit(X[train], y[train]) + expected_predictions_ = func(X[test]) + # To avoid 2 dimensional indexing + if method == "predict_proba": + exp_pred_test = np.zeros((len(test), classes)) + else: + exp_pred_test = np.full( + (len(test), classes), np.finfo(expected_predictions.dtype).min + ) + exp_pred_test[:, est.classes_] = expected_predictions_ + expected_predictions[test] = exp_pred_test + + return expected_predictions + + +def test_cross_val_predict_class_subset(): + X = np.arange(200).reshape(100, 2) + y = np.array([x // 10 for x in range(100)]) + classes = 10 + + kfold3 = KFold(n_splits=3) + kfold4 = KFold(n_splits=4) + + le = LabelEncoder() + + methods = ["decision_function", "predict_proba", "predict_log_proba"] + for method in methods: + est = LogisticRegression(solver="liblinear") + + # Test with n_splits=3 + predictions = cross_val_predict(est, X, y, method=method, cv=kfold3) + + # Runs a naive loop (should be same as cross_val_predict): + expected_predictions = get_expected_predictions( + X, y, kfold3, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + # Test with n_splits=4 + predictions = cross_val_predict(est, X, y, method=method, cv=kfold4) + expected_predictions = get_expected_predictions( + X, y, kfold4, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + # Testing unordered labels + y = shuffle(np.repeat(range(10), 10), random_state=0) + predictions = cross_val_predict(est, X, y, method=method, cv=kfold3) + y = le.fit_transform(y) + expected_predictions = get_expected_predictions( + X, y, kfold3, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + +def test_score_memmap(): + # Ensure a scalar score of memmap type is accepted + iris = load_iris() + X, y = iris.data, iris.target + clf = MockClassifier() + tf = tempfile.NamedTemporaryFile(mode="wb", delete=False) + tf.write(b"Hello world!!!!!") + tf.close() + scores = np.memmap(tf.name, dtype=np.float64) + score = np.memmap(tf.name, shape=(), mode="r", dtype=np.float64) + try: + cross_val_score(clf, X, y, scoring=lambda est, X, y: score) + with pytest.raises(ValueError): + cross_val_score(clf, X, y, scoring=lambda est, X, y: scores) + finally: + # Best effort to release the mmap file handles before deleting the + # backing file under Windows + scores, score = None, None + for _ in range(3): + try: + os.unlink(tf.name) + break + except OSError: + sleep(1.0) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +def test_permutation_test_score_pandas(): + # check permutation_test_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + iris = load_iris() + X, 
y = iris.data, iris.target + X_df, y_ser = InputFeatureType(X), TargetType(y) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + permutation_test_score(clf, X_df, y_ser) + + +def test_fit_and_score_failing(): + # Create a failing classifier to deliberately fail + failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER) + # dummy X data + X = np.arange(1, 10) + fit_and_score_args = dict( + estimator=failing_clf, + X=X, + y=None, + scorer=dict(), + train=None, + test=None, + verbose=0, + parameters=None, + fit_params=None, + score_params=None, + ) + # passing error score to trigger the warning message + fit_and_score_args["error_score"] = "raise" + # check if exception was raised, with default error_score='raise' + with pytest.raises(ValueError, match="Failing classifier failed as required"): + _fit_and_score(**fit_and_score_args) + + assert failing_clf.score() == 0.0 # FailingClassifier coverage + + +def test_fit_and_score_working(): + X, y = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + train, test = next(ShuffleSplit().split(X)) + # Test return_parameters option + fit_and_score_args = dict( + estimator=clf, + X=X, + y=y, + scorer=dict(), + train=train, + test=test, + verbose=0, + parameters={"max_iter": 100, "tol": 0.1}, + fit_params=None, + score_params=None, + return_parameters=True, + ) + result = _fit_and_score(**fit_and_score_args) + assert result["parameters"] == fit_and_score_args["parameters"] + + +class DataDependentFailingClassifier(BaseEstimator): + def __init__(self, max_x_value=None): + self.max_x_value = max_x_value + + def fit(self, X, y=None): + num_values_too_high = (X > self.max_x_value).sum() + if num_values_too_high: + raise ValueError( + f"Classifier fit failed with {num_values_too_high} values too high" + ) + + def score(self, X=None, Y=None): + return 0.0 + + +@pytest.mark.parametrize("error_score", [np.nan, 0]) +def test_cross_validate_some_failing_fits_warning(error_score): + # Create a failing classifier to deliberately fail + failing_clf = DataDependentFailingClassifier(max_x_value=8) + # dummy X data + X = np.arange(1, 10) + y = np.ones(9) + # passing error score to trigger the warning message + cross_validate_args = [failing_clf, X, y] + cross_validate_kwargs = {"cv": 3, "error_score": error_score} + # check if the warning message type is as expected + + individual_fit_error_message = ( + "ValueError: Classifier fit failed with 1 values too high" + ) + warning_message = re.compile( + ( + "2 fits failed.+total of 3.+The score on these" + " train-test partitions for these parameters will be set to" + f" {cross_validate_kwargs['error_score']}.+{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.warns(FitFailedWarning, match=warning_message): + cross_validate(*cross_validate_args, **cross_validate_kwargs) + + +@pytest.mark.parametrize("error_score", [np.nan, 0]) +def test_cross_validate_all_failing_fits_error(error_score): + # Create a failing classifier to deliberately fail + failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER) + # dummy X data + X = np.arange(1, 10) + y = np.ones(9) + + cross_validate_args = [failing_clf, X, y] + cross_validate_kwargs = {"cv": 7, "error_score": error_score} + + individual_fit_error_message = "ValueError: Failing classifier failed as required" + error_message = re.compile( + ( + "All the 7 fits 
failed.+your model is misconfigured.+" + f"{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.raises(ValueError, match=error_message): + cross_validate(*cross_validate_args, **cross_validate_kwargs) + + +def _failing_scorer(estimator, X, y, error_msg): + raise ValueError(error_msg) + + +@pytest.mark.filterwarnings("ignore:lbfgs failed to converge") +@pytest.mark.parametrize("error_score", [np.nan, 0, "raise"]) +def test_cross_val_score_failing_scorer(error_score): + # check that an estimator can fail during scoring in `cross_val_score` and + # that we can optionally replaced it with `error_score` + X, y = load_iris(return_X_y=True) + clf = LogisticRegression(max_iter=5).fit(X, y) + + error_msg = "This scorer is supposed to fail!!!" + failing_scorer = partial(_failing_scorer, error_msg=error_msg) + + if error_score == "raise": + with pytest.raises(ValueError, match=error_msg): + cross_val_score( + clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score + ) + else: + warning_msg = ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}" + ) + with pytest.warns(UserWarning, match=warning_msg): + scores = cross_val_score( + clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score + ) + assert_allclose(scores, error_score) + + +@pytest.mark.filterwarnings("ignore:lbfgs failed to converge") +@pytest.mark.parametrize("error_score", [np.nan, 0, "raise"]) +@pytest.mark.parametrize("return_train_score", [True, False]) +@pytest.mark.parametrize("with_multimetric", [False, True]) +def test_cross_validate_failing_scorer( + error_score, return_train_score, with_multimetric +): + # Check that an estimator can fail during scoring in `cross_validate` and + # that we can optionally replace it with `error_score`. In the multimetric + # case also check the result of a non-failing scorer where the other scorers + # are failing. + X, y = load_iris(return_X_y=True) + clf = LogisticRegression(max_iter=5).fit(X, y) + + error_msg = "This scorer is supposed to fail!!!" + failing_scorer = partial(_failing_scorer, error_msg=error_msg) + if with_multimetric: + non_failing_scorer = make_scorer(mean_squared_error) + scoring = { + "score_1": failing_scorer, + "score_2": non_failing_scorer, + "score_3": failing_scorer, + } + else: + scoring = failing_scorer + + if error_score == "raise": + with pytest.raises(ValueError, match=error_msg): + cross_validate( + clf, + X, + y, + cv=3, + scoring=scoring, + return_train_score=return_train_score, + error_score=error_score, + ) + else: + warning_msg = ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}" + ) + with pytest.warns(UserWarning, match=warning_msg): + results = cross_validate( + clf, + X, + y, + cv=3, + scoring=scoring, + return_train_score=return_train_score, + error_score=error_score, + ) + for key in results: + if "_score" in key: + if "_score_2" in key: + # check the test (and optionally train) score for the + # scorer that should be non-failing + for i in results[key]: + assert isinstance(i, float) + else: + # check the test (and optionally train) score for all + # scorers that should be assigned to `error_score`. 
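+ # (assert_allclose treats NaN as equal to NaN by default, so this
+ # single check also covers the error_score=np.nan case.)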
+ assert_allclose(results[key], error_score) + + +def three_params_scorer(i, j, k): + return 3.4213 + + +@pytest.mark.parametrize( + "train_score, scorer, verbose, split_prg, cdt_prg, expected", + [ + ( + False, + three_params_scorer, + 2, + (1, 3), + (0, 1), + r"\[CV\] END ...................................................." + r" total time= 0.\ds", + ), + ( + True, + {"sc1": three_params_scorer, "sc2": three_params_scorer}, + 3, + (1, 3), + (0, 1), + r"\[CV 2/3\] END sc1: \(train=3.421, test=3.421\) sc2: " + r"\(train=3.421, test=3.421\) total time= 0.\ds", + ), + ( + False, + {"sc1": three_params_scorer, "sc2": three_params_scorer}, + 10, + (1, 3), + (0, 1), + r"\[CV 2/3; 1/1\] END ....... sc1: \(test=3.421\) sc2: \(test=3.421\)" + r" total time= 0.\ds", + ), + ], +) +def test_fit_and_score_verbosity( + capsys, train_score, scorer, verbose, split_prg, cdt_prg, expected +): + X, y = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + train, test = next(ShuffleSplit().split(X)) + + # test print without train score + fit_and_score_args = dict( + estimator=clf, + X=X, + y=y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=None, + score_params=None, + return_train_score=train_score, + split_progress=split_prg, + candidate_progress=cdt_prg, + ) + _fit_and_score(**fit_and_score_args) + out, _ = capsys.readouterr() + outlines = out.split("\n") + if len(outlines) > 2: + assert re.match(expected, outlines[1]) + else: + assert re.match(expected, outlines[0]) + + +def test_score(): + error_message = "scoring must return a number, got None" + + def two_params_scorer(estimator, X_test): + return None + + with pytest.raises(ValueError, match=error_message): + _score( + estimator=None, + X_test=None, + y_test=None, + scorer=two_params_scorer, + score_params=None, + error_score=np.nan, + ) + + +def test_callable_multimetric_confusion_matrix_cross_validate(): + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + cm = confusion_matrix(y, y_pred) + return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + est.fit(X, y) + cv_results = cross_validate(est, X, y, cv=5, scoring=custom_scorer) + + score_names = ["tn", "fp", "fn", "tp"] + for name in score_names: + assert "test_{}".format(name) in cv_results + + +def test_learning_curve_partial_fit_regressors(): + """Check that regressors with partial_fit is supported. + + Non-regression test for #22981. 
+ """ + X, y = make_regression(random_state=42) + + # Does not error + learning_curve(MLPRegressor(), X, y, exploit_incremental_learning=True, cv=2) + + +def test_learning_curve_some_failing_fits_warning(global_random_seed): + """Checks for fit failures in `learning_curve` and raises the required warning""" + + X, y = make_classification( + n_samples=30, + n_classes=3, + n_informative=6, + shuffle=False, + random_state=global_random_seed, + ) + # sorting the target to trigger SVC error on the 2 first splits because a single + # class is present + sorted_idx = np.argsort(y) + X, y = X[sorted_idx], y[sorted_idx] + + svc = SVC() + warning_message = "10 fits failed out of a total of 25" + + with pytest.warns(FitFailedWarning, match=warning_message): + _, train_score, test_score, *_ = learning_curve( + svc, X, y, cv=5, error_score=np.nan + ) + + # the first 2 splits should lead to warnings and thus np.nan scores + for idx in range(2): + assert np.isnan(train_score[idx]).all() + assert np.isnan(test_score[idx]).all() + + for idx in range(2, train_score.shape[0]): + assert not np.isnan(train_score[idx]).any() + assert not np.isnan(test_score[idx]).any() + + +def test_cross_validate_return_indices(global_random_seed): + """Check the behaviour of `return_indices` in `cross_validate`.""" + X, y = load_iris(return_X_y=True) + X = scale(X) # scale features for better convergence + estimator = LogisticRegression() + + cv = KFold(n_splits=3, shuffle=True, random_state=global_random_seed) + cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=False) + assert "indices" not in cv_results + + cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=True) + assert "indices" in cv_results + train_indices = cv_results["indices"]["train"] + test_indices = cv_results["indices"]["test"] + assert len(train_indices) == cv.n_splits + assert len(test_indices) == cv.n_splits + + assert_array_equal([indices.size for indices in train_indices], 100) + assert_array_equal([indices.size for indices in test_indices], 50) + + for split_idx, (expected_train_idx, expected_test_idx) in enumerate(cv.split(X, y)): + assert_array_equal(train_indices[split_idx], expected_train_idx) + assert_array_equal(test_indices[split_idx], expected_test_idx) + + +# Tests for metadata routing in cross_val* +# ======================================== + + +# TODO(1.6): remove this test in 1.6 +def test_cross_validate_fit_param_deprecation(): + """Check that we warn about deprecating `fit_params`.""" + with pytest.warns(FutureWarning, match="`fit_params` is deprecated"): + cross_validate(estimator=ConsumingClassifier(), X=X, y=y, cv=2, fit_params={}) + + with pytest.raises( + ValueError, match="`params` and `fit_params` cannot both be provided" + ): + cross_validate( + estimator=ConsumingClassifier(), X=X, y=y, fit_params={}, params={} + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_groups_with_routing_validation(cv_method): + """Check that we raise an error if `groups` are passed to the cv method instead + of `params` when metadata routing is enabled. 
+ """ + with pytest.raises(ValueError, match="`groups` can only be passed if"): + cv_method( + estimator=ConsumingClassifier(), + X=X, + y=y, + groups=[], + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_passed_unrequested_metadata(cv_method): + """Check that we raise an error when passing metadata that is not + requested.""" + err_msg = re.escape("but are not explicitly set as requested or not requested") + with pytest.raises(ValueError, match=err_msg): + cv_method( + estimator=ConsumingClassifier(), + X=X, + y=y, + params=dict(metadata=[]), + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_cross_validate_routing(cv_method): + """Check that the respective cv method is properly dispatching the metadata + to the consumer.""" + scorer_registry = _Registry() + scorer = ConsumingScorer(registry=scorer_registry).set_score_request( + sample_weight="score_weights", metadata="score_metadata" + ) + splitter_registry = _Registry() + splitter = ConsumingSplitter(registry=splitter_registry).set_split_request( + groups="split_groups", metadata="split_metadata" + ) + estimator_registry = _Registry() + estimator = ConsumingClassifier(registry=estimator_registry).set_fit_request( + sample_weight="fit_sample_weight", metadata="fit_metadata" + ) + n_samples = _num_samples(X) + rng = np.random.RandomState(0) + score_weights = rng.rand(n_samples) + score_metadata = rng.rand(n_samples) + split_groups = rng.randint(0, 3, n_samples) + split_metadata = rng.rand(n_samples) + fit_sample_weight = rng.rand(n_samples) + fit_metadata = rng.rand(n_samples) + + extra_params = { + cross_validate: dict(scoring=dict(my_scorer=scorer, accuracy="accuracy")), + # cross_val_score doesn't support multiple scorers + cross_val_score: dict(scoring=scorer), + # cross_val_predict doesn't need a scorer + cross_val_predict: dict(), + } + + params = dict( + split_groups=split_groups, + split_metadata=split_metadata, + fit_sample_weight=fit_sample_weight, + fit_metadata=fit_metadata, + ) + + if cv_method is not cross_val_predict: + params.update( + score_weights=score_weights, + score_metadata=score_metadata, + ) + + cv_method( + estimator, + X=X, + y=y, + cv=splitter, + **extra_params[cv_method], + params=params, + ) + + if cv_method is not cross_val_predict: + # cross_val_predict doesn't need a scorer + assert len(scorer_registry) + for _scorer in scorer_registry: + check_recorded_metadata( + obj=_scorer, + method="score", + split_params=("sample_weight", "metadata"), + sample_weight=score_weights, + metadata=score_metadata, + ) + + assert len(splitter_registry) + for _splitter in splitter_registry: + check_recorded_metadata( + obj=_splitter, + method="split", + groups=split_groups, + metadata=split_metadata, + ) + + assert len(estimator_registry) + for _estimator in estimator_registry: + check_recorded_metadata( + obj=_estimator, + method="fit", + split_params=("sample_weight", "metadata"), + sample_weight=fit_sample_weight, + metadata=fit_metadata, + ) + + +# End of metadata routing tests +# ============================= diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/neural_network/_multilayer_perceptron.py b/llmeval-env/lib/python3.10/site-packages/sklearn/neural_network/_multilayer_perceptron.py new file mode 100644 index 
0000000000000000000000000000000000000000..5175247204fb8e94d0a9a95c74fc7feb6f0cea03 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/neural_network/_multilayer_perceptron.py @@ -0,0 +1,1645 @@ +"""Multi-layer Perceptron +""" + +# Authors: Issam H. Laradji +# Andreas Mueller +# Jiyuan Qian +# License: BSD 3 clause + +import warnings +from abc import ABCMeta, abstractmethod +from itertools import chain +from numbers import Integral, Real + +import numpy as np +import scipy.optimize + +from ..base import ( + BaseEstimator, + ClassifierMixin, + RegressorMixin, + _fit_context, + is_classifier, +) +from ..exceptions import ConvergenceWarning +from ..metrics import accuracy_score, r2_score +from ..model_selection import train_test_split +from ..preprocessing import LabelBinarizer +from ..utils import ( + _safe_indexing, + check_random_state, + column_or_1d, + gen_batches, + shuffle, +) +from ..utils._param_validation import Interval, Options, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.metaestimators import available_if +from ..utils.multiclass import ( + _check_partial_fit_first_call, + type_of_target, + unique_labels, +) +from ..utils.optimize import _check_optimize_result +from ..utils.validation import check_is_fitted +from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS +from ._stochastic_optimizers import AdamOptimizer, SGDOptimizer + +_STOCHASTIC_SOLVERS = ["sgd", "adam"] + + +def _pack(coefs_, intercepts_): + """Pack the parameters into a single vector.""" + return np.hstack([l.ravel() for l in coefs_ + intercepts_]) + + +class BaseMultilayerPerceptron(BaseEstimator, metaclass=ABCMeta): + """Base class for MLP classification and regression. + + Warning: This class should not be used directly. + Use derived classes instead. + + .. 
versionadded:: 0.18 + """ + + _parameter_constraints: dict = { + "hidden_layer_sizes": [ + "array-like", + Interval(Integral, 1, None, closed="left"), + ], + "activation": [StrOptions({"identity", "logistic", "tanh", "relu"})], + "solver": [StrOptions({"lbfgs", "sgd", "adam"})], + "alpha": [Interval(Real, 0, None, closed="left")], + "batch_size": [ + StrOptions({"auto"}), + Interval(Integral, 1, None, closed="left"), + ], + "learning_rate": [StrOptions({"constant", "invscaling", "adaptive"})], + "learning_rate_init": [Interval(Real, 0, None, closed="neither")], + "power_t": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "tol": [Interval(Real, 0, None, closed="left")], + "verbose": ["verbose"], + "warm_start": ["boolean"], + "momentum": [Interval(Real, 0, 1, closed="both")], + "nesterovs_momentum": ["boolean"], + "early_stopping": ["boolean"], + "validation_fraction": [Interval(Real, 0, 1, closed="left")], + "beta_1": [Interval(Real, 0, 1, closed="left")], + "beta_2": [Interval(Real, 0, 1, closed="left")], + "epsilon": [Interval(Real, 0, None, closed="neither")], + "n_iter_no_change": [ + Interval(Integral, 1, None, closed="left"), + Options(Real, {np.inf}), + ], + "max_fun": [Interval(Integral, 1, None, closed="left")], + } + + @abstractmethod + def __init__( + self, + hidden_layer_sizes, + activation, + solver, + alpha, + batch_size, + learning_rate, + learning_rate_init, + power_t, + max_iter, + loss, + shuffle, + random_state, + tol, + verbose, + warm_start, + momentum, + nesterovs_momentum, + early_stopping, + validation_fraction, + beta_1, + beta_2, + epsilon, + n_iter_no_change, + max_fun, + ): + self.activation = activation + self.solver = solver + self.alpha = alpha + self.batch_size = batch_size + self.learning_rate = learning_rate + self.learning_rate_init = learning_rate_init + self.power_t = power_t + self.max_iter = max_iter + self.loss = loss + self.hidden_layer_sizes = hidden_layer_sizes + self.shuffle = shuffle + self.random_state = random_state + self.tol = tol + self.verbose = verbose + self.warm_start = warm_start + self.momentum = momentum + self.nesterovs_momentum = nesterovs_momentum + self.early_stopping = early_stopping + self.validation_fraction = validation_fraction + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + self.n_iter_no_change = n_iter_no_change + self.max_fun = max_fun + + def _unpack(self, packed_parameters): + """Extract the coefficients and intercepts from packed_parameters.""" + for i in range(self.n_layers_ - 1): + start, end, shape = self._coef_indptr[i] + self.coefs_[i] = np.reshape(packed_parameters[start:end], shape) + + start, end = self._intercept_indptr[i] + self.intercepts_[i] = packed_parameters[start:end] + + def _forward_pass(self, activations): + """Perform a forward pass on the network by computing the values + of the neurons in the hidden layers and the output layer. + + Parameters + ---------- + activations : list, length = n_layers - 1 + The ith element of the list holds the values of the ith layer. 
+ """ + hidden_activation = ACTIVATIONS[self.activation] + # Iterate over the hidden layers + for i in range(self.n_layers_ - 1): + activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i]) + activations[i + 1] += self.intercepts_[i] + + # For the hidden layers + if (i + 1) != (self.n_layers_ - 1): + hidden_activation(activations[i + 1]) + + # For the last layer + output_activation = ACTIVATIONS[self.out_activation_] + output_activation(activations[i + 1]) + + return activations + + def _forward_pass_fast(self, X, check_input=True): + """Predict using the trained model + + This is the same as _forward_pass but does not record the activations + of all layers and only returns the last layer's activation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + check_input : bool, default=True + Perform input data validation or not. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs) + The decision function of the samples for each class in the model. + """ + if check_input: + X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False) + + # Initialize first layer + activation = X + + # Forward propagate + hidden_activation = ACTIVATIONS[self.activation] + for i in range(self.n_layers_ - 1): + activation = safe_sparse_dot(activation, self.coefs_[i]) + activation += self.intercepts_[i] + if i != self.n_layers_ - 2: + hidden_activation(activation) + output_activation = ACTIVATIONS[self.out_activation_] + output_activation(activation) + + return activation + + def _compute_loss_grad( + self, layer, n_samples, activations, deltas, coef_grads, intercept_grads + ): + """Compute the gradient of loss with respect to coefs and intercept for + specified layer. + + This function does backpropagation for the specified one layer. + """ + coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer]) + coef_grads[layer] += self.alpha * self.coefs_[layer] + coef_grads[layer] /= n_samples + + intercept_grads[layer] = np.mean(deltas[layer], 0) + + def _loss_grad_lbfgs( + self, packed_coef_inter, X, y, activations, deltas, coef_grads, intercept_grads + ): + """Compute the MLP loss function and its corresponding derivatives + with respect to the different parameters given in the initialization. + + Returned gradients are packed in a single vector so it can be used + in lbfgs + + Parameters + ---------- + packed_coef_inter : ndarray + A vector comprising the flattened coefficients and intercepts. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : ndarray of shape (n_samples,) + The target values. + + activations : list, length = n_layers - 1 + The ith element of the list holds the values of the ith layer. + + deltas : list, length = n_layers - 1 + The ith element of the list holds the difference between the + activations of the i + 1 layer and the backpropagated error. + More specifically, deltas are gradients of loss with respect to z + in each layer, where z = wx + b is the value of a particular layer + before passing through the activation function + + coef_grads : list, length = n_layers - 1 + The ith element contains the amount of change used to update the + coefficient parameters of the ith layer in an iteration. + + intercept_grads : list, length = n_layers - 1 + The ith element contains the amount of change used to update the + intercept parameters of the ith layer in an iteration. 
+ + Returns + ------- + loss : float + grad : array-like, shape (number of nodes of all layers,) + """ + self._unpack(packed_coef_inter) + loss, coef_grads, intercept_grads = self._backprop( + X, y, activations, deltas, coef_grads, intercept_grads + ) + grad = _pack(coef_grads, intercept_grads) + return loss, grad + + def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads): + """Compute the MLP loss function and its corresponding derivatives + with respect to each parameter: weights and bias vectors. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : ndarray of shape (n_samples,) + The target values. + + activations : list, length = n_layers - 1 + The ith element of the list holds the values of the ith layer. + + deltas : list, length = n_layers - 1 + The ith element of the list holds the difference between the + activations of the i + 1 layer and the backpropagated error. + More specifically, deltas are gradients of loss with respect to z + in each layer, where z = wx + b is the value of a particular layer + before passing through the activation function + + coef_grads : list, length = n_layers - 1 + The ith element contains the amount of change used to update the + coefficient parameters of the ith layer in an iteration. + + intercept_grads : list, length = n_layers - 1 + The ith element contains the amount of change used to update the + intercept parameters of the ith layer in an iteration. + + Returns + ------- + loss : float + coef_grads : list, length = n_layers - 1 + intercept_grads : list, length = n_layers - 1 + """ + n_samples = X.shape[0] + + # Forward propagate + activations = self._forward_pass(activations) + + # Get loss + loss_func_name = self.loss + if loss_func_name == "log_loss" and self.out_activation_ == "logistic": + loss_func_name = "binary_log_loss" + loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1]) + # Add L2 regularization term to loss + values = 0 + for s in self.coefs_: + s = s.ravel() + values += np.dot(s, s) + loss += (0.5 * self.alpha) * values / n_samples + + # Backward propagate + last = self.n_layers_ - 2 + + # The calculation of delta[last] here works with following + # combinations of output activation and loss function: + # sigmoid and binary cross entropy, softmax and categorical cross + # entropy, and identity with squared loss + deltas[last] = activations[-1] - y + + # Compute gradient for the last layer + self._compute_loss_grad( + last, n_samples, activations, deltas, coef_grads, intercept_grads + ) + + inplace_derivative = DERIVATIVES[self.activation] + # Iterate over the hidden layers + for i in range(self.n_layers_ - 2, 0, -1): + deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T) + inplace_derivative(activations[i], deltas[i - 1]) + + self._compute_loss_grad( + i - 1, n_samples, activations, deltas, coef_grads, intercept_grads + ) + + return loss, coef_grads, intercept_grads + + def _initialize(self, y, layer_units, dtype): + # set all attributes, allocate weights etc. 
for first call + # Initialize parameters + self.n_iter_ = 0 + self.t_ = 0 + self.n_outputs_ = y.shape[1] + + # Compute the number of layers + self.n_layers_ = len(layer_units) + + # Output for regression + if not is_classifier(self): + self.out_activation_ = "identity" + # Output for multi class + elif self._label_binarizer.y_type_ == "multiclass": + self.out_activation_ = "softmax" + # Output for binary class and multi-label + else: + self.out_activation_ = "logistic" + + # Initialize coefficient and intercept layers + self.coefs_ = [] + self.intercepts_ = [] + + for i in range(self.n_layers_ - 1): + coef_init, intercept_init = self._init_coef( + layer_units[i], layer_units[i + 1], dtype + ) + self.coefs_.append(coef_init) + self.intercepts_.append(intercept_init) + + if self.solver in _STOCHASTIC_SOLVERS: + self.loss_curve_ = [] + self._no_improvement_count = 0 + if self.early_stopping: + self.validation_scores_ = [] + self.best_validation_score_ = -np.inf + self.best_loss_ = None + else: + self.best_loss_ = np.inf + self.validation_scores_ = None + self.best_validation_score_ = None + + def _init_coef(self, fan_in, fan_out, dtype): + # Use the initialization method recommended by + # Glorot et al. + factor = 6.0 + if self.activation == "logistic": + factor = 2.0 + init_bound = np.sqrt(factor / (fan_in + fan_out)) + + # Generate weights and bias: + coef_init = self._random_state.uniform( + -init_bound, init_bound, (fan_in, fan_out) + ) + intercept_init = self._random_state.uniform(-init_bound, init_bound, fan_out) + coef_init = coef_init.astype(dtype, copy=False) + intercept_init = intercept_init.astype(dtype, copy=False) + return coef_init, intercept_init + + def _fit(self, X, y, incremental=False): + # Make sure self.hidden_layer_sizes is a list + hidden_layer_sizes = self.hidden_layer_sizes + if not hasattr(hidden_layer_sizes, "__iter__"): + hidden_layer_sizes = [hidden_layer_sizes] + hidden_layer_sizes = list(hidden_layer_sizes) + + if np.any(np.array(hidden_layer_sizes) <= 0): + raise ValueError( + "hidden_layer_sizes must be > 0, got %s." 
% hidden_layer_sizes + ) + first_pass = not hasattr(self, "coefs_") or ( + not self.warm_start and not incremental + ) + + X, y = self._validate_input(X, y, incremental, reset=first_pass) + + n_samples, n_features = X.shape + + # Ensure y is 2D + if y.ndim == 1: + y = y.reshape((-1, 1)) + + self.n_outputs_ = y.shape[1] + + layer_units = [n_features] + hidden_layer_sizes + [self.n_outputs_] + + # check random state + self._random_state = check_random_state(self.random_state) + + if first_pass: + # First time training the model + self._initialize(y, layer_units, X.dtype) + + # Initialize lists + activations = [X] + [None] * (len(layer_units) - 1) + deltas = [None] * (len(activations) - 1) + + coef_grads = [ + np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype) + for n_fan_in_, n_fan_out_ in zip(layer_units[:-1], layer_units[1:]) + ] + + intercept_grads = [ + np.empty(n_fan_out_, dtype=X.dtype) for n_fan_out_ in layer_units[1:] + ] + + # Run the Stochastic optimization solver + if self.solver in _STOCHASTIC_SOLVERS: + self._fit_stochastic( + X, + y, + activations, + deltas, + coef_grads, + intercept_grads, + layer_units, + incremental, + ) + + # Run the LBFGS solver + elif self.solver == "lbfgs": + self._fit_lbfgs( + X, y, activations, deltas, coef_grads, intercept_grads, layer_units + ) + + # validate parameter weights + weights = chain(self.coefs_, self.intercepts_) + if not all(np.isfinite(w).all() for w in weights): + raise ValueError( + "Solver produced non-finite parameter weights. The input data may" + " contain large values and need to be preprocessed." + ) + + return self + + def _fit_lbfgs( + self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units + ): + # Store meta information for the parameters + self._coef_indptr = [] + self._intercept_indptr = [] + start = 0 + + # Save sizes and indices of coefficients for faster unpacking + for i in range(self.n_layers_ - 1): + n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1] + + end = start + (n_fan_in * n_fan_out) + self._coef_indptr.append((start, end, (n_fan_in, n_fan_out))) + start = end + + # Save sizes and indices of intercepts for faster unpacking + for i in range(self.n_layers_ - 1): + end = start + layer_units[i + 1] + self._intercept_indptr.append((start, end)) + start = end + + # Run LBFGS + packed_coef_inter = _pack(self.coefs_, self.intercepts_) + + if self.verbose is True or self.verbose >= 1: + iprint = 1 + else: + iprint = -1 + + opt_res = scipy.optimize.minimize( + self._loss_grad_lbfgs, + packed_coef_inter, + method="L-BFGS-B", + jac=True, + options={ + "maxfun": self.max_fun, + "maxiter": self.max_iter, + "iprint": iprint, + "gtol": self.tol, + }, + args=(X, y, activations, deltas, coef_grads, intercept_grads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter) + self.loss_ = opt_res.fun + self._unpack(opt_res.x) + + def _fit_stochastic( + self, + X, + y, + activations, + deltas, + coef_grads, + intercept_grads, + layer_units, + incremental, + ): + params = self.coefs_ + self.intercepts_ + if not incremental or not hasattr(self, "_optimizer"): + if self.solver == "sgd": + self._optimizer = SGDOptimizer( + params, + self.learning_rate_init, + self.learning_rate, + self.momentum, + self.nesterovs_momentum, + self.power_t, + ) + elif self.solver == "adam": + self._optimizer = AdamOptimizer( + params, + self.learning_rate_init, + self.beta_1, + self.beta_2, + self.epsilon, + ) + + # early_stopping in partial_fit doesn't make sense + if self.early_stopping and incremental: + raise 
ValueError("partial_fit does not support early_stopping=True") + early_stopping = self.early_stopping + if early_stopping: + # don't stratify in multilabel classification + should_stratify = is_classifier(self) and self.n_outputs_ == 1 + stratify = y if should_stratify else None + X, X_val, y, y_val = train_test_split( + X, + y, + random_state=self._random_state, + test_size=self.validation_fraction, + stratify=stratify, + ) + if is_classifier(self): + y_val = self._label_binarizer.inverse_transform(y_val) + else: + X_val = None + y_val = None + + n_samples = X.shape[0] + sample_idx = np.arange(n_samples, dtype=int) + + if self.batch_size == "auto": + batch_size = min(200, n_samples) + else: + if self.batch_size > n_samples: + warnings.warn( + "Got `batch_size` less than 1 or larger than " + "sample size. It is going to be clipped" + ) + batch_size = np.clip(self.batch_size, 1, n_samples) + + try: + self.n_iter_ = 0 + for it in range(self.max_iter): + if self.shuffle: + # Only shuffle the sample indices instead of X and y to + # reduce the memory footprint. These indices will be used + # to slice the X and y. + sample_idx = shuffle(sample_idx, random_state=self._random_state) + + accumulated_loss = 0.0 + for batch_slice in gen_batches(n_samples, batch_size): + if self.shuffle: + X_batch = _safe_indexing(X, sample_idx[batch_slice]) + y_batch = y[sample_idx[batch_slice]] + else: + X_batch = X[batch_slice] + y_batch = y[batch_slice] + + activations[0] = X_batch + batch_loss, coef_grads, intercept_grads = self._backprop( + X_batch, + y_batch, + activations, + deltas, + coef_grads, + intercept_grads, + ) + accumulated_loss += batch_loss * ( + batch_slice.stop - batch_slice.start + ) + + # update weights + grads = coef_grads + intercept_grads + self._optimizer.update_params(params, grads) + + self.n_iter_ += 1 + self.loss_ = accumulated_loss / X.shape[0] + + self.t_ += n_samples + self.loss_curve_.append(self.loss_) + if self.verbose: + print("Iteration %d, loss = %.8f" % (self.n_iter_, self.loss_)) + + # update no_improvement_count based on training loss or + # validation score according to early_stopping + self._update_no_improvement_count(early_stopping, X_val, y_val) + + # for learning rate that needs to be updated at iteration end + self._optimizer.iteration_ends(self.t_) + + if self._no_improvement_count > self.n_iter_no_change: + # not better than last `n_iter_no_change` iterations by tol + # stop or decrease learning rate + if early_stopping: + msg = ( + "Validation score did not improve more than " + "tol=%f for %d consecutive epochs." + % (self.tol, self.n_iter_no_change) + ) + else: + msg = ( + "Training loss did not improve more than tol=%f" + " for %d consecutive epochs." + % (self.tol, self.n_iter_no_change) + ) + + is_stopping = self._optimizer.trigger_stopping(msg, self.verbose) + if is_stopping: + break + else: + self._no_improvement_count = 0 + + if incremental: + break + + if self.n_iter_ == self.max_iter: + warnings.warn( + "Stochastic Optimizer: Maximum iterations (%d) " + "reached and the optimization hasn't converged yet." 
+ % self.max_iter, + ConvergenceWarning, + ) + except KeyboardInterrupt: + warnings.warn("Training interrupted by user.") + + if early_stopping: + # restore best weights + self.coefs_ = self._best_coefs + self.intercepts_ = self._best_intercepts + + def _update_no_improvement_count(self, early_stopping, X_val, y_val): + if early_stopping: + # compute validation score, use that for stopping + self.validation_scores_.append(self._score(X_val, y_val)) + + if self.verbose: + print("Validation score: %f" % self.validation_scores_[-1]) + # update best parameters + # use validation_scores_, not loss_curve_ + # let's hope no-one overloads .score with mse + last_valid_score = self.validation_scores_[-1] + + if last_valid_score < (self.best_validation_score_ + self.tol): + self._no_improvement_count += 1 + else: + self._no_improvement_count = 0 + + if last_valid_score > self.best_validation_score_: + self.best_validation_score_ = last_valid_score + self._best_coefs = [c.copy() for c in self.coefs_] + self._best_intercepts = [i.copy() for i in self.intercepts_] + else: + if self.loss_curve_[-1] > self.best_loss_ - self.tol: + self._no_improvement_count += 1 + else: + self._no_improvement_count = 0 + if self.loss_curve_[-1] < self.best_loss_: + self.best_loss_ = self.loss_curve_[-1] + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model to data matrix X and target(s) y. + + Parameters + ---------- + X : ndarray or sparse matrix of shape (n_samples, n_features) + The input data. + + y : ndarray of shape (n_samples,) or (n_samples, n_outputs) + The target values (class labels in classification, real numbers in + regression). + + Returns + ------- + self : object + Returns a trained MLP model. + """ + return self._fit(X, y, incremental=False) + + def _check_solver(self): + if self.solver not in _STOCHASTIC_SOLVERS: + raise AttributeError( + "partial_fit is only available for stochastic" + " optimizers. %s is not stochastic." + % self.solver + ) + return True + + +class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron): + """Multi-layer Perceptron classifier. + + This model optimizes the log-loss function using LBFGS or stochastic + gradient descent. + + .. versionadded:: 0.18 + + Parameters + ---------- + hidden_layer_sizes : array-like of shape(n_layers - 2,), default=(100,) + The ith element represents the number of neurons in the ith + hidden layer. + + activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu' + Activation function for the hidden layer. + + - 'identity', no-op activation, useful to implement linear bottleneck, + returns f(x) = x + + - 'logistic', the logistic sigmoid function, + returns f(x) = 1 / (1 + exp(-x)). + + - 'tanh', the hyperbolic tan function, + returns f(x) = tanh(x). + + - 'relu', the rectified linear unit function, + returns f(x) = max(0, x) + + solver : {'lbfgs', 'sgd', 'adam'}, default='adam' + The solver for weight optimization. + + - 'lbfgs' is an optimizer in the family of quasi-Newton methods. + + - 'sgd' refers to stochastic gradient descent. + + - 'adam' refers to a stochastic gradient-based optimizer proposed + by Kingma, Diederik, and Jimmy Ba + + Note: The default solver 'adam' works pretty well on relatively + large datasets (with thousands of training samples or more) in terms of + both training time and validation score. + For small datasets, however, 'lbfgs' can converge faster and perform + better. + + alpha : float, default=0.0001 + Strength of the L2 regularization term. 
The L2 regularization term + is divided by the sample size when added to the loss. + + batch_size : int, default='auto' + Size of minibatches for stochastic optimizers. + If the solver is 'lbfgs', the classifier will not use minibatch. + When set to "auto", `batch_size=min(200, n_samples)`. + + learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant' + Learning rate schedule for weight updates. + + - 'constant' is a constant learning rate given by + 'learning_rate_init'. + + - 'invscaling' gradually decreases the learning rate at each + time step 't' using an inverse scaling exponent of 'power_t'. + effective_learning_rate = learning_rate_init / pow(t, power_t) + + - 'adaptive' keeps the learning rate constant to + 'learning_rate_init' as long as training loss keeps decreasing. + Each time two consecutive epochs fail to decrease training loss by at + least tol, or fail to increase validation score by at least tol if + 'early_stopping' is on, the current learning rate is divided by 5. + + Only used when ``solver='sgd'``. + + learning_rate_init : float, default=0.001 + The initial learning rate used. It controls the step-size + in updating the weights. Only used when solver='sgd' or 'adam'. + + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + It is used in updating effective learning rate when the learning_rate + is set to 'invscaling'. Only used when solver='sgd'. + + max_iter : int, default=200 + Maximum number of iterations. The solver iterates until convergence + (determined by 'tol') or this number of iterations. For stochastic + solvers ('sgd', 'adam'), note that this determines the number of epochs + (how many times each data point will be used), not the number of + gradient steps. + + shuffle : bool, default=True + Whether to shuffle samples in each iteration. Only used when + solver='sgd' or 'adam'. + + random_state : int, RandomState instance, default=None + Determines random number generation for weights and bias + initialization, train-test split if early stopping is used, and batch + sampling when solver='sgd' or 'adam'. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + tol : float, default=1e-4 + Tolerance for the optimization. When the loss or score is not improving + by at least ``tol`` for ``n_iter_no_change`` consecutive iterations, + unless ``learning_rate`` is set to 'adaptive', convergence is + considered to be reached and training stops. + + verbose : bool, default=False + Whether to print progress messages to stdout. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous + call to fit as initialization, otherwise, just erase the + previous solution. See :term:`the Glossary `. + + momentum : float, default=0.9 + Momentum for gradient descent update. Should be between 0 and 1. Only + used when solver='sgd'. + + nesterovs_momentum : bool, default=True + Whether to use Nesterov's momentum. Only used when solver='sgd' and + momentum > 0. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to true, it will automatically set + aside 10% of training data as validation and terminate training when + validation score is not improving by at least ``tol`` for + ``n_iter_no_change`` consecutive epochs. The split is stratified, + except in a multilabel setting. 
+ If early stopping is False, then the training stops when the training + loss does not improve by more than tol for n_iter_no_change consecutive + passes over the training set. + Only effective when solver='sgd' or 'adam'. + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + beta_1 : float, default=0.9 + Exponential decay rate for estimates of first moment vector in adam, + should be in [0, 1). Only used when solver='adam'. + + beta_2 : float, default=0.999 + Exponential decay rate for estimates of second moment vector in adam, + should be in [0, 1). Only used when solver='adam'. + + epsilon : float, default=1e-8 + Value for numerical stability in adam. Only used when solver='adam'. + + n_iter_no_change : int, default=10 + Maximum number of epochs to not meet ``tol`` improvement. + Only effective when solver='sgd' or 'adam'. + + .. versionadded:: 0.20 + + max_fun : int, default=15000 + Only used when solver='lbfgs'. Maximum number of loss function calls. + The solver iterates until convergence (determined by 'tol'), number + of iterations reaches max_iter, or this number of loss function calls. + Note that number of loss function calls will be greater than or equal + to the number of iterations for the `MLPClassifier`. + + .. versionadded:: 0.22 + + Attributes + ---------- + classes_ : ndarray or list of ndarray of shape (n_classes,) + Class labels for each output. + + loss_ : float + The current loss computed with the loss function. + + best_loss_ : float or None + The minimum loss reached by the solver throughout fitting. + If `early_stopping=True`, this attribute is set to `None`. Refer to + the `best_validation_score_` fitted attribute instead. + + loss_curve_ : list of shape (`n_iter_`,) + The ith element in the list represents the loss at the ith iteration. + + validation_scores_ : list of shape (`n_iter_`,) or None + The score at each iteration on a held-out validation set. The score + reported is the accuracy score. Only available if `early_stopping=True`, + otherwise the attribute is set to `None`. + + best_validation_score_ : float or None + The best validation score (i.e. accuracy score) that triggered the + early stopping. Only available if `early_stopping=True`, otherwise the + attribute is set to `None`. + + t_ : int + The number of training samples seen by the solver during fitting. + + coefs_ : list of shape (n_layers - 1,) + The ith element in the list represents the weight matrix corresponding + to layer i. + + intercepts_ : list of shape (n_layers - 1,) + The ith element in the list represents the bias vector corresponding to + layer i + 1. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The number of iterations the solver has run. + + n_layers_ : int + Number of layers. + + n_outputs_ : int + Number of outputs. + + out_activation_ : str + Name of the output activation function. + + See Also + -------- + MLPRegressor : Multi-layer Perceptron regressor. + BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM). 
+ + Notes + ----- + MLPClassifier trains iteratively since at each time step + the partial derivatives of the loss function with respect to the model + parameters are computed to update the parameters. + + It can also have a regularization term added to the loss function + that shrinks model parameters to prevent overfitting. + + This implementation works with data represented as dense numpy arrays or + sparse scipy arrays of floating point values. + + References + ---------- + Hinton, Geoffrey E. "Connectionist learning procedures." + Artificial intelligence 40.1 (1989): 185-234. + + Glorot, Xavier, and Yoshua Bengio. + "Understanding the difficulty of training deep feedforward neural networks." + International Conference on Artificial Intelligence and Statistics. 2010. + + :arxiv:`He, Kaiming, et al (2015). "Delving deep into rectifiers: + Surpassing human-level performance on imagenet classification." <1502.01852>` + + :arxiv:`Kingma, Diederik, and Jimmy Ba (2014) + "Adam: A method for stochastic optimization." <1412.6980>` + + Examples + -------- + >>> from sklearn.neural_network import MLPClassifier + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> X, y = make_classification(n_samples=100, random_state=1) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, + ... random_state=1) + >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train) + >>> clf.predict_proba(X_test[:1]) + array([[0.038..., 0.961...]]) + >>> clf.predict(X_test[:5, :]) + array([1, 0, 1, 0, 1]) + >>> clf.score(X_test, y_test) + 0.8... + """ + + def __init__( + self, + hidden_layer_sizes=(100,), + activation="relu", + *, + solver="adam", + alpha=0.0001, + batch_size="auto", + learning_rate="constant", + learning_rate_init=0.001, + power_t=0.5, + max_iter=200, + shuffle=True, + random_state=None, + tol=1e-4, + verbose=False, + warm_start=False, + momentum=0.9, + nesterovs_momentum=True, + early_stopping=False, + validation_fraction=0.1, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-8, + n_iter_no_change=10, + max_fun=15000, + ): + super().__init__( + hidden_layer_sizes=hidden_layer_sizes, + activation=activation, + solver=solver, + alpha=alpha, + batch_size=batch_size, + learning_rate=learning_rate, + learning_rate_init=learning_rate_init, + power_t=power_t, + max_iter=max_iter, + loss="log_loss", + shuffle=shuffle, + random_state=random_state, + tol=tol, + verbose=verbose, + warm_start=warm_start, + momentum=momentum, + nesterovs_momentum=nesterovs_momentum, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + beta_1=beta_1, + beta_2=beta_2, + epsilon=epsilon, + n_iter_no_change=n_iter_no_change, + max_fun=max_fun, + ) + + def _validate_input(self, X, y, incremental, reset): + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc"], + multi_output=True, + dtype=(np.float64, np.float32), + reset=reset, + ) + if y.ndim == 2 and y.shape[1] == 1: + y = column_or_1d(y, warn=True) + + # Matrix of actions to be taken under the possible combinations: + # The case that incremental == True and classes_ not defined is + # already checked by _check_partial_fit_first_call that is called + # in _partial_fit below. + # The cases are already grouped into the respective if blocks below. 
+ # + # incremental warm_start classes_ def action + # 0 0 0 define classes_ + # 0 1 0 define classes_ + # 0 0 1 redefine classes_ + # + # 0 1 1 check compat warm_start + # 1 1 1 check compat warm_start + # + # 1 0 1 check compat last fit + # + # Note the reliance on short-circuiting here, so that the second + # or part implies that classes_ is defined. + if (not hasattr(self, "classes_")) or (not self.warm_start and not incremental): + self._label_binarizer = LabelBinarizer() + self._label_binarizer.fit(y) + self.classes_ = self._label_binarizer.classes_ + else: + classes = unique_labels(y) + if self.warm_start: + if set(classes) != set(self.classes_): + raise ValueError( + "warm_start can only be used where `y` has the same " + "classes as in the previous call to fit. Previously " + f"got {self.classes_}, `y` has {classes}" + ) + elif len(np.setdiff1d(classes, self.classes_, assume_unique=True)): + raise ValueError( + "`y` has classes not in `self.classes_`. " + f"`self.classes_` has {self.classes_}. 'y' has {classes}." + ) + + # This downcast to bool is to prevent upcasting when working with + # float32 data + y = self._label_binarizer.transform(y).astype(bool) + return X, y + + def predict(self, X): + """Predict using the multi-layer perceptron classifier. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + y : ndarray, shape (n_samples,) or (n_samples, n_classes) + The predicted classes. + """ + check_is_fitted(self) + return self._predict(X) + + def _predict(self, X, check_input=True): + """Private predict method with optional input validation""" + y_pred = self._forward_pass_fast(X, check_input=check_input) + + if self.n_outputs_ == 1: + y_pred = y_pred.ravel() + + return self._label_binarizer.inverse_transform(y_pred) + + def _score(self, X, y): + """Private score method without input validation""" + # Input validation would remove feature names, so we disable it + return accuracy_score(y, self._predict(X, check_input=False)) + + @available_if(lambda est: est._check_solver()) + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None): + """Update the model with a single iteration over the given data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : array-like of shape (n_samples,) + The target values. + + classes : array of shape (n_classes,), default=None + Classes across all calls to partial_fit. + Can be obtained via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that y doesn't need to contain all labels in `classes`. + + Returns + ------- + self : object + Trained MLP model. + """ + if _check_partial_fit_first_call(self, classes): + self._label_binarizer = LabelBinarizer() + if type_of_target(y).startswith("multilabel"): + self._label_binarizer.fit(y) + else: + self._label_binarizer.fit(classes) + + return self._fit(X, y, incremental=True) + + def predict_log_proba(self, X): + """Return the log of probability estimates. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The input data. + + Returns + ------- + log_y_prob : ndarray of shape (n_samples, n_classes) + The predicted log-probability of the sample for each class + in the model, where classes are ordered as they are in + `self.classes_`. 
Equivalent to `log(predict_proba(X))`. + """ + y_prob = self.predict_proba(X) + return np.log(y_prob, out=y_prob) + + def predict_proba(self, X): + """Probability estimates. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + y_prob : ndarray of shape (n_samples, n_classes) + The predicted probability of the sample for each class in the + model, where classes are ordered as they are in `self.classes_`. + """ + check_is_fitted(self) + y_pred = self._forward_pass_fast(X) + + if self.n_outputs_ == 1: + y_pred = y_pred.ravel() + + if y_pred.ndim == 1: + return np.vstack([1 - y_pred, y_pred]).T + else: + return y_pred + + def _more_tags(self): + return {"multilabel": True} + + +class MLPRegressor(RegressorMixin, BaseMultilayerPerceptron): + """Multi-layer Perceptron regressor. + + This model optimizes the squared error using LBFGS or stochastic gradient + descent. + + .. versionadded:: 0.18 + + Parameters + ---------- + hidden_layer_sizes : array-like of shape(n_layers - 2,), default=(100,) + The ith element represents the number of neurons in the ith + hidden layer. + + activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu' + Activation function for the hidden layer. + + - 'identity', no-op activation, useful to implement linear bottleneck, + returns f(x) = x + + - 'logistic', the logistic sigmoid function, + returns f(x) = 1 / (1 + exp(-x)). + + - 'tanh', the hyperbolic tan function, + returns f(x) = tanh(x). + + - 'relu', the rectified linear unit function, + returns f(x) = max(0, x) + + solver : {'lbfgs', 'sgd', 'adam'}, default='adam' + The solver for weight optimization. + + - 'lbfgs' is an optimizer in the family of quasi-Newton methods. + + - 'sgd' refers to stochastic gradient descent. + + - 'adam' refers to a stochastic gradient-based optimizer proposed by + Kingma, Diederik, and Jimmy Ba + + Note: The default solver 'adam' works pretty well on relatively + large datasets (with thousands of training samples or more) in terms of + both training time and validation score. + For small datasets, however, 'lbfgs' can converge faster and perform + better. + + alpha : float, default=0.0001 + Strength of the L2 regularization term. The L2 regularization term + is divided by the sample size when added to the loss. + + batch_size : int, default='auto' + Size of minibatches for stochastic optimizers. + If the solver is 'lbfgs', the regressor will not use minibatch. + When set to "auto", `batch_size=min(200, n_samples)`. + + learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant' + Learning rate schedule for weight updates. + + - 'constant' is a constant learning rate given by + 'learning_rate_init'. + + - 'invscaling' gradually decreases the learning rate ``learning_rate_`` + at each time step 't' using an inverse scaling exponent of 'power_t'. + effective_learning_rate = learning_rate_init / pow(t, power_t) + + - 'adaptive' keeps the learning rate constant to + 'learning_rate_init' as long as training loss keeps decreasing. + Each time two consecutive epochs fail to decrease training loss by at + least tol, or fail to increase validation score by at least tol if + 'early_stopping' is on, the current learning rate is divided by 5. + + Only used when solver='sgd'. + + learning_rate_init : float, default=0.001 + The initial learning rate used. It controls the step-size + in updating the weights. Only used when solver='sgd' or 'adam'. 
+ + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + It is used in updating effective learning rate when the learning_rate + is set to 'invscaling'. Only used when solver='sgd'. + + max_iter : int, default=200 + Maximum number of iterations. The solver iterates until convergence + (determined by 'tol') or this number of iterations. For stochastic + solvers ('sgd', 'adam'), note that this determines the number of epochs + (how many times each data point will be used), not the number of + gradient steps. + + shuffle : bool, default=True + Whether to shuffle samples in each iteration. Only used when + solver='sgd' or 'adam'. + + random_state : int, RandomState instance, default=None + Determines random number generation for weights and bias + initialization, train-test split if early stopping is used, and batch + sampling when solver='sgd' or 'adam'. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + tol : float, default=1e-4 + Tolerance for the optimization. When the loss or score is not improving + by at least ``tol`` for ``n_iter_no_change`` consecutive iterations, + unless ``learning_rate`` is set to 'adaptive', convergence is + considered to be reached and training stops. + + verbose : bool, default=False + Whether to print progress messages to stdout. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous + call to fit as initialization, otherwise, just erase the + previous solution. See :term:`the Glossary `. + + momentum : float, default=0.9 + Momentum for gradient descent update. Should be between 0 and 1. Only + used when solver='sgd'. + + nesterovs_momentum : bool, default=True + Whether to use Nesterov's momentum. Only used when solver='sgd' and + momentum > 0. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set + aside ``validation_fraction`` of training data as validation and + terminate training when validation score is not improving by at + least ``tol`` for ``n_iter_no_change`` consecutive epochs. + Only effective when solver='sgd' or 'adam'. + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + beta_1 : float, default=0.9 + Exponential decay rate for estimates of first moment vector in adam, + should be in [0, 1). Only used when solver='adam'. + + beta_2 : float, default=0.999 + Exponential decay rate for estimates of second moment vector in adam, + should be in [0, 1). Only used when solver='adam'. + + epsilon : float, default=1e-8 + Value for numerical stability in adam. Only used when solver='adam'. + + n_iter_no_change : int, default=10 + Maximum number of epochs to not meet ``tol`` improvement. + Only effective when solver='sgd' or 'adam'. + + .. versionadded:: 0.20 + + max_fun : int, default=15000 + Only used when solver='lbfgs'. Maximum number of function calls. + The solver iterates until convergence (determined by ``tol``), number + of iterations reaches max_iter, or this number of function calls. + Note that number of function calls will be greater than or equal to + the number of iterations for the MLPRegressor. + + .. versionadded:: 0.22 + + Attributes + ---------- + loss_ : float + The current loss computed with the loss function. 
+ + best_loss_ : float + The minimum loss reached by the solver throughout fitting. + If `early_stopping=True`, this attribute is set to `None`. Refer to + the `best_validation_score_` fitted attribute instead. + Only accessible when solver='sgd' or 'adam'. + + loss_curve_ : list of shape (`n_iter_`,) + Loss value evaluated at the end of each training step. + The ith element in the list represents the loss at the ith iteration. + Only accessible when solver='sgd' or 'adam'. + + validation_scores_ : list of shape (`n_iter_`,) or None + The score at each iteration on a held-out validation set. The score + reported is the R2 score. Only available if `early_stopping=True`, + otherwise the attribute is set to `None`. + Only accessible when solver='sgd' or 'adam'. + + best_validation_score_ : float or None + The best validation score (i.e. R2 score) that triggered the + early stopping. Only available if `early_stopping=True`, otherwise the + attribute is set to `None`. + Only accessible when solver='sgd' or 'adam'. + + t_ : int + The number of training samples seen by the solver during fitting. + Mathematically equals `n_iters * X.shape[0]`, it means + `time_step` and it is used by optimizer's learning rate scheduler. + + coefs_ : list of shape (n_layers - 1,) + The ith element in the list represents the weight matrix corresponding + to layer i. + + intercepts_ : list of shape (n_layers - 1,) + The ith element in the list represents the bias vector corresponding to + layer i + 1. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The number of iterations the solver has run. + + n_layers_ : int + Number of layers. + + n_outputs_ : int + Number of outputs. + + out_activation_ : str + Name of the output activation function. + + See Also + -------- + BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM). + MLPClassifier : Multi-layer Perceptron classifier. + sklearn.linear_model.SGDRegressor : Linear model fitted by minimizing + a regularized empirical loss with SGD. + + Notes + ----- + MLPRegressor trains iteratively since at each time step + the partial derivatives of the loss function with respect to the model + parameters are computed to update the parameters. + + It can also have a regularization term added to the loss function + that shrinks model parameters to prevent overfitting. + + This implementation works with data represented as dense and sparse numpy + arrays of floating point values. + + References + ---------- + Hinton, Geoffrey E. "Connectionist learning procedures." + Artificial intelligence 40.1 (1989): 185-234. + + Glorot, Xavier, and Yoshua Bengio. + "Understanding the difficulty of training deep feedforward neural networks." + International Conference on Artificial Intelligence and Statistics. 2010. + + :arxiv:`He, Kaiming, et al (2015). "Delving deep into rectifiers: + Surpassing human-level performance on imagenet classification." <1502.01852>` + + :arxiv:`Kingma, Diederik, and Jimmy Ba (2014) + "Adam: A method for stochastic optimization." 
<1412.6980>` + + Examples + -------- + >>> from sklearn.neural_network import MLPRegressor + >>> from sklearn.datasets import make_regression + >>> from sklearn.model_selection import train_test_split + >>> X, y = make_regression(n_samples=200, random_state=1) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + ... random_state=1) + >>> regr = MLPRegressor(random_state=1, max_iter=500).fit(X_train, y_train) + >>> regr.predict(X_test[:2]) + array([-0.9..., -7.1...]) + >>> regr.score(X_test, y_test) + 0.4... + """ + + def __init__( + self, + hidden_layer_sizes=(100,), + activation="relu", + *, + solver="adam", + alpha=0.0001, + batch_size="auto", + learning_rate="constant", + learning_rate_init=0.001, + power_t=0.5, + max_iter=200, + shuffle=True, + random_state=None, + tol=1e-4, + verbose=False, + warm_start=False, + momentum=0.9, + nesterovs_momentum=True, + early_stopping=False, + validation_fraction=0.1, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-8, + n_iter_no_change=10, + max_fun=15000, + ): + super().__init__( + hidden_layer_sizes=hidden_layer_sizes, + activation=activation, + solver=solver, + alpha=alpha, + batch_size=batch_size, + learning_rate=learning_rate, + learning_rate_init=learning_rate_init, + power_t=power_t, + max_iter=max_iter, + loss="squared_error", + shuffle=shuffle, + random_state=random_state, + tol=tol, + verbose=verbose, + warm_start=warm_start, + momentum=momentum, + nesterovs_momentum=nesterovs_momentum, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + beta_1=beta_1, + beta_2=beta_2, + epsilon=epsilon, + n_iter_no_change=n_iter_no_change, + max_fun=max_fun, + ) + + def predict(self, X): + """Predict using the multi-layer perceptron model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + y : ndarray of shape (n_samples, n_outputs) + The predicted values. + """ + check_is_fitted(self) + return self._predict(X) + + def _predict(self, X, check_input=True): + """Private predict method with optional input validation""" + y_pred = self._forward_pass_fast(X, check_input=check_input) + if y_pred.shape[1] == 1: + return y_pred.ravel() + return y_pred + + def _score(self, X, y): + """Private score method without input validation""" + # Input validation would remove feature names, so we disable it + y_pred = self._predict(X, check_input=False) + return r2_score(y, y_pred) + + def _validate_input(self, X, y, incremental, reset): + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc"], + multi_output=True, + y_numeric=True, + dtype=(np.float64, np.float32), + reset=reset, + ) + if y.ndim == 2 and y.shape[1] == 1: + y = column_or_1d(y, warn=True) + return X, y + + @available_if(lambda est: est._check_solver) + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y): + """Update the model with a single iteration over the given data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : ndarray of shape (n_samples,) + The target values. + + Returns + ------- + self : object + Trained MLP model. 
+ """ + return self._fit(X, y, incremental=True) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/neural_network/_rbm.py b/llmeval-env/lib/python3.10/site-packages/sklearn/neural_network/_rbm.py new file mode 100644 index 0000000000000000000000000000000000000000..ec819790c5f735a8ae090f14febefe25ab229e45 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/neural_network/_rbm.py @@ -0,0 +1,453 @@ +"""Restricted Boltzmann Machine +""" + +# Authors: Yann N. Dauphin +# Vlad Niculae +# Gabriel Synnaeve +# Lars Buitinck +# License: BSD 3 clause + +import time +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy.special import expit # logistic function + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..utils import check_random_state, gen_even_slices +from ..utils._param_validation import Interval +from ..utils.extmath import safe_sparse_dot +from ..utils.validation import check_is_fitted + + +class BernoulliRBM(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Bernoulli Restricted Boltzmann Machine (RBM). + + A Restricted Boltzmann Machine with binary visible units and + binary hidden units. Parameters are estimated using Stochastic Maximum + Likelihood (SML), also known as Persistent Contrastive Divergence (PCD) + [2]. + + The time complexity of this implementation is ``O(d ** 2)`` assuming + d ~ n_features ~ n_components. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=256 + Number of binary hidden units. + + learning_rate : float, default=0.1 + The learning rate for weight updates. It is *highly* recommended + to tune this hyper-parameter. Reasonable values are in the + 10**[0., -3.] range. + + batch_size : int, default=10 + Number of examples per minibatch. + + n_iter : int, default=10 + Number of iterations/sweeps over the training dataset to perform + during training. + + verbose : int, default=0 + The verbosity level. The default, zero, means silent mode. Range + of values is [0, inf]. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for: + + - Gibbs sampling from visible and hidden layers. + + - Initializing components, sampling from layers during fit. + + - Corrupting the data when scoring samples. + + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + intercept_hidden_ : array-like of shape (n_components,) + Biases of the hidden units. + + intercept_visible_ : array-like of shape (n_features,) + Biases of the visible units. + + components_ : array-like of shape (n_components, n_features) + Weight matrix, where `n_features` is the number of + visible units and `n_components` is the number of hidden units. + + h_samples_ : array-like of shape (batch_size, n_components) + Hidden Activation sampled from the model distribution, + where `batch_size` is the number of examples per minibatch and + `n_components` is the number of hidden units. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.neural_network.MLPRegressor : Multi-layer Perceptron regressor. 
+ sklearn.neural_network.MLPClassifier : Multi-layer Perceptron classifier. + sklearn.decomposition.PCA : An unsupervised linear dimensionality + reduction model. + + References + ---------- + + [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for + deep belief nets. Neural Computation 18, pp 1527-1554. + https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf + + [2] Tieleman, T. Training Restricted Boltzmann Machines using + Approximations to the Likelihood Gradient. International Conference + on Machine Learning (ICML) 2008 + + Examples + -------- + + >>> import numpy as np + >>> from sklearn.neural_network import BernoulliRBM + >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) + >>> model = BernoulliRBM(n_components=2) + >>> model.fit(X) + BernoulliRBM(n_components=2) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "learning_rate": [Interval(Real, 0, None, closed="neither")], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "n_iter": [Interval(Integral, 0, None, closed="left")], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=256, + *, + learning_rate=0.1, + batch_size=10, + n_iter=10, + verbose=0, + random_state=None, + ): + self.n_components = n_components + self.learning_rate = learning_rate + self.batch_size = batch_size + self.n_iter = n_iter + self.verbose = verbose + self.random_state = random_state + + def transform(self, X): + """Compute the hidden layer activation probabilities, P(h=1|v=X). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to be transformed. + + Returns + ------- + h : ndarray of shape (n_samples, n_components) + Latent representations of the data. + """ + check_is_fitted(self) + + X = self._validate_data( + X, accept_sparse="csr", reset=False, dtype=(np.float64, np.float32) + ) + return self._mean_hiddens(X) + + def _mean_hiddens(self, v): + """Computes the probabilities P(h=1|v). + + Parameters + ---------- + v : ndarray of shape (n_samples, n_features) + Values of the visible layer. + + Returns + ------- + h : ndarray of shape (n_samples, n_components) + Corresponding mean field values for the hidden layer. + """ + p = safe_sparse_dot(v, self.components_.T) + p += self.intercept_hidden_ + return expit(p, out=p) + + def _sample_hiddens(self, v, rng): + """Sample from the distribution P(h|v). + + Parameters + ---------- + v : ndarray of shape (n_samples, n_features) + Values of the visible layer to sample from. + + rng : RandomState instance + Random number generator to use. + + Returns + ------- + h : ndarray of shape (n_samples, n_components) + Values of the hidden layer. + """ + p = self._mean_hiddens(v) + return rng.uniform(size=p.shape) < p + + def _sample_visibles(self, h, rng): + """Sample from the distribution P(v|h). + + Parameters + ---------- + h : ndarray of shape (n_samples, n_components) + Values of the hidden layer to sample from. + + rng : RandomState instance + Random number generator to use. + + Returns + ------- + v : ndarray of shape (n_samples, n_features) + Values of the visible layer. + """ + p = np.dot(h, self.components_) + p += self.intercept_visible_ + expit(p, out=p) + return rng.uniform(size=p.shape) < p + + def _free_energy(self, v): + """Computes the free energy F(v) = - log sum_h exp(-E(v,h)). + + Parameters + ---------- + v : ndarray of shape (n_samples, n_features) + Values of the visible layer. 
+ + Returns + ------- + free_energy : ndarray of shape (n_samples,) + The value of the free energy. + """ + return -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp( + 0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_ + ).sum(axis=1) + + def gibbs(self, v): + """Perform one Gibbs sampling step. + + Parameters + ---------- + v : ndarray of shape (n_samples, n_features) + Values of the visible layer to start from. + + Returns + ------- + v_new : ndarray of shape (n_samples, n_features) + Values of the visible layer after one Gibbs step. + """ + check_is_fitted(self) + if not hasattr(self, "random_state_"): + self.random_state_ = check_random_state(self.random_state) + h_ = self._sample_hiddens(v, self.random_state_) + v_ = self._sample_visibles(h_, self.random_state_) + + return v_ + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Fit the model to the partial segment of the data X. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : BernoulliRBM + The fitted model. + """ + first_pass = not hasattr(self, "components_") + X = self._validate_data( + X, accept_sparse="csr", dtype=np.float64, reset=first_pass + ) + if not hasattr(self, "random_state_"): + self.random_state_ = check_random_state(self.random_state) + if not hasattr(self, "components_"): + self.components_ = np.asarray( + self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])), + order="F", + ) + self._n_features_out = self.components_.shape[0] + if not hasattr(self, "intercept_hidden_"): + self.intercept_hidden_ = np.zeros( + self.n_components, + ) + if not hasattr(self, "intercept_visible_"): + self.intercept_visible_ = np.zeros( + X.shape[1], + ) + if not hasattr(self, "h_samples_"): + self.h_samples_ = np.zeros((self.batch_size, self.n_components)) + + self._fit(X, self.random_state_) + + def _fit(self, v_pos, rng): + """Inner fit for one mini-batch. + + Adjust the parameters to maximize the likelihood of v using + Stochastic Maximum Likelihood (SML). + + Parameters + ---------- + v_pos : ndarray of shape (n_samples, n_features) + The data to use for training. + + rng : RandomState instance + Random number generator to use for sampling. + """ + h_pos = self._mean_hiddens(v_pos) + v_neg = self._sample_visibles(self.h_samples_, rng) + h_neg = self._mean_hiddens(v_neg) + + lr = float(self.learning_rate) / v_pos.shape[0] + update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T + update -= np.dot(h_neg.T, v_neg) + self.components_ += lr * update + self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)) + self.intercept_visible_ += lr * ( + np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0) + ) + + h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial + self.h_samples_ = np.floor(h_neg, h_neg) + + def score_samples(self, X): + """Compute the pseudo-likelihood of X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Values of the visible layer. Must be all-boolean (not checked). + + Returns + ------- + pseudo_likelihood : ndarray of shape (n_samples,) + Value of the pseudo-likelihood (proxy for likelihood). 
+ + Notes + ----- + This method is not deterministic: it computes a quantity called the + free energy on X, then on a randomly corrupted version of X, and + returns the log of the logistic function of the difference. + """ + check_is_fitted(self) + + v = self._validate_data(X, accept_sparse="csr", reset=False) + rng = check_random_state(self.random_state) + + # Randomly corrupt one feature in each sample in v. + ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0])) + if sp.issparse(v): + data = -2 * v[ind] + 1 + if isinstance(data, np.matrix): # v is a sparse matrix + v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape) + else: # v is a sparse array + v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape) + else: + v_ = v.copy() + v_[ind] = 1 - v_[ind] + + fe = self._free_energy(v) + fe_ = self._free_energy(v_) + # log(expit(x)) = log(1 / (1 + exp(-x)) = -np.logaddexp(0, -x) + return -v.shape[1] * np.logaddexp(0, -(fe_ - fe)) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model to the data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : BernoulliRBM + The fitted model. + """ + X = self._validate_data(X, accept_sparse="csr", dtype=(np.float64, np.float32)) + n_samples = X.shape[0] + rng = check_random_state(self.random_state) + + self.components_ = np.asarray( + rng.normal(0, 0.01, (self.n_components, X.shape[1])), + order="F", + dtype=X.dtype, + ) + self._n_features_out = self.components_.shape[0] + self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype) + self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype) + self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype) + + n_batches = int(np.ceil(float(n_samples) / self.batch_size)) + batch_slices = list( + gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples) + ) + verbose = self.verbose + begin = time.time() + for iteration in range(1, self.n_iter + 1): + for batch_slice in batch_slices: + self._fit(X[batch_slice], rng) + + if verbose: + end = time.time() + print( + "[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs" + % ( + type(self).__name__, + iteration, + self.score_samples(X).mean(), + end - begin, + ) + ) + begin = end + + return self + + def _more_tags(self): + return { + "_xfail_checks": { + "check_methods_subset_invariance": ( + "fails for the decision_function method" + ), + "check_methods_sample_order_invariance": ( + "fails for the score_samples method" + ), + }, + "preserves_dtype": [np.float64, np.float32], + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/utils/_weight_vector.pxd b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/_weight_vector.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a8ef0ab53d04848fbfe4b68f493146badb09d734 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/utils/_weight_vector.pxd @@ -0,0 +1,48 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/utils/_weight_vector.pxd.tp'. +# Changes must be made there. 
+ + +cdef class WeightVector64(object): + cdef readonly double[::1] w + cdef readonly double[::1] aw + cdef double *w_data_ptr + cdef double *aw_data_ptr + + cdef double wscale + cdef double average_a + cdef double average_b + cdef int n_features + cdef double sq_norm + + cdef void add(self, double *x_data_ptr, int *x_ind_ptr, + int xnnz, double c) noexcept nogil + cdef void add_average(self, double *x_data_ptr, int *x_ind_ptr, + int xnnz, double c, double num_iter) noexcept nogil + cdef double dot(self, double *x_data_ptr, int *x_ind_ptr, + int xnnz) noexcept nogil + cdef void scale(self, double c) noexcept nogil + cdef void reset_wscale(self) noexcept nogil + cdef double norm(self) noexcept nogil + +cdef class WeightVector32(object): + cdef readonly float[::1] w + cdef readonly float[::1] aw + cdef float *w_data_ptr + cdef float *aw_data_ptr + + cdef double wscale + cdef double average_a + cdef double average_b + cdef int n_features + cdef double sq_norm + + cdef void add(self, float *x_data_ptr, int *x_ind_ptr, + int xnnz, float c) noexcept nogil + cdef void add_average(self, float *x_data_ptr, int *x_ind_ptr, + int xnnz, float c, float num_iter) noexcept nogil + cdef float dot(self, float *x_data_ptr, int *x_ind_ptr, + int xnnz) noexcept nogil + cdef void scale(self, float c) noexcept nogil + cdef void reset_wscale(self) noexcept nogil + cdef float norm(self) noexcept nogil
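The MLPClassifier docstring and the stochastic fitting code above describe early stopping, the validation split, and incremental training via partial_fit. Below is a minimal usage sketch of those behaviours; it is not part of the diff, and the dataset, layer sizes and hyperparameter values are illustrative assumptions — only the public estimator API shown above is relied on.

# Minimal sketch: early stopping and incremental training with MLPClassifier.
# Dataset and hyperparameters are illustrative, not taken from the diff above.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=500, random_state=0)

# Early stopping: validation_fraction (10% by default) of the training data is
# held out; training stops once the validation score fails to improve by at
# least `tol` for `n_iter_no_change` consecutive epochs.
clf = MLPClassifier(
    hidden_layer_sizes=(50,),
    solver="adam",
    early_stopping=True,
    n_iter_no_change=5,
    max_iter=500,
    random_state=0,
)
clf.fit(X, y)
print(clf.n_iter_, clf.best_validation_score_)

# Incremental training: partial_fit runs a single pass over the given data.
# `classes` must be supplied on the first call; early_stopping is not
# supported here (the implementation above raises ValueError for it).
inc = MLPClassifier(hidden_layer_sizes=(50,), solver="sgd", random_state=0)
classes = np.unique(y)
for batch in np.array_split(np.arange(len(X)), 5):
    inc.partial_fit(X[batch], y[batch], classes=classes)
print(inc.score(X, y))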
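The MLPRegressor docstring above notes that 'lbfgs' can converge faster than the stochastic solvers on small datasets, and the _fit_lbfgs code shows the max_iter / max_fun / tol stopping conditions. A short sketch of that configuration follows; the regression problem and the tolerance values are illustrative assumptions.

# Minimal sketch: MLPRegressor with the L-BFGS solver on a small dataset.
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor

X, y = make_regression(n_samples=200, n_features=10, noise=5.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# With solver='lbfgs' there are no minibatches; the optimizer stops on
# convergence (tol), after max_iter iterations, or after max_fun loss-function
# evaluations, whichever comes first.
regr = MLPRegressor(
    hidden_layer_sizes=(50,),
    solver="lbfgs",
    max_iter=1000,
    max_fun=20000,
    tol=1e-5,
    random_state=0,
)
regr.fit(X_train, y_train)
print(regr.n_iter_, regr.score(X_test, y_test))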
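The BernoulliRBM docstring above describes persistent contrastive divergence training, the hidden-unit activation transform, Gibbs sampling and the pseudo-likelihood score. A minimal sketch of that API follows; the binarized digits data, the component count and the downstream LogisticRegression are illustrative assumptions, not part of the diff.

# Minimal sketch: BernoulliRBM as an unsupervised feature extractor.
# The RBM expects visible units in [0, 1], so the digits data is binarized.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline

X, y = load_digits(return_X_y=True)
X = (X / 16.0 > 0.5).astype(np.float64)  # scale to [0, 1], then binarize

model = Pipeline([
    ("rbm", BernoulliRBM(n_components=32, learning_rate=0.05, n_iter=10, random_state=0)),
    ("logreg", LogisticRegression(max_iter=1000)),
])
model.fit(X, y)
print(model.score(X, y))

rbm = model.named_steps["rbm"]
H = rbm.transform(X[:5])            # P(h=1 | v), shape (5, 32)
print(rbm.score_samples(X[:5]))     # stochastic pseudo-likelihood values
v_new = rbm.gibbs(X[:5])            # one Gibbs step v -> h -> v, shape (5, 64)
print(H.shape, v_new.shape)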