diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4a2a56a99276d26b8a4ed781065b1e81071808c1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py new file mode 100644 index 0000000000000000000000000000000000000000..51350aa5e05bdbb0a9a8b691837d2476f3198981 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_dict_learning.py @@ -0,0 +1,2301 @@ +""" Dictionary learning. +""" +# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort +# License: BSD 3 clause + +import itertools +import sys +import time +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from joblib import effective_n_jobs +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..linear_model import Lars, Lasso, LassoLars, orthogonal_mp_gram +from ..utils import check_array, check_random_state, gen_batches, gen_even_slices +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.extmath import randomized_svd, row_norms, svd_flip +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted + + +def _check_positive_coding(method, positive): + if positive and method in ["omp", "lars"]: + raise ValueError( + "Positive constraint not supported for '{}' coding method.".format(method) + ) + + +def _sparse_encode_precomputed( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + regularization=None, + copy_cov=True, + init=None, + max_iter=1000, + verbose=0, + positive=False, +): + """Generic sparse coding with precomputed Gram and/or covariance matrices. + + Each row of the result is the solution to a Lasso problem. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data matrix. + + dictionary : ndarray of shape (n_components, n_features) + The dictionary matrix against which to solve the sparse coding of + the data. Some of the algorithms assume normalized rows. + + gram : ndarray of shape (n_components, n_components), default=None + Precomputed Gram matrix, `dictionary * dictionary'` + gram can be `None` if method is 'threshold'. + + cov : ndarray of shape (n_components, n_samples), default=None + Precomputed covariance, `dictionary * X'`. + + algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \ + default='lasso_lars' + The algorithm used: + + * `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + * `'lasso_lars'`: uses Lars to compute the Lasso solution; + * `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if + the estimated components are sparse; + * `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + * `'threshold'`: squashes to zero all coefficients less than + regularization from the projection `dictionary * data'`. + + regularization : int or float, default=None + The regularization parameter. 
It corresponds to alpha when + algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`. + Otherwise it corresponds to `n_nonzero_coefs`. + + init : ndarray of shape (n_samples, n_components), default=None + Initialization value of the sparse code. Only used if + `algorithm='lasso_cd'`. + + max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + copy_cov : bool, default=True + Whether to copy the precomputed covariance matrix; if `False`, it may + be overwritten. + + verbose : int, default=0 + Controls the verbosity; the higher, the more messages. + + positive: bool, default=False + Whether to enforce a positivity constraint on the sparse code. + + .. versionadded:: 0.20 + + Returns + ------- + code : ndarray of shape (n_components, n_features) + The sparse codes. + """ + n_samples, n_features = X.shape + n_components = dictionary.shape[0] + + if algorithm == "lasso_lars": + alpha = float(regularization) / n_features # account for scaling + try: + err_mgt = np.seterr(all="ignore") + + # Not passing in verbose=max(0, verbose-1) because Lars.fit already + # corrects the verbosity level. + lasso_lars = LassoLars( + alpha=alpha, + fit_intercept=False, + verbose=verbose, + precompute=gram, + fit_path=False, + positive=positive, + max_iter=max_iter, + ) + lasso_lars.fit(dictionary.T, X.T, Xy=cov) + new_code = lasso_lars.coef_ + finally: + np.seterr(**err_mgt) + + elif algorithm == "lasso_cd": + alpha = float(regularization) / n_features # account for scaling + + # TODO: Make verbosity argument for Lasso? + # sklearn.linear_model.coordinate_descent.enet_path has a verbosity + # argument that we could pass in from Lasso. + clf = Lasso( + alpha=alpha, + fit_intercept=False, + precompute=gram, + max_iter=max_iter, + warm_start=True, + positive=positive, + ) + + if init is not None: + # In some workflows using coordinate descent algorithms: + # - users might provide NumPy arrays with read-only buffers + # - `joblib` might memmap arrays making their buffer read-only + # TODO: move this handling (which is currently too broad) + # closer to the actual private function which need buffers to be writable. + if not init.flags["WRITEABLE"]: + init = np.array(init) + clf.coef_ = init + + clf.fit(dictionary.T, X.T, check_input=False) + new_code = clf.coef_ + + elif algorithm == "lars": + try: + err_mgt = np.seterr(all="ignore") + + # Not passing in verbose=max(0, verbose-1) because Lars.fit already + # corrects the verbosity level. 
+ lars = Lars( + fit_intercept=False, + verbose=verbose, + precompute=gram, + n_nonzero_coefs=int(regularization), + fit_path=False, + ) + lars.fit(dictionary.T, X.T, Xy=cov) + new_code = lars.coef_ + finally: + np.seterr(**err_mgt) + + elif algorithm == "threshold": + new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T + if positive: + np.clip(new_code, 0, None, out=new_code) + + elif algorithm == "omp": + new_code = orthogonal_mp_gram( + Gram=gram, + Xy=cov, + n_nonzero_coefs=int(regularization), + tol=None, + norms_squared=row_norms(X, squared=True), + copy_Xy=copy_cov, + ).T + + return new_code.reshape(n_samples, n_components) + + +@validate_params( + { + "X": ["array-like"], + "dictionary": ["array-like"], + "gram": ["array-like", None], + "cov": ["array-like", None], + "algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left"), None], + "copy_cov": ["boolean"], + "init": ["array-like", None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "n_jobs": [Integral, None], + "check_input": ["boolean"], + "verbose": ["verbose"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +# XXX : could be moved to the linear_model module +def sparse_encode( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + n_nonzero_coefs=None, + alpha=None, + copy_cov=True, + init=None, + max_iter=1000, + n_jobs=None, + check_input=True, + verbose=0, + positive=False, +): + """Sparse coding. + + Each row of the result is the solution to a sparse coding problem. + The goal is to find a sparse array `code` such that:: + + X ~= code * dictionary + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data matrix. + + dictionary : array-like of shape (n_components, n_features) + The dictionary matrix against which to solve the sparse coding of + the data. Some of the algorithms assume normalized rows for meaningful + output. + + gram : array-like of shape (n_components, n_components), default=None + Precomputed Gram matrix, `dictionary * dictionary'`. + + cov : array-like of shape (n_components, n_samples), default=None + Precomputed covariance, `dictionary' * X`. + + algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \ + default='lasso_lars' + The algorithm used: + + * `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + * `'lasso_lars'`: uses Lars to compute the Lasso solution; + * `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if + the estimated components are sparse; + * `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + * `'threshold'`: squashes to zero all coefficients less than + regularization from the projection `dictionary * data'`. + + n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and `algorithm='omp'` + and is overridden by `alpha` in the `omp` case. If `None`, then + `n_nonzero_coefs=int(n_features / 10)`. + + alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. 
+ If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of + the reconstruction error targeted. In this case, it overrides + `n_nonzero_coefs`. + If `None`, default to 1. + + copy_cov : bool, default=True + Whether to copy the precomputed covariance matrix; if `False`, it may + be overwritten. + + init : ndarray of shape (n_samples, n_components), default=None + Initialization value of the sparse codes. Only used if + `algorithm='lasso_cd'`. + + max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + check_input : bool, default=True + If `False`, the input arrays X and dictionary will not be checked. + + verbose : int, default=0 + Controls the verbosity; the higher, the more messages. + + positive : bool, default=False + Whether to enforce positivity when finding the encoding. + + .. versionadded:: 0.20 + + Returns + ------- + code : ndarray of shape (n_samples, n_components) + The sparse codes. + + See Also + -------- + sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems. + sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer. + SparseCoder : Find a sparse representation of data from a fixed precomputed + dictionary. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import sparse_encode + >>> X = np.array([[-1, -1, -1], [0, 0, 3]]) + >>> dictionary = np.array( + ... [[0, 1, 0], + ... [-1, -1, 2], + ... [1, 1, 1], + ... [0, 1, 1], + ... [0, 2, 1]], + ... dtype=np.float64 + ... 
) + >>> sparse_encode(X, dictionary, alpha=1e-10) + array([[ 0., 0., -1., 0., 0.], + [ 0., 1., 1., 0., 0.]]) + """ + if check_input: + if algorithm == "lasso_cd": + dictionary = check_array( + dictionary, order="C", dtype=[np.float64, np.float32] + ) + X = check_array(X, order="C", dtype=[np.float64, np.float32]) + else: + dictionary = check_array(dictionary) + X = check_array(X) + + if dictionary.shape[1] != X.shape[1]: + raise ValueError( + "Dictionary and X have different numbers of features:" + "dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape) + ) + + _check_positive_coding(algorithm, positive) + + return _sparse_encode( + X, + dictionary, + gram=gram, + cov=cov, + algorithm=algorithm, + n_nonzero_coefs=n_nonzero_coefs, + alpha=alpha, + copy_cov=copy_cov, + init=init, + max_iter=max_iter, + n_jobs=n_jobs, + verbose=verbose, + positive=positive, + ) + + +def _sparse_encode( + X, + dictionary, + *, + gram=None, + cov=None, + algorithm="lasso_lars", + n_nonzero_coefs=None, + alpha=None, + copy_cov=True, + init=None, + max_iter=1000, + n_jobs=None, + verbose=0, + positive=False, +): + """Sparse coding without input/parameter validation.""" + + n_samples, n_features = X.shape + n_components = dictionary.shape[0] + + if algorithm in ("lars", "omp"): + regularization = n_nonzero_coefs + if regularization is None: + regularization = min(max(n_features / 10, 1), n_components) + else: + regularization = alpha + if regularization is None: + regularization = 1.0 + + if gram is None and algorithm != "threshold": + gram = np.dot(dictionary, dictionary.T) + + if cov is None and algorithm != "lasso_cd": + copy_cov = False + cov = np.dot(dictionary, X.T) + + if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold": + code = _sparse_encode_precomputed( + X, + dictionary, + gram=gram, + cov=cov, + algorithm=algorithm, + regularization=regularization, + copy_cov=copy_cov, + init=init, + max_iter=max_iter, + verbose=verbose, + positive=positive, + ) + return code + + # Enter parallel code block + n_samples = X.shape[0] + n_components = dictionary.shape[0] + code = np.empty((n_samples, n_components)) + slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs))) + + code_views = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(_sparse_encode_precomputed)( + X[this_slice], + dictionary, + gram=gram, + cov=cov[:, this_slice] if cov is not None else None, + algorithm=algorithm, + regularization=regularization, + copy_cov=copy_cov, + init=init[this_slice] if init is not None else None, + max_iter=max_iter, + verbose=verbose, + positive=positive, + ) + for this_slice in slices + ) + for this_slice, this_view in zip(slices, code_views): + code[this_slice] = this_view + return code + + +def _update_dict( + dictionary, + Y, + code, + A=None, + B=None, + verbose=False, + random_state=None, + positive=False, +): + """Update the dense dictionary factor in place. + + Parameters + ---------- + dictionary : ndarray of shape (n_components, n_features) + Value of the dictionary at the previous iteration. + + Y : ndarray of shape (n_samples, n_features) + Data matrix. + + code : ndarray of shape (n_samples, n_components) + Sparse coding of the data against which to optimize the dictionary. + + A : ndarray of shape (n_components, n_components), default=None + Together with `B`, sufficient stats of the online model to update the + dictionary. + + B : ndarray of shape (n_features, n_components), default=None + Together with `A`, sufficient stats of the online model to update the + dictionary. 
+ + verbose: bool, default=False + Degree of output the procedure will print. + + random_state : int, RandomState instance or None, default=None + Used for randomly initializing the dictionary. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + positive : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + """ + n_samples, n_components = code.shape + random_state = check_random_state(random_state) + + if A is None: + A = code.T @ code + if B is None: + B = Y.T @ code + + n_unused = 0 + + for k in range(n_components): + if A[k, k] > 1e-6: + # 1e-6 is arbitrary but consistent with the spams implementation + dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k] + else: + # kth atom is almost never used -> sample a new one from the data + newd = Y[random_state.choice(n_samples)] + + # add small noise to avoid making the sparse coding ill conditioned + noise_level = 0.01 * (newd.std() or 1) # avoid 0 std + noise = random_state.normal(0, noise_level, size=len(newd)) + + dictionary[k] = newd + noise + code[:, k] = 0 + n_unused += 1 + + if positive: + np.clip(dictionary[k], 0, None, out=dictionary[k]) + + # Projection on the constraint set ||V_k|| <= 1 + dictionary[k] /= max(linalg.norm(dictionary[k]), 1) + + if verbose and n_unused > 0: + print(f"{n_unused} unused atoms resampled.") + + +def _dict_learning( + X, + n_components, + *, + alpha, + max_iter, + tol, + method, + n_jobs, + dict_init, + code_init, + callback, + verbose, + random_state, + return_n_iter, + positive_dict, + positive_code, + method_max_iter, +): + """Main dictionary learning algorithm""" + t0 = time.time() + # Init the code and the dictionary with SVD of Y + if code_init is not None and dict_init is not None: + code = np.array(code_init, order="F") + # Don't copy V, it will happen below + dictionary = dict_init + else: + code, S, dictionary = linalg.svd(X, full_matrices=False) + # flip the initial code's sign to enforce deterministic output + code, dictionary = svd_flip(code, dictionary) + dictionary = S[:, np.newaxis] * dictionary + r = len(dictionary) + if n_components <= r: # True even if n_components=None + code = code[:, :n_components] + dictionary = dictionary[:n_components, :] + else: + code = np.c_[code, np.zeros((len(code), n_components - r))] + dictionary = np.r_[ + dictionary, np.zeros((n_components - r, dictionary.shape[1])) + ] + + # Fortran-order dict better suited for the sparse coding which is the + # bottleneck of this algorithm. 
+ dictionary = np.asfortranarray(dictionary) + + errors = [] + current_cost = np.nan + + if verbose == 1: + print("[dict_learning]", end=" ") + + # If max_iter is 0, number of iterations returned should be zero + ii = -1 + + for ii in range(max_iter): + dt = time.time() - t0 + if verbose == 1: + sys.stdout.write(".") + sys.stdout.flush() + elif verbose: + print( + "Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)" + % (ii, dt, dt / 60, current_cost) + ) + + # Update code + code = sparse_encode( + X, + dictionary, + algorithm=method, + alpha=alpha, + init=code, + n_jobs=n_jobs, + positive=positive_code, + max_iter=method_max_iter, + verbose=verbose, + ) + + # Update dictionary in place + _update_dict( + dictionary, + X, + code, + verbose=verbose, + random_state=random_state, + positive=positive_dict, + ) + + # Cost function + current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum( + np.abs(code) + ) + errors.append(current_cost) + + if ii > 0: + dE = errors[-2] - errors[-1] + # assert(dE >= -tol * errors[-1]) + if dE < tol * errors[-1]: + if verbose == 1: + # A line return + print("") + elif verbose: + print("--- Convergence reached after %d iterations" % ii) + break + if ii % 5 == 0 and callback is not None: + callback(locals()) + + if return_n_iter: + return code, dictionary, errors, ii + 1 + else: + return code, dictionary, errors + + +def dict_learning_online( + X, + n_components=2, + *, + alpha=1, + max_iter=100, + return_code=True, + dict_init=None, + callback=None, + batch_size=256, + verbose=False, + shuffle=True, + n_jobs=None, + method="lars", + random_state=None, + positive_dict=False, + positive_code=False, + method_max_iter=1000, + tol=1e-3, + max_no_improvement=10, +): + """Solve a dictionary learning matrix factorization problem online. + + Finds the best dictionary and the corresponding sparse code for + approximating the data matrix X by solving:: + + (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 = 1 for all 0 <= k < n_components + + where V is the dictionary and U is the sparse code. ||.||_Fro stands for + the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm + which is the sum of the absolute values of all the entries in the matrix. + This is accomplished by repeatedly iterating over mini-batches by slicing + the input data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data matrix. + + n_components : int or None, default=2 + Number of dictionary atoms to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1 + Sparsity controlling parameter. + + max_iter : int, default=100 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.1 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `100`) instead. + + return_code : bool, default=True + Whether to also return the code U or just the dictionary `V`. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial values for the dictionary for warm restart scenarios. + If `None`, the initial values for the dictionary are created + with an SVD decomposition of the data via + :func:`~sklearn.utils.extmath.randomized_svd`. + + callback : callable, default=None + A callable that gets invoked at the end of each iteration. 
+ + batch_size : int, default=256 + The number of samples to take in each batch. + + .. versionchanged:: 1.3 + The default value of `batch_size` changed from 3 to 256 in version 1.3. + + verbose : bool, default=False + To control the verbosity of the procedure. + + shuffle : bool, default=True + Whether to shuffle the data before splitting it in batches. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + method : {'lars', 'cd'}, default='lars' + * `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + method_max_iter : int, default=1000 + Maximum number of iterations to perform when solving the lasso problem. + + .. versionadded:: 0.22 + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + .. versionadded:: 1.1 + + Returns + ------- + code : ndarray of shape (n_samples, n_components), + The sparse code (only returned if `return_code=True`). + + dictionary : ndarray of shape (n_components, n_features), + The solutions to the dictionary learning problem. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to `True`. + + See Also + -------- + dict_learning : Solve a dictionary learning matrix factorization problem. + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary + learning algorithm. + SparsePCA : Sparse Principal Components Analysis. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import dict_learning_online + >>> X, _, _ = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> U, V = dict_learning_online( + ... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42 + ... ) + + We can check the level of sparsity of `U`: + + >>> np.mean(U == 0) + 0.53... 
+ + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = U @ V + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.05... + """ + # TODO(1.6): remove in 1.6 + if max_iter is None: + warn( + ( + "`max_iter=None` is deprecated in version 1.4 and will be removed in " + "version 1.6. Use the default value (i.e. `100`) instead." + ), + FutureWarning, + ) + max_iter = 100 + + transform_algorithm = "lasso_" + method + + est = MiniBatchDictionaryLearning( + n_components=n_components, + alpha=alpha, + max_iter=max_iter, + n_jobs=n_jobs, + fit_algorithm=method, + batch_size=batch_size, + shuffle=shuffle, + dict_init=dict_init, + random_state=random_state, + transform_algorithm=transform_algorithm, + transform_alpha=alpha, + positive_code=positive_code, + positive_dict=positive_dict, + transform_max_iter=method_max_iter, + verbose=verbose, + callback=callback, + tol=tol, + max_no_improvement=max_no_improvement, + ).fit(X) + + if not return_code: + return est.components_ + else: + code = est.transform(X) + return code, est.components_ + + +@validate_params( + { + "X": ["array-like"], + "method": [StrOptions({"lars", "cd"})], + "return_n_iter": ["boolean"], + "method_max_iter": [Interval(Integral, 0, None, closed="left")], + }, + prefer_skip_nested_validation=False, +) +def dict_learning( + X, + n_components, + *, + alpha, + max_iter=100, + tol=1e-8, + method="lars", + n_jobs=None, + dict_init=None, + code_init=None, + callback=None, + verbose=False, + random_state=None, + return_n_iter=False, + positive_dict=False, + positive_code=False, + method_max_iter=1000, +): + """Solve a dictionary learning matrix factorization problem. + + Finds the best dictionary and the corresponding sparse code for + approximating the data matrix X by solving:: + + (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 = 1 for all 0 <= k < n_components + + where V is the dictionary and U is the sparse code. ||.||_Fro stands for + the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm + which is the sum of the absolute values of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data matrix. + + n_components : int + Number of dictionary atoms to extract. + + alpha : int or float + Sparsity controlling parameter. + + max_iter : int, default=100 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for the stopping condition. + + method : {'lars', 'cd'}, default='lars' + The method used: + + * `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial value for the dictionary for warm restart scenarios. Only used + if `code_init` and `dict_init` are not None. 
+ + code_init : ndarray of shape (n_samples, n_components), default=None + Initial value for the sparse code for warm restart scenarios. Only used + if `code_init` and `dict_init` are not None. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + verbose : bool, default=False + To control the verbosity of the procedure. + + random_state : int, RandomState instance or None, default=None + Used for randomly initializing the dictionary. Pass an int for + reproducible results across multiple function calls. + See :term:`Glossary `. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + method_max_iter : int, default=1000 + Maximum number of iterations to perform. + + .. versionadded:: 0.22 + + Returns + ------- + code : ndarray of shape (n_samples, n_components) + The sparse code factor in the matrix factorization. + + dictionary : ndarray of shape (n_components, n_features), + The dictionary factor in the matrix factorization. + + errors : array + Vector of errors at each iteration. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to True. + + See Also + -------- + dict_learning_online : Solve a dictionary learning matrix factorization + problem online. + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate version + of the dictionary learning algorithm. + SparsePCA : Sparse Principal Components Analysis. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import dict_learning + >>> X, _, _ = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42) + + We can check the level of sparsity of `U`: + + >>> np.mean(U == 0) + 0.6... + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = U @ V + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.01... 
+ """ + estimator = DictionaryLearning( + n_components=n_components, + alpha=alpha, + max_iter=max_iter, + tol=tol, + fit_algorithm=method, + n_jobs=n_jobs, + dict_init=dict_init, + callback=callback, + code_init=code_init, + verbose=verbose, + random_state=random_state, + positive_code=positive_code, + positive_dict=positive_dict, + transform_max_iter=method_max_iter, + ).set_output(transform="default") + code = estimator.fit_transform(X) + if return_n_iter: + return ( + code, + estimator.components_, + estimator.error_, + estimator.n_iter_, + ) + return code, estimator.components_, estimator.error_ + + +class _BaseSparseCoding(ClassNamePrefixFeaturesOutMixin, TransformerMixin): + """Base class from SparseCoder and DictionaryLearning algorithms.""" + + def __init__( + self, + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ): + self.transform_algorithm = transform_algorithm + self.transform_n_nonzero_coefs = transform_n_nonzero_coefs + self.transform_alpha = transform_alpha + self.transform_max_iter = transform_max_iter + self.split_sign = split_sign + self.n_jobs = n_jobs + self.positive_code = positive_code + + def _transform(self, X, dictionary): + """Private method allowing to accommodate both DictionaryLearning and + SparseCoder.""" + X = self._validate_data(X, reset=False) + + if hasattr(self, "alpha") and self.transform_alpha is None: + transform_alpha = self.alpha + else: + transform_alpha = self.transform_alpha + + code = sparse_encode( + X, + dictionary, + algorithm=self.transform_algorithm, + n_nonzero_coefs=self.transform_n_nonzero_coefs, + alpha=transform_alpha, + max_iter=self.transform_max_iter, + n_jobs=self.n_jobs, + positive=self.positive_code, + ) + + if self.split_sign: + # feature vector is split into a positive and negative side + n_samples, n_features = code.shape + split_code = np.empty((n_samples, 2 * n_features)) + split_code[:, :n_features] = np.maximum(code, 0) + split_code[:, n_features:] = -np.minimum(code, 0) + code = split_code + + return code + + def transform(self, X): + """Encode the data as a sparse combination of the dictionary atoms. + + Coding method is determined by the object parameter + `transform_algorithm`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Test data to be transformed, must have the same number of + features as the data used to train the model. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + return self._transform(X, self.components_) + + +class SparseCoder(_BaseSparseCoding, BaseEstimator): + """Sparse coding. + + Finds a sparse representation of data against a fixed, precomputed + dictionary. + + Each row of the result is the solution to a sparse coding problem. + The goal is to find a sparse array `code` such that:: + + X ~= code * dictionary + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + dictionary : ndarray of shape (n_components, n_features) + The dictionary atoms used for sparse coding. Lines are assumed to be + normalized to unit norm. 
+ + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution; + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if + the estimated components are sparse; + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution; + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and `algorithm='omp'` + and is overridden by `alpha` in the `omp` case. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of + the reconstruction error targeted. In this case, it overrides + `n_nonzero_coefs`. + If `None`, default to 1. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `lasso_lars`. + + .. versionadded:: 0.22 + + Attributes + ---------- + n_components_ : int + Number of atoms. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchDictionaryLearning : A faster, less accurate, version of the + dictionary learning algorithm. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparsePCA : Sparse Principal Components Analysis. + sparse_encode : Sparse coding where each row of the result is the solution + to a sparse coding problem. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.decomposition import SparseCoder + >>> X = np.array([[-1, -1, -1], [0, 0, 3]]) + >>> dictionary = np.array( + ... [[0, 1, 0], + ... [-1, -1, 2], + ... [1, 1, 1], + ... [0, 1, 1], + ... [0, 2, 1]], + ... dtype=np.float64 + ... ) + >>> coder = SparseCoder( + ... dictionary=dictionary, transform_algorithm='lasso_lars', + ... transform_alpha=1e-10, + ... 
) + >>> coder.transform(X) + array([[ 0., 0., -1., 0., 0.], + [ 0., 1., 1., 0., 0.]]) + """ + + _required_parameters = ["dictionary"] + + def __init__( + self, + dictionary, + *, + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + split_sign=False, + n_jobs=None, + positive_code=False, + transform_max_iter=1000, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.dictionary = dictionary + + def fit(self, X, y=None): + """Do nothing and return the estimator unchanged. + + This method is just there to implement the usual API and hence + work in pipelines. + + Parameters + ---------- + X : Ignored + Not used, present for API consistency by convention. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + return self + + def transform(self, X, y=None): + """Encode the data as a sparse combination of the dictionary atoms. + + Coding method is determined by the object parameter + `transform_algorithm`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + return super()._transform(X, self.dictionary) + + def _more_tags(self): + return { + "requires_fit": False, + "preserves_dtype": [np.float64, np.float32], + } + + @property + def n_components_(self): + """Number of atoms.""" + return self.dictionary.shape[0] + + @property + def n_features_in_(self): + """Number of features seen during `fit`.""" + return self.dictionary.shape[1] + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.n_components_ + + +class DictionaryLearning(_BaseSparseCoding, BaseEstimator): + """Dictionary learning. + + Finds a dictionary (a set of atoms) that performs well at sparsely + encoding the fitted data. + + Solves the optimization problem:: + + (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 <= 1 for all 0 <= k < n_components + + ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for + the entry-wise matrix norm which is the sum of the absolute values + of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of dictionary elements to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1.0 + Sparsity controlling parameter. + + max_iter : int, default=1000 + Maximum number of iterations to perform. + + tol : float, default=1e-8 + Tolerance for numerical error. + + fit_algorithm : {'lars', 'cd'}, default='lars' + * `'lars'`: uses the least angle regression method to solve the lasso + problem (:func:`~sklearn.linear_model.lars_path`); + * `'cd'`: uses the coordinate descent method to compute the + Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be + faster if the estimated components are sparse. + + .. versionadded:: 0.17 + *cd* coordinate descent method to improve speed. 
+ + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (:func:`~sklearn.linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution. + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'` + will be faster if the estimated components are sparse. + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution. + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + .. versionadded:: 0.17 + *lasso_cd* coordinate descent method to improve speed. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and + `algorithm='omp'`. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `None`, defaults to `alpha`. + + .. versionchanged:: 1.2 + When None, default value changed from 1.0 to `alpha`. + + n_jobs : int or None, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + code_init : ndarray of shape (n_samples, n_components), default=None + Initial value for the code, for warm restart. Only used if `code_init` + and `dict_init` are not None. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial values for the dictionary, for warm restart. Only used if + `code_init` and `dict_init` are not None. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + .. versionadded:: 1.3 + + verbose : bool, default=False + To control the verbosity of the procedure. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + .. versionadded:: 0.22 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + dictionary atoms extracted from the data + + error_ : array + vector of errors at each iteration + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + MiniBatchDictionaryLearning: A faster, less accurate, version of the + dictionary learning algorithm. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + + References + ---------- + + J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning + for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import DictionaryLearning + >>> X, dictionary, code = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42, + ... ) + >>> dict_learner = DictionaryLearning( + ... n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.1, + ... random_state=42, + ... ) + >>> X_transformed = dict_learner.fit(X).transform(X) + + We can check the level of sparsity of `X_transformed`: + + >>> np.mean(X_transformed == 0) + 0.52... + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = X_transformed @ dict_learner.components_ + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.05... + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "fit_algorithm": [StrOptions({"lars", "cd"})], + "transform_algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "transform_alpha": [Interval(Real, 0, None, closed="left"), None], + "n_jobs": [Integral, None], + "code_init": [np.ndarray, None], + "dict_init": [np.ndarray, None], + "callback": [callable, None], + "verbose": ["verbose"], + "split_sign": ["boolean"], + "random_state": ["random_state"], + "positive_code": ["boolean"], + "positive_dict": ["boolean"], + "transform_max_iter": [Interval(Integral, 0, None, closed="left")], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + max_iter=1000, + tol=1e-8, + fit_algorithm="lars", + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + n_jobs=None, + code_init=None, + dict_init=None, + callback=None, + verbose=False, + split_sign=False, + random_state=None, + positive_code=False, + positive_dict=False, + transform_max_iter=1000, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.n_components = n_components + self.alpha = alpha + self.max_iter = max_iter + self.tol = tol + self.fit_algorithm = fit_algorithm + self.code_init = code_init + self.dict_init = dict_init + self.callback = callback + self.verbose = verbose + self.random_state = random_state + self.positive_dict = positive_dict 
+ + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + self.fit_transform(X) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model from data in X and return the transformed data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + V : ndarray of shape (n_samples, n_components) + Transformed data. + """ + _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code) + + method = "lasso_" + self.fit_algorithm + + random_state = check_random_state(self.random_state) + X = self._validate_data(X) + + if self.n_components is None: + n_components = X.shape[1] + else: + n_components = self.n_components + + V, U, E, self.n_iter_ = _dict_learning( + X, + n_components, + alpha=self.alpha, + tol=self.tol, + max_iter=self.max_iter, + method=method, + method_max_iter=self.transform_max_iter, + n_jobs=self.n_jobs, + code_init=self.code_init, + dict_init=self.dict_init, + callback=self.callback, + verbose=self.verbose, + random_state=random_state, + return_n_iter=True, + positive_dict=self.positive_dict, + positive_code=self.positive_code, + ) + self.components_ = U + self.error_ = E + + return V + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } + + +class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator): + """Mini-batch dictionary learning. + + Finds a dictionary (a set of atoms) that performs well at sparsely + encoding the fitted data. + + Solves the optimization problem:: + + (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 + (U,V) + with || V_k ||_2 <= 1 for all 0 <= k < n_components + + ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for + the entry-wise matrix norm which is the sum of the absolute values + of all the entries in the matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of dictionary elements to extract. + + alpha : float, default=1 + Sparsity controlling parameter. + + max_iter : int, default=1_000 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.1 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `1_000`) instead. + + fit_algorithm : {'lars', 'cd'}, default='lars' + The algorithm used: + + - `'lars'`: uses the least angle regression method to solve the lasso + problem (`linear_model.lars_path`) + - `'cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. 
+ ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + batch_size : int, default=256 + Number of samples in each mini-batch. + + .. versionchanged:: 1.3 + The default value of `batch_size` changed from 3 to 256 in version 1.3. + + shuffle : bool, default=True + Whether to shuffle the samples before forming batches. + + dict_init : ndarray of shape (n_components, n_features), default=None + Initial value of the dictionary for warm restart scenarios. + + transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ + 'threshold'}, default='omp' + Algorithm used to transform the data: + + - `'lars'`: uses the least angle regression method + (`linear_model.lars_path`); + - `'lasso_lars'`: uses Lars to compute the Lasso solution. + - `'lasso_cd'`: uses the coordinate descent method to compute the + Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster + if the estimated components are sparse. + - `'omp'`: uses orthogonal matching pursuit to estimate the sparse + solution. + - `'threshold'`: squashes to zero all coefficients less than alpha from + the projection ``dictionary * X'``. + + transform_n_nonzero_coefs : int, default=None + Number of nonzero coefficients to target in each column of the + solution. This is only used by `algorithm='lars'` and + `algorithm='omp'`. If `None`, then + `transform_n_nonzero_coefs=int(n_features / 10)`. + + transform_alpha : float, default=None + If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the + penalty applied to the L1 norm. + If `algorithm='threshold'`, `alpha` is the absolute value of the + threshold below which coefficients will be squashed to zero. + If `None`, defaults to `alpha`. + + .. versionchanged:: 1.2 + When None, default value changed from 1.0 to `alpha`. + + verbose : bool or int, default=False + To control the verbosity of the procedure. + + split_sign : bool, default=False + Whether to split the sparse feature vector into the concatenation of + its negative part and its positive part. This can improve the + performance of downstream classifiers. + + random_state : int, RandomState instance or None, default=None + Used for initializing the dictionary when ``dict_init`` is not + specified, randomly shuffling the data when ``shuffle`` is set to + ``True``, and updating the dictionary. Pass an int for reproducible + results across multiple function calls. + See :term:`Glossary `. + + positive_code : bool, default=False + Whether to enforce positivity when finding the code. + + .. versionadded:: 0.20 + + positive_dict : bool, default=False + Whether to enforce positivity when finding the dictionary. + + .. versionadded:: 0.20 + + transform_max_iter : int, default=1000 + Maximum number of iterations to perform if `algorithm='lasso_cd'` or + `'lasso_lars'`. + + .. versionadded:: 0.22 + + callback : callable, default=None + A callable that gets invoked at the end of each iteration. + + .. versionadded:: 1.1 + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to None. + + .. 
versionadded:: 1.1 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Components extracted from the data. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations over the full dataset. + + n_steps_ : int + Number of mini-batches processed. + + .. versionadded:: 1.1 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparseCoder : Find a sparse representation of data from a fixed, + precomputed dictionary. + SparsePCA : Sparse Principal Components Analysis. + + References + ---------- + + J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning + for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_coded_signal + >>> from sklearn.decomposition import MiniBatchDictionaryLearning + >>> X, dictionary, code = make_sparse_coded_signal( + ... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, + ... random_state=42) + >>> dict_learner = MiniBatchDictionaryLearning( + ... n_components=15, batch_size=3, transform_algorithm='lasso_lars', + ... transform_alpha=0.1, max_iter=20, random_state=42) + >>> X_transformed = dict_learner.fit_transform(X) + + We can check the level of sparsity of `X_transformed`: + + >>> np.mean(X_transformed == 0) > 0.5 + True + + We can compare the average squared euclidean norm of the reconstruction + error of the sparse coded signal relative to the squared euclidean norm of + the original signal: + + >>> X_hat = X_transformed @ dict_learner.components_ + >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) + 0.052... 
+ """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)], + "fit_algorithm": [StrOptions({"cd", "lars"})], + "n_jobs": [None, Integral], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "dict_init": [None, np.ndarray], + "transform_algorithm": [ + StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"}) + ], + "transform_n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "transform_alpha": [Interval(Real, 0, None, closed="left"), None], + "verbose": ["verbose"], + "split_sign": ["boolean"], + "random_state": ["random_state"], + "positive_code": ["boolean"], + "positive_dict": ["boolean"], + "transform_max_iter": [Interval(Integral, 0, None, closed="left")], + "callback": [None, callable], + "tol": [Interval(Real, 0, None, closed="left")], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + max_iter=1_000, + fit_algorithm="lars", + n_jobs=None, + batch_size=256, + shuffle=True, + dict_init=None, + transform_algorithm="omp", + transform_n_nonzero_coefs=None, + transform_alpha=None, + verbose=False, + split_sign=False, + random_state=None, + positive_code=False, + positive_dict=False, + transform_max_iter=1000, + callback=None, + tol=1e-3, + max_no_improvement=10, + ): + super().__init__( + transform_algorithm, + transform_n_nonzero_coefs, + transform_alpha, + split_sign, + n_jobs, + positive_code, + transform_max_iter, + ) + self.n_components = n_components + self.alpha = alpha + self.max_iter = max_iter + self.fit_algorithm = fit_algorithm + self.dict_init = dict_init + self.verbose = verbose + self.shuffle = shuffle + self.batch_size = batch_size + self.split_sign = split_sign + self.random_state = random_state + self.positive_dict = positive_dict + self.callback = callback + self.max_no_improvement = max_no_improvement + self.tol = tol + + def _check_params(self, X): + # n_components + self._n_components = self.n_components + if self._n_components is None: + self._n_components = X.shape[1] + + # fit_algorithm + _check_positive_coding(self.fit_algorithm, self.positive_code) + self._fit_algorithm = "lasso_" + self.fit_algorithm + + # batch_size + self._batch_size = min(self.batch_size, X.shape[0]) + + def _initialize_dict(self, X, random_state): + """Initialization of the dictionary.""" + if self.dict_init is not None: + dictionary = self.dict_init + else: + # Init V with SVD of X + _, S, dictionary = randomized_svd( + X, self._n_components, random_state=random_state + ) + dictionary = S[:, np.newaxis] * dictionary + + if self._n_components <= len(dictionary): + dictionary = dictionary[: self._n_components, :] + else: + dictionary = np.concatenate( + ( + dictionary, + np.zeros( + (self._n_components - len(dictionary), dictionary.shape[1]), + dtype=dictionary.dtype, + ), + ) + ) + + dictionary = check_array(dictionary, order="F", dtype=X.dtype, copy=False) + dictionary = np.require(dictionary, requirements="W") + + return dictionary + + def _update_inner_stats(self, X, code, batch_size, step): + """Update the inner stats inplace.""" + if step < batch_size - 1: + theta = (step + 1) * batch_size + else: + theta = batch_size**2 + step + 1 - batch_size + beta = (theta + 1 - batch_size) / (theta + 1) + + self._A *= beta + self._A += code.T @ code / 
batch_size + self._B *= beta + self._B += X.T @ code / batch_size + + def _minibatch_step(self, X, dictionary, random_state, step): + """Perform the update on the dictionary for one minibatch.""" + batch_size = X.shape[0] + + # Compute code for this batch + code = _sparse_encode( + X, + dictionary, + algorithm=self._fit_algorithm, + alpha=self.alpha, + n_jobs=self.n_jobs, + positive=self.positive_code, + max_iter=self.transform_max_iter, + verbose=self.verbose, + ) + + batch_cost = ( + 0.5 * ((X - code @ dictionary) ** 2).sum() + + self.alpha * np.sum(np.abs(code)) + ) / batch_size + + # Update inner stats + self._update_inner_stats(X, code, batch_size, step) + + # Update dictionary + _update_dict( + dictionary, + X, + code, + self._A, + self._B, + verbose=self.verbose, + random_state=random_state, + positive=self.positive_dict, + ) + + return batch_cost + + def _check_convergence( + self, X, batch_cost, new_dict, old_dict, n_samples, step, n_steps + ): + """Helper function to encapsulate the early stopping logic. + + Early stopping is based on two factors: + - A small change of the dictionary between two minibatch updates. This is + controlled by the tol parameter. + - No more improvement on a smoothed estimate of the objective function for a + a certain number of consecutive minibatch updates. This is controlled by + the max_no_improvement parameter. + """ + batch_size = X.shape[0] + + # counts steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore 100 first steps or 1 epoch to avoid initializing the ewa_cost with a + # too bad value + if step <= min(100, n_samples / batch_size): + if self.verbose: + print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}") + return False + + # Compute an Exponentially Weighted Average of the cost function to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_cost is None: + self._ewa_cost = batch_cost + else: + alpha = batch_size / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha + + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch cost: " + f"{batch_cost}, ewa cost: {self._ewa_cost}" + ) + + # Early stopping based on change of dictionary + dict_diff = linalg.norm(new_dict - old_dict) / self._n_components + if self.tol > 0 and dict_diff <= self.tol: + if self.verbose: + print(f"Converged (small dictionary change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # cost function + if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min: + self._no_improvement = 0 + self._ewa_cost_min = self._ewa_cost + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in objective function) " + f"at step {step}/{n_steps}" + ) + return True + + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
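+
+        Notes
+        -----
+        Training stops before ``max_iter`` full passes over the data when
+        either the dictionary changes by less than ``tol`` between two
+        mini-batch updates, or a smoothed estimate of the objective has not
+        improved for ``max_no_improvement`` consecutive mini-batches. A rough
+        sketch of the smoothing (illustrative, mirroring the internal
+        update)::
+
+            alpha = min(batch_size / (n_samples + 1), 1)
+            ewa_cost = ewa_cost * (1 - alpha) + batch_cost * alpha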
+ """ + X = self._validate_data( + X, dtype=[np.float64, np.float32], order="C", copy=False + ) + + self._check_params(X) + self._random_state = check_random_state(self.random_state) + + dictionary = self._initialize_dict(X, self._random_state) + old_dict = dictionary.copy() + + if self.shuffle: + X_train = X.copy() + self._random_state.shuffle(X_train) + else: + X_train = X + + n_samples, n_features = X_train.shape + + if self.verbose: + print("[dict_learning]") + + # Inner stats + self._A = np.zeros( + (self._n_components, self._n_components), dtype=X_train.dtype + ) + self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype) + + # TODO(1.6): remove in 1.6 + if self.max_iter is None: + warn( + ( + "`max_iter=None` is deprecated in version 1.4 and will be removed" + " in version 1.6. Use the default value (i.e. `1_000`) instead." + ), + FutureWarning, + ) + max_iter = 1_000 + else: + max_iter = self.max_iter + + # Attributes to monitor the convergence + self._ewa_cost = None + self._ewa_cost_min = None + self._no_improvement = 0 + + batches = gen_batches(n_samples, self._batch_size) + batches = itertools.cycle(batches) + n_steps_per_iter = int(np.ceil(n_samples / self._batch_size)) + n_steps = max_iter * n_steps_per_iter + + i = -1 # to allow max_iter = 0 + + for i, batch in zip(range(n_steps), batches): + X_batch = X_train[batch] + + batch_cost = self._minibatch_step( + X_batch, dictionary, self._random_state, i + ) + + if self._check_convergence( + X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps + ): + break + + # XXX callback param added for backward compat in #18975 but a common + # unified callback API should be preferred + if self.callback is not None: + self.callback(locals()) + + old_dict[:] = dictionary + + self.n_steps_ = i + 1 + self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter) + self.components_ = dictionary + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Update the model using the data in X as a mini-batch. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Return the instance itself. 
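+
+        Notes
+        -----
+        Each call treats ``X`` as a single mini-batch and performs one
+        dictionary update, so the estimator can be trained incrementally on a
+        stream of data. An illustrative sketch, where ``batches`` stands for
+        any iterable of 2D arrays with a consistent number of features::
+
+            for X_batch in batches:
+                est.partial_fit(X_batch)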
+ """ + has_components = hasattr(self, "components_") + + X = self._validate_data( + X, dtype=[np.float64, np.float32], order="C", reset=not has_components + ) + + if not has_components: + # This instance has not been fitted yet (fit or partial_fit) + self._check_params(X) + self._random_state = check_random_state(self.random_state) + + dictionary = self._initialize_dict(X, self._random_state) + + self.n_steps_ = 0 + + self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype) + self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype) + else: + dictionary = self.components_ + + self._minibatch_step(X, dictionary, self._random_state, self.n_steps_) + + self.components_ = dictionary + self.n_steps_ += 1 + + return self + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f36e5ba87dbc0ed737c1f07ec51d35c1f2d18e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_fastica.py @@ -0,0 +1,795 @@ +""" +Python implementation of the fast ICA algorithms. + +Reference: Tables 8.3 and 8.4 page 196 in the book: +Independent Component Analysis, by Hyvarinen et al. +""" + +# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux, +# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..utils import as_float_array, check_array, check_random_state +from ..utils._param_validation import Interval, Options, StrOptions, validate_params +from ..utils.validation import check_is_fitted + +__all__ = ["fastica", "FastICA"] + + +def _gs_decorrelation(w, W, j): + """ + Orthonormalize w wrt the first j rows of W. + + Parameters + ---------- + w : ndarray of shape (n,) + Array to be orthogonalized + + W : ndarray of shape (p, n) + Null space definition + + j : int < p + The no of (from the first) rows of Null space W wrt which w is + orthogonalized. + + Notes + ----- + Assumes that W is orthogonal + w changed in place + """ + w -= np.linalg.multi_dot([w, W[:j].T, W[:j]]) + return w + + +def _sym_decorrelation(W): + """Symmetric decorrelation + i.e. W <- (W * W.T) ^{-1/2} * W + """ + s, u = linalg.eigh(np.dot(W, W.T)) + # Avoid sqrt of negative values because of rounding errors. Note that + # np.sqrt(tiny) is larger than tiny and therefore this clipping also + # prevents division by zero in the next step. + s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None) + + # u (resp. s) contains the eigenvectors (resp. square roots of + # the eigenvalues) of W * W.T + return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W]) + + +def _ica_def(X, tol, g, fun_args, max_iter, w_init): + """Deflationary FastICA using fun approx to neg-entropy function + + Used internally by FastICA. 
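+
+    Notes
+    -----
+    Components are estimated one at a time. After each fixed-point update,
+    the candidate vector is decorrelated (Gram-Schmidt) against the rows of
+    ``W`` extracted so far and renormalized, roughly (illustrative, mirroring
+    the loop below)::
+
+        w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
+        w /= np.sqrt((w ** 2).sum())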
+ """ + + n_components = w_init.shape[0] + W = np.zeros((n_components, n_components), dtype=X.dtype) + n_iter = [] + + # j is the index of the extracted component + for j in range(n_components): + w = w_init[j, :].copy() + w /= np.sqrt((w**2).sum()) + + for i in range(max_iter): + gwtx, g_wtx = g(np.dot(w.T, X), fun_args) + + w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w + + _gs_decorrelation(w1, W, j) + + w1 /= np.sqrt((w1**2).sum()) + + lim = np.abs(np.abs((w1 * w).sum()) - 1) + w = w1 + if lim < tol: + break + + n_iter.append(i + 1) + W[j, :] = w + + return W, max(n_iter) + + +def _ica_par(X, tol, g, fun_args, max_iter, w_init): + """Parallel FastICA. + + Used internally by FastICA --main loop + + """ + W = _sym_decorrelation(w_init) + del w_init + p_ = float(X.shape[1]) + for ii in range(max_iter): + gwtx, g_wtx = g(np.dot(W, X), fun_args) + W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W) + del gwtx, g_wtx + # builtin max, abs are faster than numpy counter parts. + # np.einsum allows having the lowest memory footprint. + # It is faster than np.diag(np.dot(W1, W.T)). + lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1)) + W = W1 + if lim < tol: + break + else: + warnings.warn( + ( + "FastICA did not converge. Consider increasing " + "tolerance or the maximum number of iterations." + ), + ConvergenceWarning, + ) + + return W, ii + 1 + + +# Some standard non-linear functions. +# XXX: these should be optimized, as they can be a bottleneck. +def _logcosh(x, fun_args=None): + alpha = fun_args.get("alpha", 1.0) # comment it out? + + x *= alpha + gx = np.tanh(x, x) # apply the tanh inplace + g_x = np.empty(x.shape[0], dtype=x.dtype) + # XXX compute in chunks to avoid extra allocation + for i, gx_i in enumerate(gx): # please don't vectorize. + g_x[i] = (alpha * (1 - gx_i**2)).mean() + return gx, g_x + + +def _exp(x, fun_args): + exp = np.exp(-(x**2) / 2) + gx = x * exp + g_x = (1 - x**2) * exp + return gx, g_x.mean(axis=-1) + + +def _cube(x, fun_args): + return x**3, (3 * x**2).mean(axis=-1) + + +@validate_params( + { + "X": ["array-like"], + "return_X_mean": ["boolean"], + "compute_sources": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def fastica( + X, + n_components=None, + *, + algorithm="parallel", + whiten="unit-variance", + fun="logcosh", + fun_args=None, + max_iter=200, + tol=1e-04, + w_init=None, + whiten_solver="svd", + random_state=None, + return_X_mean=False, + compute_sources=True, + return_n_iter=False, +): + """Perform Fast Independent Component Analysis. + + The implementation is based on [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + n_components : int, default=None + Number of components to use. If None is passed, all are used. + + algorithm : {'parallel', 'deflation'}, default='parallel' + Specify which algorithm to use for FastICA. + + whiten : str or bool, default='unit-variance' + Specify the whitening strategy to use. + + - If 'arbitrary-variance', a whitening with variance + arbitrary is used. + - If 'unit-variance', the whitening matrix is rescaled to ensure that + each recovered source has unit variance. + - If False, the data is already considered to be whitened, and no + whitening is performed. + + .. versionchanged:: 1.3 + The default value of `whiten` changed to 'unit-variance' in 1.3. 
+ + fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' + The functional form of the G function used in the + approximation to neg-entropy. Could be either 'logcosh', 'exp', + or 'cube'. + You can also provide your own function. It should return a tuple + containing the value of the function, and of its derivative, in the + point. The derivative should be averaged along its last dimension. + Example:: + + def my_g(x): + return x ** 3, (3 * x ** 2).mean(axis=-1) + + fun_args : dict, default=None + Arguments to send to the functional form. + If empty or None and if fun='logcosh', fun_args will take value + {'alpha' : 1.0}. + + max_iter : int, default=200 + Maximum number of iterations to perform. + + tol : float, default=1e-4 + A positive scalar giving the tolerance at which the + un-mixing matrix is considered to have converged. + + w_init : ndarray of shape (n_components, n_components), default=None + Initial un-mixing array. If `w_init=None`, then an array of values + drawn from a normal distribution is used. + + whiten_solver : {"eigh", "svd"}, default="svd" + The solver to use for whitening. + + - "svd" is more stable numerically if the problem is degenerate, and + often faster when `n_samples <= n_features`. + + - "eigh" is generally more memory efficient when + `n_samples >= n_features`, and can be faster when + `n_samples >= 50 * n_features`. + + .. versionadded:: 1.2 + + random_state : int, RandomState instance or None, default=None + Used to initialize ``w_init`` when not specified, with a + normal distribution. Pass an int, for reproducible results + across multiple function calls. + See :term:`Glossary `. + + return_X_mean : bool, default=False + If True, X_mean is returned too. + + compute_sources : bool, default=True + If False, sources are not computed, but only the rotation matrix. + This can save memory when working with big data. Defaults to True. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + K : ndarray of shape (n_components, n_features) or None + If whiten is 'True', K is the pre-whitening matrix that projects data + onto the first n_components principal components. If whiten is 'False', + K is 'None'. + + W : ndarray of shape (n_components, n_components) + The square matrix that unmixes the data after whitening. + The mixing matrix is the pseudo-inverse of matrix ``W K`` + if K is not None, else it is the inverse of W. + + S : ndarray of shape (n_samples, n_components) or None + Estimated source matrix. + + X_mean : ndarray of shape (n_features,) + The mean over features. Returned only if return_X_mean is True. + + n_iter : int + If the algorithm is "deflation", n_iter is the + maximum number of iterations run across all components. Else + they are just the number of iterations taken to converge. This is + returned only when return_n_iter is set to `True`. + + Notes + ----- + The data matrix X is considered to be a linear combination of + non-Gaussian (independent) components i.e. X = AS where columns of S + contain the independent components and A is a linear mixing + matrix. In short ICA attempts to `un-mix' the data by estimating an + un-mixing matrix W where ``S = W K X.`` + While FastICA was proposed to estimate as many sources + as features, it is possible to estimate less by setting + n_components < n_features. It this case K is not a square matrix + and the estimated A is the pseudo-inverse of ``W K``. 
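+
+    The mixing matrix can therefore be recovered from the returned factors,
+    for example (illustrative)::
+
+        import numpy as np
+        A = np.linalg.pinv(np.dot(W, K))  # shape (n_features, n_components)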
+ + This implementation was originally made for data of shape + [n_features, n_samples]. Now the input is transposed + before the algorithm is applied. This makes it slightly + faster for Fortran-ordered input. + + References + ---------- + .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis", + Algorithms and Applications, Neural Networks, 13(4-5), 2000, + pp. 411-430. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import fastica + >>> X, _ = load_digits(return_X_y=True) + >>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance') + >>> K.shape + (7, 64) + >>> W.shape + (7, 7) + >>> S.shape + (1797, 7) + """ + est = FastICA( + n_components=n_components, + algorithm=algorithm, + whiten=whiten, + fun=fun, + fun_args=fun_args, + max_iter=max_iter, + tol=tol, + w_init=w_init, + whiten_solver=whiten_solver, + random_state=random_state, + ) + est._validate_params() + S = est._fit_transform(X, compute_sources=compute_sources) + + if est.whiten in ["unit-variance", "arbitrary-variance"]: + K = est.whitening_ + X_mean = est.mean_ + else: + K = None + X_mean = None + + returned_values = [K, est._unmixing, S] + if return_X_mean: + returned_values.append(X_mean) + if return_n_iter: + returned_values.append(est.n_iter_) + + return returned_values + + +class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """FastICA: a fast algorithm for Independent Component Analysis. + + The implementation is based on [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of components to use. If None is passed, all are used. + + algorithm : {'parallel', 'deflation'}, default='parallel' + Specify which algorithm to use for FastICA. + + whiten : str or bool, default='unit-variance' + Specify the whitening strategy to use. + + - If 'arbitrary-variance', a whitening with variance + arbitrary is used. + - If 'unit-variance', the whitening matrix is rescaled to ensure that + each recovered source has unit variance. + - If False, the data is already considered to be whitened, and no + whitening is performed. + + .. versionchanged:: 1.3 + The default value of `whiten` changed to 'unit-variance' in 1.3. + + fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' + The functional form of the G function used in the + approximation to neg-entropy. Could be either 'logcosh', 'exp', + or 'cube'. + You can also provide your own function. It should return a tuple + containing the value of the function, and of its derivative, in the + point. The derivative should be averaged along its last dimension. + Example:: + + def my_g(x): + return x ** 3, (3 * x ** 2).mean(axis=-1) + + fun_args : dict, default=None + Arguments to send to the functional form. + If empty or None and if fun='logcosh', fun_args will take value + {'alpha' : 1.0}. + + max_iter : int, default=200 + Maximum number of iterations during fit. + + tol : float, default=1e-4 + A positive scalar giving the tolerance at which the + un-mixing matrix is considered to have converged. + + w_init : array-like of shape (n_components, n_components), default=None + Initial un-mixing array. If `w_init=None`, then an array of values + drawn from a normal distribution is used. + + whiten_solver : {"eigh", "svd"}, default="svd" + The solver to use for whitening. + + - "svd" is more stable numerically if the problem is degenerate, and + often faster when `n_samples <= n_features`. 
+ + - "eigh" is generally more memory efficient when + `n_samples >= n_features`, and can be faster when + `n_samples >= 50 * n_features`. + + .. versionadded:: 1.2 + + random_state : int, RandomState instance or None, default=None + Used to initialize ``w_init`` when not specified, with a + normal distribution. Pass an int, for reproducible results + across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The linear operator to apply to the data to get the independent + sources. This is equal to the unmixing matrix when ``whiten`` is + False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when + ``whiten`` is True. + + mixing_ : ndarray of shape (n_features, n_components) + The pseudo-inverse of ``components_``. It is the linear operator + that maps independent sources to the data. + + mean_ : ndarray of shape(n_features,) + The mean over features. Only set if `self.whiten` is True. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + If the algorithm is "deflation", n_iter is the + maximum number of iterations run across all components. Else + they are just the number of iterations taken to converge. + + whitening_ : ndarray of shape (n_components, n_features) + Only set if whiten is 'True'. This is the pre-whitening matrix + that projects data onto the first `n_components` principal components. + + See Also + -------- + PCA : Principal component analysis (PCA). + IncrementalPCA : Incremental principal components analysis (IPCA). + KernelPCA : Kernel Principal component analysis (KPCA). + MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. + SparsePCA : Sparse Principal Components Analysis (SparsePCA). + + References + ---------- + .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis: + Algorithms and Applications, Neural Networks, 13(4-5), 2000, + pp. 411-430. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import FastICA + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = FastICA(n_components=7, + ... random_state=0, + ... 
whiten='unit-variance') + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "algorithm": [StrOptions({"parallel", "deflation"})], + "whiten": [ + StrOptions({"arbitrary-variance", "unit-variance"}), + Options(bool, {False}), + ], + "fun": [StrOptions({"logcosh", "exp", "cube"}), callable], + "fun_args": [dict, None], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "w_init": ["array-like", None], + "whiten_solver": [StrOptions({"eigh", "svd"})], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + algorithm="parallel", + whiten="unit-variance", + fun="logcosh", + fun_args=None, + max_iter=200, + tol=1e-4, + w_init=None, + whiten_solver="svd", + random_state=None, + ): + super().__init__() + self.n_components = n_components + self.algorithm = algorithm + self.whiten = whiten + self.fun = fun + self.fun_args = fun_args + self.max_iter = max_iter + self.tol = tol + self.w_init = w_init + self.whiten_solver = whiten_solver + self.random_state = random_state + + def _fit_transform(self, X, compute_sources=False): + """Fit the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + compute_sources : bool, default=False + If False, sources are not computes but only the rotation matrix. + This can save memory when working with big data. Defaults to False. + + Returns + ------- + S : ndarray of shape (n_samples, n_components) or None + Sources matrix. `None` if `compute_sources` is `False`. + """ + XT = self._validate_data( + X, copy=self.whiten, dtype=[np.float64, np.float32], ensure_min_samples=2 + ).T + fun_args = {} if self.fun_args is None else self.fun_args + random_state = check_random_state(self.random_state) + + alpha = fun_args.get("alpha", 1.0) + if not 1 <= alpha <= 2: + raise ValueError("alpha must be in [1,2]") + + if self.fun == "logcosh": + g = _logcosh + elif self.fun == "exp": + g = _exp + elif self.fun == "cube": + g = _cube + elif callable(self.fun): + + def g(x, fun_args): + return self.fun(x, **fun_args) + + n_features, n_samples = XT.shape + n_components = self.n_components + if not self.whiten and n_components is not None: + n_components = None + warnings.warn("Ignoring n_components with whiten=False.") + + if n_components is None: + n_components = min(n_samples, n_features) + if n_components > min(n_samples, n_features): + n_components = min(n_samples, n_features) + warnings.warn( + "n_components is too large: it will be set to %s" % n_components + ) + + if self.whiten: + # Centering the features of X + X_mean = XT.mean(axis=-1) + XT -= X_mean[:, np.newaxis] + + # Whitening and preprocessing by PCA + if self.whiten_solver == "eigh": + # Faster when num_samples >> n_features + d, u = linalg.eigh(XT.dot(X)) + sort_indices = np.argsort(d)[::-1] + eps = np.finfo(d.dtype).eps + degenerate_idx = d < eps + if np.any(degenerate_idx): + warnings.warn( + "There are some small singular values, using " + "whiten_solver = 'svd' might lead to more " + "accurate results." 
+ ) + d[degenerate_idx] = eps # For numerical issues + np.sqrt(d, out=d) + d, u = d[sort_indices], u[:, sort_indices] + elif self.whiten_solver == "svd": + u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2] + + # Give consistent eigenvectors for both svd solvers + u *= np.sign(u[0]) + + K = (u / d).T[:n_components] # see (6.33) p.140 + del u, d + X1 = np.dot(K, XT) + # see (13.6) p.267 Here X1 is white and data + # in X has been projected onto a subspace by PCA + X1 *= np.sqrt(n_samples) + else: + # X must be casted to floats to avoid typing issues with numpy + # 2.0 and the line below + X1 = as_float_array(XT, copy=False) # copy has been taken care of + + w_init = self.w_init + if w_init is None: + w_init = np.asarray( + random_state.normal(size=(n_components, n_components)), dtype=X1.dtype + ) + + else: + w_init = np.asarray(w_init) + if w_init.shape != (n_components, n_components): + raise ValueError( + "w_init has invalid shape -- should be %(shape)s" + % {"shape": (n_components, n_components)} + ) + + kwargs = { + "tol": self.tol, + "g": g, + "fun_args": fun_args, + "max_iter": self.max_iter, + "w_init": w_init, + } + + if self.algorithm == "parallel": + W, n_iter = _ica_par(X1, **kwargs) + elif self.algorithm == "deflation": + W, n_iter = _ica_def(X1, **kwargs) + del X1 + + self.n_iter_ = n_iter + + if compute_sources: + if self.whiten: + S = np.linalg.multi_dot([W, K, XT]).T + else: + S = np.dot(W, XT).T + else: + S = None + + if self.whiten: + if self.whiten == "unit-variance": + if not compute_sources: + S = np.linalg.multi_dot([W, K, XT]).T + S_std = np.std(S, axis=0, keepdims=True) + S /= S_std + W /= S_std.T + + self.components_ = np.dot(W, K) + self.mean_ = X_mean + self.whitening_ = K + else: + self.components_ = W + + self.mixing_ = linalg.pinv(self.components_, check_finite=False) + self._unmixing = W + + return S + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit the model and recover the sources from X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Estimated sources obtained by transforming the data with the + estimated unmixing matrix. + """ + return self._fit_transform(X, compute_sources=True) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + self._fit_transform(X, compute_sources=False) + return self + + def transform(self, X, copy=True): + """Recover the sources from X (apply the unmixing matrix). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to transform, where `n_samples` is the number of samples + and `n_features` is the number of features. + + copy : bool, default=True + If False, data passed to fit can be overwritten. Defaults to True. 
+ + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Estimated sources obtained by transforming the data with the + estimated unmixing matrix. + """ + check_is_fitted(self) + + X = self._validate_data( + X, copy=(copy and self.whiten), dtype=[np.float64, np.float32], reset=False + ) + if self.whiten: + X -= self.mean_ + + return np.dot(X, self.components_.T) + + def inverse_transform(self, X, copy=True): + """Transform the sources back to the mixed data (apply mixing matrix). + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + Sources, where `n_samples` is the number of samples + and `n_components` is the number of components. + copy : bool, default=True + If False, data passed to fit are overwritten. Defaults to True. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Reconstructed data obtained with the mixing matrix. + """ + check_is_fitted(self) + + X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32]) + X = np.dot(X, self.mixing_.T) + if self.whiten: + X += self.mean_ + + return X + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return {"preserves_dtype": [np.float32, np.float64]} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..8fc4aa26a6dfb87428ce59c58d18632cffdc2ad6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_kernel_pca.py @@ -0,0 +1,572 @@ +"""Kernel Principal Components Analysis.""" + +# Author: Mathieu Blondel +# Sylvain Marie +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.linalg import eigh +from scipy.sparse.linalg import eigsh + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import NotFittedError +from ..metrics.pairwise import pairwise_kernels +from ..preprocessing import KernelCenterer +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import _randomized_eigsh, svd_flip +from ..utils.validation import ( + _check_psd_eigenvalues, + check_is_fitted, +) + + +class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Kernel Principal component analysis (KPCA) [1]_. + + Non-linear dimensionality reduction through the use of kernels (see + :ref:`metrics`). + + It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD + or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the + truncated SVD, depending on the shape of the input data and the number of + components to extract. It can also use a randomized truncated SVD by the + method proposed in [3]_, see `eigen_solver`. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_kernel_pca.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of components. If None, all non-zero components are kept. + + kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \ + or callable, default='linear' + Kernel used for PCA. + + gamma : float, default=None + Kernel coefficient for rbf, poly and sigmoid kernels. 
Ignored by other + kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``. + + degree : float, default=3 + Degree for poly kernels. Ignored by other kernels. + + coef0 : float, default=1 + Independent term in poly and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict, default=None + Parameters (keyword arguments) and + values for kernel passed as callable object. + Ignored by other kernels. + + alpha : float, default=1.0 + Hyperparameter of the ridge regression that learns the + inverse transform (when fit_inverse_transform=True). + + fit_inverse_transform : bool, default=False + Learn the inverse transform for non-precomputed kernels + (i.e. learn to find the pre-image of a point). This method is based + on [2]_. + + eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \ + default='auto' + Select eigensolver to use. If `n_components` is much + less than the number of training samples, randomized (or arpack to a + smaller extent) may be more efficient than the dense eigensolver. + Randomized SVD is performed according to the method of Halko et al + [3]_. + + auto : + the solver is selected by a default policy based on n_samples + (the number of training samples) and `n_components`: + if the number of components to extract is less than 10 (strict) and + the number of samples is more than 200 (strict), the 'arpack' + method is enabled. Otherwise the exact full eigenvalue + decomposition is computed and optionally truncated afterwards + ('dense' method). + dense : + run exact full eigenvalue decomposition calling the standard + LAPACK solver via `scipy.linalg.eigh`, and select the components + by postprocessing + arpack : + run SVD truncated to n_components calling ARPACK solver using + `scipy.sparse.linalg.eigsh`. It requires strictly + 0 < n_components < n_samples + randomized : + run randomized SVD by the method of Halko et al. [3]_. The current + implementation selects eigenvalues based on their module; therefore + using this method can lead to unexpected results if the kernel is + not positive semi-definite. See also [4]_. + + .. versionchanged:: 1.0 + `'randomized'` was added. + + tol : float, default=0 + Convergence tolerance for arpack. + If 0, optimal value will be chosen by arpack. + + max_iter : int, default=None + Maximum number of iterations for arpack. + If None, optimal value will be chosen by arpack. + + iterated_power : int >= 0, or 'auto', default='auto' + Number of iterations for the power method computed by + svd_solver == 'randomized'. When 'auto', it is set to 7 when + `n_components < 0.1 * min(X.shape)`, other it is set to 4. + + .. versionadded:: 1.0 + + remove_zero_eig : bool, default=False + If True, then all components with zero eigenvalues are removed, so + that the number of components in the output may be < n_components + (and sometimes even zero due to numerical instability). + When n_components is None, this parameter is ignored and components + with zero eigenvalues are removed regardless. + + random_state : int, RandomState instance or None, default=None + Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + .. versionadded:: 0.18 + + copy_X : bool, default=True + If True, input X is copied and stored by the model in the `X_fit_` + attribute. If no further changes will be done to X, setting + `copy_X=False` saves memory by storing a reference. + + .. 
versionadded:: 0.18 + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.18 + + Attributes + ---------- + eigenvalues_ : ndarray of shape (n_components,) + Eigenvalues of the centered kernel matrix in decreasing order. + If `n_components` and `remove_zero_eig` are not set, + then all values are stored. + + eigenvectors_ : ndarray of shape (n_samples, n_components) + Eigenvectors of the centered kernel matrix. If `n_components` and + `remove_zero_eig` are not set, then all components are stored. + + dual_coef_ : ndarray of shape (n_samples, n_features) + Inverse transform matrix. Only available when + ``fit_inverse_transform`` is True. + + X_transformed_fit_ : ndarray of shape (n_samples, n_components) + Projection of the fitted data on the kernel principal components. + Only available when ``fit_inverse_transform`` is True. + + X_fit_ : ndarray of shape (n_samples, n_features) + The data used to fit the model. If `copy_X=False`, then `X_fit_` is + a reference. This attribute is used for the calls to transform. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + gamma_ : float + Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma` + is explicitly provided, this is just the same as `gamma`. When `gamma` + is `None`, this is the actual value of kernel coefficient. + + .. versionadded:: 1.3 + + See Also + -------- + FastICA : A fast algorithm for Independent Component Analysis. + IncrementalPCA : Incremental Principal Component Analysis. + NMF : Non-Negative Matrix Factorization. + PCA : Principal Component Analysis. + SparsePCA : Sparse Principal Component Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + + References + ---------- + .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller. + "Kernel principal component analysis." + International conference on artificial neural networks. + Springer, Berlin, Heidelberg, 1997. + `_ + + .. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. + "Learning to find pre-images." + Advances in neural information processing systems 16 (2004): 449-456. + `_ + + .. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp. + "Finding structure with randomness: Probabilistic algorithms for + constructing approximate matrix decompositions." + SIAM review 53.2 (2011): 217-288. <0909.4061>` + + .. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert. + "A randomized algorithm for the decomposition of matrices." + Applied and Computational Harmonic Analysis 30.1 (2011): 47-68. 
+ `_ + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.decomposition import KernelPCA + >>> X, _ = load_digits(return_X_y=True) + >>> transformer = KernelPCA(n_components=7, kernel='linear') + >>> X_transformed = transformer.fit_transform(X) + >>> X_transformed.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "kernel": [ + StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}), + callable, + ], + "gamma": [ + Interval(Real, 0, None, closed="left"), + None, + ], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + "alpha": [Interval(Real, 0, None, closed="left")], + "fit_inverse_transform": ["boolean"], + "eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})], + "tol": [Interval(Real, 0, None, closed="left")], + "max_iter": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "iterated_power": [ + Interval(Integral, 0, None, closed="left"), + StrOptions({"auto"}), + ], + "remove_zero_eig": ["boolean"], + "random_state": ["random_state"], + "copy_X": ["boolean"], + "n_jobs": [None, Integral], + } + + def __init__( + self, + n_components=None, + *, + kernel="linear", + gamma=None, + degree=3, + coef0=1, + kernel_params=None, + alpha=1.0, + fit_inverse_transform=False, + eigen_solver="auto", + tol=0, + max_iter=None, + iterated_power="auto", + remove_zero_eig=False, + random_state=None, + copy_X=True, + n_jobs=None, + ): + self.n_components = n_components + self.kernel = kernel + self.kernel_params = kernel_params + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.alpha = alpha + self.fit_inverse_transform = fit_inverse_transform + self.eigen_solver = eigen_solver + self.tol = tol + self.max_iter = max_iter + self.iterated_power = iterated_power + self.remove_zero_eig = remove_zero_eig + self.random_state = random_state + self.n_jobs = n_jobs + self.copy_X = copy_X + + def _get_kernel(self, X, Y=None): + if callable(self.kernel): + params = self.kernel_params or {} + else: + params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0} + return pairwise_kernels( + X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params + ) + + def _fit_transform(self, K): + """Fit's using kernel K""" + # center kernel + K = self._centerer.fit_transform(K) + + # adjust n_components according to user inputs + if self.n_components is None: + n_components = K.shape[0] # use all dimensions + else: + n_components = min(K.shape[0], self.n_components) + + # compute eigenvectors + if self.eigen_solver == "auto": + if K.shape[0] > 200 and n_components < 10: + eigen_solver = "arpack" + else: + eigen_solver = "dense" + else: + eigen_solver = self.eigen_solver + + if eigen_solver == "dense": + # Note: subset_by_index specifies the indices of smallest/largest to return + self.eigenvalues_, self.eigenvectors_ = eigh( + K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1) + ) + elif eigen_solver == "arpack": + v0 = _init_arpack_v0(K.shape[0], self.random_state) + self.eigenvalues_, self.eigenvectors_ = eigsh( + K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0 + ) + elif eigen_solver == "randomized": + self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh( + K, + n_components=n_components, + n_iter=self.iterated_power, + random_state=self.random_state, + 
selection="module", + ) + + # make sure that the eigenvalues are ok and fix numerical issues + self.eigenvalues_ = _check_psd_eigenvalues( + self.eigenvalues_, enable_warnings=False + ) + + # flip eigenvectors' sign to enforce deterministic output + self.eigenvectors_, _ = svd_flip( + self.eigenvectors_, np.zeros_like(self.eigenvectors_).T + ) + + # sort eigenvectors in descending order + indices = self.eigenvalues_.argsort()[::-1] + self.eigenvalues_ = self.eigenvalues_[indices] + self.eigenvectors_ = self.eigenvectors_[:, indices] + + # remove eigenvectors with a zero eigenvalue (null space) if required + if self.remove_zero_eig or self.n_components is None: + self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0] + self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0] + + # Maintenance note on Eigenvectors normalization + # ---------------------------------------------- + # there is a link between + # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)' + # if v is an eigenvector of K + # then Phi(X)v is an eigenvector of Phi(X)Phi(X)' + # if u is an eigenvector of Phi(X)Phi(X)' + # then Phi(X)'u is an eigenvector of Phi(X)'Phi(X) + # + # At this stage our self.eigenvectors_ (the v) have norm 1, we need to scale + # them so that eigenvectors in kernel feature space (the u) have norm=1 + # instead + # + # We COULD scale them here: + # self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_) + # + # But choose to perform that LATER when needed, in `fit()` and in + # `transform()`. + + return K + + def _fit_inverse_transform(self, X_transformed, X): + if hasattr(X, "tocsr"): + raise NotImplementedError( + "Inverse transform not implemented for sparse matrices!" + ) + + n_samples = X_transformed.shape[0] + K = self._get_kernel(X_transformed) + K.flat[:: n_samples + 1] += self.alpha + self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True) + self.X_transformed_fit_ = X_transformed + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + if self.fit_inverse_transform and self.kernel == "precomputed": + raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.") + X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X) + self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma + self._centerer = KernelCenterer().set_output(transform="default") + K = self._get_kernel(X) + self._fit_transform(K) + + if self.fit_inverse_transform: + # no need to use the kernel to transform X, use shortcut expression + X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_) + + self._fit_inverse_transform(X_transformed, X) + + self.X_fit_ = X + return self + + def fit_transform(self, X, y=None, **params): + """Fit the model from data in X and transform X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. 
+ + **params : kwargs + Parameters (keyword arguments) and values passed to + the fit_transform instance. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + """ + self.fit(X, **params) + + # no need to use the kernel to transform X, use shortcut expression + X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_) + + if self.fit_inverse_transform: + self._fit_inverse_transform(X_transformed, X) + + return X_transformed + + def transform(self, X): + """Transform X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Returns the instance itself. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + + # Compute centered gram matrix between X and training data X_fit_ + K = self._centerer.transform(self._get_kernel(X, self.X_fit_)) + + # scale eigenvectors (properly account for null-space for dot product) + non_zeros = np.flatnonzero(self.eigenvalues_) + scaled_alphas = np.zeros_like(self.eigenvectors_) + scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt( + self.eigenvalues_[non_zeros] + ) + + # Project with a scalar product between K and the scaled eigenvectors + return np.dot(K, scaled_alphas) + + def inverse_transform(self, X): + """Transform X back to original space. + + ``inverse_transform`` approximates the inverse transformation using + a learned pre-image. The pre-image is learned by kernel ridge + regression of the original data on their low-dimensional representation + vectors. + + .. note: + :meth:`~sklearn.decomposition.fit` internally uses a centered + kernel. As the centered kernel no longer contains the information + of the mean of kernel features, such information is not taken into + account in reconstruction. + + .. note:: + When users want to compute inverse transformation for 'linear' + kernel, it is recommended that they use + :class:`~sklearn.decomposition.PCA` instead. Unlike + :class:`~sklearn.decomposition.PCA`, + :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform`` + does not reconstruct the mean of data when 'linear' kernel is used + due to the use of centered kernel. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_components) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Returns the instance itself. + + References + ---------- + `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. + "Learning to find pre-images." + Advances in neural information processing systems 16 (2004): 449-456. + `_ + """ + if not self.fit_inverse_transform: + raise NotFittedError( + "The fit_inverse_transform parameter was not" + " set to True when instantiating and hence " + "the inverse transform is not available." 
+ ) + + K = self._get_kernel(X, self.X_transformed_fit_) + return np.dot(K, self.dual_coef_) + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + "pairwise": self.kernel == "precomputed", + } + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.eigenvalues_.shape[0] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_lda.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_lda.py new file mode 100644 index 0000000000000000000000000000000000000000..9e161c178b9e327e4a5e6f6f0c0b3ed9c1cbd57f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_lda.py @@ -0,0 +1,929 @@ +""" + +============================================================= +Online Latent Dirichlet Allocation with variational inference +============================================================= + +This implementation is modified from Matthew D. Hoffman's onlineldavb code +Link: https://github.com/blei-lab/onlineldavb +""" + +# Author: Chyi-Kwei Yau +# Author: Matthew D. Hoffman (original onlineldavb implementation) +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from joblib import effective_n_jobs +from scipy.special import gammaln, logsumexp + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..utils import check_random_state, gen_batches, gen_even_slices +from ..utils._param_validation import Interval, StrOptions +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted, check_non_negative +from ._online_lda_fast import ( + _dirichlet_expectation_1d as cy_dirichlet_expectation_1d, +) +from ._online_lda_fast import ( + _dirichlet_expectation_2d, +) +from ._online_lda_fast import ( + mean_change as cy_mean_change, +) + +EPS = np.finfo(float).eps + + +def _update_doc_distribution( + X, + exp_topic_word_distr, + doc_topic_prior, + max_doc_update_iter, + mean_change_tol, + cal_sstats, + random_state, +): + """E-step: update document-topic distribution. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + exp_topic_word_distr : ndarray of shape (n_topics, n_features) + Exponential value of expectation of log topic word distribution. + In the literature, this is `exp(E[log(beta)])`. + + doc_topic_prior : float + Prior of document topic distribution `theta`. + + max_doc_update_iter : int + Max number of iterations for updating document topic distribution in + the E-step. + + mean_change_tol : float + Stopping tolerance for updating document topic distribution in E-step. + + cal_sstats : bool + Parameter that indicate to calculate sufficient statistics or not. + Set `cal_sstats` to `True` when we need to run M-step. + + random_state : RandomState instance or None + Parameter that indicate how to initialize document topic distribution. + Set `random_state` to None will initialize document topic distribution + to a constant number. + + Returns + ------- + (doc_topic_distr, suff_stats) : + `doc_topic_distr` is unnormalized topic distribution for each document. + In the literature, this is `gamma`. we can calculate `E[log(theta)]` + from it. + `suff_stats` is expected sufficient statistics for the M-step. + When `cal_sstats == False`, this will be None. 
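+
+    Notes
+    -----
+    `doc_topic_distr` has shape `(n_samples, n_topics)` and `suff_stats`,
+    when requested, has the same shape as `exp_topic_word_distr`, i.e.
+    `(n_topics, n_features)`. The core per-document fixed-point update is
+    roughly (illustrative, mirroring the loop below)::
+
+        norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
+        doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T)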
+ + """ + is_sparse_x = sp.issparse(X) + n_samples, n_features = X.shape + n_topics = exp_topic_word_distr.shape[0] + + if random_state: + doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype( + X.dtype, copy=False + ) + else: + doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype) + + # In the literature, this is `exp(E[log(theta)])` + exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr)) + + # diff on `component_` (only calculate it when `cal_diff` is True) + suff_stats = ( + np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None + ) + + if is_sparse_x: + X_data = X.data + X_indices = X.indices + X_indptr = X.indptr + + # These cython functions are called in a nested loop on usually very small arrays + # (length=n_topics). In that case, finding the appropriate signature of the + # fused-typed function can be more costly than its execution, hence the dispatch + # is done outside of the loop. + ctype = "float" if X.dtype == np.float32 else "double" + mean_change = cy_mean_change[ctype] + dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype] + eps = np.finfo(X.dtype).eps + + for idx_d in range(n_samples): + if is_sparse_x: + ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]] + cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]] + else: + ids = np.nonzero(X[idx_d, :])[0] + cnts = X[idx_d, ids] + + doc_topic_d = doc_topic_distr[idx_d, :] + # The next one is a copy, since the inner loop overwrites it. + exp_doc_topic_d = exp_doc_topic[idx_d, :].copy() + exp_topic_word_d = exp_topic_word_distr[:, ids] + + # Iterate between `doc_topic_d` and `norm_phi` until convergence + for _ in range(0, max_doc_update_iter): + last_d = doc_topic_d + + # The optimal phi_{dwk} is proportional to + # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]). + norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps + + doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T) + # Note: adds doc_topic_prior to doc_topic_d, in-place. + dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d) + + if mean_change(last_d, doc_topic_d) < mean_change_tol: + break + doc_topic_distr[idx_d, :] = doc_topic_d + + # Contribution of document d to the expected sufficient + # statistics for the M step. + if cal_sstats: + norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps + suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi) + + return (doc_topic_distr, suff_stats) + + +class LatentDirichletAllocation( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Latent Dirichlet Allocation with online variational Bayes algorithm. + + The implementation is based on [1]_ and [2]_. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=10 + Number of topics. + + .. versionchanged:: 0.19 + ``n_topics`` was renamed to ``n_components`` + + doc_topic_prior : float, default=None + Prior of document topic distribution `theta`. If the value is None, + defaults to `1 / n_components`. + In [1]_, this is called `alpha`. + + topic_word_prior : float, default=None + Prior of topic word distribution `beta`. If the value is None, defaults + to `1 / n_components`. + In [1]_, this is called `eta`. + + learning_method : {'batch', 'online'}, default='batch' + Method used to update `_component`. Only used in :meth:`fit` method. + In general, if the data size is large, the online update will be much + faster than the batch update. 
+ + Valid options:: + + 'batch': Batch variational Bayes method. Use all training data in + each EM update. + Old `components_` will be overwritten in each iteration. + 'online': Online variational Bayes method. In each EM update, use + mini-batch of training data to update the ``components_`` + variable incrementally. The learning rate is controlled by the + ``learning_decay`` and the ``learning_offset`` parameters. + + .. versionchanged:: 0.20 + The default learning method is now ``"batch"``. + + learning_decay : float, default=0.7 + It is a parameter that control learning rate in the online learning + method. The value should be set between (0.5, 1.0] to guarantee + asymptotic convergence. When the value is 0.0 and batch_size is + ``n_samples``, the update method is same as batch learning. In the + literature, this is called kappa. + + learning_offset : float, default=10.0 + A (positive) parameter that downweights early iterations in online + learning. It should be greater than 1.0. In the literature, this is + called tau_0. + + max_iter : int, default=10 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the :meth:`fit` method, and not the + :meth:`partial_fit` method. + + batch_size : int, default=128 + Number of documents to use in each EM iteration. Only used in online + learning. + + evaluate_every : int, default=-1 + How often to evaluate perplexity. Only used in `fit` method. + set it to 0 or negative number to not evaluate perplexity in + training at all. Evaluating perplexity can help you check convergence + in training process, but it will also increase total training time. + Evaluating perplexity in every iteration might increase training time + up to two-fold. + + total_samples : int, default=1e6 + Total number of documents. Only used in the :meth:`partial_fit` method. + + perp_tol : float, default=1e-1 + Perplexity tolerance in batch learning. Only used when + ``evaluate_every`` is greater than 0. + + mean_change_tol : float, default=1e-3 + Stopping tolerance for updating document topic distribution in E-step. + + max_doc_update_iter : int, default=100 + Max number of iterations for updating document topic distribution in + the E-step. + + n_jobs : int, default=None + The number of jobs to use in the E-step. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + Verbosity level. + + random_state : int, RandomState instance or None, default=None + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Variational parameters for topic word distribution. Since the complete + conditional for topic word distribution is a Dirichlet, + ``components_[i, j]`` can be viewed as pseudocount that represents the + number of times word `j` was assigned to topic `i`. + It can also be viewed as distribution over the words for each topic + after normalization: + ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``. + + exp_dirichlet_component_ : ndarray of shape (n_components, n_features) + Exponential value of expectation of log topic word distribution. + In the literature, this is `exp(E[log(beta)])`. + + n_batch_iter_ : int + Number of iterations of the EM step. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of passes over the dataset. + + bound_ : float + Final perplexity score on training set. + + doc_topic_prior_ : float + Prior of document topic distribution `theta`. If the value is None, + it is `1 / n_components`. + + random_state_ : RandomState instance + RandomState instance that is generated either from a seed, the random + number generator or by `np.random`. + + topic_word_prior_ : float + Prior of topic word distribution `beta`. If the value is None, it is + `1 / n_components`. + + See Also + -------- + sklearn.discriminant_analysis.LinearDiscriminantAnalysis: + A classifier with a linear decision boundary, generated by fitting + class conditional densities to the data and using Bayes' rule. + + References + ---------- + .. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D. + Hoffman, David M. Blei, Francis Bach, 2010 + https://github.com/blei-lab/onlineldavb + + .. [2] "Stochastic Variational Inference", Matthew D. Hoffman, + David M. Blei, Chong Wang, John Paisley, 2013 + + Examples + -------- + >>> from sklearn.decomposition import LatentDirichletAllocation + >>> from sklearn.datasets import make_multilabel_classification + >>> # This produces a feature matrix of token counts, similar to what + >>> # CountVectorizer would produce on text. + >>> X, _ = make_multilabel_classification(random_state=0) + >>> lda = LatentDirichletAllocation(n_components=5, + ... random_state=0) + >>> lda.fit(X) + LatentDirichletAllocation(...) + >>> # get topics for some given samples: + >>> lda.transform(X[-2:]) + array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846], + [0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]]) + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 0, None, closed="neither")], + "doc_topic_prior": [None, Interval(Real, 0, 1, closed="both")], + "topic_word_prior": [None, Interval(Real, 0, 1, closed="both")], + "learning_method": [StrOptions({"batch", "online"})], + "learning_decay": [Interval(Real, 0, 1, closed="both")], + "learning_offset": [Interval(Real, 1.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "batch_size": [Interval(Integral, 0, None, closed="neither")], + "evaluate_every": [Interval(Integral, None, None, closed="neither")], + "total_samples": [Interval(Real, 0, None, closed="neither")], + "perp_tol": [Interval(Real, 0, None, closed="left")], + "mean_change_tol": [Interval(Real, 0, None, closed="left")], + "max_doc_update_iter": [Interval(Integral, 0, None, closed="left")], + "n_jobs": [None, Integral], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=10, + *, + doc_topic_prior=None, + topic_word_prior=None, + learning_method="batch", + learning_decay=0.7, + learning_offset=10.0, + max_iter=10, + batch_size=128, + evaluate_every=-1, + total_samples=1e6, + perp_tol=1e-1, + mean_change_tol=1e-3, + max_doc_update_iter=100, + n_jobs=None, + verbose=0, + random_state=None, + ): + self.n_components = n_components + self.doc_topic_prior = doc_topic_prior + self.topic_word_prior = topic_word_prior + self.learning_method = learning_method + self.learning_decay = learning_decay + self.learning_offset = learning_offset + self.max_iter = max_iter + self.batch_size = 
batch_size + self.evaluate_every = evaluate_every + self.total_samples = total_samples + self.perp_tol = perp_tol + self.mean_change_tol = mean_change_tol + self.max_doc_update_iter = max_doc_update_iter + self.n_jobs = n_jobs + self.verbose = verbose + self.random_state = random_state + + def _init_latent_vars(self, n_features, dtype=np.float64): + """Initialize latent variables.""" + + self.random_state_ = check_random_state(self.random_state) + self.n_batch_iter_ = 1 + self.n_iter_ = 0 + + if self.doc_topic_prior is None: + self.doc_topic_prior_ = 1.0 / self.n_components + else: + self.doc_topic_prior_ = self.doc_topic_prior + + if self.topic_word_prior is None: + self.topic_word_prior_ = 1.0 / self.n_components + else: + self.topic_word_prior_ = self.topic_word_prior + + init_gamma = 100.0 + init_var = 1.0 / init_gamma + # In the literature, this is called `lambda` + self.components_ = self.random_state_.gamma( + init_gamma, init_var, (self.n_components, n_features) + ).astype(dtype, copy=False) + + # In the literature, this is `exp(E[log(beta)])` + self.exp_dirichlet_component_ = np.exp( + _dirichlet_expectation_2d(self.components_) + ) + + def _e_step(self, X, cal_sstats, random_init, parallel=None): + """E-step in EM update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + cal_sstats : bool + Parameter that indicate whether to calculate sufficient statistics + or not. Set ``cal_sstats`` to True when we need to run M-step. + + random_init : bool + Parameter that indicate whether to initialize document topic + distribution randomly in the E-step. Set it to True in training + steps. + + parallel : joblib.Parallel, default=None + Pre-initialized instance of joblib.Parallel. + + Returns + ------- + (doc_topic_distr, suff_stats) : + `doc_topic_distr` is unnormalized topic distribution for each + document. In the literature, this is called `gamma`. + `suff_stats` is expected sufficient statistics for the M-step. + When `cal_sstats == False`, it will be None. + + """ + + # Run e-step in parallel + random_state = self.random_state_ if random_init else None + + # TODO: make Parallel._effective_n_jobs public instead? + n_jobs = effective_n_jobs(self.n_jobs) + if parallel is None: + parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) + results = parallel( + delayed(_update_doc_distribution)( + X[idx_slice, :], + self.exp_dirichlet_component_, + self.doc_topic_prior_, + self.max_doc_update_iter, + self.mean_change_tol, + cal_sstats, + random_state, + ) + for idx_slice in gen_even_slices(X.shape[0], n_jobs) + ) + + # merge result + doc_topics, sstats_list = zip(*results) + doc_topic_distr = np.vstack(doc_topics) + + if cal_sstats: + # This step finishes computing the sufficient statistics for the + # M-step. + suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype) + for sstats in sstats_list: + suff_stats += sstats + suff_stats *= self.exp_dirichlet_component_ + else: + suff_stats = None + + return (doc_topic_distr, suff_stats) + + def _em_step(self, X, total_samples, batch_update, parallel=None): + """EM update for 1 iteration. + + update `_component` by batch VB or online VB. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + total_samples : int + Total number of documents. It is only used when + batch_update is `False`. + + batch_update : bool + Parameter that controls updating method. 
+ `True` for batch learning, `False` for online learning. + + parallel : joblib.Parallel, default=None + Pre-initialized instance of joblib.Parallel + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Unnormalized document topic distribution. + """ + + # E-step + _, suff_stats = self._e_step( + X, cal_sstats=True, random_init=True, parallel=parallel + ) + + # M-step + if batch_update: + self.components_ = self.topic_word_prior_ + suff_stats + else: + # online update + # In the literature, the weight is `rho` + weight = np.power( + self.learning_offset + self.n_batch_iter_, -self.learning_decay + ) + doc_ratio = float(total_samples) / X.shape[0] + self.components_ *= 1 - weight + self.components_ += weight * ( + self.topic_word_prior_ + doc_ratio * suff_stats + ) + + # update `component_` related variables + self.exp_dirichlet_component_ = np.exp( + _dirichlet_expectation_2d(self.components_) + ) + self.n_batch_iter_ += 1 + return + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + "requires_positive_X": True, + } + + def _check_non_neg_array(self, X, reset_n_features, whom): + """check X format + + check X format and make sure no negative value in X. + + Parameters + ---------- + X : array-like or sparse matrix + + """ + dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype + + X = self._validate_data( + X, + reset=reset_n_features, + accept_sparse="csr", + dtype=dtype, + ) + check_non_negative(X, whom) + + return X + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Online VB with Mini-Batch update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Partially fitted estimator. + """ + first_time = not hasattr(self, "components_") + + X = self._check_non_neg_array( + X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit" + ) + n_samples, n_features = X.shape + batch_size = self.batch_size + + # initialize parameters or check + if first_time: + self._init_latent_vars(n_features, dtype=X.dtype) + + if n_features != self.components_.shape[1]: + raise ValueError( + "The provided data has %d dimensions while " + "the model was trained with feature size %d." + % (n_features, self.components_.shape[1]) + ) + + n_jobs = effective_n_jobs(self.n_jobs) + with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: + for idx_slice in gen_batches(n_samples, batch_size): + self._em_step( + X[idx_slice, :], + total_samples=self.total_samples, + batch_update=False, + parallel=parallel, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn model for the data X with variational Bayes method. + + When `learning_method` is 'online', use mini-batch update. + Otherwise, use batch update. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Fitted estimator. 
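+
+        Examples
+        --------
+        A minimal, illustrative sketch (not part of the upstream docstring;
+        the tiny count matrix below is made up purely for demonstration):
+
+        >>> import numpy as np
+        >>> from sklearn.decomposition import LatentDirichletAllocation
+        >>> X = np.array([[2, 0, 1, 3], [0, 4, 1, 0], [3, 1, 0, 2]])
+        >>> lda = LatentDirichletAllocation(n_components=2, random_state=0)
+        >>> lda.fit(X).components_.shape
+        (2, 4)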
+ """ + X = self._check_non_neg_array( + X, reset_n_features=True, whom="LatentDirichletAllocation.fit" + ) + n_samples, n_features = X.shape + max_iter = self.max_iter + evaluate_every = self.evaluate_every + learning_method = self.learning_method + + batch_size = self.batch_size + + # initialize parameters + self._init_latent_vars(n_features, dtype=X.dtype) + # change to perplexity later + last_bound = None + n_jobs = effective_n_jobs(self.n_jobs) + with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: + for i in range(max_iter): + if learning_method == "online": + for idx_slice in gen_batches(n_samples, batch_size): + self._em_step( + X[idx_slice, :], + total_samples=n_samples, + batch_update=False, + parallel=parallel, + ) + else: + # batch update + self._em_step( + X, total_samples=n_samples, batch_update=True, parallel=parallel + ) + + # check perplexity + if evaluate_every > 0 and (i + 1) % evaluate_every == 0: + doc_topics_distr, _ = self._e_step( + X, cal_sstats=False, random_init=False, parallel=parallel + ) + bound = self._perplexity_precomp_distr( + X, doc_topics_distr, sub_sampling=False + ) + if self.verbose: + print( + "iteration: %d of max_iter: %d, perplexity: %.4f" + % (i + 1, max_iter, bound) + ) + + if last_bound and abs(last_bound - bound) < self.perp_tol: + break + last_bound = bound + + elif self.verbose: + print("iteration: %d of max_iter: %d" % (i + 1, max_iter)) + self.n_iter_ += 1 + + # calculate final perplexity value on train set + doc_topics_distr, _ = self._e_step( + X, cal_sstats=False, random_init=False, parallel=parallel + ) + self.bound_ = self._perplexity_precomp_distr( + X, doc_topics_distr, sub_sampling=False + ) + + return self + + def _unnormalized_transform(self, X): + """Transform data X according to fitted model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution for X. + """ + doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False) + + return doc_topic_distr + + def transform(self, X): + """Transform data X according to the fitted model. + + .. versionchanged:: 0.18 + *doc_topic_distr* is now normalized + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + Returns + ------- + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution for X. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=False, whom="LatentDirichletAllocation.transform" + ) + doc_topic_distr = self._unnormalized_transform(X) + doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis] + return doc_topic_distr + + def _approx_bound(self, X, doc_topic_distr, sub_sampling): + """Estimate the variational bound. + + Estimate the variational bound over "all documents" using only the + documents passed in as X. Since log-likelihood of each word cannot + be computed directly, we use this bound to estimate it. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + doc_topic_distr : ndarray of shape (n_samples, n_components) + Document topic distribution. In the literature, this is called + gamma. + + sub_sampling : bool, default=False + Compensate for subsampling of documents. + It is used in calculate bound in online learning. 
+ + Returns + ------- + score : float + + """ + + def _loglikelihood(prior, distr, dirichlet_distr, size): + # calculate log-likelihood + score = np.sum((prior - distr) * dirichlet_distr) + score += np.sum(gammaln(distr) - gammaln(prior)) + score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1))) + return score + + is_sparse_x = sp.issparse(X) + n_samples, n_components = doc_topic_distr.shape + n_features = self.components_.shape[1] + score = 0 + + dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr) + dirichlet_component_ = _dirichlet_expectation_2d(self.components_) + doc_topic_prior = self.doc_topic_prior_ + topic_word_prior = self.topic_word_prior_ + + if is_sparse_x: + X_data = X.data + X_indices = X.indices + X_indptr = X.indptr + + # E[log p(docs | theta, beta)] + for idx_d in range(0, n_samples): + if is_sparse_x: + ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]] + cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]] + else: + ids = np.nonzero(X[idx_d, :])[0] + cnts = X[idx_d, ids] + temp = ( + dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids] + ) + norm_phi = logsumexp(temp, axis=0) + score += np.dot(cnts, norm_phi) + + # compute E[log p(theta | alpha) - log q(theta | gamma)] + score += _loglikelihood( + doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components + ) + + # Compensate for the subsampling of the population of documents + if sub_sampling: + doc_ratio = float(self.total_samples) / n_samples + score *= doc_ratio + + # E[log p(beta | eta) - log q (beta | lambda)] + score += _loglikelihood( + topic_word_prior, self.components_, dirichlet_component_, n_features + ) + + return score + + def score(self, X, y=None): + """Calculate approximate log-likelihood as score. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + score : float + Use approximate bound as score. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=False, whom="LatentDirichletAllocation.score" + ) + + doc_topic_distr = self._unnormalized_transform(X) + score = self._approx_bound(X, doc_topic_distr, sub_sampling=False) + return score + + def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False): + """Calculate approximate perplexity for data X with ability to accept + precomputed doc_topic_distr + + Perplexity is defined as exp(-1. * log-likelihood per word) + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + doc_topic_distr : ndarray of shape (n_samples, n_components), \ + default=None + Document topic distribution. + If it is None, it will be generated by applying transform on X. + + Returns + ------- + score : float + Perplexity score. + """ + if doc_topic_distr is None: + doc_topic_distr = self._unnormalized_transform(X) + else: + n_samples, n_components = doc_topic_distr.shape + if n_samples != X.shape[0]: + raise ValueError( + "Number of samples in X and doc_topic_distr do not match." 
+ ) + + if n_components != self.n_components: + raise ValueError("Number of topics does not match.") + + current_samples = X.shape[0] + bound = self._approx_bound(X, doc_topic_distr, sub_sampling) + + if sub_sampling: + word_cnt = X.sum() * (float(self.total_samples) / current_samples) + else: + word_cnt = X.sum() + perword_bound = bound / word_cnt + + return np.exp(-1.0 * perword_bound) + + def perplexity(self, X, sub_sampling=False): + """Calculate approximate perplexity for data X. + + Perplexity is defined as exp(-1. * log-likelihood per word) + + .. versionchanged:: 0.19 + *doc_topic_distr* argument has been deprecated and is ignored + because user no longer has access to unnormalized distribution + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Document word matrix. + + sub_sampling : bool + Do sub-sampling or not. + + Returns + ------- + score : float + Perplexity score. + """ + check_is_fitted(self) + X = self._check_non_neg_array( + X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity" + ) + return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling) + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c95f066f9667b7156ceeda679229829efaec9ec8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..b14df8c5f4d222a2750be1fc413a671dfbc558e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_sparse_pca.py @@ -0,0 +1,551 @@ +"""Matrix factorization with Sparse PCA.""" +# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..linear_model import ridge_regression +from ..utils import check_random_state +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import svd_flip +from ..utils.validation import check_array, check_is_fitted +from ._dict_learning import MiniBatchDictionaryLearning, dict_learning + + +class _BaseSparsePCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Base class for SparsePCA and MiniBatchSparsePCA""" + + _parameter_constraints: dict = { + "n_components": [None, Interval(Integral, 1, None, closed="left")], + "alpha": [Interval(Real, 0.0, None, closed="left")], + "ridge_alpha": [Interval(Real, 0.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "method": [StrOptions({"lars", "cd"})], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1000, + tol=1e-8, + method="lars", + 
n_jobs=None, + verbose=False, + random_state=None, + ): + self.n_components = n_components + self.alpha = alpha + self.ridge_alpha = ridge_alpha + self.max_iter = max_iter + self.tol = tol + self.method = method + self.n_jobs = n_jobs + self.verbose = verbose + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model from data in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + random_state = check_random_state(self.random_state) + X = self._validate_data(X) + + self.mean_ = X.mean(axis=0) + X = X - self.mean_ + + if self.n_components is None: + n_components = X.shape[1] + else: + n_components = self.n_components + + return self._fit(X, n_components, random_state) + + def transform(self, X): + """Least Squares projection of the data onto the sparse components. + + To avoid instability issues in case the system is under-determined, + regularization can be applied (Ridge regression) via the + `ridge_alpha` parameter. + + Note that Sparse PCA components orthogonality is not enforced as in PCA + hence one cannot use a simple linear projection. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Test data to be transformed, must have the same number of + features as the data used to train the model. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + X = X - self.mean_ + + U = ridge_regression( + self.components_.T, X.T, self.ridge_alpha, solver="cholesky" + ) + + return U + + def inverse_transform(self, X): + """Transform data from the latent space to the original space. + + This inversion is an approximation due to the loss of information + induced by the forward decomposition. + + .. versionadded:: 1.2 + + Parameters + ---------- + X : ndarray of shape (n_samples, n_components) + Data in the latent space. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Reconstructed data in the original space. + """ + check_is_fitted(self) + X = check_array(X) + + return (X @ self.components_) + self.mean_ + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } + + +class SparsePCA(_BaseSparsePCA): + """Sparse Principal Components Analysis (SparsePCA). + + Finds the set of sparse components that can optimally reconstruct + the data. The amount of sparseness is controllable by the coefficient + of the L1 penalty, given by the parameter alpha. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of sparse atoms to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : float, default=1 + Sparsity controlling parameter. Higher values lead to sparser + components. + + ridge_alpha : float, default=0.01 + Amount of ridge shrinkage to apply in order to improve + conditioning when calling the transform method. + + max_iter : int, default=1000 + Maximum number of iterations to perform. 
+ + tol : float, default=1e-8 + Tolerance for the stopping condition. + + method : {'lars', 'cd'}, default='lars' + Method to be used for optimization. + lars: uses the least angle regression method to solve the lasso problem + (linear_model.lars_path) + cd: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). Lars will be faster if + the estimated components are sparse. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + U_init : ndarray of shape (n_samples, n_components), default=None + Initial values for the loadings for warm restart scenarios. Only used + if `U_init` and `V_init` are not None. + + V_init : ndarray of shape (n_components, n_features), default=None + Initial values for the components for warm restart scenarios. Only used + if `U_init` and `V_init` are not None. + + verbose : int or bool, default=False + Controls the verbosity; the higher, the more messages. Defaults to 0. + + random_state : int, RandomState instance or None, default=None + Used during dictionary learning. Pass an int for reproducible results + across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Sparse components extracted from the data. + + error_ : ndarray + Vector of errors at each iteration. + + n_components_ : int + Estimated number of components. + + .. versionadded:: 0.23 + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + Equal to ``X.mean(axis=0)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PCA : Principal Component Analysis implementation. + MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less + accurate. + DictionaryLearning : Generic dictionary learning problem using a sparse code. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.decomposition import SparsePCA + >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0) + >>> transformer = SparsePCA(n_components=5, random_state=0) + >>> transformer.fit(X) + SparsePCA(...) + >>> X_transformed = transformer.transform(X) + >>> X_transformed.shape + (200, 5) + >>> # most values in the components_ are zero (sparsity) + >>> np.mean(transformer.components_ == 0) + 0.9666... 
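+    >>> # Illustrative continuation (not part of the upstream docstring):
+    >>> # the fitted components can map the codes back to the original space.
+    >>> transformer.inverse_transform(X_transformed).shape
+    (200, 30)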
+ """ + + _parameter_constraints: dict = { + **_BaseSparsePCA._parameter_constraints, + "U_init": [None, np.ndarray], + "V_init": [None, np.ndarray], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1000, + tol=1e-8, + method="lars", + n_jobs=None, + U_init=None, + V_init=None, + verbose=False, + random_state=None, + ): + super().__init__( + n_components=n_components, + alpha=alpha, + ridge_alpha=ridge_alpha, + max_iter=max_iter, + tol=tol, + method=method, + n_jobs=n_jobs, + verbose=verbose, + random_state=random_state, + ) + self.U_init = U_init + self.V_init = V_init + + def _fit(self, X, n_components, random_state): + """Specialized `fit` for SparsePCA.""" + + code_init = self.V_init.T if self.V_init is not None else None + dict_init = self.U_init.T if self.U_init is not None else None + code, dictionary, E, self.n_iter_ = dict_learning( + X.T, + n_components, + alpha=self.alpha, + tol=self.tol, + max_iter=self.max_iter, + method=self.method, + n_jobs=self.n_jobs, + verbose=self.verbose, + random_state=random_state, + code_init=code_init, + dict_init=dict_init, + return_n_iter=True, + ) + # flip eigenvectors' sign to enforce deterministic output + code, dictionary = svd_flip(code, dictionary, u_based_decision=False) + self.components_ = code.T + components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis] + components_norm[components_norm == 0] = 1 + self.components_ /= components_norm + self.n_components_ = len(self.components_) + + self.error_ = E + return self + + +class MiniBatchSparsePCA(_BaseSparsePCA): + """Mini-batch Sparse Principal Components Analysis. + + Finds the set of sparse components that can optimally reconstruct + the data. The amount of sparseness is controllable by the coefficient + of the L1 penalty, given by the parameter alpha. + + For an example comparing sparse PCA to PCA, see + :ref:`sphx_glr_auto_examples_decomposition_plot_faces_decomposition.py` + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Number of sparse atoms to extract. If None, then ``n_components`` + is set to ``n_features``. + + alpha : int, default=1 + Sparsity controlling parameter. Higher values lead to sparser + components. + + ridge_alpha : float, default=0.01 + Amount of ridge shrinkage to apply in order to improve + conditioning when calling the transform method. + + max_iter : int, default=1_000 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + .. versionadded:: 1.2 + + .. deprecated:: 1.4 + `max_iter=None` is deprecated in 1.4 and will be removed in 1.6. + Use the default value (i.e. `100`) instead. + + callback : callable, default=None + Callable that gets invoked every five iterations. + + batch_size : int, default=3 + The number of features to take in each mini batch. + + verbose : int or bool, default=False + Controls the verbosity; the higher, the more messages. Defaults to 0. + + shuffle : bool, default=True + Whether to shuffle the data before splitting it in batches. + + n_jobs : int, default=None + Number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + method : {'lars', 'cd'}, default='lars' + Method to be used for optimization. 
+ lars: uses the least angle regression method to solve the lasso problem + (linear_model.lars_path) + cd: uses the coordinate descent method to compute the + Lasso solution (linear_model.Lasso). Lars will be faster if + the estimated components are sparse. + + random_state : int, RandomState instance or None, default=None + Used for random shuffling when ``shuffle`` is set to ``True``, + during online dictionary learning. Pass an int for reproducible results + across multiple function calls. + See :term:`Glossary `. + + tol : float, default=1e-3 + Control early stopping based on the norm of the differences in the + dictionary between 2 steps. + + To disable early stopping based on changes in the dictionary, set + `tol` to 0.0. + + .. versionadded:: 1.1 + + max_no_improvement : int or None, default=10 + Control early stopping based on the consecutive number of mini batches + that does not yield an improvement on the smoothed cost function. + + To disable convergence detection based on cost function, set + `max_no_improvement` to `None`. + + .. versionadded:: 1.1 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Sparse components extracted from the data. + + n_components_ : int + Estimated number of components. + + .. versionadded:: 0.23 + + n_iter_ : int + Number of iterations run. + + mean_ : ndarray of shape (n_features,) + Per-feature empirical mean, estimated from the training set. + Equal to ``X.mean(axis=0)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + IncrementalPCA : Incremental principal components analysis. + PCA : Principal component analysis. + SparsePCA : Sparse Principal Components Analysis. + TruncatedSVD : Dimensionality reduction using truncated SVD. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.decomposition import MiniBatchSparsePCA + >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0) + >>> transformer = MiniBatchSparsePCA(n_components=5, batch_size=50, + ... max_iter=10, random_state=0) + >>> transformer.fit(X) + MiniBatchSparsePCA(...) + >>> X_transformed = transformer.transform(X) + >>> X_transformed.shape + (200, 5) + >>> # most values in the components_ are zero (sparsity) + >>> np.mean(transformer.components_ == 0) + 0.9... 
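+    >>> # Illustrative continuation (not part of the upstream docstring):
+    >>> # each row of components_ is one sparse atom over the 30 features.
+    >>> transformer.components_.shape
+    (5, 30)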
+ """ + + _parameter_constraints: dict = { + **_BaseSparsePCA._parameter_constraints, + "max_iter": [Interval(Integral, 0, None, closed="left"), Hidden(None)], + "callback": [None, callable], + "batch_size": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + } + + def __init__( + self, + n_components=None, + *, + alpha=1, + ridge_alpha=0.01, + max_iter=1_000, + callback=None, + batch_size=3, + verbose=False, + shuffle=True, + n_jobs=None, + method="lars", + random_state=None, + tol=1e-3, + max_no_improvement=10, + ): + super().__init__( + n_components=n_components, + alpha=alpha, + ridge_alpha=ridge_alpha, + max_iter=max_iter, + tol=tol, + method=method, + n_jobs=n_jobs, + verbose=verbose, + random_state=random_state, + ) + self.callback = callback + self.batch_size = batch_size + self.shuffle = shuffle + self.max_no_improvement = max_no_improvement + + def _fit(self, X, n_components, random_state): + """Specialized `fit` for MiniBatchSparsePCA.""" + + transform_algorithm = "lasso_" + self.method + est = MiniBatchDictionaryLearning( + n_components=n_components, + alpha=self.alpha, + max_iter=self.max_iter, + dict_init=None, + batch_size=self.batch_size, + shuffle=self.shuffle, + n_jobs=self.n_jobs, + fit_algorithm=self.method, + random_state=random_state, + transform_algorithm=transform_algorithm, + transform_alpha=self.alpha, + verbose=self.verbose, + callback=self.callback, + tol=self.tol, + max_no_improvement=self.max_no_improvement, + ) + est.set_output(transform="default") + est.fit(X.T) + + self.components_, self.n_iter_ = est.transform(X.T).T, est.n_iter_ + + components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis] + components_norm[components_norm == 0] = 1 + self.components_ /= components_norm + self.n_components_ = len(self.components_) + + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py new file mode 100644 index 0000000000000000000000000000000000000000..725683e8d46c6eef2c7fc53780c65f91e51122cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/decomposition/_truncated_svd.py @@ -0,0 +1,319 @@ +"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). +""" + +# Author: Lars Buitinck +# Olivier Grisel +# Michael Becker +# License: 3-clause BSD. + +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy.sparse.linalg import svds + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..utils import check_array, check_random_state +from ..utils._arpack import _init_arpack_v0 +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import check_is_fitted + +__all__ = ["TruncatedSVD"] + + +class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Dimensionality reduction using truncated SVD (aka LSA). + + This transformer performs linear dimensionality reduction by means of + truncated singular value decomposition (SVD). Contrary to PCA, this + estimator does not center the data before computing the singular value + decomposition. This means it can work with sparse matrices + efficiently. 
+ + In particular, truncated SVD works on term count/tf-idf matrices as + returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In + that context, it is known as latent semantic analysis (LSA). + + This estimator supports two algorithms: a fast randomized SVD solver, and + a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or + `X.T * X`, whichever is more efficient. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=2 + Desired dimensionality of output data. + If algorithm='arpack', must be strictly less than the number of features. + If algorithm='randomized', must be less than or equal to the number of features. + The default value is useful for visualisation. For LSA, a value of + 100 is recommended. + + algorithm : {'arpack', 'randomized'}, default='randomized' + SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy + (scipy.sparse.linalg.svds), or "randomized" for the randomized + algorithm due to Halko (2009). + + n_iter : int, default=5 + Number of iterations for randomized SVD solver. Not used by ARPACK. The + default is larger than the default in + :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse + matrices that may have large slowly decaying spectrum. + + n_oversamples : int, default=10 + Number of oversamples for randomized SVD solver. Not used by ARPACK. + See :func:`~sklearn.utils.extmath.randomized_svd` for a complete + description. + + .. versionadded:: 1.1 + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Power iteration normalizer for randomized SVD solver. + Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` + for more details. + + .. versionadded:: 1.1 + + random_state : int, RandomState instance or None, default=None + Used during randomized svd. Pass an int for reproducible results across + multiple function calls. + See :term:`Glossary `. + + tol : float, default=0.0 + Tolerance for ARPACK. 0 means machine precision. Ignored by randomized + SVD solver. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The right singular vectors of the input data. + + explained_variance_ : ndarray of shape (n_components,) + The variance of the training samples transformed by a projection to + each component. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + + singular_values_ : ndarray of shape (n_components,) + The singular values corresponding to each of the selected components. + The singular values are equal to the 2-norms of the ``n_components`` + variables in the lower-dimensional space. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + DictionaryLearning : Find a dictionary that sparsely encodes data. + FactorAnalysis : A simple linear generative model with + Gaussian latent variables. + IncrementalPCA : Incremental principal components analysis. + KernelPCA : Kernel Principal component analysis. + NMF : Non-Negative Matrix Factorization. + PCA : Principal component analysis. 
+
+    Notes
+    -----
+    SVD suffers from a problem called "sign indeterminacy", which means the
+    sign of the ``components_`` and the output from transform depend on the
+    algorithm and random state. To work around this, fit instances of this
+    class to data once, then keep the instance around to do transformations.
+
+    References
+    ----------
+    :arxiv:`Halko, et al. (2009). "Finding structure with randomness:
+    Stochastic algorithms for constructing approximate matrix decompositions"
+    <0909.4061>`
+
+    Examples
+    --------
+    >>> from sklearn.decomposition import TruncatedSVD
+    >>> from scipy.sparse import csr_matrix
+    >>> import numpy as np
+    >>> np.random.seed(0)
+    >>> X_dense = np.random.rand(100, 100)
+    >>> X_dense[:, 2 * np.arange(50)] = 0
+    >>> X = csr_matrix(X_dense)
+    >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
+    >>> svd.fit(X)
+    TruncatedSVD(n_components=5, n_iter=7, random_state=42)
+    >>> print(svd.explained_variance_ratio_)
+    [0.0157... 0.0512... 0.0499... 0.0479... 0.0453...]
+    >>> print(svd.explained_variance_ratio_.sum())
+    0.2102...
+    >>> print(svd.singular_values_)
+    [35.2410... 4.5981... 4.5420... 4.4486... 4.3288...]
+    """
+
+    _parameter_constraints: dict = {
+        "n_components": [Interval(Integral, 1, None, closed="left")],
+        "algorithm": [StrOptions({"arpack", "randomized"})],
+        "n_iter": [Interval(Integral, 0, None, closed="left")],
+        "n_oversamples": [Interval(Integral, 1, None, closed="left")],
+        "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
+        "random_state": ["random_state"],
+        "tol": [Interval(Real, 0, None, closed="left")],
+    }
+
+    def __init__(
+        self,
+        n_components=2,
+        *,
+        algorithm="randomized",
+        n_iter=5,
+        n_oversamples=10,
+        power_iteration_normalizer="auto",
+        random_state=None,
+        tol=0.0,
+    ):
+        self.algorithm = algorithm
+        self.n_components = n_components
+        self.n_iter = n_iter
+        self.n_oversamples = n_oversamples
+        self.power_iteration_normalizer = power_iteration_normalizer
+        self.random_state = random_state
+        self.tol = tol
+
+    def fit(self, X, y=None):
+        """Fit model on training data X.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Training data.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            Returns the transformer object.
+        """
+        self.fit_transform(X)
+        return self
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit_transform(self, X, y=None):
+        """Fit model to X and perform dimensionality reduction on X.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Training data.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        X_new : ndarray of shape (n_samples, n_components)
+            Reduced version of X. This will always be a dense array.
+        """
+        X = self._validate_data(X, accept_sparse=["csr", "csc"], ensure_min_features=2)
+        random_state = check_random_state(self.random_state)
+
+        if self.algorithm == "arpack":
+            v0 = _init_arpack_v0(min(X.shape), random_state)
+            U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0)
+            # svds doesn't abide by scipy.linalg.svd/randomized_svd
+            # conventions, so reverse its outputs.
+            Sigma = Sigma[::-1]
+            U, VT = svd_flip(U[:, ::-1], VT[::-1])
+
+        elif self.algorithm == "randomized":
+            if self.n_components > X.shape[1]:
+                raise ValueError(
+                    f"n_components({self.n_components}) must be <="
+                    f" n_features({X.shape[1]})."
+ ) + U, Sigma, VT = randomized_svd( + X, + self.n_components, + n_iter=self.n_iter, + n_oversamples=self.n_oversamples, + power_iteration_normalizer=self.power_iteration_normalizer, + random_state=random_state, + ) + + self.components_ = VT + + # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T, + # X @ V is not the same as U @ Sigma + if self.algorithm == "randomized" or ( + self.algorithm == "arpack" and self.tol > 0 + ): + X_transformed = safe_sparse_dot(X, self.components_.T) + else: + X_transformed = U * Sigma + + # Calculate explained variance & explained variance ratio + self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) + if sp.issparse(X): + _, full_var = mean_variance_axis(X, axis=0) + full_var = full_var.sum() + else: + full_var = np.var(X, axis=0).sum() + self.explained_variance_ratio_ = exp_var / full_var + self.singular_values_ = Sigma # Store the singular values. + + return X_transformed + + def transform(self, X): + """Perform dimensionality reduction on X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Reduced version of X. This will always be a dense array. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse=["csr", "csc"], reset=False) + return safe_sparse_dot(X, self.components_.T) + + def inverse_transform(self, X): + """Transform X back to its original space. + + Returns an array X_original whose transform would be X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + New data. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Note that this is always a dense array. + """ + X = check_array(X) + return np.dot(X, self.components_) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} + + @property + def _n_features_out(self): + """Number of transformed output features.""" + return self.components_.shape[0] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4fbc631155078f6663beb3c5fcfa7de7fc5878f4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py @@ -0,0 +1,47 @@ +""" +The :mod:`sklearn.feature_selection` module implements feature selection +algorithms. It currently includes univariate filter selection methods and the +recursive feature elimination algorithm. 
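+
+A minimal illustrative example (not part of the upstream docstring):
+
+>>> from sklearn.datasets import load_iris
+>>> from sklearn.feature_selection import SelectKBest, f_classif
+>>> X, y = load_iris(return_X_y=True)
+>>> SelectKBest(f_classif, k=2).fit_transform(X, y).shape
+(150, 2)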
+""" + +from ._base import SelectorMixin +from ._from_model import SelectFromModel +from ._mutual_info import mutual_info_classif, mutual_info_regression +from ._rfe import RFE, RFECV +from ._sequential import SequentialFeatureSelector +from ._univariate_selection import ( + GenericUnivariateSelect, + SelectFdr, + SelectFpr, + SelectFwe, + SelectKBest, + SelectPercentile, + chi2, + f_classif, + f_oneway, + f_regression, + r_regression, +) +from ._variance_threshold import VarianceThreshold + +__all__ = [ + "GenericUnivariateSelect", + "SequentialFeatureSelector", + "RFE", + "RFECV", + "SelectFdr", + "SelectFpr", + "SelectFwe", + "SelectKBest", + "SelectFromModel", + "SelectPercentile", + "VarianceThreshold", + "chi2", + "f_classif", + "f_oneway", + "f_regression", + "r_regression", + "mutual_info_classif", + "mutual_info_regression", + "SelectorMixin", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e5b80da796f41641acea29b1134f22ade072759 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82deb881afd5576405d58cb58fab00e2638a5753 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19c4283e280df45016a8d9e45b70571c63f659c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be30a0c8a72f16582371ba45a1015a8b427c8959 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42c62e35c439461d1c4bfd502bac39d65019de6c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6474380cb1b8637d0ced8552b3387500b7db3719 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35290582a66eccb410c3262b178b61dbba01c4ef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..159f553db6ce357772688bf4f106d545e67236bb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..69e40ce08aed005186416588531f644eb566f150 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_base.py @@ -0,0 +1,266 @@ +"""Generic feature selection mixin""" + +# Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman +# License: BSD 3 clause + +import warnings +from abc import ABCMeta, abstractmethod +from operator import attrgetter + +import numpy as np +from scipy.sparse import csc_matrix, issparse + +from ..base import TransformerMixin +from ..utils import ( + _is_pandas_df, + _safe_indexing, + check_array, + safe_sqr, +) +from ..utils._set_output import _get_output_config +from ..utils._tags import _safe_tags +from ..utils.validation import _check_feature_names_in, check_is_fitted + + +class SelectorMixin(TransformerMixin, metaclass=ABCMeta): + """ + Transformer mixin that performs feature selection given a support mask + + This mixin provides a feature selector implementation with `transform` and + `inverse_transform` functionality given an implementation of + `_get_support_mask`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> from sklearn.base import BaseEstimator + >>> from sklearn.feature_selection import SelectorMixin + >>> class FeatureSelector(SelectorMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self.n_features_in_ = X.shape[1] + ... return self + ... def _get_support_mask(self): + ... mask = np.zeros(self.n_features_in_, dtype=bool) + ... mask[:2] = True # select the first two features + ... return mask + >>> X, y = load_iris(return_X_y=True) + >>> FeatureSelector().fit_transform(X, y).shape + (150, 2) + """ + + def get_support(self, indices=False): + """ + Get a mask, or integer index, of the features selected. + + Parameters + ---------- + indices : bool, default=False + If True, the return value will be an array of integers, rather + than a boolean mask. + + Returns + ------- + support : array + An index that selects the retained features from a feature vector. 
+ If `indices` is False, this is a boolean array of shape + [# input features], in which an element is True iff its + corresponding feature is selected for retention. If `indices` is + True, this is an integer array of shape [# output features] whose + values are indices into the input feature vector. + """ + mask = self._get_support_mask() + return mask if not indices else np.where(mask)[0] + + @abstractmethod + def _get_support_mask(self): + """ + Get the boolean mask indicating which features are selected + + Returns + ------- + support : boolean array of shape [# input features] + An element is True iff its corresponding feature is selected for + retention. + """ + + def transform(self, X): + """Reduce X to the selected features. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + Returns + ------- + X_r : array of shape [n_samples, n_selected_features] + The input samples with only the selected features. + """ + # Preserve X when X is a dataframe and the output is configured to + # be pandas. + output_config_dense = _get_output_config("transform", estimator=self)["dense"] + preserve_X = output_config_dense != "default" and _is_pandas_df(X) + + # note: we use _safe_tags instead of _get_tags because this is a + # public Mixin. + X = self._validate_data( + X, + dtype=None, + accept_sparse="csr", + force_all_finite=not _safe_tags(self, key="allow_nan"), + cast_to_ndarray=not preserve_X, + reset=False, + ) + return self._transform(X) + + def _transform(self, X): + """Reduce X to the selected features.""" + mask = self.get_support() + if not mask.any(): + warnings.warn( + ( + "No features were selected: either the data is" + " too noisy or the selection test too strict." + ), + UserWarning, + ) + if hasattr(X, "iloc"): + return X.iloc[:, :0] + return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0)) + return _safe_indexing(X, mask, axis=1) + + def inverse_transform(self, X): + """Reverse the transformation operation. + + Parameters + ---------- + X : array of shape [n_samples, n_selected_features] + The input samples. + + Returns + ------- + X_r : array of shape [n_samples, n_original_features] + `X` with columns of zeros inserted where features would have + been removed by :meth:`transform`. + """ + if issparse(X): + X = X.tocsc() + # insert additional entries in indptr: + # e.g. if transform changed indptr from [0 2 6 7] to [0 2 3] + # col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3] + it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1)) + col_nonzeros = it.ravel() + indptr = np.concatenate([[0], np.cumsum(col_nonzeros)]) + Xt = csc_matrix( + (X.data, X.indices, indptr), + shape=(X.shape[0], len(indptr) - 1), + dtype=X.dtype, + ) + return Xt + + support = self.get_support() + X = check_array(X, dtype=None) + if support.sum() != X.shape[1]: + raise ValueError("X has a different shape than during fitting.") + + if X.ndim == 1: + X = X[None, :] + Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype) + Xt[:, support] = X + return Xt + + def get_feature_names_out(self, input_features=None): + """Mask feature names according to selected features. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. 
+ - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self) + input_features = _check_feature_names_in(self, input_features) + return input_features[self.get_support()] + + +def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1): + """ + Retrieve and aggregate (ndim > 1) the feature importances + from an estimator. Also optionally applies transformation. + + Parameters + ---------- + estimator : estimator + A scikit-learn estimator from which we want to get the feature + importances. + + getter : "auto", str or callable + An attribute or a callable to get the feature importance. If `"auto"`, + `estimator` is expected to expose `coef_` or `feature_importances`. + + transform_func : {"norm", "square"}, default=None + The transform to apply to the feature importances. By default (`None`) + no transformation is applied. + + norm_order : int, default=1 + The norm order to apply when `transform_func="norm"`. Only applied + when `importances.ndim > 1`. + + Returns + ------- + importances : ndarray of shape (n_features,) + The features importances, optionally transformed. + """ + if isinstance(getter, str): + if getter == "auto": + if hasattr(estimator, "coef_"): + getter = attrgetter("coef_") + elif hasattr(estimator, "feature_importances_"): + getter = attrgetter("feature_importances_") + else: + raise ValueError( + "when `importance_getter=='auto'`, the underlying " + f"estimator {estimator.__class__.__name__} should have " + "`coef_` or `feature_importances_` attribute. Either " + "pass a fitted estimator to feature selector or call fit " + "before calling transform." + ) + else: + getter = attrgetter(getter) + elif not callable(getter): + raise ValueError("`importance_getter` has to be a string or `callable`") + + importances = getter(estimator) + + if transform_func is None: + return importances + elif transform_func == "norm": + if importances.ndim == 1: + importances = np.abs(importances) + else: + importances = np.linalg.norm(importances, axis=0, ord=norm_order) + elif transform_func == "square": + if importances.ndim == 1: + importances = safe_sqr(importances) + else: + importances = safe_sqr(importances).sum(axis=0) + else: + raise ValueError( + "Valid values for `transform_func` are " + + "None, 'norm' and 'square'. 
Those two " + + "transformation are only supported now" + ) + + return importances diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py new file mode 100644 index 0000000000000000000000000000000000000000..61addedd2de787ccc38135147c7df6f895dc53b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_from_model.py @@ -0,0 +1,522 @@ +# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena +# License: BSD 3 clause + +from copy import deepcopy +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone +from ..exceptions import NotFittedError +from ..utils._param_validation import HasMethods, Interval, Options +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import available_if +from ..utils.validation import _num_features, check_is_fitted, check_scalar +from ._base import SelectorMixin, _get_feature_importances + + +def _calculate_threshold(estimator, importances, threshold): + """Interpret the threshold value""" + + if threshold is None: + # determine default from estimator + est_name = estimator.__class__.__name__ + is_l1_penalized = hasattr(estimator, "penalty") and estimator.penalty == "l1" + is_lasso = "Lasso" in est_name + is_elasticnet_l1_penalized = "ElasticNet" in est_name and ( + (hasattr(estimator, "l1_ratio_") and np.isclose(estimator.l1_ratio_, 1.0)) + or (hasattr(estimator, "l1_ratio") and np.isclose(estimator.l1_ratio, 1.0)) + ) + if is_l1_penalized or is_lasso or is_elasticnet_l1_penalized: + # the natural default threshold is 0 when l1 penalty was used + threshold = 1e-5 + else: + threshold = "mean" + + if isinstance(threshold, str): + if "*" in threshold: + scale, reference = threshold.split("*") + scale = float(scale.strip()) + reference = reference.strip() + + if reference == "median": + reference = np.median(importances) + elif reference == "mean": + reference = np.mean(importances) + else: + raise ValueError("Unknown reference: " + reference) + + threshold = scale * reference + + elif threshold == "median": + threshold = np.median(importances) + + elif threshold == "mean": + threshold = np.mean(importances) + + else: + raise ValueError( + "Expected threshold='mean' or threshold='median' got %s" % threshold + ) + + else: + threshold = float(threshold) + + return threshold + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `estimator_` if available, otherwise we check the + unfitted `estimator`. We raise the original `AttributeError` if `attr` does + not exist. This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "estimator_"): + getattr(self.estimator_, attr) + else: + getattr(self.estimator, attr) + + return True + + return check + + +class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator): + """Meta-transformer for selecting features based on importance weights. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object + The base estimator from which the transformer is built. + This can be both a fitted (if ``prefit`` is set to True) + or a non-fitted estimator. 
The estimator should have a + ``feature_importances_`` or ``coef_`` attribute after fitting. + Otherwise, the ``importance_getter`` parameter should be used. + + threshold : str or float, default=None + The threshold value to use for feature selection. Features whose + absolute importance value is greater or equal are kept while the others + are discarded. If "median" (resp. "mean"), then the ``threshold`` value + is the median (resp. the mean) of the feature importances. A scaling + factor (e.g., "1.25*mean") may also be used. If None and if the + estimator has a parameter penalty set to l1, either explicitly + or implicitly (e.g, Lasso), the threshold used is 1e-5. + Otherwise, "mean" is used by default. + + prefit : bool, default=False + Whether a prefit model is expected to be passed into the constructor + directly or not. + If `True`, `estimator` must be a fitted estimator. + If `False`, `estimator` is fitted and updated by calling + `fit` and `partial_fit`, respectively. + + norm_order : non-zero int, inf, -inf, default=1 + Order of the norm used to filter the vectors of coefficients below + ``threshold`` in the case where the ``coef_`` attribute of the + estimator is of dimension 2. + + max_features : int, callable, default=None + The maximum number of features to select. + + - If an integer, then it specifies the maximum number of features to + allow. + - If a callable, then it specifies how to calculate the maximum number of + features allowed by using the output of `max_features(X)`. + - If `None`, then all features are kept. + + To only select based on ``max_features``, set ``threshold=-np.inf``. + + .. versionadded:: 0.20 + .. versionchanged:: 1.1 + `max_features` accepts a callable. + + importance_getter : str or callable, default='auto' + If 'auto', uses the feature importance either through a ``coef_`` + attribute or ``feature_importances_`` attribute of estimator. + + Also accepts a string that specifies an attribute name/path + for extracting feature importance (implemented with `attrgetter`). + For example, give `regressor_.coef_` in case of + :class:`~sklearn.compose.TransformedTargetRegressor` or + `named_steps.clf.feature_importances_` in case of + :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. + + If `callable`, overrides the default feature importance getter. + The callable is passed with the fitted estimator and it should + return importance for each feature. + + .. versionadded:: 0.24 + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the transformer is built. This attribute + exist only when `fit` has been called. + + - If `prefit=True`, it is a deep copy of `estimator`. + - If `prefit=False`, it is a clone of `estimator` and fit on the data + passed to `fit` or `partial_fit`. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + max_features_ : int + Maximum number of features calculated during :term:`fit`. Only defined + if the ``max_features`` is not `None`. + + - If `max_features` is an `int`, then `max_features_ = max_features`. + - If `max_features` is a callable, then `max_features_ = max_features(X)`. + + .. versionadded:: 1.1 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + threshold_ : float + The threshold value used for feature selection. + + See Also + -------- + RFE : Recursive feature elimination based on importance weights. + RFECV : Recursive feature elimination with built-in cross-validated + selection of the best number of features. + SequentialFeatureSelector : Sequential cross-validation based feature + selection. Does not rely on importance weights. + + Notes + ----- + Allows NaN/Inf in the input if the underlying estimator does as well. + + Examples + -------- + >>> from sklearn.feature_selection import SelectFromModel + >>> from sklearn.linear_model import LogisticRegression + >>> X = [[ 0.87, -1.34, 0.31 ], + ... [-2.79, -0.02, -0.85 ], + ... [-1.34, -0.48, -2.55 ], + ... [ 1.92, 1.48, 0.65 ]] + >>> y = [0, 1, 0, 1] + >>> selector = SelectFromModel(estimator=LogisticRegression()).fit(X, y) + >>> selector.estimator_.coef_ + array([[-0.3252..., 0.8345..., 0.4976...]]) + >>> selector.threshold_ + 0.55249... + >>> selector.get_support() + array([False, True, False]) + >>> selector.transform(X) + array([[-1.34], + [-0.02], + [-0.48], + [ 1.48]]) + + Using a callable to create a selector that can use no more than half + of the input features. + + >>> def half_callable(X): + ... return round(len(X[0]) / 2) + >>> half_selector = SelectFromModel(estimator=LogisticRegression(), + ... max_features=half_callable) + >>> _ = half_selector.fit(X, y) + >>> half_selector.max_features_ + 2 + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods("fit")], + "threshold": [Interval(Real, None, None, closed="both"), str, None], + "prefit": ["boolean"], + "norm_order": [ + Interval(Integral, None, -1, closed="right"), + Interval(Integral, 1, None, closed="left"), + Options(Real, {np.inf, -np.inf}), + ], + "max_features": [Interval(Integral, 0, None, closed="left"), callable, None], + "importance_getter": [str, callable], + } + + def __init__( + self, + estimator, + *, + threshold=None, + prefit=False, + norm_order=1, + max_features=None, + importance_getter="auto", + ): + self.estimator = estimator + self.threshold = threshold + self.prefit = prefit + self.importance_getter = importance_getter + self.norm_order = norm_order + self.max_features = max_features + + def _get_support_mask(self): + estimator = getattr(self, "estimator_", self.estimator) + max_features = getattr(self, "max_features_", self.max_features) + + if self.prefit: + try: + check_is_fitted(self.estimator) + except NotFittedError as exc: + raise NotFittedError( + "When `prefit=True`, `estimator` is expected to be a fitted " + "estimator." + ) from exc + if callable(max_features): + # This branch is executed when `transform` is called directly and thus + # `max_features_` is not set and we fallback using `self.max_features` + # that is not validated + raise NotFittedError( + "When `prefit=True` and `max_features` is a callable, call `fit` " + "before calling `transform`." + ) + elif max_features is not None and not isinstance(max_features, Integral): + raise ValueError( + f"`max_features` must be an integer. Got `max_features={max_features}` " + "instead." 
+ ) + + scores = _get_feature_importances( + estimator=estimator, + getter=self.importance_getter, + transform_func="norm", + norm_order=self.norm_order, + ) + threshold = _calculate_threshold(estimator, scores, self.threshold) + if self.max_features is not None: + mask = np.zeros_like(scores, dtype=bool) + candidate_indices = np.argsort(-scores, kind="mergesort")[:max_features] + mask[candidate_indices] = True + else: + mask = np.ones_like(scores, dtype=bool) + mask[scores < threshold] = False + return mask + + def _check_max_features(self, X): + if self.max_features is not None: + n_features = _num_features(X) + + if callable(self.max_features): + max_features = self.max_features(X) + else: # int + max_features = self.max_features + + check_scalar( + max_features, + "max_features", + Integral, + min_val=0, + max_val=n_features, + ) + self.max_features_ = max_features + + @_fit_context( + # SelectFromModel.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **fit_params): + """Fit the SelectFromModel meta-transformer. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,), default=None + The target values (integers that correspond to classes in + classification, real numbers in regression). + + **fit_params : dict + - If `enable_metadata_routing=False` (default): + + Parameters directly passed to the `partial_fit` method of the + sub-estimator. They are ignored if `prefit=True`. + + - If `enable_metadata_routing=True`: + + Parameters safely routed to the `partial_fit` method of the + sub-estimator. They are ignored if `prefit=True`. + + .. versionchanged:: 1.4 + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Fitted estimator. + """ + self._check_max_features(X) + + if self.prefit: + try: + check_is_fitted(self.estimator) + except NotFittedError as exc: + raise NotFittedError( + "When `prefit=True`, `estimator` is expected to be a fitted " + "estimator." + ) from exc + self.estimator_ = deepcopy(self.estimator) + else: + if _routing_enabled(): + routed_params = process_routing(self, "fit", **fit_params) + self.estimator_ = clone(self.estimator) + self.estimator_.fit(X, y, **routed_params.estimator.fit) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. + self.estimator_ = clone(self.estimator) + self.estimator_.fit(X, y, **fit_params) + + if hasattr(self.estimator_, "feature_names_in_"): + self.feature_names_in_ = self.estimator_.feature_names_in_ + else: + self._check_feature_names(X, reset=True) + + return self + + @property + def threshold_(self): + """Threshold value used for feature selection.""" + scores = _get_feature_importances( + estimator=self.estimator_, + getter=self.importance_getter, + transform_func="norm", + norm_order=self.norm_order, + ) + return _calculate_threshold(self.estimator, scores, self.threshold) + + @available_if(_estimator_has("partial_fit")) + @_fit_context( + # SelectFromModel.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def partial_fit(self, X, y=None, **partial_fit_params): + """Fit the SelectFromModel meta-transformer only once. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,), default=None + The target values (integers that correspond to classes in + classification, real numbers in regression). 
+ + **partial_fit_params : dict + - If `enable_metadata_routing=False` (default): + + Parameters directly passed to the `partial_fit` method of the + sub-estimator. + + - If `enable_metadata_routing=True`: + + Parameters passed to the `partial_fit` method of the + sub-estimator. They are ignored if `prefit=True`. + + .. versionchanged:: 1.4 + `**partial_fit_params` are routed to the sub-estimator, if + `enable_metadata_routing=True` is set via + :func:`~sklearn.set_config`, which allows for aliasing. + + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Fitted estimator. + """ + first_call = not hasattr(self, "estimator_") + + if first_call: + self._check_max_features(X) + + if self.prefit: + if first_call: + try: + check_is_fitted(self.estimator) + except NotFittedError as exc: + raise NotFittedError( + "When `prefit=True`, `estimator` is expected to be a fitted " + "estimator." + ) from exc + self.estimator_ = deepcopy(self.estimator) + return self + + if first_call: + self.estimator_ = clone(self.estimator) + if _routing_enabled(): + routed_params = process_routing(self, "partial_fit", **partial_fit_params) + self.estimator_ = clone(self.estimator) + self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. + self.estimator_.partial_fit(X, y, **partial_fit_params) + + if hasattr(self.estimator_, "feature_names_in_"): + self.feature_names_in_ = self.estimator_.feature_names_in_ + else: + self._check_feature_names(X, reset=first_call) + + return self + + @property + def n_features_in_(self): + """Number of features seen during `fit`.""" + # For consistency with other estimators we raise a AttributeError so + # that hasattr() fails if the estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.estimator_.n_features_in_ + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
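A hedged sketch of the incremental path above: `partial_fit` clones the sub-estimator on the first call and forwards extra keyword arguments (here `classes`) straight to the sub-estimator's `partial_fit` when metadata routing is left at its default. The SGDClassifier and the synthetic mini-batches are illustrative assumptions, not taken from the file.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
classes = np.unique(y)

selector = SelectFromModel(SGDClassifier(random_state=0), threshold="mean")
for batch in np.array_split(np.arange(X.shape[0]), 3):
    # `classes` is passed through to SGDClassifier.partial_fit, which
    # requires it on the first call
    selector.partial_fit(X[batch], y[batch], classes=classes)

print(selector.get_support())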
+ """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(callee="partial_fit", caller="partial_fit") + .add(callee="fit", caller="fit"), + ) + return router + + def _more_tags(self): + return {"allow_nan": _safe_tags(self.estimator, key="allow_nan")} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py new file mode 100644 index 0000000000000000000000000000000000000000..821ef889e7ed90936d6c9898b4c41744d105cb6e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py @@ -0,0 +1,514 @@ +# Author: Nikolay Mayorov +# License: 3-clause BSD + +from numbers import Integral + +import numpy as np +from scipy.sparse import issparse +from scipy.special import digamma + +from ..metrics.cluster import mutual_info_score +from ..neighbors import KDTree, NearestNeighbors +from ..preprocessing import scale +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.multiclass import check_classification_targets +from ..utils.validation import check_array, check_X_y + + +def _compute_mi_cc(x, y, n_neighbors): + """Compute mutual information between two continuous variables. + + Parameters + ---------- + x, y : ndarray, shape (n_samples,) + Samples of two continuous random variables, must have an identical + shape. + + n_neighbors : int + Number of nearest neighbors to search for each point, see [1]_. + + Returns + ------- + mi : float + Estimated mutual information in nat units. If it turned out to be + negative it is replaced by 0. + + Notes + ----- + True mutual information can't be negative. If its estimate by a numerical + method is negative, it means (providing the method is adequate) that the + mutual information is close to 0 and replacing it by 0 is a reasonable + strategy. + + References + ---------- + .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + """ + n_samples = x.size + + x = x.reshape((-1, 1)) + y = y.reshape((-1, 1)) + xy = np.hstack((x, y)) + + # Here we rely on NearestNeighbors to select the fastest algorithm. + nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors) + + nn.fit(xy) + radius = nn.kneighbors()[0] + radius = np.nextafter(radius[:, -1], 0) + + # KDTree is explicitly fit to allow for the querying of number of + # neighbors within a specified radius + kd = KDTree(x, metric="chebyshev") + nx = kd.query_radius(x, radius, count_only=True, return_distance=False) + nx = np.array(nx) - 1.0 + + kd = KDTree(y, metric="chebyshev") + ny = kd.query_radius(y, radius, count_only=True, return_distance=False) + ny = np.array(ny) - 1.0 + + mi = ( + digamma(n_samples) + + digamma(n_neighbors) + - np.mean(digamma(nx + 1)) + - np.mean(digamma(ny + 1)) + ) + + return max(0, mi) + + +def _compute_mi_cd(c, d, n_neighbors): + """Compute mutual information between continuous and discrete variables. + + Parameters + ---------- + c : ndarray, shape (n_samples,) + Samples of a continuous random variable. + + d : ndarray, shape (n_samples,) + Samples of a discrete random variable. + + n_neighbors : int + Number of nearest neighbors to search for each point, see [1]_. + + Returns + ------- + mi : float + Estimated mutual information in nat units. If it turned out to be + negative it is replaced by 0. 
+ + Notes + ----- + True mutual information can't be negative. If its estimate by a numerical + method is negative, it means (providing the method is adequate) that the + mutual information is close to 0 and replacing it by 0 is a reasonable + strategy. + + References + ---------- + .. [1] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + """ + n_samples = c.shape[0] + c = c.reshape((-1, 1)) + + radius = np.empty(n_samples) + label_counts = np.empty(n_samples) + k_all = np.empty(n_samples) + nn = NearestNeighbors() + for label in np.unique(d): + mask = d == label + count = np.sum(mask) + if count > 1: + k = min(n_neighbors, count - 1) + nn.set_params(n_neighbors=k) + nn.fit(c[mask]) + r = nn.kneighbors()[0] + radius[mask] = np.nextafter(r[:, -1], 0) + k_all[mask] = k + label_counts[mask] = count + + # Ignore points with unique labels. + mask = label_counts > 1 + n_samples = np.sum(mask) + label_counts = label_counts[mask] + k_all = k_all[mask] + c = c[mask] + radius = radius[mask] + + kd = KDTree(c) + m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) + m_all = np.array(m_all) + + mi = ( + digamma(n_samples) + + np.mean(digamma(k_all)) + - np.mean(digamma(label_counts)) + - np.mean(digamma(m_all)) + ) + + return max(0, mi) + + +def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3): + """Compute mutual information between two variables. + + This is a simple wrapper which selects a proper function to call based on + whether `x` and `y` are discrete or not. + """ + if x_discrete and y_discrete: + return mutual_info_score(x, y) + elif x_discrete and not y_discrete: + return _compute_mi_cd(y, x, n_neighbors) + elif not x_discrete and y_discrete: + return _compute_mi_cd(x, y, n_neighbors) + else: + return _compute_mi_cc(x, y, n_neighbors) + + +def _iterate_columns(X, columns=None): + """Iterate over columns of a matrix. + + Parameters + ---------- + X : ndarray or csc_matrix, shape (n_samples, n_features) + Matrix over which to iterate. + + columns : iterable or None, default=None + Indices of columns to iterate over. If None, iterate over all columns. + + Yields + ------ + x : ndarray, shape (n_samples,) + Columns of `X` in dense format. + """ + if columns is None: + columns = range(X.shape[1]) + + if issparse(X): + for i in columns: + x = np.zeros(X.shape[0]) + start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1] + x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr] + yield x + else: + for i in columns: + yield X[:, i] + + +def _estimate_mi( + X, + y, + discrete_features="auto", + discrete_target=False, + n_neighbors=3, + copy=True, + random_state=None, +): + """Estimate mutual information between the features and the target. + + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : {'auto', bool, array-like}, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. If array, then it should be either a boolean mask + with shape (n_features,) or array with indices of discrete features. + If 'auto', it is assigned to False for dense `X` and to True for + sparse `X`. + + discrete_target : bool, default=False + Whether to consider `y` as a discrete variable. + + n_neighbors : int, default=3 + Number of neighbors to use for MI estimation for continuous variables, + see [1]_ and [2]_. 
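As a rough, standalone illustration of the estimators defined above (not part of the file): `_compute_mi` dispatches two continuous variables to the Kraskov routine, so strongly dependent samples should receive a much larger estimate than independent ones. The import path is the private module added in this diff and may change between versions; the sample size and noise level are arbitrary.

import numpy as np
from sklearn.feature_selection._mutual_info import _compute_mi

rng = np.random.RandomState(0)
x = rng.normal(size=1000)
y_dependent = x + 0.1 * rng.normal(size=1000)
y_independent = rng.normal(size=1000)

# x_discrete=False, y_discrete=False selects _compute_mi_cc (Kraskov)
print(_compute_mi(x, y_dependent, False, False, n_neighbors=3))
print(_compute_mi(x, y_independent, False, False, n_neighbors=3))  # close to 0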
Higher values reduce variance of the estimation, but + could introduce a bias. + + copy : bool, default=True + Whether to make a copy of the given data. If set to False, the initial + data will be overwritten. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for adding small noise to + continuous variables in order to remove repeated values. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + mi : ndarray, shape (n_features,) + Estimated mutual information between each feature and the target in + nat units. A negative value will be replaced by 0. + + References + ---------- + .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + .. [2] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + """ + X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target) + n_samples, n_features = X.shape + + if isinstance(discrete_features, (str, bool)): + if isinstance(discrete_features, str): + if discrete_features == "auto": + discrete_features = issparse(X) + else: + raise ValueError("Invalid string value for discrete_features.") + discrete_mask = np.empty(n_features, dtype=bool) + discrete_mask.fill(discrete_features) + else: + discrete_features = check_array(discrete_features, ensure_2d=False) + if discrete_features.dtype != "bool": + discrete_mask = np.zeros(n_features, dtype=bool) + discrete_mask[discrete_features] = True + else: + discrete_mask = discrete_features + + continuous_mask = ~discrete_mask + if np.any(continuous_mask) and issparse(X): + raise ValueError("Sparse matrix `X` can't have continuous features.") + + rng = check_random_state(random_state) + if np.any(continuous_mask): + X = X.astype(np.float64, copy=copy) + X[:, continuous_mask] = scale( + X[:, continuous_mask], with_mean=False, copy=False + ) + + # Add small noise to continuous features as advised in Kraskov et. al. + means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0)) + X[:, continuous_mask] += ( + 1e-10 + * means + * rng.standard_normal(size=(n_samples, np.sum(continuous_mask))) + ) + + if not discrete_target: + y = scale(y, with_mean=False) + y += ( + 1e-10 + * np.maximum(1, np.mean(np.abs(y))) + * rng.standard_normal(size=n_samples) + ) + + mi = [ + _compute_mi(x, y, discrete_feature, discrete_target, n_neighbors) + for x, discrete_feature in zip(_iterate_columns(X), discrete_mask) + ] + + return np.array(mi) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "copy": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def mutual_info_regression( + X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None +): + """Estimate mutual information for a continuous target variable. + + Mutual information (MI) [1]_ between two random variables is a non-negative + value, which measures the dependency between the variables. It is equal + to zero if and only if two random variables are independent, and higher + values mean higher dependency. + + The function relies on nonparametric methods based on entropy estimation + from k-nearest neighbors distances as described in [2]_ and [3]_. 
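A short sketch, with synthetic data of arbitrary size, of the `discrete_features` handling implemented in `_estimate_mi` above: a boolean mask marks which columns are treated as discrete, while the continuous columns are scaled and given the small tie-breaking noise before the k-NN estimation.

import numpy as np
from sklearn.feature_selection import mutual_info_regression

rng = np.random.RandomState(0)
x_cont = rng.normal(size=500)
x_disc = rng.randint(0, 3, size=500)
X = np.column_stack([x_cont, x_disc])
y = x_cont + 0.5 * x_disc + 0.1 * rng.normal(size=500)

# one MI value per column, in nats; the mask matches the two columns above
mi = mutual_info_regression(X, y, discrete_features=[False, True], random_state=0)
print(mi)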
Both + methods are based on the idea originally proposed in [4]_. + + It can be used for univariate features selection, read more in the + :ref:`User Guide `. + + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : {'auto', bool, array-like}, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. If array, then it should be either a boolean mask + with shape (n_features,) or array with indices of discrete features. + If 'auto', it is assigned to False for dense `X` and to True for + sparse `X`. + + n_neighbors : int, default=3 + Number of neighbors to use for MI estimation for continuous variables, + see [2]_ and [3]_. Higher values reduce variance of the estimation, but + could introduce a bias. + + copy : bool, default=True + Whether to make a copy of the given data. If set to False, the initial + data will be overwritten. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for adding small noise to + continuous variables in order to remove repeated values. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + mi : ndarray, shape (n_features,) + Estimated mutual information between each feature and the target in + nat units. + + Notes + ----- + 1. The term "discrete features" is used instead of naming them + "categorical", because it describes the essence more accurately. + For example, pixel intensities of an image are discrete features + (but hardly categorical) and you will get better results if mark them + as such. Also note, that treating a continuous variable as discrete and + vice versa will usually give incorrect results, so be attentive about + that. + 2. True mutual information can't be negative. If its estimate turns out + to be negative, it is replaced by zero. + + References + ---------- + .. [1] `Mutual Information + `_ + on Wikipedia. + .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + .. [3] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy + of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16 + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.feature_selection import mutual_info_regression + >>> X, y = make_regression( + ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 + ... ) + >>> mutual_info_regression(X, y) + array([0.1..., 2.6... , 0.0...]) + """ + return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "copy": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def mutual_info_classif( + X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None +): + """Estimate mutual information for a discrete target variable. + + Mutual information (MI) [1]_ between two random variables is a non-negative + value, which measures the dependency between the variables. 
It is equal + to zero if and only if two random variables are independent, and higher + values mean higher dependency. + + The function relies on nonparametric methods based on entropy estimation + from k-nearest neighbors distances as described in [2]_ and [3]_. Both + methods are based on the idea originally proposed in [4]_. + + It can be used for univariate features selection, read more in the + :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Feature matrix. + + y : array-like of shape (n_samples,) + Target vector. + + discrete_features : 'auto', bool or array-like, default='auto' + If bool, then determines whether to consider all features discrete + or continuous. If array, then it should be either a boolean mask + with shape (n_features,) or array with indices of discrete features. + If 'auto', it is assigned to False for dense `X` and to True for + sparse `X`. + + n_neighbors : int, default=3 + Number of neighbors to use for MI estimation for continuous variables, + see [2]_ and [3]_. Higher values reduce variance of the estimation, but + could introduce a bias. + + copy : bool, default=True + Whether to make a copy of the given data. If set to False, the initial + data will be overwritten. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for adding small noise to + continuous variables in order to remove repeated values. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + mi : ndarray, shape (n_features,) + Estimated mutual information between each feature and the target in + nat units. + + Notes + ----- + 1. The term "discrete features" is used instead of naming them + "categorical", because it describes the essence more accurately. + For example, pixel intensities of an image are discrete features + (but hardly categorical) and you will get better results if mark them + as such. Also note, that treating a continuous variable as discrete and + vice versa will usually give incorrect results, so be attentive about + that. + 2. True mutual information can't be negative. If its estimate turns out + to be negative, it is replaced by zero. + + References + ---------- + .. [1] `Mutual Information + `_ + on Wikipedia. + .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual + information". Phys. Rev. E 69, 2004. + .. [3] B. C. Ross "Mutual Information between Discrete and Continuous + Data Sets". PLoS ONE 9(2), 2014. + .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy + of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16 + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.feature_selection import mutual_info_classif + >>> X, y = make_classification( + ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1, + ... shuffle=False, random_state=42 + ... ) + >>> mutual_info_classif(X, y) + array([0.58..., 0.10..., 0.19..., 0.09... , 0. , + 0. , 0. , 0. , 0. , 0. 
]) + """ + check_classification_targets(y) + return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy, random_state) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d1b71e08609a6fdc9dc8ad6db29b96c1da0822 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_rfe.py @@ -0,0 +1,792 @@ +# Authors: Alexandre Gramfort +# Vincent Michel +# Gilles Louppe +# +# License: BSD 3 clause + +"""Recursive feature elimination for feature ranking""" + +from numbers import Integral + +import numpy as np +from joblib import effective_n_jobs + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..metrics import check_scoring +from ..model_selection import check_cv +from ..model_selection._validation import _score +from ..utils._param_validation import HasMethods, Interval, RealNotInt +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.metaestimators import _safe_split, available_if +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin, _get_feature_importances + + +def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer): + """ + Return the score for a fit across one fold. + """ + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + return rfe._fit( + X_train, + y_train, + lambda estimator, features: _score( + # TODO(SLEP6): pass score_params here + estimator, + X_test[:, features], + y_test, + scorer, + score_params=None, + ), + ).scores_ + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `estimator_` if available, otherwise we check the + unfitted `estimator`. We raise the original `AttributeError` if `attr` does + not exist. This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "estimator_"): + getattr(self.estimator_, attr) + else: + getattr(self.estimator, attr) + + return True + + return check + + +class RFE(_RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator): + """Feature ranking with recursive feature elimination. + + Given an external estimator that assigns weights to features (e.g., the + coefficients of a linear model), the goal of recursive feature elimination + (RFE) is to select features by recursively considering smaller and smaller + sets of features. First, the estimator is trained on the initial set of + features and the importance of each feature is obtained either through + any specific attribute or callable. + Then, the least important features are pruned from current set of features. + That procedure is recursively repeated on the pruned set until the desired + number of features to select is eventually reached. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : ``Estimator`` instance + A supervised learning estimator with a ``fit`` method that provides + information about feature importance + (e.g. `coef_`, `feature_importances_`). + + n_features_to_select : int or float, default=None + The number of features to select. If `None`, half of the features are + selected. 
If integer, the parameter is the absolute number of features + to select. If float between 0 and 1, it is the fraction of features to + select. + + .. versionchanged:: 0.24 + Added float values for fractions. + + step : int or float, default=1 + If greater than or equal to 1, then ``step`` corresponds to the + (integer) number of features to remove at each iteration. + If within (0.0, 1.0), then ``step`` corresponds to the percentage + (rounded down) of features to remove at each iteration. + + verbose : int, default=0 + Controls verbosity of output. + + importance_getter : str or callable, default='auto' + If 'auto', uses the feature importance either through a `coef_` + or `feature_importances_` attributes of estimator. + + Also accepts a string that specifies an attribute name/path + for extracting feature importance (implemented with `attrgetter`). + For example, give `regressor_.coef_` in case of + :class:`~sklearn.compose.TransformedTargetRegressor` or + `named_steps.clf.feature_importances_` in case of + class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. + + If `callable`, overrides the default feature importance getter. + The callable is passed with the fitted estimator and it should + return importance for each feature. + + .. versionadded:: 0.24 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The classes labels. Only available when `estimator` is a classifier. + + estimator_ : ``Estimator`` instance + The fitted estimator used to select features. + + n_features_ : int + The number of selected features. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + ranking_ : ndarray of shape (n_features,) + The feature ranking, such that ``ranking_[i]`` corresponds to the + ranking position of the i-th feature. Selected (i.e., estimated + best) features are assigned rank 1. + + support_ : ndarray of shape (n_features,) + The mask of selected features. + + See Also + -------- + RFECV : Recursive feature elimination with built-in cross-validated + selection of the best number of features. + SelectFromModel : Feature selection based on thresholds of importance + weights. + SequentialFeatureSelector : Sequential cross-validation based feature + selection. Does not rely on importance weights. + + Notes + ----- + Allows NaN/Inf in the input if the underlying estimator does as well. + + References + ---------- + + .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection + for cancer classification using support vector machines", + Mach. Learn., 46(1-3), 389--422, 2002. + + Examples + -------- + The following example shows how to retrieve the 5 most informative + features in the Friedman #1 dataset. 
+ + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.feature_selection import RFE + >>> from sklearn.svm import SVR + >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + >>> estimator = SVR(kernel="linear") + >>> selector = RFE(estimator, n_features_to_select=5, step=1) + >>> selector = selector.fit(X, y) + >>> selector.support_ + array([ True, True, True, True, True, False, False, False, False, + False]) + >>> selector.ranking_ + array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "n_features_to_select": [ + None, + Interval(RealNotInt, 0, 1, closed="right"), + Interval(Integral, 0, None, closed="neither"), + ], + "step": [ + Interval(Integral, 0, None, closed="neither"), + Interval(RealNotInt, 0, 1, closed="neither"), + ], + "verbose": ["verbose"], + "importance_getter": [str, callable], + } + + def __init__( + self, + estimator, + *, + n_features_to_select=None, + step=1, + verbose=0, + importance_getter="auto", + ): + self.estimator = estimator + self.n_features_to_select = n_features_to_select + self.step = step + self.importance_getter = importance_getter + self.verbose = verbose + + @property + def _estimator_type(self): + return self.estimator._estimator_type + + @property + def classes_(self): + """Classes labels available when `estimator` is a classifier. + + Returns + ------- + ndarray of shape (n_classes,) + """ + return self.estimator_.classes_ + + @_fit_context( + # RFE.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit the RFE model and then the underlying estimator on the selected features. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) + The target values. + + **fit_params : dict + Additional parameters passed to the `fit` method of the underlying + estimator. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", **fit_params) + return self._fit(X, y, **fit_params) + + def _fit(self, X, y, step_score=None, **fit_params): + # Parameter step_score controls the calculation of self.scores_ + # step_score is not exposed to users + # and is used when implementing RFECV + # self.scores_ will not be calculated when calling _fit through fit + + X, y = self._validate_data( + X, + y, + accept_sparse="csc", + ensure_min_features=2, + force_all_finite=False, + multi_output=True, + ) + + # Initialization + n_features = X.shape[1] + if self.n_features_to_select is None: + n_features_to_select = n_features // 2 + elif isinstance(self.n_features_to_select, Integral): # int + n_features_to_select = self.n_features_to_select + else: # float + n_features_to_select = int(n_features * self.n_features_to_select) + + if 0.0 < self.step < 1.0: + step = int(max(1, self.step * n_features)) + else: + step = int(self.step) + + support_ = np.ones(n_features, dtype=bool) + ranking_ = np.ones(n_features, dtype=int) + + if step_score: + self.scores_ = [] + + # Elimination + while np.sum(support_) > n_features_to_select: + # Remaining features + features = np.arange(n_features)[support_] + + # Rank the remaining features + estimator = clone(self.estimator) + if self.verbose > 0: + print("Fitting estimator with %d features." 
% np.sum(support_)) + + estimator.fit(X[:, features], y, **fit_params) + + # Get importance and rank them + importances = _get_feature_importances( + estimator, + self.importance_getter, + transform_func="square", + ) + ranks = np.argsort(importances) + + # for sparse case ranks is matrix + ranks = np.ravel(ranks) + + # Eliminate the worse features + threshold = min(step, np.sum(support_) - n_features_to_select) + + # Compute step score on the previous selection iteration + # because 'estimator' must use features + # that have not been eliminated yet + if step_score: + self.scores_.append(step_score(estimator, features)) + support_[features[ranks][:threshold]] = False + ranking_[np.logical_not(support_)] += 1 + + # Set final attributes + features = np.arange(n_features)[support_] + self.estimator_ = clone(self.estimator) + self.estimator_.fit(X[:, features], y, **fit_params) + + # Compute step score when only n_features_to_select features left + if step_score: + self.scores_.append(step_score(self.estimator_, features)) + self.n_features_ = support_.sum() + self.support_ = support_ + self.ranking_ = ranking_ + + return self + + @available_if(_estimator_has("predict")) + def predict(self, X): + """Reduce X to the selected features and predict using the estimator. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + Returns + ------- + y : array of shape [n_samples] + The predicted target values. + """ + check_is_fitted(self) + return self.estimator_.predict(self.transform(X)) + + @available_if(_estimator_has("score")) + def score(self, X, y, **fit_params): + """Reduce X to the selected features and return the score of the estimator. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + y : array of shape [n_samples] + The target values. + + **fit_params : dict + Parameters to pass to the `score` method of the underlying + estimator. + + .. versionadded:: 1.0 + + Returns + ------- + score : float + Score of the underlying base estimator computed with the selected + features returned by `rfe.transform(X)` and `y`. + """ + check_is_fitted(self) + return self.estimator_.score(self.transform(X), y, **fit_params) + + def _get_support_mask(self): + check_is_fitted(self) + return self.support_ + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Compute the decision function of ``X``. + + Parameters + ---------- + X : {array-like or sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + score : array, shape = [n_samples, n_classes] or [n_samples] + The decision function of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + Regression and binary classification produce an array of shape + [n_samples]. + """ + check_is_fitted(self) + return self.estimator_.decision_function(self.transform(X)) + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Predict class probabilities for X. + + Parameters + ---------- + X : {array-like or sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + p : array of shape (n_samples, n_classes) + The class probabilities of the input samples. 
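A simplified, standalone sketch of the elimination bookkeeping performed in `RFE._fit` above, using assumed toy numbers and a fixed importance vector in place of refitting the estimator each round: the `step` lowest-importance remaining features are dropped per iteration and the rank of every eliminated feature is incremented.

import numpy as np

rng = np.random.RandomState(0)
n_features, n_select, step = 6, 2, 1
importances = rng.rand(n_features)        # stand-in for squared coef_

support = np.ones(n_features, dtype=bool)
ranking = np.ones(n_features, dtype=int)

while support.sum() > n_select:
    features = np.flatnonzero(support)
    ranks = np.argsort(importances[features])      # worst features first
    n_drop = min(step, support.sum() - n_select)
    support[features[ranks][:n_drop]] = False
    ranking[~support] += 1

print(support)   # True for the n_select surviving features
print(ranking)   # survivors keep rank 1, earlier casualties rank higher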
The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.estimator_.predict_proba(self.transform(X)) + + @available_if(_estimator_has("predict_log_proba")) + def predict_log_proba(self, X): + """Predict class log-probabilities for X. + + Parameters + ---------- + X : array of shape [n_samples, n_features] + The input samples. + + Returns + ------- + p : array of shape (n_samples, n_classes) + The class log-probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.estimator_.predict_log_proba(self.transform(X)) + + def _more_tags(self): + tags = { + "poor_score": True, + "requires_y": True, + "allow_nan": True, + } + + # Adjust allow_nan if estimator explicitly defines `allow_nan`. + if hasattr(self.estimator, "_get_tags"): + tags["allow_nan"] = self.estimator._get_tags()["allow_nan"] + + return tags + + +class RFECV(RFE): + """Recursive feature elimination with cross-validation to select features. + + The number of features selected is tuned automatically by fitting an :class:`RFE` + selector on the different cross-validation splits (provided by the `cv` parameter). + The performance of the :class:`RFE` selector are evaluated using `scorer` for + different number of selected features and aggregated together. Finally, the scores + are averaged across folds and the number of features selected is set to the number + of features that maximize the cross-validation score. + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : ``Estimator`` instance + A supervised learning estimator with a ``fit`` method that provides + information about feature importance either through a ``coef_`` + attribute or through a ``feature_importances_`` attribute. + + step : int or float, default=1 + If greater than or equal to 1, then ``step`` corresponds to the + (integer) number of features to remove at each iteration. + If within (0.0, 1.0), then ``step`` corresponds to the percentage + (rounded down) of features to remove at each iteration. + Note that the last iteration may remove fewer than ``step`` features in + order to reach ``min_features_to_select``. + + min_features_to_select : int, default=1 + The minimum number of features to be selected. This number of features + will always be scored, even if the difference between the original + feature count and ``min_features_to_select`` isn't divisible by + ``step``. + + .. versionadded:: 0.20 + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. If the + estimator is a classifier or if ``y`` is neither binary nor multiclass, + :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value of None changed from 3-fold to 5-fold. 
+ + scoring : str, callable or None, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + verbose : int, default=0 + Controls verbosity of output. + + n_jobs : int or None, default=None + Number of cores to run in parallel while fitting across folds. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.18 + + importance_getter : str or callable, default='auto' + If 'auto', uses the feature importance either through a `coef_` + or `feature_importances_` attributes of estimator. + + Also accepts a string that specifies an attribute name/path + for extracting feature importance. + For example, give `regressor_.coef_` in case of + :class:`~sklearn.compose.TransformedTargetRegressor` or + `named_steps.clf.feature_importances_` in case of + :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. + + If `callable`, overrides the default feature importance getter. + The callable is passed with the fitted estimator and it should + return importance for each feature. + + .. versionadded:: 0.24 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The classes labels. Only available when `estimator` is a classifier. + + estimator_ : ``Estimator`` instance + The fitted estimator used to select features. + + cv_results_ : dict of ndarrays + A dict with keys: + + split(k)_test_score : ndarray of shape (n_subsets_of_features,) + The cross-validation scores across (k)th fold. + + mean_test_score : ndarray of shape (n_subsets_of_features,) + Mean of scores over the folds. + + std_test_score : ndarray of shape (n_subsets_of_features,) + Standard deviation of scores over the folds. + + .. versionadded:: 1.0 + + n_features_ : int + The number of selected features with cross-validation. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + ranking_ : narray of shape (n_features,) + The feature ranking, such that `ranking_[i]` + corresponds to the ranking + position of the i-th feature. + Selected (i.e., estimated best) + features are assigned rank 1. + + support_ : ndarray of shape (n_features,) + The mask of selected features. + + See Also + -------- + RFE : Recursive feature elimination. + + Notes + ----- + The size of all values in ``cv_results_`` is equal to + ``ceil((n_features - min_features_to_select) / step) + 1``, + where step is the number of features removed at each iteration. + + Allows NaN/Inf in the input if the underlying estimator does as well. + + References + ---------- + + .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection + for cancer classification using support vector machines", + Mach. Learn., 46(1-3), 389--422, 2002. + + Examples + -------- + The following example shows how to retrieve the a-priori not known 5 + informative features in the Friedman #1 dataset. 
+ + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.feature_selection import RFECV + >>> from sklearn.svm import SVR + >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + >>> estimator = SVR(kernel="linear") + >>> selector = RFECV(estimator, step=1, cv=5) + >>> selector = selector.fit(X, y) + >>> selector.support_ + array([ True, True, True, True, True, False, False, False, False, + False]) + >>> selector.ranking_ + array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) + """ + + _parameter_constraints: dict = { + **RFE._parameter_constraints, + "min_features_to_select": [Interval(Integral, 0, None, closed="neither")], + "cv": ["cv_object"], + "scoring": [None, str, callable], + "n_jobs": [None, Integral], + } + _parameter_constraints.pop("n_features_to_select") + + def __init__( + self, + estimator, + *, + step=1, + min_features_to_select=1, + cv=None, + scoring=None, + verbose=0, + n_jobs=None, + importance_getter="auto", + ): + self.estimator = estimator + self.step = step + self.importance_getter = importance_getter + self.cv = cv + self.scoring = scoring + self.verbose = verbose + self.n_jobs = n_jobs + self.min_features_to_select = min_features_to_select + + @_fit_context( + # RFECV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, groups=None): + """Fit the RFE model and automatically tune the number of selected features. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the total number of features. + + y : array-like of shape (n_samples,) + Target values (integers for classification, real numbers for + regression). + + groups : array-like of shape (n_samples,) or None, default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`~sklearn.model_selection.GroupKFold`). + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", groups=groups) + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + ensure_min_features=2, + force_all_finite=False, + multi_output=True, + ) + + # Initialization + cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) + scorer = check_scoring(self.estimator, scoring=self.scoring) + n_features = X.shape[1] + + if 0.0 < self.step < 1.0: + step = int(max(1, self.step * n_features)) + else: + step = int(self.step) + + # Build an RFE object, which will evaluate and score each possible + # feature count, down to self.min_features_to_select + rfe = RFE( + estimator=self.estimator, + n_features_to_select=self.min_features_to_select, + importance_getter=self.importance_getter, + step=self.step, + verbose=self.verbose, + ) + + # Determine the number of subsets of features by fitting across + # the train folds and choosing the "features_to_select" parameter + # that gives the least averaged error across all folds. + + # Note that joblib raises a non-picklable error for bound methods + # even if n_jobs is set to 1 with the default multiprocessing + # backend. + # This branching is done so that to + # make sure that user code that sets n_jobs to 1 + # and provides bound methods as scorers is not broken with the + # addition of n_jobs parameter in version 0.18. 
+ + if effective_n_jobs(self.n_jobs) == 1: + parallel, func = list, _rfe_single_fit + else: + parallel = Parallel(n_jobs=self.n_jobs) + func = delayed(_rfe_single_fit) + + scores = parallel( + func(rfe, self.estimator, X, y, train, test, scorer) + for train, test in cv.split(X, y, groups) + ) + + scores = np.array(scores) + scores_sum = np.sum(scores, axis=0) + scores_sum_rev = scores_sum[::-1] + argmax_idx = len(scores_sum) - np.argmax(scores_sum_rev) - 1 + n_features_to_select = max( + n_features - (argmax_idx * step), self.min_features_to_select + ) + + # Re-execute an elimination with best_k over the whole set + rfe = RFE( + estimator=self.estimator, + n_features_to_select=n_features_to_select, + step=self.step, + importance_getter=self.importance_getter, + verbose=self.verbose, + ) + + rfe.fit(X, y) + + # Set final attributes + self.support_ = rfe.support_ + self.n_features_ = rfe.n_features_ + self.ranking_ = rfe.ranking_ + self.estimator_ = clone(self.estimator) + self.estimator_.fit(self._transform(X), y) + + # reverse to stay consistent with before + scores_rev = scores[:, ::-1] + self.cv_results_ = {} + self.cv_results_["mean_test_score"] = np.mean(scores_rev, axis=0) + self.cv_results_["std_test_score"] = np.std(scores_rev, axis=0) + + for i in range(scores.shape[0]): + self.cv_results_[f"split{i}_test_score"] = scores_rev[i] + + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..5a90d46c9758b47a92121b91bd6e049207dc1c48 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py @@ -0,0 +1,300 @@ +""" +Sequential feature selection +""" +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..metrics import get_scorer_names +from ..model_selection import check_cv, cross_val_score +from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +class SequentialFeatureSelector( + _RoutingNotSupportedMixin, SelectorMixin, MetaEstimatorMixin, BaseEstimator +): + """Transformer that performs Sequential Feature Selection. + + This Sequential Feature Selector adds (forward selection) or + removes (backward selection) features to form a feature subset in a + greedy fashion. At each stage, this estimator chooses the best feature to + add or remove based on the cross-validation score of an estimator. In + the case of unsupervised learning, this Sequential Feature Selector + looks only at the features (X), not the desired outputs (y). + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.24 + + Parameters + ---------- + estimator : estimator instance + An unfitted estimator. + + n_features_to_select : "auto", int or float, default="auto" + If `"auto"`, the behaviour depends on the `tol` parameter: + + - if `tol` is not `None`, then features are selected while the score + change does not exceed `tol`. + - otherwise, half of the features are selected. + + If integer, the parameter is the absolute number of features to select. + If float between 0 and 1, it is the fraction of features to select. + + .. 
versionadded:: 1.1 + The option `"auto"` was added in version 1.1. + + .. versionchanged:: 1.3 + The default changed from `"warn"` to `"auto"` in 1.3. + + tol : float, default=None + If the score is not incremented by at least `tol` between two + consecutive feature additions or removals, stop adding or removing. + + `tol` can be negative when removing features using `direction="backward"`. + It can be useful to reduce the number of features at the cost of a small + decrease in the score. + + `tol` is enabled only when `n_features_to_select` is `"auto"`. + + .. versionadded:: 1.1 + + direction : {'forward', 'backward'}, default='forward' + Whether to perform forward selection or backward selection. + + scoring : str or callable, default=None + A single str (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + + NOTE that when using a custom scorer, it should return a single + value. + + If None, the estimator's score method is used. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other + cases, :class:`~sklearn.model_selection.KFold` is used. These splitters + are instantiated with `shuffle=False` so the splits will be the same + across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + n_jobs : int, default=None + Number of jobs to run in parallel. When evaluating a new feature to + add or remove, the cross-validation procedure is parallel over the + folds. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_features_to_select_ : int + The number of features that were selected. + + support_ : ndarray of shape (n_features,), dtype=bool + The mask of selected features. + + See Also + -------- + GenericUnivariateSelect : Univariate feature selector with configurable + strategy. + RFE : Recursive feature elimination based on importance weights. + RFECV : Recursive feature elimination based on importance weights, with + automatic selection of the number of features. + SelectFromModel : Feature selection based on thresholds of importance + weights. 
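As an editorial sketch of the `"auto"` behaviour described above (not part of the upstream docstring; the estimator and the `tol` value are arbitrary choices), with a positive `tol` forward selection keeps adding features only while each addition improves the cross-validated score by at least `tol`:

    from sklearn.datasets import load_iris
    from sklearn.feature_selection import SequentialFeatureSelector
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    sfs = SequentialFeatureSelector(
        LogisticRegression(max_iter=1000),
        n_features_to_select="auto",  # stop based on `tol`, not a fixed count
        tol=0.01,                     # require a 0.01 CV-score gain per added feature
        direction="forward",
    )
    sfs.fit(X, y)
    # The number of retained features is data-dependent.
    print(sfs.n_features_to_select_, sfs.get_support())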
+ + Examples + -------- + >>> from sklearn.feature_selection import SequentialFeatureSelector + >>> from sklearn.neighbors import KNeighborsClassifier + >>> from sklearn.datasets import load_iris + >>> X, y = load_iris(return_X_y=True) + >>> knn = KNeighborsClassifier(n_neighbors=3) + >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3) + >>> sfs.fit(X, y) + SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3), + n_features_to_select=3) + >>> sfs.get_support() + array([ True, False, True, True]) + >>> sfs.transform(X).shape + (150, 3) + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "n_features_to_select": [ + StrOptions({"auto"}), + Interval(RealNotInt, 0, 1, closed="right"), + Interval(Integral, 0, None, closed="neither"), + ], + "tol": [None, Interval(Real, None, None, closed="neither")], + "direction": [StrOptions({"forward", "backward"})], + "scoring": [None, StrOptions(set(get_scorer_names())), callable], + "cv": ["cv_object"], + "n_jobs": [None, Integral], + } + + def __init__( + self, + estimator, + *, + n_features_to_select="auto", + tol=None, + direction="forward", + scoring=None, + cv=5, + n_jobs=None, + ): + self.estimator = estimator + self.n_features_to_select = n_features_to_select + self.tol = tol + self.direction = direction + self.scoring = scoring + self.cv = cv + self.n_jobs = n_jobs + + @_fit_context( + # SequentialFeatureSelector.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Learn the features to select from X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of predictors. + + y : array-like of shape (n_samples,), default=None + Target values. This parameter may be ignored for + unsupervised learning. + + Returns + ------- + self : object + Returns the instance itself. + """ + tags = self._get_tags() + X = self._validate_data( + X, + accept_sparse="csc", + ensure_min_features=2, + force_all_finite=not tags.get("allow_nan", True), + ) + n_features = X.shape[1] + + if self.n_features_to_select == "auto": + if self.tol is not None: + # With auto feature selection, `n_features_to_select_` will be updated + # to `support_.sum()` after features are selected. 
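                # Note: `n_features - 1` only caps the number of greedy
                # iterations; the loop below breaks once the score gain drops
                # under `tol`, and `n_features_to_select_` is then overwritten
                # with `support_.sum()`, the number of features actually kept.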
+ self.n_features_to_select_ = n_features - 1 + else: + self.n_features_to_select_ = n_features // 2 + elif isinstance(self.n_features_to_select, Integral): + if self.n_features_to_select >= n_features: + raise ValueError("n_features_to_select must be < n_features.") + self.n_features_to_select_ = self.n_features_to_select + elif isinstance(self.n_features_to_select, Real): + self.n_features_to_select_ = int(n_features * self.n_features_to_select) + + if self.tol is not None and self.tol < 0 and self.direction == "forward": + raise ValueError("tol must be positive when doing forward selection") + + cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) + + cloned_estimator = clone(self.estimator) + + # the current mask corresponds to the set of features: + # - that we have already *selected* if we do forward selection + # - that we have already *excluded* if we do backward selection + current_mask = np.zeros(shape=n_features, dtype=bool) + n_iterations = ( + self.n_features_to_select_ + if self.n_features_to_select == "auto" or self.direction == "forward" + else n_features - self.n_features_to_select_ + ) + + old_score = -np.inf + is_auto_select = self.tol is not None and self.n_features_to_select == "auto" + for _ in range(n_iterations): + new_feature_idx, new_score = self._get_best_new_feature_score( + cloned_estimator, X, y, cv, current_mask + ) + if is_auto_select and ((new_score - old_score) < self.tol): + break + + old_score = new_score + current_mask[new_feature_idx] = True + + if self.direction == "backward": + current_mask = ~current_mask + + self.support_ = current_mask + self.n_features_to_select_ = self.support_.sum() + + return self + + def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask): + # Return the best new feature and its score to add to the current_mask, + # i.e. return the best new feature and its score to add (resp. remove) + # when doing forward selection (resp. backward selection). + # Feature will be added if the current score and past score are greater + # than tol when n_feature is auto, + candidate_feature_indices = np.flatnonzero(~current_mask) + scores = {} + for feature_idx in candidate_feature_indices: + candidate_mask = current_mask.copy() + candidate_mask[feature_idx] = True + if self.direction == "backward": + candidate_mask = ~candidate_mask + X_new = X[:, candidate_mask] + scores[feature_idx] = cross_val_score( + estimator, + X_new, + y, + cv=cv, + scoring=self.scoring, + n_jobs=self.n_jobs, + ).mean() + new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx]) + return new_feature_idx, scores[new_feature_idx] + + def _get_support_mask(self): + check_is_fitted(self) + return self.support_ + + def _more_tags(self): + return { + "allow_nan": _safe_tags(self.estimator, key="allow_nan"), + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py new file mode 100644 index 0000000000000000000000000000000000000000..df1b5072ce7415c21b1e3df922e742d3676b168c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py @@ -0,0 +1,1161 @@ +"""Univariate features selection.""" + +# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay. +# L. Buitinck, A. 
Joly +# License: BSD 3 clause + + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import special, stats +from scipy.sparse import issparse + +from ..base import BaseEstimator, _fit_context +from ..preprocessing import LabelBinarizer +from ..utils import as_float_array, check_array, check_X_y, safe_mask, safe_sqr +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import row_norms, safe_sparse_dot +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +def _clean_nans(scores): + """ + Fixes Issue #1240: NaNs can't be properly compared, so change them to the + smallest value of scores's dtype. -inf seems to be unreliable. + """ + # XXX where should this function be called? fit? scoring functions + # themselves? + scores = as_float_array(scores, copy=True) + scores[np.isnan(scores)] = np.finfo(scores.dtype).min + return scores + + +###################################################################### +# Scoring functions + + +# The following function is a rewriting of scipy.stats.f_oneway +# Contrary to the scipy.stats.f_oneway implementation it does not +# copy the data while keeping the inputs unchanged. +def f_oneway(*args): + """Perform a 1-way ANOVA. + + The one-way ANOVA tests the null hypothesis that 2 or more groups have + the same population mean. The test is applied to samples from two or + more groups, possibly with differing sizes. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *args : {array-like, sparse matrix} + Sample1, sample2... The sample measurements should be given as + arguments. + + Returns + ------- + f_statistic : float + The computed F-value of the test. + p_value : float + The associated p-value from the F-distribution. + + Notes + ----- + The ANOVA test has important assumptions that must be satisfied in order + for the associated p-value to be valid. + + 1. The samples are independent + 2. Each sample is from a normally distributed population + 3. The population standard deviations of the groups are all equal. This + property is known as homoscedasticity. + + If these assumptions are not true for a given set of data, it may still be + possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although + with some loss of power. + + The algorithm is from Heiman[2], pp.394-7. + + See ``scipy.stats.f_oneway`` that should give the same results while + being less efficient. + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 14. + http://vassarstats.net/textbook + + .. [2] Heiman, G.W. Research Methods in Statistics. 2002. 
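A minimal usage sketch (editorial, with synthetic data; the sample sizes and the three-class layout are arbitrary) showing how a class-dependent column yields a large per-feature F-statistic through the public `f_classif` wrapper defined just below:

    import numpy as np
    from sklearn.feature_selection import f_classif

    rng = np.random.RandomState(0)
    X = rng.normal(size=(30, 3))
    y = np.repeat([0, 1, 2], 10)
    X[:, 0] += 3 * y            # make feature 0 depend strongly on the class
    F, p = f_classif(X, y)      # feature 0: large F-statistic, tiny p-value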
+ """ + n_classes = len(args) + args = [as_float_array(a) for a in args] + n_samples_per_class = np.array([a.shape[0] for a in args]) + n_samples = np.sum(n_samples_per_class) + ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args) + sums_args = [np.asarray(a.sum(axis=0)) for a in args] + square_of_sums_alldata = sum(sums_args) ** 2 + square_of_sums_args = [s**2 for s in sums_args] + sstot = ss_alldata - square_of_sums_alldata / float(n_samples) + ssbn = 0.0 + for k, _ in enumerate(args): + ssbn += square_of_sums_args[k] / n_samples_per_class[k] + ssbn -= square_of_sums_alldata / float(n_samples) + sswn = sstot - ssbn + dfbn = n_classes - 1 + dfwn = n_samples - n_classes + msb = ssbn / float(dfbn) + msw = sswn / float(dfwn) + constant_features_idx = np.where(msw == 0.0)[0] + if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size: + warnings.warn("Features %s are constant." % constant_features_idx, UserWarning) + f = msb / msw + # flatten matrix to vector in sparse case + f = np.asarray(f).ravel() + prob = special.fdtrc(dfbn, dfwn, f) + return f, prob + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def f_classif(X, y): + """Compute the ANOVA F-value for the provided sample. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The set of regressors that will be tested sequentially. + + y : array-like of shape (n_samples,) + The target vector. + + Returns + ------- + f_statistic : ndarray of shape (n_features,) + F-statistic for each feature. + + p_values : ndarray of shape (n_features,) + P-values associated with the F-statistic. + + See Also + -------- + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.feature_selection import f_classif + >>> X, y = make_classification( + ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1, + ... shuffle=False, random_state=42 + ... ) + >>> f_statistic, p_values = f_classif(X, y) + >>> f_statistic + array([2.2...e+02, 7.0...e-01, 1.6...e+00, 9.3...e-01, + 5.4...e+00, 3.2...e-01, 4.7...e-02, 5.7...e-01, + 7.5...e-01, 8.9...e-02]) + >>> p_values + array([7.1...e-27, 4.0...e-01, 1.9...e-01, 3.3...e-01, + 2.2...e-02, 5.7...e-01, 8.2...e-01, 4.5...e-01, + 3.8...e-01, 7.6...e-01]) + """ + X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"]) + args = [X[safe_mask(X, y == k)] for k in np.unique(y)] + return f_oneway(*args) + + +def _chisquare(f_obs, f_exp): + """Fast replacement for scipy.stats.chisquare. + + Version from https://github.com/scipy/scipy/pull/2525 with additional + optimizations. + """ + f_obs = np.asarray(f_obs, dtype=np.float64) + + k = len(f_obs) + # Reuse f_obs for chi-squared statistics + chisq = f_obs + chisq -= f_exp + chisq **= 2 + with np.errstate(invalid="ignore"): + chisq /= f_exp + chisq = chisq.sum(axis=0) + return chisq, special.chdtrc(k - 1, chisq) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def chi2(X, y): + """Compute chi-squared stats between each non-negative feature and class. 
+ + This score can be used to select the `n_features` features with the + highest values for the test chi-squared statistic from X, which must + contain only **non-negative features** such as booleans or frequencies + (e.g., term counts in document classification), relative to the classes. + + Recall that the chi-square test measures dependence between stochastic + variables, so using this function "weeds out" the features that are the + most likely to be independent of class and therefore irrelevant for + classification. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample vectors. + + y : array-like of shape (n_samples,) + Target vector (class labels). + + Returns + ------- + chi2 : ndarray of shape (n_features,) + Chi2 statistics for each feature. + + p_values : ndarray of shape (n_features,) + P-values for each feature. + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + f_regression : F-value between label/feature for regression tasks. + + Notes + ----- + Complexity of this algorithm is O(n_classes * n_features). + + Examples + -------- + >>> import numpy as np + >>> from sklearn.feature_selection import chi2 + >>> X = np.array([[1, 1, 3], + ... [0, 1, 5], + ... [5, 4, 1], + ... [6, 6, 2], + ... [1, 4, 0], + ... [0, 0, 0]]) + >>> y = np.array([1, 1, 0, 0, 2, 2]) + >>> chi2_stats, p_values = chi2(X, y) + >>> chi2_stats + array([15.3..., 6.5 , 8.9...]) + >>> p_values + array([0.0004..., 0.0387..., 0.0116... ]) + """ + + # XXX: we might want to do some of the following in logspace instead for + # numerical stability. + # Converting X to float allows getting better performance for the + # safe_sparse_dot call made below. + X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32)) + if np.any((X.data if issparse(X) else X) < 0): + raise ValueError("Input X must be non-negative.") + + # Use a sparse representation for Y by default to reduce memory usage when + # y has many unique classes. + Y = LabelBinarizer(sparse_output=True).fit_transform(y) + if Y.shape[1] == 1: + Y = Y.toarray() + Y = np.append(1 - Y, Y, axis=1) + + observed = safe_sparse_dot(Y.T, X) # n_classes * n_features + + if issparse(observed): + # convert back to a dense array before calling _chisquare + # XXX: could _chisquare be reimplement to accept sparse matrices for + # cases where both n_classes and n_features are large (and X is + # sparse)? + observed = observed.toarray() + + feature_count = X.sum(axis=0).reshape(1, -1) + class_prob = Y.mean(axis=0).reshape(1, -1) + expected = np.dot(class_prob.T, feature_count) + + return _chisquare(observed, expected) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "center": ["boolean"], + "force_finite": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def r_regression(X, y, *, center=True, force_finite=True): + """Compute Pearson's r for each features and the target. + + Pearson's r is also known as the Pearson correlation coefficient. + + Linear model for testing the individual effect of each of many regressors. + This is a scoring function to be used in a feature selection procedure, not + a free standing feature selection procedure. + + The cross correlation between each regressor and the target is computed + as:: + + E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y)) + + For more on usage see the :ref:`User Guide `. + + .. 
versionadded:: 1.0 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix. + + y : array-like of shape (n_samples,) + The target vector. + + center : bool, default=True + Whether or not to center the data matrix `X` and the target vector `y`. + By default, `X` and `y` will be centered. + + force_finite : bool, default=True + Whether or not to force the Pearson's R correlation to be finite. + In the particular case where some features in `X` or the target `y` + are constant, the Pearson's R correlation is not defined. When + `force_finite=False`, a correlation of `np.nan` is returned to + acknowledge this case. When `force_finite=True`, this value will be + forced to a minimal correlation of `0.0`. + + .. versionadded:: 1.1 + + Returns + ------- + correlation_coefficient : ndarray of shape (n_features,) + Pearson's R correlation coefficients of features. + + See Also + -------- + f_regression: Univariate linear regression tests returning f-statistic + and p-values. + mutual_info_regression: Mutual information for a continuous target. + f_classif: ANOVA F-value between label/feature for classification tasks. + chi2: Chi-squared stats of non-negative features for classification tasks. + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.feature_selection import r_regression + >>> X, y = make_regression( + ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 + ... ) + >>> r_regression(X, y) + array([-0.15..., 1. , -0.22...]) + """ + X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64) + n_samples = X.shape[0] + + # Compute centered values + # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we + # need not center X + if center: + y = y - np.mean(y) + # TODO: for Scipy <= 1.10, `isspmatrix(X)` returns `True` for sparse arrays. + # Here, we check the output of the `.mean` operation that returns a `np.matrix` + # for sparse matrices while a `np.array` for dense and sparse arrays. + # We can reconsider using `isspmatrix` when the minimum version is + # SciPy >= 1.11 + X_means = X.mean(axis=0) + X_means = X_means.getA1() if isinstance(X_means, np.matrix) else X_means + # Compute the scaled standard deviations via moments + X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means**2) + else: + X_norms = row_norms(X.T) + + correlation_coefficient = safe_sparse_dot(y, X) + with np.errstate(divide="ignore", invalid="ignore"): + correlation_coefficient /= X_norms + correlation_coefficient /= np.linalg.norm(y) + + if force_finite and not np.isfinite(correlation_coefficient).all(): + # case where the target or some features are constant + # the correlation coefficient(s) is/are set to the minimum (i.e. 0.0) + nan_mask = np.isnan(correlation_coefficient) + correlation_coefficient[nan_mask] = 0.0 + return correlation_coefficient + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like"], + "center": ["boolean"], + "force_finite": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def f_regression(X, y, *, center=True, force_finite=True): + """Univariate linear regression tests returning F-statistic and p-values. + + Quick linear model for testing the effect of a single regressor, + sequentially for many regressors. + + This is done in 2 steps: + + 1. 
The cross correlation between each regressor and the target is computed + using :func:`r_regression` as:: + + E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y)) + + 2. It is converted to an F score and then to a p-value. + + :func:`f_regression` is derived from :func:`r_regression` and will rank + features in the same order if all the features are positively correlated + with the target. + + Note however that contrary to :func:`f_regression`, :func:`r_regression` + values lie in [-1, 1] and can thus be negative. :func:`f_regression` is + therefore recommended as a feature selection criterion to identify + potentially predictive feature for a downstream classifier, irrespective of + the sign of the association with the target variable. + + Furthermore :func:`f_regression` returns p-values while + :func:`r_regression` does not. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix. + + y : array-like of shape (n_samples,) + The target vector. + + center : bool, default=True + Whether or not to center the data matrix `X` and the target vector `y`. + By default, `X` and `y` will be centered. + + force_finite : bool, default=True + Whether or not to force the F-statistics and associated p-values to + be finite. There are two cases where the F-statistic is expected to not + be finite: + + - when the target `y` or some features in `X` are constant. In this + case, the Pearson's R correlation is not defined leading to obtain + `np.nan` values in the F-statistic and p-value. When + `force_finite=True`, the F-statistic is set to `0.0` and the + associated p-value is set to `1.0`. + - when a feature in `X` is perfectly correlated (or + anti-correlated) with the target `y`. In this case, the F-statistic + is expected to be `np.inf`. When `force_finite=True`, the F-statistic + is set to `np.finfo(dtype).max` and the associated p-value is set to + `0.0`. + + .. versionadded:: 1.1 + + Returns + ------- + f_statistic : ndarray of shape (n_features,) + F-statistic for each feature. + + p_values : ndarray of shape (n_features,) + P-values associated with the F-statistic. + + See Also + -------- + r_regression: Pearson's R between label/feature for regression tasks. + f_classif: ANOVA F-value between label/feature for classification tasks. + chi2: Chi-squared stats of non-negative features for classification tasks. + SelectKBest: Select features based on the k highest scores. + SelectFpr: Select features based on a false positive rate test. + SelectFdr: Select features based on an estimated false discovery rate. + SelectFwe: Select features based on family-wise error rate. + SelectPercentile: Select features based on percentile of the highest + scores. + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.feature_selection import f_regression + >>> X, y = make_regression( + ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 + ... 
) + >>> f_statistic, p_values = f_regression(X, y) + >>> f_statistic + array([1.2...+00, 2.6...+13, 2.6...+00]) + >>> p_values + array([2.7..., 1.5..., 1.0...]) + """ + correlation_coefficient = r_regression( + X, y, center=center, force_finite=force_finite + ) + deg_of_freedom = y.size - (2 if center else 1) + + corr_coef_squared = correlation_coefficient**2 + + with np.errstate(divide="ignore", invalid="ignore"): + f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom + p_values = stats.f.sf(f_statistic, 1, deg_of_freedom) + + if force_finite and not np.isfinite(f_statistic).all(): + # case where there is a perfect (anti-)correlation + # f-statistics can be set to the maximum and p-values to zero + mask_inf = np.isinf(f_statistic) + f_statistic[mask_inf] = np.finfo(f_statistic.dtype).max + # case where the target or some features are constant + # f-statistics would be minimum and thus p-values large + mask_nan = np.isnan(f_statistic) + f_statistic[mask_nan] = 0.0 + p_values[mask_nan] = 1.0 + return f_statistic, p_values + + +###################################################################### +# Base classes + + +class _BaseFilter(SelectorMixin, BaseEstimator): + """Initialize the univariate feature selection. + + Parameters + ---------- + score_func : callable + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues) or a single array with scores. + """ + + _parameter_constraints: dict = {"score_func": [callable]} + + def __init__(self, score_func): + self.score_func = score_func + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Run score function on (X, y) and get the appropriate features. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) or None + The target values (class labels in classification, real numbers in + regression). If the selector is unsupervised then `y` can be set to `None`. + + Returns + ------- + self : object + Returns the instance itself. + """ + if y is None: + X = self._validate_data(X, accept_sparse=["csr", "csc"]) + else: + X, y = self._validate_data( + X, y, accept_sparse=["csr", "csc"], multi_output=True + ) + + self._check_params(X, y) + score_func_ret = self.score_func(X, y) + if isinstance(score_func_ret, (list, tuple)): + self.scores_, self.pvalues_ = score_func_ret + self.pvalues_ = np.asarray(self.pvalues_) + else: + self.scores_ = score_func_ret + self.pvalues_ = None + + self.scores_ = np.asarray(self.scores_) + + return self + + def _check_params(self, X, y): + pass + + def _more_tags(self): + return {"requires_y": True} + + +###################################################################### +# Specific filters +###################################################################### +class SelectPercentile(_BaseFilter): + """Select features according to a percentile of the highest scores. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues) or a single array with scores. + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + .. versionadded:: 0.18 + + percentile : int, default=10 + Percent of features to keep. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. 
+ + pvalues_ : array-like of shape (n_features,) + p-values of feature scores, None if `score_func` returned only scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + mutual_info_classif : Mutual information for a discrete target. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Notes + ----- + Ties between features with equal scores will be broken in an unspecified + way. + + This filter supports unsupervised feature selection that only requests `X` for + computing the scores. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.feature_selection import SelectPercentile, chi2 + >>> X, y = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y) + >>> X_new.shape + (1797, 7) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "percentile": [Interval(Real, 0, 100, closed="both")], + } + + def __init__(self, score_func=f_classif, *, percentile=10): + super().__init__(score_func=score_func) + self.percentile = percentile + + def _get_support_mask(self): + check_is_fitted(self) + + # Cater for NaNs + if self.percentile == 100: + return np.ones(len(self.scores_), dtype=bool) + elif self.percentile == 0: + return np.zeros(len(self.scores_), dtype=bool) + + scores = _clean_nans(self.scores_) + threshold = np.percentile(scores, 100 - self.percentile) + mask = scores > threshold + ties = np.where(scores == threshold)[0] + if len(ties): + max_feats = int(len(scores) * self.percentile / 100) + kept_ties = ties[: max_feats - mask.sum()] + mask[kept_ties] = True + return mask + + def _more_tags(self): + return {"requires_y": False} + + +class SelectKBest(_BaseFilter): + """Select features according to the k highest scores. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues) or a single array with scores. + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + .. versionadded:: 0.18 + + k : int or "all", default=10 + Number of top features to select. + The "all" option bypasses selection, for use in a parameter search. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores, None if `score_func` returned only scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif: ANOVA F-value between label/feature for classification tasks. + mutual_info_classif: Mutual information for a discrete target. + chi2: Chi-squared stats of non-negative features for classification tasks. + f_regression: F-value between label/feature for regression tasks. + mutual_info_regression: Mutual information for a continuous target. + SelectPercentile: Select features based on percentile of the highest + scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Notes + ----- + Ties between features with equal scores will be broken in an unspecified + way. + + This filter supports unsupervised feature selection that only requests `X` for + computing the scores. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.feature_selection import SelectKBest, chi2 + >>> X, y = load_digits(return_X_y=True) + >>> X.shape + (1797, 64) + >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y) + >>> X_new.shape + (1797, 20) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")], + } + + def __init__(self, score_func=f_classif, *, k=10): + super().__init__(score_func=score_func) + self.k = k + + def _check_params(self, X, y): + if not isinstance(self.k, str) and self.k > X.shape[1]: + warnings.warn( + f"k={self.k} is greater than n_features={X.shape[1]}. " + "All the features will be returned." + ) + + def _get_support_mask(self): + check_is_fitted(self) + + if self.k == "all": + return np.ones(self.scores_.shape, dtype=bool) + elif self.k == 0: + return np.zeros(self.scores_.shape, dtype=bool) + else: + scores = _clean_nans(self.scores_) + mask = np.zeros(scores.shape, dtype=bool) + + # Request a stable sort. Mergesort takes more memory (~40MB per + # megafeature on x86-64). + mask[np.argsort(scores, kind="mergesort")[-self.k :]] = 1 + return mask + + def _more_tags(self): + return {"requires_y": False} + + +class SelectFpr(_BaseFilter): + """Filter: Select the pvalues below alpha based on a FPR test. + + FPR test stands for False Positive Rate test. It controls the total + amount of false detections. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + alpha : float, default=5e-2 + Features with p-values less than `alpha` are selected. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + chi2 : Chi-squared stats of non-negative features for classification tasks. + mutual_info_classif: Mutual information for a discrete target. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import SelectFpr, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y) + >>> X_new.shape + (569, 16) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "alpha": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, score_func=f_classif, *, alpha=5e-2): + super().__init__(score_func=score_func) + self.alpha = alpha + + def _get_support_mask(self): + check_is_fitted(self) + + return self.pvalues_ < self.alpha + + +class SelectFdr(_BaseFilter): + """Filter: Select the p-values for an estimated false discovery rate. + + This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound + on the expected false discovery rate. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + alpha : float, default=5e-2 + The highest uncorrected p-value for features to keep. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + mutual_info_classif : Mutual information for a discrete target. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFwe : Select features based on family-wise error rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. 
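The selection rule implemented in `_get_support_mask` further below is the Benjamini-Hochberg step-up procedure; a self-contained sketch with hypothetical p-values (illustrative numbers, not taken from the source):

    import numpy as np

    pvalues = np.array([0.001, 0.02, 0.03, 0.2, 0.8])  # hypothetical p-values
    alpha = 0.05
    n_features = len(pvalues)
    sv = np.sort(pvalues)
    # Keep everything at or below the largest sorted p-value p_(k) that
    # satisfies p_(k) <= alpha * k / n_features.
    below = sv[sv <= alpha / n_features * np.arange(1, n_features + 1)]
    if below.size == 0:
        support = np.zeros(n_features, dtype=bool)
    else:
        support = pvalues <= below.max()
    # support -> array([ True,  True,  True, False, False])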
+ + References + ---------- + https://en.wikipedia.org/wiki/False_discovery_rate + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import SelectFdr, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y) + >>> X_new.shape + (569, 16) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "alpha": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, score_func=f_classif, *, alpha=5e-2): + super().__init__(score_func=score_func) + self.alpha = alpha + + def _get_support_mask(self): + check_is_fitted(self) + + n_features = len(self.pvalues_) + sv = np.sort(self.pvalues_) + selected = sv[ + sv <= float(self.alpha) / n_features * np.arange(1, n_features + 1) + ] + if selected.size == 0: + return np.zeros_like(self.pvalues_, dtype=bool) + return self.pvalues_ <= selected.max() + + +class SelectFwe(_BaseFilter): + """Filter: Select the p-values corresponding to Family-wise error rate. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). + Default is f_classif (see below "See Also"). The default function only + works with classification tasks. + + alpha : float, default=5e-2 + The highest uncorrected p-value for features to keep. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + GenericUnivariateSelect : Univariate feature selector with configurable + mode. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import SelectFwe, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y) + >>> X_new.shape + (569, 15) + """ + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "alpha": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, score_func=f_classif, *, alpha=5e-2): + super().__init__(score_func=score_func) + self.alpha = alpha + + def _get_support_mask(self): + check_is_fitted(self) + + return self.pvalues_ < self.alpha / len(self.pvalues_) + + +###################################################################### +# Generic filter +###################################################################### + + +# TODO this class should fit on either p-values or scores, +# depending on the mode. 
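Before the class itself, a quick editorial equivalence check (the dataset and `k` are chosen arbitrarily): `GenericUnivariateSelect` with `mode='k_best'` and `param=k` should produce the same support mask as `SelectKBest(k=k)` with the same score function, since it simply delegates to that selector.

    import numpy as np

    from sklearn.datasets import load_breast_cancer
    from sklearn.feature_selection import GenericUnivariateSelect, SelectKBest, chi2

    X, y = load_breast_cancer(return_X_y=True)
    generic = GenericUnivariateSelect(chi2, mode="k_best", param=10).fit(X, y)
    direct = SelectKBest(chi2, k=10).fit(X, y)
    assert np.array_equal(generic.get_support(), direct.get_support())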
+class GenericUnivariateSelect(_BaseFilter): + """Univariate feature selector with configurable strategy. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable, default=f_classif + Function taking two arrays X and y, and returning a pair of arrays + (scores, pvalues). For modes 'percentile' or 'kbest' it can return + a single array scores. + + mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile' + Feature selection mode. Note that the `'percentile'` and `'kbest'` + modes are supporting unsupervised feature selection (when `y` is `None`). + + param : "all", float or int, default=1e-5 + Parameter of the corresponding mode. + + Attributes + ---------- + scores_ : array-like of shape (n_features,) + Scores of features. + + pvalues_ : array-like of shape (n_features,) + p-values of feature scores, None if `score_func` returned scores only. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + f_classif : ANOVA F-value between label/feature for classification tasks. + mutual_info_classif : Mutual information for a discrete target. + chi2 : Chi-squared stats of non-negative features for classification tasks. + f_regression : F-value between label/feature for regression tasks. + mutual_info_regression : Mutual information for a continuous target. + SelectPercentile : Select features based on percentile of the highest + scores. + SelectKBest : Select features based on the k highest scores. + SelectFpr : Select features based on a false positive rate test. + SelectFdr : Select features based on an estimated false discovery rate. + SelectFwe : Select features based on family-wise error rate. 
+ + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2 + >>> X, y = load_breast_cancer(return_X_y=True) + >>> X.shape + (569, 30) + >>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20) + >>> X_new = transformer.fit_transform(X, y) + >>> X_new.shape + (569, 20) + """ + + _selection_modes: dict = { + "percentile": SelectPercentile, + "k_best": SelectKBest, + "fpr": SelectFpr, + "fdr": SelectFdr, + "fwe": SelectFwe, + } + + _parameter_constraints: dict = { + **_BaseFilter._parameter_constraints, + "mode": [StrOptions(set(_selection_modes.keys()))], + "param": [Interval(Real, 0, None, closed="left"), StrOptions({"all"})], + } + + def __init__(self, score_func=f_classif, *, mode="percentile", param=1e-5): + super().__init__(score_func=score_func) + self.mode = mode + self.param = param + + def _make_selector(self): + selector = self._selection_modes[self.mode](score_func=self.score_func) + + # Now perform some acrobatics to set the right named parameter in + # the selector + possible_params = selector._get_param_names() + possible_params.remove("score_func") + selector.set_params(**{possible_params[0]: self.param}) + + return selector + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} + + def _check_params(self, X, y): + self._make_selector()._check_params(X, y) + + def _get_support_mask(self): + check_is_fitted(self) + + selector = self._make_selector() + selector.pvalues_ = self.pvalues_ + selector.scores_ = self.scores_ + return selector._get_support_mask() diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..f97c75db1e34b1a5d6179403ebbaf83902c067ac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/_variance_threshold.py @@ -0,0 +1,136 @@ +# Author: Lars Buitinck +# License: 3-clause BSD +from numbers import Real + +import numpy as np + +from ..base import BaseEstimator, _fit_context +from ..utils._param_validation import Interval +from ..utils.sparsefuncs import mean_variance_axis, min_max_axis +from ..utils.validation import check_is_fitted +from ._base import SelectorMixin + + +class VarianceThreshold(SelectorMixin, BaseEstimator): + """Feature selector that removes all low-variance features. + + This feature selection algorithm looks only at the features (X), not the + desired outputs (y), and can thus be used for unsupervised learning. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + threshold : float, default=0 + Features with a training-set variance lower than this threshold will + be removed. The default is to keep all features with non-zero variance, + i.e. remove the features that have the same value in all samples. + + Attributes + ---------- + variances_ : array, shape (n_features,) + Variances of individual features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SelectFromModel: Meta-transformer for selecting features based on + importance weights. 
+ SelectPercentile : Select features according to a percentile of the highest + scores. + SequentialFeatureSelector : Transformer that performs Sequential Feature + Selection. + + Notes + ----- + Allows NaN in the input. + Raises ValueError if no feature in X meets the variance threshold. + + Examples + -------- + The following dataset has integer features, two of which are the same + in every sample. These are removed with the default setting for threshold:: + + >>> from sklearn.feature_selection import VarianceThreshold + >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]] + >>> selector = VarianceThreshold() + >>> selector.fit_transform(X) + array([[2, 0], + [1, 4], + [1, 1]]) + """ + + _parameter_constraints: dict = { + "threshold": [Interval(Real, 0, None, closed="left")] + } + + def __init__(self, threshold=0.0): + self.threshold = threshold + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn empirical variances from X. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Data from which to compute variances, where `n_samples` is + the number of samples and `n_features` is the number of features. + + y : any, default=None + Ignored. This parameter exists only for compatibility with + sklearn.pipeline.Pipeline. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + dtype=np.float64, + force_all_finite="allow-nan", + ) + + if hasattr(X, "toarray"): # sparse matrix + _, self.variances_ = mean_variance_axis(X, axis=0) + if self.threshold == 0: + mins, maxes = min_max_axis(X, axis=0) + peak_to_peaks = maxes - mins + else: + self.variances_ = np.nanvar(X, axis=0) + if self.threshold == 0: + peak_to_peaks = np.ptp(X, axis=0) + + if self.threshold == 0: + # Use peak-to-peak to avoid numeric precision issues + # for constant features + compare_arr = np.array([self.variances_, peak_to_peaks]) + self.variances_ = np.nanmin(compare_arr, axis=0) + + if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)): + msg = "No feature in X meets the variance threshold {0:.5f}" + if X.shape[0] == 1: + msg += " (X contains only one sample)" + raise ValueError(msg.format(self.threshold)) + + return self + + def _get_support_mask(self): + check_is_fitted(self) + + return self.variances_ > self.threshold + + def _more_tags(self): + return {"allow_nan": True} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a257d4004a70771d95138e5d858b175b0633f77 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c7144e453c22d9c44a5b8339ff6528f6c8c6946c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85f595ead1b1eb9419c08f119443db9ec7452694 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de7547b6bada6eee25df8b12f8e55848bf34a660 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ba23854db1dfbd0d1cdff01dabf136ddd4a3b4e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91f7985dd8ddbda672d9046e0b5b19b814da0b43 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67bbcd2cdaa24f3dd07117e5675c2c157ac2833d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3af1c045c2f5b17754b1c8f3ea9973a6655f1f18 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..afc6769614cbfc75857e15de958b1c163925a377 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..5e2bb27bafd1767cec33b1c4255b2116e3f8a9e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_base.py @@ -0,0 +1,153 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.base import BaseEstimator +from sklearn.feature_selection._base import SelectorMixin +from sklearn.utils.fixes import CSC_CONTAINERS + + +class StepSelector(SelectorMixin, BaseEstimator): + """Retain every `step` features (beginning with 0). + + If `step < 1`, then no features are selected. + """ + + def __init__(self, step=2): + self.step = step + + def fit(self, X, y=None): + X = self._validate_data(X, accept_sparse="csc") + return self + + def _get_support_mask(self): + mask = np.zeros(self.n_features_in_, dtype=bool) + if self.step >= 1: + mask[:: self.step] = True + return mask + + +support = [True, False] * 5 +support_inds = [0, 2, 4, 6, 8] +X = np.arange(20).reshape(2, 10) +Xt = np.arange(0, 20, 2).reshape(2, 5) +Xinv = X.copy() +Xinv[:, 1::2] = 0 +y = [0, 1] +feature_names = list("ABCDEFGHIJ") +feature_names_t = feature_names[::2] +feature_names_inv = np.array(feature_names) +feature_names_inv[1::2] = "" + + +def test_transform_dense(): + sel = StepSelector() + Xt_actual = sel.fit(X, y).transform(X) + Xt_actual2 = StepSelector().fit_transform(X, y) + assert_array_equal(Xt, Xt_actual) + assert_array_equal(Xt, Xt_actual2) + + # Check dtype matches + assert np.int32 == sel.transform(X.astype(np.int32)).dtype + assert np.float32 == sel.transform(X.astype(np.float32)).dtype + + # Check 1d list and other dtype: + names_t_actual = sel.transform([feature_names]) + assert_array_equal(feature_names_t, names_t_actual.ravel()) + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.transform(np.array([[1], [2]])) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_transform_sparse(csc_container): + X_sp = csc_container(X) + sel = StepSelector() + Xt_actual = sel.fit(X_sp).transform(X_sp) + Xt_actual2 = sel.fit_transform(X_sp) + assert_array_equal(Xt, Xt_actual.toarray()) + assert_array_equal(Xt, Xt_actual2.toarray()) + + # Check dtype matches + assert np.int32 == sel.transform(X_sp.astype(np.int32)).dtype + assert np.float32 == sel.transform(X_sp.astype(np.float32)).dtype + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.transform(np.array([[1], [2]])) + + +def test_inverse_transform_dense(): + sel = StepSelector() + Xinv_actual = sel.fit(X, y).inverse_transform(Xt) + assert_array_equal(Xinv, Xinv_actual) + + # Check dtype matches + assert np.int32 == sel.inverse_transform(Xt.astype(np.int32)).dtype + assert np.float32 == sel.inverse_transform(Xt.astype(np.float32)).dtype + + # Check 1d list and other dtype: + names_inv_actual = sel.inverse_transform([feature_names_t]) + assert_array_equal(feature_names_inv, names_inv_actual.ravel()) + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.inverse_transform(np.array([[1], [2]])) + + 
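# Editorial note (not part of the upstream test module): the module-level
# fixtures encode the expected round trip for StepSelector. `transform` keeps
# every other column of X (giving Xt), and `inverse_transform` puts the kept
# columns back in their original positions with zeros where columns were
# dropped (giving Xinv). A tiny stand-alone illustration:
#
#     X_demo = np.arange(20).reshape(2, 10)
#     kept = X_demo[:, ::2]              # equals Xt above
#     restored = np.zeros_like(X_demo)
#     restored[:, ::2] = kept            # equals Xinv above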
+@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_inverse_transform_sparse(csc_container): + X_sp = csc_container(X) + Xt_sp = csc_container(Xt) + sel = StepSelector() + Xinv_actual = sel.fit(X_sp).inverse_transform(Xt_sp) + assert_array_equal(Xinv, Xinv_actual.toarray()) + + # Check dtype matches + assert np.int32 == sel.inverse_transform(Xt_sp.astype(np.int32)).dtype + assert np.float32 == sel.inverse_transform(Xt_sp.astype(np.float32)).dtype + + # Check wrong shape raises error + with pytest.raises(ValueError): + sel.inverse_transform(np.array([[1], [2]])) + + +def test_get_support(): + sel = StepSelector() + sel.fit(X, y) + assert_array_equal(support, sel.get_support()) + assert_array_equal(support_inds, sel.get_support(indices=True)) + + +def test_output_dataframe(): + """Check output dtypes for dataframes is consistent with the input dtypes.""" + pd = pytest.importorskip("pandas") + + X = pd.DataFrame( + { + "a": pd.Series([1.0, 2.4, 4.5], dtype=np.float32), + "b": pd.Series(["a", "b", "a"], dtype="category"), + "c": pd.Series(["j", "b", "b"], dtype="category"), + "d": pd.Series([3.0, 2.4, 1.2], dtype=np.float64), + } + ) + + for step in [2, 3]: + sel = StepSelector(step=step).set_output(transform="pandas") + sel.fit(X) + + output = sel.transform(X) + for name, dtype in output.dtypes.items(): + assert dtype == X.dtypes[name] + + # step=0 will select nothing + sel0 = StepSelector(step=0).set_output(transform="pandas") + sel0.fit(X, y) + + msg = "No features were selected" + with pytest.warns(UserWarning, match=msg): + output0 = sel0.transform(X) + + assert_array_equal(output0.index, X.index) + assert output0.shape == (X.shape[0], 0) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py new file mode 100644 index 0000000000000000000000000000000000000000..c50def36f1b6c281e6c96019355b901bf4326a38 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py @@ -0,0 +1,93 @@ +""" +Tests for chi2, currently the only feature selection function designed +specifically to work with sparse matrices. +""" + +import warnings + +import numpy as np +import pytest +import scipy.stats + +from sklearn.feature_selection import SelectKBest, chi2 +from sklearn.feature_selection._univariate_selection import _chisquare +from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +# Feature 0 is highly informative for class 1; +# feature 1 is the same everywhere; +# feature 2 is a bit informative for class 2. 
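The expectation spelled out in the comment above can be sanity-checked directly on the toy matrix defined just below; the rounded numbers here are a hand calculation against the usual contingency-table chi2 and are only meant as orientation (`scores` and `pvalues` are local names used for the illustration):

from sklearn.feature_selection import chi2

scores, pvalues = chi2([[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]], [0, 1, 2, 2])
# scores come out roughly [7.2, 0.0, 0.4]: feature 0 dominates, the constant
# feature 1 scores zero, and feature 2 is only weakly informative, which is
# why k=1 selects {0} and k=2 selects {0, 2} in the tests below.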
+X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]] +y = [0, 1, 2, 2] + + +def mkchi2(k): + """Make k-best chi2 selector""" + return SelectKBest(chi2, k=k) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_chi2(csr_container): + # Test Chi2 feature extraction + + chi2 = mkchi2(k=1).fit(X, y) + chi2 = mkchi2(k=1).fit(X, y) + assert_array_equal(chi2.get_support(indices=True), [0]) + assert_array_equal(chi2.transform(X), np.array(X)[:, [0]]) + + chi2 = mkchi2(k=2).fit(X, y) + assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) + + Xsp = csr_container(X, dtype=np.float64) + chi2 = mkchi2(k=2).fit(Xsp, y) + assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) + Xtrans = chi2.transform(Xsp) + assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2]) + + # == doesn't work on scipy.sparse matrices + Xtrans = Xtrans.toarray() + Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray() + assert_array_almost_equal(Xtrans, Xtrans2) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_chi2_coo(coo_container): + # Check that chi2 works with a COO matrix + # (as returned by CountVectorizer, DictVectorizer) + Xcoo = coo_container(X) + mkchi2(k=2).fit_transform(Xcoo, y) + # if we got here without an exception, we're safe + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_chi2_negative(csr_container): + # Check for proper error on negative numbers in the input X. + X, y = [[0, 1], [-1e-20, 1]], [0, 1] + for X in (X, np.array(X), csr_container(X)): + with pytest.raises(ValueError): + chi2(X, y) + + +def test_chi2_unused_feature(): + # Unused feature should evaluate to NaN + # and should issue no runtime warning + with warnings.catch_warnings(record=True) as warned: + warnings.simplefilter("always") + chi, p = chi2([[1, 0], [0, 0]], [1, 0]) + for w in warned: + if "divide by zero" in repr(w): + raise AssertionError("Found unexpected warning %s" % w) + assert_array_equal(chi, [1, np.nan]) + assert_array_equal(p[1], np.nan) + + +def test_chisquare(): + # Test replacement for scipy.stats.chisquare against the original. 
+ obs = np.array([[2.0, 2.0], [1.0, 1.0]]) + exp = np.array([[1.5, 1.5], [1.5, 1.5]]) + # call SciPy first because our version overwrites obs + chi_scp, p_scp = scipy.stats.chisquare(obs, exp) + chi_our, p_our = _chisquare(obs, exp) + + assert_array_almost_equal(chi_scp, chi_our) + assert_array_almost_equal(p_scp, p_our) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py new file mode 100644 index 0000000000000000000000000000000000000000..3815a88c374e8611dee49e78fe90bd2653efc969 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py @@ -0,0 +1,1017 @@ +""" +Todo: cross-check the F-value with stats model +""" +import itertools +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import sparse, stats + +from sklearn.datasets import load_iris, make_classification, make_regression +from sklearn.feature_selection import ( + GenericUnivariateSelect, + SelectFdr, + SelectFpr, + SelectFwe, + SelectKBest, + SelectPercentile, + chi2, + f_classif, + f_oneway, + f_regression, + mutual_info_classif, + mutual_info_regression, + r_regression, +) +from sklearn.utils import safe_mask +from sklearn.utils._testing import ( + _convert_container, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +############################################################################## +# Test the score functions + + +def test_f_oneway_vs_scipy_stats(): + # Test that our f_oneway gives the same result as scipy.stats + rng = np.random.RandomState(0) + X1 = rng.randn(10, 3) + X2 = 1 + rng.randn(10, 3) + f, pv = stats.f_oneway(X1, X2) + f2, pv2 = f_oneway(X1, X2) + assert np.allclose(f, f2) + assert np.allclose(pv, pv2) + + +def test_f_oneway_ints(): + # Smoke test f_oneway on integers: that it does raise casting errors + # with recent numpys + rng = np.random.RandomState(0) + X = rng.randint(10, size=(10, 10)) + y = np.arange(10) + fint, pint = f_oneway(X, y) + + # test that is gives the same result as with float + f, p = f_oneway(X.astype(float), y) + assert_array_almost_equal(f, fint, decimal=4) + assert_array_almost_equal(p, pint, decimal=4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_f_classif(csr_container): + # Test whether the F test yields meaningful results + # on a simple simulated classification problem + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + F, pv = f_classif(X, y) + F_sparse, pv_sparse = f_classif(csr_container(X), y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + assert_array_almost_equal(F_sparse, F) + assert_array_almost_equal(pv_sparse, pv) + + +@pytest.mark.parametrize("center", [True, False]) +def test_r_regression(center): + X, y = make_regression( + n_samples=2000, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + corr_coeffs = r_regression(X, y, center=center) + assert (-1 < corr_coeffs).all() + assert (corr_coeffs < 1).all() + + sparse_X = _convert_container(X, "sparse") + + sparse_corr_coeffs = 
r_regression(sparse_X, y, center=center) + assert_allclose(sparse_corr_coeffs, corr_coeffs) + + # Testing against numpy for reference + Z = np.hstack((X, y[:, np.newaxis])) + correlation_matrix = np.corrcoef(Z, rowvar=False) + np_corr_coeffs = correlation_matrix[:-1, -1] + assert_array_almost_equal(np_corr_coeffs, corr_coeffs, decimal=3) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_f_regression(csr_container): + # Test whether the F test yields meaningful results + # on a simple simulated regression problem + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + F, pv = f_regression(X, y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + + # with centering, compare with sparse + F, pv = f_regression(X, y, center=True) + F_sparse, pv_sparse = f_regression(csr_container(X), y, center=True) + assert_allclose(F_sparse, F) + assert_allclose(pv_sparse, pv) + + # again without centering, compare with sparse + F, pv = f_regression(X, y, center=False) + F_sparse, pv_sparse = f_regression(csr_container(X), y, center=False) + assert_allclose(F_sparse, F) + assert_allclose(pv_sparse, pv) + + +def test_f_regression_input_dtype(): + # Test whether f_regression returns the same value + # for any numeric data_type + rng = np.random.RandomState(0) + X = rng.rand(10, 20) + y = np.arange(10).astype(int) + + F1, pv1 = f_regression(X, y) + F2, pv2 = f_regression(X, y.astype(float)) + assert_allclose(F1, F2, 5) + assert_allclose(pv1, pv2, 5) + + +def test_f_regression_center(): + # Test whether f_regression preserves dof according to 'center' argument + # We use two centered variates so we have a simple relationship between + # F-score with variates centering and F-score without variates centering. + # Create toy example + X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean + n_samples = X.size + Y = np.ones(n_samples) + Y[::2] *= -1.0 + Y[0] = 0.0 # have Y mean being null + + F1, _ = f_regression(X, Y, center=True) + F2, _ = f_regression(X, Y, center=False) + assert_allclose(F1 * (n_samples - 1.0) / (n_samples - 2.0), F2) + assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS + + +@pytest.mark.parametrize( + "X, y, expected_corr_coef, force_finite", + [ + ( + # A feature in X is constant - forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([0.0, 0.32075]), + True, + ), + ( + # The target y is constant - forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([0.0, 0.0]), + True, + ), + ( + # A feature in X is constant - not forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([np.nan, 0.32075]), + False, + ), + ( + # The target y is constant - not forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([np.nan, np.nan]), + False, + ), + ], +) +def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite): + """Check the behaviour of `force_finite` for some corner cases with `r_regression`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15672 + """ + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + corr_coef = r_regression(X, y, force_finite=force_finite) + np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef) + + +@pytest.mark.parametrize( + "X, y, expected_f_statistic, expected_p_values, force_finite", + [ + ( + # A feature in X is constant - forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([0.0, 0.2293578]), + np.array([1.0, 0.67924985]), + True, + ), + ( + # The target y is constant - forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([0.0, 0.0]), + np.array([1.0, 1.0]), + True, + ), + ( + # Feature in X correlated with y - forcing finite + np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.finfo(np.float64).max, 0.845433]), + np.array([0.0, 0.454913]), + True, + ), + ( + # Feature in X anti-correlated with y - forcing finite + np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.finfo(np.float64).max, 0.845433]), + np.array([0.0, 0.454913]), + True, + ), + ( + # A feature in X is constant - not forcing finite + np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), + np.array([0, 1, 1, 0]), + np.array([np.nan, 0.2293578]), + np.array([np.nan, 0.67924985]), + False, + ), + ( + # The target y is constant - not forcing finite + np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), + np.array([0, 0, 0, 0]), + np.array([np.nan, np.nan]), + np.array([np.nan, np.nan]), + False, + ), + ( + # Feature in X correlated with y - not forcing finite + np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.inf, 0.845433]), + np.array([0.0, 0.454913]), + False, + ), + ( + # Feature in X anti-correlated with y - not forcing finite + np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), + np.array([0, 1, 2, 3]), + np.array([np.inf, 0.845433]), + np.array([0.0, 0.454913]), + False, + ), + ], +) +def test_f_regression_corner_case( + X, y, expected_f_statistic, expected_p_values, force_finite +): + """Check the behaviour of `force_finite` for some corner cases with `f_regression`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15672 + """ + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + f_statistic, p_values = f_regression(X, y, force_finite=force_finite) + np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic) + np.testing.assert_array_almost_equal(p_values, expected_p_values) + + +def test_f_classif_multi_class(): + # Test whether the F test yields meaningful results + # on a simple simulated classification problem + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + F, pv = f_classif(X, y) + assert (F > 0).all() + assert (pv > 0).all() + assert (pv < 1).all() + assert (pv[:5] < 0.05).all() + assert (pv[5:] > 1.0e-4).all() + + +def test_select_percentile_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the percentile heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = SelectPercentile(f_classif, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_select_percentile_classif_sparse(csr_container): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the percentile heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + X = csr_container(X) + univariate_filter = SelectPercentile(f_classif, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r.toarray(), X_r2.toarray()) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + X_r2inv = univariate_filter.inverse_transform(X_r2) + assert sparse.issparse(X_r2inv) + support_mask = safe_mask(X_r2inv, support) + assert X_r2inv.shape == X.shape + assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) + # Check other columns are empty + assert X_r2inv.nnz == X_r.nnz + + +############################################################################## +# Test univariate selection in classification settings + + +def test_select_kbest_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the k best heuristic + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = 
SelectKBest(f_classif, k=5) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="k_best", param=5) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +def test_select_kbest_all(): + # Test whether k="all" correctly returns all features. + X, y = make_classification( + n_samples=20, n_features=10, shuffle=False, random_state=0 + ) + + univariate_filter = SelectKBest(f_classif, k="all") + X_r = univariate_filter.fit(X, y).transform(X) + assert_array_equal(X, X_r) + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/24949 + X_r2 = ( + GenericUnivariateSelect(f_classif, mode="k_best", param="all") + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + + +@pytest.mark.parametrize("dtype_in", [np.float32, np.float64]) +def test_select_kbest_zero(dtype_in): + # Test whether k=0 correctly returns no features. + X, y = make_classification( + n_samples=20, n_features=10, shuffle=False, random_state=0 + ) + X = X.astype(dtype_in) + + univariate_filter = SelectKBest(f_classif, k=0) + univariate_filter.fit(X, y) + support = univariate_filter.get_support() + gtruth = np.zeros(10, dtype=bool) + assert_array_equal(support, gtruth) + with pytest.warns(UserWarning, match="No features were selected"): + X_selected = univariate_filter.transform(X) + assert X_selected.shape == (20, 0) + assert X_selected.dtype == dtype_in + + +def test_select_heuristics_classif(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple classification problem + # with the fdr, fwe and fpr heuristics + X, y = make_classification( + n_samples=200, + n_features=20, + n_informative=3, + n_redundant=2, + n_repeated=0, + n_classes=8, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + univariate_filter = SelectFwe(f_classif, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + gtruth = np.zeros(20) + gtruth[:5] = 1 + for mode in ["fdr", "fpr", "fwe"]: + X_r2 = ( + GenericUnivariateSelect(f_classif, mode=mode, param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + assert_allclose(support, gtruth) + + +############################################################################## +# Test univariate selection in regression settings + + +def assert_best_scores_kept(score_filter): + scores = score_filter.scores_ + support = score_filter.get_support() + assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum() :]) + + +def test_select_percentile_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the percentile heuristic + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectPercentile(f_regression, percentile=25) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="percentile", param=25) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + X_2 = X.copy() + X_2[:, np.logical_not(support)] = 0 + assert_array_equal(X_2, 
univariate_filter.inverse_transform(X_r)) + # Check inverse_transform respects dtype + assert_array_equal( + X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool)) + ) + + +def test_select_percentile_regression_full(): + # Test whether the relative univariate feature selection + # selects all features when '100%' is asked. + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectPercentile(f_regression, percentile=100) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="percentile", param=100) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.ones(20) + assert_array_equal(support, gtruth) + + +def test_select_kbest_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the k best heuristic + X, y = make_regression( + n_samples=200, + n_features=20, + n_informative=5, + shuffle=False, + random_state=0, + noise=10, + ) + + univariate_filter = SelectKBest(f_regression, k=5) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="k_best", param=5) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support, gtruth) + + +def test_select_heuristics_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the fpr, fdr or fwe heuristics + X, y = make_regression( + n_samples=200, + n_features=20, + n_informative=5, + shuffle=False, + random_state=0, + noise=10, + ) + + univariate_filter = SelectFpr(f_regression, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + gtruth = np.zeros(20) + gtruth[:5] = 1 + for mode in ["fdr", "fpr", "fwe"]: + X_r2 = ( + GenericUnivariateSelect(f_regression, mode=mode, param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + assert_array_equal(support[:5], np.ones((5,), dtype=bool)) + assert np.sum(support[5:] == 1) < 3 + + +def test_boundary_case_ch2(): + # Test boundary case, and always aim to select 1 feature. 
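    # Where the asserted 4.0 for feature 0 comes from (rough hand calculation):
    # the class-wise sums of feature 0 are 10 for y=1 and 20 + 20 = 40 for y=0;
    # with class proportions 1/3 and 2/3 the expected split of the total 50 is
    # about 16.7 and 33.3, so chi2 ~ (10 - 16.7)**2 / 16.7 + (40 - 33.3)**2 / 33.3
    # ~ 2.67 + 1.33 = 4.0, and one degree of freedom then gives the ~0.0455
    # p-value asserted below.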
+ X = np.array([[10, 20], [20, 20], [20, 30]]) + y = np.array([[1], [0], [0]]) + scores, pvalues = chi2(X, y) + assert_array_almost_equal(scores, np.array([4.0, 0.71428571])) + assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472])) + + filter_fdr = SelectFdr(chi2, alpha=0.1) + filter_fdr.fit(X, y) + support_fdr = filter_fdr.get_support() + assert_array_equal(support_fdr, np.array([True, False])) + + filter_kbest = SelectKBest(chi2, k=1) + filter_kbest.fit(X, y) + support_kbest = filter_kbest.get_support() + assert_array_equal(support_kbest, np.array([True, False])) + + filter_percentile = SelectPercentile(chi2, percentile=50) + filter_percentile.fit(X, y) + support_percentile = filter_percentile.get_support() + assert_array_equal(support_percentile, np.array([True, False])) + + filter_fpr = SelectFpr(chi2, alpha=0.1) + filter_fpr.fit(X, y) + support_fpr = filter_fpr.get_support() + assert_array_equal(support_fpr, np.array([True, False])) + + filter_fwe = SelectFwe(chi2, alpha=0.1) + filter_fwe.fit(X, y) + support_fwe = filter_fwe.get_support() + assert_array_equal(support_fwe, np.array([True, False])) + + +@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1]) +@pytest.mark.parametrize("n_informative", [1, 5, 10]) +def test_select_fdr_regression(alpha, n_informative): + # Test that fdr heuristic actually has low FDR. + def single_fdr(alpha, n_informative, random_state): + X, y = make_regression( + n_samples=150, + n_features=20, + n_informative=n_informative, + shuffle=False, + random_state=random_state, + noise=10, + ) + + with warnings.catch_warnings(record=True): + # Warnings can be raised when no features are selected + # (low alpha or very noisy data) + univariate_filter = SelectFdr(f_regression, alpha=alpha) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="fdr", param=alpha) + .fit(X, y) + .transform(X) + ) + + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + num_false_positives = np.sum(support[n_informative:] == 1) + num_true_positives = np.sum(support[:n_informative] == 1) + + if num_false_positives == 0: + return 0.0 + false_discovery_rate = num_false_positives / ( + num_true_positives + num_false_positives + ) + return false_discovery_rate + + # As per Benjamini-Hochberg, the expected false discovery rate + # should be lower than alpha: + # FDR = E(FP / (TP + FP)) <= alpha + false_discovery_rate = np.mean( + [single_fdr(alpha, n_informative, random_state) for random_state in range(100)] + ) + assert alpha >= false_discovery_rate + + # Make sure that the empirical false discovery rate increases + # with alpha: + if false_discovery_rate != 0: + assert false_discovery_rate > alpha / 10 + + +def test_select_fwe_regression(): + # Test whether the relative univariate feature selection + # gets the correct items in a simple regression problem + # with the fwe heuristic + X, y = make_regression( + n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 + ) + + univariate_filter = SelectFwe(f_regression, alpha=0.01) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(f_regression, mode="fwe", param=0.01) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(20) + gtruth[:5] = 1 + assert_array_equal(support[:5], np.ones((5,), dtype=bool)) + assert np.sum(support[5:] == 1) < 2 + + +def test_selectkbest_tiebreaking(): + # Test whether SelectKBest actually selects k 
features in case of ties. + # Prior to 0.11, SelectKBest would return more features than requested. + Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] + y = [1] + dummy_score = lambda X, y: (X[0], X[0]) + for X in Xs: + sel = SelectKBest(dummy_score, k=1) + X1 = ignore_warnings(sel.fit_transform)([X], y) + assert X1.shape[1] == 1 + assert_best_scores_kept(sel) + + sel = SelectKBest(dummy_score, k=2) + X2 = ignore_warnings(sel.fit_transform)([X], y) + assert X2.shape[1] == 2 + assert_best_scores_kept(sel) + + +def test_selectpercentile_tiebreaking(): + # Test if SelectPercentile selects the right n_features in case of ties. + Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] + y = [1] + dummy_score = lambda X, y: (X[0], X[0]) + for X in Xs: + sel = SelectPercentile(dummy_score, percentile=34) + X1 = ignore_warnings(sel.fit_transform)([X], y) + assert X1.shape[1] == 1 + assert_best_scores_kept(sel) + + sel = SelectPercentile(dummy_score, percentile=67) + X2 = ignore_warnings(sel.fit_transform)([X], y) + assert X2.shape[1] == 2 + assert_best_scores_kept(sel) + + +def test_tied_pvalues(): + # Test whether k-best and percentiles work with tied pvalues from chi2. + # chi2 will return the same p-values for the following features, but it + # will return different scores. + X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) + y = [0, 1] + + for perm in itertools.permutations((0, 1, 2)): + X = X0[:, perm] + Xt = SelectKBest(chi2, k=2).fit_transform(X, y) + assert Xt.shape == (2, 2) + assert 9998 not in Xt + + Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) + assert Xt.shape == (2, 2) + assert 9998 not in Xt + + +def test_scorefunc_multilabel(): + # Test whether k-best and percentiles works with multilabels with chi2. + + X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]]) + y = [[1, 1], [0, 1], [1, 0]] + + Xt = SelectKBest(chi2, k=2).fit_transform(X, y) + assert Xt.shape == (3, 2) + assert 0 not in Xt + + Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) + assert Xt.shape == (3, 2) + assert 0 not in Xt + + +def test_tied_scores(): + # Test for stable sorting in k-best with tied scores. + X_train = np.array([[0, 0, 0], [1, 1, 1]]) + y_train = [0, 1] + + for n_features in [1, 2, 3]: + sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) + X_test = sel.transform([[0, 1, 2]]) + assert_array_equal(X_test[0], np.arange(3)[-n_features:]) + + +def test_nans(): + # Assert that SelectKBest and SelectPercentile can handle NaNs. + # First feature has zero variance to confuse f_classif (ANOVA) and + # make it return a NaN. + X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] + y = [1, 0, 1] + + for select in ( + SelectKBest(f_classif, k=2), + SelectPercentile(f_classif, percentile=67), + ): + ignore_warnings(select.fit)(X, y) + assert_array_equal(select.get_support(indices=True), np.array([1, 2])) + + +def test_invalid_k(): + X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] + y = [1, 0, 1] + + msg = "k=4 is greater than n_features=3. All the features will be returned." + with pytest.warns(UserWarning, match=msg): + SelectKBest(k=4).fit(X, y) + with pytest.warns(UserWarning, match=msg): + GenericUnivariateSelect(mode="k_best", param=4).fit(X, y) + + +def test_f_classif_constant_feature(): + # Test that f_classif warns if a feature is constant throughout. 
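    # Informally: the one-way ANOVA statistic is F = (between-class mean square)
    # / (within-class mean square); forcing column 0 to the constant 2.0 makes
    # both terms zero for that feature, so the statistic is undefined (0 / 0)
    # and a warning is the expected outcome.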
+ + X, y = make_classification(n_samples=10, n_features=5) + X[:, 0] = 2.0 + with pytest.warns(UserWarning): + f_classif(X, y) + + +def test_no_feature_selected(): + rng = np.random.RandomState(0) + + # Generate random uncorrelated data: a strict univariate test should + # rejects all the features + X = rng.rand(40, 10) + y = rng.randint(0, 4, size=40) + strict_selectors = [ + SelectFwe(alpha=0.01).fit(X, y), + SelectFdr(alpha=0.01).fit(X, y), + SelectFpr(alpha=0.01).fit(X, y), + SelectPercentile(percentile=0).fit(X, y), + SelectKBest(k=0).fit(X, y), + ] + for selector in strict_selectors: + assert_array_equal(selector.get_support(), np.zeros(10)) + with pytest.warns(UserWarning, match="No features were selected"): + X_selected = selector.transform(X) + assert X_selected.shape == (40, 0) + + +def test_mutual_info_classif(): + X, y = make_classification( + n_samples=100, + n_features=5, + n_informative=1, + n_redundant=1, + n_repeated=0, + n_classes=2, + n_clusters_per_class=1, + flip_y=0.0, + class_sep=10, + shuffle=False, + random_state=0, + ) + + # Test in KBest mode. + univariate_filter = SelectKBest(mutual_info_classif, k=2) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(mutual_info_classif, mode="k_best", param=2) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(5) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + # Test in Percentile mode. + univariate_filter = SelectPercentile(mutual_info_classif, percentile=40) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(mutual_info_classif, mode="percentile", param=40) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(5) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + +def test_mutual_info_regression(): + X, y = make_regression( + n_samples=100, + n_features=10, + n_informative=2, + shuffle=False, + random_state=0, + noise=10, + ) + + # Test in KBest mode. + univariate_filter = SelectKBest(mutual_info_regression, k=2) + X_r = univariate_filter.fit(X, y).transform(X) + assert_best_scores_kept(univariate_filter) + X_r2 = ( + GenericUnivariateSelect(mutual_info_regression, mode="k_best", param=2) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(10) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + # Test in Percentile mode. + univariate_filter = SelectPercentile(mutual_info_regression, percentile=20) + X_r = univariate_filter.fit(X, y).transform(X) + X_r2 = ( + GenericUnivariateSelect(mutual_info_regression, mode="percentile", param=20) + .fit(X, y) + .transform(X) + ) + assert_array_equal(X_r, X_r2) + support = univariate_filter.get_support() + gtruth = np.zeros(10) + gtruth[:2] = 1 + assert_array_equal(support, gtruth) + + +def test_dataframe_output_dtypes(): + """Check that the output datafarme dtypes are the same as the input. + + Non-regression test for gh-24860. 
+ """ + pd = pytest.importorskip("pandas") + + X, y = load_iris(return_X_y=True, as_frame=True) + X = X.astype( + { + "petal length (cm)": np.float32, + "petal width (cm)": np.float64, + } + ) + X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10) + + column_order = X.columns + + def selector(X, y): + ranking = { + "sepal length (cm)": 1, + "sepal width (cm)": 2, + "petal length (cm)": 3, + "petal width (cm)": 4, + "petal_width_binned": 5, + } + return np.asarray([ranking[name] for name in column_order]) + + univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas") + output = univariate_filter.fit_transform(X, y) + + assert_array_equal( + output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"] + ) + for name, dtype in output.dtypes.items(): + assert dtype == X.dtypes[name] + + +@pytest.mark.parametrize( + "selector", + [ + SelectKBest(k=4), + SelectPercentile(percentile=80), + GenericUnivariateSelect(mode="k_best", param=4), + GenericUnivariateSelect(mode="percentile", param=80), + ], +) +def test_unsupervised_filter(selector): + """Check support for unsupervised feature selection for the filter that could + require only `X`. + """ + rng = np.random.RandomState(0) + X = rng.randn(10, 5) + + def score_func(X, y=None): + return np.array([1, 1, 1, 1, 0]) + + selector.set_params(score_func=score_func) + selector.fit(X) + X_trans = selector.transform(X) + assert_allclose(X_trans, X[:, :4]) + X_trans = selector.fit_transform(X) + assert_allclose(X_trans, X[:, :4]) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py new file mode 100644 index 0000000000000000000000000000000000000000..3573b7a078294f6284920c5f387fce5f9625906b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py @@ -0,0 +1,684 @@ +import re +import warnings +from unittest.mock import Mock + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator +from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression +from sklearn.datasets import make_friedman1 +from sklearn.decomposition import PCA +from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier +from sklearn.exceptions import NotFittedError +from sklearn.feature_selection import SelectFromModel +from sklearn.linear_model import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + LinearRegression, + LogisticRegression, + PassiveAggressiveClassifier, + SGDClassifier, +) +from sklearn.pipeline import make_pipeline +from sklearn.svm import LinearSVC +from sklearn.utils._testing import ( + MinimalClassifier, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + skip_if_32bit, +) + + +class NaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": True} + + +class NoNaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": False} + + +class NaNTagRandomForest(RandomForestClassifier): + def _more_tags(self): + return {"allow_nan": True} + + +iris = datasets.load_iris() +data, y = iris.data, iris.target +rng = np.random.RandomState(0) + + +def test_invalid_input(): + clf = SGDClassifier( + alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None + ) + for threshold in ["gobbledigook", ".5 * gobbledigook"]: + model = SelectFromModel(clf, threshold=threshold) + model.fit(data, y) + with 
pytest.raises(ValueError): + model.transform(data) + + +def test_input_estimator_unchanged(): + # Test that SelectFromModel fits on a clone of the estimator. + est = RandomForestClassifier() + transformer = SelectFromModel(estimator=est) + transformer.fit(data, y) + assert transformer.estimator is est + + +@pytest.mark.parametrize( + "max_features, err_type, err_msg", + [ + ( + data.shape[1] + 1, + ValueError, + "max_features ==", + ), + ( + lambda X: 1.5, + TypeError, + "max_features must be an instance of int, not float.", + ), + ( + lambda X: data.shape[1] + 1, + ValueError, + "max_features ==", + ), + ( + lambda X: -1, + ValueError, + "max_features ==", + ), + ], +) +def test_max_features_error(max_features, err_type, err_msg): + err_msg = re.escape(err_msg) + clf = RandomForestClassifier(n_estimators=5, random_state=0) + + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + with pytest.raises(err_type, match=err_msg): + transformer.fit(data, y) + + +@pytest.mark.parametrize("max_features", [0, 2, data.shape[1], None]) +def test_inferred_max_features_integer(max_features): + """Check max_features_ and output shape for integer max_features.""" + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(data, y) + if max_features is not None: + assert transformer.max_features_ == max_features + assert X_trans.shape[1] == transformer.max_features_ + else: + assert not hasattr(transformer, "max_features_") + assert X_trans.shape[1] == data.shape[1] + + +@pytest.mark.parametrize( + "max_features", + [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], +) +def test_inferred_max_features_callable(max_features): + """Check max_features_ and output shape for callable max_features.""" + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(data, y) + assert transformer.max_features_ == max_features(data) + assert X_trans.shape[1] == transformer.max_features_ + + +@pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2]) +def test_max_features_array_like(max_features): + X = [ + [0.87, -1.34, 0.31], + [-2.79, -0.02, -0.85], + [-1.34, -0.48, -2.55], + [1.92, 1.48, 0.65], + ] + y = [0, 1, 0, 1] + + clf = RandomForestClassifier(n_estimators=5, random_state=0) + transformer = SelectFromModel( + estimator=clf, max_features=max_features, threshold=-np.inf + ) + X_trans = transformer.fit_transform(X, y) + assert X_trans.shape[1] == transformer.max_features_ + + +@pytest.mark.parametrize( + "max_features", + [lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1], +) +def test_max_features_callable_data(max_features): + """Tests that the callable passed to `fit` is called on X.""" + clf = RandomForestClassifier(n_estimators=50, random_state=0) + m = Mock(side_effect=max_features) + transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf) + transformer.fit_transform(data, y) + m.assert_called_with(data) + + +class FixedImportanceEstimator(BaseEstimator): + def __init__(self, importances): + self.importances = importances + + def fit(self, X, y=None): + self.feature_importances_ = np.array(self.importances) + + +def test_max_features(): + # Test max_features parameter using various values + X, y = 
datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + max_features = X.shape[1] + est = RandomForestClassifier(n_estimators=50, random_state=0) + + transformer1 = SelectFromModel(estimator=est, threshold=-np.inf) + transformer2 = SelectFromModel( + estimator=est, max_features=max_features, threshold=-np.inf + ) + X_new1 = transformer1.fit_transform(X, y) + X_new2 = transformer2.fit_transform(X, y) + assert_allclose(X_new1, X_new2) + + # Test max_features against actual model. + transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42)) + X_new1 = transformer1.fit_transform(X, y) + scores1 = np.abs(transformer1.estimator_.coef_) + candidate_indices1 = np.argsort(-scores1, kind="mergesort") + + for n_features in range(1, X_new1.shape[1] + 1): + transformer2 = SelectFromModel( + estimator=Lasso(alpha=0.025, random_state=42), + max_features=n_features, + threshold=-np.inf, + ) + X_new2 = transformer2.fit_transform(X, y) + scores2 = np.abs(transformer2.estimator_.coef_) + candidate_indices2 = np.argsort(-scores2, kind="mergesort") + assert_allclose( + X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]] + ) + assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_) + + +def test_max_features_tiebreak(): + # Test if max_features can break tie among feature importance + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + max_features = X.shape[1] + + feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1]) + for n_features in range(1, max_features + 1): + transformer = SelectFromModel( + FixedImportanceEstimator(feature_importances), + max_features=n_features, + threshold=-np.inf, + ) + X_new = transformer.fit_transform(X, y) + selected_feature_indices = np.where(transformer._get_support_mask())[0] + assert_array_equal(selected_feature_indices, np.arange(n_features)) + assert X_new.shape[1] == n_features + + +def test_threshold_and_max_features(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + est = RandomForestClassifier(n_estimators=50, random_state=0) + + transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf) + X_new1 = transformer1.fit_transform(X, y) + + transformer2 = SelectFromModel(estimator=est, threshold=0.04) + X_new2 = transformer2.fit_transform(X, y) + + transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04) + X_new3 = transformer3.fit_transform(X, y) + assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1]) + selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :]) + assert_allclose(X_new3, X[:, selected_indices[0]]) + + +@skip_if_32bit +def test_feature_importances(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + est = RandomForestClassifier(n_estimators=50, random_state=0) + for threshold, func in zip(["mean", "median"], [np.mean, np.median]): + transformer = SelectFromModel(estimator=est, threshold=threshold) + transformer.fit(X, y) + assert hasattr(transformer.estimator_, "feature_importances_") + + X_new = transformer.transform(X) + assert X_new.shape[1] < X.shape[1] + 
importances = transformer.estimator_.feature_importances_ + + feature_mask = np.abs(importances) > func(importances) + assert_array_almost_equal(X_new, X[:, feature_mask]) + + +def test_sample_weight(): + # Ensure sample weights are passed to underlying estimator + X, y = datasets.make_classification( + n_samples=100, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # Check with sample weights + sample_weight = np.ones(y.shape) + sample_weight[y == 1] *= 100 + + est = LogisticRegression(random_state=0, fit_intercept=False) + transformer = SelectFromModel(estimator=est) + transformer.fit(X, y, sample_weight=None) + mask = transformer._get_support_mask() + transformer.fit(X, y, sample_weight=sample_weight) + weighted_mask = transformer._get_support_mask() + assert not np.all(weighted_mask == mask) + transformer.fit(X, y, sample_weight=3 * sample_weight) + reweighted_mask = transformer._get_support_mask() + assert np.all(weighted_mask == reweighted_mask) + + +@pytest.mark.parametrize( + "estimator", + [ + Lasso(alpha=0.1, random_state=42), + LassoCV(random_state=42), + ElasticNet(l1_ratio=1, random_state=42), + ElasticNetCV(l1_ratio=[1], random_state=42), + ], +) +def test_coef_default_threshold(estimator): + X, y = datasets.make_classification( + n_samples=100, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # For the Lasso and related models, the threshold defaults to 1e-5 + transformer = SelectFromModel(estimator=estimator) + transformer.fit(X, y) + X_new = transformer.transform(X) + mask = np.abs(transformer.estimator_.coef_) > 1e-5 + assert_array_almost_equal(X_new, X[:, mask]) + + +@skip_if_32bit +def test_2d_coef(): + X, y = datasets.make_classification( + n_samples=1000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + n_classes=4, + ) + + est = LogisticRegression() + for threshold, func in zip(["mean", "median"], [np.mean, np.median]): + for order in [1, 2, np.inf]: + # Fit SelectFromModel a multi-class problem + transformer = SelectFromModel( + estimator=LogisticRegression(), threshold=threshold, norm_order=order + ) + transformer.fit(X, y) + assert hasattr(transformer.estimator_, "coef_") + X_new = transformer.transform(X) + assert X_new.shape[1] < X.shape[1] + + # Manually check that the norm is correctly performed + est.fit(X, y) + importances = np.linalg.norm(est.coef_, axis=0, ord=order) + feature_mask = importances > func(importances) + assert_array_almost_equal(X_new, X[:, feature_mask]) + + +def test_partial_fit(): + est = PassiveAggressiveClassifier( + random_state=0, shuffle=False, max_iter=5, tol=None + ) + transformer = SelectFromModel(estimator=est) + transformer.partial_fit(data, y, classes=np.unique(y)) + old_model = transformer.estimator_ + transformer.partial_fit(data, y, classes=np.unique(y)) + new_model = transformer.estimator_ + assert old_model is new_model + + X_transform = transformer.transform(data) + transformer.fit(np.vstack((data, data)), np.concatenate((y, y))) + assert_array_almost_equal(X_transform, transformer.transform(data)) + + # check that if est doesn't have partial_fit, neither does SelectFromModel + transformer = SelectFromModel(estimator=RandomForestClassifier()) + assert not hasattr(transformer, "partial_fit") + + +def test_calling_fit_reinitializes(): + est = LinearSVC(dual="auto", random_state=0) + transformer = SelectFromModel(estimator=est) + 
transformer.fit(data, y) + transformer.set_params(estimator__C=100) + transformer.fit(data, y) + assert transformer.estimator_.C == 100 + + +def test_prefit(): + # Test all possible combinations of the prefit parameter. + + # Passing a prefit parameter with the selected model + # and fitting a unfit model with prefit=False should give same results. + clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) + model = SelectFromModel(clf) + model.fit(data, y) + X_transform = model.transform(data) + clf.fit(data, y) + model = SelectFromModel(clf, prefit=True) + assert_array_almost_equal(model.transform(data), X_transform) + model.fit(data, y) + assert model.estimator_ is not clf + + # Check that the model is rewritten if prefit=False and a fitted model is + # passed + model = SelectFromModel(clf, prefit=False) + model.fit(data, y) + assert_array_almost_equal(model.transform(data), X_transform) + + # Check that passing an unfitted estimator with `prefit=True` raises a + # `ValueError` + clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) + model = SelectFromModel(clf, prefit=True) + err_msg = "When `prefit=True`, `estimator` is expected to be a fitted estimator." + with pytest.raises(NotFittedError, match=err_msg): + model.fit(data, y) + with pytest.raises(NotFittedError, match=err_msg): + model.partial_fit(data, y) + with pytest.raises(NotFittedError, match=err_msg): + model.transform(data) + + # Check that the internal parameters of prefitted model are not changed + # when calling `fit` or `partial_fit` with `prefit=True` + clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, tol=None).fit(data, y) + model = SelectFromModel(clf, prefit=True) + model.fit(data, y) + assert_allclose(model.estimator_.coef_, clf.coef_) + model.partial_fit(data, y) + assert_allclose(model.estimator_.coef_, clf.coef_) + + +def test_prefit_max_features(): + """Check the interaction between `prefit` and `max_features`.""" + # case 1: an error should be raised at `transform` if `fit` was not called to + # validate the attributes + estimator = RandomForestClassifier(n_estimators=5, random_state=0) + estimator.fit(data, y) + model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1]) + + err_msg = ( + "When `prefit=True` and `max_features` is a callable, call `fit` " + "before calling `transform`." + ) + with pytest.raises(NotFittedError, match=err_msg): + model.transform(data) + + # case 2: `max_features` is not validated and different from an integer + # FIXME: we cannot validate the upper bound of the attribute at transform + # and we should force calling `fit` if we intend to force the attribute + # to have such an upper bound. + max_features = 2.5 + model.set_params(max_features=max_features) + with pytest.raises(ValueError, match="`max_features` must be an integer"): + model.transform(data) + + +def test_prefit_get_feature_names_out(): + """Check the interaction between prefit and the feature names.""" + clf = RandomForestClassifier(n_estimators=2, random_state=0) + clf.fit(data, y) + model = SelectFromModel(clf, prefit=True, max_features=1) + + name = type(model).__name__ + err_msg = ( + f"This {name} instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." 
+ ) + with pytest.raises(NotFittedError, match=err_msg): + model.get_feature_names_out() + + model.fit(data, y) + feature_names = model.get_feature_names_out() + assert feature_names == ["x3"] + + +def test_threshold_string(): + est = RandomForestClassifier(n_estimators=50, random_state=0) + model = SelectFromModel(est, threshold="0.5*mean") + model.fit(data, y) + X_transform = model.transform(data) + + # Calculate the threshold from the estimator directly. + est.fit(data, y) + threshold = 0.5 * np.mean(est.feature_importances_) + mask = est.feature_importances_ > threshold + assert_array_almost_equal(X_transform, data[:, mask]) + + +def test_threshold_without_refitting(): + # Test that the threshold can be set without refitting the model. + clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) + model = SelectFromModel(clf, threshold="0.1 * mean") + model.fit(data, y) + X_transform = model.transform(data) + + # Set a higher threshold to filter out more features. + model.threshold = "1.0 * mean" + assert X_transform.shape[1] > model.transform(data).shape[1] + + +def test_fit_accepts_nan_inf(): + # Test that fit doesn't check for np.inf and np.nan values. + clf = HistGradientBoostingClassifier(random_state=0) + + model = SelectFromModel(estimator=clf) + + nan_data = data.copy() + nan_data[0] = np.nan + nan_data[1] = np.inf + + model.fit(data, y) + + +def test_transform_accepts_nan_inf(): + # Test that transform doesn't check for np.inf and np.nan values. + clf = NaNTagRandomForest(n_estimators=100, random_state=0) + nan_data = data.copy() + + model = SelectFromModel(estimator=clf) + model.fit(nan_data, y) + + nan_data[0] = np.nan + nan_data[1] = np.inf + + model.transform(nan_data) + + +def test_allow_nan_tag_comes_from_estimator(): + allow_nan_est = NaNTag() + model = SelectFromModel(estimator=allow_nan_est) + assert model._get_tags()["allow_nan"] is True + + no_nan_est = NoNaNTag() + model = SelectFromModel(estimator=no_nan_est) + assert model._get_tags()["allow_nan"] is False + + +def _pca_importances(pca_estimator): + return np.abs(pca_estimator.explained_variance_) + + +@pytest.mark.parametrize( + "estimator, importance_getter", + [ + ( + make_pipeline(PCA(random_state=0), LogisticRegression()), + "named_steps.logisticregression.coef_", + ), + (PCA(random_state=0), _pca_importances), + ], +) +def test_importance_getter(estimator, importance_getter): + selector = SelectFromModel( + estimator, threshold="mean", importance_getter=importance_getter + ) + selector.fit(data, y) + assert selector.transform(data).shape[1] == 1 + + +@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression]) +def test_select_from_model_pls(PLSEstimator): + """Check the behaviour of SelectFromModel with PLS estimators. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12410 + """ + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = PLSEstimator(n_components=1) + model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y) + assert model.score(X, y) > 0.5 + + +def test_estimator_does_not_support_feature_names(): + """SelectFromModel works with estimators that do not support feature_names_in_. + + Non-regression test for #21949. 
+ """ + pytest.importorskip("pandas") + X, y = datasets.load_iris(as_frame=True, return_X_y=True) + all_feature_names = set(X.columns) + + def importance_getter(estimator): + return np.arange(X.shape[1]) + + selector = SelectFromModel( + MinimalClassifier(), importance_getter=importance_getter + ).fit(X, y) + + # selector learns the feature names itself + assert_array_equal(selector.feature_names_in_, X.columns) + + feature_names_out = set(selector.get_feature_names_out()) + assert feature_names_out < all_feature_names + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + + selector.transform(X.iloc[1:3]) + + +@pytest.mark.parametrize( + "error, err_msg, max_features", + ( + [ValueError, "max_features == 10, must be <= 4", 10], + [ValueError, "max_features == 5, must be <= 4", lambda x: x.shape[1] + 1], + ), +) +def test_partial_fit_validate_max_features(error, err_msg, max_features): + """Test that partial_fit from SelectFromModel validates `max_features`.""" + X, y = datasets.make_classification( + n_samples=100, + n_features=4, + random_state=0, + ) + + with pytest.raises(error, match=err_msg): + SelectFromModel( + estimator=SGDClassifier(), max_features=max_features + ).partial_fit(X, y, classes=[0, 1]) + + +@pytest.mark.parametrize("as_frame", [True, False]) +def test_partial_fit_validate_feature_names(as_frame): + """Test that partial_fit from SelectFromModel validates `feature_names_in_`.""" + pytest.importorskip("pandas") + X, y = datasets.load_iris(as_frame=as_frame, return_X_y=True) + + selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit( + X, y, classes=[0, 1, 2] + ) + if as_frame: + assert_array_equal(selector.feature_names_in_, X.columns) + else: + assert not hasattr(selector, "feature_names_in_") + + +def test_from_model_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the estimator + does not implement the `partial_fit` method, which is decorated with + `available_if`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + # `LinearRegression` does not implement 'partial_fit' and should raise an + # AttributeError + from_model = SelectFromModel(estimator=LinearRegression()) + + outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'" + inner_msg = "'LinearRegression' object has no attribute 'partial_fit'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + from_model.fit(data, y).partial_fit(data) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py new file mode 100644 index 0000000000000000000000000000000000000000..26367544baa539d8daa7b6508f4ae23cbf4da31c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_mutual_info.py @@ -0,0 +1,254 @@ +import numpy as np +import pytest + +from sklearn.feature_selection import mutual_info_classif, mutual_info_regression +from sklearn.feature_selection._mutual_info import _compute_mi +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_compute_mi_dd(): + # In discrete case computations are straightforward and can be done + # by hand on given vectors. + x = np.array([0, 1, 1, 0, 0]) + y = np.array([1, 0, 0, 0, 1]) + + H_x = H_y = -(3 / 5) * np.log(3 / 5) - (2 / 5) * np.log(2 / 5) + H_xy = -1 / 5 * np.log(1 / 5) - 2 / 5 * np.log(2 / 5) - 2 / 5 * np.log(2 / 5) + I_xy = H_x + H_y - H_xy + + assert_allclose(_compute_mi(x, y, x_discrete=True, y_discrete=True), I_xy) + + +def test_compute_mi_cc(global_dtype): + # For two continuous variables a good approach is to test on bivariate + # normal distribution, where mutual information is known. + + # Mean of the distribution, irrelevant for mutual information. + mean = np.zeros(2) + + # Setup covariance matrix with correlation coeff. equal 0.5. + sigma_1 = 1 + sigma_2 = 10 + corr = 0.5 + cov = np.array( + [ + [sigma_1**2, corr * sigma_1 * sigma_2], + [corr * sigma_1 * sigma_2, sigma_2**2], + ] + ) + + # True theoretical mutual information. + I_theory = np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov)) + + rng = check_random_state(0) + Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False) + + x, y = Z[:, 0], Z[:, 1] + + # Theory and computed values won't be very close + # We here check with a large relative tolerance + for n_neighbors in [3, 5, 7]: + I_computed = _compute_mi( + x, y, x_discrete=False, y_discrete=False, n_neighbors=n_neighbors + ) + assert_allclose(I_computed, I_theory, rtol=1e-1) + + +def test_compute_mi_cd(global_dtype): + # To test define a joint distribution as follows: + # p(x, y) = p(x) p(y | x) + # X ~ Bernoulli(p) + # (Y | x = 0) ~ Uniform(-1, 1) + # (Y | x = 1) ~ Uniform(0, 2) + + # Use the following formula for mutual information: + # I(X; Y) = H(Y) - H(Y | X) + # Two entropies can be computed by hand: + # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2) + # H(Y | X) = ln(2) + + # Now we need to implement sampling from out distribution, which is + # done easily using conditional distribution logic. 
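    # Filling in the step behind the H(Y) formula above: marginally, Y is a
    # mixture of Uniform(-1, 1) with weight (1 - p) and Uniform(0, 2) with
    # weight p, so its density is (1 - p)/2 on (-1, 0), 1/2 on (0, 1) and p/2
    # on (1, 2).  Integrating -f * ln(f) over those three unit intervals gives
    # exactly the three terms of H(Y), and I(X; Y) = H(Y) - ln(2) is the
    # `I_theory` expression used in the loop below.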
+ + n_samples = 1000 + rng = check_random_state(0) + + for p in [0.3, 0.5, 0.7]: + x = rng.uniform(size=n_samples) > p + + y = np.empty(n_samples, global_dtype) + mask = x == 0 + y[mask] = rng.uniform(-1, 1, size=np.sum(mask)) + y[~mask] = rng.uniform(0, 2, size=np.sum(~mask)) + + I_theory = -0.5 * ( + (1 - p) * np.log(0.5 * (1 - p)) + p * np.log(0.5 * p) + np.log(0.5) + ) - np.log(2) + + # Assert the same tolerance. + for n_neighbors in [3, 5, 7]: + I_computed = _compute_mi( + x, y, x_discrete=True, y_discrete=False, n_neighbors=n_neighbors + ) + assert_allclose(I_computed, I_theory, rtol=1e-1) + + +def test_compute_mi_cd_unique_label(global_dtype): + # Test that adding unique label doesn't change MI. + n_samples = 100 + x = np.random.uniform(size=n_samples) > 0.5 + + y = np.empty(n_samples, global_dtype) + mask = x == 0 + y[mask] = np.random.uniform(-1, 1, size=np.sum(mask)) + y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask)) + + mi_1 = _compute_mi(x, y, x_discrete=True, y_discrete=False) + + x = np.hstack((x, 2)) + y = np.hstack((y, 10)) + mi_2 = _compute_mi(x, y, x_discrete=True, y_discrete=False) + + assert_allclose(mi_1, mi_2) + + +# We are going test that feature ordering by MI matches our expectations. +def test_mutual_info_classif_discrete(global_dtype): + X = np.array( + [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype + ) + y = np.array([0, 1, 2, 2, 1]) + + # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly + # informative. + mi = mutual_info_classif(X, y, discrete_features=True) + assert_array_equal(np.argsort(-mi), np.array([0, 2, 1])) + + +def test_mutual_info_regression(global_dtype): + # We generate sample from multivariate normal distribution, using + # transformation from initially uncorrelated variables. The zero + # variables after transformation is selected as the target vector, + # it has the strongest correlation with the variable 2, and + # the weakest correlation with the variable 1. + T = np.array([[1, 0.5, 2, 1], [0, 1, 0.1, 0.0], [0, 0.1, 1, 0.1], [0, 0.1, 0.1, 1]]) + cov = T.dot(T.T) + mean = np.zeros(4) + + rng = check_random_state(0) + Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False) + X = Z[:, 1:] + y = Z[:, 0] + + mi = mutual_info_regression(X, y, random_state=0) + assert_array_equal(np.argsort(-mi), np.array([1, 2, 0])) + # XXX: should mutual_info_regression be fixed to avoid + # up-casting float32 inputs to float64? + assert mi.dtype == np.float64 + + +def test_mutual_info_classif_mixed(global_dtype): + # Here the target is discrete and there are two continuous and one + # discrete feature. The idea of this test is clear from the code. 
+ rng = check_random_state(0) + X = rng.rand(1000, 3).astype(global_dtype, copy=False) + X[:, 1] += X[:, 0] + y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int) + X[:, 2] = X[:, 2] > 0.5 + + mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3, random_state=0) + assert_array_equal(np.argsort(-mi), [2, 0, 1]) + for n_neighbors in [5, 7, 9]: + mi_nn = mutual_info_classif( + X, y, discrete_features=[2], n_neighbors=n_neighbors, random_state=0 + ) + # Check that the continuous values have an higher MI with greater + # n_neighbors + assert mi_nn[0] > mi[0] + assert mi_nn[1] > mi[1] + # The n_neighbors should not have any effect on the discrete value + # The MI should be the same + assert mi_nn[2] == mi[2] + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_mutual_info_options(global_dtype, csr_container): + X = np.array( + [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype + ) + y = np.array([0, 1, 2, 2, 1], dtype=global_dtype) + X_csr = csr_container(X) + + for mutual_info in (mutual_info_regression, mutual_info_classif): + with pytest.raises(ValueError): + mutual_info(X_csr, y, discrete_features=False) + with pytest.raises(ValueError): + mutual_info(X, y, discrete_features="manual") + with pytest.raises(ValueError): + mutual_info(X_csr, y, discrete_features=[True, False, True]) + with pytest.raises(IndexError): + mutual_info(X, y, discrete_features=[True, False, True, False]) + with pytest.raises(IndexError): + mutual_info(X, y, discrete_features=[1, 4]) + + mi_1 = mutual_info(X, y, discrete_features="auto", random_state=0) + mi_2 = mutual_info(X, y, discrete_features=False, random_state=0) + mi_3 = mutual_info(X_csr, y, discrete_features="auto", random_state=0) + mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0) + mi_5 = mutual_info(X, y, discrete_features=[True, False, True], random_state=0) + mi_6 = mutual_info(X, y, discrete_features=[0, 2], random_state=0) + + assert_allclose(mi_1, mi_2) + assert_allclose(mi_3, mi_4) + assert_allclose(mi_5, mi_6) + + assert not np.allclose(mi_1, mi_3) + + +@pytest.mark.parametrize("correlated", [True, False]) +def test_mutual_information_symmetry_classif_regression(correlated, global_random_seed): + """Check that `mutual_info_classif` and `mutual_info_regression` are + symmetric by switching the target `y` as `feature` in `X` and vice + versa. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23720 + """ + rng = np.random.RandomState(global_random_seed) + n = 100 + d = rng.randint(10, size=n) + + if correlated: + c = d.astype(np.float64) + else: + c = rng.normal(0, 1, size=n) + + mi_classif = mutual_info_classif( + c[:, None], d, discrete_features=[False], random_state=global_random_seed + ) + + mi_regression = mutual_info_regression( + d[:, None], c, discrete_features=[True], random_state=global_random_seed + ) + + assert mi_classif == pytest.approx(mi_regression) + + +def test_mutual_info_regression_X_int_dtype(global_random_seed): + """Check that results agree when X is integer dtype and float dtype. + + Non-regression test for Issue #26696. 
+ """ + rng = np.random.RandomState(global_random_seed) + X = rng.randint(100, size=(100, 10)) + X_float = X.astype(np.float64, copy=True) + y = rng.randint(100, size=100) + + expected = mutual_info_regression(X_float, y, random_state=global_random_seed) + result = mutual_info_regression(X, y, random_state=global_random_seed) + assert_allclose(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py new file mode 100644 index 0000000000000000000000000000000000000000..e3edb0e7b5d213dc4b9445a3cf971a1bc4d28398 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py @@ -0,0 +1,615 @@ +""" +Testing Recursive feature elimination +""" + +from operator import attrgetter + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal + +from sklearn.base import BaseEstimator, ClassifierMixin +from sklearn.compose import TransformedTargetRegressor +from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression +from sklearn.datasets import load_iris, make_friedman1 +from sklearn.ensemble import RandomForestClassifier +from sklearn.feature_selection import RFE, RFECV +from sklearn.impute import SimpleImputer +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.metrics import get_scorer, make_scorer, zero_one_loss +from sklearn.model_selection import GroupKFold, cross_val_score +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC, SVR, LinearSVR +from sklearn.utils import check_random_state +from sklearn.utils._testing import ignore_warnings +from sklearn.utils.fixes import CSR_CONTAINERS + + +class MockClassifier: + """ + Dummy classifier to test recursive feature elimination + """ + + def __init__(self, foo_param=0): + self.foo_param = foo_param + + def fit(self, X, y): + assert len(X) == len(y) + self.coef_ = np.ones(X.shape[1], dtype=np.float64) + return self + + def predict(self, T): + return T.shape[0] + + predict_proba = predict + decision_function = predict + transform = predict + + def score(self, X=None, y=None): + return 0.0 + + def get_params(self, deep=True): + return {"foo_param": self.foo_param} + + def set_params(self, **params): + return self + + def _more_tags(self): + return {"allow_nan": True} + + +def test_rfe_features_importance(): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2) + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + assert len(rfe.ranking_) == X.shape[1] + + clf_svc = SVC(kernel="linear") + rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1) + rfe_svc.fit(X, y) + + # Check if the supports are equal + assert_array_equal(rfe.get_support(), rfe_svc.get_support()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_rfe(csr_container): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. 
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + X_sparse = csr_container(X) + y = iris.target + + # dense model + clf = SVC(kernel="linear") + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + X_r = rfe.transform(X) + clf.fit(X_r, y) + assert len(rfe.ranking_) == X.shape[1] + + # sparse model + clf_sparse = SVC(kernel="linear") + rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) + rfe_sparse.fit(X_sparse, y) + X_r_sparse = rfe_sparse.transform(X_sparse) + + assert X_r.shape == iris.data.shape + assert_array_almost_equal(X_r[:10], iris.data[:10]) + + assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) + assert rfe.score(X, y) == clf.score(iris.data, iris.target) + assert_array_almost_equal(X_r, X_r_sparse.toarray()) + + +def test_RFE_fit_score_params(): + # Make sure RFE passes the metadata down to fit and score methods of the + # underlying estimator + class TestEstimator(BaseEstimator, ClassifierMixin): + def fit(self, X, y, prop=None): + if prop is None: + raise ValueError("fit: prop cannot be None") + self.svc_ = SVC(kernel="linear").fit(X, y) + self.coef_ = self.svc_.coef_ + return self + + def score(self, X, y, prop=None): + if prop is None: + raise ValueError("score: prop cannot be None") + return self.svc_.score(X, y) + + X, y = load_iris(return_X_y=True) + with pytest.raises(ValueError, match="fit: prop cannot be None"): + RFE(estimator=TestEstimator()).fit(X, y) + with pytest.raises(ValueError, match="score: prop cannot be None"): + RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y) + + RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y, prop="foo") + + +def test_rfe_percent_n_features(): + # test that the results are the same + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + # there are 10 features in the data. We select 40%. + clf = SVC(kernel="linear") + rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe_num.fit(X, y) + + rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1) + rfe_perc.fit(X, y) + + assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_) + assert_array_equal(rfe_perc.support_, rfe_num.support_) + + +def test_rfe_mockclassifier(): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + # dense model + clf = MockClassifier() + rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) + rfe.fit(X, y) + X_r = rfe.transform(X) + clf.fit(X_r, y) + assert len(rfe.ranking_) == X.shape[1] + assert X_r.shape == iris.data.shape + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_rfecv(csr_container): + generator = check_random_state(0) + iris = load_iris() + # Add some irrelevant features. Random seed is set to make sure that + # irrelevant features are always irrelevant. 
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) # regression test: list should be supported + + # Test using the score function + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1) + rfecv.fit(X, y) + # non-regression test for missing worst feature: + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == X.shape[1] + + assert len(rfecv.ranking_) == X.shape[1] + X_r = rfecv.transform(X) + + # All the noisy variable were filtered out + assert_array_equal(X_r, iris.data) + + # same in sparse + rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1) + X_sparse = csr_container(X) + rfecv_sparse.fit(X_sparse, y) + X_r_sparse = rfecv_sparse.transform(X_sparse) + assert_array_equal(X_r_sparse.toarray(), iris.data) + + # Test using a customized loss function + scoring = make_scorer(zero_one_loss, greater_is_better=False) + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring) + ignore_warnings(rfecv.fit)(X, y) + X_r = rfecv.transform(X) + assert_array_equal(X_r, iris.data) + + # Test using a scorer + scorer = get_scorer("accuracy") + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer) + rfecv.fit(X, y) + X_r = rfecv.transform(X) + assert_array_equal(X_r, iris.data) + + # Test fix on cv_results_ + def test_scorer(estimator, X, y): + return 1.0 + + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer) + rfecv.fit(X, y) + + # In the event of cross validation score ties, the expected behavior of + # RFECV is to return the FEWEST features that maximize the CV score. + # Because test_scorer always returns 1.0 in this example, RFECV should + # reduce the dimensionality to a single feature (i.e. n_features_ = 1) + assert rfecv.n_features_ == 1 + + # Same as the first two tests, but with step=2 + rfecv = RFECV(estimator=SVC(kernel="linear"), step=2) + rfecv.fit(X, y) + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == 6 + + assert len(rfecv.ranking_) == X.shape[1] + X_r = rfecv.transform(X) + assert_array_equal(X_r, iris.data) + + rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2) + X_sparse = csr_container(X) + rfecv_sparse.fit(X_sparse, y) + X_r_sparse = rfecv_sparse.transform(X_sparse) + assert_array_equal(X_r_sparse.toarray(), iris.data) + + # Verifying that steps < 1 don't blow up. + rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=0.2) + X_sparse = csr_container(X) + rfecv_sparse.fit(X_sparse, y) + X_r_sparse = rfecv_sparse.transform(X_sparse) + assert_array_equal(X_r_sparse.toarray(), iris.data) + + +def test_rfecv_mockclassifier(): + generator = check_random_state(0) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) # regression test: list should be supported + + # Test using the score function + rfecv = RFECV(estimator=MockClassifier(), step=1) + rfecv.fit(X, y) + # non-regression test for missing worst feature: + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == X.shape[1] + + assert len(rfecv.ranking_) == X.shape[1] + + +def test_rfecv_verbose_output(): + # Check verbose=1 is producing an output. 
+ import sys + from io import StringIO + + sys.stdout = StringIO() + + generator = check_random_state(0) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) + + rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1) + rfecv.fit(X, y) + + verbose_output = sys.stdout + verbose_output.seek(0) + assert len(verbose_output.readline()) > 0 + + +def test_rfecv_cv_results_size(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = list(iris.target) # regression test: list should be supported + + # Non-regression test for varying combinations of step and + # min_features_to_select. + for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]: + rfecv = RFECV( + estimator=MockClassifier(), + step=step, + min_features_to_select=min_features_to_select, + ) + rfecv.fit(X, y) + + score_len = np.ceil((X.shape[1] - min_features_to_select) / step) + 1 + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == score_len + + assert len(rfecv.ranking_) == X.shape[1] + assert rfecv.n_features_ >= min_features_to_select + + +def test_rfe_estimator_tags(): + rfe = RFE(SVC(kernel="linear")) + assert rfe._estimator_type == "classifier" + # make sure that cross-validation is stratified + iris = load_iris() + score = cross_val_score(rfe, iris.data, iris.target) + assert score.min() > 0.7 + + +def test_rfe_min_step(global_random_seed): + n_features = 10 + X, y = make_friedman1( + n_samples=50, n_features=n_features, random_state=global_random_seed + ) + n_samples, n_features = X.shape + estimator = SVR(kernel="linear") + + # Test when floor(step * n_features) <= 0 + selector = RFE(estimator, step=0.01) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + # Test when step is between (0,1) and floor(step * n_features) > 0 + selector = RFE(estimator, step=0.20) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + # Test when step is an integer + selector = RFE(estimator, step=5) + sel = selector.fit(X, y) + assert sel.support_.sum() == n_features // 2 + + +def test_number_of_subsets_of_features(global_random_seed): + # In RFE, 'number_of_subsets_of_features' + # = the number of iterations in '_fit' + # = max(ranking_) + # = 1 + (n_features + step - n_features_to_select - 1) // step + # After optimization #4534, this number + # = 1 + np.ceil((n_features - n_features_to_select) / float(step)) + # This test case is to test their equivalence, refer to #4534 and #3824 + + def formula1(n_features, n_features_to_select, step): + return 1 + ((n_features + step - n_features_to_select - 1) // step) + + def formula2(n_features, n_features_to_select, step): + return 1 + np.ceil((n_features - n_features_to_select) / float(step)) + + # RFE + # Case 1, n_features - n_features_to_select is divisible by step + # Case 2, n_features - n_features_to_select is not divisible by step + n_features_list = [11, 11] + n_features_to_select_list = [3, 3] + step_list = [2, 3] + for n_features, n_features_to_select, step in zip( + n_features_list, n_features_to_select_list, step_list + ): + generator = check_random_state(global_random_seed) + X = generator.normal(size=(100, n_features)) + y = generator.rand(100).round() + rfe = RFE( + estimator=SVC(kernel="linear"), + n_features_to_select=n_features_to_select, + step=step, + ) + rfe.fit(X, y) + # this number also equals to the maximum of ranking_ + 
assert np.max(rfe.ranking_) == formula1(n_features, n_features_to_select, step) + assert np.max(rfe.ranking_) == formula2(n_features, n_features_to_select, step) + + # In RFECV, 'fit' calls 'RFE._fit' + # 'number_of_subsets_of_features' of RFE + # = the size of each score in 'cv_results_' of RFECV + # = the number of iterations of the for loop before optimization #4534 + + # RFECV, n_features_to_select = 1 + # Case 1, n_features - 1 is divisible by step + # Case 2, n_features - 1 is not divisible by step + + n_features_to_select = 1 + n_features_list = [11, 10] + step_list = [2, 2] + for n_features, step in zip(n_features_list, step_list): + generator = check_random_state(global_random_seed) + X = generator.normal(size=(100, n_features)) + y = generator.rand(100).round() + rfecv = RFECV(estimator=SVC(kernel="linear"), step=step) + rfecv.fit(X, y) + + for key in rfecv.cv_results_.keys(): + assert len(rfecv.cv_results_[key]) == formula1( + n_features, n_features_to_select, step + ) + assert len(rfecv.cv_results_[key]) == formula2( + n_features, n_features_to_select, step + ) + + +def test_rfe_cv_n_jobs(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + rfecv = RFECV(estimator=SVC(kernel="linear")) + rfecv.fit(X, y) + rfecv_ranking = rfecv.ranking_ + + rfecv_cv_results_ = rfecv.cv_results_ + + rfecv.set_params(n_jobs=2) + rfecv.fit(X, y) + assert_array_almost_equal(rfecv.ranking_, rfecv_ranking) + + assert rfecv_cv_results_.keys() == rfecv.cv_results_.keys() + for key in rfecv_cv_results_.keys(): + assert rfecv_cv_results_[key] == pytest.approx(rfecv.cv_results_[key]) + + +def test_rfe_cv_groups(): + generator = check_random_state(0) + iris = load_iris() + number_groups = 4 + groups = np.floor(np.linspace(0, number_groups, len(iris.target))) + X = iris.data + y = (iris.target > 0).astype(int) + + est_groups = RFECV( + estimator=RandomForestClassifier(random_state=generator), + step=1, + scoring="accuracy", + cv=GroupKFold(n_splits=2), + ) + est_groups.fit(X, y, groups=groups) + assert est_groups.n_features_ > 0 + + +@pytest.mark.parametrize( + "importance_getter", [attrgetter("regressor_.coef_"), "regressor_.coef_"] +) +@pytest.mark.parametrize("selector, expected_n_features", [(RFE, 5), (RFECV, 4)]) +def test_rfe_wrapped_estimator(importance_getter, selector, expected_n_features): + # Non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/15312 + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = LinearSVR(dual="auto", random_state=0) + + log_estimator = TransformedTargetRegressor( + regressor=estimator, func=np.log, inverse_func=np.exp + ) + + selector = selector(log_estimator, importance_getter=importance_getter) + sel = selector.fit(X, y) + assert sel.support_.sum() == expected_n_features + + +@pytest.mark.parametrize( + "importance_getter, err_type", + [ + ("auto", ValueError), + ("random", AttributeError), + (lambda x: x.importance, AttributeError), + ], +) +@pytest.mark.parametrize("Selector", [RFE, RFECV]) +def test_rfe_importance_getter_validation(importance_getter, err_type, Selector): + X, y = make_friedman1(n_samples=50, n_features=10, random_state=42) + estimator = LinearSVR(dual="auto") + log_estimator = TransformedTargetRegressor( + regressor=estimator, func=np.log, inverse_func=np.exp + ) + + with pytest.raises(err_type): + model = Selector(log_estimator, importance_getter=importance_getter) 
+ model.fit(X, y) + + +@pytest.mark.parametrize("cv", [None, 5]) +def test_rfe_allow_nan_inf_in_x(cv): + iris = load_iris() + X = iris.data + y = iris.target + + # add nan and inf value to X + X[0][0] = np.nan + X[0][1] = np.inf + + clf = MockClassifier() + if cv is not None: + rfe = RFECV(estimator=clf, cv=cv) + else: + rfe = RFE(estimator=clf) + rfe.fit(X, y) + rfe.transform(X) + + +def test_w_pipeline_2d_coef_(): + pipeline = make_pipeline(StandardScaler(), LogisticRegression()) + + data, y = load_iris(return_X_y=True) + sfm = RFE( + pipeline, + n_features_to_select=2, + importance_getter="named_steps.logisticregression.coef_", + ) + + sfm.fit(data, y) + assert sfm.transform(data).shape[1] == 2 + + +def test_rfecv_std_and_mean(global_random_seed): + generator = check_random_state(global_random_seed) + iris = load_iris() + X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] + y = iris.target + + rfecv = RFECV(estimator=SVC(kernel="linear")) + rfecv.fit(X, y) + n_split_keys = len(rfecv.cv_results_) - 2 + split_keys = [f"split{i}_test_score" for i in range(n_split_keys)] + + cv_scores = np.asarray([rfecv.cv_results_[key] for key in split_keys]) + expected_mean = np.mean(cv_scores, axis=0) + expected_std = np.std(cv_scores, axis=0) + + assert_allclose(rfecv.cv_results_["mean_test_score"], expected_mean) + assert_allclose(rfecv.cv_results_["std_test_score"], expected_std) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +def test_multioutput(ClsRFE): + X = np.random.normal(size=(10, 3)) + y = np.random.randint(2, size=(10, 2)) + clf = RandomForestClassifier(n_estimators=5) + rfe_test = ClsRFE(clf) + rfe_test.fit(X, y) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +def test_pipeline_with_nans(ClsRFE): + """Check that RFE works with pipeline that accept nans. + + Non-regression test for gh-21743. + """ + X, y = load_iris(return_X_y=True) + X[0, 0] = np.nan + + pipe = make_pipeline( + SimpleImputer(), + StandardScaler(), + LogisticRegression(), + ) + + fs = ClsRFE( + estimator=pipe, + importance_getter="named_steps.logisticregression.coef_", + ) + fs.fit(X, y) + + +@pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) +@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression]) +def test_rfe_pls(ClsRFE, PLSEstimator): + """Check the behaviour of RFE with PLS estimators. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/12410 + """ + X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) + estimator = PLSEstimator(n_components=1) + selector = ClsRFE(estimator, step=1).fit(X, y) + assert selector.score(X, y) > 0.5 + + +def test_rfe_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the estimator + does not implement the `decision_function` method, which is decorated with + `available_if`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + iris = load_iris() + + # `LinearRegression` does not implement 'decision_function' and should raise an + # AttributeError + rfe = RFE(estimator=LinearRegression()) + + outer_msg = "This 'RFE' has no attribute 'decision_function'" + inner_msg = "'LinearRegression' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + rfe.fit(iris.data, iris.target).decision_function(iris.data) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..82d65c55a019512ecef189a881fd9316bd813d70 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py @@ -0,0 +1,323 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.cluster import KMeans +from sklearn.datasets import make_blobs, make_classification, make_regression +from sklearn.ensemble import HistGradientBoostingRegressor +from sklearn.feature_selection import SequentialFeatureSelector +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import LeaveOneGroupOut, cross_val_score +from sklearn.neighbors import KNeighborsClassifier +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_bad_n_features_to_select(): + n_features = 5 + X, y = make_regression(n_features=n_features) + sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=n_features) + with pytest.raises(ValueError, match="n_features_to_select must be < n_features"): + sfs.fit(X, y) + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto")) +def test_n_features_to_select(direction, n_features_to_select): + # Make sure n_features_to_select is respected + + n_features = 10 + X, y = make_regression(n_features=n_features, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + + if n_features_to_select == "auto": + n_features_to_select = n_features // 2 + + assert sfs.get_support(indices=True).shape[0] == n_features_to_select + assert sfs.n_features_to_select_ == n_features_to_select + assert sfs.transform(X).shape[1] == n_features_to_select + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +def test_n_features_to_select_auto(direction): + """Check the behaviour of `n_features_to_select="auto"` with different + values for the parameter `tol`. 
+ """ + + n_features = 10 + tol = 1e-3 + X, y = make_regression(n_features=n_features, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + tol=tol, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + + max_features_to_select = n_features - 1 + + assert sfs.get_support(indices=True).shape[0] <= max_features_to_select + assert sfs.n_features_to_select_ <= max_features_to_select + assert sfs.transform(X).shape[1] <= max_features_to_select + assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_ + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +def test_n_features_to_select_stopping_criterion(direction): + """Check the behaviour stopping criterion for feature selection + depending on the values of `n_features_to_select` and `tol`. + + When `direction` is `'forward'`, select a new features at random + among those not currently selected in selector.support_, + build a new version of the data that includes all the features + in selector.support_ + this newly selected feature. + And check that the cross-validation score of the model trained on + this new dataset variant is lower than the model with + the selected forward selected features or at least does not improve + by more than the tol margin. + + When `direction` is `'backward'`, instead of adding a new feature + to selector.support_, try to remove one of those selected features at random + And check that the cross-validation score is either decreasing or + not improving by more than the tol margin. + """ + + X, y = make_regression(n_features=50, n_informative=10, random_state=0) + + tol = 1e-3 + + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + tol=tol, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + selected_X = sfs.transform(X) + + rng = np.random.RandomState(0) + + added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True))) + added_X = np.hstack( + [ + selected_X, + (X[:, rng.choice(added_candidates)])[:, np.newaxis], + ] + ) + + removed_candidate = rng.choice(list(range(sfs.n_features_to_select_))) + removed_X = np.delete(selected_X, removed_candidate, axis=1) + + plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean() + sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean() + added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean() + removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean() + + assert sfs_cv_score >= plain_cv_score + + if direction == "forward": + assert (sfs_cv_score - added_cv_score) <= tol + assert (sfs_cv_score - removed_cv_score) >= tol + else: + assert (added_cv_score - sfs_cv_score) <= tol + assert (removed_cv_score - sfs_cv_score) <= tol + + +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize( + "n_features_to_select, expected", + ( + (0.1, 1), + (1.0, 10), + (0.5, 5), + ), +) +def test_n_features_to_select_float(direction, n_features_to_select, expected): + # Test passing a float as n_features_to_select + X, y = make_regression(n_features=10) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + assert sfs.n_features_to_select_ == expected + + +@pytest.mark.parametrize("seed", range(10)) +@pytest.mark.parametrize("direction", ("forward", "backward")) +@pytest.mark.parametrize( + "n_features_to_select, expected_selected_features", 
+ [ + (2, [0, 2]), # f1 is dropped since it has no predictive power + (1, [2]), # f2 is more predictive than f0 so it's kept + ], +) +def test_sanity(seed, direction, n_features_to_select, expected_selected_features): + # Basic sanity check: 3 features, only f0 and f2 are correlated with the + # target, f2 having a stronger correlation than f0. We expect f1 to be + # dropped, and f2 to always be selected. + + rng = np.random.RandomState(seed) + n_samples = 100 + X = rng.randn(n_samples, 3) + y = 3 * X[:, 0] - 10 * X[:, 2] + + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select=n_features_to_select, + direction=direction, + cv=2, + ) + sfs.fit(X, y) + assert_array_equal(sfs.get_support(indices=True), expected_selected_features) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_support(csr_container): + # Make sure sparse data is supported + + X, y = make_regression(n_features=10) + X = csr_container(X) + sfs = SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ) + sfs.fit(X, y) + sfs.transform(X) + + +def test_nan_support(): + # Make sure nans are OK if the underlying estimator supports nans + + rng = np.random.RandomState(0) + n_samples, n_features = 40, 4 + X, y = make_regression(n_samples, n_features, random_state=0) + nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool) + X[nan_mask] = np.nan + sfs = SequentialFeatureSelector( + HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2 + ) + sfs.fit(X, y) + sfs.transform(X) + + with pytest.raises(ValueError, match="Input X contains NaN"): + # LinearRegression does not support nans + SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ).fit(X, y) + + +def test_pipeline_support(): + # Make sure that pipelines can be passed into SFS and that SFS can be + # passed into a pipeline + + n_samples, n_features = 50, 3 + X, y = make_regression(n_samples, n_features, random_state=0) + + # pipeline in SFS + pipe = make_pipeline(StandardScaler(), LinearRegression()) + sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2) + sfs.fit(X, y) + sfs.transform(X) + + # SFS in pipeline + sfs = SequentialFeatureSelector( + LinearRegression(), n_features_to_select="auto", cv=2 + ) + pipe = make_pipeline(StandardScaler(), sfs) + pipe.fit(X, y) + pipe.transform(X) + + +@pytest.mark.parametrize("n_features_to_select", (2, 3)) +def test_unsupervised_model_fit(n_features_to_select): + # Make sure that models without classification labels are not being + # validated + + X, y = make_blobs(n_features=4) + sfs = SequentialFeatureSelector( + KMeans(n_init=1), + n_features_to_select=n_features_to_select, + ) + sfs.fit(X) + assert sfs.transform(X).shape[1] == n_features_to_select + + +@pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3)) +def test_no_y_validation_model_fit(y): + # Make sure that other non-conventional y labels are not accepted + + X, clusters = make_blobs(n_features=6) + sfs = SequentialFeatureSelector( + KMeans(), + n_features_to_select=3, + ) + + with pytest.raises((TypeError, ValueError)): + sfs.fit(X, y) + + +def test_forward_neg_tol_error(): + """Check that we raise an error when tol<0 and direction='forward'""" + X, y = make_regression(n_features=10, random_state=0) + sfs = SequentialFeatureSelector( + LinearRegression(), + n_features_to_select="auto", + direction="forward", + tol=-1e-3, + ) + + with pytest.raises(ValueError, match="tol must be positive"): + 
sfs.fit(X, y) + + +def test_backward_neg_tol(): + """Check that SequentialFeatureSelector works negative tol + + non-regression test for #25525 + """ + X, y = make_regression(n_features=10, random_state=0) + lr = LinearRegression() + initial_score = lr.fit(X, y).score(X, y) + + sfs = SequentialFeatureSelector( + lr, + n_features_to_select="auto", + direction="backward", + tol=-1e-3, + ) + Xr = sfs.fit_transform(X, y) + new_score = lr.fit(Xr, y).score(Xr, y) + + assert 0 < sfs.get_support().sum() < X.shape[1] + assert new_score < initial_score + + +def test_cv_generator_support(): + """Check that no exception raised when cv is generator + + non-regression test for #25957 + """ + X, y = make_classification(random_state=0) + + groups = np.zeros_like(y, dtype=int) + groups[y.size // 2 :] = 1 + + cv = LeaveOneGroupOut() + splits = cv.split(X, y, groups=groups) + + knc = KNeighborsClassifier(n_neighbors=5) + + sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits) + sfs.fit(X, y) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..45e66cb338a4b7a5a410db669a13f6f9213451dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py @@ -0,0 +1,72 @@ +import numpy as np +import pytest + +from sklearn.feature_selection import VarianceThreshold +from sklearn.utils._testing import assert_array_equal +from sklearn.utils.fixes import BSR_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]] + +data2 = [[-0.13725701]] * 10 + + +@pytest.mark.parametrize( + "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_zero_variance(sparse_container): + # Test VarianceThreshold with default setting, zero variance. + X = data if sparse_container is None else sparse_container(data) + sel = VarianceThreshold().fit(X) + assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True)) + + +def test_zero_variance_value_error(): + # Test VarianceThreshold with default setting, zero variance, error cases. + with pytest.raises(ValueError): + VarianceThreshold().fit([[0, 1, 2, 3]]) + with pytest.raises(ValueError): + VarianceThreshold().fit([[0, 1], [0, 1]]) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_variance_threshold(sparse_container): + # Test VarianceThreshold with custom variance. + X = data if sparse_container is None else sparse_container(data) + X = VarianceThreshold(threshold=0.4).fit_transform(X) + assert (len(data), 1) == X.shape + + +@pytest.mark.skipif( + np.var(data2) == 0, + reason=( + "This test is not valid for this platform, " + "as it relies on numerical instabilities." + ), +) +@pytest.mark.parametrize( + "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_zero_variance_floating_point_error(sparse_container): + # Test that VarianceThreshold(0.0).fit eliminates features that have + # the same value in every sample, even when floating point errors + # cause np.var not to be 0 for the feature. 
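An illustrative aside on the floating point effect this test relies on (whether the computed variance comes out exactly 0.0 depends on the platform, which is why the test is guarded by the `skipif` above):

import numpy as np

# Ten copies of the same double: the column has no spread, but the mean may
# not round-trip exactly, so np.var can return a tiny positive number
# instead of 0.0 on some platforms.
col = np.full(10, -0.13725701)
print(np.var(col))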
+ # See #13691 + X = data2 if sparse_container is None else sparse_container(data2) + msg = "No feature in X meets the variance threshold 0.00000" + with pytest.raises(ValueError, match=msg): + VarianceThreshold().fit(X) + + +@pytest.mark.parametrize( + "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_variance_nan(sparse_container): + arr = np.array(data, dtype=np.float64) + # add single NaN and feature should still be included + arr[0, 0] = np.nan + # make all values in feature NaN and feature should be rejected + arr[:, 1] = np.nan + + X = arr if sparse_container is None else sparse_container(arr) + sel = VarianceThreshold().fit(X) + assert_array_equal([0, 3, 4], sel.get_support(indices=True)) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e305bc2a657dc042d63dfd42fb8aa9734365ccbf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__init__.py @@ -0,0 +1,24 @@ +"""Transformers for missing value imputation""" +import typing + +from ._base import MissingIndicator, SimpleImputer +from ._knn import KNNImputer + +if typing.TYPE_CHECKING: + # Avoid errors in type checkers (e.g. mypy) for experimental estimators. + # TODO: remove this check once the estimator is no longer experimental. + from ._iterative import IterativeImputer # noqa + +__all__ = ["MissingIndicator", "SimpleImputer", "KNNImputer"] + + +# TODO: remove this check once the estimator is no longer experimental. +def __getattr__(name): + if name == "IterativeImputer": + raise ImportError( + f"{name} is experimental and the API might change without any " + "deprecation cycle. 
To use it, you need to explicitly import " + "enable_iterative_imputer:\n" + "from sklearn.experimental import enable_iterative_imputer" + ) + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33fc4c79b75ef846462fb19c50e3b3fec7993733 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cac89264d6f618d44f30ead0df02434f3136cc8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1204fde5129e5b2cfd355c5c0cf2fb3f13ea5de7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecfbf826deb00c5e8df1b2e24f86f51af40c2e8c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..35aefda68d8f8342234d17a06144bee3711f1d44 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_base.py @@ -0,0 +1,1075 @@ +# Authors: Nicolas Tresegnie +# Sergey Feldman +# License: BSD 3 clause + +import numbers +import warnings +from collections import Counter +from functools import partial + +import numpy as np +import numpy.ma as ma +from scipy import sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import _is_pandas_na, is_scalar_nan +from ..utils._mask import _get_mask +from ..utils._param_validation import MissingValues, StrOptions +from ..utils.fixes import _mode +from ..utils.sparsefuncs import _get_median +from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted + + +def _check_inputs_dtype(X, missing_values): + if _is_pandas_na(missing_values): + # Allow using `pd.NA` as missing values to impute numerical arrays. + return + if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real): + raise ValueError( + "'X' and 'missing_values' types are expected to be" + " both numerical. 
Got X.dtype={} and " + " type(missing_values)={}.".format(X.dtype, type(missing_values)) + ) + + +def _most_frequent(array, extra_value, n_repeat): + """Compute the most frequent value in a 1d array extended with + [extra_value] * n_repeat, where extra_value is assumed to be not part + of the array.""" + # Compute the most frequent value in array only + if array.size > 0: + if array.dtype == object: + # scipy.stats.mode is slow with object dtype array. + # Python Counter is more efficient + counter = Counter(array) + most_frequent_count = counter.most_common(1)[0][1] + # tie breaking similarly to scipy.stats.mode + most_frequent_value = min( + value + for value, count in counter.items() + if count == most_frequent_count + ) + else: + mode = _mode(array) + most_frequent_value = mode[0][0] + most_frequent_count = mode[1][0] + else: + most_frequent_value = 0 + most_frequent_count = 0 + + # Compare to array + [extra_value] * n_repeat + if most_frequent_count == 0 and n_repeat == 0: + return np.nan + elif most_frequent_count < n_repeat: + return extra_value + elif most_frequent_count > n_repeat: + return most_frequent_value + elif most_frequent_count == n_repeat: + # tie breaking similarly to scipy.stats.mode + return min(most_frequent_value, extra_value) + + +class _BaseImputer(TransformerMixin, BaseEstimator): + """Base class for all imputers. + + It adds automatically support for `add_indicator`. + """ + + _parameter_constraints: dict = { + "missing_values": [MissingValues()], + "add_indicator": ["boolean"], + "keep_empty_features": ["boolean"], + } + + def __init__( + self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False + ): + self.missing_values = missing_values + self.add_indicator = add_indicator + self.keep_empty_features = keep_empty_features + + def _fit_indicator(self, X): + """Fit a MissingIndicator.""" + if self.add_indicator: + self.indicator_ = MissingIndicator( + missing_values=self.missing_values, error_on_new=False + ) + self.indicator_._fit(X, precomputed=True) + else: + self.indicator_ = None + + def _transform_indicator(self, X): + """Compute the indicator mask.' + + Note that X must be the original data as passed to the imputer before + any imputation, since imputation may be done inplace in some cases. + """ + if self.add_indicator: + if not hasattr(self, "indicator_"): + raise ValueError( + "Make sure to call _fit_indicator before _transform_indicator" + ) + return self.indicator_.transform(X) + + def _concatenate_indicator(self, X_imputed, X_indicator): + """Concatenate indicator mask with the imputed data.""" + if not self.add_indicator: + return X_imputed + + if sp.issparse(X_imputed): + # sp.hstack may result in different formats between sparse arrays and + # matrices; specify the format to keep consistent behavior + hstack = partial(sp.hstack, format=X_imputed.format) + else: + hstack = np.hstack + + if X_indicator is None: + raise ValueError( + "Data from the missing indicator are not provided. Call " + "_fit_indicator and _transform_indicator in the imputer " + "implementation." 
+ ) + + return hstack((X_imputed, X_indicator)) + + def _concatenate_indicator_feature_names_out(self, names, input_features): + if not self.add_indicator: + return names + + indicator_names = self.indicator_.get_feature_names_out(input_features) + return np.concatenate([names, indicator_names]) + + def _more_tags(self): + return {"allow_nan": is_scalar_nan(self.missing_values)} + + +class SimpleImputer(_BaseImputer): + """Univariate imputer for completing missing values with simple strategies. + + Replace missing values using a descriptive statistic (e.g. mean, median, or + most frequent) along each column, or using a constant value. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer` + estimator which is now removed. + + Parameters + ---------- + missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan + The placeholder for the missing values. All occurrences of + `missing_values` will be imputed. For pandas' dataframes with + nullable integer dtypes with missing values, `missing_values` + can be set to either `np.nan` or `pd.NA`. + + strategy : str, default='mean' + The imputation strategy. + + - If "mean", then replace missing values using the mean along + each column. Can only be used with numeric data. + - If "median", then replace missing values using the median along + each column. Can only be used with numeric data. + - If "most_frequent", then replace missing using the most frequent + value along each column. Can be used with strings or numeric data. + If there is more than one such value, only the smallest is returned. + - If "constant", then replace missing values with fill_value. Can be + used with strings or numeric data. + + .. versionadded:: 0.20 + strategy="constant" for fixed value imputation. + + fill_value : str or numerical value, default=None + When strategy == "constant", `fill_value` is used to replace all + occurrences of missing_values. For string or object data types, + `fill_value` must be a string. + If `None`, `fill_value` will be 0 when imputing numerical + data and "missing_value" for strings or object data types. + + copy : bool, default=True + If True, a copy of X will be created. If False, imputation will + be done in-place whenever possible. Note that, in the following cases, + a new copy will always be made, even if `copy=False`: + + - If `X` is not an array of floating values; + - If `X` is encoded as a CSR matrix; + - If `add_indicator=True`. + + add_indicator : bool, default=False + If True, a :class:`MissingIndicator` transform will stack onto output + of the imputer's transform. This allows a predictive estimator + to account for missingness despite imputation. If a feature has no + missing values at fit/train time, the feature won't appear on + the missing indicator even if there are missing values at + transform/test time. + + keep_empty_features : bool, default=False + If True, features that consist exclusively of missing values when + `fit` is called are returned in results when `transform` is called. + The imputed value is always `0` except when `strategy="constant"` + in which case `fill_value` will be used instead. + + .. versionadded:: 1.2 + + Attributes + ---------- + statistics_ : array of shape (n_features,) + The imputation fill value for each feature. + Computing statistics can result in `np.nan` values. + During :meth:`transform`, features corresponding to `np.nan` + statistics will be discarded. 
+ + indicator_ : :class:`~sklearn.impute.MissingIndicator` + Indicator used to add binary indicators for missing values. + `None` if `add_indicator=False`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + IterativeImputer : Multivariate imputer that estimates values to impute for + each feature with missing values from all the others. + KNNImputer : Multivariate imputer that estimates missing features using + nearest samples. + + Notes + ----- + Columns which only contained missing values at :meth:`fit` are discarded + upon :meth:`transform` if strategy is not `"constant"`. + + In a prediction context, simple imputation usually performs poorly when + associated with a weak learner. However, with a powerful learner, it can + lead to as good or better performance than complex imputation such as + :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.impute import SimpleImputer + >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean') + >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) + SimpleImputer() + >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] + >>> print(imp_mean.transform(X)) + [[ 7. 2. 3. ] + [ 4. 3.5 6. ] + [10. 3.5 9. ]] + + For a more detailed example see + :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`. + """ + + _parameter_constraints: dict = { + **_BaseImputer._parameter_constraints, + "strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})], + "fill_value": "no_validation", # any object is valid + "copy": ["boolean"], + } + + def __init__( + self, + *, + missing_values=np.nan, + strategy="mean", + fill_value=None, + copy=True, + add_indicator=False, + keep_empty_features=False, + ): + super().__init__( + missing_values=missing_values, + add_indicator=add_indicator, + keep_empty_features=keep_empty_features, + ) + self.strategy = strategy + self.fill_value = fill_value + self.copy = copy + + def _validate_input(self, X, in_fit): + if self.strategy in ("most_frequent", "constant"): + # If input is a list of strings, dtype = object. 
+ # Otherwise ValueError is raised in SimpleImputer + # with strategy='most_frequent' or 'constant' + # because the list is converted to Unicode numpy array + if isinstance(X, list) and any( + isinstance(elem, str) for row in X for elem in row + ): + dtype = object + else: + dtype = None + else: + dtype = FLOAT_DTYPES + + if not in_fit and self._fit_dtype.kind == "O": + # Use object dtype if fitted on object dtypes + dtype = self._fit_dtype + + if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values): + force_all_finite = "allow-nan" + else: + force_all_finite = True + + try: + X = self._validate_data( + X, + reset=in_fit, + accept_sparse="csc", + dtype=dtype, + force_all_finite=force_all_finite, + copy=self.copy, + ) + except ValueError as ve: + if "could not convert" in str(ve): + new_ve = ValueError( + "Cannot use {} strategy with non-numeric data:\n{}".format( + self.strategy, ve + ) + ) + raise new_ve from None + else: + raise ve + + if in_fit: + # Use the dtype seen in `fit` for non-`fit` conversion + self._fit_dtype = X.dtype + + _check_inputs_dtype(X, self.missing_values) + if X.dtype.kind not in ("i", "u", "f", "O"): + raise ValueError( + "SimpleImputer does not support data with dtype " + "{0}. Please provide either a numeric array (with" + " a floating point or integer dtype) or " + "categorical data represented either as an array " + "with integer dtype or an array of string values " + "with an object dtype.".format(X.dtype) + ) + + if sp.issparse(X) and self.missing_values == 0: + # missing_values = 0 not allowed with sparse data as it would + # force densification + raise ValueError( + "Imputation not possible when missing_values " + "== 0 and input is sparse. Provide a dense " + "array instead." + ) + + if self.strategy == "constant": + if in_fit and self.fill_value is not None: + fill_value_dtype = type(self.fill_value) + err_msg = ( + f"fill_value={self.fill_value!r} (of type {fill_value_dtype!r}) " + f"cannot be cast to the input data that is {X.dtype!r}. Make sure " + "that both dtypes are of the same kind." + ) + elif not in_fit: + fill_value_dtype = self.statistics_.dtype + err_msg = ( + f"The dtype of the filling value (i.e. {fill_value_dtype!r}) " + f"cannot be cast to the input data that is {X.dtype!r}. Make sure " + "that the dtypes of the input data is of the same kind between " + "fit and transform." + ) + else: + # By default, fill_value=None, and the replacement is always + # compatible with the input data + fill_value_dtype = X.dtype + + # Make sure we can safely cast fill_value dtype to the input data dtype + if not np.can_cast(fill_value_dtype, X.dtype, casting="same_kind"): + raise ValueError(err_msg) + + return X + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the imputer on `X`. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + X = self._validate_input(X, in_fit=True) + + # default fill_value is 0 for numerical input and "missing_value" + # otherwise + if self.fill_value is None: + if X.dtype.kind in ("i", "u", "f"): + fill_value = 0 + else: + fill_value = "missing_value" + else: + fill_value = self.fill_value + + if sp.issparse(X): + self.statistics_ = self._sparse_fit( + X, self.strategy, self.missing_values, fill_value + ) + else: + self.statistics_ = self._dense_fit( + X, self.strategy, self.missing_values, fill_value + ) + + return self + + def _sparse_fit(self, X, strategy, missing_values, fill_value): + """Fit the transformer on sparse data.""" + missing_mask = _get_mask(X, missing_values) + mask_data = missing_mask.data + n_implicit_zeros = X.shape[0] - np.diff(X.indptr) + + statistics = np.empty(X.shape[1]) + + if strategy == "constant": + # for constant strategy, self.statistics_ is used to store + # fill_value in each column + statistics.fill(fill_value) + else: + for i in range(X.shape[1]): + column = X.data[X.indptr[i] : X.indptr[i + 1]] + mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]] + column = column[~mask_column] + + # combine explicit and implicit zeros + mask_zeros = _get_mask(column, 0) + column = column[~mask_zeros] + n_explicit_zeros = mask_zeros.sum() + n_zeros = n_implicit_zeros[i] + n_explicit_zeros + + if len(column) == 0 and self.keep_empty_features: + # in case we want to keep columns with only missing values. + statistics[i] = 0 + else: + if strategy == "mean": + s = column.size + n_zeros + statistics[i] = np.nan if s == 0 else column.sum() / s + + elif strategy == "median": + statistics[i] = _get_median(column, n_zeros) + + elif strategy == "most_frequent": + statistics[i] = _most_frequent(column, 0, n_zeros) + + super()._fit_indicator(missing_mask) + + return statistics + + def _dense_fit(self, X, strategy, missing_values, fill_value): + """Fit the transformer on dense data.""" + missing_mask = _get_mask(X, missing_values) + masked_X = ma.masked_array(X, mask=missing_mask) + + super()._fit_indicator(missing_mask) + + # Mean + if strategy == "mean": + mean_masked = np.ma.mean(masked_X, axis=0) + # Avoid the warning "Warning: converting a masked element to nan." + mean = np.ma.getdata(mean_masked) + mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan + + return mean + + # Median + elif strategy == "median": + median_masked = np.ma.median(masked_X, axis=0) + # Avoid the warning "Warning: converting a masked element to nan." + median = np.ma.getdata(median_masked) + median[np.ma.getmaskarray(median_masked)] = ( + 0 if self.keep_empty_features else np.nan + ) + + return median + + # Most frequent + elif strategy == "most_frequent": + # Avoid use of scipy.stats.mstats.mode due to the required + # additional overhead and slow benchmarking performance. + # See Issue 14325 and PR 14399 for full discussion. 
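As a side note, the object-dtype path of the `_most_frequent` helper defined earlier in this module uses a `Counter` and breaks ties toward the smallest value, mirroring `scipy.stats.mode`; a small illustrative sketch:

from collections import Counter

import numpy as np

column = np.array(["b", "a", "b", "a"], dtype=object)
counter = Counter(column)
most_frequent_count = counter.most_common(1)[0][1]
# "a" and "b" both appear twice; the tie is broken toward the smallest value.
most_frequent_value = min(
    value for value, count in counter.items() if count == most_frequent_count
)
print(most_frequent_value)  # prints "a"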
+ + # To be able access the elements by columns + X = X.transpose() + mask = missing_mask.transpose() + + if X.dtype.kind == "O": + most_frequent = np.empty(X.shape[0], dtype=object) + else: + most_frequent = np.empty(X.shape[0]) + + for i, (row, row_mask) in enumerate(zip(X[:], mask[:])): + row_mask = np.logical_not(row_mask).astype(bool) + row = row[row_mask] + if len(row) == 0 and self.keep_empty_features: + most_frequent[i] = 0 + else: + most_frequent[i] = _most_frequent(row, np.nan, 0) + + return most_frequent + + # Constant + elif strategy == "constant": + # for constant strategy, self.statistcs_ is used to store + # fill_value in each column + return np.full(X.shape[1], fill_value, dtype=X.dtype) + + def transform(self, X): + """Impute all missing values in `X`. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + The input data to complete. + + Returns + ------- + X_imputed : {ndarray, sparse matrix} of shape \ + (n_samples, n_features_out) + `X` with imputed values. + """ + check_is_fitted(self) + + X = self._validate_input(X, in_fit=False) + statistics = self.statistics_ + + if X.shape[1] != statistics.shape[0]: + raise ValueError( + "X has %d features per sample, expected %d" + % (X.shape[1], self.statistics_.shape[0]) + ) + + # compute mask before eliminating invalid features + missing_mask = _get_mask(X, self.missing_values) + + # Decide whether to keep missing features + if self.strategy == "constant" or self.keep_empty_features: + valid_statistics = statistics + valid_statistics_indexes = None + else: + # same as np.isnan but also works for object dtypes + invalid_mask = _get_mask(statistics, np.nan) + valid_mask = np.logical_not(invalid_mask) + valid_statistics = statistics[valid_mask] + valid_statistics_indexes = np.flatnonzero(valid_mask) + + if invalid_mask.any(): + invalid_features = np.arange(X.shape[1])[invalid_mask] + # use feature names warning if features are provided + if hasattr(self, "feature_names_in_"): + invalid_features = self.feature_names_in_[invalid_features] + warnings.warn( + "Skipping features without any observed values:" + f" {invalid_features}. At least one non-missing value is needed" + f" for imputation with strategy='{self.strategy}'." + ) + X = X[:, valid_statistics_indexes] + + # Do actual imputation + if sp.issparse(X): + if self.missing_values == 0: + raise ValueError( + "Imputation not possible when missing_values " + "== 0 and input is sparse. Provide a dense " + "array instead." + ) + else: + # if no invalid statistics are found, use the mask computed + # before, else recompute mask + if valid_statistics_indexes is None: + mask = missing_mask.data + else: + mask = _get_mask(X.data, self.missing_values) + indexes = np.repeat( + np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr) + )[mask] + + X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False) + else: + # use mask computed before eliminating invalid mask + if valid_statistics_indexes is None: + mask_valid_features = missing_mask + else: + mask_valid_features = missing_mask[:, valid_statistics_indexes] + n_missing = np.sum(mask_valid_features, axis=0) + values = np.repeat(valid_statistics, n_missing) + coordinates = np.where(mask_valid_features.transpose())[::-1] + + X[coordinates] = values + + X_indicator = super()._transform_indicator(missing_mask) + + return super()._concatenate_indicator(X, X_indicator) + + def inverse_transform(self, X): + """Convert the data back to the original representation. 
+ + Inverts the `transform` operation performed on an array. + This operation can only be performed after :class:`SimpleImputer` is + instantiated with `add_indicator=True`. + + Note that `inverse_transform` can only invert the transform in + features that have binary indicators for missing values. If a feature + has no missing values at `fit` time, the feature won't have a binary + indicator, and the imputation done at `transform` time won't be + inverted. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : array-like of shape \ + (n_samples, n_features + n_features_missing_indicator) + The imputed data to be reverted to original data. It has to be + an augmented array of imputed data and the missing indicator mask. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + The original `X` with missing values as it was prior + to imputation. + """ + check_is_fitted(self) + + if not self.add_indicator: + raise ValueError( + "'inverse_transform' works only when " + "'SimpleImputer' is instantiated with " + "'add_indicator=True'. " + f"Got 'add_indicator={self.add_indicator}' " + "instead." + ) + + n_features_missing = len(self.indicator_.features_) + non_empty_feature_count = X.shape[1] - n_features_missing + array_imputed = X[:, :non_empty_feature_count].copy() + missing_mask = X[:, non_empty_feature_count:].astype(bool) + + n_features_original = len(self.statistics_) + shape_original = (X.shape[0], n_features_original) + X_original = np.zeros(shape_original) + X_original[:, self.indicator_.features_] = missing_mask + full_mask = X_original.astype(bool) + + imputed_idx, original_idx = 0, 0 + while imputed_idx < len(array_imputed.T): + if not np.all(X_original[:, original_idx]): + X_original[:, original_idx] = array_imputed.T[imputed_idx] + imputed_idx += 1 + original_idx += 1 + else: + original_idx += 1 + + X_original[full_mask] = self.missing_values + return X_original + + def _more_tags(self): + return { + "allow_nan": _is_pandas_na(self.missing_values) or is_scalar_nan( + self.missing_values + ) + } + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan)) + names = input_features[non_missing_mask] + return self._concatenate_indicator_feature_names_out(names, input_features) + + +class MissingIndicator(TransformerMixin, BaseEstimator): + """Binary indicators for missing values. + + Note that this component typically should not be used in a vanilla + :class:`~sklearn.pipeline.Pipeline` consisting of transformers and a + classifier, but rather could be added using a + :class:`~sklearn.pipeline.FeatureUnion` or + :class:`~sklearn.compose.ColumnTransformer`. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.20 + + Parameters + ---------- + missing_values : int, float, str, np.nan or None, default=np.nan + The placeholder for the missing values. All occurrences of + `missing_values` will be imputed. For pandas' dataframes with + nullable integer dtypes with missing values, `missing_values` + should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. + + features : {'missing-only', 'all'}, default='missing-only' + Whether the imputer mask should represent all or a subset of + features. + + - If `'missing-only'` (default), the imputer mask will only represent + features containing missing values during fit time. + - If `'all'`, the imputer mask will represent all features. + + sparse : bool or 'auto', default='auto' + Whether the imputer mask format should be sparse or dense. + + - If `'auto'` (default), the imputer mask will be of same type as + input. + - If `True`, the imputer mask will be a sparse matrix. + - If `False`, the imputer mask will be a numpy array. + + error_on_new : bool, default=True + If `True`, :meth:`transform` will raise an error when there are + features with missing values that have no missing values in + :meth:`fit`. This is applicable only when `features='missing-only'`. + + Attributes + ---------- + features_ : ndarray of shape (n_missing_features,) or (n_features,) + The features indices which will be returned when calling + :meth:`transform`. They are computed during :meth:`fit`. If + `features='all'`, `features_` is equal to `range(n_features)`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SimpleImputer : Univariate imputation of missing values. + IterativeImputer : Multivariate imputation of missing values. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.impute import MissingIndicator + >>> X1 = np.array([[np.nan, 1, 3], + ... [4, 0, np.nan], + ... [8, 1, 0]]) + >>> X2 = np.array([[5, 1, np.nan], + ... [np.nan, 2, 3], + ... [2, 4, 0]]) + >>> indicator = MissingIndicator() + >>> indicator.fit(X1) + MissingIndicator() + >>> X2_tr = indicator.transform(X2) + >>> X2_tr + array([[False, True], + [ True, False], + [False, False]]) + """ + + _parameter_constraints: dict = { + "missing_values": [MissingValues()], + "features": [StrOptions({"missing-only", "all"})], + "sparse": ["boolean", StrOptions({"auto"})], + "error_on_new": ["boolean"], + } + + def __init__( + self, + *, + missing_values=np.nan, + features="missing-only", + sparse="auto", + error_on_new=True, + ): + self.missing_values = missing_values + self.features = features + self.sparse = sparse + self.error_on_new = error_on_new + + def _get_missing_features_info(self, X): + """Compute the imputer mask and the indices of the features + containing missing values. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input data with missing values. Note that `X` has been + checked in :meth:`fit` and :meth:`transform` before to call this + function. + + Returns + ------- + imputer_mask : {ndarray, sparse matrix} of shape \ + (n_samples, n_features) + The imputer mask of the original data. + + features_with_missing : ndarray of shape (n_features_with_missing) + The features containing missing values. 
+ """ + if not self._precomputed: + imputer_mask = _get_mask(X, self.missing_values) + else: + imputer_mask = X + + if sp.issparse(X): + imputer_mask.eliminate_zeros() + + if self.features == "missing-only": + n_missing = imputer_mask.getnnz(axis=0) + + if self.sparse is False: + imputer_mask = imputer_mask.toarray() + elif imputer_mask.format == "csr": + imputer_mask = imputer_mask.tocsc() + else: + if not self._precomputed: + imputer_mask = _get_mask(X, self.missing_values) + else: + imputer_mask = X + + if self.features == "missing-only": + n_missing = imputer_mask.sum(axis=0) + + if self.sparse is True: + imputer_mask = sp.csc_matrix(imputer_mask) + + if self.features == "all": + features_indices = np.arange(X.shape[1]) + else: + features_indices = np.flatnonzero(n_missing) + + return imputer_mask, features_indices + + def _validate_input(self, X, in_fit): + if not is_scalar_nan(self.missing_values): + force_all_finite = True + else: + force_all_finite = "allow-nan" + X = self._validate_data( + X, + reset=in_fit, + accept_sparse=("csc", "csr"), + dtype=None, + force_all_finite=force_all_finite, + ) + _check_inputs_dtype(X, self.missing_values) + if X.dtype.kind not in ("i", "u", "f", "O"): + raise ValueError( + "MissingIndicator does not support data with " + "dtype {0}. Please provide either a numeric array" + " (with a floating point or integer dtype) or " + "categorical data represented either as an array " + "with integer dtype or an array of string values " + "with an object dtype.".format(X.dtype) + ) + + if sp.issparse(X) and self.missing_values == 0: + # missing_values = 0 not allowed with sparse data as it would + # force densification + raise ValueError( + "Sparse input with missing_values=0 is " + "not supported. Provide a dense " + "array instead." + ) + + return X + + def _fit(self, X, y=None, precomputed=False): + """Fit the transformer on `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + If `precomputed=True`, then `X` is a mask of the input data. + + precomputed : bool + Whether the input data is a mask. + + Returns + ------- + imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \ + n_features) + The imputer mask of the original data. + """ + if precomputed: + if not (hasattr(X, "dtype") and X.dtype.kind == "b"): + raise ValueError("precomputed is True but the input data is not a mask") + self._precomputed = True + else: + self._precomputed = False + + # Need not validate X again as it would have already been validated + # in the Imputer calling MissingIndicator + if not self._precomputed: + X = self._validate_input(X, in_fit=True) + else: + # only create `n_features_in_` in the precomputed case + self._check_n_features(X, reset=True) + + self._n_features = X.shape[1] + + missing_features_info = self._get_missing_features_info(X) + self.features_ = missing_features_info[1] + + return missing_features_info[0] + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the transformer on `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + self._fit(X, y) + + return self + + def transform(self, X): + """Generate missing values indicator for `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data to complete. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \ + or (n_samples, n_features_with_missing) + The missing indicator for input data. The data type of `Xt` + will be boolean. + """ + check_is_fitted(self) + + # Need not validate X again as it would have already been validated + # in the Imputer calling MissingIndicator + if not self._precomputed: + X = self._validate_input(X, in_fit=False) + else: + if not (hasattr(X, "dtype") and X.dtype.kind == "b"): + raise ValueError("precomputed is True but the input data is not a mask") + + imputer_mask, features = self._get_missing_features_info(X) + + if self.features == "missing-only": + features_diff_fit_trans = np.setdiff1d(features, self.features_) + if self.error_on_new and features_diff_fit_trans.size > 0: + raise ValueError( + "The features {} have missing values " + "in transform but have no missing values " + "in fit.".format(features_diff_fit_trans) + ) + + if self.features_.size < self._n_features: + imputer_mask = imputer_mask[:, self.features_] + + return imputer_mask + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Generate missing values indicator for `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data to complete. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \ + or (n_samples, n_features_with_missing) + The missing indicator for input data. The data type of `Xt` + will be boolean. + """ + imputer_mask = self._fit(X, y) + + if self.features_.size < self._n_features: + imputer_mask = imputer_mask[:, self.features_] + + return imputer_mask + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
+ """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + prefix = self.__class__.__name__.lower() + return np.asarray( + [ + f"{prefix}_{feature_name}" + for feature_name in input_features[self.features_] + ], + dtype=object, + ) + + def _more_tags(self): + return { + "allow_nan": True, + "X_types": ["2darray", "string"], + "preserves_dtype": [], + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_iterative.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_iterative.py new file mode 100644 index 0000000000000000000000000000000000000000..11bca36773e6493b38359eb4bb6e3f6300a6aa5f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_iterative.py @@ -0,0 +1,906 @@ +import warnings +from collections import namedtuple +from numbers import Integral, Real +from time import time + +import numpy as np +from scipy import stats + +from ..base import _fit_context, clone +from ..exceptions import ConvergenceWarning +from ..preprocessing import normalize +from ..utils import ( + _safe_assign, + _safe_indexing, + check_array, + check_random_state, + is_scalar_nan, +) +from ..utils._mask import _get_mask +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted +from ._base import SimpleImputer, _BaseImputer, _check_inputs_dtype + +_ImputerTriplet = namedtuple( + "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"] +) + + +def _assign_where(X1, X2, cond): + """Assign X2 to X1 where cond is True. + + Parameters + ---------- + X1 : ndarray or dataframe of shape (n_samples, n_features) + Data. + + X2 : ndarray of shape (n_samples, n_features) + Data to be assigned. + + cond : ndarray of shape (n_samples, n_features) + Boolean mask to assign data. + """ + if hasattr(X1, "mask"): # pandas dataframes + X1.mask(cond=cond, other=X2, inplace=True) + else: # ndarrays + X1[cond] = X2[cond] + + +class IterativeImputer(_RoutingNotSupportedMixin, _BaseImputer): + """Multivariate imputer that estimates each feature from all the others. + + A strategy for imputing missing values by modeling each feature with + missing values as a function of other features in a round-robin fashion. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.21 + + .. note:: + + This estimator is still **experimental** for now: the predictions + and the API might change without any deprecation cycle. To use it, + you need to explicitly import `enable_iterative_imputer`:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_iterative_imputer # noqa + >>> # now you can import normally from sklearn.impute + >>> from sklearn.impute import IterativeImputer + + Parameters + ---------- + estimator : estimator object, default=BayesianRidge() + The estimator to use at each step of the round-robin imputation. + If `sample_posterior=True`, the estimator must support + `return_std` in its `predict` method. + + missing_values : int or np.nan, default=np.nan + The placeholder for the missing values. All occurrences of + `missing_values` will be imputed. For pandas' dataframes with + nullable integer dtypes with missing values, `missing_values` + should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. 
+ + sample_posterior : bool, default=False + Whether to sample from the (Gaussian) predictive posterior of the + fitted estimator for each imputation. Estimator must support + `return_std` in its `predict` method if set to `True`. Set to + `True` if using `IterativeImputer` for multiple imputations. + + max_iter : int, default=10 + Maximum number of imputation rounds to perform before returning the + imputations computed during the final round. A round is a single + imputation of each feature with missing values. The stopping criterion + is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`, + where `X_t` is `X` at iteration `t`. Note that early stopping is only + applied if `sample_posterior=False`. + + tol : float, default=1e-3 + Tolerance of the stopping condition. + + n_nearest_features : int, default=None + Number of other features to use to estimate the missing values of + each feature column. Nearness between features is measured using + the absolute correlation coefficient between each feature pair (after + initial imputation). To ensure coverage of features throughout the + imputation process, the neighbor features are not necessarily nearest, + but are drawn with probability proportional to correlation for each + imputed target feature. Can provide significant speed-up when the + number of features is huge. If `None`, all features will be used. + + initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \ + default='mean' + Which strategy to use to initialize the missing values. Same as the + `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`. + + fill_value : str or numerical value, default=None + When `strategy="constant"`, `fill_value` is used to replace all + occurrences of missing_values. For string or object data types, + `fill_value` must be a string. + If `None`, `fill_value` will be 0 when imputing numerical + data and "missing_value" for strings or object data types. + + .. versionadded:: 1.3 + + imputation_order : {'ascending', 'descending', 'roman', 'arabic', \ + 'random'}, default='ascending' + The order in which the features will be imputed. Possible values: + + - `'ascending'`: From features with fewest missing values to most. + - `'descending'`: From features with most missing values to fewest. + - `'roman'`: Left to right. + - `'arabic'`: Right to left. + - `'random'`: A random order for each round. + + skip_complete : bool, default=False + If `True` then features with missing values during :meth:`transform` + which did not have any missing values during :meth:`fit` will be + imputed with the initial imputation method only. Set to `True` if you + have many features with no missing values at both :meth:`fit` and + :meth:`transform` time to save compute. + + min_value : float or array-like of shape (n_features,), default=-np.inf + Minimum possible imputed value. Broadcast to shape `(n_features,)` if + scalar. If array-like, expects shape `(n_features,)`, one min value for + each feature. The default is `-np.inf`. + + .. versionchanged:: 0.23 + Added support for array-like. + + max_value : float or array-like of shape (n_features,), default=np.inf + Maximum possible imputed value. Broadcast to shape `(n_features,)` if + scalar. If array-like, expects shape `(n_features,)`, one max value for + each feature. The default is `np.inf`. + + .. versionchanged:: 0.23 + Added support for array-like. + + verbose : int, default=0 + Verbosity flag, controls the debug messages that are issued + as functions are evaluated. 
The higher, the more verbose. Can be 0, 1, + or 2. + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator to use. Randomizes + selection of estimator features if `n_nearest_features` is not `None`, + the `imputation_order` if `random`, and the sampling from posterior if + `sample_posterior=True`. Use an integer for determinism. + See :term:`the Glossary `. + + add_indicator : bool, default=False + If `True`, a :class:`MissingIndicator` transform will stack onto output + of the imputer's transform. This allows a predictive estimator + to account for missingness despite imputation. If a feature has no + missing values at fit/train time, the feature won't appear on + the missing indicator even if there are missing values at + transform/test time. + + keep_empty_features : bool, default=False + If True, features that consist exclusively of missing values when + `fit` is called are returned in results when `transform` is called. + The imputed value is always `0` except when + `initial_strategy="constant"` in which case `fill_value` will be + used instead. + + .. versionadded:: 1.2 + + Attributes + ---------- + initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer` + Imputer used to initialize the missing values. + + imputation_sequence_ : list of tuples + Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where + `feat_idx` is the current feature to be imputed, + `neighbor_feat_idx` is the array of other features used to impute the + current feature, and `estimator` is the trained estimator used for + the imputation. Length is `self.n_features_with_missing_ * + self.n_iter_`. + + n_iter_ : int + Number of iteration rounds that occurred. Will be less than + `self.max_iter` if early stopping criterion was reached. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_features_with_missing_ : int + Number of features with missing values. + + indicator_ : :class:`~sklearn.impute.MissingIndicator` + Indicator used to add binary indicators for missing values. + `None` if `add_indicator=False`. + + random_state_ : RandomState instance + RandomState instance that is generated either from a seed, the random + number generator or by `np.random`. + + See Also + -------- + SimpleImputer : Univariate imputer for completing missing values + with simple strategies. + KNNImputer : Multivariate imputer that estimates missing features using + nearest samples. + + Notes + ----- + To support imputation in inductive mode we store each feature's estimator + during the :meth:`fit` phase, and predict without refitting (in order) + during the :meth:`transform` phase. + + Features which contain all missing values at :meth:`fit` are discarded upon + :meth:`transform`. + + Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))` + where :math:`k` = `max_iter`, :math:`n` the number of samples and + :math:`p` the number of features. It thus becomes prohibitively costly when + the number of features increases. Setting + `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol` + can help to reduce its computational cost. + + Depending on the nature of missing values, simple imputers can be + preferable in a prediction context. + + References + ---------- + .. 
[1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice: + Multivariate Imputation by Chained Equations in R". Journal of + Statistical Software 45: 1-67. + `_ + + .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in + Multivariate Data Suitable for use with an Electronic Computer". + Journal of the Royal Statistical Society 22(2): 302-306. + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.experimental import enable_iterative_imputer + >>> from sklearn.impute import IterativeImputer + >>> imp_mean = IterativeImputer(random_state=0) + >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) + IterativeImputer(random_state=0) + >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] + >>> imp_mean.transform(X) + array([[ 6.9584..., 2. , 3. ], + [ 4. , 2.6000..., 6. ], + [10. , 4.9999..., 9. ]]) + + For a more detailed example see + :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py` or + :ref:`sphx_glr_auto_examples_impute_plot_iterative_imputer_variants_comparison.py`. + """ + + _parameter_constraints: dict = { + **_BaseImputer._parameter_constraints, + "estimator": [None, HasMethods(["fit", "predict"])], + "sample_posterior": ["boolean"], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")], + "initial_strategy": [ + StrOptions({"mean", "median", "most_frequent", "constant"}) + ], + "fill_value": "no_validation", # any object is valid + "imputation_order": [ + StrOptions({"ascending", "descending", "roman", "arabic", "random"}) + ], + "skip_complete": ["boolean"], + "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"], + "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator=None, + *, + missing_values=np.nan, + sample_posterior=False, + max_iter=10, + tol=1e-3, + n_nearest_features=None, + initial_strategy="mean", + fill_value=None, + imputation_order="ascending", + skip_complete=False, + min_value=-np.inf, + max_value=np.inf, + verbose=0, + random_state=None, + add_indicator=False, + keep_empty_features=False, + ): + super().__init__( + missing_values=missing_values, + add_indicator=add_indicator, + keep_empty_features=keep_empty_features, + ) + + self.estimator = estimator + self.sample_posterior = sample_posterior + self.max_iter = max_iter + self.tol = tol + self.n_nearest_features = n_nearest_features + self.initial_strategy = initial_strategy + self.fill_value = fill_value + self.imputation_order = imputation_order + self.skip_complete = skip_complete + self.min_value = min_value + self.max_value = max_value + self.verbose = verbose + self.random_state = random_state + + def _impute_one_feature( + self, + X_filled, + mask_missing_values, + feat_idx, + neighbor_feat_idx, + estimator=None, + fit_mode=True, + ): + """Impute a single feature from the others provided. + + This function predicts the missing values of one of the features using + the current estimates of all the other features. The `estimator` must + support `return_std=True` in its `predict` method for this function + to work. + + Parameters + ---------- + X_filled : ndarray + Input data with the most recent imputations. + + mask_missing_values : ndarray + Input data's missing indicator matrix. + + feat_idx : int + Index of the feature currently being imputed. 
+ + neighbor_feat_idx : ndarray + Indices of the features to be used in imputing `feat_idx`. + + estimator : object + The estimator to use at this step of the round-robin imputation. + If `sample_posterior=True`, the estimator must support + `return_std` in its `predict` method. + If None, it will be cloned from self._estimator. + + fit_mode : boolean, default=True + Whether to fit and predict with the estimator or just predict. + + Returns + ------- + X_filled : ndarray + Input data with `X_filled[missing_row_mask, feat_idx]` updated. + + estimator : estimator with sklearn API + The fitted estimator used to impute + `X_filled[missing_row_mask, feat_idx]`. + """ + if estimator is None and fit_mode is False: + raise ValueError( + "If fit_mode is False, then an already-fitted " + "estimator should be passed in." + ) + + if estimator is None: + estimator = clone(self._estimator) + + missing_row_mask = mask_missing_values[:, feat_idx] + if fit_mode: + X_train = _safe_indexing( + _safe_indexing(X_filled, neighbor_feat_idx, axis=1), + ~missing_row_mask, + axis=0, + ) + y_train = _safe_indexing( + _safe_indexing(X_filled, feat_idx, axis=1), + ~missing_row_mask, + axis=0, + ) + estimator.fit(X_train, y_train) + + # if no missing values, don't predict + if np.sum(missing_row_mask) == 0: + return X_filled, estimator + + # get posterior samples if there is at least one missing value + X_test = _safe_indexing( + _safe_indexing(X_filled, neighbor_feat_idx, axis=1), + missing_row_mask, + axis=0, + ) + if self.sample_posterior: + mus, sigmas = estimator.predict(X_test, return_std=True) + imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype) + # two types of problems: (1) non-positive sigmas + # (2) mus outside legal range of min_value and max_value + # (results in inf sample) + positive_sigmas = sigmas > 0 + imputed_values[~positive_sigmas] = mus[~positive_sigmas] + mus_too_low = mus < self._min_value[feat_idx] + imputed_values[mus_too_low] = self._min_value[feat_idx] + mus_too_high = mus > self._max_value[feat_idx] + imputed_values[mus_too_high] = self._max_value[feat_idx] + # the rest can be sampled without statistical issues + inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high + mus = mus[inrange_mask] + sigmas = sigmas[inrange_mask] + a = (self._min_value[feat_idx] - mus) / sigmas + b = (self._max_value[feat_idx] - mus) / sigmas + + truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas) + imputed_values[inrange_mask] = truncated_normal.rvs( + random_state=self.random_state_ + ) + else: + imputed_values = estimator.predict(X_test) + imputed_values = np.clip( + imputed_values, self._min_value[feat_idx], self._max_value[feat_idx] + ) + + # update the feature + _safe_assign( + X_filled, + imputed_values, + row_indexer=missing_row_mask, + column_indexer=feat_idx, + ) + return X_filled, estimator + + def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat): + """Get a list of other features to predict `feat_idx`. + + If `self.n_nearest_features` is less than or equal to the total + number of features, then use a probability proportional to the absolute + correlation between `feat_idx` and each other feature to randomly + choose a subsample of the other features (without replacement). + + Parameters + ---------- + n_features : int + Number of features in `X`. + + feat_idx : int + Index of the feature currently being imputed. + + abs_corr_mat : ndarray, shape (n_features, n_features) + Absolute correlation matrix of `X`. 
The diagonal has been zeroed + out and each feature has been normalized to sum to 1. Can be None. + + Returns + ------- + neighbor_feat_idx : array-like + The features to use to impute `feat_idx`. + """ + if self.n_nearest_features is not None and self.n_nearest_features < n_features: + p = abs_corr_mat[:, feat_idx] + neighbor_feat_idx = self.random_state_.choice( + np.arange(n_features), self.n_nearest_features, replace=False, p=p + ) + else: + inds_left = np.arange(feat_idx) + inds_right = np.arange(feat_idx + 1, n_features) + neighbor_feat_idx = np.concatenate((inds_left, inds_right)) + return neighbor_feat_idx + + def _get_ordered_idx(self, mask_missing_values): + """Decide in what order we will update the features. + + As a homage to the MICE R package, we will have 4 main options of + how to order the updates, and use a random order if anything else + is specified. + + Also, this function skips features which have no missing values. + + Parameters + ---------- + mask_missing_values : array-like, shape (n_samples, n_features) + Input data's missing indicator matrix, where `n_samples` is the + number of samples and `n_features` is the number of features. + + Returns + ------- + ordered_idx : ndarray, shape (n_features,) + The order in which to impute the features. + """ + frac_of_missing_values = mask_missing_values.mean(axis=0) + if self.skip_complete: + missing_values_idx = np.flatnonzero(frac_of_missing_values) + else: + missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0]) + if self.imputation_order == "roman": + ordered_idx = missing_values_idx + elif self.imputation_order == "arabic": + ordered_idx = missing_values_idx[::-1] + elif self.imputation_order == "ascending": + n = len(frac_of_missing_values) - len(missing_values_idx) + ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:] + elif self.imputation_order == "descending": + n = len(frac_of_missing_values) - len(missing_values_idx) + ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1] + elif self.imputation_order == "random": + ordered_idx = missing_values_idx + self.random_state_.shuffle(ordered_idx) + return ordered_idx + + def _get_abs_corr_mat(self, X_filled, tolerance=1e-6): + """Get absolute correlation matrix between features. + + Parameters + ---------- + X_filled : ndarray, shape (n_samples, n_features) + Input data with the most recent imputations. + + tolerance : float, default=1e-6 + `abs_corr_mat` can have nans, which will be replaced + with `tolerance`. + + Returns + ------- + abs_corr_mat : ndarray, shape (n_features, n_features) + Absolute correlation matrix of `X` at the beginning of the + current round. The diagonal has been zeroed out and each feature's + absolute correlations with all others have been normalized to sum + to 1. + """ + n_features = X_filled.shape[1] + if self.n_nearest_features is None or self.n_nearest_features >= n_features: + return None + with np.errstate(invalid="ignore"): + # if a feature in the neighborhood has only a single value + # (e.g., categorical feature), the std. dev. will be null and + # np.corrcoef will raise a warning due to a division by zero + abs_corr_mat = np.abs(np.corrcoef(X_filled.T)) + # np.corrcoef is not defined for features with zero std + abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance + # ensures exploration, i.e. 
at least some probability of sampling + np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat) + # features are not their own neighbors + np.fill_diagonal(abs_corr_mat, 0) + # needs to sum to 1 for np.random.choice sampling + abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False) + return abs_corr_mat + + def _initial_imputation(self, X, in_fit=False): + """Perform initial imputation for input `X`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + in_fit : bool, default=False + Whether function is called in :meth:`fit`. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + X_filled : ndarray of shape (n_samples, n_features) + Input data with the most recent imputations. + + mask_missing_values : ndarray of shape (n_samples, n_features) + Input data's missing indicator matrix, where `n_samples` is the + number of samples and `n_features` is the number of features, + masked by non-missing features. + + X_missing_mask : ndarray, shape (n_samples, n_features) + Input data's mask matrix indicating missing datapoints, where + `n_samples` is the number of samples and `n_features` is the + number of features. + """ + if is_scalar_nan(self.missing_values): + force_all_finite = "allow-nan" + else: + force_all_finite = True + + X = self._validate_data( + X, + dtype=FLOAT_DTYPES, + order="F", + reset=in_fit, + force_all_finite=force_all_finite, + ) + _check_inputs_dtype(X, self.missing_values) + + X_missing_mask = _get_mask(X, self.missing_values) + mask_missing_values = X_missing_mask.copy() + if self.initial_imputer_ is None: + self.initial_imputer_ = SimpleImputer( + missing_values=self.missing_values, + strategy=self.initial_strategy, + fill_value=self.fill_value, + keep_empty_features=self.keep_empty_features, + ).set_output(transform="default") + X_filled = self.initial_imputer_.fit_transform(X) + else: + X_filled = self.initial_imputer_.transform(X) + + valid_mask = np.flatnonzero( + np.logical_not(np.isnan(self.initial_imputer_.statistics_)) + ) + + if not self.keep_empty_features: + # drop empty features + Xt = X[:, valid_mask] + mask_missing_values = mask_missing_values[:, valid_mask] + else: + # mark empty features as not missing and keep the original + # imputation + mask_missing_values[:, valid_mask] = True + Xt = X + + return Xt, X_filled, mask_missing_values, X_missing_mask + + @staticmethod + def _validate_limit(limit, limit_type, n_features): + """Validate the limits (min/max) of the feature values. + + Converts scalar min/max limits to vectors of shape `(n_features,)`. + + Parameters + ---------- + limit: scalar or array-like + The user-specified limit (i.e, min_value or max_value). + limit_type: {'max', 'min'} + Type of limit to validate. + n_features: int + Number of features in the dataset. + + Returns + ------- + limit: ndarray, shape(n_features,) + Array of limits, one for each feature. + """ + limit_bound = np.inf if limit_type == "max" else -np.inf + limit = limit_bound if limit is None else limit + if np.isscalar(limit): + limit = np.full(n_features, limit) + limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False) + if not limit.shape[0] == n_features: + raise ValueError( + f"'{limit_type}_value' should be of " + f"shape ({n_features},) when an array-like " + f"is provided. 
Got {limit.shape}, instead." + ) + return limit + + @_fit_context( + # IterativeImputer.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None): + """Fit the imputer on `X` and return the transformed `X`. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : array-like, shape (n_samples, n_features) + The imputed input data. + """ + self.random_state_ = getattr( + self, "random_state_", check_random_state(self.random_state) + ) + + if self.estimator is None: + from ..linear_model import BayesianRidge + + self._estimator = BayesianRidge() + else: + self._estimator = clone(self.estimator) + + self.imputation_sequence_ = [] + + self.initial_imputer_ = None + + X, Xt, mask_missing_values, complete_mask = self._initial_imputation( + X, in_fit=True + ) + + super()._fit_indicator(complete_mask) + X_indicator = super()._transform_indicator(complete_mask) + + if self.max_iter == 0 or np.all(mask_missing_values): + self.n_iter_ = 0 + return super()._concatenate_indicator(Xt, X_indicator) + + # Edge case: a single feature. We return the initial ... + if Xt.shape[1] == 1: + self.n_iter_ = 0 + return super()._concatenate_indicator(Xt, X_indicator) + + self._min_value = self._validate_limit(self.min_value, "min", X.shape[1]) + self._max_value = self._validate_limit(self.max_value, "max", X.shape[1]) + + if not np.all(np.greater(self._max_value, self._min_value)): + raise ValueError("One (or more) features have min_value >= max_value.") + + # order in which to impute + # note this is probably too slow for large feature data (d > 100000) + # and a better way would be good. 
+ # see: https://goo.gl/KyCNwj and subsequent comments + ordered_idx = self._get_ordered_idx(mask_missing_values) + self.n_features_with_missing_ = len(ordered_idx) + + abs_corr_mat = self._get_abs_corr_mat(Xt) + + n_samples, n_features = Xt.shape + if self.verbose > 0: + print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) + start_t = time() + if not self.sample_posterior: + Xt_previous = Xt.copy() + normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values])) + for self.n_iter_ in range(1, self.max_iter + 1): + if self.imputation_order == "random": + ordered_idx = self._get_ordered_idx(mask_missing_values) + + for feat_idx in ordered_idx: + neighbor_feat_idx = self._get_neighbor_feat_idx( + n_features, feat_idx, abs_corr_mat + ) + Xt, estimator = self._impute_one_feature( + Xt, + mask_missing_values, + feat_idx, + neighbor_feat_idx, + estimator=None, + fit_mode=True, + ) + estimator_triplet = _ImputerTriplet( + feat_idx, neighbor_feat_idx, estimator + ) + self.imputation_sequence_.append(estimator_triplet) + + if self.verbose > 1: + print( + "[IterativeImputer] Ending imputation round " + "%d/%d, elapsed time %0.2f" + % (self.n_iter_, self.max_iter, time() - start_t) + ) + + if not self.sample_posterior: + inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None) + if self.verbose > 0: + print( + "[IterativeImputer] Change: {}, scaled tolerance: {} ".format( + inf_norm, normalized_tol + ) + ) + if inf_norm < normalized_tol: + if self.verbose > 0: + print("[IterativeImputer] Early stopping criterion reached.") + break + Xt_previous = Xt.copy() + else: + if not self.sample_posterior: + warnings.warn( + "[IterativeImputer] Early stopping criterion not reached.", + ConvergenceWarning, + ) + _assign_where(Xt, X, cond=~mask_missing_values) + + return super()._concatenate_indicator(Xt, X_indicator) + + def transform(self, X): + """Impute all missing values in `X`. + + Note that this is stochastic, and that if `random_state` is not fixed, + repeated calls, or permuted input, results will differ. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data to complete. + + Returns + ------- + Xt : array-like, shape (n_samples, n_features) + The imputed input data. + """ + check_is_fitted(self) + + X, Xt, mask_missing_values, complete_mask = self._initial_imputation( + X, in_fit=False + ) + + X_indicator = super()._transform_indicator(complete_mask) + + if self.n_iter_ == 0 or np.all(mask_missing_values): + return super()._concatenate_indicator(Xt, X_indicator) + + imputations_per_round = len(self.imputation_sequence_) // self.n_iter_ + i_rnd = 0 + if self.verbose > 0: + print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) + start_t = time() + for it, estimator_triplet in enumerate(self.imputation_sequence_): + Xt, _ = self._impute_one_feature( + Xt, + mask_missing_values, + estimator_triplet.feat_idx, + estimator_triplet.neighbor_feat_idx, + estimator=estimator_triplet.estimator, + fit_mode=False, + ) + if not (it + 1) % imputations_per_round: + if self.verbose > 1: + print( + "[IterativeImputer] Ending imputation round " + "%d/%d, elapsed time %0.2f" + % (i_rnd + 1, self.n_iter_, time() - start_t) + ) + i_rnd += 1 + + _assign_where(Xt, X, cond=~mask_missing_values) + + return super()._concatenate_indicator(Xt, X_indicator) + + def fit(self, X, y=None): + """Fit the imputer on `X` and return self. 
+ + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Fitted estimator. + """ + self.fit_transform(X) + return self + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + names = self.initial_imputer_.get_feature_names_out(input_features) + return self._concatenate_indicator_feature_names_out(names, input_features) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_knn.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_knn.py new file mode 100644 index 0000000000000000000000000000000000000000..d20530bb67cb05017950a345455734cffd2f1008 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/_knn.py @@ -0,0 +1,401 @@ +# Authors: Ashim Bhattarai +# Thomas J Fan +# License: BSD 3 clause + +from numbers import Integral + +import numpy as np + +from ..base import _fit_context +from ..metrics import pairwise_distances_chunked +from ..metrics.pairwise import _NAN_METRICS +from ..neighbors._base import _get_weights +from ..utils import is_scalar_nan +from ..utils._mask import _get_mask +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.validation import FLOAT_DTYPES, _check_feature_names_in, check_is_fitted +from ._base import _BaseImputer + + +class KNNImputer(_BaseImputer): + """Imputation for completing missing values using k-Nearest Neighbors. + + Each sample's missing values are imputed using the mean value from + `n_neighbors` nearest neighbors found in the training set. Two samples are + close if the features that neither is missing are close. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + missing_values : int, float, str, np.nan or None, default=np.nan + The placeholder for the missing values. All occurrences of + `missing_values` will be imputed. For pandas' dataframes with + nullable integer dtypes with missing values, `missing_values` + should be set to np.nan, since `pd.NA` will be converted to np.nan. + + n_neighbors : int, default=5 + Number of neighboring samples to use for imputation. + + weights : {'uniform', 'distance'} or callable, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood are + weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. 
+ - callable : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + metric : {'nan_euclidean'} or callable, default='nan_euclidean' + Distance metric for searching neighbors. Possible values: + + - 'nan_euclidean' + - callable : a user-defined function which conforms to the definition + of ``_pairwise_callable(X, Y, metric, **kwds)``. The function + accepts two arrays, X and Y, and a `missing_values` keyword in + `kwds` and returns a scalar distance value. + + copy : bool, default=True + If True, a copy of X will be created. If False, imputation will + be done in-place whenever possible. + + add_indicator : bool, default=False + If True, a :class:`MissingIndicator` transform will stack onto the + output of the imputer's transform. This allows a predictive estimator + to account for missingness despite imputation. If a feature has no + missing values at fit/train time, the feature won't appear on the + missing indicator even if there are missing values at transform/test + time. + + keep_empty_features : bool, default=False + If True, features that consist exclusively of missing values when + `fit` is called are returned in results when `transform` is called. + The imputed value is always `0`. + + .. versionadded:: 1.2 + + Attributes + ---------- + indicator_ : :class:`~sklearn.impute.MissingIndicator` + Indicator used to add binary indicators for missing values. + ``None`` if add_indicator is False. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SimpleImputer : Univariate imputer for completing missing values + with simple strategies. + IterativeImputer : Multivariate imputer that estimates values to impute for + each feature with missing values from all the others. + + References + ---------- + * `Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor + Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing + value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17 + no. 6, 2001 Pages 520-525. + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.impute import KNNImputer + >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]] + >>> imputer = KNNImputer(n_neighbors=2) + >>> imputer.fit_transform(X) + array([[1. , 2. , 4. ], + [3. , 4. , 3. ], + [5.5, 6. , 5. ], + [8. , 8. , 7. ]]) + + For a more detailed example see + :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`. 
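# Editor's note: an illustrative cross-check, not part of the upstream diff, of the
# docstring example above. The NaN in row 0, column 2 is replaced by the uniform
# mean of that column over the two nearest donor rows, where "nearness" is the
# nan_euclidean distance computed on mutually observed features (see the
# `weights` and `metric` parameters documented above).
import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances

X = np.array([[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]])

# Only rows whose column-2 value is observed can donate, i.e. rows 1-3 here.
d = nan_euclidean_distances(X[:1], X[1:])[0]   # approx [3.46, 6.93, 11.29]
donors = 1 + np.argsort(d)[:2]                 # the two nearest donors: rows 1 and 2
print(X[donors, 2].mean())                     # 4.0 -> matches the doctest above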
+ """ + + _parameter_constraints: dict = { + **_BaseImputer._parameter_constraints, + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)], + "metric": [StrOptions(set(_NAN_METRICS)), callable], + "copy": ["boolean"], + } + + def __init__( + self, + *, + missing_values=np.nan, + n_neighbors=5, + weights="uniform", + metric="nan_euclidean", + copy=True, + add_indicator=False, + keep_empty_features=False, + ): + super().__init__( + missing_values=missing_values, + add_indicator=add_indicator, + keep_empty_features=keep_empty_features, + ) + self.n_neighbors = n_neighbors + self.weights = weights + self.metric = metric + self.copy = copy + + def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col): + """Helper function to impute a single column. + + Parameters + ---------- + dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors) + Distance matrix between the receivers and potential donors from + training set. There must be at least one non-nan distance between + a receiver and a potential donor. + + n_neighbors : int + Number of neighbors to consider. + + fit_X_col : ndarray of shape (n_potential_donors,) + Column of potential donors from training set. + + mask_fit_X_col : ndarray of shape (n_potential_donors,) + Missing mask for fit_X_col. + + Returns + ------- + imputed_values: ndarray of shape (n_receivers,) + Imputed values for receiver. + """ + # Get donors + donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[ + :, :n_neighbors + ] + + # Get weight matrix from distance matrix + donors_dist = dist_pot_donors[ + np.arange(donors_idx.shape[0])[:, None], donors_idx + ] + + weight_matrix = _get_weights(donors_dist, self.weights) + + # fill nans with zeros + if weight_matrix is not None: + weight_matrix[np.isnan(weight_matrix)] = 0.0 + + # Retrieve donor values and calculate kNN average + donors = fit_X_col.take(donors_idx) + donors_mask = mask_fit_X_col.take(donors_idx) + donors = np.ma.array(donors, mask=donors_mask) + + return np.ma.average(donors, axis=1, weights=weight_matrix).data + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the imputer on X. + + Parameters + ---------- + X : array-like shape of (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + The fitted `KNNImputer` class instance. + """ + # Check data integrity and calling arguments + if not is_scalar_nan(self.missing_values): + force_all_finite = True + else: + force_all_finite = "allow-nan" + + X = self._validate_data( + X, + accept_sparse=False, + dtype=FLOAT_DTYPES, + force_all_finite=force_all_finite, + copy=self.copy, + ) + + self._fit_X = X + self._mask_fit_X = _get_mask(self._fit_X, self.missing_values) + self._valid_mask = ~np.all(self._mask_fit_X, axis=0) + + super()._fit_indicator(self._mask_fit_X) + + return self + + def transform(self, X): + """Impute all missing values in X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data to complete. + + Returns + ------- + X : array-like of shape (n_samples, n_output_features) + The imputed dataset. `n_output_features` is the number of features + that is not always missing during `fit`. 
+ """ + + check_is_fitted(self) + if not is_scalar_nan(self.missing_values): + force_all_finite = True + else: + force_all_finite = "allow-nan" + X = self._validate_data( + X, + accept_sparse=False, + dtype=FLOAT_DTYPES, + force_all_finite=force_all_finite, + copy=self.copy, + reset=False, + ) + + mask = _get_mask(X, self.missing_values) + mask_fit_X = self._mask_fit_X + valid_mask = self._valid_mask + + X_indicator = super()._transform_indicator(mask) + + # Removes columns where the training data is all nan + if not np.any(mask): + # No missing values in X + if self.keep_empty_features: + Xc = X + Xc[:, ~valid_mask] = 0 + else: + Xc = X[:, valid_mask] + + # Even if there are no missing values in X, we still concatenate Xc + # with the missing value indicator matrix, X_indicator. + # This is to ensure that the output maintains consistency in terms + # of columns, regardless of whether missing values exist in X or not. + return super()._concatenate_indicator(Xc, X_indicator) + + row_missing_idx = np.flatnonzero(mask.any(axis=1)) + + non_missing_fix_X = np.logical_not(mask_fit_X) + + # Maps from indices from X to indices in dist matrix + dist_idx_map = np.zeros(X.shape[0], dtype=int) + dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0]) + + def process_chunk(dist_chunk, start): + row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)] + + # Find and impute missing by column + for col in range(X.shape[1]): + if not valid_mask[col]: + # column was all missing during training + continue + + col_mask = mask[row_missing_chunk, col] + if not np.any(col_mask): + # column has no missing values + continue + + (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col]) + + # receivers_idx are indices in X + receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)] + + # distances for samples that needed imputation for column + dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ + :, potential_donors_idx + ] + + # receivers with all nan distances impute with mean + all_nan_dist_mask = np.isnan(dist_subset).all(axis=1) + all_nan_receivers_idx = receivers_idx[all_nan_dist_mask] + + if all_nan_receivers_idx.size: + col_mean = np.ma.array( + self._fit_X[:, col], mask=mask_fit_X[:, col] + ).mean() + X[all_nan_receivers_idx, col] = col_mean + + if len(all_nan_receivers_idx) == len(receivers_idx): + # all receivers imputed with mean + continue + + # receivers with at least one defined distance + receivers_idx = receivers_idx[~all_nan_dist_mask] + dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ + :, potential_donors_idx + ] + + n_neighbors = min(self.n_neighbors, len(potential_donors_idx)) + value = self._calc_impute( + dist_subset, + n_neighbors, + self._fit_X[potential_donors_idx, col], + mask_fit_X[potential_donors_idx, col], + ) + X[receivers_idx, col] = value + + # process in fixed-memory chunks + gen = pairwise_distances_chunked( + X[row_missing_idx, :], + self._fit_X, + metric=self.metric, + missing_values=self.missing_values, + force_all_finite=force_all_finite, + reduce_func=process_chunk, + ) + for chunk in gen: + # process_chunk modifies X in place. No return value. + pass + + if self.keep_empty_features: + Xc = X + Xc[:, ~valid_mask] = 0 + else: + Xc = X[:, valid_mask] + + return super()._concatenate_indicator(Xc, X_indicator) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. 
+ + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + names = input_features[self._valid_mask] + return self._concatenate_indicator_feature_names_out(names, input_features) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3132dbc8d6dd2d357362beabd4629d28fdaad0ca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b75b016d6cb25420280e461bedafd350f4c550fd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a495f2bfd09ea6f475046f4a28d2a1932aa0f472 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7a92864c1335348f9339db5d367cb0bc22d7c67 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1a025e03cc2c17e1d8c361081c36a454045ab21 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py new file mode 100644 index 
0000000000000000000000000000000000000000..0c1bd83f7ca9ea8adde76940e2f7fdd86d89ea5c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py @@ -0,0 +1,107 @@ +import numpy as np +import pytest + +from sklearn.impute._base import _BaseImputer +from sklearn.impute._iterative import _assign_where +from sklearn.utils._mask import _get_mask +from sklearn.utils._testing import _convert_container, assert_allclose + + +@pytest.fixture +def data(): + X = np.random.randn(10, 2) + X[::2] = np.nan + return X + + +class NoFitIndicatorImputer(_BaseImputer): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + return self._concatenate_indicator(X, self._transform_indicator(X)) + + +class NoTransformIndicatorImputer(_BaseImputer): + def fit(self, X, y=None): + mask = _get_mask(X, value_to_mask=np.nan) + super()._fit_indicator(mask) + return self + + def transform(self, X, y=None): + return self._concatenate_indicator(X, None) + + +class NoPrecomputedMaskFit(_BaseImputer): + def fit(self, X, y=None): + self._fit_indicator(X) + return self + + def transform(self, X): + return self._concatenate_indicator(X, self._transform_indicator(X)) + + +class NoPrecomputedMaskTransform(_BaseImputer): + def fit(self, X, y=None): + mask = _get_mask(X, value_to_mask=np.nan) + self._fit_indicator(mask) + return self + + def transform(self, X): + return self._concatenate_indicator(X, self._transform_indicator(X)) + + +def test_base_imputer_not_fit(data): + imputer = NoFitIndicatorImputer(add_indicator=True) + err_msg = "Make sure to call _fit_indicator before _transform_indicator" + with pytest.raises(ValueError, match=err_msg): + imputer.fit(data).transform(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +def test_base_imputer_not_transform(data): + imputer = NoTransformIndicatorImputer(add_indicator=True) + err_msg = ( + "Call _fit_indicator and _transform_indicator in the imputer implementation" + ) + with pytest.raises(ValueError, match=err_msg): + imputer.fit(data).transform(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +def test_base_no_precomputed_mask_fit(data): + imputer = NoPrecomputedMaskFit(add_indicator=True) + err_msg = "precomputed is True but the input data is not a mask" + with pytest.raises(ValueError, match=err_msg): + imputer.fit(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +def test_base_no_precomputed_mask_transform(data): + imputer = NoPrecomputedMaskTransform(add_indicator=True) + err_msg = "precomputed is True but the input data is not a mask" + imputer.fit(data) + with pytest.raises(ValueError, match=err_msg): + imputer.transform(data) + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(data) + + +@pytest.mark.parametrize("X1_type", ["array", "dataframe"]) +def test_assign_where(X1_type): + """Check the behaviour of the private helpers `_assign_where`.""" + rng = np.random.RandomState(0) + + n_samples, n_features = 10, 5 + X1 = _convert_container(rng.randn(n_samples, n_features), constructor_name=X1_type) + X2 = rng.randn(n_samples, n_features) + mask = rng.randint(0, 2, size=(n_samples, n_features)).astype(bool) + + _assign_where(X1, X2, mask) + + if X1_type == "dataframe": + X1 = X1.to_numpy() + assert_allclose(X1[mask], X2[mask]) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py 
b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..4d41b44fb0252666952c70caed372e1f2a048bf0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py @@ -0,0 +1,220 @@ +import numpy as np +import pytest + +from sklearn.experimental import enable_iterative_imputer # noqa +from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def imputers(): + return [IterativeImputer(tol=0.1), KNNImputer(), SimpleImputer()] + + +def sparse_imputers(): + return [SimpleImputer()] + + +# ConvergenceWarning will be raised by the IterativeImputer +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +def test_imputation_missing_value_in_test_array(imputer): + # [Non Regression Test for issue #13968] Missing value in test set should + # not throw an error and return a finite dataset + train = [[1], [2]] + test = [[3], [np.nan]] + imputer.set_params(add_indicator=True) + imputer.fit(train).transform(test) + + +# ConvergenceWarning will be raised by the IterativeImputer +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("marker", [np.nan, -1, 0]) +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +def test_imputers_add_indicator(marker, imputer): + X = np.array( + [ + [marker, 1, 5, marker, 1], + [2, marker, 1, marker, 2], + [6, 3, marker, marker, 3], + [1, 2, 9, marker, 4], + ] + ) + X_true_indicator = np.array( + [ + [1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) + imputer.set_params(missing_values=marker, add_indicator=True) + + X_trans = imputer.fit_transform(X) + assert_allclose(X_trans[:, -4:], X_true_indicator) + assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) + + imputer.set_params(add_indicator=False) + X_trans_no_indicator = imputer.fit_transform(X) + assert_allclose(X_trans[:, :-4], X_trans_no_indicator) + + +# ConvergenceWarning will be raised by the IterativeImputer +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("marker", [np.nan, -1]) +@pytest.mark.parametrize( + "imputer", sparse_imputers(), ids=lambda x: x.__class__.__name__ +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_imputers_add_indicator_sparse(imputer, marker, csr_container): + X = csr_container( + [ + [marker, 1, 5, marker, 1], + [2, marker, 1, marker, 2], + [6, 3, marker, marker, 3], + [1, 2, 9, marker, 4], + ] + ) + X_true_indicator = csr_container( + [ + [1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) + imputer.set_params(missing_values=marker, add_indicator=True) + + X_trans = imputer.fit_transform(X) + assert_allclose_dense_sparse(X_trans[:, -4:], X_true_indicator) + assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) + + imputer.set_params(add_indicator=False) + X_trans_no_indicator = imputer.fit_transform(X) + assert_allclose_dense_sparse(X_trans[:, :-4], X_trans_no_indicator) + + +# ConvergenceWarning will be raised by the IterativeImputer 
+@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +@pytest.mark.parametrize("add_indicator", [True, False]) +def test_imputers_pandas_na_integer_array_support(imputer, add_indicator): + # Test pandas IntegerArray with pd.NA + pd = pytest.importorskip("pandas") + marker = np.nan + imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker) + + X = np.array( + [ + [marker, 1, 5, marker, 1], + [2, marker, 1, marker, 2], + [6, 3, marker, marker, 3], + [1, 2, 9, marker, 4], + ] + ) + # fit on numpy array + X_trans_expected = imputer.fit_transform(X) + + # Creates dataframe with IntegerArrays with pd.NA + X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c", "d", "e"]) + + # fit on pandas dataframe with IntegerArrays + X_trans = imputer.fit_transform(X_df) + + assert_allclose(X_trans_expected, X_trans) + + +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +@pytest.mark.parametrize("add_indicator", [True, False]) +def test_imputers_feature_names_out_pandas(imputer, add_indicator): + """Check feature names out for imputers.""" + pd = pytest.importorskip("pandas") + marker = np.nan + imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker) + + X = np.array( + [ + [marker, 1, 5, 3, marker, 1], + [2, marker, 1, 4, marker, 2], + [6, 3, 7, marker, marker, 3], + [1, 2, 9, 8, marker, 4], + ] + ) + X_df = pd.DataFrame(X, columns=["a", "b", "c", "d", "e", "f"]) + imputer.fit(X_df) + + names = imputer.get_feature_names_out() + + if add_indicator: + expected_names = [ + "a", + "b", + "c", + "d", + "f", + "missingindicator_a", + "missingindicator_b", + "missingindicator_d", + "missingindicator_e", + ] + assert_array_equal(expected_names, names) + else: + expected_names = ["a", "b", "c", "d", "f"] + assert_array_equal(expected_names, names) + + +@pytest.mark.parametrize("keep_empty_features", [True, False]) +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +def test_keep_empty_features(imputer, keep_empty_features): + """Check that the imputer keeps features with only missing values.""" + X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]]) + imputer = imputer.set_params( + add_indicator=False, keep_empty_features=keep_empty_features + ) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + if keep_empty_features: + assert X_imputed.shape == X.shape + else: + assert X_imputed.shape == (X.shape[0], X.shape[1] - 1) + + +@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) +@pytest.mark.parametrize("missing_value_test", [np.nan, 1]) +def test_imputation_adds_missing_indicator_if_add_indicator_is_true( + imputer, missing_value_test +): + """Check that missing indicator always exists when add_indicator=True. + + Non-regression test for gh-26590. + """ + X_train = np.array([[0, np.nan], [1, 2]]) + + # Test data where missing_value_test variable can be set to np.nan or 1. 
+ X_test = np.array([[0, missing_value_test], [1, 2]]) + + imputer.set_params(add_indicator=True) + imputer.fit(X_train) + + X_test_imputed_with_indicator = imputer.transform(X_test) + assert X_test_imputed_with_indicator.shape == (2, 3) + + imputer.set_params(add_indicator=False) + imputer.fit(X_train) + X_test_imputed_without_indicator = imputer.transform(X_test) + assert X_test_imputed_without_indicator.shape == (2, 2) + + assert_allclose( + X_test_imputed_with_indicator[:, :-1], X_test_imputed_without_indicator + ) + if np.isnan(missing_value_test): + expected_missing_indicator = [1, 0] + else: + expected_missing_indicator = [0, 0] + + assert_allclose(X_test_imputed_with_indicator[:, -1], expected_missing_indicator) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py new file mode 100644 index 0000000000000000000000000000000000000000..9322536ebcf473f1a031da965b5b613b08d4f281 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py @@ -0,0 +1,1754 @@ +import io +import re +import warnings +from itertools import product + +import numpy as np +import pytest +from scipy import sparse +from scipy.stats import kstest + +from sklearn import tree +from sklearn.datasets import load_diabetes +from sklearn.dummy import DummyRegressor +from sklearn.exceptions import ConvergenceWarning + +# make IterativeImputer available +from sklearn.experimental import enable_iterative_imputer # noqa +from sklearn.impute import IterativeImputer, KNNImputer, MissingIndicator, SimpleImputer +from sklearn.impute._base import _most_frequent +from sklearn.linear_model import ARDRegression, BayesianRidge, RidgeCV +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline, make_union +from sklearn.random_projection import _sparse_random_matrix +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_allclose_dense_sparse, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + BSR_CONTAINERS, + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + LIL_CONTAINERS, +) + + +def _assert_array_equal_and_same_dtype(x, y): + assert_array_equal(x, y) + assert x.dtype == y.dtype + + +def _assert_allclose_and_same_dtype(x, y): + assert_allclose(x, y) + assert x.dtype == y.dtype + + +def _check_statistics( + X, X_true, strategy, statistics, missing_values, sparse_container +): + """Utility function for testing imputation for a given strategy. 
+ + Test with dense and sparse arrays + + Check that: + - the statistics (mean, median, mode) are correct + - the missing values are imputed correctly""" + + err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % ( + strategy, + missing_values, + ) + + assert_ae = assert_array_equal + + if X.dtype.kind == "f" or X_true.dtype.kind == "f": + assert_ae = assert_array_almost_equal + + # Normal matrix + imputer = SimpleImputer(missing_values=missing_values, strategy=strategy) + X_trans = imputer.fit(X).transform(X.copy()) + assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False)) + assert_ae(X_trans, X_true, err_msg=err_msg.format(False)) + + # Sparse matrix + imputer = SimpleImputer(missing_values=missing_values, strategy=strategy) + imputer.fit(sparse_container(X)) + X_trans = imputer.transform(sparse_container(X.copy())) + + if sparse.issparse(X_trans): + X_trans = X_trans.toarray() + + assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True)) + assert_ae(X_trans, X_true, err_msg=err_msg.format(True)) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_imputation_shape(strategy, csr_container): + # Verify the shapes of the imputed matrix for different strategies. + X = np.random.randn(10, 2) + X[::2] = np.nan + + imputer = SimpleImputer(strategy=strategy) + X_imputed = imputer.fit_transform(csr_container(X)) + assert X_imputed.shape == (10, 2) + X_imputed = imputer.fit_transform(X) + assert X_imputed.shape == (10, 2) + + iterative_imputer = IterativeImputer(initial_strategy=strategy) + X_imputed = iterative_imputer.fit_transform(X) + assert X_imputed.shape == (10, 2) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +def test_imputation_deletion_warning(strategy): + X = np.ones((3, 5)) + X[:, 0] = np.nan + imputer = SimpleImputer(strategy=strategy).fit(X) + + with pytest.warns(UserWarning, match="Skipping"): + imputer.transform(X) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +def test_imputation_deletion_warning_feature_names(strategy): + pd = pytest.importorskip("pandas") + + missing_values = np.nan + feature_names = np.array(["a", "b", "c", "d"], dtype=object) + X = pd.DataFrame( + [ + [missing_values, missing_values, 1, missing_values], + [4, missing_values, 2, 10], + ], + columns=feature_names, + ) + + imputer = SimpleImputer(strategy=strategy).fit(X) + + # check SimpleImputer returning feature name attribute correctly + assert_array_equal(imputer.feature_names_in_, feature_names) + + # ensure that skipped feature warning includes feature name + with pytest.warns( + UserWarning, match=r"Skipping features without any observed values: \['b'\]" + ): + imputer.transform(X) + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_error_sparse_0(strategy, csc_container): + # check that error are raised when missing_values = 0 and input is sparse + X = np.ones((3, 5)) + X[0] = 0 + X = csc_container(X) + + imputer = SimpleImputer(strategy=strategy, missing_values=0) + with pytest.raises(ValueError, match="Provide a dense array"): + imputer.fit(X) + + imputer.fit(X.toarray()) + with pytest.raises(ValueError, match="Provide a dense array"): + imputer.transform(X) + + +def safe_median(arr, *args, **kwargs): + # np.median([]) raises a TypeError for numpy >= 1.10.1 + 
length = arr.size if hasattr(arr, "size") else len(arr) + return np.nan if length == 0 else np.median(arr, *args, **kwargs) + + +def safe_mean(arr, *args, **kwargs): + # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1 + length = arr.size if hasattr(arr, "size") else len(arr) + return np.nan if length == 0 else np.mean(arr, *args, **kwargs) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_mean_median(csc_container): + # Test imputation using the mean and median strategies, when + # missing_values != 0. + rng = np.random.RandomState(0) + + dim = 10 + dec = 10 + shape = (dim * dim, dim + dec) + + zeros = np.zeros(shape[0]) + values = np.arange(1, shape[0] + 1) + values[4::2] = -values[4::2] + + tests = [ + ("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))), + ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))), + ] + + for strategy, test_missing_values, true_value_fun in tests: + X = np.empty(shape) + X_true = np.empty(shape) + true_statistics = np.empty(shape[1]) + + # Create a matrix X with columns + # - with only zeros, + # - with only missing values + # - with zeros, missing values and values + # And a matrix X_true containing all true values + for j in range(shape[1]): + nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) + nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) + nb_values = shape[0] - nb_zeros - nb_missing_values + + z = zeros[:nb_zeros] + p = np.repeat(test_missing_values, nb_missing_values) + v = values[rng.permutation(len(values))[:nb_values]] + + true_statistics[j] = true_value_fun(z, v, p) + + # Create the columns + X[:, j] = np.hstack((v, z, p)) + + if 0 == test_missing_values: + # XXX unreached code as of v0.22 + X_true[:, j] = np.hstack( + (v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros)) + ) + else: + X_true[:, j] = np.hstack( + (v, z, np.repeat(true_statistics[j], nb_missing_values)) + ) + + # Shuffle them the same way + np.random.RandomState(j).shuffle(X[:, j]) + np.random.RandomState(j).shuffle(X_true[:, j]) + + # Mean doesn't support columns containing NaNs, median does + if strategy == "median": + cols_to_keep = ~np.isnan(X_true).any(axis=0) + else: + cols_to_keep = ~np.isnan(X_true).all(axis=0) + + X_true = X_true[:, cols_to_keep] + + _check_statistics( + X, X_true, strategy, true_statistics, test_missing_values, csc_container + ) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_median_special_cases(csc_container): + # Test median imputation with sparse boundary cases + X = np.array( + [ + [0, np.nan, np.nan], # odd: implicit zero + [5, np.nan, np.nan], # odd: explicit nonzero + [0, 0, np.nan], # even: average two zeros + [-5, 0, np.nan], # even: avg zero and neg + [0, 5, np.nan], # even: avg zero and pos + [4, 5, np.nan], # even: avg nonzeros + [-4, -5, np.nan], # even: avg negatives + [-1, 2, np.nan], # even: crossing neg and pos + ] + ).transpose() + + X_imputed_median = np.array( + [ + [0, 0, 0], + [5, 5, 5], + [0, 0, 0], + [-5, 0, -2.5], + [0, 5, 2.5], + [4, 5, 4.5], + [-4, -5, -4.5], + [-1, 2, 0.5], + ] + ).transpose() + statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5] + + _check_statistics( + X, X_imputed_median, "median", statistics_median, np.nan, csc_container + ) + + +@pytest.mark.parametrize("strategy", ["mean", "median"]) +@pytest.mark.parametrize("dtype", [None, object, str]) +def test_imputation_mean_median_error_invalid_type(strategy, dtype): + X = np.array([["a", "b", 3], [4, "e", 6], 
["g", "h", 9]], dtype=dtype) + msg = "non-numeric data:\ncould not convert string to float:" + with pytest.raises(ValueError, match=msg): + imputer = SimpleImputer(strategy=strategy) + imputer.fit_transform(X) + + +@pytest.mark.parametrize("strategy", ["mean", "median"]) +@pytest.mark.parametrize("type", ["list", "dataframe"]) +def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type): + X = [["a", "b", 3], [4, "e", 6], ["g", "h", 9]] + if type == "dataframe": + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X) + msg = "non-numeric data:\ncould not convert string to float:" + with pytest.raises(ValueError, match=msg): + imputer = SimpleImputer(strategy=strategy) + imputer.fit_transform(X) + + +@pytest.mark.parametrize("strategy", ["constant", "most_frequent"]) +@pytest.mark.parametrize("dtype", [str, np.dtype("U"), np.dtype("S")]) +def test_imputation_const_mostf_error_invalid_types(strategy, dtype): + # Test imputation on non-numeric data using "most_frequent" and "constant" + # strategy + X = np.array( + [ + [np.nan, np.nan, "a", "f"], + [np.nan, "c", np.nan, "d"], + [np.nan, "b", "d", np.nan], + [np.nan, "c", "d", "h"], + ], + dtype=dtype, + ) + + err_msg = "SimpleImputer does not support data" + with pytest.raises(ValueError, match=err_msg): + imputer = SimpleImputer(strategy=strategy) + imputer.fit(X).transform(X) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_imputation_most_frequent(csc_container): + # Test imputation using the most-frequent strategy. + X = np.array( + [ + [-1, -1, 0, 5], + [-1, 2, -1, 3], + [-1, 1, 3, -1], + [-1, 2, 3, 7], + ] + ) + + X_true = np.array( + [ + [2, 0, 5], + [2, 3, 3], + [1, 3, 3], + [2, 3, 7], + ] + ) + + # scipy.stats.mode, used in SimpleImputer, doesn't return the first most + # frequent as promised in the doc but the lowest most frequent. When this + # test will fail after an update of scipy, SimpleImputer will need to be + # updated to be consistent with the new (correct) behaviour + _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1, csc_container) + + +@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) +def test_imputation_most_frequent_objects(marker): + # Test imputation using the most-frequent strategy. 
+ X = np.array( + [ + [marker, marker, "a", "f"], + [marker, "c", marker, "d"], + [marker, "b", "d", marker], + [marker, "c", "d", "h"], + ], + dtype=object, + ) + + X_true = np.array( + [ + ["c", "a", "f"], + ["c", "d", "d"], + ["b", "d", "d"], + ["c", "d", "h"], + ], + dtype=object, + ) + + imputer = SimpleImputer(missing_values=marker, strategy="most_frequent") + X_trans = imputer.fit(X).transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("dtype", [object, "category"]) +def test_imputation_most_frequent_pandas(dtype): + # Test imputation using the most frequent strategy on pandas df + pd = pytest.importorskip("pandas") + + f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,") + + df = pd.read_csv(f, dtype=dtype) + + X_true = np.array( + [["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"]], + dtype=object, + ) + + imputer = SimpleImputer(strategy="most_frequent") + X_trans = imputer.fit_transform(df) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1.0, np.nan)]) +def test_imputation_constant_error_invalid_type(X_data, missing_value): + # Verify that exceptions are raised on invalid fill_value type + X = np.full((3, 5), X_data, dtype=float) + X[0, 0] = missing_value + + fill_value = "x" + err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast" + with pytest.raises(ValueError, match=re.escape(err_msg)): + imputer = SimpleImputer( + missing_values=missing_value, strategy="constant", fill_value=fill_value + ) + imputer.fit_transform(X) + + +def test_imputation_constant_integer(): + # Test imputation using the constant strategy on integers + X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]]) + + X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]]) + + imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0) + X_trans = imputer.fit_transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("array_constructor", CSR_CONTAINERS + [np.asarray]) +def test_imputation_constant_float(array_constructor): + # Test imputation using the constant strategy on floats + X = np.array( + [ + [np.nan, 1.1, 0, np.nan], + [1.2, np.nan, 1.3, np.nan], + [0, 0, np.nan, np.nan], + [1.4, 1.5, 0, np.nan], + ] + ) + + X_true = np.array( + [[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]] + ) + + X = array_constructor(X) + + X_true = array_constructor(X_true) + + imputer = SimpleImputer(strategy="constant", fill_value=-1) + X_trans = imputer.fit_transform(X) + + assert_allclose_dense_sparse(X_trans, X_true) + + +@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) +def test_imputation_constant_object(marker): + # Test imputation using the constant strategy on objects + X = np.array( + [ + [marker, "a", "b", marker], + ["c", marker, "d", marker], + ["e", "f", marker, marker], + ["g", "h", "i", marker], + ], + dtype=object, + ) + + X_true = np.array( + [ + ["missing", "a", "b", "missing"], + ["c", "missing", "d", "missing"], + ["e", "f", "missing", "missing"], + ["g", "h", "i", "missing"], + ], + dtype=object, + ) + + imputer = SimpleImputer( + missing_values=marker, strategy="constant", fill_value="missing" + ) + X_trans = imputer.fit_transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("dtype", [object, "category"]) +def test_imputation_constant_pandas(dtype): + # Test imputation using the constant strategy on pandas df 
+ pd = pytest.importorskip("pandas") + + f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,") + + df = pd.read_csv(f, dtype=dtype) + + X_true = np.array( + [ + ["missing_value", "i", "x", "missing_value"], + ["a", "missing_value", "y", "missing_value"], + ["a", "j", "missing_value", "missing_value"], + ["b", "j", "x", "missing_value"], + ], + dtype=object, + ) + + imputer = SimpleImputer(strategy="constant") + X_trans = imputer.fit_transform(df) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]]) +def test_iterative_imputer_one_feature(X): + # check we exit early when there is a single feature + imputer = IterativeImputer().fit(X) + assert imputer.n_iter_ == 0 + imputer = IterativeImputer() + imputer.fit([[1], [2]]) + assert imputer.n_iter_ == 0 + imputer.fit([[1], [np.nan]]) + assert imputer.n_iter_ == 0 + + +def test_imputation_pipeline_grid_search(): + # Test imputation within a pipeline + gridsearch. + X = _sparse_random_matrix(100, 100, density=0.10) + missing_values = X.data[0] + + pipeline = Pipeline( + [ + ("imputer", SimpleImputer(missing_values=missing_values)), + ("tree", tree.DecisionTreeRegressor(random_state=0)), + ] + ) + + parameters = {"imputer__strategy": ["mean", "median", "most_frequent"]} + + Y = _sparse_random_matrix(100, 1, density=0.10).toarray() + gs = GridSearchCV(pipeline, parameters) + gs.fit(X, Y) + + +def test_imputation_copy(): + # Test imputation with copy + X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0) + + # copy=True, dense => copy + X = X_orig.copy().toarray() + imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True) + Xt = imputer.fit(X).transform(X) + Xt[0, 0] = -1 + assert not np.all(X == Xt) + + # copy=True, sparse csr => copy + X = X_orig.copy() + imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True) + Xt = imputer.fit(X).transform(X) + Xt.data[0] = -1 + assert not np.all(X.data == Xt.data) + + # copy=False, dense => no copy + X = X_orig.copy().toarray() + imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False) + Xt = imputer.fit(X).transform(X) + Xt[0, 0] = -1 + assert_array_almost_equal(X, Xt) + + # copy=False, sparse csc => no copy + X = X_orig.copy().tocsc() + imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) + Xt = imputer.fit(X).transform(X) + Xt.data[0] = -1 + assert_array_almost_equal(X.data, Xt.data) + + # copy=False, sparse csr => copy + X = X_orig.copy() + imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) + Xt = imputer.fit(X).transform(X) + Xt.data[0] = -1 + assert not np.all(X.data == Xt.data) + + # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is + # made, even if copy=False. 
+ + +def test_iterative_imputer_zero_iters(): + rng = np.random.RandomState(0) + + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + missing_flag = X == 0 + X[missing_flag] = np.nan + + imputer = IterativeImputer(max_iter=0) + X_imputed = imputer.fit_transform(X) + # with max_iter=0, only initial imputation is performed + assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) + + # repeat but force n_iter_ to 0 + imputer = IterativeImputer(max_iter=5).fit(X) + # transformed should not be equal to initial imputation + assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X)) + + imputer.n_iter_ = 0 + # now they should be equal as only initial imputation is done + assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X)) + + +def test_iterative_imputer_verbose(): + rng = np.random.RandomState(0) + + n = 100 + d = 3 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1) + imputer.fit(X) + imputer.transform(X) + imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2) + imputer.fit(X) + imputer.transform(X) + + +def test_iterative_imputer_all_missing(): + n = 100 + d = 3 + X = np.zeros((n, d)) + imputer = IterativeImputer(missing_values=0, max_iter=1) + X_imputed = imputer.fit_transform(X) + assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) + + +@pytest.mark.parametrize( + "imputation_order", ["random", "roman", "ascending", "descending", "arabic"] +) +def test_iterative_imputer_imputation_order(imputation_order): + rng = np.random.RandomState(0) + n = 100 + d = 10 + max_iter = 2 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + X[:, 0] = 1 # this column should not be discarded by IterativeImputer + + imputer = IterativeImputer( + missing_values=0, + max_iter=max_iter, + n_nearest_features=5, + sample_posterior=False, + skip_complete=True, + min_value=0, + max_value=1, + verbose=1, + imputation_order=imputation_order, + random_state=rng, + ) + imputer.fit_transform(X) + ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_] + + assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_ + + if imputation_order == "roman": + assert np.all(ordered_idx[: d - 1] == np.arange(1, d)) + elif imputation_order == "arabic": + assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1)) + elif imputation_order == "random": + ordered_idx_round_1 = ordered_idx[: d - 1] + ordered_idx_round_2 = ordered_idx[d - 1 :] + assert ordered_idx_round_1 != ordered_idx_round_2 + elif "ending" in imputation_order: + assert len(ordered_idx) == max_iter * (d - 1) + + +@pytest.mark.parametrize( + "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()] +) +def test_iterative_imputer_estimators(estimator): + rng = np.random.RandomState(0) + + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + + imputer = IterativeImputer( + missing_values=0, max_iter=1, estimator=estimator, random_state=rng + ) + imputer.fit_transform(X) + + # check that types are correct for estimators + hashes = [] + for triplet in imputer.imputation_sequence_: + expected_type = ( + type(estimator) if estimator is not None else type(BayesianRidge()) + ) + assert isinstance(triplet.estimator, expected_type) + hashes.append(id(triplet.estimator)) + + # check that each estimator is unique + assert len(set(hashes)) == 
len(hashes) + + +def test_iterative_imputer_clip(): + rng = np.random.RandomState(0) + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + + imputer = IterativeImputer( + missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng + ) + + Xt = imputer.fit_transform(X) + assert_allclose(np.min(Xt[X == 0]), 0.1) + assert_allclose(np.max(Xt[X == 0]), 0.2) + assert_allclose(Xt[X != 0], X[X != 0]) + + +def test_iterative_imputer_clip_truncnorm(): + rng = np.random.RandomState(0) + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() + X[:, 0] = 1 + + imputer = IterativeImputer( + missing_values=0, + max_iter=2, + n_nearest_features=5, + sample_posterior=True, + min_value=0.1, + max_value=0.2, + verbose=1, + imputation_order="random", + random_state=rng, + ) + Xt = imputer.fit_transform(X) + assert_allclose(np.min(Xt[X == 0]), 0.1) + assert_allclose(np.max(Xt[X == 0]), 0.2) + assert_allclose(Xt[X != 0], X[X != 0]) + + +def test_iterative_imputer_truncated_normal_posterior(): + # test that the values that are imputed using `sample_posterior=True` + # with boundaries (`min_value` and `max_value` are not None) are drawn + # from a distribution that looks gaussian via the Kolmogorov Smirnov test. + # note that starting from the wrong random seed will make this test fail + # because random sampling doesn't occur at all when the imputation + # is outside of the (min_value, max_value) range + rng = np.random.RandomState(42) + + X = rng.normal(size=(5, 5)) + X[0][0] = np.nan + + imputer = IterativeImputer( + min_value=0, max_value=0.5, sample_posterior=True, random_state=rng + ) + + imputer.fit_transform(X) + # generate multiple imputations for the single missing value + imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)]) + + assert all(imputations >= 0) + assert all(imputations <= 0.5) + + mu, sigma = imputations.mean(), imputations.std() + ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm") + if sigma == 0: + sigma += 1e-12 + ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm") + # we want to fail to reject null hypothesis + # null hypothesis: distributions are the same + assert ks_statistic < 0.2 or p_value > 0.1, "The posterior does appear to be normal" + + +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +def test_iterative_imputer_missing_at_transform(strategy): + rng = np.random.RandomState(0) + n = 100 + d = 10 + X_train = rng.randint(low=0, high=3, size=(n, d)) + X_test = rng.randint(low=0, high=3, size=(n, d)) + + X_train[:, 0] = 1 # definitely no missing values in 0th column + X_test[0, 0] = 0 # definitely missing value in 0th column + + imputer = IterativeImputer( + missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng + ).fit(X_train) + initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train) + + # if there were no missing values at time of fit, then imputer will + # only use the initial imputer for that feature at transform + assert_allclose( + imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0] + ) + + +def test_iterative_imputer_transform_stochasticity(): + rng1 = np.random.RandomState(0) + rng2 = np.random.RandomState(1) + n = 100 + d = 10 + X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray() + + # when sample_posterior=True, two transforms shouldn't be equal + imputer = IterativeImputer( + missing_values=0, max_iter=1, 
sample_posterior=True, random_state=rng1 + ) + imputer.fit(X) + + X_fitted_1 = imputer.transform(X) + X_fitted_2 = imputer.transform(X) + + # sufficient to assert that the means are not the same + assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2)) + + # when sample_posterior=False, and n_nearest_features=None + # and imputation_order is not random + # the two transforms should be identical even if rng are different + imputer1 = IterativeImputer( + missing_values=0, + max_iter=1, + sample_posterior=False, + n_nearest_features=None, + imputation_order="ascending", + random_state=rng1, + ) + + imputer2 = IterativeImputer( + missing_values=0, + max_iter=1, + sample_posterior=False, + n_nearest_features=None, + imputation_order="ascending", + random_state=rng2, + ) + imputer1.fit(X) + imputer2.fit(X) + + X_fitted_1a = imputer1.transform(X) + X_fitted_1b = imputer1.transform(X) + X_fitted_2 = imputer2.transform(X) + + assert_allclose(X_fitted_1a, X_fitted_1b) + assert_allclose(X_fitted_1a, X_fitted_2) + + +def test_iterative_imputer_no_missing(): + rng = np.random.RandomState(0) + X = rng.rand(100, 100) + X[:, 0] = np.nan + m1 = IterativeImputer(max_iter=10, random_state=rng) + m2 = IterativeImputer(max_iter=10, random_state=rng) + pred1 = m1.fit(X).transform(X) + pred2 = m2.fit_transform(X) + # should exclude the first column entirely + assert_allclose(X[:, 1:], pred1) + # fit and fit_transform should both be identical + assert_allclose(pred1, pred2) + + +def test_iterative_imputer_rank_one(): + rng = np.random.RandomState(0) + d = 50 + A = rng.rand(d, 1) + B = rng.rand(1, d) + X = np.dot(A, B) + nan_mask = rng.rand(d, d) < 0.5 + X_missing = X.copy() + X_missing[nan_mask] = np.nan + + imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng) + X_filled = imputer.fit_transform(X_missing) + assert_allclose(X_filled, X, atol=0.02) + + +@pytest.mark.parametrize("rank", [3, 5]) +def test_iterative_imputer_transform_recovery(rank): + rng = np.random.RandomState(0) + n = 70 + d = 70 + A = rng.rand(n, rank) + B = rng.rand(rank, d) + X_filled = np.dot(A, B) + nan_mask = rng.rand(n, d) < 0.5 + X_missing = X_filled.copy() + X_missing[nan_mask] = np.nan + + # split up data in half + n = n // 2 + X_train = X_missing[:n] + X_test_filled = X_filled[n:] + X_test = X_missing[n:] + + imputer = IterativeImputer( + max_iter=5, imputation_order="descending", verbose=1, random_state=rng + ).fit(X_train) + X_test_est = imputer.transform(X_test) + assert_allclose(X_test_filled, X_test_est, atol=0.1) + + +def test_iterative_imputer_additive_matrix(): + rng = np.random.RandomState(0) + n = 100 + d = 10 + A = rng.randn(n, d) + B = rng.randn(n, d) + X_filled = np.zeros(A.shape) + for i in range(d): + for j in range(d): + X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2 + # a quarter is randomly missing + nan_mask = rng.rand(n, d) < 0.25 + X_missing = X_filled.copy() + X_missing[nan_mask] = np.nan + + # split up data + n = n // 2 + X_train = X_missing[:n] + X_test_filled = X_filled[n:] + X_test = X_missing[n:] + + imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train) + X_test_est = imputer.transform(X_test) + assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01) + + +def test_iterative_imputer_early_stopping(): + rng = np.random.RandomState(0) + n = 50 + d = 5 + A = rng.rand(n, 1) + B = rng.rand(1, d) + X = np.dot(A, B) + nan_mask = rng.rand(n, d) < 0.5 + X_missing = X.copy() + X_missing[nan_mask] = np.nan + + imputer = IterativeImputer( + max_iter=100, 
tol=1e-2, sample_posterior=False, verbose=1, random_state=rng + ) + X_filled_100 = imputer.fit_transform(X_missing) + assert len(imputer.imputation_sequence_) == d * imputer.n_iter_ + + imputer = IterativeImputer( + max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng + ) + X_filled_early = imputer.fit_transform(X_missing) + assert_allclose(X_filled_100, X_filled_early, atol=1e-7) + + imputer = IterativeImputer( + max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng + ) + imputer.fit(X_missing) + assert imputer.n_iter_ == imputer.max_iter + + +def test_iterative_imputer_catch_warning(): + # check that we catch a RuntimeWarning due to a division by zero when a + # feature is constant in the dataset + X, y = load_diabetes(return_X_y=True) + n_samples, n_features = X.shape + + # simulate that a feature only contain one category during fit + X[:, 3] = 1 + + # add some missing values + rng = np.random.RandomState(0) + missing_rate = 0.15 + for feat in range(n_features): + sample_idx = rng.choice( + np.arange(n_samples), size=int(n_samples * missing_rate), replace=False + ) + X[sample_idx, feat] = np.nan + + imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + X_fill = imputer.fit_transform(X, y) + assert not np.any(np.isnan(X_fill)) + + +@pytest.mark.parametrize( + "min_value, max_value, correct_output", + [ + (0, 100, np.array([[0] * 3, [100] * 3])), + (None, None, np.array([[-np.inf] * 3, [np.inf] * 3])), + (-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])), + ([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])), + ( + [-5, -np.inf, 10], + [100, 200, np.inf], + np.array([[-5, -np.inf, 10], [100, 200, np.inf]]), + ), + ], + ids=["scalars", "None-default", "inf", "lists", "lists-with-inf"], +) +def test_iterative_imputer_min_max_array_like(min_value, max_value, correct_output): + # check that passing scalar or array-like + # for min_value and max_value in IterativeImputer works + X = np.random.RandomState(0).randn(10, 3) + imputer = IterativeImputer(min_value=min_value, max_value=max_value) + imputer.fit(X) + + assert isinstance(imputer._min_value, np.ndarray) and isinstance( + imputer._max_value, np.ndarray + ) + assert (imputer._min_value.shape[0] == X.shape[1]) and ( + imputer._max_value.shape[0] == X.shape[1] + ) + + assert_allclose(correct_output[0, :], imputer._min_value) + assert_allclose(correct_output[1, :], imputer._max_value) + + +@pytest.mark.parametrize( + "min_value, max_value, err_msg", + [ + (100, 0, "min_value >= max_value."), + (np.inf, -np.inf, "min_value >= max_value."), + ([-5, 5], [100, 200, 0], "_value' should be of shape"), + ], +) +def test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg): + # check that passing scalar or array-like + # for min_value and max_value in IterativeImputer works + X = np.random.random((10, 3)) + imputer = IterativeImputer(min_value=min_value, max_value=max_value) + with pytest.raises(ValueError, match=err_msg): + imputer.fit(X) + + +@pytest.mark.parametrize( + "min_max_1, min_max_2", + [([None, None], [-np.inf, np.inf]), ([-10, 10], [[-10] * 4, [10] * 4])], + ids=["None-vs-inf", "Scalar-vs-vector"], +) +def test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2): + # Test that None/inf and scalar/vector give the same imputation + X_train = np.array( + [ + [np.nan, 2, 2, 1], + [10, np.nan, np.nan, 7], + [3, 1, np.nan, 1], + 
[np.nan, 4, 2, np.nan], + ] + ) + X_test = np.array( + [[np.nan, 2, np.nan, 5], [2, 4, np.nan, np.nan], [np.nan, 1, 10, 1]] + ) + imputer1 = IterativeImputer( + min_value=min_max_1[0], max_value=min_max_1[1], random_state=0 + ) + imputer2 = IterativeImputer( + min_value=min_max_2[0], max_value=min_max_2[1], random_state=0 + ) + X_test_imputed1 = imputer1.fit(X_train).transform(X_test) + X_test_imputed2 = imputer2.fit(X_train).transform(X_test) + assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0]) + + +@pytest.mark.parametrize("skip_complete", [True, False]) +def test_iterative_imputer_skip_non_missing(skip_complete): + # check the imputing strategy when missing data are present in the + # testing set only. + # taken from: https://github.com/scikit-learn/scikit-learn/issues/14383 + rng = np.random.RandomState(0) + X_train = np.array([[5, 2, 2, 1], [10, 1, 2, 7], [3, 1, 1, 1], [8, 4, 2, 2]]) + X_test = np.array([[np.nan, 2, 4, 5], [np.nan, 4, 1, 2], [np.nan, 1, 10, 1]]) + imputer = IterativeImputer( + initial_strategy="mean", skip_complete=skip_complete, random_state=rng + ) + X_test_est = imputer.fit(X_train).transform(X_test) + if skip_complete: + # impute with the initial strategy: 'mean' + assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0])) + else: + assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4) + + +@pytest.mark.parametrize("rs_imputer", [None, 1, np.random.RandomState(seed=1)]) +@pytest.mark.parametrize("rs_estimator", [None, 1, np.random.RandomState(seed=1)]) +def test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator): + class ZeroEstimator: + def __init__(self, random_state): + self.random_state = random_state + + def fit(self, *args, **kgards): + return self + + def predict(self, X): + return np.zeros(X.shape[0]) + + estimator = ZeroEstimator(random_state=rs_estimator) + imputer = IterativeImputer(random_state=rs_imputer) + X_train = np.zeros((10, 3)) + imputer.fit(X_train) + assert estimator.random_state == rs_estimator + + +@pytest.mark.parametrize( + "X_fit, X_trans, params, msg_err", + [ + ( + np.array([[-1, 1], [1, 2]]), + np.array([[-1, 1], [1, -1]]), + {"features": "missing-only", "sparse": "auto"}, + "have missing values in transform but have no missing values in fit", + ), + ( + np.array([["a", "b"], ["c", "a"]], dtype=str), + np.array([["a", "b"], ["c", "a"]], dtype=str), + {}, + "MissingIndicator does not support data with dtype", + ), + ], +) +def test_missing_indicator_error(X_fit, X_trans, params, msg_err): + indicator = MissingIndicator(missing_values=-1) + indicator.set_params(**params) + with pytest.raises(ValueError, match=msg_err): + indicator.fit(X_fit).transform(X_trans) + + +def _generate_missing_indicator_cases(): + missing_values_dtypes = [(0, np.int32), (np.nan, np.float64), (-1, np.int32)] + arr_types = ( + [np.array] + + CSC_CONTAINERS + + CSR_CONTAINERS + + COO_CONTAINERS + + LIL_CONTAINERS + + BSR_CONTAINERS + ) + return [ + (arr_type, missing_values, dtype) + for arr_type, (missing_values, dtype) in product( + arr_types, missing_values_dtypes + ) + if not (missing_values == 0 and arr_type is not np.array) + ] + + +@pytest.mark.parametrize( + "arr_type, missing_values, dtype", _generate_missing_indicator_cases() +) +@pytest.mark.parametrize( + "param_features, n_features, features_indices", + [("missing-only", 3, np.array([0, 1, 2])), ("all", 3, np.array([0, 1, 2]))], +) +def test_missing_indicator_new( + missing_values, arr_type, dtype, param_features, n_features, features_indices +): + X_fit = 
np.array([[missing_values, missing_values, 1], [4, 2, missing_values]]) + X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) + X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]]) + X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]]) + + # convert the input to the right array format and right dtype + X_fit = arr_type(X_fit).astype(dtype) + X_trans = arr_type(X_trans).astype(dtype) + X_fit_expected = X_fit_expected.astype(dtype) + X_trans_expected = X_trans_expected.astype(dtype) + + indicator = MissingIndicator( + missing_values=missing_values, features=param_features, sparse=False + ) + X_fit_mask = indicator.fit_transform(X_fit) + X_trans_mask = indicator.transform(X_trans) + + assert X_fit_mask.shape[1] == n_features + assert X_trans_mask.shape[1] == n_features + + assert_array_equal(indicator.features_, features_indices) + assert_allclose(X_fit_mask, X_fit_expected[:, features_indices]) + assert_allclose(X_trans_mask, X_trans_expected[:, features_indices]) + + assert X_fit_mask.dtype == bool + assert X_trans_mask.dtype == bool + assert isinstance(X_fit_mask, np.ndarray) + assert isinstance(X_trans_mask, np.ndarray) + + indicator.set_params(sparse=True) + X_fit_mask_sparse = indicator.fit_transform(X_fit) + X_trans_mask_sparse = indicator.transform(X_trans) + + assert X_fit_mask_sparse.dtype == bool + assert X_trans_mask_sparse.dtype == bool + assert X_fit_mask_sparse.format == "csc" + assert X_trans_mask_sparse.format == "csc" + assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask) + assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask) + + +@pytest.mark.parametrize( + "arr_type", + CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS, +) +def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type): + # test for sparse input and missing_value == 0 + + missing_values = 0 + X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) + X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) + + # convert the input to the right array format + X_fit_sparse = arr_type(X_fit) + X_trans_sparse = arr_type(X_trans) + + indicator = MissingIndicator(missing_values=missing_values) + + with pytest.raises(ValueError, match="Sparse input with missing_values=0"): + indicator.fit_transform(X_fit_sparse) + + indicator.fit_transform(X_fit) + with pytest.raises(ValueError, match="Sparse input with missing_values=0"): + indicator.transform(X_trans_sparse) + + +@pytest.mark.parametrize("param_sparse", [True, False, "auto"]) +@pytest.mark.parametrize( + "arr_type, missing_values", + [(np.array, 0)] + + list( + product( + CSC_CONTAINERS + + CSR_CONTAINERS + + COO_CONTAINERS + + LIL_CONTAINERS + + BSR_CONTAINERS, + [np.nan], + ) + ), +) +def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse): + # check the format of the output with different sparse parameter + X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) + X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) + X_fit = arr_type(X_fit).astype(np.float64) + X_trans = arr_type(X_trans).astype(np.float64) + + indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse) + X_fit_mask = indicator.fit_transform(X_fit) + X_trans_mask = indicator.transform(X_trans) + + if param_sparse is True: + assert X_fit_mask.format == "csc" + assert X_trans_mask.format == "csc" + elif param_sparse == "auto" and missing_values == 0: + assert isinstance(X_fit_mask, np.ndarray) + assert 
isinstance(X_trans_mask, np.ndarray) + elif param_sparse is False: + assert isinstance(X_fit_mask, np.ndarray) + assert isinstance(X_trans_mask, np.ndarray) + else: + if sparse.issparse(X_fit): + assert X_fit_mask.format == "csc" + assert X_trans_mask.format == "csc" + else: + assert isinstance(X_fit_mask, np.ndarray) + assert isinstance(X_trans_mask, np.ndarray) + + +def test_missing_indicator_string(): + X = np.array([["a", "b", "c"], ["b", "c", "a"]], dtype=object) + indicator = MissingIndicator(missing_values="a", features="all") + X_trans = indicator.fit_transform(X) + assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]])) + + +@pytest.mark.parametrize( + "X, missing_values, X_trans_exp", + [ + ( + np.array([["a", "b"], ["b", "a"]], dtype=object), + "a", + np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object), + ), + ( + np.array([[np.nan, 1.0], [1.0, np.nan]]), + np.nan, + np.array([[1.0, 1.0, True, False], [1.0, 1.0, False, True]]), + ), + ( + np.array([[np.nan, "b"], ["b", np.nan]], dtype=object), + np.nan, + np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object), + ), + ( + np.array([[None, "b"], ["b", None]], dtype=object), + None, + np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object), + ), + ], +) +def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp): + trans = make_union( + SimpleImputer(missing_values=missing_values, strategy="most_frequent"), + MissingIndicator(missing_values=missing_values), + ) + X_trans = trans.fit_transform(X) + assert_array_equal(X_trans, X_trans_exp) + + +@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) +@pytest.mark.parametrize( + "imputer_missing_values, missing_value, err_msg", + [ + ("NaN", np.nan, "Input X contains NaN"), + ("-1", -1, "types are expected to be both numerical."), + ], +) +def test_inconsistent_dtype_X_missing_values( + imputer_constructor, imputer_missing_values, missing_value, err_msg +): + # regression test for issue #11390. Comparison between incoherent dtype + # for X and missing_values was not raising a proper error. + rng = np.random.RandomState(42) + X = rng.randn(10, 10) + X[0, 0] = missing_value + + imputer = imputer_constructor(missing_values=imputer_missing_values) + + with pytest.raises(ValueError, match=err_msg): + imputer.fit_transform(X) + + +def test_missing_indicator_no_missing(): + # check that all features are dropped if there are no missing values when + # features='missing-only' (#13491) + X = np.array([[1, 1], [1, 1]]) + + mi = MissingIndicator(features="missing-only", missing_values=-1) + Xt = mi.fit_transform(X) + + assert Xt.shape[1] == 0 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_missing_indicator_sparse_no_explicit_zeros(csr_container): + # Check that non missing values don't become explicit zeros in the mask + # generated by missing indicator when X is sparse. 
(#13491) + X = csr_container([[0, 1, 2], [1, 2, 0], [2, 0, 1]]) + + mi = MissingIndicator(features="all", missing_values=1) + Xt = mi.fit_transform(X) + + assert Xt.getnnz() == Xt.sum() + + +@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) +def test_imputer_without_indicator(imputer_constructor): + X = np.array([[1, 1], [1, 1]]) + imputer = imputer_constructor() + imputer.fit(X) + + assert imputer.indicator_ is None + + +@pytest.mark.parametrize( + "arr_type", + CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + BSR_CONTAINERS, +) +def test_simple_imputation_add_indicator_sparse_matrix(arr_type): + X_sparse = arr_type([[np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9]]) + X_true = np.array( + [ + [3.0, 1.0, 5.0, 1.0, 0.0, 0.0], + [2.0, 2.0, 1.0, 0.0, 1.0, 0.0], + [6.0, 3.0, 5.0, 0.0, 0.0, 1.0], + [1.0, 2.0, 9.0, 0.0, 0.0, 0.0], + ] + ) + + imputer = SimpleImputer(missing_values=np.nan, add_indicator=True) + X_trans = imputer.fit_transform(X_sparse) + + assert sparse.issparse(X_trans) + assert X_trans.shape == X_true.shape + assert_allclose(X_trans.toarray(), X_true) + + +@pytest.mark.parametrize( + "strategy, expected", [("most_frequent", "b"), ("constant", "missing_value")] +) +def test_simple_imputation_string_list(strategy, expected): + X = [["a", "b"], ["c", np.nan]] + + X_true = np.array([["a", "b"], ["c", expected]], dtype=object) + + imputer = SimpleImputer(strategy=strategy) + X_trans = imputer.fit_transform(X) + + assert_array_equal(X_trans, X_true) + + +@pytest.mark.parametrize( + "order, idx_order", + [("ascending", [3, 4, 2, 0, 1]), ("descending", [1, 0, 2, 4, 3])], +) +def test_imputation_order(order, idx_order): + # regression test for #15393 + rng = np.random.RandomState(42) + X = rng.rand(100, 5) + X[:50, 1] = np.nan + X[:30, 0] = np.nan + X[:20, 2] = np.nan + X[:10, 4] = np.nan + + with pytest.warns(ConvergenceWarning): + trs = IterativeImputer(max_iter=1, imputation_order=order, random_state=0).fit( + X + ) + idx = [x.feat_idx for x in trs.imputation_sequence_] + assert idx == idx_order + + +@pytest.mark.parametrize("missing_value", [-1, np.nan]) +def test_simple_imputation_inverse_transform(missing_value): + # Test inverse_transform feature for np.nan + X_1 = np.array( + [ + [9, missing_value, 3, -1], + [4, -1, 5, 4], + [6, 7, missing_value, -1], + [8, 9, 0, missing_value], + ] + ) + + X_2 = np.array( + [ + [5, 4, 2, 1], + [2, 1, missing_value, 3], + [9, missing_value, 7, 1], + [6, 4, 2, missing_value], + ] + ) + + X_3 = np.array( + [ + [1, missing_value, 5, 9], + [missing_value, 4, missing_value, missing_value], + [2, missing_value, 7, missing_value], + [missing_value, 3, missing_value, 8], + ] + ) + + X_4 = np.array( + [ + [1, 1, 1, 3], + [missing_value, 2, missing_value, 1], + [2, 3, 3, 4], + [missing_value, 4, missing_value, 2], + ] + ) + + imputer = SimpleImputer( + missing_values=missing_value, strategy="mean", add_indicator=True + ) + + X_1_trans = imputer.fit_transform(X_1) + X_1_inv_trans = imputer.inverse_transform(X_1_trans) + + X_2_trans = imputer.transform(X_2) # test on new data + X_2_inv_trans = imputer.inverse_transform(X_2_trans) + + assert_array_equal(X_1_inv_trans, X_1) + assert_array_equal(X_2_inv_trans, X_2) + + for X in [X_3, X_4]: + X_trans = imputer.fit_transform(X) + X_inv_trans = imputer.inverse_transform(X_trans) + assert_array_equal(X_inv_trans, X) + + +@pytest.mark.parametrize("missing_value", [-1, np.nan]) +def test_simple_imputation_inverse_transform_exceptions(missing_value): + X_1 
= np.array( + [ + [9, missing_value, 3, -1], + [4, -1, 5, 4], + [6, 7, missing_value, -1], + [8, 9, 0, missing_value], + ] + ) + + imputer = SimpleImputer(missing_values=missing_value, strategy="mean") + X_1_trans = imputer.fit_transform(X_1) + with pytest.raises( + ValueError, match=f"Got 'add_indicator={imputer.add_indicator}'" + ): + imputer.inverse_transform(X_1_trans) + + +@pytest.mark.parametrize( + "expected,array,dtype,extra_value,n_repeat", + [ + # array of object dtype + ("extra_value", ["a", "b", "c"], object, "extra_value", 2), + ( + "most_frequent_value", + ["most_frequent_value", "most_frequent_value", "value"], + object, + "extra_value", + 1, + ), + ("a", ["min_value", "min_valuevalue"], object, "a", 2), + ("min_value", ["min_value", "min_value", "value"], object, "z", 2), + # array of numeric dtype + (10, [1, 2, 3], int, 10, 2), + (1, [1, 1, 2], int, 10, 1), + (10, [20, 20, 1], int, 10, 2), + (1, [1, 1, 20], int, 10, 2), + ], +) +def test_most_frequent(expected, array, dtype, extra_value, n_repeat): + assert expected == _most_frequent( + np.array(array, dtype=dtype), extra_value, n_repeat + ) + + +@pytest.mark.parametrize( + "initial_strategy", ["mean", "median", "most_frequent", "constant"] +) +def test_iterative_imputer_keep_empty_features(initial_strategy): + """Check the behaviour of the iterative imputer with different initial strategy + and keeping empty features (i.e. features containing only missing values). + """ + X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]]) + + imputer = IterativeImputer( + initial_strategy=initial_strategy, keep_empty_features=True + ) + X_imputed = imputer.fit_transform(X) + assert_allclose(X_imputed[:, 1], 0) + X_imputed = imputer.transform(X) + assert_allclose(X_imputed[:, 1], 0) + + +def test_iterative_imputer_constant_fill_value(): + """Check that we propagate properly the parameter `fill_value`.""" + X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]]) + + fill_value = 100 + imputer = IterativeImputer( + missing_values=-1, + initial_strategy="constant", + fill_value=fill_value, + max_iter=0, + ) + imputer.fit_transform(X) + assert_array_equal(imputer.initial_imputer_.statistics_, fill_value) + + +@pytest.mark.parametrize("keep_empty_features", [True, False]) +def test_knn_imputer_keep_empty_features(keep_empty_features): + """Check the behaviour of `keep_empty_features` for `KNNImputer`.""" + X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]]) + + imputer = KNNImputer(keep_empty_features=keep_empty_features) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + if keep_empty_features: + assert X_imputed.shape == X.shape + assert_array_equal(X_imputed[:, 1], 0) + else: + assert X_imputed.shape == (X.shape[0], X.shape[1] - 1) + + +def test_simple_impute_pd_na(): + pd = pytest.importorskip("pandas") + + # Impute pandas array of string types. + df = pd.DataFrame({"feature": pd.Series(["abc", None, "de"], dtype="string")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value="na") + _assert_array_equal_and_same_dtype( + imputer.fit_transform(df), np.array([["abc"], ["na"], ["de"]], dtype=object) + ) + + # Impute pandas array of string types without any missing values. 
+ df = pd.DataFrame({"feature": pd.Series(["abc", "de", "fgh"], dtype="string")}) + imputer = SimpleImputer(fill_value="ok", strategy="constant") + _assert_array_equal_and_same_dtype( + imputer.fit_transform(df), np.array([["abc"], ["de"], ["fgh"]], dtype=object) + ) + + # Impute pandas array of integer types. + df = pd.DataFrame({"feature": pd.Series([1, None, 3], dtype="Int64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-1) + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64") + ) + + # Use `np.nan` also works. + imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=-1) + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64") + ) + + # Impute pandas array of integer types with 'median' strategy. + df = pd.DataFrame({"feature": pd.Series([1, None, 2, 3], dtype="Int64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="median") + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype="float64") + ) + + # Impute pandas array of integer types with 'mean' strategy. + df = pd.DataFrame({"feature": pd.Series([1, None, 2], dtype="Int64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="mean") + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype="float64") + ) + + # Impute pandas array of float types. + df = pd.DataFrame({"feature": pd.Series([1.0, None, 3.0], dtype="float64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-2.0) + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype="float64") + ) + + # Impute pandas array of float types with 'median' strategy. + df = pd.DataFrame({"feature": pd.Series([1.0, None, 2.0, 3.0], dtype="float64")}) + imputer = SimpleImputer(missing_values=pd.NA, strategy="median") + _assert_allclose_and_same_dtype( + imputer.fit_transform(df), + np.array([[1.0], [2.0], [2.0], [3.0]], dtype="float64"), + ) + + +def test_missing_indicator_feature_names_out(): + """Check that missing indicator return the feature names with a prefix.""" + pd = pytest.importorskip("pandas") + + missing_values = np.nan + X = pd.DataFrame( + [ + [missing_values, missing_values, 1, missing_values], + [4, missing_values, 2, 10], + ], + columns=["a", "b", "c", "d"], + ) + + indicator = MissingIndicator(missing_values=missing_values).fit(X) + feature_names = indicator.get_feature_names_out() + expected_names = ["missingindicator_a", "missingindicator_b", "missingindicator_d"] + assert_array_equal(expected_names, feature_names) + + +def test_imputer_lists_fit_transform(): + """Check transform uses object dtype when fitted on an object dtype. + + Non-regression test for #19572. 
+ """ + + X = [["a", "b"], ["c", "b"], ["a", "a"]] + imp_frequent = SimpleImputer(strategy="most_frequent").fit(X) + X_trans = imp_frequent.transform([[np.nan, np.nan]]) + assert X_trans.dtype == object + assert_array_equal(X_trans, [["a", "b"]]) + + +@pytest.mark.parametrize("dtype_test", [np.float32, np.float64]) +def test_imputer_transform_preserves_numeric_dtype(dtype_test): + """Check transform preserves numeric dtype independent of fit dtype.""" + X = np.asarray( + [[1.2, 3.4, np.nan], [np.nan, 1.2, 1.3], [4.2, 2, 1]], dtype=np.float64 + ) + imp = SimpleImputer().fit(X) + + X_test = np.asarray([[np.nan, np.nan, np.nan]], dtype=dtype_test) + X_trans = imp.transform(X_test) + assert X_trans.dtype == dtype_test + + +@pytest.mark.parametrize("array_type", ["array", "sparse"]) +@pytest.mark.parametrize("keep_empty_features", [True, False]) +def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features): + """Check the behaviour of `keep_empty_features` with `strategy='constant'. + For backward compatibility, a column full of missing values will always be + fill and never dropped. + """ + X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]]) + X = _convert_container(X, array_type) + fill_value = 10 + imputer = SimpleImputer( + strategy="constant", + fill_value=fill_value, + keep_empty_features=keep_empty_features, + ) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + assert X_imputed.shape == X.shape + constant_feature = ( + X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0] + ) + assert_array_equal(constant_feature, fill_value) + + +@pytest.mark.parametrize("array_type", ["array", "sparse"]) +@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) +@pytest.mark.parametrize("keep_empty_features", [True, False]) +def test_simple_imputer_keep_empty_features(strategy, array_type, keep_empty_features): + """Check the behaviour of `keep_empty_features` with all strategies but + 'constant'. + """ + X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]]) + X = _convert_container(X, array_type) + imputer = SimpleImputer(strategy=strategy, keep_empty_features=keep_empty_features) + + for method in ["fit_transform", "transform"]: + X_imputed = getattr(imputer, method)(X) + if keep_empty_features: + assert X_imputed.shape == X.shape + constant_feature = ( + X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0] + ) + assert_array_equal(constant_feature, 0) + else: + assert X_imputed.shape == (X.shape[0], X.shape[1] - 1) + + +def test_simple_imputer_constant_fill_value_casting(): + """Check that we raise a proper error message when we cannot cast the fill value + to the input data type. Otherwise, check that the casting is done properly. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28309 + """ + # cannot cast fill_value at fit + fill_value = 1.5 + X_int64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.int64) + imputer = SimpleImputer( + strategy="constant", fill_value=fill_value, missing_values=2 + ) + err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast" + with pytest.raises(ValueError, match=re.escape(err_msg)): + imputer.fit(X_int64) + + # cannot cast fill_value at transform + X_float64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float64) + imputer.fit(X_float64) + err_msg = ( + f"The dtype of the filling value (i.e. 
{imputer.statistics_.dtype!r}) " + "cannot be cast" + ) + with pytest.raises(ValueError, match=re.escape(err_msg)): + imputer.transform(X_int64) + + # check that no error is raised when having the same kind of dtype + fill_value_list = [np.float64(1.5), 1.5, 1] + X_float32 = X_float64.astype(np.float32) + + for fill_value in fill_value_list: + imputer = SimpleImputer( + strategy="constant", fill_value=fill_value, missing_values=2 + ) + X_trans = imputer.fit_transform(X_float32) + assert X_trans.dtype == X_float32.dtype diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py new file mode 100644 index 0000000000000000000000000000000000000000..141c2ea90dbd9b3b4db6277e0c59adcf106931c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py @@ -0,0 +1,547 @@ +import numpy as np +import pytest + +from sklearn import config_context +from sklearn.impute import KNNImputer +from sklearn.metrics.pairwise import nan_euclidean_distances, pairwise_distances +from sklearn.neighbors import KNeighborsRegressor +from sklearn.utils._testing import assert_allclose + + +@pytest.mark.parametrize("weights", ["uniform", "distance"]) +@pytest.mark.parametrize("n_neighbors", range(1, 6)) +def test_knn_imputer_shape(weights, n_neighbors): + # Verify the shapes of the imputed matrix for different weights and + # number of neighbors. + n_rows = 10 + n_cols = 2 + X = np.random.rand(n_rows, n_cols) + X[0, 0] = np.nan + + imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights) + X_imputed = imputer.fit_transform(X) + assert X_imputed.shape == (n_rows, n_cols) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_default_with_invalid_input(na): + # Test imputation with default values and invalid input + + # Test with inf present + X = np.array( + [ + [np.inf, 1, 1, 2, na], + [2, 1, 2, 2, 3], + [3, 2, 3, 3, 8], + [na, 6, 0, 5, 13], + [na, 7, 0, 7, 8], + [6, 6, 2, 5, 7], + ] + ) + with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"): + KNNImputer(missing_values=na).fit(X) + + # Test with inf present in matrix passed in transform() + X = np.array( + [ + [np.inf, 1, 1, 2, na], + [2, 1, 2, 2, 3], + [3, 2, 3, 3, 8], + [na, 6, 0, 5, 13], + [na, 7, 0, 7, 8], + [6, 6, 2, 5, 7], + ] + ) + + X_fit = np.array( + [ + [0, 1, 1, 2, na], + [2, 1, 2, 2, 3], + [3, 2, 3, 3, 8], + [na, 6, 0, 5, 13], + [na, 7, 0, 7, 8], + [6, 6, 2, 5, 7], + ] + ) + imputer = KNNImputer(missing_values=na).fit(X_fit) + with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"): + imputer.transform(X) + + # Test with missing_values=0 when NaN present + imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform") + X = np.array( + [ + [np.nan, 0, 0, 0, 5], + [np.nan, 1, 0, np.nan, 3], + [np.nan, 2, 0, 0, 0], + [np.nan, 6, 0, 5, 13], + ] + ) + msg = "Input X contains NaN" + with pytest.raises(ValueError, match=msg): + imputer.fit(X) + + X = np.array( + [ + [0, 0], + [np.nan, 2], + ] + ) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_removes_all_na_features(na): + X = np.array( + [ + [1, 1, na, 1, 1, 1.0], + [2, 3, na, 2, 2, 2], + [3, 4, na, 3, 3, na], + [6, 4, na, na, 6, 6], + ] + ) + knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X) + + X_transform = knn.transform(X) + assert not np.isnan(X_transform).any() + assert X_transform.shape == (4, 5) + + X_test = np.arange(0, 12).reshape(2, 6) + X_transform = 
knn.transform(X_test) + assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_zero_nan_imputes_the_same(na): + # Test with an imputable matrix and compare with different missing_values + X_zero = np.array( + [ + [1, 0, 1, 1, 1.0], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 0], + [6, 6, 0, 6, 6], + ] + ) + + X_nan = np.array( + [ + [1, na, 1, 1, 1.0], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, na], + [6, 6, na, 6, 6], + ] + ) + + X_imputed = np.array( + [ + [1, 2.5, 1, 1, 1.0], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 1.5], + [6, 6, 2.5, 6, 6], + ] + ) + + imputer_zero = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform") + + imputer_nan = KNNImputer(missing_values=na, n_neighbors=2, weights="uniform") + + assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed) + assert_allclose( + imputer_zero.fit_transform(X_zero), imputer_nan.fit_transform(X_nan) + ) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_verify(na): + # Test with an imputable matrix + X = np.array( + [ + [1, 0, 0, 1], + [2, 1, 2, na], + [3, 2, 3, na], + [na, 4, 5, 5], + [6, na, 6, 7], + [8, 8, 8, 8], + [16, 15, 18, 19], + ] + ) + + X_imputed = np.array( + [ + [1, 0, 0, 1], + [2, 1, 2, 8], + [3, 2, 3, 8], + [4, 4, 5, 5], + [6, 3, 6, 7], + [8, 8, 8, 8], + [16, 15, 18, 19], + ] + ) + + imputer = KNNImputer(missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + # Test when there is not enough neighbors + X = np.array( + [ + [1, 0, 0, na], + [2, 1, 2, na], + [3, 2, 3, na], + [4, 4, 5, na], + [6, 7, 6, na], + [8, 8, 8, na], + [20, 20, 20, 20], + [22, 22, 22, 22], + ] + ) + + # Not enough neighbors, use column mean from training + X_impute_value = (20 + 22) / 2 + X_imputed = np.array( + [ + [1, 0, 0, X_impute_value], + [2, 1, 2, X_impute_value], + [3, 2, 3, X_impute_value], + [4, 4, 5, X_impute_value], + [6, 7, 6, X_impute_value], + [8, 8, 8, X_impute_value], + [20, 20, 20, 20], + [22, 22, 22, 22], + ] + ) + + imputer = KNNImputer(missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + # Test when data in fit() and transform() are different + X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 16]]) + + X1 = np.array([[1, 0], [3, 2], [4, na]]) + + X_2_1 = (0 + 3 + 6 + 7 + 8) / 5 + X1_imputed = np.array([[1, 0], [3, 2], [4, X_2_1]]) + + imputer = KNNImputer(missing_values=na) + assert_allclose(imputer.fit(X).transform(X1), X1_imputed) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_one_n_neighbors(na): + X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]]) + + X_imputed = np.array([[0, 0], [4, 2], [4, 3], [5, 3], [7, 7], [7, 8], [14, 13]]) + + imputer = KNNImputer(n_neighbors=1, missing_values=na) + + assert_allclose(imputer.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_all_samples_are_neighbors(na): + X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]]) + + X_imputed = np.array([[0, 0], [6, 2], [4, 3], [5, 5.5], [7, 7], [6, 8], [14, 13]]) + + n_neighbors = X.shape[0] - 1 + imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na) + + assert_allclose(imputer.fit_transform(X), X_imputed) + + n_neighbors = X.shape[0] + imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na) + assert_allclose(imputer_plus1.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_weight_uniform(na): + X = np.array([[0, 
0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]) + + # Test with "uniform" weight (or unweighted) + X_imputed_uniform = np.array( + [[0, 0], [5, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] + ) + + imputer = KNNImputer(weights="uniform", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_uniform) + + # Test with "callable" weight + def no_weight(dist): + return None + + imputer = KNNImputer(weights=no_weight, missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_uniform) + + # Test with "callable" uniform weight + def uniform_weight(dist): + return np.ones_like(dist) + + imputer = KNNImputer(weights=uniform_weight, missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_uniform) + + +@pytest.mark.parametrize("na", [np.nan, -1]) +def test_knn_imputer_weight_distance(na): + X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]) + + # Test with "distance" weight + nn = KNeighborsRegressor(metric="euclidean", weights="distance") + X_rows_idx = [0, 2, 3, 4, 5, 6] + nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0]) + knn_imputed_value = nn.predict(X[1:2, 1:])[0] + + # Manual calculation + X_neighbors_idx = [0, 2, 3, 4, 5] + dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na) + weights = 1 / dist[:, X_neighbors_idx].ravel() + manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights) + + X_imputed_distance1 = np.array( + [[0, 0], [manual_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] + ) + + # NearestNeighbor calculation + X_imputed_distance2 = np.array( + [[0, 0], [knn_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] + ) + + imputer = KNNImputer(weights="distance", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed_distance1) + assert_allclose(imputer.fit_transform(X), X_imputed_distance2) + + # Test with weights = "distance" and n_neighbors=2 + X = np.array( + [ + [na, 0, 0], + [2, 1, 2], + [3, 2, 3], + [4, 5, 5], + ] + ) + + # neighbors are rows 1, 2, the nan_euclidean_distances are: + dist_0_1 = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2)) + dist_0_2 = np.sqrt((3 / 2) * ((2 - 0) ** 2 + (3 - 0) ** 2)) + imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2]) + + X_imputed = np.array( + [ + [imputed_value, 0, 0], + [2, 1, 2], + [3, 2, 3], + [4, 5, 5], + ] + ) + + imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + # Test with varying missingness patterns + X = np.array( + [ + [1, 0, 0, 1], + [0, na, 1, na], + [1, 1, 1, na], + [0, 1, 0, 0], + [0, 0, 0, 0], + [1, 0, 1, 1], + [10, 10, 10, 10], + ] + ) + + # Get weights of donor neighbors + dist = nan_euclidean_distances(X, missing_values=na) + r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]] + r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]] + r1c1_nbor_wt = 1 / r1c1_nbor_dists + r1c3_nbor_wt = 1 / r1c3_nbor_dists + + r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]] + r2c3_nbor_wt = 1 / r2c3_nbor_dists + + # Collect donor values + col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy() + col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy() + + # Final imputed values + r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt) + r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt) + r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt) + + X_imputed = np.array( + [ + [1, 0, 0, 1], + [0, r1c1_imp, 1, r1c3_imp], + [1, 1, 1, r2c3_imp], + [0, 1, 0, 0], + 
[0, 0, 0, 0], + [1, 0, 1, 1], + [10, 10, 10, 10], + ] + ) + + imputer = KNNImputer(weights="distance", missing_values=na) + assert_allclose(imputer.fit_transform(X), X_imputed) + + X = np.array( + [ + [0, 0, 0, na], + [1, 1, 1, na], + [2, 2, na, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [na, 7, 7, 7], + ] + ) + + dist = pairwise_distances( + X, metric="nan_euclidean", squared=False, missing_values=na + ) + + # Calculate weights + r0c3_w = 1.0 / dist[0, 2:-1] + r1c3_w = 1.0 / dist[1, 2:-1] + r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)] + r7c0_w = 1.0 / dist[7, 2:7] + + # Calculate weighted averages + r0c3 = np.average(X[2:-1, -1], weights=r0c3_w) + r1c3 = np.average(X[2:-1, -1], weights=r1c3_w) + r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w) + r7c0 = np.average(X[2:7, 0], weights=r7c0_w) + + X_imputed = np.array( + [ + [0, 0, 0, r0c3], + [1, 1, 1, r1c3], + [2, 2, r2c2, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [r7c0, 7, 7, 7], + ] + ) + + imputer_comp_wt = KNNImputer(missing_values=na, weights="distance") + assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed) + + +def test_knn_imputer_callable_metric(): + # Define callable metric that returns the l1 norm: + def custom_callable(x, y, missing_values=np.nan, squared=False): + x = np.ma.array(x, mask=np.isnan(x)) + y = np.ma.array(y, mask=np.isnan(y)) + dist = np.nansum(np.abs(x - y)) + return dist + + X = np.array([[4, 3, 3, np.nan], [6, 9, 6, 9], [4, 8, 6, 9], [np.nan, 9, 11, 10.0]]) + + X_0_3 = (9 + 9) / 2 + X_3_0 = (6 + 4) / 2 + X_imputed = np.array( + [[4, 3, 3, X_0_3], [6, 9, 6, 9], [4, 8, 6, 9], [X_3_0, 9, 11, 10.0]] + ) + + imputer = KNNImputer(n_neighbors=2, metric=custom_callable) + assert_allclose(imputer.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("working_memory", [None, 0]) +@pytest.mark.parametrize("na", [-1, np.nan]) +# Note that we use working_memory=0 to ensure that chunking is tested, even +# for a small dataset. However, it should raise a UserWarning that we ignore. 
+@pytest.mark.filterwarnings("ignore:adhere to working_memory") +def test_knn_imputer_with_simple_example(na, working_memory): + X = np.array( + [ + [0, na, 0, na], + [1, 1, 1, na], + [2, 2, na, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [na, 7, 7, 7], + ] + ) + + r0c1 = np.mean(X[1:6, 1]) + r0c3 = np.mean(X[2:-1, -1]) + r1c3 = np.mean(X[2:-1, -1]) + r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2]) + r7c0 = np.mean(X[2:-1, 0]) + + X_imputed = np.array( + [ + [0, r0c1, 0, r0c3], + [1, 1, 1, r1c3], + [2, 2, r2c2, 2], + [3, 3, 3, 3], + [4, 4, 4, 4], + [5, 5, 5, 5], + [6, 6, 6, 6], + [r7c0, 7, 7, 7], + ] + ) + + with config_context(working_memory=working_memory): + imputer_comp = KNNImputer(missing_values=na) + assert_allclose(imputer_comp.fit_transform(X), X_imputed) + + +@pytest.mark.parametrize("na", [-1, np.nan]) +@pytest.mark.parametrize("weights", ["uniform", "distance"]) +def test_knn_imputer_not_enough_valid_distances(na, weights): + # Samples with needed feature has nan distance + X1 = np.array([[na, 11], [na, 1], [3, na]]) + X1_imputed = np.array([[3, 11], [3, 1], [3, 6]]) + + knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights) + assert_allclose(knn.fit_transform(X1), X1_imputed) + + X2 = np.array([[4, na]]) + X2_imputed = np.array([[4, 6]]) + assert_allclose(knn.transform(X2), X2_imputed) + + +@pytest.mark.parametrize("na", [-1, np.nan]) +def test_knn_imputer_drops_all_nan_features(na): + X1 = np.array([[na, 1], [na, 2]]) + knn = KNNImputer(missing_values=na, n_neighbors=1) + X1_expected = np.array([[1], [2]]) + assert_allclose(knn.fit_transform(X1), X1_expected) + + X2 = np.array([[1, 2], [3, na]]) + X2_expected = np.array([[2], [1.5]]) + assert_allclose(knn.transform(X2), X2_expected) + + +@pytest.mark.parametrize("working_memory", [None, 0]) +@pytest.mark.parametrize("na", [-1, np.nan]) +def test_knn_imputer_distance_weighted_not_enough_neighbors(na, working_memory): + X = np.array([[3, na], [2, na], [na, 4], [5, 6], [6, 8], [na, 5]]) + + dist = pairwise_distances( + X, metric="nan_euclidean", squared=False, missing_values=na + ) + + X_01 = np.average(X[3:5, 1], weights=1 / dist[0, 3:5]) + X_11 = np.average(X[3:5, 1], weights=1 / dist[1, 3:5]) + X_20 = np.average(X[3:5, 0], weights=1 / dist[2, 3:5]) + X_50 = np.average(X[3:5, 0], weights=1 / dist[5, 3:5]) + + X_expected = np.array([[3, X_01], [2, X_11], [X_20, 4], [5, 6], [6, 8], [X_50, 5]]) + + with config_context(working_memory=working_memory): + knn_3 = KNNImputer(missing_values=na, n_neighbors=3, weights="distance") + assert_allclose(knn_3.fit_transform(X), X_expected) + + knn_4 = KNNImputer(missing_values=na, n_neighbors=4, weights="distance") + assert_allclose(knn_4.fit_transform(X), X_expected) + + +@pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)]) +def test_knn_tags(na, allow_nan): + knn = KNNImputer(missing_values=na) + assert knn._get_tags()["allow_nan"] == allow_nan diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..421f22008cce618fec1ab64616d43ee836e2abbe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_data.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f91bd812f9be0cc264852243a531c9c0152e9fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_data.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6710d4da844087ebb1a26aff6c28542b8782d03e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_encoders.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_encoders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbd30b911a84da2af03a7d71e6d9a1eb39a7b27f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_encoders.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_function_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_function_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3869d698259dbcaa74ff6483f8be5dee276b9b6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_function_transformer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e89c0bebfa9c0ce01ce08a5e41b0898f56c8648 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b73d6618d44399ae42d45726a9423d3fc3815a2b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_target_encoder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_target_encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b6640f9143c7912c7e9bde2b3b1f307f8be3f2d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_target_encoder.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36bc8b5e0f4206b8707d7be1102077804a72a521 Binary files 
/dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c59bd440ee54987211e8bce33960c149159ca8f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_discretization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_discretization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1c4f36b34d43566db23eedd9de5decdff6f847d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_discretization.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_encoders.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_encoders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a65d77b75a177ea11434e1f962b554082cf3d7e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_encoders.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26e5141ac3cb1217e17f2b4c10ca89c9b4d21cab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_label.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_label.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e27832f704513008e3fe8b70899859ae44556ee Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_label.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eb0ed608418156974830403f482fa2eb322e6e8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc differ