diff --git a/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..60c8b08f8bf94841a258ffc47254149ef58e7d2d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abdbb6e5879602ced860d8749aae3e59dc1d3279503c862d79d0a45f0799aac8
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d1a69903317e86bfeef705bf398c507747b9fc29
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b91267d51d0a7708191247e92c23bd9e3e3da1637f74347e0b8405b2032d84f0
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9b1764450751540f11b3091346dc27332feadf8c
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e214d708817677d080df793018a70ebfcb45824fdfb5b4d6aeffccd1376d9bc5
+size 33555533
diff --git a/ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c9a916a0ecee237cb1247d2aad18536146b7e5f5
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9def264e9fa9726d9c0d521ebc4cfb6f9add5248c53f6a40e7df2420a7316881
+size 9372
diff --git a/ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..58ceea7b60d070dc1e42479000babcb0fdbaf52d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0f660069a4b9e01c9a6abe669b43072ddd0add8ff5668f1cac8a94f71da23b8
+size 9387
diff --git a/ckpts/universal/global_step40/zero/22.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/22.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2765bf8764e3dce72c123b2d292ed8f51705790e
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/22.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61d211221b0b9dc9c67d7fc07a8da293905b0c719d41af55533a9300564bdff0
+size 9293
diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8ced8aa9d29242760ae40c0062bd79432d55675
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py
@@ -0,0 +1,115 @@
+"""
+Utilities useful during the build.
+""" +# author: Andy Mueller, Gael Varoquaux +# license: BSD + + +import contextlib +import os + +import sklearn + +from .._min_dependencies import CYTHON_MIN_VERSION +from ..externals._packaging.version import parse +from .openmp_helpers import check_openmp_support +from .pre_build_helpers import basic_check_build + +DEFAULT_ROOT = "sklearn" + + +def _check_cython_version(): + message = ( + "Please install Cython with a version >= {0} in order " + "to build a scikit-learn from source." + ).format(CYTHON_MIN_VERSION) + try: + import Cython + except ModuleNotFoundError as e: + # Re-raise with more informative error message instead: + raise ModuleNotFoundError(message) from e + + if parse(Cython.__version__) < parse(CYTHON_MIN_VERSION): + message += " The current version of Cython is {} installed in {}.".format( + Cython.__version__, Cython.__path__ + ) + raise ValueError(message) + + +def cythonize_extensions(extension): + """Check that a recent Cython is available and cythonize extensions""" + _check_cython_version() + from Cython.Build import cythonize + + # Fast fail before cythonization if compiler fails compiling basic test + # code even without OpenMP + basic_check_build() + + # check simple compilation with OpenMP. If it fails scikit-learn will be + # built without OpenMP and the test test_openmp_supported in the test suite + # will fail. + # `check_openmp_support` compiles a small test program to see if the + # compilers are properly configured to build with OpenMP. This is expensive + # and we only want to call this function once. + # The result of this check is cached as a private attribute on the sklearn + # module (only at build-time) to be used in the build_ext subclass defined + # in the top-level setup.py file to actually build the compiled extensions + # with OpenMP flags if needed. + sklearn._OPENMP_SUPPORTED = check_openmp_support() + + n_jobs = 1 + with contextlib.suppress(ImportError): + import joblib + + n_jobs = joblib.cpu_count() + + # Additional checks for Cython + cython_enable_debug_directives = ( + os.environ.get("SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES", "0") != "0" + ) + + compiler_directives = { + "language_level": 3, + "boundscheck": cython_enable_debug_directives, + "wraparound": False, + "initializedcheck": False, + "nonecheck": False, + "cdivision": True, + "profile": False, + } + + return cythonize( + extension, + nthreads=n_jobs, + compiler_directives=compiler_directives, + annotate=False, + ) + + +def gen_from_templates(templates): + """Generate cython files from a list of templates""" + # Lazy import because cython is not a runtime dependency. 
+ from Cython import Tempita + + for template in templates: + outfile = template.replace(".tp", "") + + # if the template is not updated, no need to output the cython file + if not ( + os.path.exists(outfile) + and os.stat(template).st_mtime < os.stat(outfile).st_mtime + ): + with open(template, "r") as f: + tmpl = f.read() + + tmpl_ = Tempita.sub(tmpl) + + warn_msg = ( + "# WARNING: Do not edit this file directly.\n" + f"# It is automatically generated from {template!r}.\n" + "# Changes must be made there.\n\n" + ) + + with open(outfile, "w") as f: + f.write(warn_msg) + f.write(tmpl_) diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d142fe3d00bd0465123b4b1b5a899e8479ada89 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f7d2ccc1e5dd8c0dba4d148ca67cd74838e2b92 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef042587b0941528f760559815ae8aa97e7fd7dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b689e4f395811e73cc9bc048162d7b59a7d3e515 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9fa32a72d6578637d5bb73229164b1bfdb44727 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py b/venv/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..9172d40830bb970488431e31e4d11ef474dfe349 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py @@ -0,0 +1,123 @@ +"""Helpers for OpenMP support during the build.""" + +# This code is adapted for a large part from the astropy openmp helpers, which +# can be found at: https://github.com/astropy/extension-helpers/blob/master/extension_helpers/_openmp_helpers.py # noqa + + +import os +import sys +import textwrap +import warnings + +from .pre_build_helpers import 
compile_test_program + + +def get_openmp_flag(): + if sys.platform == "win32": + return ["/openmp"] + elif sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""): + # -fopenmp can't be passed as compile flag when using Apple-clang. + # OpenMP support has to be enabled during preprocessing. + # + # For example, our macOS wheel build jobs use the following environment + # variables to build with Apple-clang and the brew installed "libomp": + # + # export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp" + # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include" + # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include" + # export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib + # -L/usr/local/opt/libomp/lib -lomp" + return [] + # Default flag for GCC and clang: + return ["-fopenmp"] + + +def check_openmp_support(): + """Check whether OpenMP test code can be compiled and run""" + if "PYODIDE_PACKAGE_ABI" in os.environ: + # Pyodide doesn't support OpenMP + return False + + code = textwrap.dedent("""\ + #include + #include + int main(void) { + #pragma omp parallel + printf("nthreads=%d\\n", omp_get_num_threads()); + return 0; + } + """) + + extra_preargs = os.getenv("LDFLAGS", None) + if extra_preargs is not None: + extra_preargs = extra_preargs.strip().split(" ") + # FIXME: temporary fix to link against system libraries on linux + # "-Wl,--sysroot=/" should be removed + extra_preargs = [ + flag + for flag in extra_preargs + if flag.startswith(("-L", "-Wl,-rpath", "-l", "-Wl,--sysroot=/")) + ] + + extra_postargs = get_openmp_flag() + + openmp_exception = None + try: + output = compile_test_program( + code, extra_preargs=extra_preargs, extra_postargs=extra_postargs + ) + + if output and "nthreads=" in output[0]: + nthreads = int(output[0].strip().split("=")[1]) + openmp_supported = len(output) == nthreads + elif "PYTHON_CROSSENV" in os.environ: + # Since we can't run the test program when cross-compiling + # assume that openmp is supported if the program can be + # compiled. + openmp_supported = True + else: + openmp_supported = False + + except Exception as exception: + # We could be more specific and only catch: CompileError, LinkError, + # and subprocess.CalledProcessError. + # setuptools introduced CompileError and LinkError, but that requires + # version 61.1. Even the latest version of Ubuntu (22.04LTS) only + # ships with 59.6. So for now we catch all exceptions and reraise a + # generic exception with the original error message instead: + openmp_supported = False + openmp_exception = exception + + if not openmp_supported: + if os.getenv("SKLEARN_FAIL_NO_OPENMP"): + raise Exception( + "Failed to build scikit-learn with OpenMP support" + ) from openmp_exception + else: + message = textwrap.dedent(""" + + *********** + * WARNING * + *********** + + It seems that scikit-learn cannot be built with OpenMP. + + - Make sure you have followed the installation instructions: + + https://scikit-learn.org/dev/developers/advanced_installation.html + + - If your compiler supports OpenMP but you still see this + message, please submit a bug report at: + + https://github.com/scikit-learn/scikit-learn/issues + + - The build will continue with OpenMP-based parallelism + disabled. Note however that some estimators will run in + sequential mode instead of leveraging thread-based + parallelism. 
+ + *** + """) + warnings.warn(message) + + return openmp_supported diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py b/venv/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..f3eb054bb037ec5b884b471fdcd148484f88c3f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py @@ -0,0 +1,73 @@ +"""Helpers to check build environment before actual build of scikit-learn""" + +import glob +import os +import subprocess +import sys +import tempfile +import textwrap + +from setuptools.command.build_ext import customize_compiler, new_compiler + + +def compile_test_program(code, extra_preargs=None, extra_postargs=None): + """Check that some C code can be compiled and run""" + ccompiler = new_compiler() + customize_compiler(ccompiler) + + start_dir = os.path.abspath(".") + + with tempfile.TemporaryDirectory() as tmp_dir: + try: + os.chdir(tmp_dir) + + # Write test program + with open("test_program.c", "w") as f: + f.write(code) + + os.mkdir("objects") + + # Compile, test program + ccompiler.compile( + ["test_program.c"], output_dir="objects", extra_postargs=extra_postargs + ) + + # Link test program + objects = glob.glob(os.path.join("objects", "*" + ccompiler.obj_extension)) + ccompiler.link_executable( + objects, + "test_program", + extra_preargs=extra_preargs, + extra_postargs=extra_postargs, + ) + + if "PYTHON_CROSSENV" not in os.environ: + # Run test program if not cross compiling + # will raise a CalledProcessError if return code was non-zero + output = subprocess.check_output("./test_program") + output = output.decode(sys.stdout.encoding or "utf-8").splitlines() + else: + # Return an empty output if we are cross compiling + # as we cannot run the test_program + output = [] + except Exception: + raise + finally: + os.chdir(start_dir) + + return output + + +def basic_check_build(): + """Check basic compilation and linking of C code""" + if "PYODIDE_PACKAGE_ABI" in os.environ: + # The following check won't work in pyodide + return + + code = textwrap.dedent("""\ + #include + int main(void) { + return 0; + } + """) + compile_test_program(code) diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py b/venv/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py new file mode 100644 index 0000000000000000000000000000000000000000..8da4b9c0e7ace17320687fa88534143530381ea9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py @@ -0,0 +1,57 @@ +import argparse +import os + +from Cython import Tempita as tempita + +# XXX: If this import ever fails (does it really?), vendor either +# cython.tempita or numpy/npy_tempita. + + +def process_tempita(fromfile, outfile=None): + """Process tempita templated file and write out the result. + + The template file is expected to end in `.c.tp` or `.pyx.tp`: + E.g. processing `template.c.in` generates `template.c`. 
+ + """ + with open(fromfile, "r", encoding="utf-8") as f: + template_content = f.read() + + template = tempita.Template(template_content) + content = template.substitute() + + with open(outfile, "w", encoding="utf-8") as f: + f.write(content) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("infile", type=str, help="Path to the input file") + parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory") + parser.add_argument( + "-i", + "--ignore", + type=str, + help=( + "An ignored input - may be useful to add a " + "dependency between custom targets" + ), + ) + args = parser.parse_args() + + if not args.infile.endswith(".tp"): + raise ValueError(f"Unexpected extension: {args.infile}") + + if not args.outdir: + raise ValueError("Missing `--outdir` argument to tempita.py") + + outdir_abs = os.path.join(os.getcwd(), args.outdir) + outfile = os.path.join( + outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0] + ) + + process_tempita(args.infile, outfile) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/sklearn/_build_utils/version.py b/venv/lib/python3.10/site-packages/sklearn/_build_utils/version.py new file mode 100644 index 0000000000000000000000000000000000000000..1f8688a008e9d13851333e8c336a93692d4d4419 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_build_utils/version.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +""" Extract version number from __init__.py +""" + +import os + +sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py") + +data = open(sklearn_init).readlines() +version_line = next(line for line in data if line.startswith("__version__")) + +version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "") + +print(version) diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/__init__.py b/venv/lib/python3.10/site-packages/sklearn/compose/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b137cdf9e07f2f275c5c78c7ab6ab289c23413f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/compose/__init__.py @@ -0,0 +1,20 @@ +"""Meta-estimators for building composite models with transformers + +In addition to its current contents, this module will eventually be home to +refurbished versions of Pipeline and FeatureUnion. + +""" + +from ._column_transformer import ( + ColumnTransformer, + make_column_selector, + make_column_transformer, +) +from ._target import TransformedTargetRegressor + +__all__ = [ + "ColumnTransformer", + "make_column_transformer", + "TransformedTargetRegressor", + "make_column_selector", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py b/venv/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..78b66df28c94c4d6c147d40b918824f02c317345 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py @@ -0,0 +1,1463 @@ +""" +The :mod:`sklearn.compose._column_transformer` module implements utilities +to work with heterogeneous data and to apply different transformers to +different columns. 
+""" + +# Author: Andreas Mueller +# Joris Van den Bossche +# License: BSD +import warnings +from collections import Counter +from itertools import chain +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from ..base import TransformerMixin, _fit_context, clone +from ..pipeline import _fit_transform_one, _name_estimators, _transform_one +from ..preprocessing import FunctionTransformer +from ..utils import Bunch, _get_column_indices, _safe_indexing +from ..utils._estimator_html_repr import _VisualBlock +from ..utils._metadata_requests import METHODS +from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions +from ..utils._set_output import ( + _get_container_adapter, + _get_output_config, + _safe_set_output, +) +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import _BaseComposition +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_feature_names_in, + _get_feature_names, + _is_pandas_df, + _num_samples, + check_array, + check_is_fitted, +) + +__all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"] + + +_ERR_MSG_1DCOLUMN = ( + "1D data passed to a transformer that expects 2D data. " + "Try to specify the column selection as a list of one " + "item instead of a scalar." +) + + +class ColumnTransformer(TransformerMixin, _BaseComposition): + """Applies transformers to columns of an array or pandas DataFrame. + + This estimator allows different columns or column subsets of the input + to be transformed separately and the features generated by each transformer + will be concatenated to form a single feature space. + This is useful for heterogeneous or columnar data, to combine several + feature extraction mechanisms or transformations into a single transformer. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + transformers : list of tuples + List of (name, transformer, columns) tuples specifying the + transformer objects to be applied to subsets of the data. + + name : str + Like in Pipeline and FeatureUnion, this allows the transformer and + its parameters to be set using ``set_params`` and searched in grid + search. + transformer : {'drop', 'passthrough'} or estimator + Estimator must support :term:`fit` and :term:`transform`. + Special-cased strings 'drop' and 'passthrough' are accepted as + well, to indicate to drop the columns or to pass them through + untransformed, respectively. + columns : str, array-like of str, int, array-like of int, \ + array-like of bool, slice or callable + Indexes the data on its second axis. Integers are interpreted as + positional columns, while strings can reference DataFrame columns + by name. A scalar string or int should be used where + ``transformer`` expects X to be a 1d array-like (vector), + otherwise a 2d array will be passed to the transformer. + A callable is passed the input data `X` and can return any of the + above. To select multiple columns by name or dtype, you can use + :obj:`make_column_selector`. + + remainder : {'drop', 'passthrough'} or estimator, default='drop' + By default, only the specified columns in `transformers` are + transformed and combined in the output, and the non-specified + columns are dropped. (default of ``'drop'``). 
+ By specifying ``remainder='passthrough'``, all remaining columns that + were not specified in `transformers`, but present in the data passed + to `fit` will be automatically passed through. This subset of columns + is concatenated with the output of the transformers. For dataframes, + extra columns not seen during `fit` will be excluded from the output + of `transform`. + By setting ``remainder`` to be an estimator, the remaining + non-specified columns will use the ``remainder`` estimator. The + estimator must support :term:`fit` and :term:`transform`. + Note that using this feature requires that the DataFrame columns + input at :term:`fit` and :term:`transform` have identical order. + + sparse_threshold : float, default=0.3 + If the output of the different transformers contains sparse matrices, + these will be stacked as a sparse matrix if the overall density is + lower than this value. Use ``sparse_threshold=0`` to always return + dense. When the transformed output consists of all dense data, the + stacked result will be dense, and this keyword will be ignored. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + transformer_weights : dict, default=None + Multiplicative weights for features per transformer. The output of the + transformer is multiplied by these weights. Keys are transformer names, + values the weights. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + verbose_feature_names_out : bool, default=True + If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix + all feature names with the name of the transformer that generated that + feature. + If False, :meth:`ColumnTransformer.get_feature_names_out` will not + prefix any feature names and will error if feature names are not + unique. + + .. versionadded:: 1.0 + + Attributes + ---------- + transformers_ : list + The collection of fitted transformers as tuples of (name, + fitted_transformer, column). `fitted_transformer` can be an estimator, + or `'drop'`; `'passthrough'` is replaced with an equivalent + :class:`~sklearn.preprocessing.FunctionTransformer`. In case there were + no columns selected, this will be the unfitted transformer. If there + are remaining columns, the final element is a tuple of the form: + ('remainder', transformer, remaining_columns) corresponding to the + ``remainder`` parameter. If there are remaining columns, then + ``len(transformers_)==len(transformers)+1``, otherwise + ``len(transformers_)==len(transformers)``. + + named_transformers_ : :class:`~sklearn.utils.Bunch` + Read-only attribute to access any transformer by given name. + Keys are transformer names and values are the fitted transformer + objects. + + sparse_output_ : bool + Boolean flag indicating whether the output of ``transform`` is a + sparse matrix or a dense numpy array, which depends on the output + of the individual transformers and the `sparse_threshold` keyword. + + output_indices_ : dict + A dictionary from each transformer name to a slice, where the slice + corresponds to indices in the transformed output. This is useful to + inspect which transformer is responsible for which transformed + feature(s). + + .. versionadded:: 1.0 + + n_features_in_ : int + Number of features seen during :term:`fit`. 
Only defined if the + underlying transformers expose such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + make_column_transformer : Convenience function for + combining the outputs of multiple transformer objects applied to + column subsets of the original feature space. + make_column_selector : Convenience function for selecting + columns based on datatype or the columns name with a regex pattern. + + Notes + ----- + The order of the columns in the transformed feature matrix follows the + order of how the columns are specified in the `transformers` list. + Columns of the original feature matrix that are not specified are + dropped from the resulting transformed feature matrix, unless specified + in the `passthrough` keyword. Those columns specified with `passthrough` + are added at the right to the output of the transformers. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.compose import ColumnTransformer + >>> from sklearn.preprocessing import Normalizer + >>> ct = ColumnTransformer( + ... [("norm1", Normalizer(norm='l1'), [0, 1]), + ... ("norm2", Normalizer(norm='l1'), slice(2, 4))]) + >>> X = np.array([[0., 1., 2., 2.], + ... [1., 1., 0., 1.]]) + >>> # Normalizer scales each row of X to unit norm. A separate scaling + >>> # is applied for the two first and two last elements of each + >>> # row independently. + >>> ct.fit_transform(X) + array([[0. , 1. , 0.5, 0.5], + [0.5, 0.5, 0. , 1. ]]) + + :class:`ColumnTransformer` can be configured with a transformer that requires + a 1d array by setting the column to a string: + + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> from sklearn.preprocessing import MinMaxScaler + >>> import pandas as pd # doctest: +SKIP + >>> X = pd.DataFrame({ + ... "documents": ["First item", "second one here", "Is this the last?"], + ... "width": [3, 4, 5], + ... }) # doctest: +SKIP + >>> # "documents" is a string which configures ColumnTransformer to + >>> # pass the documents column as a 1d array to the CountVectorizer + >>> ct = ColumnTransformer( + ... [("text_preprocess", CountVectorizer(), "documents"), + ... ("num_preprocess", MinMaxScaler(), ["width"])]) + >>> X_trans = ct.fit_transform(X) # doctest: +SKIP + + For a more detailed example of usage, see + :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`. 
+ """ + + _required_parameters = ["transformers"] + + _parameter_constraints: dict = { + "transformers": [list, Hidden(tuple)], + "remainder": [ + StrOptions({"drop", "passthrough"}), + HasMethods(["fit", "transform"]), + HasMethods(["fit_transform", "transform"]), + ], + "sparse_threshold": [Interval(Real, 0, 1, closed="both")], + "n_jobs": [Integral, None], + "transformer_weights": [dict, None], + "verbose": ["verbose"], + "verbose_feature_names_out": ["boolean"], + } + + def __init__( + self, + transformers, + *, + remainder="drop", + sparse_threshold=0.3, + n_jobs=None, + transformer_weights=None, + verbose=False, + verbose_feature_names_out=True, + ): + self.transformers = transformers + self.remainder = remainder + self.sparse_threshold = sparse_threshold + self.n_jobs = n_jobs + self.transformer_weights = transformer_weights + self.verbose = verbose + self.verbose_feature_names_out = verbose_feature_names_out + + @property + def _transformers(self): + """ + Internal list of transformer only containing the name and + transformers, dropping the columns. + + DO NOT USE: This is for the implementation of get_params via + BaseComposition._get_params which expects lists of tuples of len 2. + + To iterate through the transformers, use ``self._iter`` instead. + """ + try: + return [(name, trans) for name, trans, _ in self.transformers] + except (TypeError, ValueError): + return self.transformers + + @_transformers.setter + def _transformers(self, value): + """DO NOT USE: This is for the implementation of set_params via + BaseComposition._get_params which gives lists of tuples of len 2. + """ + try: + self.transformers = [ + (name, trans, col) + for ((name, trans), (_, _, col)) in zip(value, self.transformers) + ] + except (TypeError, ValueError): + self.transformers = value + + def set_output(self, *, transform=None): + """Set the output container when `"transform"` and `"fit_transform"` are called. + + Calling `set_output` will set the output of all estimators in `transformers` + and `transformers_`. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. + """ + super().set_output(transform=transform) + + transformers = ( + trans + for _, trans, _ in chain( + self.transformers, getattr(self, "transformers_", []) + ) + if trans not in {"passthrough", "drop"} + ) + for trans in transformers: + _safe_set_output(trans, transform=transform) + + if self.remainder not in {"passthrough", "drop"}: + _safe_set_output(self.remainder, transform=transform) + + return self + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Returns the parameters given in the constructor as well as the + estimators contained within the `transformers` of the + `ColumnTransformer`. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + return self._get_params("_transformers", deep=deep) + + def set_params(self, **kwargs): + """Set the parameters of this estimator. + + Valid parameter keys can be listed with ``get_params()``. 
Note that you + can directly set the parameters of the estimators contained in + `transformers` of `ColumnTransformer`. + + Parameters + ---------- + **kwargs : dict + Estimator parameters. + + Returns + ------- + self : ColumnTransformer + This estimator. + """ + self._set_params("_transformers", **kwargs) + return self + + def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns): + """ + Generate (name, trans, column, weight) tuples. + + + Parameters + ---------- + fitted : bool + If True, use the fitted transformers (``self.transformers_``) to + iterate through transformers, else use the transformers passed by + the user (``self.transformers``). + + column_as_labels : bool + If True, columns are returned as string labels. If False, columns + are returned as they were given by the user. This can only be True + if the ``ColumnTransformer`` is already fitted. + + skip_drop : bool + If True, 'drop' transformers are filtered out. + + skip_empty_columns : bool + If True, transformers with empty selected columns are filtered out. + + Yields + ------ + A generator of tuples containing: + - name : the name of the transformer + - transformer : the transformer object + - columns : the columns for that transformer + - weight : the weight of the transformer + """ + if fitted: + transformers = self.transformers_ + else: + # interleave the validated column specifiers + transformers = [ + (name, trans, column) + for (name, trans, _), column in zip(self.transformers, self._columns) + ] + # add transformer tuple for remainder + if self._remainder[2]: + transformers = chain(transformers, [self._remainder]) + get_weight = (self.transformer_weights or {}).get + + for name, trans, columns in transformers: + if skip_drop and trans == "drop": + continue + if skip_empty_columns and _is_empty_column_selection(columns): + continue + + if column_as_labels: + # Convert all columns to using their string labels + columns_is_scalar = np.isscalar(columns) + + indices = self._transformer_to_input_indices[name] + columns = self.feature_names_in_[indices] + + if columns_is_scalar: + # selection is done with one dimension + columns = columns[0] + + yield (name, trans, columns, get_weight(name)) + + def _validate_transformers(self): + """Validate names of transformers and the transformers themselves. + + This checks whether given transformers have the required methods, i.e. + `fit` or `fit_transform` and `transform` implemented. + """ + if not self.transformers: + return + + names, transformers, _ = zip(*self.transformers) + + # validate names + self._validate_names(names) + + # validate estimators + for t in transformers: + if t in ("drop", "passthrough"): + continue + if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( + t, "transform" + ): + # Used to validate the transformers in the `transformers` list + raise TypeError( + "All estimators should implement fit and " + "transform, or can be 'drop' or 'passthrough' " + "specifiers. '%s' (type %s) doesn't." % (t, type(t)) + ) + + def _validate_column_callables(self, X): + """ + Converts callable column specifications. + + This stores a dictionary of the form `{step_name: column_indices}` and + calls the `columns` on `X` if `columns` is a callable for a given + transformer. + + The results are then stored in `self._transformer_to_input_indices`. 
+ """ + all_columns = [] + transformer_to_input_indices = {} + for name, _, columns in self.transformers: + if callable(columns): + columns = columns(X) + all_columns.append(columns) + transformer_to_input_indices[name] = _get_column_indices(X, columns) + + self._columns = all_columns + self._transformer_to_input_indices = transformer_to_input_indices + + def _validate_remainder(self, X): + """ + Validates ``remainder`` and defines ``_remainder`` targeting + the remaining columns. + """ + cols = set(chain(*self._transformer_to_input_indices.values())) + remaining = sorted(set(range(self.n_features_in_)) - cols) + self._remainder = ("remainder", self.remainder, remaining) + self._transformer_to_input_indices["remainder"] = remaining + + @property + def named_transformers_(self): + """Access the fitted transformer by name. + + Read-only attribute to access any transformer by given name. + Keys are transformer names and values are the fitted transformer + objects. + """ + # Use Bunch object to improve autocomplete + return Bunch(**{name: trans for name, trans, _ in self.transformers_}) + + def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in): + """Gets feature names of transformer. + + Used in conjunction with self._iter(fitted=True) in get_feature_names_out. + """ + column_indices = self._transformer_to_input_indices[name] + names = feature_names_in[column_indices] + # An actual transformer + if not hasattr(trans, "get_feature_names_out"): + raise AttributeError( + f"Transformer {name} (type {type(trans).__name__}) does " + "not provide get_feature_names_out." + ) + return trans.get_feature_names_out(names) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self) + input_features = _check_feature_names_in(self, input_features) + + # List of tuples (name, feature_names_out) + transformer_with_feature_names_out = [] + for name, trans, *_ in self._iter( + fitted=True, + column_as_labels=False, + skip_empty_columns=True, + skip_drop=True, + ): + feature_names_out = self._get_feature_name_out_for_transformer( + name, trans, input_features + ) + if feature_names_out is None: + continue + transformer_with_feature_names_out.append((name, feature_names_out)) + + if not transformer_with_feature_names_out: + # No feature names + return np.array([], dtype=object) + + return self._add_prefix_for_feature_names_out( + transformer_with_feature_names_out + ) + + def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out): + """Add prefix for feature names out that includes the transformer names. + + Parameters + ---------- + transformer_with_feature_names_out : list of tuples of (str, array-like of str) + The tuple consistent of the transformer's name and its feature names out. + + Returns + ------- + feature_names_out : ndarray of shape (n_features,), dtype=str + Transformed feature names. 
+ """ + if self.verbose_feature_names_out: + # Prefix the feature names out with the transformers name + names = list( + chain.from_iterable( + (f"{name}__{i}" for i in feature_names_out) + for name, feature_names_out in transformer_with_feature_names_out + ) + ) + return np.asarray(names, dtype=object) + + # verbose_feature_names_out is False + # Check that names are all unique without a prefix + feature_names_count = Counter( + chain.from_iterable(s for _, s in transformer_with_feature_names_out) + ) + top_6_overlap = [ + name for name, count in feature_names_count.most_common(6) if count > 1 + ] + top_6_overlap.sort() + if top_6_overlap: + if len(top_6_overlap) == 6: + # There are more than 5 overlapping names, we only show the 5 + # of the feature names + names_repr = str(top_6_overlap[:5])[:-1] + ", ...]" + else: + names_repr = str(top_6_overlap) + raise ValueError( + f"Output feature names: {names_repr} are not unique. Please set " + "verbose_feature_names_out=True to add prefixes to feature names" + ) + + return np.concatenate( + [name for _, name in transformer_with_feature_names_out], + ) + + def _update_fitted_transformers(self, transformers): + """Set self.transformers_ from given transformers. + + Parameters + ---------- + transformers : list of estimators + The fitted estimators as the output of + `self._call_func_on_transformers(func=_fit_transform_one, ...)`. + That function doesn't include 'drop' or transformers for which no + column is selected. 'drop' is kept as is, and for the no-column + transformers the unfitted transformer is put in + `self.transformers_`. + """ + # transformers are fitted; excludes 'drop' cases + fitted_transformers = iter(transformers) + transformers_ = [] + + for name, old, column, _ in self._iter( + fitted=False, + column_as_labels=False, + skip_drop=False, + skip_empty_columns=False, + ): + if old == "drop": + trans = "drop" + elif _is_empty_column_selection(column): + trans = old + else: + trans = next(fitted_transformers) + transformers_.append((name, trans, column)) + + # sanity check that transformers is exhausted + assert not list(fitted_transformers) + self.transformers_ = transformers_ + + def _validate_output(self, result): + """ + Ensure that the output of each transformer is 2D. Otherwise + hstack can raise an error or produce incorrect results. + """ + names = [ + name + for name, _, _, _ in self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ] + for Xs, name in zip(result, names): + if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"): + raise ValueError( + "The output of the '{0}' transformer should be 2D (numpy array, " + "scipy sparse array, dataframe).".format(name) + ) + if _get_output_config("transform", self)["dense"] == "pandas": + return + try: + import pandas as pd + except ImportError: + return + for Xs, name in zip(result, names): + if not _is_pandas_df(Xs): + continue + for col_name, dtype in Xs.dtypes.to_dict().items(): + if getattr(dtype, "na_value", None) is not pd.NA: + continue + if pd.NA not in Xs[col_name].values: + continue + class_name = self.__class__.__name__ + # TODO(1.6): replace warning with ValueError + warnings.warn( + ( + f"The output of the '{name}' transformer for column" + f" '{col_name}' has dtype {dtype} and uses pandas.NA to" + " represent null values. Storing this output in a numpy array" + " can cause errors in downstream scikit-learn estimators, and" + " inefficiencies. 
Starting with scikit-learn version 1.6, this" + " will raise a ValueError. To avoid this problem you can (i)" + " store the output in a pandas DataFrame by using" + f" {class_name}.set_output(transform='pandas') or (ii) modify" + f" the input data or the '{name}' transformer to avoid the" + " presence of pandas.NA (for example by using" + " pandas.DataFrame.astype)." + ), + FutureWarning, + ) + + def _record_output_indices(self, Xs): + """ + Record which transformer produced which column. + """ + idx = 0 + self.output_indices_ = {} + + for transformer_idx, (name, _, _, _) in enumerate( + self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ): + n_columns = Xs[transformer_idx].shape[1] + self.output_indices_[name] = slice(idx, idx + n_columns) + idx += n_columns + + # `_iter` only generates transformers that have a non empty + # selection. Here we set empty slices for transformers that + # generate no output, which are safe for indexing + all_names = [t[0] for t in self.transformers] + ["remainder"] + for name in all_names: + if name not in self.output_indices_: + self.output_indices_[name] = slice(0, 0) + + def _log_message(self, name, idx, total): + if not self.verbose: + return None + return "(%d of %d) Processing %s" % (idx, total, name) + + def _call_func_on_transformers(self, X, y, func, column_as_labels, routed_params): + """ + Private function to fit and/or transform on demand. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + The data to be used in fit and/or transform. + + y : array-like of shape (n_samples,) + Targets. + + func : callable + Function to call, which can be _fit_transform_one or + _transform_one. + + column_as_labels : bool + Used to iterate through transformers. If True, columns are returned + as strings. If False, columns are returned as they were given by + the user. Can be True only if the ``ColumnTransformer`` is already + fitted. + + routed_params : dict + The routed parameters as the output from ``process_routing``. + + Returns + ------- + Return value (transformers and/or transformed X data) depends + on the passed function. + """ + if func is _fit_transform_one: + fitted = False + else: # func is _transform_one + fitted = True + + transformers = list( + self._iter( + fitted=fitted, + column_as_labels=column_as_labels, + skip_drop=True, + skip_empty_columns=True, + ) + ) + try: + jobs = [] + for idx, (name, trans, column, weight) in enumerate(transformers, start=1): + if func is _fit_transform_one: + if trans == "passthrough": + output_config = _get_output_config("transform", self) + trans = FunctionTransformer( + accept_sparse=True, + check_inverse=False, + feature_names_out="one-to-one", + ).set_output(transform=output_config["dense"]) + + extra_args = dict( + message_clsname="ColumnTransformer", + message=self._log_message(name, idx, len(transformers)), + ) + else: # func is _transform_one + extra_args = {} + jobs.append( + delayed(func)( + transformer=clone(trans) if not fitted else trans, + X=_safe_indexing(X, column, axis=1), + y=y, + weight=weight, + **extra_args, + params=routed_params[name], + ) + ) + + return Parallel(n_jobs=self.n_jobs)(jobs) + + except ValueError as e: + if "Expected 2D array, got 1D array instead" in str(e): + raise ValueError(_ERR_MSG_1DCOLUMN) from e + else: + raise + + def fit(self, X, y=None, **params): + """Fit all transformers using X. 
+ + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + Input data, of which specified subsets are used to fit the + transformers. + + y : array-like of shape (n_samples,...), default=None + Targets for supervised learning. + + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``fit`` and + ``transform`` methods. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + self : ColumnTransformer + This estimator. + """ + _raise_for_params(params, self, "fit") + # we use fit_transform to make sure to set sparse_output_ (for which we + # need the transformed data) to have consistent output type in predict + self.fit_transform(X, y=y, **params) + return self + + @_fit_context( + # estimators in ColumnTransformer.transformers are not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None, **params): + """Fit all transformers, transform the data and concatenate results. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + Input data, of which specified subsets are used to fit the + transformers. + + y : array-like of shape (n_samples,), default=None + Targets for supervised learning. + + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``fit`` and + ``transform`` methods. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + X_t : {array-like, sparse matrix} of \ + shape (n_samples, sum_n_components) + Horizontally stacked results of transformers. sum_n_components is the + sum of n_components (output dimension) over transformers. If + any result is a sparse matrix, everything will be converted to + sparse matrices. + """ + _raise_for_params(params, self, "fit_transform") + self._check_feature_names(X, reset=True) + + X = _check_X(X) + # set n_features_in_ attribute + self._check_n_features(X, reset=True) + self._validate_transformers() + n_samples = _num_samples(X) + + self._validate_column_callables(X) + self._validate_remainder(X) + + if _routing_enabled(): + routed_params = process_routing(self, "fit_transform", **params) + else: + routed_params = self._get_empty_routing() + + result = self._call_func_on_transformers( + X, + y, + _fit_transform_one, + column_as_labels=False, + routed_params=routed_params, + ) + + if not result: + self._update_fitted_transformers([]) + # All transformers are None + return np.zeros((n_samples, 0)) + + Xs, transformers = zip(*result) + + # determine if concatenated output will be sparse or not + if any(sparse.issparse(X) for X in Xs): + nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs) + total = sum( + X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs + ) + density = nnz / total + self.sparse_output_ = density < self.sparse_threshold + else: + self.sparse_output_ = False + + self._update_fitted_transformers(transformers) + self._validate_output(Xs) + self._record_output_indices(Xs) + + return self._hstack(list(Xs), n_samples=n_samples) + + def transform(self, X, **params): + """Transform X separately by each transformer, concatenate results. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + The data to be transformed by subset. 
+ + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``transform`` + method. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + X_t : {array-like, sparse matrix} of \ + shape (n_samples, sum_n_components) + Horizontally stacked results of transformers. sum_n_components is the + sum of n_components (output dimension) over transformers. If + any result is a sparse matrix, everything will be converted to + sparse matrices. + """ + _raise_for_params(params, self, "transform") + check_is_fitted(self) + X = _check_X(X) + + # If ColumnTransformer is fit using a dataframe, and now a dataframe is + # passed to be transformed, we select columns by name instead. This + # enables the user to pass X at transform time with extra columns which + # were not present in fit time, and the order of the columns doesn't + # matter. + fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and ( + _is_pandas_df(X) or hasattr(X, "__dataframe__") + ) + + n_samples = _num_samples(X) + column_names = _get_feature_names(X) + + if fit_dataframe_and_transform_dataframe: + named_transformers = self.named_transformers_ + # check that all names seen in fit are in transform, unless + # they were dropped + non_dropped_indices = [ + ind + for name, ind in self._transformer_to_input_indices.items() + if name in named_transformers and named_transformers[name] != "drop" + ] + + all_indices = set(chain(*non_dropped_indices)) + all_names = set(self.feature_names_in_[ind] for ind in all_indices) + + diff = all_names - set(column_names) + if diff: + raise ValueError(f"columns are missing: {diff}") + else: + # ndarray was used for fitting or transforming, thus we only + # check that n_features_in_ is consistent + self._check_n_features(X, reset=False) + + if _routing_enabled(): + routed_params = process_routing(self, "transform", **params) + else: + routed_params = self._get_empty_routing() + + Xs = self._call_func_on_transformers( + X, + None, + _transform_one, + column_as_labels=fit_dataframe_and_transform_dataframe, + routed_params=routed_params, + ) + self._validate_output(Xs) + + if not Xs: + # All transformers are None + return np.zeros((n_samples, 0)) + + return self._hstack(list(Xs), n_samples=n_samples) + + def _hstack(self, Xs, *, n_samples): + """Stacks Xs horizontally. + + This allows subclasses to control the stacking behavior, while reusing + everything else from ColumnTransformer. + + Parameters + ---------- + Xs : list of {array-like, sparse matrix, dataframe} + The container to concatenate. + n_samples : int + The number of samples in the input data to checking the transformation + consistency. + """ + if self.sparse_output_: + try: + # since all columns should be numeric before stacking them + # in a sparse matrix, `check_array` is used for the + # dtype conversion if necessary. + converted_Xs = [ + check_array(X, accept_sparse=True, force_all_finite=False) + for X in Xs + ] + except ValueError as e: + raise ValueError( + "For a sparse output, all columns should " + "be a numeric or convertible to a numeric." 
+ ) from e + + return sparse.hstack(converted_Xs).tocsr() + else: + Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs] + adapter = _get_container_adapter("transform", self) + if adapter and all(adapter.is_supported_container(X) for X in Xs): + # rename before stacking as it avoids to error on temporary duplicated + # columns + transformer_names = [ + t[0] + for t in self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ] + feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0] + if self.verbose_feature_names_out: + # `_add_prefix_for_feature_names_out` takes care about raising + # an error if there are duplicated columns. + feature_names_outs = self._add_prefix_for_feature_names_out( + list(zip(transformer_names, feature_names_outs)) + ) + else: + # check for duplicated columns and raise if any + feature_names_outs = list(chain.from_iterable(feature_names_outs)) + feature_names_count = Counter(feature_names_outs) + if any(count > 1 for count in feature_names_count.values()): + duplicated_feature_names = sorted( + name + for name, count in feature_names_count.items() + if count > 1 + ) + err_msg = ( + "Duplicated feature names found before concatenating the" + " outputs of the transformers:" + f" {duplicated_feature_names}.\n" + ) + for transformer_name, X in zip(transformer_names, Xs): + if X.shape[1] == 0: + continue + dup_cols_in_transformer = sorted( + set(X.columns).intersection(duplicated_feature_names) + ) + if len(dup_cols_in_transformer): + err_msg += ( + f"Transformer {transformer_name} has conflicting " + f"columns names: {dup_cols_in_transformer}.\n" + ) + raise ValueError( + err_msg + + "Either make sure that the transformers named above " + "do not generate columns with conflicting names or set " + "verbose_feature_names_out=True to automatically " + "prefix to the output feature names with the name " + "of the transformer to prevent any conflicting " + "names." + ) + + names_idx = 0 + for X in Xs: + if X.shape[1] == 0: + continue + names_out = feature_names_outs[names_idx : names_idx + X.shape[1]] + adapter.rename_columns(X, names_out) + names_idx += X.shape[1] + + output = adapter.hstack(Xs) + output_samples = output.shape[0] + if output_samples != n_samples: + raise ValueError( + "Concatenating DataFrames from the transformer's output lead to" + " an inconsistent number of samples. The output may have Pandas" + " Indexes that do not match, or that transformers are returning" + " number of samples which are not the same as the number input" + " samples." + ) + + return output + + return np.hstack(Xs) + + def _sk_visual_block_(self): + if isinstance(self.remainder, str) and self.remainder == "drop": + transformers = self.transformers + elif hasattr(self, "_remainder"): + remainder_columns = self._remainder[2] + if ( + hasattr(self, "feature_names_in_") + and remainder_columns + and not all(isinstance(col, str) for col in remainder_columns) + ): + remainder_columns = self.feature_names_in_[remainder_columns].tolist() + transformers = chain( + self.transformers, [("remainder", self.remainder, remainder_columns)] + ) + else: + transformers = chain(self.transformers, [("remainder", self.remainder, "")]) + + names, transformers, name_details = zip(*transformers) + return _VisualBlock( + "parallel", transformers, names=names, name_details=name_details + ) + + def _get_empty_routing(self): + """Return empty routing. + + Used while routing can be disabled. 
+ + TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no + more an option. + """ + return Bunch( + **{ + name: Bunch(**{method: {} for method in METHODS}) + for name, step, _, _ in self._iter( + fitted=False, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + } + ) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__) + # Here we don't care about which columns are used for which + # transformers, and whether or not a transformer is used at all, which + # might happen if no columns are selected for that transformer. We + # request all metadata requested by all transformers. + transformers = chain(self.transformers, [("remainder", self.remainder, None)]) + for name, step, _ in transformers: + method_mapping = MethodMapping() + if hasattr(step, "fit_transform"): + ( + method_mapping.add(caller="fit", callee="fit_transform").add( + caller="fit_transform", callee="fit_transform" + ) + ) + else: + ( + method_mapping.add(caller="fit", callee="fit") + .add(caller="fit", callee="transform") + .add(caller="fit_transform", callee="fit") + .add(caller="fit_transform", callee="transform") + ) + method_mapping.add(caller="transform", callee="transform") + router.add(method_mapping=method_mapping, **{name: step}) + + return router + + +def _check_X(X): + """Use check_array only when necessary, e.g. on lists and other non-array-likes.""" + if hasattr(X, "__array__") or hasattr(X, "__dataframe__") or sparse.issparse(X): + return X + return check_array(X, force_all_finite="allow-nan", dtype=object) + + +def _is_empty_column_selection(column): + """ + Return True if the column selection is empty (empty list or all-False + boolean array). + + """ + if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_): + return not column.any() + elif hasattr(column, "__len__"): + return ( + len(column) == 0 + or all(isinstance(col, bool) for col in column) + and not any(column) + ) + else: + return False + + +def _get_transformer_list(estimators): + """ + Construct (name, trans, column) tuples from list + + """ + transformers, columns = zip(*estimators) + names, _ = zip(*_name_estimators(transformers)) + + transformer_list = list(zip(names, transformers, columns)) + return transformer_list + + +# This function is not validated using validate_params because +# it's just a factory for ColumnTransformer. +def make_column_transformer( + *transformers, + remainder="drop", + sparse_threshold=0.3, + n_jobs=None, + verbose=False, + verbose_feature_names_out=True, +): + """Construct a ColumnTransformer from the given transformers. + + This is a shorthand for the ColumnTransformer constructor; it does not + require, and does not permit, naming the transformers. Instead, they will + be given names automatically based on their types. It also does not allow + weighting with ``transformer_weights``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *transformers : tuples + Tuples of the form (transformer, columns) specifying the + transformer objects to be applied to subsets of the data. + + transformer : {'drop', 'passthrough'} or estimator + Estimator must support :term:`fit` and :term:`transform`. 
+ Special-cased strings 'drop' and 'passthrough' are accepted as + well, to indicate to drop the columns or to pass them through + untransformed, respectively. + columns : str, array-like of str, int, array-like of int, slice, \ + array-like of bool or callable + Indexes the data on its second axis. Integers are interpreted as + positional columns, while strings can reference DataFrame columns + by name. A scalar string or int should be used where + ``transformer`` expects X to be a 1d array-like (vector), + otherwise a 2d array will be passed to the transformer. + A callable is passed the input data `X` and can return any of the + above. To select multiple columns by name or dtype, you can use + :obj:`make_column_selector`. + + remainder : {'drop', 'passthrough'} or estimator, default='drop' + By default, only the specified columns in `transformers` are + transformed and combined in the output, and the non-specified + columns are dropped. (default of ``'drop'``). + By specifying ``remainder='passthrough'``, all remaining columns that + were not specified in `transformers` will be automatically passed + through. This subset of columns is concatenated with the output of + the transformers. + By setting ``remainder`` to be an estimator, the remaining + non-specified columns will use the ``remainder`` estimator. The + estimator must support :term:`fit` and :term:`transform`. + + sparse_threshold : float, default=0.3 + If the transformed output consists of a mix of sparse and dense data, + it will be stacked as a sparse matrix if the density is lower than this + value. Use ``sparse_threshold=0`` to always return dense. + When the transformed output consists of all sparse or all dense data, + the stacked result will be sparse or dense, respectively, and this + keyword will be ignored. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + verbose_feature_names_out : bool, default=True + If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix + all feature names with the name of the transformer that generated that + feature. + If False, :meth:`ColumnTransformer.get_feature_names_out` will not + prefix any feature names and will error if feature names are not + unique. + + .. versionadded:: 1.0 + + Returns + ------- + ct : ColumnTransformer + Returns a :class:`ColumnTransformer` object. + + See Also + -------- + ColumnTransformer : Class that allows combining the + outputs of multiple transformer objects used on column subsets + of the data into a single feature space. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder + >>> from sklearn.compose import make_column_transformer + >>> make_column_transformer( + ... (StandardScaler(), ['numerical_column']), + ... 
(OneHotEncoder(), ['categorical_column'])) + ColumnTransformer(transformers=[('standardscaler', StandardScaler(...), + ['numerical_column']), + ('onehotencoder', OneHotEncoder(...), + ['categorical_column'])]) + """ + # transformer_weights keyword is not passed through because the user + # would need to know the automatically generated names of the transformers + transformer_list = _get_transformer_list(transformers) + return ColumnTransformer( + transformer_list, + n_jobs=n_jobs, + remainder=remainder, + sparse_threshold=sparse_threshold, + verbose=verbose, + verbose_feature_names_out=verbose_feature_names_out, + ) + + +class make_column_selector: + """Create a callable to select columns to be used with + :class:`ColumnTransformer`. + + :func:`make_column_selector` can select columns based on datatype or the + columns name with a regex. When using multiple selection criteria, **all** + criteria must match for a column to be selected. + + For an example of how to use :func:`make_column_selector` within a + :class:`ColumnTransformer` to select columns based on data type (i.e. + `dtype`), refer to + :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`. + + Parameters + ---------- + pattern : str, default=None + Name of columns containing this regex pattern will be included. If + None, column selection will not be selected based on pattern. + + dtype_include : column dtype or list of column dtypes, default=None + A selection of dtypes to include. For more details, see + :meth:`pandas.DataFrame.select_dtypes`. + + dtype_exclude : column dtype or list of column dtypes, default=None + A selection of dtypes to exclude. For more details, see + :meth:`pandas.DataFrame.select_dtypes`. + + Returns + ------- + selector : callable + Callable for column selection to be used by a + :class:`ColumnTransformer`. + + See Also + -------- + ColumnTransformer : Class that allows combining the + outputs of multiple transformer objects used on column subsets + of the data into a single feature space. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder + >>> from sklearn.compose import make_column_transformer + >>> from sklearn.compose import make_column_selector + >>> import numpy as np + >>> import pandas as pd # doctest: +SKIP + >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'], + ... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP + >>> ct = make_column_transformer( + ... (StandardScaler(), + ... make_column_selector(dtype_include=np.number)), # rating + ... (OneHotEncoder(), + ... make_column_selector(dtype_include=object))) # city + >>> ct.fit_transform(X) # doctest: +SKIP + array([[ 0.90453403, 1. , 0. , 0. ], + [-1.50755672, 1. , 0. , 0. ], + [-0.30151134, 0. , 1. , 0. ], + [ 0.90453403, 0. , 0. , 1. ]]) + """ + + def __init__(self, pattern=None, *, dtype_include=None, dtype_exclude=None): + self.pattern = pattern + self.dtype_include = dtype_include + self.dtype_exclude = dtype_exclude + + def __call__(self, df): + """Callable for column selection to be used by a + :class:`ColumnTransformer`. + + Parameters + ---------- + df : dataframe of shape (n_features, n_samples) + DataFrame to select columns from. 
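A short sketch (not part of the patch) of the rule stated above that all selection criteria must match; the example frame and column names are made up.

import numpy as np
import pandas as pd
from sklearn.compose import make_column_selector

X = pd.DataFrame({
    "age_years": [25, 32, 47],
    "age_group": ["young", "adult", "adult"],
    "income": [30.0, 45.5, 52.0],
})

# Numeric columns *and* names matching the regex: only "age_years" qualifies.
selector = make_column_selector(pattern="^age", dtype_include=np.number)
print(selector(X))  # ['age_years']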
+ """ + if not hasattr(df, "iloc"): + raise ValueError( + "make_column_selector can only be applied to pandas dataframes" + ) + df_row = df.iloc[:1] + if self.dtype_include is not None or self.dtype_exclude is not None: + df_row = df_row.select_dtypes( + include=self.dtype_include, exclude=self.dtype_exclude + ) + cols = df_row.columns + if self.pattern is not None: + cols = cols[cols.str.contains(self.pattern, regex=True)] + return cols.tolist() diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/_target.py b/venv/lib/python3.10/site-packages/sklearn/compose/_target.py new file mode 100644 index 0000000000000000000000000000000000000000..b90d235ac758bb31717b96cf82a04d92ac614599 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/compose/_target.py @@ -0,0 +1,342 @@ +# Authors: Andreas Mueller +# Guillaume Lemaitre +# License: BSD 3 clause + +import warnings + +import numpy as np + +from ..base import BaseEstimator, RegressorMixin, _fit_context, clone +from ..exceptions import NotFittedError +from ..preprocessing import FunctionTransformer +from ..utils import _safe_indexing, check_array +from ..utils._param_validation import HasMethods +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.validation import check_is_fitted + +__all__ = ["TransformedTargetRegressor"] + + +class TransformedTargetRegressor( + _RoutingNotSupportedMixin, RegressorMixin, BaseEstimator +): + """Meta-estimator to regress on a transformed target. + + Useful for applying a non-linear transformation to the target `y` in + regression problems. This transformation can be given as a Transformer + such as the :class:`~sklearn.preprocessing.QuantileTransformer` or as a + function and its inverse such as `np.log` and `np.exp`. + + The computation during :meth:`fit` is:: + + regressor.fit(X, func(y)) + + or:: + + regressor.fit(X, transformer.transform(y)) + + The computation during :meth:`predict` is:: + + inverse_func(regressor.predict(X)) + + or:: + + transformer.inverse_transform(regressor.predict(X)) + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + regressor : object, default=None + Regressor object such as derived from + :class:`~sklearn.base.RegressorMixin`. This regressor will + automatically be cloned each time prior to fitting. If `regressor is + None`, :class:`~sklearn.linear_model.LinearRegression` is created and used. + + transformer : object, default=None + Estimator object such as derived from + :class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time + as `func` and `inverse_func`. If `transformer is None` as well as + `func` and `inverse_func`, the transformer will be an identity + transformer. Note that the transformer will be cloned during fitting. + Also, the transformer is restricting `y` to be a numpy array. + + func : function, default=None + Function to apply to `y` before passing to :meth:`fit`. Cannot be set + at the same time as `transformer`. The function needs to return a + 2-dimensional array. If `func is None`, the function used will be the + identity function. + + inverse_func : function, default=None + Function to apply to the prediction of the regressor. Cannot be set at + the same time as `transformer`. The function needs to return a + 2-dimensional array. The inverse function is used to return + predictions to the same space of the original training labels. 
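A hedged sketch (not part of the patch) of the two equivalent ways of specifying the target transformation described above, either as a transformer object or as a func/inverse_func pair; the data below is synthetic.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import QuantileTransformer

rng = np.random.default_rng(0)
X = rng.uniform(0, 10, size=(100, 1))
y = np.exp(0.3 * X.ravel() + rng.normal(scale=0.05, size=100))

# Variant 1: a transformer object (cloned and fitted on y internally).
ttr_qt = TransformedTargetRegressor(
    regressor=LinearRegression(),
    transformer=QuantileTransformer(n_quantiles=100, output_distribution="normal"),
)

# Variant 2: a function and its inverse.
ttr_log = TransformedTargetRegressor(
    regressor=LinearRegression(), func=np.log, inverse_func=np.exp
)

for ttr in (ttr_qt, ttr_log):
    ttr.fit(X, y)
    print(type(ttr.transformer_).__name__, round(ttr.score(X, y), 3))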
+ + check_inverse : bool, default=True + Whether to check that `transform` followed by `inverse_transform` + or `func` followed by `inverse_func` leads to the original targets. + + Attributes + ---------- + regressor_ : object + Fitted regressor. + + transformer_ : object + Transformer used in :meth:`fit` and :meth:`predict`. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying regressor exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.preprocessing.FunctionTransformer : Construct a transformer from an + arbitrary callable. + + Notes + ----- + Internally, the target `y` is always converted into a 2-dimensional array + to be used by scikit-learn transformers. At the time of prediction, the + output will be reshaped to a have the same number of dimensions as `y`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import LinearRegression + >>> from sklearn.compose import TransformedTargetRegressor + >>> tt = TransformedTargetRegressor(regressor=LinearRegression(), + ... func=np.log, inverse_func=np.exp) + >>> X = np.arange(4).reshape(-1, 1) + >>> y = np.exp(2 * X).ravel() + >>> tt.fit(X, y) + TransformedTargetRegressor(...) + >>> tt.score(X, y) + 1.0 + >>> tt.regressor_.coef_ + array([2.]) + + For a more detailed example use case refer to + :ref:`sphx_glr_auto_examples_compose_plot_transformed_target.py`. + """ + + _parameter_constraints: dict = { + "regressor": [HasMethods(["fit", "predict"]), None], + "transformer": [HasMethods("transform"), None], + "func": [callable, None], + "inverse_func": [callable, None], + "check_inverse": ["boolean"], + } + + def __init__( + self, + regressor=None, + *, + transformer=None, + func=None, + inverse_func=None, + check_inverse=True, + ): + self.regressor = regressor + self.transformer = transformer + self.func = func + self.inverse_func = inverse_func + self.check_inverse = check_inverse + + def _fit_transformer(self, y): + """Check transformer and fit transformer. + + Create the default transformer, fit it and make additional inverse + check on a subset (optional). + + """ + if self.transformer is not None and ( + self.func is not None or self.inverse_func is not None + ): + raise ValueError( + "'transformer' and functions 'func'/'inverse_func' cannot both be set." + ) + elif self.transformer is not None: + self.transformer_ = clone(self.transformer) + else: + if self.func is not None and self.inverse_func is None: + raise ValueError( + "When 'func' is provided, 'inverse_func' must also be provided" + ) + self.transformer_ = FunctionTransformer( + func=self.func, + inverse_func=self.inverse_func, + validate=True, + check_inverse=self.check_inverse, + ) + # XXX: sample_weight is not currently passed to the + # transformer. However, if transformer starts using sample_weight, the + # code should be modified accordingly. At the time to consider the + # sample_prop feature, it is also a good use case to be considered. 
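A small sketch (not part of the patch) of the round-trip check performed by `_fit_transformer` above: a func/inverse_func pair that is not a true inverse triggers a UserWarning unless `check_inverse=False`.

import warnings
import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression

X = np.arange(1, 21, dtype=float).reshape(-1, 1)
y = np.arange(1, 21, dtype=float)

# np.square is not the inverse of np.log, so the subset round-trip check fails.
ttr = TransformedTargetRegressor(
    regressor=LinearRegression(), func=np.log, inverse_func=np.square
)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ttr.fit(X, y)
print(any(issubclass(w.category, UserWarning) for w in caught))  # True

# Opting out of the check when the mismatch is intentional:
TransformedTargetRegressor(
    regressor=LinearRegression(),
    func=np.log,
    inverse_func=np.square,
    check_inverse=False,
).fit(X, y)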
+ self.transformer_.fit(y) + if self.check_inverse: + idx_selected = slice(None, None, max(1, y.shape[0] // 10)) + y_sel = _safe_indexing(y, idx_selected) + y_sel_t = self.transformer_.transform(y_sel) + if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)): + warnings.warn( + ( + "The provided functions or transformer are" + " not strictly inverse of each other. If" + " you are sure you want to proceed regardless" + ", set 'check_inverse=False'" + ), + UserWarning, + ) + + @_fit_context( + # TransformedTargetRegressor.regressor/transformer are not validated yet. + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + **fit_params : dict + Parameters passed to the `fit` method of the underlying + regressor. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", **fit_params) + if y is None: + raise ValueError( + f"This {self.__class__.__name__} estimator " + "requires y to be passed, but the target y is None." + ) + y = check_array( + y, + input_name="y", + accept_sparse=False, + force_all_finite=True, + ensure_2d=False, + dtype="numeric", + allow_nd=True, + ) + + # store the number of dimension of the target to predict an array of + # similar shape at predict + self._training_dim = y.ndim + + # transformers are designed to modify X which is 2d dimensional, we + # need to modify y accordingly. + if y.ndim == 1: + y_2d = y.reshape(-1, 1) + else: + y_2d = y + self._fit_transformer(y_2d) + + # transform y and convert back to 1d array if needed + y_trans = self.transformer_.transform(y_2d) + # FIXME: a FunctionTransformer can return a 1D array even when validate + # is set to True. Therefore, we need to check the number of dimension + # first. + if y_trans.ndim == 2 and y_trans.shape[1] == 1: + y_trans = y_trans.squeeze(axis=1) + + if self.regressor is None: + from ..linear_model import LinearRegression + + self.regressor_ = LinearRegression() + else: + self.regressor_ = clone(self.regressor) + + self.regressor_.fit(X, y_trans, **fit_params) + + if hasattr(self.regressor_, "feature_names_in_"): + self.feature_names_in_ = self.regressor_.feature_names_in_ + + return self + + def predict(self, X, **predict_params): + """Predict using the base regressor, applying inverse. + + The regressor is used to predict and the `inverse_func` or + `inverse_transform` is applied before returning the prediction. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + **predict_params : dict of str -> object + Parameters passed to the `predict` method of the underlying + regressor. + + Returns + ------- + y_hat : ndarray of shape (n_samples,) + Predicted values. 
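A sketch (not part of the patch) of the dimension bookkeeping described above: `y` is reshaped to a column vector for the transformer, and predictions are squeezed back to the shape of the training target. The toy data and the choice of StandardScaler are illustrative.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

X = np.linspace(0.0, 10.0, 50).reshape(-1, 1)
y = 3.0 * X.ravel() + 1.0                      # 1d target, shape (50,)

ttr = TransformedTargetRegressor(
    regressor=LinearRegression(), transformer=StandardScaler()
)
ttr.fit(X, y)            # internally the scaler sees y reshaped to (50, 1)
pred = ttr.predict(X[:5])
print(y.ndim, pred.ndim, pred.shape)           # 1 1 (5,)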
+ """ + check_is_fitted(self) + pred = self.regressor_.predict(X, **predict_params) + if pred.ndim == 1: + pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1)) + else: + pred_trans = self.transformer_.inverse_transform(pred) + if ( + self._training_dim == 1 + and pred_trans.ndim == 2 + and pred_trans.shape[1] == 1 + ): + pred_trans = pred_trans.squeeze(axis=1) + + return pred_trans + + def _more_tags(self): + regressor = self.regressor + if regressor is None: + from ..linear_model import LinearRegression + + regressor = LinearRegression() + + return { + "poor_score": True, + "multioutput": _safe_tags(regressor, key="multioutput"), + } + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`.""" + # For consistency with other estimators we raise a AttributeError so + # that hasattr() returns False the estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.regressor_.n_features_in_ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2198db187d3eadfff9fdc75aed6cc514cb90e77 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b27924b705ea5a329dddcdb1c6934ad4eb95c409 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__init__.py b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff52f3a228770703cf17e73077d7eda0a74c3b64 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__init__.py b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63069ae099c637e1899e0f2e5be58f82a244ce8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__init__.py 
b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15fc11fc81f20138388335f81cbdfca5b366e348 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__init__.py @@ -0,0 +1 @@ +from ._laplacian import laplacian diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85775493a0667f5f23076d8c0f37a0407b7b0eb8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff9fddf1ead0e9726869935f2424ae9837909e00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/_laplacian.py b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/_laplacian.py new file mode 100644 index 0000000000000000000000000000000000000000..f862d261d66dec71a2660917cd620987f2a74cd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/_laplacian.py @@ -0,0 +1,557 @@ +""" +This file is a copy of the scipy.sparse.csgraph._laplacian module from SciPy 1.12 + +scipy.sparse.csgraph.laplacian supports sparse arrays only starting from Scipy 1.12, +see https://github.com/scipy/scipy/pull/19156. This vendored file can be removed as +soon as Scipy 1.12 becomes the minimum supported version. + +Laplacian of a compressed-sparse graph +""" + +# License: BSD 3 clause + +import numpy as np +from scipy.sparse import issparse +from scipy.sparse.linalg import LinearOperator + + +############################################################################### +# Graph laplacian +def laplacian( + csgraph, + normed=False, + return_diag=False, + use_out_degree=False, + *, + copy=True, + form="array", + dtype=None, + symmetrized=False, +): + """ + Return the Laplacian of a directed graph. + + Parameters + ---------- + csgraph : array_like or sparse matrix, 2 dimensions + Compressed-sparse graph, with shape (N, N). + normed : bool, optional + If True, then compute symmetrically normalized Laplacian. + Default: False. + return_diag : bool, optional + If True, then also return an array related to vertex degrees. + Default: False. + use_out_degree : bool, optional + If True, then use out-degree instead of in-degree. + This distinction matters only if the graph is asymmetric. + Default: False. + copy : bool, optional + If False, then change `csgraph` in place if possible, + avoiding doubling the memory use. + Default: True, for backward compatibility. + form : 'array', or 'function', or 'lo' + Determines the format of the output Laplacian: + + * 'array' is a numpy array; + * 'function' is a pointer to evaluating the Laplacian-vector + or Laplacian-matrix product; + * 'lo' results in the format of the `LinearOperator`. 
+ + Choosing 'function' or 'lo' always avoids doubling + the memory use, ignoring `copy` value. + Default: 'array', for backward compatibility. + dtype : None or one of numeric numpy dtypes, optional + The dtype of the output. If ``dtype=None``, the dtype of the + output matches the dtype of the input csgraph, except for + the case ``normed=True`` and integer-like csgraph, where + the output dtype is 'float' allowing accurate normalization, + but dramatically increasing the memory use. + Default: None, for backward compatibility. + symmetrized : bool, optional + If True, then the output Laplacian is symmetric/Hermitian. + The symmetrization is done by ``csgraph + csgraph.T.conj`` + without dividing by 2 to preserve integer dtypes if possible + prior to the construction of the Laplacian. + The symmetrization will increase the memory footprint of + sparse matrices unless the sparsity pattern is symmetric or + `form` is 'function' or 'lo'. + Default: False, for backward compatibility. + + Returns + ------- + lap : ndarray, or sparse matrix, or `LinearOperator` + The N x N Laplacian of csgraph. It will be a NumPy array (dense) + if the input was dense, or a sparse matrix otherwise, or + the format of a function or `LinearOperator` if + `form` equals 'function' or 'lo', respectively. + diag : ndarray, optional + The length-N main diagonal of the Laplacian matrix. + For the normalized Laplacian, this is the array of square roots + of vertex degrees or 1 if the degree is zero. + + Notes + ----- + The Laplacian matrix of a graph is sometimes referred to as the + "Kirchhoff matrix" or just the "Laplacian", and is useful in many + parts of spectral graph theory. + In particular, the eigen-decomposition of the Laplacian can give + insight into many properties of the graph, e.g., + is commonly used for spectral data embedding and clustering. + + The constructed Laplacian doubles the memory use if ``copy=True`` and + ``form="array"`` which is the default. + Choosing ``copy=False`` has no effect unless ``form="array"`` + or the matrix is sparse in the ``coo`` format, or dense array, except + for the integer input with ``normed=True`` that forces the float output. + + Sparse input is reformatted into ``coo`` if ``form="array"``, + which is the default. + + If the input adjacency matrix is not symmetric, the Laplacian is + also non-symmetric unless ``symmetrized=True`` is used. + + Diagonal entries of the input adjacency matrix are ignored and + replaced with zeros for the purpose of normalization where ``normed=True``. + The normalization uses the inverse square roots of row-sums of the input + adjacency matrix, and thus may fail if the row-sums contain + negative or complex with a non-zero imaginary part values. + + The normalization is symmetric, making the normalized Laplacian also + symmetric if the input csgraph was symmetric. + + References + ---------- + .. [1] Laplacian matrix. 
https://en.wikipedia.org/wiki/Laplacian_matrix + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csgraph + + Our first illustration is the symmetric graph + + >>> G = np.arange(4) * np.arange(4)[:, np.newaxis] + >>> G + array([[0, 0, 0, 0], + [0, 1, 2, 3], + [0, 2, 4, 6], + [0, 3, 6, 9]]) + + and its symmetric Laplacian matrix + + >>> csgraph.laplacian(G) + array([[ 0, 0, 0, 0], + [ 0, 5, -2, -3], + [ 0, -2, 8, -6], + [ 0, -3, -6, 9]]) + + The non-symmetric graph + + >>> G = np.arange(9).reshape(3, 3) + >>> G + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + has different row- and column sums, resulting in two varieties + of the Laplacian matrix, using an in-degree, which is the default + + >>> L_in_degree = csgraph.laplacian(G) + >>> L_in_degree + array([[ 9, -1, -2], + [-3, 8, -5], + [-6, -7, 7]]) + + or alternatively an out-degree + + >>> L_out_degree = csgraph.laplacian(G, use_out_degree=True) + >>> L_out_degree + array([[ 3, -1, -2], + [-3, 8, -5], + [-6, -7, 13]]) + + Constructing a symmetric Laplacian matrix, one can add the two as + + >>> L_in_degree + L_out_degree.T + array([[ 12, -4, -8], + [ -4, 16, -12], + [ -8, -12, 20]]) + + or use the ``symmetrized=True`` option + + >>> csgraph.laplacian(G, symmetrized=True) + array([[ 12, -4, -8], + [ -4, 16, -12], + [ -8, -12, 20]]) + + that is equivalent to symmetrizing the original graph + + >>> csgraph.laplacian(G + G.T) + array([[ 12, -4, -8], + [ -4, 16, -12], + [ -8, -12, 20]]) + + The goal of normalization is to make the non-zero diagonal entries + of the Laplacian matrix to be all unit, also scaling off-diagonal + entries correspondingly. The normalization can be done manually, e.g., + + >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]) + >>> L, d = csgraph.laplacian(G, return_diag=True) + >>> L + array([[ 2, -1, -1], + [-1, 2, -1], + [-1, -1, 2]]) + >>> d + array([2, 2, 2]) + >>> scaling = np.sqrt(d) + >>> scaling + array([1.41421356, 1.41421356, 1.41421356]) + >>> (1/scaling)*L*(1/scaling) + array([[ 1. , -0.5, -0.5], + [-0.5, 1. , -0.5], + [-0.5, -0.5, 1. ]]) + + Or using ``normed=True`` option + + >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True) + >>> L + array([[ 1. , -0.5, -0.5], + [-0.5, 1. , -0.5], + [-0.5, -0.5, 1. ]]) + + which now instead of the diagonal returns the scaling coefficients + + >>> d + array([1.41421356, 1.41421356, 1.41421356]) + + Zero scaling coefficients are substituted with 1s, where scaling + has thus no effect, e.g., + + >>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]]) + >>> G + array([[0, 0, 0], + [0, 0, 1], + [0, 1, 0]]) + >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True) + >>> L + array([[ 0., -0., -0.], + [-0., 1., -1.], + [-0., -1., 1.]]) + >>> d + array([1., 1., 1.]) + + Only the symmetric normalization is implemented, resulting + in a symmetric Laplacian matrix if and only if its graph is symmetric + and has all non-negative degrees, like in the examples above. 
+ + The output Laplacian matrix is by default a dense array or a sparse matrix + inferring its shape, format, and dtype from the input graph matrix: + + >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32) + >>> G + array([[0., 1., 1.], + [1., 0., 1.], + [1., 1., 0.]], dtype=float32) + >>> csgraph.laplacian(G) + array([[ 2., -1., -1.], + [-1., 2., -1.], + [-1., -1., 2.]], dtype=float32) + + but can alternatively be generated matrix-free as a LinearOperator: + + >>> L = csgraph.laplacian(G, form="lo") + >>> L + <3x3 _CustomLinearOperator with dtype=float32> + >>> L(np.eye(3)) + array([[ 2., -1., -1.], + [-1., 2., -1.], + [-1., -1., 2.]]) + + or as a lambda-function: + + >>> L = csgraph.laplacian(G, form="function") + >>> L + . at 0x0000012AE6F5A598> + >>> L(np.eye(3)) + array([[ 2., -1., -1.], + [-1., 2., -1.], + [-1., -1., 2.]]) + + The Laplacian matrix is used for + spectral data clustering and embedding + as well as for spectral graph partitioning. + Our final example illustrates the latter + for a noisy directed linear graph. + + >>> from scipy.sparse import diags, random + >>> from scipy.sparse.linalg import lobpcg + + Create a directed linear graph with ``N=35`` vertices + using a sparse adjacency matrix ``G``: + + >>> N = 35 + >>> G = diags(np.ones(N-1), 1, format="csr") + + Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``: + + >>> rng = np.random.default_rng() + >>> G += 1e-2 * random(N, N, density=0.1, random_state=rng) + + Set initial approximations for eigenvectors: + + >>> X = rng.random((N, 2)) + + The constant vector of ones is always a trivial eigenvector + of the non-normalized Laplacian to be filtered out: + + >>> Y = np.ones((N, 1)) + + Alternating (1) the sign of the graph weights allows determining + labels for spectral max- and min- cuts in a single loop. + Since the graph is undirected, the option ``symmetrized=True`` + must be used in the construction of the Laplacian. + The option ``normed=True`` cannot be used in (2) for the negative weights + here as the symmetric normalization evaluates square roots. + The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees + a fixed memory footprint and read-only access to the graph. + Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector + that determines the labels as the signs of its components in (5). + Since the sign in an eigenvector is not deterministic and can flip, + we fix the sign of the first component to be always +1 in (4). + + >>> for cut in ["max", "min"]: + ... G = -G # 1. + ... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2. + ... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3. + ... eves *= np.sign(eves[0, 0]) # 4. + ... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5. + max-cut labels: + [1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1] + min-cut labels: + [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] + + As anticipated for a (slightly noisy) linear graph, + the max-cut strips all the edges of the graph coloring all + odd vertices into one color and all even vertices into another one, + while the balanced min-cut partitions the graph + in the middle by deleting a single edge. + Both determined partitions are optimal. 
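A usage sketch (not part of the patch). The module header above states that this copy exists because `scipy.sparse.csgraph.laplacian` only accepts the newer sparse array containers from SciPy 1.12 on; the vendored function is private to scikit-learn, so the import path below is an internal detail rather than public API.

import numpy as np
from scipy.sparse import csr_array
from sklearn.externals._scipy.sparse.csgraph import laplacian

# Adjacency matrix of a triangle graph, stored as a sparse *array*.
G = csr_array(np.array([[0, 1, 1],
                        [1, 0, 1],
                        [1, 1, 0]], dtype=float))

L, d = laplacian(G, normed=True, return_diag=True)
print(np.round(L.toarray(), 2))   # unit diagonal, -0.5 off-diagonal entries
print(d)                          # square roots of the vertex degrees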
+ """ + if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]: + raise ValueError("csgraph must be a square matrix or array") + + if normed and ( + np.issubdtype(csgraph.dtype, np.signedinteger) + or np.issubdtype(csgraph.dtype, np.uint) + ): + csgraph = csgraph.astype(np.float64) + + if form == "array": + create_lap = _laplacian_sparse if issparse(csgraph) else _laplacian_dense + else: + create_lap = ( + _laplacian_sparse_flo if issparse(csgraph) else _laplacian_dense_flo + ) + + degree_axis = 1 if use_out_degree else 0 + + lap, d = create_lap( + csgraph, + normed=normed, + axis=degree_axis, + copy=copy, + form=form, + dtype=dtype, + symmetrized=symmetrized, + ) + if return_diag: + return lap, d + return lap + + +def _setdiag_dense(m, d): + step = len(d) + 1 + m.flat[::step] = d + + +def _laplace(m, d): + return lambda v: v * d[:, np.newaxis] - m @ v + + +def _laplace_normed(m, d, nd): + laplace = _laplace(m, d) + return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis]) + + +def _laplace_sym(m, d): + return ( + lambda v: v * d[:, np.newaxis] + - m @ v + - np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m)) + ) + + +def _laplace_normed_sym(m, d, nd): + laplace_sym = _laplace_sym(m, d) + return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis]) + + +def _linearoperator(mv, shape, dtype): + return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype) + + +def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized): + # The keyword argument `copy` is unused and has no effect here. + del copy + + if dtype is None: + dtype = graph.dtype + + graph_sum = np.asarray(graph.sum(axis=axis)).ravel() + graph_diagonal = graph.diagonal() + diag = graph_sum - graph_diagonal + if symmetrized: + graph_sum += np.asarray(graph.sum(axis=1 - axis)).ravel() + diag = graph_sum - graph_diagonal - graph_diagonal + + if normed: + isolated_node_mask = diag == 0 + w = np.where(isolated_node_mask, 1, np.sqrt(diag)) + if symmetrized: + md = _laplace_normed_sym(graph, graph_sum, 1.0 / w) + else: + md = _laplace_normed(graph, graph_sum, 1.0 / w) + if form == "function": + return md, w.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, w.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + else: + if symmetrized: + md = _laplace_sym(graph, graph_sum) + else: + md = _laplace(graph, graph_sum) + if form == "function": + return md, diag.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, diag.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + + +def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized): + # The keyword argument `form` is unused and has no effect here. 
+ del form + + if dtype is None: + dtype = graph.dtype + + needs_copy = False + if graph.format in ("lil", "dok"): + m = graph.tocoo() + else: + m = graph + if copy: + needs_copy = True + + if symmetrized: + m += m.T.conj() + + w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal() + if normed: + m = m.tocoo(copy=needs_copy) + isolated_node_mask = w == 0 + w = np.where(isolated_node_mask, 1, np.sqrt(w)) + m.data /= w[m.row] + m.data /= w[m.col] + m.data *= -1 + m.setdiag(1 - isolated_node_mask) + else: + if m.format == "dia": + m = m.copy() + else: + m = m.tocoo(copy=needs_copy) + m.data *= -1 + m.setdiag(w) + + return m.astype(dtype, copy=False), w.astype(dtype) + + +def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized): + if copy: + m = np.array(graph) + else: + m = np.asarray(graph) + + if dtype is None: + dtype = m.dtype + + graph_sum = m.sum(axis=axis) + graph_diagonal = m.diagonal() + diag = graph_sum - graph_diagonal + if symmetrized: + graph_sum += m.sum(axis=1 - axis) + diag = graph_sum - graph_diagonal - graph_diagonal + + if normed: + isolated_node_mask = diag == 0 + w = np.where(isolated_node_mask, 1, np.sqrt(diag)) + if symmetrized: + md = _laplace_normed_sym(m, graph_sum, 1.0 / w) + else: + md = _laplace_normed(m, graph_sum, 1.0 / w) + if form == "function": + return md, w.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, w.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + else: + if symmetrized: + md = _laplace_sym(m, graph_sum) + else: + md = _laplace(m, graph_sum) + if form == "function": + return md, diag.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, diag.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + + +def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized): + if form != "array": + raise ValueError(f'{form!r} must be "array"') + + if dtype is None: + dtype = graph.dtype + + if copy: + m = np.array(graph) + else: + m = np.asarray(graph) + + if dtype is None: + dtype = m.dtype + + if symmetrized: + m += m.T.conj() + np.fill_diagonal(m, 0) + w = m.sum(axis=axis) + if normed: + isolated_node_mask = w == 0 + w = np.where(isolated_node_mask, 1, np.sqrt(w)) + m /= w + m /= w[:, np.newaxis] + m *= -1 + _setdiag_dense(m, 1 - isolated_node_mask) + else: + m *= -1 + _setdiag_dense(m, w) + + return m.astype(dtype, copy=False), w.astype(dtype, copy=False) diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/__init__.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7d316d95ada4dcc0534851b08b221ae3270174d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/__init__.py @@ -0,0 +1,88 @@ +import typing + +from ._plot import LearningCurveDisplay, ValidationCurveDisplay +from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV +from ._split import ( + BaseCrossValidator, + BaseShuffleSplit, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + LeavePOut, + PredefinedSplit, + RepeatedKFold, + RepeatedStratifiedKFold, + ShuffleSplit, + StratifiedGroupKFold, + StratifiedKFold, + StratifiedShuffleSplit, + TimeSeriesSplit, + check_cv, + train_test_split, +) +from ._validation import ( + cross_val_predict, + cross_val_score, + 
cross_validate, + learning_curve, + permutation_test_score, + validation_curve, +) + +if typing.TYPE_CHECKING: + # Avoid errors in type checkers (e.g. mypy) for experimental estimators. + # TODO: remove this check once the estimator is no longer experimental. + from ._search_successive_halving import ( # noqa + HalvingGridSearchCV, + HalvingRandomSearchCV, + ) + + +__all__ = [ + "BaseCrossValidator", + "BaseShuffleSplit", + "GridSearchCV", + "TimeSeriesSplit", + "KFold", + "GroupKFold", + "GroupShuffleSplit", + "LeaveOneGroupOut", + "LeaveOneOut", + "LeavePGroupsOut", + "LeavePOut", + "RepeatedKFold", + "RepeatedStratifiedKFold", + "ParameterGrid", + "ParameterSampler", + "PredefinedSplit", + "RandomizedSearchCV", + "ShuffleSplit", + "StratifiedKFold", + "StratifiedGroupKFold", + "StratifiedShuffleSplit", + "check_cv", + "cross_val_predict", + "cross_val_score", + "cross_validate", + "learning_curve", + "LearningCurveDisplay", + "permutation_test_score", + "train_test_split", + "validation_curve", + "ValidationCurveDisplay", +] + + +# TODO: remove this check once the estimator is no longer experimental. +def __getattr__(name): + if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}: + raise ImportError( + f"{name} is experimental and the API might change without any " + "deprecation cycle. To use it, you need to explicitly import " + "enable_halving_search_cv:\n" + "from sklearn.experimental import enable_halving_search_cv" + ) + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8c2ebe1242ff851d0a0db7f5b91aa8e71eaf0c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30db6f8028910abe2d73b1bac130dfe074521f94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aba6010f58a5143decf9bc0dbef3b9a645a4ac3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed9d7f30735b16bc7030981f90ab851c6b40d80e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..00770e2fe7403912dbab1bab3ce6f0895c9b27d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d5eeae9e60e32b151ca934d68fa3603fc4504bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/_plot.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..741c893ae2ea96db7dedc015afeb0b8d7cc9178a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/_plot.py @@ -0,0 +1,907 @@ +import warnings + +import numpy as np + +from ..utils import check_matplotlib_support +from ..utils._plotting import _interval_max_min_ratio, _validate_score_name +from ._validation import learning_curve, validation_curve + + +class _BaseCurveDisplay: + def _plot_curve( + self, + x_data, + *, + ax=None, + negate_score=False, + score_name=None, + score_type="test", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + check_matplotlib_support(f"{self.__class__.__name__}.plot") + + import matplotlib.pyplot as plt + + if ax is None: + _, ax = plt.subplots() + + if negate_score: + train_scores, test_scores = -self.train_scores, -self.test_scores + else: + train_scores, test_scores = self.train_scores, self.test_scores + + if std_display_style not in ("errorbar", "fill_between", None): + raise ValueError( + f"Unknown std_display_style: {std_display_style}. Should be one of" + " 'errorbar', 'fill_between', or None." + ) + + if score_type not in ("test", "train", "both"): + raise ValueError( + f"Unknown score_type: {score_type}. Should be one of 'test', " + "'train', or 'both'." 
+ ) + + if score_type == "train": + scores = {"Train": train_scores} + elif score_type == "test": + scores = {"Test": test_scores} + else: # score_type == "both" + scores = {"Train": train_scores, "Test": test_scores} + + if std_display_style in ("fill_between", None): + # plot the mean score + if line_kw is None: + line_kw = {} + + self.lines_ = [] + for line_label, score in scores.items(): + self.lines_.append( + *ax.plot( + x_data, + score.mean(axis=1), + label=line_label, + **line_kw, + ) + ) + self.errorbar_ = None + self.fill_between_ = None # overwritten below by fill_between + + if std_display_style == "errorbar": + if errorbar_kw is None: + errorbar_kw = {} + + self.errorbar_ = [] + for line_label, score in scores.items(): + self.errorbar_.append( + ax.errorbar( + x_data, + score.mean(axis=1), + score.std(axis=1), + label=line_label, + **errorbar_kw, + ) + ) + self.lines_, self.fill_between_ = None, None + elif std_display_style == "fill_between": + if fill_between_kw is None: + fill_between_kw = {} + default_fill_between_kw = {"alpha": 0.5} + fill_between_kw = {**default_fill_between_kw, **fill_between_kw} + + self.fill_between_ = [] + for line_label, score in scores.items(): + self.fill_between_.append( + ax.fill_between( + x_data, + score.mean(axis=1) - score.std(axis=1), + score.mean(axis=1) + score.std(axis=1), + **fill_between_kw, + ) + ) + + score_name = self.score_name if score_name is None else score_name + + ax.legend() + + # TODO(1.5): to be removed + if log_scale != "deprecated": + warnings.warn( + ( + "The `log_scale` parameter is deprecated as of version 1.3 " + "and will be removed in 1.5. You can use display.ax_.set_xscale " + "and display.ax_.set_yscale instead." + ), + FutureWarning, + ) + xscale = "log" if log_scale else "linear" + else: + # We found that a ratio, smaller or bigger than 5, between the largest and + # smallest gap of the x values is a good indicator to choose between linear + # and log scale. + if _interval_max_min_ratio(x_data) > 5: + xscale = "symlog" if x_data.min() <= 0 else "log" + else: + xscale = "linear" + ax.set_xscale(xscale) + ax.set_ylabel(f"{score_name}") + + self.ax_ = ax + self.figure_ = ax.figure + + +class LearningCurveDisplay(_BaseCurveDisplay): + """Learning Curve visualization. + + It is recommended to use + :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to + create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance. + All parameters are stored as attributes. + + Read more in the :ref:`User Guide ` for general information + about the visualization API and + :ref:`detailed documentation ` regarding the learning + curve visualization. + + .. versionadded:: 1.2 + + Parameters + ---------- + train_sizes : ndarray of shape (n_unique_ticks,) + Numbers of training examples that has been used to generate the + learning curve. + + train_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on test set. + + score_name : str, default=None + The name of the score used in `learning_curve`. It will override the name + inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if + `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a + string or a callable, we infer the name. We replace `_` by spaces and capitalize + the first letter. We remove `neg_` and replace it by `"Negative"` if + `negate_score` is `False` or just remove it otherwise. 
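A sketch (not part of the patch) of the `std_display_style` branches in `_plot_curve` above: with "errorbar", `lines_` and `fill_between_` stay `None` and the error-bar containers are stored instead. A non-interactive matplotlib backend is assumed so the example runs headless.

import matplotlib
matplotlib.use("Agg")                      # headless backend for the example
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import LearningCurveDisplay
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
display = LearningCurveDisplay.from_estimator(
    DecisionTreeClassifier(random_state=0),
    X, y,
    score_type="both",
    std_display_style="errorbar",
)
print(display.lines_, display.fill_between_)   # None None
print(len(display.errorbar_))                  # one container per curve -> 2
plt.close(display.figure_)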
+ + Attributes + ---------- + ax_ : matplotlib Axes + Axes with the learning curve. + + figure_ : matplotlib Figure + Figure containing the learning curve. + + errorbar_ : list of matplotlib Artist or None + When the `std_display_style` is `"errorbar"`, this is a list of + `matplotlib.container.ErrorbarContainer` objects. If another style is + used, `errorbar_` is `None`. + + lines_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.lines.Line2D` objects corresponding to the mean train and + test scores. If another style is used, `line_` is `None`. + + fill_between_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.collections.PolyCollection` objects. If another style is + used, `fill_between_` is `None`. + + See Also + -------- + sklearn.model_selection.learning_curve : Compute the learning curve. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import LearningCurveDisplay, learning_curve + >>> from sklearn.tree import DecisionTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> tree = DecisionTreeClassifier(random_state=0) + >>> train_sizes, train_scores, test_scores = learning_curve( + ... tree, X, y) + >>> display = LearningCurveDisplay(train_sizes=train_sizes, + ... train_scores=train_scores, test_scores=test_scores, score_name="Score") + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None): + self.train_sizes = train_sizes + self.train_scores = train_scores + self.test_scores = test_scores + self.score_name = score_name + + def plot( + self, + ax=None, + *, + negate_score=False, + score_name=None, + score_type="both", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Plot visualization. + + Parameters + ---------- + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.learning_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + log_scale : bool, default="deprecated" + Whether or not to use a logarithmic scale for the x-axis. + + .. deprecated:: 1.3 + `log_scale` is deprecated in 1.3 and will be removed in 1.5. + Use `display.ax_.set_xscale` and `display.ax_.set_yscale` instead. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If None, no standard deviation representation is + displayed. 
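A sketch (not part of the patch) of `negate_score` combined with a `neg_*` scorer, as documented in the `plot` parameters above; the exact y-axis label in the comment is the name inferred from `scoring` and is only indicative.

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.model_selection import LearningCurveDisplay

X, y = load_diabetes(return_X_y=True)
display = LearningCurveDisplay.from_estimator(
    Ridge(),
    X, y,
    scoring="neg_mean_absolute_error",
    negate_score=True,               # plot the (positive) error itself
    score_type="test",
)
# Inferred score name, e.g. "Mean absolute error" per the rules quoted above.
print(display.ax_.get_ylabel())
plt.close(display.figure_)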
+ + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.LearningCurveDisplay` + Object that stores computed values. + """ + self._plot_curve( + self.train_sizes, + ax=ax, + negate_score=negate_score, + score_name=score_name, + score_type=score_type, + log_scale=log_scale, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + self.ax_.set_xlabel("Number of samples in the training set") + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + groups=None, + train_sizes=np.linspace(0.1, 1.0, 5), + cv=None, + scoring=None, + exploit_incremental_learning=False, + n_jobs=None, + pre_dispatch="all", + verbose=0, + shuffle=False, + random_state=None, + error_score=np.nan, + fit_params=None, + ax=None, + negate_score=False, + score_name=None, + score_type="both", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Create a learning curve display from an estimator. + + Read more in the :ref:`User Guide ` for general + information about the visualization API and :ref:`detailed + documentation ` regarding the learning curve + visualization. + + Parameters + ---------- + estimator : object type that implements the "fit" and "predict" methods + An object of that type which is cloned for each validation. + + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + train_sizes : array-like of shape (n_ticks,), \ + default=np.linspace(0.1, 1.0, 5) + Relative or absolute numbers of training examples that will be used + to generate the learning curve. If the dtype is float, it is + regarded as a fraction of the maximum size of the training set + (that is determined by the selected validation method), i.e. it has + to be within (0, 1]. Otherwise it is interpreted as absolute sizes + of the training sets. Note that for classification the number of + samples usually have to be big enough to contain at least one + sample from each class. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and `y` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. 
In all + other cases, :class:`~sklearn.model_selection.KFold` is used. These + splitters are instantiated with `shuffle=False` so the splits will + be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + scoring : str or callable, default=None + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + `scorer(estimator, X, y)` (see :ref:`scoring`). + + exploit_incremental_learning : bool, default=False + If the estimator supports incremental learning, this will be + used to speed up fitting for different training set sizes. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + computing the score are parallelized over the different training + and test sets. `None` means 1 unless in a + :obj:`joblib.parallel_backend` context. `-1` means using all + processors. See :term:`Glossary ` for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + shuffle : bool, default=False + Whether to shuffle training data before taking prefixes of it + based on`train_sizes`. + + random_state : int, RandomState instance or None, default=None + Used when `shuffle` is True. Pass an int for reproducible + output across multiple function calls. + See :term:`Glossary `. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator + fitting. If set to 'raise', the error is raised. If a numeric value + is given, FitFailedWarning is raised. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.learning_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + log_scale : bool, default="deprecated" + Whether or not to use a logarithmic scale for the x-axis. + + .. deprecated:: 1.3 + `log_scale` is deprecated in 1.3 and will be removed in 1.5. + Use `display.ax_.xscale` and `display.ax_.yscale` instead. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If `None`, no representation of the standard deviation + is displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. 
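A sketch (not part of the patch) combining a few of the `from_estimator` arguments documented above: an explicit CV splitter, fractional `train_sizes`, and shuffled training prefixes with a fixed `random_state`. The estimator choice is illustrative.

import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import LearningCurveDisplay, StratifiedKFold

X, y = load_iris(return_X_y=True)
display = LearningCurveDisplay.from_estimator(
    LogisticRegression(max_iter=1000),
    X, y,
    train_sizes=np.linspace(0.2, 1.0, 4),   # fractions of the training fold
    cv=StratifiedKFold(n_splits=3),
    shuffle=True,
    random_state=0,
)
print(display.train_sizes)   # resolved to absolute numbers of samples
plt.close(display.figure_)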
+ + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.LearningCurveDisplay` + Object that stores computed values. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import LearningCurveDisplay + >>> from sklearn.tree import DecisionTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> tree = DecisionTreeClassifier(random_state=0) + >>> LearningCurveDisplay.from_estimator(tree, X, y) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + score_name = _validate_score_name(score_name, scoring, negate_score) + + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y, + groups=groups, + train_sizes=train_sizes, + cv=cv, + scoring=scoring, + exploit_incremental_learning=exploit_incremental_learning, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + verbose=verbose, + shuffle=shuffle, + random_state=random_state, + error_score=error_score, + return_times=False, + fit_params=fit_params, + ) + + viz = cls( + train_sizes=train_sizes, + train_scores=train_scores, + test_scores=test_scores, + score_name=score_name, + ) + return viz.plot( + ax=ax, + negate_score=negate_score, + score_type=score_type, + log_scale=log_scale, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + + +class ValidationCurveDisplay(_BaseCurveDisplay): + """Validation Curve visualization. + + It is recommended to use + :meth:`~sklearn.model_selection.ValidationCurveDisplay.from_estimator` to + create a :class:`~sklearn.model_selection.ValidationCurveDisplay` instance. + All parameters are stored as attributes. + + Read more in the :ref:`User Guide ` for general information + about the visualization API and :ref:`detailed documentation + ` regarding the validation curve visualization. + + .. versionadded:: 1.3 + + Parameters + ---------- + param_name : str + Name of the parameter that has been varied. + + param_range : array-like of shape (n_ticks,) + The values of the parameter that have been evaluated. + + train_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on test set. + + score_name : str, default=None + The name of the score used in `validation_curve`. It will override the name + inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if + `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a + string or a callable, we infer the name. We replace `_` by spaces and capitalize + the first letter. We remove `neg_` and replace it by `"Negative"` if + `negate_score` is `False` or just remove it otherwise. + + Attributes + ---------- + ax_ : matplotlib Axes + Axes with the validation curve. + + figure_ : matplotlib Figure + Figure containing the validation curve. + + errorbar_ : list of matplotlib Artist or None + When the `std_display_style` is `"errorbar"`, this is a list of + `matplotlib.container.ErrorbarContainer` objects. If another style is + used, `errorbar_` is `None`. 
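+
+        For instance, with placeholder `estimator`, `X`, `y` and parameter
+        values::
+
+            display = ValidationCurveDisplay.from_estimator(
+                estimator, X, y, param_name="C", param_range=[0.1, 1, 10],
+                std_display_style="errorbar",
+            )
+
+        after which `errorbar_` holds the containers while `lines_` and
+        `fill_between_` are `None`.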
+ + lines_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.lines.Line2D` objects corresponding to the mean train and + test scores. If another style is used, `line_` is `None`. + + fill_between_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.collections.PolyCollection` objects. If another style is + used, `fill_between_` is `None`. + + See Also + -------- + sklearn.model_selection.validation_curve : Compute the validation curve. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import ValidationCurveDisplay, validation_curve + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> train_scores, test_scores = validation_curve( + ... logistic_regression, X, y, param_name=param_name, param_range=param_range + ... ) + >>> display = ValidationCurveDisplay( + ... param_name=param_name, param_range=param_range, + ... train_scores=train_scores, test_scores=test_scores, score_name="Score" + ... ) + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__( + self, *, param_name, param_range, train_scores, test_scores, score_name=None + ): + self.param_name = param_name + self.param_range = param_range + self.train_scores = train_scores + self.test_scores = test_scores + self.score_name = score_name + + def plot( + self, + ax=None, + *, + negate_score=False, + score_name=None, + score_type="both", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Plot visualization. + + Parameters + ---------- + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.validation_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If None, no standard deviation representation is + displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. 
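+
+            For instance, a hypothetical call customizing the shaded area
+            could look like::
+
+                display.plot(fill_between_kw={"alpha": 0.1})
+
+            where `alpha` is passed through unchanged to `plt.fill_between`.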
+ + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.ValidationCurveDisplay` + Object that stores computed values. + """ + self._plot_curve( + self.param_range, + ax=ax, + negate_score=negate_score, + score_name=score_name, + score_type=score_type, + log_scale="deprecated", + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + self.ax_.set_xlabel(f"{self.param_name}") + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + param_name, + param_range, + groups=None, + cv=None, + scoring=None, + n_jobs=None, + pre_dispatch="all", + verbose=0, + error_score=np.nan, + fit_params=None, + ax=None, + negate_score=False, + score_name=None, + score_type="both", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Create a validation curve display from an estimator. + + Read more in the :ref:`User Guide ` for general + information about the visualization API and :ref:`detailed + documentation ` regarding the validation curve + visualization. + + Parameters + ---------- + estimator : object type that implements the "fit" and "predict" methods + An object of that type which is cloned for each validation. + + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + param_name : str + Name of the parameter that will be varied. + + param_range : array-like of shape (n_values,) + The values of the parameter that will be evaluated. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and `y` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. In all + other cases, :class:`~sklearn.model_selection.KFold` is used. These + splitters are instantiated with `shuffle=False` so the splits will + be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + scoring : str or callable, default=None + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + `scorer(estimator, X, y)` (see :ref:`scoring`). + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + computing the score are parallelized over the different training + and test sets. `None` means 1 unless in a + :obj:`joblib.parallel_backend` context. `-1` means using all + processors. See :term:`Glossary ` for more details. 
+ + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator + fitting. If set to 'raise', the error is raised. If a numeric value + is given, FitFailedWarning is raised. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.validation_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If `None`, no representation of the standard deviation + is displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.ValidationCurveDisplay` + Object that stores computed values. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import ValidationCurveDisplay + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> ValidationCurveDisplay.from_estimator( + ... logistic_regression, X, y, param_name=param_name, + ... param_range=param_range, + ... 
) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + score_name = _validate_score_name(score_name, scoring, negate_score) + + train_scores, test_scores = validation_curve( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + groups=groups, + cv=cv, + scoring=scoring, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + verbose=verbose, + error_score=error_score, + fit_params=fit_params, + ) + + viz = cls( + param_name=param_name, + param_range=np.asarray(param_range), + train_scores=train_scores, + test_scores=test_scores, + score_name=score_name, + ) + return viz.plot( + ax=ax, + negate_score=negate_score, + score_type=score_type, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/_search.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/_search.py new file mode 100644 index 0000000000000000000000000000000000000000..9de03c2c663ec0b8165f2c42f9183d2da7164815 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/_search.py @@ -0,0 +1,1918 @@ +""" +The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the +parameters of an estimator. +""" + +# Author: Alexandre Gramfort , +# Gael Varoquaux +# Andreas Mueller +# Olivier Grisel +# Raghav RV +# License: BSD 3 clause + +import numbers +import operator +import time +import warnings +from abc import ABCMeta, abstractmethod +from collections import defaultdict +from collections.abc import Iterable, Mapping, Sequence +from functools import partial, reduce +from itertools import product + +import numpy as np +from numpy.ma import MaskedArray +from scipy.stats import rankdata + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..exceptions import NotFittedError +from ..metrics import check_scoring +from ..metrics._scorer import ( + _check_multimetric_scoring, + _MultimetricScorer, + get_scorer_names, +) +from ..utils import Bunch, check_random_state +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import available_if +from ..utils.parallel import Parallel, delayed +from ..utils.random import sample_without_replacement +from ..utils.validation import _check_method_params, check_is_fitted, indexable +from ._split import check_cv +from ._validation import ( + _aggregate_score_dicts, + _fit_and_score, + _insert_error_scores, + _normalize_score_results, + _warn_or_raise_about_fit_failures, +) + +__all__ = ["GridSearchCV", "ParameterGrid", "ParameterSampler", "RandomizedSearchCV"] + + +class ParameterGrid: + """Grid of parameters with a discrete number of values for each. + + Can be used to iterate over parameter value combinations with the + Python built-in function iter. + The order of the generated parameter combinations is deterministic. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + param_grid : dict of str to sequence, or sequence of such + The parameter grid to explore, as a dictionary mapping estimator + parameters to sequences of allowed values. + + An empty dict signifies default parameters. 
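+
+        For instance, ``list(ParameterGrid({})) == [{}]``, i.e. a single
+        candidate that keeps the estimator's defaults.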
+ + A sequence of dicts signifies a sequence of grids to search, and is + useful to avoid exploring parameter combinations that make no sense + or have no effect. See the examples below. + + Examples + -------- + >>> from sklearn.model_selection import ParameterGrid + >>> param_grid = {'a': [1, 2], 'b': [True, False]} + >>> list(ParameterGrid(param_grid)) == ( + ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, + ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) + True + + >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] + >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, + ... {'kernel': 'rbf', 'gamma': 1}, + ... {'kernel': 'rbf', 'gamma': 10}] + True + >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} + True + + See Also + -------- + GridSearchCV : Uses :class:`ParameterGrid` to perform a full parallelized + parameter search. + """ + + def __init__(self, param_grid): + if not isinstance(param_grid, (Mapping, Iterable)): + raise TypeError( + f"Parameter grid should be a dict or a list, got: {param_grid!r} of" + f" type {type(param_grid).__name__}" + ) + + if isinstance(param_grid, Mapping): + # wrap dictionary in a singleton list to support either dict + # or list of dicts + param_grid = [param_grid] + + # check if all entries are dictionaries of lists + for grid in param_grid: + if not isinstance(grid, dict): + raise TypeError(f"Parameter grid is not a dict ({grid!r})") + for key, value in grid.items(): + if isinstance(value, np.ndarray) and value.ndim > 1: + raise ValueError( + f"Parameter array for {key!r} should be one-dimensional, got:" + f" {value!r} with shape {value.shape}" + ) + if isinstance(value, str) or not isinstance( + value, (np.ndarray, Sequence) + ): + raise TypeError( + f"Parameter grid for parameter {key!r} needs to be a list or a" + f" numpy array, but got {value!r} (of type " + f"{type(value).__name__}) instead. Single values " + "need to be wrapped in a list with one element." + ) + if len(value) == 0: + raise ValueError( + f"Parameter grid for parameter {key!r} need " + f"to be a non-empty sequence, got: {value!r}" + ) + + self.param_grid = param_grid + + def __iter__(self): + """Iterate over the points in the grid. + + Returns + ------- + params : iterator over dict of str to any + Yields dictionaries mapping each estimator parameter to one of its + allowed values. + """ + for p in self.param_grid: + # Always sort the keys of a dictionary, for reproducibility + items = sorted(p.items()) + if not items: + yield {} + else: + keys, values = zip(*items) + for v in product(*values): + params = dict(zip(keys, v)) + yield params + + def __len__(self): + """Number of points on the grid.""" + # Product function that can handle iterables (np.prod can't). + product = partial(reduce, operator.mul) + return sum( + product(len(v) for v in p.values()) if p else 1 for p in self.param_grid + ) + + def __getitem__(self, ind): + """Get the parameters that would be ``ind``th in iteration + + Parameters + ---------- + ind : int + The iteration index + + Returns + ------- + params : dict of str to any + Equal to list(self)[ind] + """ + # This is used to make discrete sampling without replacement memory + # efficient. 
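+        # For illustration (hypothetical grid): with param_grid
+        # {'a': [1, 2, 3], 'b': [True, False]} there are 6 candidates and the
+        # requested index is decoded below as a mixed-radix number, one
+        # `divmod` per parameter, so that `self[ind] == list(self)[ind]`
+        # without materializing the whole grid.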
+ for sub_grid in self.param_grid: + # XXX: could memoize information used here + if not sub_grid: + if ind == 0: + return {} + else: + ind -= 1 + continue + + # Reverse so most frequent cycling parameter comes first + keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) + sizes = [len(v_list) for v_list in values_lists] + total = np.prod(sizes) + + if ind >= total: + # Try the next grid + ind -= total + else: + out = {} + for key, v_list, n in zip(keys, values_lists, sizes): + ind, offset = divmod(ind, n) + out[key] = v_list[offset] + return out + + raise IndexError("ParameterGrid index out of range") + + +class ParameterSampler: + """Generator on parameters sampled from given distributions. + + Non-deterministic iterable over random candidate combinations for hyper- + parameter search. If all parameters are presented as a list, + sampling without replacement is performed. If at least one parameter + is given as a distribution, sampling with replacement is used. + It is highly recommended to use continuous distributions for continuous + parameters. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + param_distributions : dict + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_iter : int + Number of parameter settings that are produced. + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for random uniform sampling + from lists of possible values instead of scipy.stats distributions. + Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + Returns + ------- + params : dict of str to any + **Yields** dictionaries mapping each estimator parameter to + as sampled value. + + Examples + -------- + >>> from sklearn.model_selection import ParameterSampler + >>> from scipy.stats.distributions import expon + >>> import numpy as np + >>> rng = np.random.RandomState(0) + >>> param_grid = {'a':[1, 2], 'b': expon()} + >>> param_list = list(ParameterSampler(param_grid, n_iter=4, + ... random_state=rng)) + >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) + ... for d in param_list] + >>> rounded_list == [{'b': 0.89856, 'a': 1}, + ... {'b': 0.923223, 'a': 1}, + ... {'b': 1.878964, 'a': 2}, + ... 
{'b': 1.038159, 'a': 2}] + True + """ + + def __init__(self, param_distributions, n_iter, *, random_state=None): + if not isinstance(param_distributions, (Mapping, Iterable)): + raise TypeError( + "Parameter distribution is not a dict or a list," + f" got: {param_distributions!r} of type " + f"{type(param_distributions).__name__}" + ) + + if isinstance(param_distributions, Mapping): + # wrap dictionary in a singleton list to support either dict + # or list of dicts + param_distributions = [param_distributions] + + for dist in param_distributions: + if not isinstance(dist, dict): + raise TypeError( + "Parameter distribution is not a dict ({!r})".format(dist) + ) + for key in dist: + if not isinstance(dist[key], Iterable) and not hasattr( + dist[key], "rvs" + ): + raise TypeError( + f"Parameter grid for parameter {key!r} is not iterable " + f"or a distribution (value={dist[key]})" + ) + self.n_iter = n_iter + self.random_state = random_state + self.param_distributions = param_distributions + + def _is_all_lists(self): + return all( + all(not hasattr(v, "rvs") for v in dist.values()) + for dist in self.param_distributions + ) + + def __iter__(self): + rng = check_random_state(self.random_state) + + # if all distributions are given as lists, we want to sample without + # replacement + if self._is_all_lists(): + # look up sampled parameter settings in parameter grid + param_grid = ParameterGrid(self.param_distributions) + grid_size = len(param_grid) + n_iter = self.n_iter + + if grid_size < n_iter: + warnings.warn( + "The total space of parameters %d is smaller " + "than n_iter=%d. Running %d iterations. For exhaustive " + "searches, use GridSearchCV." % (grid_size, self.n_iter, grid_size), + UserWarning, + ) + n_iter = grid_size + for i in sample_without_replacement(grid_size, n_iter, random_state=rng): + yield param_grid[i] + + else: + for _ in range(self.n_iter): + dist = rng.choice(self.param_distributions) + # Always sort the keys of a dictionary, for reproducibility + items = sorted(dist.items()) + params = dict() + for k, v in items: + if hasattr(v, "rvs"): + params[k] = v.rvs(random_state=rng) + else: + params[k] = v[rng.randint(len(v))] + yield params + + def __len__(self): + """Number of points that will be sampled.""" + if self._is_all_lists(): + grid_size = len(ParameterGrid(self.param_distributions)) + return min(self.n_iter, grid_size) + else: + return self.n_iter + + +def _check_refit(search_cv, attr): + if not search_cv.refit: + raise AttributeError( + f"This {type(search_cv).__name__} instance was initialized with " + f"`refit=False`. {attr} is available only after refitting on the best " + "parameters. You can refit an estimator manually using the " + "`best_params_` attribute" + ) + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + Calling a prediction method will only be available if `refit=True`. In + such case, we check first the fitted best estimator. If it is not + fitted, we check the unfitted estimator. + + Checking the unfitted estimator allows to use `hasattr` on the `SearchCV` + instance even before calling `fit`. 
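+
+    For instance (a sketch; the estimators are just examples of models that
+    do or do not implement the method)::
+
+        search = GridSearchCV(LogisticRegression(), {"C": [1, 10]})
+        hasattr(search, "predict_proba")  # True, even before calling `fit`
+        search = GridSearchCV(LinearRegression(), {"fit_intercept": [True]})
+        hasattr(search, "predict_proba")  # False, the estimator lacks it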
+ """ + + def check(self): + _check_refit(self, attr) + if hasattr(self, "best_estimator_"): + # raise an AttributeError if `attr` does not exist + getattr(self.best_estimator_, attr) + return True + # raise an AttributeError if `attr` does not exist + getattr(self.estimator, attr) + return True + + return check + + +class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): + """Abstract base class for hyper parameter search with cross-validation.""" + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "n_jobs": [numbers.Integral, None], + "refit": ["boolean", str, callable], + "cv": ["cv_object"], + "verbose": ["verbose"], + "pre_dispatch": [numbers.Integral, str], + "error_score": [StrOptions({"raise"}), numbers.Real], + "return_train_score": ["boolean"], + } + + @abstractmethod + def __init__( + self, + estimator, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + error_score=np.nan, + return_train_score=True, + ): + self.scoring = scoring + self.estimator = estimator + self.n_jobs = n_jobs + self.refit = refit + self.cv = cv + self.verbose = verbose + self.pre_dispatch = pre_dispatch + self.error_score = error_score + self.return_train_score = return_train_score + + @property + def _estimator_type(self): + return self.estimator._estimator_type + + def _more_tags(self): + # allows cross-validation to see 'precomputed' metrics + return { + "pairwise": _safe_tags(self.estimator, "pairwise"), + "_xfail_checks": { + "check_supervised_y_2d": "DataConversionWarning not caught" + }, + } + + def score(self, X, y=None, **params): + """Return the score on the given data, if the estimator has been refit. + + This uses the score defined by ``scoring`` where provided, and the + ``best_estimator_.score`` method otherwise. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples, n_output) \ + or (n_samples,), default=None + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict + Parameters to be passed to the underlying scorer(s). + + ..versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + score : float + The score defined by ``scoring`` if provided, and the + ``best_estimator_.score`` method otherwise. + """ + _check_refit(self, "score") + check_is_fitted(self) + + _raise_for_params(params, self, "score") + + if _routing_enabled(): + score_params = process_routing(self, "score", **params).scorer["score"] + else: + score_params = dict() + + if self.scorer_ is None: + raise ValueError( + "No score function explicitly defined, " + "and the estimator doesn't provide one %s" + % self.best_estimator_ + ) + if isinstance(self.scorer_, dict): + if self.multimetric_: + scorer = self.scorer_[self.refit] + else: + scorer = self.scorer_ + return scorer(self.best_estimator_, X, y, **score_params) + + # callable + score = self.scorer_(self.best_estimator_, X, y, **score_params) + if self.multimetric_: + score = score[self.refit] + return score + + @available_if(_estimator_has("score_samples")) + def score_samples(self, X): + """Call score_samples on the estimator with the best found parameters. 
+ + Only available if ``refit=True`` and the underlying estimator supports + ``score_samples``. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements + of the underlying estimator. + + Returns + ------- + y_score : ndarray of shape (n_samples,) + The ``best_estimator_.score_samples`` method. + """ + check_is_fitted(self) + return self.best_estimator_.score_samples(X) + + @available_if(_estimator_has("predict")) + def predict(self, X): + """Call predict on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``predict``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The predicted labels or values for `X` based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.predict(X) + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Call predict_proba on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``predict_proba``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes) + Predicted class probabilities for `X` based on the estimator with + the best found parameters. The order of the classes corresponds + to that in the fitted attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.best_estimator_.predict_proba(X) + + @available_if(_estimator_has("predict_log_proba")) + def predict_log_proba(self, X): + """Call predict_log_proba on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``predict_log_proba``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes) + Predicted class log-probabilities for `X` based on the estimator + with the best found parameters. The order of the classes + corresponds to that in the fitted attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.best_estimator_.predict_log_proba(X) + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Call decision_function on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``decision_function``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_score : ndarray of shape (n_samples,) or (n_samples, n_classes) \ + or (n_samples, n_classes * (n_classes-1) / 2) + Result of the decision function for `X` based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.decision_function(X) + + @available_if(_estimator_has("transform")) + def transform(self, X): + """Call transform on the estimator with the best found parameters. + + Only available if the underlying estimator supports ``transform`` and + ``refit=True``. 
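+
+        A minimal sketch (with placeholder data `X`, using `PCA` as an
+        example transformer)::
+
+            search = GridSearchCV(PCA(), {"n_components": [2, 3]}).fit(X)
+            X_reduced = search.transform(X)  # delegates to `best_estimator_`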
+
+        Parameters
+        ----------
+        X : indexable, length n_samples
+            Must fulfill the input assumptions of the
+            underlying estimator.
+
+        Returns
+        -------
+        Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
+            `X` transformed in the new space based on the estimator with
+            the best found parameters.
+        """
+        check_is_fitted(self)
+        return self.best_estimator_.transform(X)
+
+    @available_if(_estimator_has("inverse_transform"))
+    def inverse_transform(self, Xt):
+        """Call inverse_transform on the estimator with the best found params.
+
+        Only available if the underlying estimator implements
+        ``inverse_transform`` and ``refit=True``.
+
+        Parameters
+        ----------
+        Xt : indexable, length n_samples
+            Must fulfill the input assumptions of the
+            underlying estimator.
+
+        Returns
+        -------
+        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
+            Result of the `inverse_transform` function for `Xt` based on the
+            estimator with the best found parameters.
+        """
+        check_is_fitted(self)
+        return self.best_estimator_.inverse_transform(Xt)
+
+    @property
+    def n_features_in_(self):
+        """Number of features seen during :term:`fit`.
+
+        Only available when `refit=True`.
+        """
+        # For consistency with other estimators we raise an AttributeError so
+        # that hasattr() fails if the search estimator isn't fitted.
+        try:
+            check_is_fitted(self)
+        except NotFittedError as nfe:
+            raise AttributeError(
+                "{} object has no n_features_in_ attribute.".format(
+                    self.__class__.__name__
+                )
+            ) from nfe
+
+        return self.best_estimator_.n_features_in_
+
+    @property
+    def classes_(self):
+        """Class labels.
+
+        Only available when `refit=True` and the estimator is a classifier.
+        """
+        _estimator_has("classes_")(self)
+        return self.best_estimator_.classes_
+
+    def _run_search(self, evaluate_candidates):
+        """Repeatedly calls `evaluate_candidates` to conduct a search.
+
+        This method, implemented in sub-classes, makes it possible to
+        customize the scheduling of evaluations: GridSearchCV and
+        RandomizedSearchCV schedule evaluations for their whole parameter
+        search space at once but other more sequential approaches are also
+        possible: for instance it is possible to iteratively schedule evaluations
+        for new regions of the parameter search space based on previously
+        collected evaluation results. This makes it possible to implement
+        Bayesian optimization or more generally sequential model-based
+        optimization by deriving from the BaseSearchCV abstract base class.
+        For example, Successive Halving is implemented by calling
+        `evaluate_candidates` multiple times (once per iteration of the SH
+        process), each time passing a different set of candidates with `X`
+        and `y` of increasing sizes.
+
+        Parameters
+        ----------
+        evaluate_candidates : callable
+            This callback accepts:
+                - a list of candidates, where each candidate is a dict of
+                  parameter settings.
+                - an optional `cv` parameter which can be used to e.g.
+                  evaluate candidates on different dataset splits, or
+                  evaluate candidates on subsampled data (as done in the
+                  SuccessiveHalving estimators). By default, the original `cv`
+                  parameter is used, and it is available as a private
+                  `_checked_cv_orig` attribute.
+                - an optional `more_results` dict. Each key will be added to
+                  the `cv_results_` attribute. Values should be lists of
+                  length `n_candidates`
+
+            It returns a dict of all results so far, formatted like
+            ``cv_results_``.
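+
+        As an illustration, a coarse-to-fine strategy could be sketched as
+        (the parameter values are hypothetical)::
+
+            def _run_search(self, evaluate_candidates):
+                results = evaluate_candidates([{'C': c} for c in (0.1, 1, 10)])
+                best = results['params'][results['rank_test_score'].argmin()]
+                evaluate_candidates([{'C': best['C'] * f} for f in (0.5, 2.0)])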
+ + Important note (relevant whether the default cv is used or not): + in randomized splitters, and unless the random_state parameter of + cv was set to an int, calling cv.split() multiple times will + yield different splits. Since cv.split() is called in + evaluate_candidates, this means that candidates will be evaluated + on different splits each time evaluate_candidates is called. This + might be a methodological issue depending on the search strategy + that you're implementing. To prevent randomized splitters from + being used, you may use _split._yields_constant_splits() + + Examples + -------- + + :: + + def _run_search(self, evaluate_candidates): + 'Try C=0.1 only if C=1 is better than C=10' + all_results = evaluate_candidates([{'C': 1}, {'C': 10}]) + score = all_results['mean_test_score'] + if score[0] < score[1]: + evaluate_candidates([{'C': 0.1}]) + """ + raise NotImplementedError("_run_search not implemented.") + + def _check_refit_for_multimetric(self, scores): + """Check `refit` is compatible with `scores` is valid""" + multimetric_refit_msg = ( + "For multi-metric scoring, the parameter refit must be set to a " + "scorer key or a callable to refit an estimator with the best " + "parameter setting on the whole data and make the best_* " + "attributes available for that metric. If this is not needed, " + f"refit should be set to False explicitly. {self.refit!r} was " + "passed." + ) + + valid_refit_dict = isinstance(self.refit, str) and self.refit in scores + + if ( + self.refit is not False + and not valid_refit_dict + and not callable(self.refit) + ): + raise ValueError(multimetric_refit_msg) + + @staticmethod + def _select_best_index(refit, refit_metric, results): + """Select index of the best combination of hyperparemeters.""" + if callable(refit): + # If callable, refit is expected to return the index of the best + # parameter set. + best_index = refit(results) + if not isinstance(best_index, numbers.Integral): + raise TypeError("best_index_ returned is not an integer") + if best_index < 0 or best_index >= len(results["params"]): + raise IndexError("best_index_ index out of range") + else: + best_index = results[f"rank_test_{refit_metric}"].argmin() + return best_index + + def _get_scorers(self, convert_multimetric): + """Get the scorer(s) to be used. + + This is used in ``fit`` and ``get_metadata_routing``. + + Parameters + ---------- + convert_multimetric : bool + Whether to convert a dict of scorers to a _MultimetricScorer. This + is used in ``get_metadata_routing`` to include the routing info for + multiple scorers. + + Returns + ------- + scorers, refit_metric + """ + refit_metric = "score" + + if callable(self.scoring): + scorers = self.scoring + elif self.scoring is None or isinstance(self.scoring, str): + scorers = check_scoring(self.estimator, self.scoring) + else: + scorers = _check_multimetric_scoring(self.estimator, self.scoring) + self._check_refit_for_multimetric(scorers) + refit_metric = self.refit + if convert_multimetric and isinstance(scorers, dict): + scorers = _MultimetricScorer( + scorers=scorers, raise_exc=(self.error_score == "raise") + ) + + return scorers, refit_metric + + def _get_routed_params_for_fit(self, params): + """Get the parameters to be used for routing. + + This is a method instead of a snippet in ``fit`` since it's used twice, + here in ``fit``, and in ``HalvingRandomSearchCV.fit``. 
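+
+        Without metadata routing enabled, the returned object is a plain
+        :class:`~sklearn.utils.Bunch` of the form::
+
+            Bunch(
+                estimator=Bunch(fit=params),   # fit params, minus "groups"
+                splitter=Bunch(split={"groups": groups}),
+                scorer=Bunch(score={}),
+            )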
+ """ + if _routing_enabled(): + routed_params = process_routing(self, "fit", **params) + else: + params = params.copy() + groups = params.pop("groups", None) + routed_params = Bunch( + estimator=Bunch(fit=params), + splitter=Bunch(split={"groups": groups}), + scorer=Bunch(score={}), + ) + return routed_params + + @_fit_context( + # *SearchCV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **params): + """Run fit with all sets of parameters. + + Parameters + ---------- + + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples, n_output) \ + or (n_samples,), default=None + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict of str -> object + Parameters passed to the ``fit`` method of the estimator, the scorer, + and the CV splitter. + + If a fit parameter is an array-like whose length is equal to + `num_samples` then it will be split across CV groups along with `X` + and `y`. For example, the :term:`sample_weight` parameter is split + because `len(sample_weights) = len(X)`. + + Returns + ------- + self : object + Instance of fitted estimator. + """ + estimator = self.estimator + # Here we keep a dict of scorers as is, and only convert to a + # _MultimetricScorer at a later stage. Issue: + # https://github.com/scikit-learn/scikit-learn/issues/27001 + scorers, refit_metric = self._get_scorers(convert_multimetric=False) + + X, y = indexable(X, y) + params = _check_method_params(X, params=params) + + routed_params = self._get_routed_params_for_fit(params) + + cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator)) + n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split) + + base_estimator = clone(self.estimator) + + parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch) + + fit_and_score_kwargs = dict( + scorer=scorers, + fit_params=routed_params.estimator.fit, + score_params=routed_params.scorer.score, + return_train_score=self.return_train_score, + return_n_test_samples=True, + return_times=True, + return_parameters=False, + error_score=self.error_score, + verbose=self.verbose, + ) + results = {} + with parallel: + all_candidate_params = [] + all_out = [] + all_more_results = defaultdict(list) + + def evaluate_candidates(candidate_params, cv=None, more_results=None): + cv = cv or cv_orig + candidate_params = list(candidate_params) + n_candidates = len(candidate_params) + + if self.verbose > 0: + print( + "Fitting {0} folds for each of {1} candidates," + " totalling {2} fits".format( + n_splits, n_candidates, n_candidates * n_splits + ) + ) + + out = parallel( + delayed(_fit_and_score)( + clone(base_estimator), + X, + y, + train=train, + test=test, + parameters=parameters, + split_progress=(split_idx, n_splits), + candidate_progress=(cand_idx, n_candidates), + **fit_and_score_kwargs, + ) + for (cand_idx, parameters), (split_idx, (train, test)) in product( + enumerate(candidate_params), + enumerate(cv.split(X, y, **routed_params.splitter.split)), + ) + ) + + if len(out) < 1: + raise ValueError( + "No fits were performed. " + "Was the CV iterator empty? " + "Were there no candidates?" + ) + elif len(out) != n_candidates * n_splits: + raise ValueError( + "cv.split and cv.get_n_splits returned " + "inconsistent results. 
Expected {} " + "splits, got {}".format(n_splits, len(out) // n_candidates) + ) + + _warn_or_raise_about_fit_failures(out, self.error_score) + + # For callable self.scoring, the return type is only know after + # calling. If the return type is a dictionary, the error scores + # can now be inserted with the correct key. The type checking + # of out will be done in `_insert_error_scores`. + if callable(self.scoring): + _insert_error_scores(out, self.error_score) + + all_candidate_params.extend(candidate_params) + all_out.extend(out) + + if more_results is not None: + for key, value in more_results.items(): + all_more_results[key].extend(value) + + nonlocal results + results = self._format_results( + all_candidate_params, n_splits, all_out, all_more_results + ) + + return results + + self._run_search(evaluate_candidates) + + # multimetric is determined here because in the case of a callable + # self.scoring the return type is only known after calling + first_test_score = all_out[0]["test_scores"] + self.multimetric_ = isinstance(first_test_score, dict) + + # check refit_metric now for a callabe scorer that is multimetric + if callable(self.scoring) and self.multimetric_: + self._check_refit_for_multimetric(first_test_score) + refit_metric = self.refit + + # For multi-metric evaluation, store the best_index_, best_params_ and + # best_score_ iff refit is one of the scorer names + # In single metric evaluation, refit_metric is "score" + if self.refit or not self.multimetric_: + self.best_index_ = self._select_best_index( + self.refit, refit_metric, results + ) + if not callable(self.refit): + # With a non-custom callable, we can select the best score + # based on the best index + self.best_score_ = results[f"mean_test_{refit_metric}"][ + self.best_index_ + ] + self.best_params_ = results["params"][self.best_index_] + + if self.refit: + # here we clone the estimator as well as the parameters, since + # sometimes the parameters themselves might be estimators, e.g. + # when we search over different estimators in a pipeline. + # ref: https://github.com/scikit-learn/scikit-learn/pull/26786 + self.best_estimator_ = clone(base_estimator).set_params( + **clone(self.best_params_, safe=False) + ) + + refit_start_time = time.time() + if y is not None: + self.best_estimator_.fit(X, y, **routed_params.estimator.fit) + else: + self.best_estimator_.fit(X, **routed_params.estimator.fit) + refit_end_time = time.time() + self.refit_time_ = refit_end_time - refit_start_time + + if hasattr(self.best_estimator_, "feature_names_in_"): + self.feature_names_in_ = self.best_estimator_.feature_names_in_ + + # Store the only scorer not as a dict for single metric evaluation + self.scorer_ = scorers + + self.cv_results_ = results + self.n_splits_ = n_splits + + return self + + def _format_results(self, candidate_params, n_splits, out, more_results=None): + n_candidates = len(candidate_params) + out = _aggregate_score_dicts(out) + + results = dict(more_results or {}) + for key, val in results.items(): + # each value is a list (as per evaluate_candidate's convention) + # we convert it to an array for consistency with the other keys + results[key] = np.asarray(val) + + def _store(key_name, array, weights=None, splits=False, rank=False): + """A small helper to store the scores/times to the cv_results_""" + # When iterated first by splits, then by parameters + # We want `array` to have `n_candidates` rows and `n_splits` cols. 
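+            # For illustration (hypothetical sizes): with 2 candidates and 3
+            # splits, `out` is ordered candidate-major, i.e.
+            # [c0/s0, c0/s1, c0/s2, c1/s0, c1/s1, c1/s2], so the row-major
+            # reshape below yields one row per candidate and one column per
+            # split.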
+ array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits) + if splits: + for split_idx in range(n_splits): + # Uses closure to alter the results + results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx] + + array_means = np.average(array, axis=1, weights=weights) + results["mean_%s" % key_name] = array_means + + if key_name.startswith(("train_", "test_")) and np.any( + ~np.isfinite(array_means) + ): + warnings.warn( + ( + f"One or more of the {key_name.split('_')[0]} scores " + f"are non-finite: {array_means}" + ), + category=UserWarning, + ) + + # Weighted std is not directly available in numpy + array_stds = np.sqrt( + np.average( + (array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights + ) + ) + results["std_%s" % key_name] = array_stds + + if rank: + # When the fit/scoring fails `array_means` contains NaNs, we + # will exclude them from the ranking process and consider them + # as tied with the worst performers. + if np.isnan(array_means).all(): + # All fit/scoring routines failed. + rank_result = np.ones_like(array_means, dtype=np.int32) + else: + min_array_means = np.nanmin(array_means) - 1 + array_means = np.nan_to_num(array_means, nan=min_array_means) + rank_result = rankdata(-array_means, method="min").astype( + np.int32, copy=False + ) + results["rank_%s" % key_name] = rank_result + + _store("fit_time", out["fit_time"]) + _store("score_time", out["score_time"]) + # Use one MaskedArray and mask all the places where the param is not + # applicable for that candidate. Use defaultdict as each candidate may + # not contain all the params + param_results = defaultdict( + partial( + MaskedArray, + np.empty( + n_candidates, + ), + mask=True, + dtype=object, + ) + ) + for cand_idx, params in enumerate(candidate_params): + for name, value in params.items(): + # An all masked empty array gets created for the key + # `"param_%s" % name` at the first occurrence of `name`. + # Setting the value at an index also unmasks that index + param_results["param_%s" % name][cand_idx] = value + + results.update(param_results) + # Store a list of param dicts at the key 'params' + results["params"] = candidate_params + + test_scores_dict = _normalize_score_results(out["test_scores"]) + if self.return_train_score: + train_scores_dict = _normalize_score_results(out["train_scores"]) + + for scorer_name in test_scores_dict: + # Computed the (weighted) mean and std for test scores alone + _store( + "test_%s" % scorer_name, + test_scores_dict[scorer_name], + splits=True, + rank=True, + weights=None, + ) + if self.return_train_score: + _store( + "train_%s" % scorer_name, + train_scores_dict[scorer_name], + splits=True, + ) + + return results + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
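+
+        A minimal sketch of how this routing is typically exercised (the
+        estimator, data and weights are placeholders)::
+
+            sklearn.set_config(enable_metadata_routing=True)
+            est = LogisticRegression().set_fit_request(sample_weight=True)
+            GridSearchCV(est, {"C": [1, 10]}).fit(X, y, sample_weight=sw)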
+ """ + router = MetadataRouter(owner=self.__class__.__name__) + router.add( + estimator=self.estimator, + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + + scorer, _ = self._get_scorers(convert_multimetric=True) + router.add( + scorer=scorer, + method_mapping=MethodMapping() + .add(caller="score", callee="score") + .add(caller="fit", callee="score"), + ) + router.add( + splitter=self.cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + return router + + +class GridSearchCV(BaseSearchCV): + """Exhaustive search over specified parameter values for an estimator. + + Important members are fit, predict. + + GridSearchCV implements a "fit" and a "score" method. + It also implements "score_samples", "predict", "predict_proba", + "decision_function", "transform" and "inverse_transform" if they are + implemented in the estimator used. + + The parameters of the estimator used to apply these methods are optimized + by cross-validated grid-search over a parameter grid. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_grid : dict or list of dictionaries + Dictionary with parameters names (`str`) as keys and lists of + parameter settings to try as values, or a list of such + dictionaries, in which case the grids spanned by each dictionary + in the list are explored. This enables searching over any sequence + of parameter settings. + + scoring : str, callable, list, tuple or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + refit : bool, str, or callable, default=True + Refit an estimator using the best found parameters on the whole + dataset. + + For multiple metric evaluation, this needs to be a `str` denoting the + scorer that would be used to find the best parameters for refitting + the estimator at the end. + + Where there are considerations other than maximum score in + choosing a best estimator, ``refit`` can be set to a function which + returns the selected ``best_index_`` given ``cv_results_``. In that + case, the ``best_estimator_`` and ``best_params_`` will be set + according to the returned ``best_index_`` while the ``best_score_`` + attribute will not be available. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``GridSearchCV`` instance. 
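+
+        As an illustration, a callable reproducing the default single-metric
+        selection could look like (estimator and grid are placeholders)::
+
+            def refit_strategy(cv_results):
+                return cv_results["rank_test_score"].argmin()
+
+            GridSearchCV(estimator, param_grid, refit=refit_strategy)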
+ + Also for multiple metric evaluation, the attributes ``best_index_``, + ``best_score_`` and ``best_params_`` will only be available if + ``refit`` is set and all of them will be determined w.r.t this specific + scorer. + + See ``scoring`` parameter to know more about multiple metric + evaluation. + + See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py` + to see how to design a custom selection strategy using a callable + via `refit`. + + .. versionchanged:: 0.20 + Support for callable added. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : int + Controls the verbosity: the higher, the more messages. + + - >1 : the computation time for each fold and parameter candidate is + displayed; + - >2 : the score is also displayed; + - >3 : the fold and candidate parameter indexes are also displayed + together with the starting time of the computation. + + pre_dispatch : int, or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + Attributes + ---------- + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. 
+ + For instance the below given table + + +------------+-----------+------------+-----------------+---+---------+ + |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...| + +============+===========+============+=================+===+=========+ + | 'poly' | -- | 2 | 0.80 |...| 2 | + +------------+-----------+------------+-----------------+---+---------+ + | 'poly' | -- | 3 | 0.70 |...| 4 | + +------------+-----------+------------+-----------------+---+---------+ + | 'rbf' | 0.1 | -- | 0.80 |...| 3 | + +------------+-----------+------------+-----------------+---+---------+ + | 'rbf' | 0.2 | -- | 0.93 |...| 1 | + +------------+-----------+------------+-----------------+---+---------+ + + will be represented by a ``cv_results_`` dict of:: + + { + 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], + mask = [False False False False]...) + 'param_gamma': masked_array(data = [-- -- 0.1 0.2], + mask = [ True True False False]...), + 'param_degree': masked_array(data = [2.0 3.0 -- --], + mask = [False False True True]...), + 'split0_test_score' : [0.80, 0.70, 0.80, 0.93], + 'split1_test_score' : [0.82, 0.50, 0.70, 0.78], + 'mean_test_score' : [0.81, 0.60, 0.75, 0.85], + 'std_test_score' : [0.01, 0.10, 0.05, 0.08], + 'rank_test_score' : [2, 4, 3, 1], + 'split0_train_score' : [0.80, 0.92, 0.70, 0.93], + 'split1_train_score' : [0.82, 0.55, 0.70, 0.87], + 'mean_train_score' : [0.81, 0.74, 0.70, 0.90], + 'std_train_score' : [0.01, 0.19, 0.00, 0.03], + 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], + 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], + 'mean_score_time' : [0.01, 0.06, 0.04, 0.04], + 'std_score_time' : [0.00, 0.00, 0.00, 0.01], + 'params' : [{'kernel': 'poly', 'degree': 2}, ...], + } + + NOTE + + The key ``'params'`` is used to store a list of parameter + settings dicts for all the parameter candidates. + + The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and + ``std_score_time`` are all in seconds. + + For multi-metric evaluation, the scores for all the scorers are + available in the ``cv_results_`` dict at the keys ending with that + scorer's name (``'_'``) instead of ``'_score'`` shown + above. ('split0_test_precision', 'mean_train_precision' etc.) + + best_estimator_ : estimator + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. + + See ``refit`` parameter for more information on allowed values. + + best_score_ : float + Mean cross-validated score of the best_estimator + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + This attribute is not available if ``refit`` is a function. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + For multi-metric evaluation, this attribute holds the validated + ``scoring`` dict which maps the scorer key to the scorer callable. 
+ + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + .. versionadded:: 0.20 + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + ParameterGrid : Generates all the combinations of a hyperparameter grid. + train_test_split : Utility function to split the data into a development + set usable for fitting a GridSearchCV instance and an evaluation set + for its final evaluation. + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. + + Notes + ----- + The parameters selected are those that maximize the score of the left out + data, unless an explicit score is passed in which case it is used instead. + + If `n_jobs` was set to a value higher than one, the data is copied for each + point in the grid (and not `n_jobs` times). This is done for efficiency + reasons if individual jobs take very little time, but may raise errors if + the dataset is large and not enough memory is available. A workaround in + this case is to set `pre_dispatch`. Then, the memory is copied only + `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * + n_jobs`. + + Examples + -------- + >>> from sklearn import svm, datasets + >>> from sklearn.model_selection import GridSearchCV + >>> iris = datasets.load_iris() + >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} + >>> svc = svm.SVC() + >>> clf = GridSearchCV(svc, parameters) + >>> clf.fit(iris.data, iris.target) + GridSearchCV(estimator=SVC(), + param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')}) + >>> sorted(clf.cv_results_.keys()) + ['mean_fit_time', 'mean_score_time', 'mean_test_score',... + 'param_C', 'param_kernel', 'params',... + 'rank_test_score', 'split0_test_score',... + 'split2_test_score', ... 
+ 'std_fit_time', 'std_score_time', 'std_test_score'] + """ + + _required_parameters = ["estimator", "param_grid"] + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + "param_grid": [dict, list], + } + + def __init__( + self, + estimator, + param_grid, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + error_score=np.nan, + return_train_score=False, + ): + super().__init__( + estimator=estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + pre_dispatch=pre_dispatch, + error_score=error_score, + return_train_score=return_train_score, + ) + self.param_grid = param_grid + + def _run_search(self, evaluate_candidates): + """Search all candidates in param_grid""" + evaluate_candidates(ParameterGrid(self.param_grid)) + + +class RandomizedSearchCV(BaseSearchCV): + """Randomized search on hyper parameters. + + RandomizedSearchCV implements a "fit" and a "score" method. + It also implements "score_samples", "predict", "predict_proba", + "decision_function", "transform" and "inverse_transform" if they are + implemented in the estimator used. + + The parameters of the estimator used to apply these methods are optimized + by cross-validated search over parameter settings. + + In contrast to GridSearchCV, not all parameter values are tried out, but + rather a fixed number of parameter settings is sampled from the specified + distributions. The number of parameter settings that are tried is + given by n_iter. + + If all parameters are presented as a list, + sampling without replacement is performed. If at least one parameter + is given as a distribution, sampling with replacement is used. + It is highly recommended to use continuous distributions for continuous + parameters. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : estimator object + An object of that type is instantiated for each grid point. + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_distributions : dict or list of dicts + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_iter : int, default=10 + Number of parameter settings that are sampled. n_iter trades + off runtime vs quality of the solution. + + scoring : str, callable, list, tuple or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + If None, the estimator's score method is used. + + n_jobs : int, default=None + Number of jobs to run in parallel. 
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + refit : bool, str, or callable, default=True + Refit an estimator using the best found parameters on the whole + dataset. + + For multiple metric evaluation, this needs to be a `str` denoting the + scorer that would be used to find the best parameters for refitting + the estimator at the end. + + Where there are considerations other than maximum score in + choosing a best estimator, ``refit`` can be set to a function which + returns the selected ``best_index_`` given the ``cv_results``. In that + case, the ``best_estimator_`` and ``best_params_`` will be set + according to the returned ``best_index_`` while the ``best_score_`` + attribute will not be available. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``RandomizedSearchCV`` instance. + + Also for multiple metric evaluation, the attributes ``best_index_``, + ``best_score_`` and ``best_params_`` will only be available if + ``refit`` is set and all of them will be determined w.r.t this specific + scorer. + + See ``scoring`` parameter to know more about multiple metric + evaluation. + + .. versionchanged:: 0.20 + Support for callable added. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : int + Controls the verbosity: the higher, the more messages. + + - >1 : the computation time for each fold and parameter candidate is + displayed; + - >2 : the score is also displayed; + - >3 : the fold and candidate parameter indexes are also displayed + together with the starting time of the computation. + + pre_dispatch : int, or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for random uniform sampling + from lists of possible values instead of scipy.stats distributions. + Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. 
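+
+ As an illustrative sketch, a search space mixing a continuous
+ distribution (drawn through its ``rvs`` method) with a plain list
+ (sampled uniformly using this ``random_state``) could look like::
+
+     from scipy.stats import uniform
+
+     param_distributions = {
+         "C": uniform(loc=0, scale=4),   # continuous distribution
+         "penalty": ["l2", "l1"],        # discrete list of values
+     }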
+ + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + Attributes + ---------- + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. + + For instance the below given table + + +--------------+-------------+-------------------+---+---------------+ + | param_kernel | param_gamma | split0_test_score |...|rank_test_score| + +==============+=============+===================+===+===============+ + | 'rbf' | 0.1 | 0.80 |...| 1 | + +--------------+-------------+-------------------+---+---------------+ + | 'rbf' | 0.2 | 0.84 |...| 3 | + +--------------+-------------+-------------------+---+---------------+ + | 'rbf' | 0.3 | 0.70 |...| 2 | + +--------------+-------------+-------------------+---+---------------+ + + will be represented by a ``cv_results_`` dict of:: + + { + 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], + mask = False), + 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), + 'split0_test_score' : [0.80, 0.84, 0.70], + 'split1_test_score' : [0.82, 0.50, 0.70], + 'mean_test_score' : [0.81, 0.67, 0.70], + 'std_test_score' : [0.01, 0.24, 0.00], + 'rank_test_score' : [1, 3, 2], + 'split0_train_score' : [0.80, 0.92, 0.70], + 'split1_train_score' : [0.82, 0.55, 0.70], + 'mean_train_score' : [0.81, 0.74, 0.70], + 'std_train_score' : [0.01, 0.19, 0.00], + 'mean_fit_time' : [0.73, 0.63, 0.43], + 'std_fit_time' : [0.01, 0.02, 0.01], + 'mean_score_time' : [0.01, 0.06, 0.04], + 'std_score_time' : [0.00, 0.00, 0.00], + 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], + } + + NOTE + + The key ``'params'`` is used to store a list of parameter + settings dicts for all the parameter candidates. + + The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and + ``std_score_time`` are all in seconds. + + For multi-metric evaluation, the scores for all the scorers are + available in the ``cv_results_`` dict at the keys ending with that + scorer's name (``'_'``) instead of ``'_score'`` shown + above. ('split0_test_precision', 'mean_train_precision' etc.) + + best_estimator_ : estimator + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. + + For multi-metric evaluation, this attribute is present only if + ``refit`` is specified. + + See ``refit`` parameter for more information on allowed values. + + best_score_ : float + Mean cross-validated score of the best_estimator. + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. 
+ + This attribute is not available if ``refit`` is a function. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + For multi-metric evaluation, this attribute holds the validated + ``scoring`` dict which maps the scorer key to the scorer callable. + + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + .. versionadded:: 0.20 + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + GridSearchCV : Does exhaustive search over a grid of parameters. + ParameterSampler : A generator over parameter settings, constructed from + param_distributions. + + Notes + ----- + The parameters selected are those that maximize the score of the held-out + data, according to the scoring parameter. + + If `n_jobs` was set to a value higher than one, the data is copied for each + parameter setting(and not `n_jobs` times). This is done for efficiency + reasons if individual jobs take very little time, but may raise errors if + the dataset is large and not enough memory is available. A workaround in + this case is to set `pre_dispatch`. Then, the memory is copied only + `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * + n_jobs`. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import RandomizedSearchCV + >>> from scipy.stats import uniform + >>> iris = load_iris() + >>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200, + ... random_state=0) + >>> distributions = dict(C=uniform(loc=0, scale=4), + ... 
penalty=['l2', 'l1']) + >>> clf = RandomizedSearchCV(logistic, distributions, random_state=0) + >>> search = clf.fit(iris.data, iris.target) + >>> search.best_params_ + {'C': 2..., 'penalty': 'l1'} + """ + + _required_parameters = ["estimator", "param_distributions"] + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + "param_distributions": [dict, list], + "n_iter": [Interval(numbers.Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator, + param_distributions, + *, + n_iter=10, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + random_state=None, + error_score=np.nan, + return_train_score=False, + ): + self.param_distributions = param_distributions + self.n_iter = n_iter + self.random_state = random_state + super().__init__( + estimator=estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + pre_dispatch=pre_dispatch, + error_score=error_score, + return_train_score=return_train_score, + ) + + def _run_search(self, evaluate_candidates): + """Search n_iter candidates from param_distributions""" + evaluate_candidates( + ParameterSampler( + self.param_distributions, self.n_iter, random_state=self.random_state + ) + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py new file mode 100644 index 0000000000000000000000000000000000000000..b1cf5ee50965cee9623516d002522ff0d2a502eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py @@ -0,0 +1,1079 @@ +from abc import abstractmethod +from copy import deepcopy +from math import ceil, floor, log +from numbers import Integral, Real + +import numpy as np + +from ..base import _fit_context, is_classifier +from ..metrics._scorer import get_scorer_names +from ..utils import resample +from ..utils._param_validation import Interval, StrOptions +from ..utils.multiclass import check_classification_targets +from ..utils.validation import _num_samples +from . 
import ParameterGrid, ParameterSampler +from ._search import BaseSearchCV +from ._split import _yields_constant_splits, check_cv + +__all__ = ["HalvingGridSearchCV", "HalvingRandomSearchCV"] + + +class _SubsampleMetaSplitter: + """Splitter that subsamples a given fraction of the dataset""" + + def __init__(self, *, base_cv, fraction, subsample_test, random_state): + self.base_cv = base_cv + self.fraction = fraction + self.subsample_test = subsample_test + self.random_state = random_state + + def split(self, X, y, **kwargs): + for train_idx, test_idx in self.base_cv.split(X, y, **kwargs): + train_idx = resample( + train_idx, + replace=False, + random_state=self.random_state, + n_samples=int(self.fraction * len(train_idx)), + ) + if self.subsample_test: + test_idx = resample( + test_idx, + replace=False, + random_state=self.random_state, + n_samples=int(self.fraction * len(test_idx)), + ) + yield train_idx, test_idx + + +def _top_k(results, k, itr): + # Return the best candidates of a given iteration + iteration, mean_test_score, params = ( + np.asarray(a) + for a in (results["iter"], results["mean_test_score"], results["params"]) + ) + iter_indices = np.flatnonzero(iteration == itr) + scores = mean_test_score[iter_indices] + # argsort() places NaNs at the end of the array so we move NaNs to the + # front of the array so the last `k` items are the those with the + # highest scores. + sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores))) + return np.array(params[iter_indices][sorted_indices[-k:]]) + + +class BaseSuccessiveHalving(BaseSearchCV): + """Implements successive halving. + + Ref: + Almost optimal exploration in multi-armed bandits, ICML 13 + Zohar Karnin, Tomer Koren, Oren Somekh + """ + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + # overwrite `scoring` since multi-metrics are not supported + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "random_state": ["random_state"], + "max_resources": [ + Interval(Integral, 0, None, closed="neither"), + StrOptions({"auto"}), + ], + "min_resources": [ + Interval(Integral, 0, None, closed="neither"), + StrOptions({"exhaust", "smallest"}), + ], + "resource": [str], + "factor": [Interval(Real, 0, None, closed="neither")], + "aggressive_elimination": ["boolean"], + } + _parameter_constraints.pop("pre_dispatch") # not used in this class + + def __init__( + self, + estimator, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=5, + verbose=0, + random_state=None, + error_score=np.nan, + return_train_score=True, + max_resources="auto", + min_resources="exhaust", + resource="n_samples", + factor=3, + aggressive_elimination=False, + ): + super().__init__( + estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + error_score=error_score, + return_train_score=return_train_score, + ) + + self.random_state = random_state + self.max_resources = max_resources + self.resource = resource + self.factor = factor + self.min_resources = min_resources + self.aggressive_elimination = aggressive_elimination + + def _check_input_parameters(self, X, y, split_params): + # We need to enforce that successive calls to cv.split() yield the same + # splits: see https://github.com/scikit-learn/scikit-learn/issues/15149 + if not _yields_constant_splits(self._checked_cv_orig): + raise ValueError( + "The cv parameter must yield consistent folds across " + "calls to split(). Set its random_state to an int, or set " + "shuffle=False." 
+ ) + + if ( + self.resource != "n_samples" + and self.resource not in self.estimator.get_params() + ): + raise ValueError( + f"Cannot use resource={self.resource} which is not supported " + f"by estimator {self.estimator.__class__.__name__}" + ) + + if isinstance(self, HalvingRandomSearchCV): + if self.min_resources == self.n_candidates == "exhaust": + # for n_candidates=exhaust to work, we need to know what + # min_resources is. Similarly min_resources=exhaust needs to + # know the actual number of candidates. + raise ValueError( + "n_candidates and min_resources cannot be both set to 'exhaust'." + ) + + self.min_resources_ = self.min_resources + if self.min_resources_ in ("smallest", "exhaust"): + if self.resource == "n_samples": + n_splits = self._checked_cv_orig.get_n_splits(X, y, **split_params) + # please see https://gph.is/1KjihQe for a justification + magic_factor = 2 + self.min_resources_ = n_splits * magic_factor + if is_classifier(self.estimator): + y = self._validate_data(X="no_validation", y=y) + check_classification_targets(y) + n_classes = np.unique(y).shape[0] + self.min_resources_ *= n_classes + else: + self.min_resources_ = 1 + # if 'exhaust', min_resources_ might be set to a higher value later + # in _run_search + + self.max_resources_ = self.max_resources + if self.max_resources_ == "auto": + if not self.resource == "n_samples": + raise ValueError( + "resource can only be 'n_samples' when max_resources='auto'" + ) + self.max_resources_ = _num_samples(X) + + if self.min_resources_ > self.max_resources_: + raise ValueError( + f"min_resources_={self.min_resources_} is greater " + f"than max_resources_={self.max_resources_}." + ) + + if self.min_resources_ == 0: + raise ValueError( + f"min_resources_={self.min_resources_}: you might have passed " + "an empty dataset X." + ) + + @staticmethod + def _select_best_index(refit, refit_metric, results): + """Custom refit callable to return the index of the best candidate. + + We want the best candidate out of the last iteration. By default + BaseSearchCV would return the best candidate out of all iterations. + + Currently, we only support for a single metric thus `refit` and + `refit_metric` are not required. + """ + last_iter = np.max(results["iter"]) + last_iter_indices = np.flatnonzero(results["iter"] == last_iter) + + test_scores = results["mean_test_score"][last_iter_indices] + # If all scores are NaNs there is no way to pick between them, + # so we (arbitrarily) declare the zero'th entry the best one + if np.isnan(test_scores).all(): + best_idx = 0 + else: + best_idx = np.nanargmax(test_scores) + + return last_iter_indices[best_idx] + + @_fit_context( + # Halving*SearchCV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **params): + """Run fit with all sets of parameters. + + Parameters + ---------- + + X : array-like, shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_output), optional + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict of string -> object + Parameters passed to the ``fit`` method of the estimator. + + Returns + ------- + self : object + Instance of fitted estimator. 
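+
+ Notes
+ -----
+ A minimal usage sketch (illustrative only; the experimental import is
+ required before the halving estimators can be imported)::
+
+     from sklearn.experimental import enable_halving_search_cv  # noqa
+     from sklearn.model_selection import HalvingGridSearchCV
+     from sklearn.datasets import load_iris
+     from sklearn.svm import SVC
+
+     X, y = load_iris(return_X_y=True)
+     search = HalvingGridSearchCV(
+         SVC(), {"C": [0.1, 1, 10]}, factor=2
+     ).fit(X, y)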
+ """ + self._checked_cv_orig = check_cv( + self.cv, y, classifier=is_classifier(self.estimator) + ) + + routed_params = self._get_routed_params_for_fit(params) + self._check_input_parameters( + X=X, y=y, split_params=routed_params.splitter.split + ) + + self._n_samples_orig = _num_samples(X) + + super().fit(X, y=y, **params) + + # Set best_score_: BaseSearchCV does not set it, as refit is a callable + self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_] + + return self + + def _run_search(self, evaluate_candidates): + candidate_params = self._generate_candidate_params() + + if self.resource != "n_samples" and any( + self.resource in candidate for candidate in candidate_params + ): + # Can only check this now since we need the candidates list + raise ValueError( + f"Cannot use parameter {self.resource} as the resource since " + "it is part of the searched parameters." + ) + + # n_required_iterations is the number of iterations needed so that the + # last iterations evaluates less than `factor` candidates. + n_required_iterations = 1 + floor(log(len(candidate_params), self.factor)) + + if self.min_resources == "exhaust": + # To exhaust the resources, we want to start with the biggest + # min_resources possible so that the last (required) iteration + # uses as many resources as possible + last_iteration = n_required_iterations - 1 + self.min_resources_ = max( + self.min_resources_, + self.max_resources_ // self.factor**last_iteration, + ) + + # n_possible_iterations is the number of iterations that we can + # actually do starting from min_resources and without exceeding + # max_resources. Depending on max_resources and the number of + # candidates, this may be higher or smaller than + # n_required_iterations. + n_possible_iterations = 1 + floor( + log(self.max_resources_ // self.min_resources_, self.factor) + ) + + if self.aggressive_elimination: + n_iterations = n_required_iterations + else: + n_iterations = min(n_possible_iterations, n_required_iterations) + + if self.verbose: + print(f"n_iterations: {n_iterations}") + print(f"n_required_iterations: {n_required_iterations}") + print(f"n_possible_iterations: {n_possible_iterations}") + print(f"min_resources_: {self.min_resources_}") + print(f"max_resources_: {self.max_resources_}") + print(f"aggressive_elimination: {self.aggressive_elimination}") + print(f"factor: {self.factor}") + + self.n_resources_ = [] + self.n_candidates_ = [] + + for itr in range(n_iterations): + power = itr # default + if self.aggressive_elimination: + # this will set n_resources to the initial value (i.e. the + # value of n_resources at the first iteration) for as many + # iterations as needed (while candidates are being + # eliminated), and then go on as usual. 
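+ # Illustrative sketch with hypothetical numbers: factor=3,
+ # 20 candidates, min_resources_=10 and max_resources_=60 give
+ # n_required_iterations=3 but n_possible_iterations=2, so the
+ # power below is max(0, itr - 3 + 2): iterations 0 and 1 both
+ # run with 10 resources and only iteration 2 moves up to 30.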
+ power = max(0, itr - n_required_iterations + n_possible_iterations) + + n_resources = int(self.factor**power * self.min_resources_) + # guard, probably not needed + n_resources = min(n_resources, self.max_resources_) + self.n_resources_.append(n_resources) + + n_candidates = len(candidate_params) + self.n_candidates_.append(n_candidates) + + if self.verbose: + print("-" * 10) + print(f"iter: {itr}") + print(f"n_candidates: {n_candidates}") + print(f"n_resources: {n_resources}") + + if self.resource == "n_samples": + # subsampling will be done in cv.split() + cv = _SubsampleMetaSplitter( + base_cv=self._checked_cv_orig, + fraction=n_resources / self._n_samples_orig, + subsample_test=True, + random_state=self.random_state, + ) + + else: + # Need copy so that the n_resources of next iteration does + # not overwrite + candidate_params = [c.copy() for c in candidate_params] + for candidate in candidate_params: + candidate[self.resource] = n_resources + cv = self._checked_cv_orig + + more_results = { + "iter": [itr] * n_candidates, + "n_resources": [n_resources] * n_candidates, + } + + results = evaluate_candidates( + candidate_params, cv, more_results=more_results + ) + + n_candidates_to_keep = ceil(n_candidates / self.factor) + candidate_params = _top_k(results, n_candidates_to_keep, itr) + + self.n_remaining_candidates_ = len(candidate_params) + self.n_required_iterations_ = n_required_iterations + self.n_possible_iterations_ = n_possible_iterations + self.n_iterations_ = n_iterations + + @abstractmethod + def _generate_candidate_params(self): + pass + + def _more_tags(self): + tags = deepcopy(super()._more_tags()) + tags["_xfail_checks"].update( + { + "check_fit2d_1sample": ( + "Fail during parameter check since min/max resources requires" + " more samples" + ), + } + ) + return tags + + +class HalvingGridSearchCV(BaseSuccessiveHalving): + """Search over specified parameter values with successive halving. + + The search strategy starts evaluating all the candidates with a small + amount of resources and iteratively selects the best candidates, using + more and more resources. + + Read more in the :ref:`User guide `. + + .. note:: + + This estimator is still **experimental** for now: the predictions + and the API might change without any deprecation cycle. To use it, + you need to explicitly import ``enable_halving_search_cv``:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> # now you can import normally from model_selection + >>> from sklearn.model_selection import HalvingGridSearchCV + + Parameters + ---------- + estimator : estimator object + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_grid : dict or list of dictionaries + Dictionary with parameters names (string) as keys and lists of + parameter settings to try as values, or a list of such + dictionaries, in which case the grids spanned by each dictionary + in the list are explored. This enables searching over any sequence + of parameter settings. + + factor : int or float, default=3 + The 'halving' parameter, which determines the proportion of candidates + that are selected for each subsequent iteration. For example, + ``factor=3`` means that only one third of the candidates are selected. + + resource : ``'n_samples'`` or str, default='n_samples' + Defines the resource that increases with each iteration. 
By default, + the resource is the number of samples. It can also be set to any + parameter of the base estimator that accepts positive integer + values, e.g. 'n_iterations' or 'n_estimators' for a gradient + boosting estimator. In this case ``max_resources`` cannot be 'auto' + and must be set explicitly. + + max_resources : int, default='auto' + The maximum amount of resource that any candidate is allowed to use + for a given iteration. By default, this is set to ``n_samples`` when + ``resource='n_samples'`` (default), else an error is raised. + + min_resources : {'exhaust', 'smallest'} or int, default='exhaust' + The minimum amount of resource that any candidate is allowed to use + for a given iteration. Equivalently, this defines the amount of + resources `r0` that are allocated for each candidate at the first + iteration. + + - 'smallest' is a heuristic that sets `r0` to a small value: + + - ``n_splits * 2`` when ``resource='n_samples'`` for a regression + problem + - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a + classification problem + - ``1`` when ``resource != 'n_samples'`` + + - 'exhaust' will set `r0` such that the **last** iteration uses as + much resources as possible. Namely, the last iteration will use the + highest value smaller than ``max_resources`` that is a multiple of + both ``min_resources`` and ``factor``. In general, using 'exhaust' + leads to a more accurate estimator, but is slightly more time + consuming. + + Note that the amount of resources used at each iteration is always a + multiple of ``min_resources``. + + aggressive_elimination : bool, default=False + This is only relevant in cases where there isn't enough resources to + reduce the remaining candidates to at most `factor` after the last + iteration. If ``True``, then the search process will 'replay' the + first iteration for as long as needed until the number of candidates + is small enough. This is ``False`` by default, which means that the + last iteration may evaluate more than ``factor`` candidates. See + :ref:`aggressive_elimination` for more details. + + cv : int, cross-validation generator or iterable, default=5 + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. note:: + Due to implementation details, the folds produced by `cv` must be + the same across multiple calls to `cv.split()`. For + built-in `scikit-learn` iterators, this can be achieved by + deactivating shuffling (`shuffle=False`), or by setting the + `cv`'s `random_state` parameter to an integer. + + scoring : str, callable, or None, default=None + A single string (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + If None, the estimator's score method is used. + + refit : bool, default=True + If True, refit an estimator using the best found parameters on the + whole dataset. 
+ + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``HalvingGridSearchCV`` instance. + + error_score : 'raise' or numeric + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. Default is ``np.nan``. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for subsampling the dataset + when `resources != 'n_samples'`. Ignored otherwise. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int or None, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int + Controls the verbosity: the higher, the more messages. + + Attributes + ---------- + n_resources_ : list of int + The amount of resources used at each iteration. + + n_candidates_ : list of int + The number of candidate parameters that were evaluated at each + iteration. + + n_remaining_candidates_ : int + The number of candidate parameters that are left after the last + iteration. It corresponds to `ceil(n_candidates[-1] / factor)` + + max_resources_ : int + The maximum number of resources that any candidate is allowed to use + for a given iteration. Note that since the number of resources used + at each iteration must be a multiple of ``min_resources_``, the + actual number of resources used at the last iteration may be smaller + than ``max_resources_``. + + min_resources_ : int + The amount of resources that are allocated for each candidate at the + first iteration. + + n_iterations_ : int + The actual number of iterations that were run. This is equal to + ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``. + Else, this is equal to ``min(n_possible_iterations_, + n_required_iterations_)``. + + n_possible_iterations_ : int + The number of iterations that are possible starting with + ``min_resources_`` resources and without exceeding + ``max_resources_``. + + n_required_iterations_ : int + The number of iterations that are required to end up with less than + ``factor`` candidates at the last iteration, starting with + ``min_resources_`` resources. This will be smaller than + ``n_possible_iterations_`` when there isn't enough resources. + + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. It contains lots of information + for analysing the results of a search. + Please refer to the :ref:`User guide` + for details. + + best_estimator_ : estimator or dict + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. 
Not available if ``refit=False``. + + best_score_ : float + Mean cross-validated score of the best_estimator. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + :class:`HalvingRandomSearchCV`: + Random search over a set of parameters using successive halving. + + Notes + ----- + The parameters selected are those that maximize the score of the held-out + data, according to the scoring parameter. + + All parameter combinations scored with a NaN will share the lowest rank. + + Examples + -------- + + >>> from sklearn.datasets import load_iris + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> from sklearn.model_selection import HalvingGridSearchCV + ... + >>> X, y = load_iris(return_X_y=True) + >>> clf = RandomForestClassifier(random_state=0) + ... + >>> param_grid = {"max_depth": [3, None], + ... "min_samples_split": [5, 10]} + >>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators', + ... max_resources=10, + ... 
random_state=0).fit(X, y) + >>> search.best_params_ # doctest: +SKIP + {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9} + """ + + _required_parameters = ["estimator", "param_grid"] + + _parameter_constraints: dict = { + **BaseSuccessiveHalving._parameter_constraints, + "param_grid": [dict, list], + } + + def __init__( + self, + estimator, + param_grid, + *, + factor=3, + resource="n_samples", + max_resources="auto", + min_resources="exhaust", + aggressive_elimination=False, + cv=5, + scoring=None, + refit=True, + error_score=np.nan, + return_train_score=True, + random_state=None, + n_jobs=None, + verbose=0, + ): + super().__init__( + estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + verbose=verbose, + cv=cv, + random_state=random_state, + error_score=error_score, + return_train_score=return_train_score, + max_resources=max_resources, + resource=resource, + factor=factor, + min_resources=min_resources, + aggressive_elimination=aggressive_elimination, + ) + self.param_grid = param_grid + + def _generate_candidate_params(self): + return ParameterGrid(self.param_grid) + + +class HalvingRandomSearchCV(BaseSuccessiveHalving): + """Randomized search on hyper parameters. + + The search strategy starts evaluating all the candidates with a small + amount of resources and iteratively selects the best candidates, using more + and more resources. + + The candidates are sampled at random from the parameter space and the + number of sampled candidates is determined by ``n_candidates``. + + Read more in the :ref:`User guide`. + + .. note:: + + This estimator is still **experimental** for now: the predictions + and the API might change without any deprecation cycle. To use it, + you need to explicitly import ``enable_halving_search_cv``:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> # now you can import normally from model_selection + >>> from sklearn.model_selection import HalvingRandomSearchCV + + Parameters + ---------- + estimator : estimator object + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_distributions : dict or list of dicts + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_candidates : "exhaust" or int, default="exhaust" + The number of candidate parameters to sample, at the first + iteration. Using 'exhaust' will sample enough candidates so that the + last iteration uses as many resources as possible, based on + `min_resources`, `max_resources` and `factor`. In this case, + `min_resources` cannot be 'exhaust'. + + factor : int or float, default=3 + The 'halving' parameter, which determines the proportion of candidates + that are selected for each subsequent iteration. For example, + ``factor=3`` means that only one third of the candidates are selected. + + resource : ``'n_samples'`` or str, default='n_samples' + Defines the resource that increases with each iteration. By default, + the resource is the number of samples. It can also be set to any + parameter of the base estimator that accepts positive integer + values, e.g. 
'n_iterations' or 'n_estimators' for a gradient + boosting estimator. In this case ``max_resources`` cannot be 'auto' + and must be set explicitly. + + max_resources : int, default='auto' + The maximum number of resources that any candidate is allowed to use + for a given iteration. By default, this is set ``n_samples`` when + ``resource='n_samples'`` (default), else an error is raised. + + min_resources : {'exhaust', 'smallest'} or int, default='smallest' + The minimum amount of resource that any candidate is allowed to use + for a given iteration. Equivalently, this defines the amount of + resources `r0` that are allocated for each candidate at the first + iteration. + + - 'smallest' is a heuristic that sets `r0` to a small value: + + - ``n_splits * 2`` when ``resource='n_samples'`` for a regression + problem + - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a + classification problem + - ``1`` when ``resource != 'n_samples'`` + + - 'exhaust' will set `r0` such that the **last** iteration uses as + much resources as possible. Namely, the last iteration will use the + highest value smaller than ``max_resources`` that is a multiple of + both ``min_resources`` and ``factor``. In general, using 'exhaust' + leads to a more accurate estimator, but is slightly more time + consuming. 'exhaust' isn't available when `n_candidates='exhaust'`. + + Note that the amount of resources used at each iteration is always a + multiple of ``min_resources``. + + aggressive_elimination : bool, default=False + This is only relevant in cases where there isn't enough resources to + reduce the remaining candidates to at most `factor` after the last + iteration. If ``True``, then the search process will 'replay' the + first iteration for as long as needed until the number of candidates + is small enough. This is ``False`` by default, which means that the + last iteration may evaluate more than ``factor`` candidates. See + :ref:`aggressive_elimination` for more details. + + cv : int, cross-validation generator or an iterable, default=5 + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. note:: + Due to implementation details, the folds produced by `cv` must be + the same across multiple calls to `cv.split()`. For + built-in `scikit-learn` iterators, this can be achieved by + deactivating shuffling (`shuffle=False`), or by setting the + `cv`'s `random_state` parameter to an integer. + + scoring : str, callable, or None, default=None + A single string (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + If None, the estimator's score method is used. + + refit : bool, default=True + If True, refit an estimator using the best found parameters on the + whole dataset. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``HalvingRandomSearchCV`` instance. 
+ + error_score : 'raise' or numeric + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. Default is ``np.nan``. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for subsampling the dataset + when `resources != 'n_samples'`. Also used for random uniform + sampling from lists of possible values instead of scipy.stats + distributions. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int or None, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int + Controls the verbosity: the higher, the more messages. + + Attributes + ---------- + n_resources_ : list of int + The amount of resources used at each iteration. + + n_candidates_ : list of int + The number of candidate parameters that were evaluated at each + iteration. + + n_remaining_candidates_ : int + The number of candidate parameters that are left after the last + iteration. It corresponds to `ceil(n_candidates[-1] / factor)` + + max_resources_ : int + The maximum number of resources that any candidate is allowed to use + for a given iteration. Note that since the number of resources used at + each iteration must be a multiple of ``min_resources_``, the actual + number of resources used at the last iteration may be smaller than + ``max_resources_``. + + min_resources_ : int + The amount of resources that are allocated for each candidate at the + first iteration. + + n_iterations_ : int + The actual number of iterations that were run. This is equal to + ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``. + Else, this is equal to ``min(n_possible_iterations_, + n_required_iterations_)``. + + n_possible_iterations_ : int + The number of iterations that are possible starting with + ``min_resources_`` resources and without exceeding + ``max_resources_``. + + n_required_iterations_ : int + The number of iterations that are required to end up with less than + ``factor`` candidates at the last iteration, starting with + ``min_resources_`` resources. This will be smaller than + ``n_possible_iterations_`` when there isn't enough resources. + + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. It contains lots of information + for analysing the results of a search. + Please refer to the :ref:`User guide` + for details. + + best_estimator_ : estimator or dict + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. 
+ + best_score_ : float + Mean cross-validated score of the best_estimator. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + :class:`HalvingGridSearchCV`: + Search over a grid of parameters using successive halving. + + Notes + ----- + The parameters selected are those that maximize the score of the held-out + data, according to the scoring parameter. + + All parameter combinations scored with a NaN will share the lowest rank. + + Examples + -------- + + >>> from sklearn.datasets import load_iris + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> from sklearn.model_selection import HalvingRandomSearchCV + >>> from scipy.stats import randint + >>> import numpy as np + ... + >>> X, y = load_iris(return_X_y=True) + >>> clf = RandomForestClassifier(random_state=0) + >>> np.random.seed(0) + ... + >>> param_distributions = {"max_depth": [3, None], + ... "min_samples_split": randint(2, 11)} + >>> search = HalvingRandomSearchCV(clf, param_distributions, + ... resource='n_estimators', + ... max_resources=10, + ... 
random_state=0).fit(X, y) + >>> search.best_params_ # doctest: +SKIP + {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9} + """ + + _required_parameters = ["estimator", "param_distributions"] + + _parameter_constraints: dict = { + **BaseSuccessiveHalving._parameter_constraints, + "param_distributions": [dict, list], + "n_candidates": [ + Interval(Integral, 0, None, closed="neither"), + StrOptions({"exhaust"}), + ], + } + + def __init__( + self, + estimator, + param_distributions, + *, + n_candidates="exhaust", + factor=3, + resource="n_samples", + max_resources="auto", + min_resources="smallest", + aggressive_elimination=False, + cv=5, + scoring=None, + refit=True, + error_score=np.nan, + return_train_score=True, + random_state=None, + n_jobs=None, + verbose=0, + ): + super().__init__( + estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + verbose=verbose, + cv=cv, + random_state=random_state, + error_score=error_score, + return_train_score=return_train_score, + max_resources=max_resources, + resource=resource, + factor=factor, + min_resources=min_resources, + aggressive_elimination=aggressive_elimination, + ) + self.param_distributions = param_distributions + self.n_candidates = n_candidates + + def _generate_candidate_params(self): + n_candidates_first_iter = self.n_candidates + if n_candidates_first_iter == "exhaust": + # This will generate enough candidate so that the last iteration + # uses as much resources as possible + n_candidates_first_iter = self.max_resources_ // self.min_resources_ + return ParameterSampler( + self.param_distributions, + n_candidates_first_iter, + random_state=self.random_state, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/_split.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/_split.py new file mode 100644 index 0000000000000000000000000000000000000000..1f89832daba227163f0639268deeebd7a26cae62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/_split.py @@ -0,0 +1,2794 @@ +""" +The :mod:`sklearn.model_selection._split` module includes classes and +functions to split the data based on a preset strategy. +""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Olivier Grisel +# Raghav RV +# Leandro Hermida +# Rodion Martynov +# License: BSD 3 clause + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from collections import defaultdict +from collections.abc import Iterable +from inspect import signature +from itertools import chain, combinations +from math import ceil, floor + +import numpy as np +from scipy.special import comb + +from ..utils import ( + _approximate_mode, + _safe_indexing, + check_random_state, + indexable, + metadata_routing, +) +from ..utils._param_validation import Interval, RealNotInt, validate_params +from ..utils.metadata_routing import _MetadataRequester +from ..utils.multiclass import type_of_target +from ..utils.validation import _num_samples, check_array, column_or_1d + +__all__ = [ + "BaseCrossValidator", + "KFold", + "GroupKFold", + "LeaveOneGroupOut", + "LeaveOneOut", + "LeavePGroupsOut", + "LeavePOut", + "RepeatedStratifiedKFold", + "RepeatedKFold", + "ShuffleSplit", + "GroupShuffleSplit", + "StratifiedKFold", + "StratifiedGroupKFold", + "StratifiedShuffleSplit", + "PredefinedSplit", + "train_test_split", + "check_cv", +] + + +class GroupsConsumerMixin(_MetadataRequester): + """A Mixin to ``groups`` by default. + + This Mixin makes the object to request ``groups`` by default as ``True``. + + .. 
versionadded:: 1.3 + """ + + __metadata_request__split = {"groups": True} + + +class BaseCrossValidator(_MetadataRequester, metaclass=ABCMeta): + """Base class for all cross-validators. + + Implementations must define `_iter_test_masks` or `_iter_test_indices`. + """ + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. + __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + indices = np.arange(_num_samples(X)) + for test_index in self._iter_test_masks(X, y, groups): + train_index = indices[np.logical_not(test_index)] + test_index = indices[test_index] + yield train_index, test_index + + # Since subclasses must implement either _iter_test_masks or + # _iter_test_indices, neither can be abstract. + def _iter_test_masks(self, X=None, y=None, groups=None): + """Generates boolean masks corresponding to test sets. + + By default, delegates to _iter_test_indices(X, y, groups) + """ + for test_index in self._iter_test_indices(X, y, groups): + test_mask = np.zeros(_num_samples(X), dtype=bool) + test_mask[test_index] = True + yield test_mask + + def _iter_test_indices(self, X=None, y=None, groups=None): + """Generates integer indices corresponding to test sets.""" + raise NotImplementedError + + @abstractmethod + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator.""" + + def __repr__(self): + return _build_repr(self) + + +class LeaveOneOut(BaseCrossValidator): + """Leave-One-Out cross-validator. + + Provides train/test indices to split data in train/test sets. Each + sample is used once as a test set (singleton) while the remaining + samples form the training set. + + Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and + ``LeavePOut(p=1)`` where ``n`` is the number of samples. + + Due to the high number of test sets (which is the same as the + number of samples) this cross-validation method can be very costly. + For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit` + or :class:`StratifiedKFold`. + + Read more in the :ref:`User Guide `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeaveOneOut + >>> X = np.array([[1, 2], [3, 4]]) + >>> y = np.array([1, 2]) + >>> loo = LeaveOneOut() + >>> loo.get_n_splits(X) + 2 + >>> print(loo) + LeaveOneOut() + >>> for i, (train_index, test_index) in enumerate(loo.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + Fold 0: + Train: index=[1] + Test: index=[0] + Fold 1: + Train: index=[0] + Test: index=[1] + + See Also + -------- + LeaveOneGroupOut : For splitting the data according to explicit, + domain-specific stratification of the dataset. + GroupKFold : K-fold iterator variant with non-overlapping groups. + """ + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + if n_samples <= 1: + raise ValueError( + "Cannot perform LeaveOneOut with n_samples={}.".format(n_samples) + ) + return range(n_samples) + + def get_n_splits(self, X, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + if X is None: + raise ValueError("The 'X' parameter should not be None.") + return _num_samples(X) + + +class LeavePOut(BaseCrossValidator): + """Leave-P-Out cross-validator. + + Provides train/test indices to split data in train/test sets. This results + in testing on all distinct samples of size p, while the remaining n - p + samples form the training set in each iteration. + + Note: ``LeavePOut(p)`` is NOT equivalent to + ``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets. + + Due to the high number of iterations which grows combinatorically with the + number of samples this cross-validation method can be very costly. For + large datasets one should favor :class:`KFold`, :class:`StratifiedKFold` + or :class:`ShuffleSplit`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + p : int + Size of the test sets. Must be strictly less than the number of + samples. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeavePOut + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + >>> y = np.array([1, 2, 3, 4]) + >>> lpo = LeavePOut(2) + >>> lpo.get_n_splits(X) + 6 + >>> print(lpo) + LeavePOut(p=2) + >>> for i, (train_index, test_index) in enumerate(lpo.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[2 3] + Test: index=[0 1] + Fold 1: + Train: index=[1 3] + Test: index=[0 2] + Fold 2: + Train: index=[1 2] + Test: index=[0 3] + Fold 3: + Train: index=[0 3] + Test: index=[1 2] + Fold 4: + Train: index=[0 2] + Test: index=[1 3] + Fold 5: + Train: index=[0 1] + Test: index=[2 3] + """ + + def __init__(self, p): + self.p = p + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + if n_samples <= self.p: + raise ValueError( + "p={} must be strictly less than the number of samples={}".format( + self.p, n_samples + ) + ) + for combination in combinations(range(n_samples), self.p): + yield np.array(combination) + + def get_n_splits(self, X, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : object + Always ignored, exists for compatibility. 
+ + groups : object + Always ignored, exists for compatibility. + """ + if X is None: + raise ValueError("The 'X' parameter should not be None.") + return int(comb(_num_samples(X), self.p, exact=True)) + + +class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta): + """Base class for K-Fold cross-validators and TimeSeriesSplit.""" + + @abstractmethod + def __init__(self, n_splits, *, shuffle, random_state): + if not isinstance(n_splits, numbers.Integral): + raise ValueError( + "The number of folds must be of Integral type. " + "%s of type %s was passed." % (n_splits, type(n_splits)) + ) + n_splits = int(n_splits) + + if n_splits <= 1: + raise ValueError( + "k-fold cross-validation requires at least one" + " train/test split by setting n_splits=2 or more," + " got n_splits={0}.".format(n_splits) + ) + + if not isinstance(shuffle, bool): + raise TypeError("shuffle must be True or False; got {0}".format(shuffle)) + + if not shuffle and random_state is not None: # None is the default + raise ValueError( + ( + "Setting a random_state has no effect since shuffle is " + "False. You should leave " + "random_state to its default (None), or set shuffle=True." + ), + ) + + self.n_splits = n_splits + self.shuffle = shuffle + self.random_state = random_state + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + n_samples = _num_samples(X) + if self.n_splits > n_samples: + raise ValueError( + ( + "Cannot have number of splits n_splits={0} greater" + " than the number of samples: n_samples={1}." + ).format(self.n_splits, n_samples) + ) + + for train, test in super().split(X, y, groups): + yield train, test + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return self.n_splits + + +class KFold(_BaseKFold): + """K-Fold cross-validator. + + Provides train/test indices to split data in train/test sets. Split + dataset into k consecutive folds (without shuffling by default). + + Each fold is then used once as a validation while the k - 1 remaining + folds form the training set. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. 
+ + shuffle : bool, default=False + Whether to shuffle the data before splitting into batches. + Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold. Otherwise, this + parameter has no effect. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import KFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([1, 2, 3, 4]) + >>> kf = KFold(n_splits=2) + >>> kf.get_n_splits(X) + 2 + >>> print(kf) + KFold(n_splits=2, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(kf.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[2 3] + Test: index=[0 1] + Fold 1: + Train: index=[0 1] + Test: index=[2 3] + + Notes + ----- + The first ``n_samples % n_splits`` folds have size + ``n_samples // n_splits + 1``, other folds have size + ``n_samples // n_splits``, where ``n_samples`` is the number of samples. + + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + StratifiedKFold : Takes class information into account to avoid building + folds with imbalanced class distributions (for binary or multiclass + classification tasks). + + GroupKFold : K-fold iterator variant with non-overlapping groups. + + RepeatedKFold : Repeats K-Fold n times. + """ + + def __init__(self, n_splits=5, *, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + indices = np.arange(n_samples) + if self.shuffle: + check_random_state(self.random_state).shuffle(indices) + + n_splits = self.n_splits + fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int) + fold_sizes[: n_samples % n_splits] += 1 + current = 0 + for fold_size in fold_sizes: + start, stop = current, current + fold_size + yield indices[start:stop] + current = stop + + +class GroupKFold(GroupsConsumerMixin, _BaseKFold): + """K-fold iterator variant with non-overlapping groups. + + Each group will appear exactly once in the test set across all folds (the + number of distinct groups has to be at least equal to the number of folds). + + The folds are approximately balanced in the sense that the number of + distinct groups is approximately the same in each fold. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + Notes + ----- + Groups appear in an arbitrary order throughout the folds. 
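+
+    In practice the splitter is usually passed as ``cv`` to a helper such as
+    ``cross_val_score`` together with the ``groups`` array, so that no group
+    is shared between a training and a test fold. A minimal sketch with toy,
+    illustrative data (the scores depend on the estimator and the data):
+
+    >>> from sklearn.model_selection import GroupKFold, cross_val_score
+    >>> from sklearn.tree import DecisionTreeClassifier
+    >>> X = [[1], [2], [3], [4]]
+    >>> y = [0, 0, 1, 1]
+    >>> groups = [0, 0, 1, 1]
+    >>> cross_val_score(DecisionTreeClassifier(), X, y, groups=groups,
+    ...                 cv=GroupKFold(n_splits=2))  # doctest: +SKIP
+    array([0., 0.])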
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import GroupKFold + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> groups = np.array([0, 0, 2, 2, 3, 3]) + >>> group_kfold = GroupKFold(n_splits=2) + >>> group_kfold.get_n_splits(X, y, groups) + 2 + >>> print(group_kfold) + GroupKFold(n_splits=2) + >>> for i, (train_index, test_index) in enumerate(group_kfold.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3], group=[2 2] + Test: index=[0 1 4 5], group=[0 0 3 3] + Fold 1: + Train: index=[0 1 4 5], group=[0 0 3 3] + Test: index=[2 3], group=[2 2] + + See Also + -------- + LeaveOneGroupOut : For splitting the data according to explicit + domain-specific stratification of the dataset. + + StratifiedKFold : Takes class information into account to avoid building + folds with imbalanced class proportions (for binary or multiclass + classification tasks). + """ + + def __init__(self, n_splits=5): + super().__init__(n_splits, shuffle=False, random_state=None) + + def _iter_test_indices(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + + unique_groups, groups = np.unique(groups, return_inverse=True) + n_groups = len(unique_groups) + + if self.n_splits > n_groups: + raise ValueError( + "Cannot have number of splits n_splits=%d greater" + " than the number of groups: %d." % (self.n_splits, n_groups) + ) + + # Weight groups by their number of occurrences + n_samples_per_group = np.bincount(groups) + + # Distribute the most frequent groups first + indices = np.argsort(n_samples_per_group)[::-1] + n_samples_per_group = n_samples_per_group[indices] + + # Total weight of each fold + n_samples_per_fold = np.zeros(self.n_splits) + + # Mapping from group index to fold index + group_to_fold = np.zeros(len(unique_groups)) + + # Distribute samples by adding the largest weight to the lightest fold + for group_index, weight in enumerate(n_samples_per_group): + lightest_fold = np.argmin(n_samples_per_fold) + n_samples_per_fold[lightest_fold] += weight + group_to_fold[indices[group_index]] = lightest_fold + + indices = group_to_fold[groups] + + for f in range(self.n_splits): + yield np.where(indices == f)[0] + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class StratifiedKFold(_BaseKFold): + """Stratified K-Fold cross-validator. + + Provides train/test indices to split data in train/test sets. + + This cross-validation object is a variation of KFold that returns + stratified folds. 
The folds are made by preserving the percentage of + samples for each class. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + shuffle : bool, default=False + Whether to shuffle each class's samples before splitting into batches. + Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold for each class. + Otherwise, leave `random_state` as `None`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> skf = StratifiedKFold(n_splits=2) + >>> skf.get_n_splits(X, y) + 2 + >>> print(skf) + StratifiedKFold(n_splits=2, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(skf.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3] + Test: index=[0 2] + Fold 1: + Train: index=[0 2] + Test: index=[1 3] + + Notes + ----- + The implementation is designed to: + + * Generate test sets such that all contain the same distribution of + classes, or as close as possible. + * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to + ``y = [1, 0]`` should not change the indices generated. + * Preserve order dependencies in the dataset ordering, when + ``shuffle=False``: all samples from class k in some test set were + contiguous in y, or separated in y by samples from classes other than k. + * Generate test sets where the smallest and largest differ by at most one + sample. + + .. versionchanged:: 0.22 + The previous implementation did not follow the last constraint. + + See Also + -------- + RepeatedStratifiedKFold : Repeats Stratified K-Fold n times. + """ + + def __init__(self, n_splits=5, *, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _make_test_folds(self, X, y=None): + rng = check_random_state(self.random_state) + y = np.asarray(y) + type_of_target_y = type_of_target(y) + allowed_target_types = ("binary", "multiclass") + if type_of_target_y not in allowed_target_types: + raise ValueError( + "Supported target types are: {}. Got {!r} instead.".format( + allowed_target_types, type_of_target_y + ) + ) + + y = column_or_1d(y) + + _, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True) + # y_inv encodes y according to lexicographic order. We invert y_idx to + # map the classes so that they are encoded by order of appearance: + # 0 represents the first label appearing in y, 1 the second, etc. 
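+        # For example (illustrative values): y = ["b", "a", "b"] gives
+        # y_idx = [1, 0] and y_inv = [1, 0, 1]; the re-encoding below then
+        # yields y_encoded = [0, 1, 0], i.e. "b" (the first label to appear)
+        # is encoded as 0 and "a" as 1.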
+ _, class_perm = np.unique(y_idx, return_inverse=True) + y_encoded = class_perm[y_inv] + + n_classes = len(y_idx) + y_counts = np.bincount(y_encoded) + min_groups = np.min(y_counts) + if np.all(self.n_splits > y_counts): + raise ValueError( + "n_splits=%d cannot be greater than the" + " number of members in each class." % (self.n_splits) + ) + if self.n_splits > min_groups: + warnings.warn( + "The least populated class in y has only %d" + " members, which is less than n_splits=%d." + % (min_groups, self.n_splits), + UserWarning, + ) + + # Determine the optimal number of samples from each class in each fold, + # using round robin over the sorted y. (This can be done direct from + # counts, but that code is unreadable.) + y_order = np.sort(y_encoded) + allocation = np.asarray( + [ + np.bincount(y_order[i :: self.n_splits], minlength=n_classes) + for i in range(self.n_splits) + ] + ) + + # To maintain the data order dependencies as best as possible within + # the stratification constraint, we assign samples from each class in + # blocks (and then mess that up when shuffle=True). + test_folds = np.empty(len(y), dtype="i") + for k in range(n_classes): + # since the kth column of allocation stores the number of samples + # of class k in each test set, this generates blocks of fold + # indices corresponding to the allocation for class k. + folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k]) + if self.shuffle: + rng.shuffle(folds_for_class) + test_folds[y_encoded == k] = folds_for_class + return test_folds + + def _iter_test_masks(self, X, y=None, groups=None): + test_folds = self._make_test_folds(X, y) + for i in range(self.n_splits): + yield test_folds == i + + def split(self, X, y, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Note that providing ``y`` is sufficient to generate the splits and + hence ``np.zeros(n_samples)`` may be used as a placeholder for + ``X`` instead of actual training data. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + Stratification is done based on the y labels. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + return super().split(X, y, groups) + + +class StratifiedGroupKFold(GroupsConsumerMixin, _BaseKFold): + """Stratified K-Fold iterator variant with non-overlapping groups. + + This cross-validation object is a variation of StratifiedKFold attempts to + return stratified folds with non-overlapping groups. The folds are made by + preserving the percentage of samples for each class. + + Each group will appear exactly once in the test set across all folds (the + number of distinct groups has to be at least equal to the number of folds). 
+ + The difference between :class:`~sklearn.model_selection.GroupKFold` + and :class:`~sklearn.model_selection.StratifiedGroupKFold` is that + the former attempts to create balanced folds such that the number of + distinct groups is approximately the same in each fold, whereas + StratifiedGroupKFold attempts to create folds which preserve the + percentage of samples for each class as much as possible given the + constraint of non-overlapping groups between splits. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + shuffle : bool, default=False + Whether to shuffle each class's samples before splitting into batches. + Note that the samples within each split will not be shuffled. + This implementation can only shuffle groups that have approximately the + same y distribution, no global shuffle will be performed. + + random_state : int or RandomState instance, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold for each class. + Otherwise, leave `random_state` as `None`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedGroupKFold + >>> X = np.ones((17, 2)) + >>> y = np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + >>> groups = np.array([1, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8]) + >>> sgkf = StratifiedGroupKFold(n_splits=3) + >>> sgkf.get_n_splits(X, y) + 3 + >>> print(sgkf) + StratifiedGroupKFold(n_splits=3, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(sgkf.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" group={groups[train_index]}") + ... print(f" Test: index={test_index}") + ... print(f" group={groups[test_index]}") + Fold 0: + Train: index=[ 0 1 2 3 7 8 9 10 11 15 16] + group=[1 1 2 2 4 5 5 5 5 8 8] + Test: index=[ 4 5 6 12 13 14] + group=[3 3 3 6 6 7] + Fold 1: + Train: index=[ 4 5 6 7 8 9 10 11 12 13 14] + group=[3 3 3 4 5 5 5 5 6 6 7] + Test: index=[ 0 1 2 3 15 16] + group=[1 1 2 2 8 8] + Fold 2: + Train: index=[ 0 1 2 3 4 5 6 12 13 14 15 16] + group=[1 1 2 2 3 3 3 6 6 7 8 8] + Test: index=[ 7 8 9 10 11] + group=[4 5 5 5 5] + + Notes + ----- + The implementation is designed to: + + * Mimic the behavior of StratifiedKFold as much as possible for trivial + groups (e.g. when each group contains only one sample). + * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to + ``y = [1, 0]`` should not change the indices generated. + * Stratify based on samples as much as possible while keeping + non-overlapping groups constraint. That means that in some cases when + there is a small number of groups containing a large number of samples + the stratification will not be possible and the behavior will be close + to GroupKFold. + + See also + -------- + StratifiedKFold: Takes class information into account to build folds which + retain class distributions (for binary or multiclass classification + tasks). + + GroupKFold: K-fold iterator variant with non-overlapping groups. 
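+
+    A small illustrative check on toy data (the exact assignment of groups to
+    folds depends on the class distribution within each group): every group
+    ends up in exactly one test fold.
+
+    >>> import numpy as np
+    >>> from sklearn.model_selection import StratifiedGroupKFold
+    >>> X = np.ones((6, 1))
+    >>> y = np.array([0, 0, 1, 1, 0, 1])
+    >>> groups = np.array([1, 1, 2, 2, 3, 3])
+    >>> sgkf = StratifiedGroupKFold(n_splits=3)
+    >>> sorted(np.unique(groups[test]).tolist()
+    ...        for _, test in sgkf.split(X, y, groups))  # doctest: +SKIP
+    [[1], [2], [3]]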
+    """
+
+    def __init__(self, n_splits=5, shuffle=False, random_state=None):
+        super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
+
+    def _iter_test_indices(self, X, y, groups):
+        # Implementation is based on this kaggle kernel:
+        # https://www.kaggle.com/jakubwasikowski/stratified-group-k-fold-cross-validation
+        # and is subject to the Apache 2.0 License. You may obtain a copy of the
+        # License at http://www.apache.org/licenses/LICENSE-2.0
+        # Changelist:
+        # - Refactored function to a class following scikit-learn KFold
+        #   interface.
+        # - Added heuristic for assigning group to the least populated fold in
+        #   cases when all other criteria are equal
+        # - Switched from using python ``Counter`` to ``np.unique`` to get class
+        #   distribution
+        # - Added scikit-learn checks for input: checking that target is binary
+        #   or multiclass, checking passed random state, checking that number
+        #   of splits is less than number of members in each class, checking
+        #   that least populated class has more members than there are splits.
+        rng = check_random_state(self.random_state)
+        y = np.asarray(y)
+        type_of_target_y = type_of_target(y)
+        allowed_target_types = ("binary", "multiclass")
+        if type_of_target_y not in allowed_target_types:
+            raise ValueError(
+                "Supported target types are: {}. Got {!r} instead.".format(
+                    allowed_target_types, type_of_target_y
+                )
+            )
+
+        y = column_or_1d(y)
+        _, y_inv, y_cnt = np.unique(y, return_inverse=True, return_counts=True)
+        if np.all(self.n_splits > y_cnt):
+            raise ValueError(
+                "n_splits=%d cannot be greater than the"
+                " number of members in each class." % (self.n_splits)
+            )
+        n_smallest_class = np.min(y_cnt)
+        if self.n_splits > n_smallest_class:
+            warnings.warn(
+                "The least populated class in y has only %d"
+                " members, which is less than n_splits=%d."
+ % (n_smallest_class, self.n_splits), + UserWarning, + ) + n_classes = len(y_cnt) + + _, groups_inv, groups_cnt = np.unique( + groups, return_inverse=True, return_counts=True + ) + y_counts_per_group = np.zeros((len(groups_cnt), n_classes)) + for class_idx, group_idx in zip(y_inv, groups_inv): + y_counts_per_group[group_idx, class_idx] += 1 + + y_counts_per_fold = np.zeros((self.n_splits, n_classes)) + groups_per_fold = defaultdict(set) + + if self.shuffle: + rng.shuffle(y_counts_per_group) + + # Stable sort to keep shuffled order for groups with the same + # class distribution variance + sorted_groups_idx = np.argsort( + -np.std(y_counts_per_group, axis=1), kind="mergesort" + ) + + for group_idx in sorted_groups_idx: + group_y_counts = y_counts_per_group[group_idx] + best_fold = self._find_best_fold( + y_counts_per_fold=y_counts_per_fold, + y_cnt=y_cnt, + group_y_counts=group_y_counts, + ) + y_counts_per_fold[best_fold] += group_y_counts + groups_per_fold[best_fold].add(group_idx) + + for i in range(self.n_splits): + test_indices = [ + idx + for idx, group_idx in enumerate(groups_inv) + if group_idx in groups_per_fold[i] + ] + yield test_indices + + def _find_best_fold(self, y_counts_per_fold, y_cnt, group_y_counts): + best_fold = None + min_eval = np.inf + min_samples_in_fold = np.inf + for i in range(self.n_splits): + y_counts_per_fold[i] += group_y_counts + # Summarise the distribution over classes in each proposed fold + std_per_class = np.std(y_counts_per_fold / y_cnt.reshape(1, -1), axis=0) + y_counts_per_fold[i] -= group_y_counts + fold_eval = np.mean(std_per_class) + samples_in_fold = np.sum(y_counts_per_fold[i]) + is_current_fold_better = ( + fold_eval < min_eval + or np.isclose(fold_eval, min_eval) + and samples_in_fold < min_samples_in_fold + ) + if is_current_fold_better: + min_eval = fold_eval + min_samples_in_fold = samples_in_fold + best_fold = i + return best_fold + + +class TimeSeriesSplit(_BaseKFold): + """Time Series cross-validator. + + Provides train/test indices to split time series data samples + that are observed at fixed time intervals, in train/test sets. + In each split, test indices must be higher than before, and thus shuffling + in cross validator is inappropriate. + + This cross-validation object is a variation of :class:`KFold`. + In the kth split, it returns first k folds as train set and the + (k+1)th fold as test set. + + Note that unlike standard cross-validation methods, successive + training sets are supersets of those that come before them. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + .. versionadded:: 0.18 + + Parameters + ---------- + n_splits : int, default=5 + Number of splits. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + max_train_size : int, default=None + Maximum size for a single training set. + + test_size : int, default=None + Used to limit the size of the test set. Defaults to + ``n_samples // (n_splits + 1)``, which is the maximum allowed value + with ``gap=0``. + + .. versionadded:: 0.24 + + gap : int, default=0 + Number of samples to exclude from the end of each train set before + the test set. + + .. 
versionadded:: 0.24 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import TimeSeriesSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> tscv = TimeSeriesSplit() + >>> print(tscv) + TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0] + Test: index=[1] + Fold 1: + Train: index=[0 1] + Test: index=[2] + Fold 2: + Train: index=[0 1 2] + Test: index=[3] + Fold 3: + Train: index=[0 1 2 3] + Test: index=[4] + Fold 4: + Train: index=[0 1 2 3 4] + Test: index=[5] + >>> # Fix test_size to 2 with 12 samples + >>> X = np.random.randn(12, 2) + >>> y = np.random.randint(0, 2, 12) + >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0 1 2 3 4 5] + Test: index=[6 7] + Fold 1: + Train: index=[0 1 2 3 4 5 6 7] + Test: index=[8 9] + Fold 2: + Train: index=[0 1 2 3 4 5 6 7 8 9] + Test: index=[10 11] + >>> # Add in a 2 period gap + >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=2) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0 1 2 3] + Test: index=[6 7] + Fold 1: + Train: index=[0 1 2 3 4 5] + Test: index=[8 9] + Fold 2: + Train: index=[0 1 2 3 4 5 6 7] + Test: index=[10 11] + + For a more extended example see + :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py`. + + Notes + ----- + The training set has size ``i * n_samples // (n_splits + 1) + + n_samples % (n_splits + 1)`` in the ``i`` th split, + with a test set of size ``n_samples//(n_splits + 1)`` by default, + where ``n_samples`` is the number of samples. + """ + + def __init__(self, n_splits=5, *, max_train_size=None, test_size=None, gap=0): + super().__init__(n_splits, shuffle=False, random_state=None) + self.max_train_size = max_train_size + self.test_size = test_size + self.gap = gap + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + n_samples = _num_samples(X) + n_splits = self.n_splits + n_folds = n_splits + 1 + gap = self.gap + test_size = ( + self.test_size if self.test_size is not None else n_samples // n_folds + ) + + # Make sure we have enough samples for the given split parameters + if n_folds > n_samples: + raise ValueError( + f"Cannot have number of folds={n_folds} greater" + f" than the number of samples={n_samples}." 
+ ) + if n_samples - gap - (test_size * n_splits) <= 0: + raise ValueError( + f"Too many splits={n_splits} for number of samples" + f"={n_samples} with test_size={test_size} and gap={gap}." + ) + + indices = np.arange(n_samples) + test_starts = range(n_samples - n_splits * test_size, n_samples, test_size) + + for test_start in test_starts: + train_end = test_start - gap + if self.max_train_size and self.max_train_size < train_end: + yield ( + indices[train_end - self.max_train_size : train_end], + indices[test_start : test_start + test_size], + ) + else: + yield ( + indices[:train_end], + indices[test_start : test_start + test_size], + ) + + +class LeaveOneGroupOut(GroupsConsumerMixin, BaseCrossValidator): + """Leave One Group Out cross-validator. + + Provides train/test indices to split data such that each training set is + comprised of all samples except ones belonging to one specific group. + Arbitrary domain specific group information is provided an array integers + that encodes the group of each sample. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + Read more in the :ref:`User Guide `. + + Notes + ----- + Splits are ordered according to the index of the group left out. The first + split has testing set consisting of the group whose index in `groups` is + lowest, and so on. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeaveOneGroupOut + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + >>> y = np.array([1, 2, 1, 2]) + >>> groups = np.array([1, 1, 2, 2]) + >>> logo = LeaveOneGroupOut() + >>> logo.get_n_splits(X, y, groups) + 2 + >>> logo.get_n_splits(groups=groups) # 'groups' is always required + 2 + >>> print(logo) + LeaveOneGroupOut() + >>> for i, (train_index, test_index) in enumerate(logo.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3], group=[2 2] + Test: index=[0 1], group=[1 1] + Fold 1: + Train: index=[0 1], group=[1 1] + Test: index=[2 3], group=[2 2] + + See also + -------- + GroupKFold: K-fold iterator variant with non-overlapping groups. + """ + + def _iter_test_masks(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + # We make a copy of groups to avoid side-effects during iteration + groups = check_array( + groups, input_name="groups", copy=True, ensure_2d=False, dtype=None + ) + unique_groups = np.unique(groups) + if len(unique_groups) <= 1: + raise ValueError( + "The groups parameter contains fewer than 2 unique groups " + "(%s). LeaveOneGroupOut expects at least 2." % unique_groups + ) + for i in unique_groups: + yield groups == i + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. This 'groups' parameter must always be specified to + calculate the number of splits, though the other parameters can be + omitted. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. 
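+
+        For instance, with a toy ``groups`` array containing three distinct
+        group labels (illustrative values):
+
+        >>> from sklearn.model_selection import LeaveOneGroupOut
+        >>> LeaveOneGroupOut().get_n_splits(groups=[1, 1, 2, 2, 3])
+        3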
+ """ + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + return len(np.unique(groups)) + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class LeavePGroupsOut(GroupsConsumerMixin, BaseCrossValidator): + """Leave P Group(s) Out cross-validator. + + Provides train/test indices to split data according to a third-party + provided group. This group information can be used to encode arbitrary + domain specific stratifications of the samples as integers. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + The difference between LeavePGroupsOut and LeaveOneGroupOut is that + the former builds the test sets with all the samples assigned to + ``p`` different values of the groups while the latter uses samples + all assigned the same groups. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_groups : int + Number of groups (``p``) to leave out in the test split. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeavePGroupsOut + >>> X = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([1, 2, 1]) + >>> groups = np.array([1, 2, 3]) + >>> lpgo = LeavePGroupsOut(n_groups=2) + >>> lpgo.get_n_splits(X, y, groups) + 3 + >>> lpgo.get_n_splits(groups=groups) # 'groups' is always required + 3 + >>> print(lpgo) + LeavePGroupsOut(n_groups=2) + >>> for i, (train_index, test_index) in enumerate(lpgo.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2], group=[3] + Test: index=[0 1], group=[1 2] + Fold 1: + Train: index=[1], group=[2] + Test: index=[0 2], group=[1 3] + Fold 2: + Train: index=[0], group=[1] + Test: index=[1 2], group=[2 3] + + See Also + -------- + GroupKFold : K-fold iterator variant with non-overlapping groups. + """ + + def __init__(self, n_groups): + self.n_groups = n_groups + + def _iter_test_masks(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array( + groups, input_name="groups", copy=True, ensure_2d=False, dtype=None + ) + unique_groups = np.unique(groups) + if self.n_groups >= len(unique_groups): + raise ValueError( + "The groups parameter contains fewer than (or equal to) " + "n_groups (%d) numbers of unique groups (%s). 
LeavePGroupsOut " + "expects that at least n_groups + 1 (%d) unique groups be " + "present" % (self.n_groups, unique_groups, self.n_groups + 1) + ) + combi = combinations(range(len(unique_groups)), self.n_groups) + for indices in combi: + test_index = np.zeros(_num_samples(X), dtype=bool) + for l in unique_groups[np.array(indices)]: + test_index[groups == l] = True + yield test_index + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. This 'groups' parameter must always be specified to + calculate the number of splits, though the other parameters can be + omitted. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + return int(comb(len(np.unique(groups)), self.n_groups, exact=True)) + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class _RepeatedSplits(_MetadataRequester, metaclass=ABCMeta): + """Repeated splits for an arbitrary randomized CV splitter. + + Repeats splits for cross-validators n times with different randomization + in each repetition. + + Parameters + ---------- + cv : callable + Cross-validator class. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Passes `random_state` to the arbitrary repeating cross validator. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + **cvargs : additional params + Constructor parameters for cv. Must not contain random_state + and shuffle. + """ + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. 
+ __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def __init__(self, cv, *, n_repeats=10, random_state=None, **cvargs): + if not isinstance(n_repeats, numbers.Integral): + raise ValueError("Number of repetitions must be of Integral type.") + + if n_repeats <= 0: + raise ValueError("Number of repetitions must be greater than 0.") + + if any(key in cvargs for key in ("random_state", "shuffle")): + raise ValueError("cvargs must not contain random_state or shuffle.") + + self.cv = cv + self.n_repeats = n_repeats + self.random_state = random_state + self.cvargs = cvargs + + def split(self, X, y=None, groups=None): + """Generates indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + n_repeats = self.n_repeats + rng = check_random_state(self.random_state) + + for idx in range(n_repeats): + cv = self.cv(random_state=rng, shuffle=True, **self.cvargs) + for train_index, test_index in cv.split(X, y, groups): + yield train_index, test_index + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + ``np.zeros(n_samples)`` may be used as a placeholder. + + y : object + Always ignored, exists for compatibility. + ``np.zeros(n_samples)`` may be used as a placeholder. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + rng = check_random_state(self.random_state) + cv = self.cv(random_state=rng, shuffle=True, **self.cvargs) + return cv.get_n_splits(X, y, groups) * self.n_repeats + + def __repr__(self): + return _build_repr(self) + + +class RepeatedKFold(_RepeatedSplits): + """Repeated K-Fold cross validator. + + Repeats K-Fold n times with different randomization in each repetition. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of each repeated cross-validation instance. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import RepeatedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124) + >>> rkf.get_n_splits(X, y) + 4 + >>> print(rkf) + RepeatedKFold(n_repeats=2, n_splits=2, random_state=2652124) + >>> for i, (train_index, test_index) in enumerate(rkf.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + ... + Fold 0: + Train: index=[0 1] + Test: index=[2 3] + Fold 1: + Train: index=[2 3] + Test: index=[0 1] + Fold 2: + Train: index=[1 2] + Test: index=[0 3] + Fold 3: + Train: index=[0 3] + Test: index=[1 2] + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + RepeatedStratifiedKFold : Repeats Stratified K-Fold n times. + """ + + def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): + super().__init__( + KFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits + ) + + +class RepeatedStratifiedKFold(_RepeatedSplits): + """Repeated Stratified K-Fold cross validator. + + Repeats Stratified K-Fold n times with different randomization in each + repetition. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Controls the generation of the random states for each repetition. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import RepeatedStratifiedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, + ... random_state=36851234) + >>> rskf.get_n_splits(X, y) + 4 + >>> print(rskf) + RepeatedStratifiedKFold(n_repeats=2, n_splits=2, random_state=36851234) + >>> for i, (train_index, test_index) in enumerate(rskf.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + ... + Fold 0: + Train: index=[1 2] + Test: index=[0 3] + Fold 1: + Train: index=[0 3] + Test: index=[1 2] + Fold 2: + Train: index=[1 3] + Test: index=[0 2] + Fold 3: + Train: index=[0 2] + Test: index=[1 3] + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + RepeatedKFold : Repeats K-Fold n times. + """ + + def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): + super().__init__( + StratifiedKFold, + n_repeats=n_repeats, + random_state=random_state, + n_splits=n_splits, + ) + + +class BaseShuffleSplit(_MetadataRequester, metaclass=ABCMeta): + """Base class for ShuffleSplit and StratifiedShuffleSplit.""" + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. + __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + self.n_splits = n_splits + self.test_size = test_size + self.train_size = train_size + self.random_state = random_state + self._default_test_size = 0.1 + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + X, y, groups = indexable(X, y, groups) + for train, test in self._iter_indices(X, y, groups): + yield train, test + + @abstractmethod + def _iter_indices(self, X, y=None, groups=None): + """Generate (train, test) indices""" + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return self.n_splits + + def __repr__(self): + return _build_repr(self) + + +class ShuffleSplit(BaseShuffleSplit): + """Random permutation cross-validator. + + Yields indices to split data into training and test sets. + + Note: contrary to other cross-validation strategies, random splits + do not guarantee that all folds will be different, although this is + still very likely for sizeable datasets. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=10 + Number of re-shuffling & splitting iterations. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.1. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import ShuffleSplit + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]]) + >>> y = np.array([1, 2, 1, 2, 1, 2]) + >>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0) + >>> rs.get_n_splits(X) + 5 + >>> print(rs) + ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None) + >>> for i, (train_index, test_index) in enumerate(rs.split(X)): + ... print(f"Fold {i}:") + ... 
print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3 0 4] + Test: index=[5 2] + Fold 1: + Train: index=[4 0 2 5] + Test: index=[1 3] + Fold 2: + Train: index=[1 2 4 0] + Test: index=[3 5] + Fold 3: + Train: index=[3 4 1 0] + Test: index=[5 2] + Fold 4: + Train: index=[3 5 1 0] + Test: index=[2 4] + >>> # Specify train and test size + >>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25, + ... random_state=0) + >>> for i, (train_index, test_index) in enumerate(rs.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3 0] + Test: index=[5 2] + Fold 1: + Train: index=[4 0 2] + Test: index=[1 3] + Fold 2: + Train: index=[1 2 4] + Test: index=[3 5] + Fold 3: + Train: index=[3 4 1] + Test: index=[5 2] + Fold 4: + Train: index=[3 5 1] + Test: index=[2 4] + """ + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.1 + + def _iter_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + n_train, n_test = _validate_shuffle_split( + n_samples, + self.test_size, + self.train_size, + default_test_size=self._default_test_size, + ) + + rng = check_random_state(self.random_state) + for i in range(self.n_splits): + # random partition + permutation = rng.permutation(n_samples) + ind_test = permutation[:n_test] + ind_train = permutation[n_test : (n_test + n_train)] + yield ind_train, ind_test + + +class GroupShuffleSplit(GroupsConsumerMixin, ShuffleSplit): + """Shuffle-Group(s)-Out cross-validation iterator. + + Provides randomized train/test indices to split data according to a + third-party provided group. This group information can be used to encode + arbitrary domain specific stratifications of the samples as integers. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + The difference between LeavePGroupsOut and GroupShuffleSplit is that + the former generates splits using all subsets of size ``p`` unique groups, + whereas GroupShuffleSplit generates a user-determined number of random + test splits, each with a user-determined fraction of unique groups. + + For example, a less computationally intensive alternative to + ``LeavePGroupsOut(p=10)`` would be + ``GroupShuffleSplit(test_size=10, n_splits=100)``. + + Note: The parameters ``test_size`` and ``train_size`` refer to groups, and + not to samples, as in ShuffleSplit. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of re-shuffling & splitting iterations. + + test_size : float, int, default=0.2 + If float, should be between 0.0 and 1.0 and represent the proportion + of groups to include in the test split (rounded up). If int, + represents the absolute number of test groups. If None, the value is + set to the complement of the train size. + The default will change in version 0.21. It will remain 0.2 only + if ``train_size`` is unspecified, otherwise it will complement + the specified ``train_size``. 
+ + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the groups to include in the train split. If + int, represents the absolute number of train groups. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import GroupShuffleSplit + >>> X = np.ones(shape=(8, 2)) + >>> y = np.ones(shape=(8, 1)) + >>> groups = np.array([1, 1, 2, 2, 2, 3, 3, 3]) + >>> print(groups.shape) + (8,) + >>> gss = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42) + >>> gss.get_n_splits() + 2 + >>> print(gss) + GroupShuffleSplit(n_splits=2, random_state=42, test_size=None, train_size=0.7) + >>> for i, (train_index, test_index) in enumerate(gss.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3 4 5 6 7], group=[2 2 2 3 3 3] + Test: index=[0 1], group=[1 1] + Fold 1: + Train: index=[0 1 5 6 7], group=[1 1 3 3 3] + Test: index=[2 3 4], group=[2 2 2] + + See Also + -------- + ShuffleSplit : Shuffles samples to create independent test/train sets. + + LeavePGroupsOut : Train set leaves out all possible subsets of `p` groups. + """ + + def __init__( + self, n_splits=5, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.2 + + def _iter_indices(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + classes, group_indices = np.unique(groups, return_inverse=True) + for group_train, group_test in super()._iter_indices(X=classes): + # these are the indices of classes in the partition + # invert them into data indices + + train = np.flatnonzero(np.isin(group_indices, group_train)) + test = np.flatnonzero(np.isin(group_indices, group_test)) + + yield train, test + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + return super().split(X, y, groups) + + +class StratifiedShuffleSplit(BaseShuffleSplit): + """Stratified ShuffleSplit cross-validator. + + Provides train/test indices to split data in train/test sets. 
+ + This cross-validation object is a merge of StratifiedKFold and + ShuffleSplit, which returns stratified randomized folds. The folds + are made by preserving the percentage of samples for each class. + + Note: like the ShuffleSplit strategy, stratified random splits + do not guarantee that all folds will be different, although this is + still very likely for sizeable datasets. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=10 + Number of re-shuffling & splitting iterations. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.1. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedShuffleSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 0, 1, 1, 1]) + >>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0) + >>> sss.get_n_splits(X, y) + 5 + >>> print(sss) + StratifiedShuffleSplit(n_splits=5, random_state=0, ...) + >>> for i, (train_index, test_index) in enumerate(sss.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[5 2 3] + Test: index=[4 1 0] + Fold 1: + Train: index=[5 1 4] + Test: index=[0 2 3] + Fold 2: + Train: index=[5 0 2] + Test: index=[4 3 1] + Fold 3: + Train: index=[4 1 0] + Test: index=[2 3 5] + Fold 4: + Train: index=[0 5 1] + Test: index=[3 4 2] + """ + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.1 + + def _iter_indices(self, X, y, groups=None): + n_samples = _num_samples(X) + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + n_train, n_test = _validate_shuffle_split( + n_samples, + self.test_size, + self.train_size, + default_test_size=self._default_test_size, + ) + + if y.ndim == 2: + # for multi-label y, map each distinct row to a string repr + # using join because str(row) uses an ellipsis if len(row) > 1000 + y = np.array([" ".join(row.astype("str")) for row in y]) + + classes, y_indices = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + + class_counts = np.bincount(y_indices) + if np.min(class_counts) < 2: + raise ValueError( + "The least populated class in y has only 1" + " member, which is too few. The minimum" + " number of groups for any class cannot" + " be less than 2." 
+ ) + + if n_train < n_classes: + raise ValueError( + "The train_size = %d should be greater or " + "equal to the number of classes = %d" % (n_train, n_classes) + ) + if n_test < n_classes: + raise ValueError( + "The test_size = %d should be greater or " + "equal to the number of classes = %d" % (n_test, n_classes) + ) + + # Find the sorted list of instances for each class: + # (np.unique above performs a sort, so code is O(n logn) already) + class_indices = np.split( + np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1] + ) + + rng = check_random_state(self.random_state) + + for _ in range(self.n_splits): + # if there are ties in the class-counts, we want + # to make sure to break them anew in each iteration + n_i = _approximate_mode(class_counts, n_train, rng) + class_counts_remaining = class_counts - n_i + t_i = _approximate_mode(class_counts_remaining, n_test, rng) + + train = [] + test = [] + + for i in range(n_classes): + permutation = rng.permutation(class_counts[i]) + perm_indices_class_i = class_indices[i].take(permutation, mode="clip") + + train.extend(perm_indices_class_i[: n_i[i]]) + test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]) + + train = rng.permutation(train) + test = rng.permutation(test) + + yield train, test + + def split(self, X, y, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Note that providing ``y`` is sufficient to generate the splits and + hence ``np.zeros(n_samples)`` may be used as a placeholder for + ``X`` instead of actual training data. + + y : array-like of shape (n_samples,) or (n_samples, n_labels) + The target variable for supervised learning problems. + Stratification is done based on the y labels. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + return super().split(X, y, groups) + + +def _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=None): + """ + Validation helper to check if the test/test sizes are meaningful w.r.t. the + size of the data (n_samples). 
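+
+    For example (using the rules implemented below), with ``n_samples=10`` and
+    only ``test_size=0.25`` given, ``n_test = ceil(0.25 * 10) = 3`` and
+    ``n_train = 10 - 3 = 7``; with only ``train_size=0.6`` given,
+    ``n_train = floor(0.6 * 10) = 6`` and ``n_test = 10 - 6 = 4``.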
+ """ + if test_size is None and train_size is None: + test_size = default_test_size + + test_size_type = np.asarray(test_size).dtype.kind + train_size_type = np.asarray(train_size).dtype.kind + + if ( + test_size_type == "i" + and (test_size >= n_samples or test_size <= 0) + or test_size_type == "f" + and (test_size <= 0 or test_size >= 1) + ): + raise ValueError( + "test_size={0} should be either positive and smaller" + " than the number of samples {1} or a float in the " + "(0, 1) range".format(test_size, n_samples) + ) + + if ( + train_size_type == "i" + and (train_size >= n_samples or train_size <= 0) + or train_size_type == "f" + and (train_size <= 0 or train_size >= 1) + ): + raise ValueError( + "train_size={0} should be either positive and smaller" + " than the number of samples {1} or a float in the " + "(0, 1) range".format(train_size, n_samples) + ) + + if train_size is not None and train_size_type not in ("i", "f"): + raise ValueError("Invalid value for train_size: {}".format(train_size)) + if test_size is not None and test_size_type not in ("i", "f"): + raise ValueError("Invalid value for test_size: {}".format(test_size)) + + if train_size_type == "f" and test_size_type == "f" and train_size + test_size > 1: + raise ValueError( + "The sum of test_size and train_size = {}, should be in the (0, 1)" + " range. Reduce test_size and/or train_size.".format(train_size + test_size) + ) + + if test_size_type == "f": + n_test = ceil(test_size * n_samples) + elif test_size_type == "i": + n_test = float(test_size) + + if train_size_type == "f": + n_train = floor(train_size * n_samples) + elif train_size_type == "i": + n_train = float(train_size) + + if train_size is None: + n_train = n_samples - n_test + elif test_size is None: + n_test = n_samples - n_train + + if n_train + n_test > n_samples: + raise ValueError( + "The sum of train_size and test_size = %d, " + "should be smaller than the number of " + "samples %d. Reduce test_size and/or " + "train_size." % (n_train + n_test, n_samples) + ) + + n_train, n_test = int(n_train), int(n_test) + + if n_train == 0: + raise ValueError( + "With n_samples={}, test_size={} and train_size={}, the " + "resulting train set will be empty. Adjust any of the " + "aforementioned parameters.".format(n_samples, test_size, train_size) + ) + + return n_train, n_test + + +class PredefinedSplit(BaseCrossValidator): + """Predefined split cross-validator. + + Provides train/test indices to split data into train/test sets using a + predefined scheme specified by the user with the ``test_fold`` parameter. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.16 + + Parameters + ---------- + test_fold : array-like of shape (n_samples,) + The entry ``test_fold[i]`` represents the index of the test set that + sample ``i`` belongs to. It is possible to exclude sample ``i`` from + any test set (i.e. include sample ``i`` in every training set) by + setting ``test_fold[i]`` equal to -1. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import PredefinedSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> test_fold = [0, 1, -1, 1] + >>> ps = PredefinedSplit(test_fold) + >>> ps.get_n_splits() + 2 + >>> print(ps) + PredefinedSplit(test_fold=array([ 0, 1, -1, 1])) + >>> for i, (train_index, test_index) in enumerate(ps.split()): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 2 3] + Test: index=[0] + Fold 1: + Train: index=[0 2] + Test: index=[1 3] + """ + + def __init__(self, test_fold): + self.test_fold = np.array(test_fold, dtype=int) + self.test_fold = column_or_1d(self.test_fold) + self.unique_folds = np.unique(self.test_fold) + self.unique_folds = self.unique_folds[self.unique_folds != -1] + + def split(self, X=None, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + ind = np.arange(len(self.test_fold)) + for test_index in self._iter_test_masks(): + train_index = ind[np.logical_not(test_index)] + test_index = ind[test_index] + yield train_index, test_index + + def _iter_test_masks(self): + """Generates boolean masks corresponding to test sets.""" + for f in self.unique_folds: + test_index = np.where(self.test_fold == f)[0] + test_mask = np.zeros(len(self.test_fold), dtype=bool) + test_mask[test_index] = True + yield test_mask + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return len(self.unique_folds) + + +class _CVIterableWrapper(BaseCrossValidator): + """Wrapper class for old style cv objects and iterables.""" + + def __init__(self, cv): + self.cv = list(cv) + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return len(self.cv) + + def split(self, X=None, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + for train, test in self.cv: + yield train, test + + +def check_cv(cv=5, y=None, *, classifier=False): + """Input checker utility for building a cross-validator. + + Parameters + ---------- + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. 
+ + For integer/None inputs, if classifier is True and ``y`` is either + binary or multiclass, :class:`StratifiedKFold` is used. In all other + cases, :class:`KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value changed from 3-fold to 5-fold. + + y : array-like, default=None + The target variable for supervised learning problems. + + classifier : bool, default=False + Whether the task is a classification task, in which case + stratified KFold will be used. + + Returns + ------- + checked_cv : a cross-validator instance. + The return value is a cross-validator which generates the train/test + splits via the ``split`` method. + + Examples + -------- + >>> from sklearn.model_selection import check_cv + >>> check_cv(cv=5, y=None, classifier=False) + KFold(...) + >>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True) + StratifiedKFold(...) + """ + cv = 5 if cv is None else cv + if isinstance(cv, numbers.Integral): + if ( + classifier + and (y is not None) + and (type_of_target(y, input_name="y") in ("binary", "multiclass")) + ): + return StratifiedKFold(cv) + else: + return KFold(cv) + + if not hasattr(cv, "split") or isinstance(cv, str): + if not isinstance(cv, Iterable) or isinstance(cv, str): + raise ValueError( + "Expected cv as an integer, cross-validation " + "object (from sklearn.model_selection) " + "or an iterable. Got %s." % cv + ) + return _CVIterableWrapper(cv) + + return cv # New style cv objects are passed without any modification + + +@validate_params( + { + "test_size": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(numbers.Integral, 1, None, closed="left"), + None, + ], + "train_size": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(numbers.Integral, 1, None, closed="left"), + None, + ], + "random_state": ["random_state"], + "shuffle": ["boolean"], + "stratify": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def train_test_split( + *arrays, + test_size=None, + train_size=None, + random_state=None, + shuffle=True, + stratify=None, +): + """Split arrays or matrices into random train and test subsets. + + Quick utility that wraps input validation, + ``next(ShuffleSplit().split(X, y))``, and application to input data + into a single call for splitting (and optionally subsampling) data into a + one-liner. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *arrays : sequence of indexables with same length / shape[0] + Allowed inputs are lists, numpy arrays, scipy-sparse + matrices or pandas dataframes. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.25. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the shuffling applied to the data before applying the split. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
+ + shuffle : bool, default=True + Whether or not to shuffle the data before splitting. If shuffle=False + then stratify must be None. + + stratify : array-like, default=None + If not None, data is split in a stratified fashion, using this as + the class labels. + Read more in the :ref:`User Guide `. + + Returns + ------- + splitting : list, length=2 * len(arrays) + List containing train-test split of inputs. + + .. versionadded:: 0.16 + If the input is sparse, the output will be a + ``scipy.sparse.csr_matrix``. Else, output type is the same as the + input type. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import train_test_split + >>> X, y = np.arange(10).reshape((5, 2)), range(5) + >>> X + array([[0, 1], + [2, 3], + [4, 5], + [6, 7], + [8, 9]]) + >>> list(y) + [0, 1, 2, 3, 4] + + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, test_size=0.33, random_state=42) + ... + >>> X_train + array([[4, 5], + [0, 1], + [6, 7]]) + >>> y_train + [2, 0, 3] + >>> X_test + array([[2, 3], + [8, 9]]) + >>> y_test + [1, 4] + + >>> train_test_split(y, shuffle=False) + [[0, 1, 2], [3, 4]] + """ + n_arrays = len(arrays) + if n_arrays == 0: + raise ValueError("At least one array required as input") + + arrays = indexable(*arrays) + + n_samples = _num_samples(arrays[0]) + n_train, n_test = _validate_shuffle_split( + n_samples, test_size, train_size, default_test_size=0.25 + ) + + if shuffle is False: + if stratify is not None: + raise ValueError( + "Stratified train/test split is not implemented for shuffle=False" + ) + + train = np.arange(n_train) + test = np.arange(n_train, n_train + n_test) + + else: + if stratify is not None: + CVClass = StratifiedShuffleSplit + else: + CVClass = ShuffleSplit + + cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state) + + train, test = next(cv.split(X=arrays[0], y=stratify)) + + return list( + chain.from_iterable( + (_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays + ) + ) + + +# Tell nose that train_test_split is not a test. +# (Needed for external libraries that may use nose.) +# Use setattr to avoid mypy errors when monkeypatching. +setattr(train_test_split, "__test__", False) + + +def _pprint(params, offset=0, printer=repr): + """Pretty print the dictionary 'params' + + Parameters + ---------- + params : dict + The dictionary to pretty print + + offset : int, default=0 + The offset in characters to add at the begin of each line. + + printer : callable, default=repr + The function to convert entries to strings, typically + the builtin str or repr + + """ + # Do a multi-line justified repr: + options = np.get_printoptions() + np.set_printoptions(precision=5, threshold=64, edgeitems=2) + params_list = list() + this_line_length = offset + line_sep = ",\n" + (1 + offset // 2) * " " + for i, (k, v) in enumerate(sorted(params.items())): + if isinstance(v, float): + # use str for representing floating point numbers + # this way we get consistent representation across + # architectures and versions. + this_repr = "%s=%s" % (k, str(v)) + else: + # use repr of the rest + this_repr = "%s=%s" % (k, printer(v)) + if len(this_repr) > 500: + this_repr = this_repr[:300] + "..." 
+ this_repr[-100:] + if i > 0: + if this_line_length + len(this_repr) >= 75 or "\n" in this_repr: + params_list.append(line_sep) + this_line_length = len(line_sep) + else: + params_list.append(", ") + this_line_length += 2 + params_list.append(this_repr) + this_line_length += len(this_repr) + + np.set_printoptions(**options) + lines = "".join(params_list) + # Strip trailing space to avoid nightmare in doctests + lines = "\n".join(l.rstrip(" ") for l in lines.split("\n")) + return lines + + +def _build_repr(self): + # XXX This is copied from BaseEstimator's get_params + cls = self.__class__ + init = getattr(cls.__init__, "deprecated_original", cls.__init__) + # Ignore varargs, kw and default values and pop self + init_signature = signature(init) + # Consider the constructor parameters excluding 'self' + if init is object.__init__: + args = [] + else: + args = sorted( + [ + p.name + for p in init_signature.parameters.values() + if p.name != "self" and p.kind != p.VAR_KEYWORD + ] + ) + class_name = self.__class__.__name__ + params = dict() + for key in args: + # We need deprecation warnings to always be on in order to + # catch deprecated param values. + # This is set in utils/__init__.py but it gets overwritten + # when running under python3 somehow. + warnings.simplefilter("always", FutureWarning) + try: + with warnings.catch_warnings(record=True) as w: + value = getattr(self, key, None) + if value is None and hasattr(self, "cvargs"): + value = self.cvargs.get(key, None) + if len(w) and w[0].category == FutureWarning: + # if the parameter is deprecated, don't show it + continue + finally: + warnings.filters.pop(0) + params[key] = value + + return "%s(%s)" % (class_name, _pprint(params, offset=len(class_name))) + + +def _yields_constant_splits(cv): + # Return True if calling cv.split() always returns the same splits + # We assume that if a cv doesn't have a shuffle parameter, it shuffles by + # default (e.g. ShuffleSplit). If it actually doesn't shuffle (e.g. + # LeaveOneOut), then it won't have a random_state parameter anyway, in + # which case it will default to 0, leading to output=True + shuffle = getattr(cv, "shuffle", True) + random_state = getattr(cv, "random_state", 0) + return isinstance(random_state, numbers.Integral) or not shuffle diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/_validation.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..75c956f2d38a73229b8607d83c53913ab782e231 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/_validation.py @@ -0,0 +1,2360 @@ +""" +The :mod:`sklearn.model_selection._validation` module includes classes and +functions to validate the model. 
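+
+The main public entry points are :func:`cross_validate`, :func:`cross_val_score`,
+:func:`cross_val_predict`, :func:`permutation_test_score`, :func:`learning_curve`
+and :func:`validation_curve` (see ``__all__`` below).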
+""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Olivier Grisel +# Raghav RV +# Michal Karbownik +# License: BSD 3 clause + + +import numbers +import time +import warnings +from collections import Counter +from contextlib import suppress +from functools import partial +from numbers import Real +from traceback import format_exc + +import numpy as np +import scipy.sparse as sp +from joblib import logger + +from ..base import clone, is_classifier +from ..exceptions import FitFailedWarning, UnsetMetadataPassedError +from ..metrics import check_scoring, get_scorer_names +from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer +from ..preprocessing import LabelEncoder +from ..utils import Bunch, _safe_indexing, check_random_state, indexable +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + StrOptions, + validate_params, +) +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import _safe_split +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _check_method_params, _num_samples +from ._split import check_cv + +__all__ = [ + "cross_validate", + "cross_val_score", + "cross_val_predict", + "permutation_test_score", + "learning_curve", + "validation_curve", +] + + +def _check_params_groups_deprecation(fit_params, params, groups): + """A helper function to check deprecations on `groups` and `fit_params`. + + To be removed when set_config(enable_metadata_routing=False) is not possible. + """ + if params is not None and fit_params is not None: + raise ValueError( + "`params` and `fit_params` cannot both be provided. Pass parameters " + "via `params`. `fit_params` is deprecated and will be removed in " + "version 1.6." + ) + elif fit_params is not None: + warnings.warn( + ( + "`fit_params` is deprecated and will be removed in version 1.6. " + "Pass parameters via `params` instead." + ), + FutureWarning, + ) + params = fit_params + + params = {} if params is None else params + + if groups is not None and _routing_enabled(): + raise ValueError( + "`groups` can only be passed if metadata routing is not enabled via" + " `sklearn.set_config(enable_metadata_routing=True)`. When routing is" + " enabled, pass `groups` alongside other metadata via the `params` argument" + " instead." + ) + + return params + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str], + "return_train_score": ["boolean"], + "return_estimator": ["boolean"], + "return_indices": ["boolean"], + "error_score": [StrOptions({"raise"}), Real], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_validate( + estimator, + X, + y=None, + *, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + return_train_score=False, + return_estimator=False, + return_indices=False, + error_score=np.nan, +): + """Evaluate metric(s) by cross-validation and also record fit/score times. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be for example a list, or an array. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. E.g.: + ``cross_validate(..., params={'groups': groups})``. + + scoring : str, callable, list, tuple, or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit``, the scorer, + and the CV splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. 
This parameter can be: + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + return_train_score : bool, default=False + Whether to include train scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + return_estimator : bool, default=False + Whether to return the estimators fitted on each split. + + .. versionadded:: 0.20 + + return_indices : bool, default=False + Whether to return the train-test indices selected for each split. + + .. versionadded:: 1.3 + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + Returns + ------- + scores : dict of float arrays of shape (n_splits,) + Array of scores of the estimator for each run of the cross validation. + + A dict of arrays containing the score/time arrays for each scorer is + returned. The possible keys for this ``dict`` are: + + ``test_score`` + The score array for test scores on each cv split. + Suffix ``_score`` in ``test_score`` changes to a specific + metric like ``test_r2`` or ``test_auc`` if there are + multiple scoring metrics in the scoring parameter. + ``train_score`` + The score array for train scores on each cv split. + Suffix ``_score`` in ``train_score`` changes to a specific + metric like ``train_r2`` or ``train_auc`` if there are + multiple scoring metrics in the scoring parameter. + This is available only if ``return_train_score`` parameter + is ``True``. + ``fit_time`` + The time for fitting the estimator on the train + set for each cv split. + ``score_time`` + The time for scoring the estimator on the test set for each + cv split. (Note time for scoring on the train set is not + included even if ``return_train_score`` is set to ``True`` + ``estimator`` + The estimator objects for each cv split. + This is available only if ``return_estimator`` parameter + is set to ``True``. + ``indices`` + The train/test positional indices for each cv split. A dictionary + is returned where the keys are either `"train"` or `"test"` + and the associated values are a list of integer-dtyped NumPy + arrays with the indices. Available only if `return_indices=True`. + + See Also + -------- + cross_val_score : Run cross-validation for single metric evaluation. + + cross_val_predict : Get predictions from each split of cross-validation for + diagnostic purposes. + + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. 
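+
+    Notes
+    -----
+    For each split the estimator is cloned and fitted once; when ``scoring``
+    requests several metrics, all scorers are evaluated on that same fitted
+    estimator and reported under ``test_<scorer_name>`` keys (plus
+    ``train_<scorer_name>`` keys when ``return_train_score=True``).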
+ + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_validate + >>> from sklearn.metrics import make_scorer + >>> from sklearn.metrics import confusion_matrix + >>> from sklearn.svm import LinearSVC + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + + Single metric evaluation using ``cross_validate`` + + >>> cv_results = cross_validate(lasso, X, y, cv=3) + >>> sorted(cv_results.keys()) + ['fit_time', 'score_time', 'test_score'] + >>> cv_results['test_score'] + array([0.3315057 , 0.08022103, 0.03531816]) + + Multiple metric evaluation using ``cross_validate`` + (please refer the ``scoring`` parameter doc for more information) + + >>> scores = cross_validate(lasso, X, y, cv=3, + ... scoring=('r2', 'neg_mean_squared_error'), + ... return_train_score=True) + >>> print(scores['test_neg_mean_squared_error']) + [-3635.5... -3573.3... -6114.7...] + >>> print(scores['train_r2']) + [0.28009951 0.3908844 0.22784907] + """ + params = _check_params_groups_deprecation(fit_params, params, groups) + + X, y = indexable(X, y) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + + if callable(scoring): + scorers = scoring + elif scoring is None or isinstance(scoring, str): + scorers = check_scoring(estimator, scoring) + else: + scorers = _check_multimetric_scoring(estimator, scoring) + + if _routing_enabled(): + # `cross_validate` will create a `_MultiMetricScorer` if `scoring` is a + # dict at a later stage. We need the same object for the purpose of + # routing. However, creating it here and passing it around would create + # a much larger diff since the dict is used in many places. + if isinstance(scorers, dict): + _scorer = _MultimetricScorer( + scorers=scorers, raise_exc=(error_score == "raise") + ) + else: + _scorer = scorers + # For estimators, a MetadataRouter is created in get_metadata_routing + # methods. For these router methods, we create the router to use + # `process_routing` on it. + router = ( + MetadataRouter(owner="cross_validate") + .add( + splitter=cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + .add( + estimator=estimator, + # TODO(SLEP6): also pass metadata to the predict method for + # scoring? + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + .add( + scorer=_scorer, + method_mapping=MethodMapping().add(caller="fit", callee="score"), + ) + ) + try: + routed_params = process_routing(router, "fit", **params) + except UnsetMetadataPassedError as e: + # The default exception would mention `fit` since in the above + # `process_routing` code, we pass `fit` as the caller. However, + # the user is not calling `fit` directly, so we change the message + # to make it more suitable for this case. + unrequested_params = sorted(e.unrequested_params) + raise UnsetMetadataPassedError( + message=( + f"{unrequested_params} are passed to cross validation but are not" + " explicitly set as requested or not requested for cross_validate's" + f" estimator: {estimator.__class__.__name__}. Call" + " `.set_fit_request({{metadata}}=True)` on the estimator for" + f" each metadata in {unrequested_params} that you" + " want to use and `metadata=False` for not using it. See the" + " Metadata Routing User guide" + " for more" + " information." 
+ ), + unrequested_params=e.unrequested_params, + routed_params=e.routed_params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={"groups": groups}) + routed_params.estimator = Bunch(fit=params) + routed_params.scorer = Bunch(score={}) + + indices = cv.split(X, y, **routed_params.splitter.split) + if return_indices: + # materialize the indices since we need to store them in the returned dict + indices = list(indices) + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. + parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorers, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=routed_params.estimator.fit, + score_params=routed_params.scorer.score, + return_train_score=return_train_score, + return_times=True, + return_estimator=return_estimator, + error_score=error_score, + ) + for train, test in indices + ) + + _warn_or_raise_about_fit_failures(results, error_score) + + # For callable scoring, the return type is only know after calling. If the + # return type is a dictionary, the error scores can now be inserted with + # the correct key. + if callable(scoring): + _insert_error_scores(results, error_score) + + results = _aggregate_score_dicts(results) + + ret = {} + ret["fit_time"] = results["fit_time"] + ret["score_time"] = results["score_time"] + + if return_estimator: + ret["estimator"] = results["estimator"] + + if return_indices: + ret["indices"] = {} + ret["indices"]["train"], ret["indices"]["test"] = zip(*indices) + + test_scores_dict = _normalize_score_results(results["test_scores"]) + if return_train_score: + train_scores_dict = _normalize_score_results(results["train_scores"]) + + for name in test_scores_dict: + ret["test_%s" % name] = test_scores_dict[name] + if return_train_score: + key = "train_%s" % name + ret[key] = train_scores_dict[name] + + return ret + + +def _insert_error_scores(results, error_score): + """Insert error in `results` by replacing them inplace with `error_score`. + + This only applies to multimetric scores because `_fit_and_score` will + handle the single metric case. 
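+
+    For example, with ``error_score=np.nan`` and a successful fold reporting
+    ``{'r2': 0.8, 'neg_mean_squared_error': -3.1}`` (illustrative values), each
+    failed fold has its ``test_scores`` (and ``train_scores``, when present)
+    replaced by ``{'r2': nan, 'neg_mean_squared_error': nan}``.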
+ """ + successful_score = None + failed_indices = [] + for i, result in enumerate(results): + if result["fit_error"] is not None: + failed_indices.append(i) + elif successful_score is None: + successful_score = result["test_scores"] + + if isinstance(successful_score, dict): + formatted_error = {name: error_score for name in successful_score} + for i in failed_indices: + results[i]["test_scores"] = formatted_error.copy() + if "train_scores" in results[i]: + results[i]["train_scores"] = formatted_error.copy() + + +def _normalize_score_results(scores, scaler_score_key="score"): + """Creates a scoring dictionary based on the type of `scores`""" + if isinstance(scores[0], dict): + # multimetric scoring + return _aggregate_score_dicts(scores) + # scaler + return {scaler_score_key: scores} + + +def _warn_or_raise_about_fit_failures(results, error_score): + fit_errors = [ + result["fit_error"] for result in results if result["fit_error"] is not None + ] + if fit_errors: + num_failed_fits = len(fit_errors) + num_fits = len(results) + fit_errors_counter = Counter(fit_errors) + delimiter = "-" * 80 + "\n" + fit_errors_summary = "\n".join( + f"{delimiter}{n} fits failed with the following error:\n{error}" + for error, n in fit_errors_counter.items() + ) + + if num_failed_fits == num_fits: + all_fits_failed_message = ( + f"\nAll the {num_fits} fits failed.\n" + "It is very likely that your model is misconfigured.\n" + "You can try to debug the error by setting error_score='raise'.\n\n" + f"Below are more details about the failures:\n{fit_errors_summary}" + ) + raise ValueError(all_fits_failed_message) + + else: + some_fits_failed_message = ( + f"\n{num_failed_fits} fits failed out of a total of {num_fits}.\n" + "The score on these train-test partitions for these parameters" + f" will be set to {error_score}.\n" + "If these failures are not expected, you can try to debug them " + "by setting error_score='raise'.\n\n" + f"Below are more details about the failures:\n{fit_errors_summary}" + ) + warnings.warn(some_fits_failed_message, FitFailedWarning) + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str, None], + "error_score": [StrOptions({"raise"}), Real], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_val_score( + estimator, + X, + y=None, + *, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + error_score=np.nan, +): + """Evaluate a score by cross-validation. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be for example a list, or an array. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). 
+ + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. E.g.: + ``cross_val_score(..., params={'groups': groups})``. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)`` which should return only + a single value. + + Similar to :func:`cross_validate` + but only a single metric is permitted. + + If `None`, the estimator's default scorer (if available) is used. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - `None`, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. + + For `int`/`None` inputs, if the estimator is a classifier and `y` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + `cv` default value if `None` changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit``, the scorer, + and the CV splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - ``None``, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + Returns + ------- + scores : ndarray of float of shape=(len(list(cv)),) + Array of scores of the estimator for each run of the cross validation. + + See Also + -------- + cross_validate : To run cross-validation on multiple metrics and also to + return train scores, fit times and score times. + + cross_val_predict : Get predictions from each split of cross-validation for + diagnostic purposes. 
+ + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. + + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_val_score + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + >>> print(cross_val_score(lasso, X, y, cv=3)) + [0.3315057 0.08022103 0.03531816] + """ + # To ensure multimetric format is not supported + scorer = check_scoring(estimator, scoring=scoring) + + cv_results = cross_validate( + estimator=estimator, + X=X, + y=y, + groups=groups, + scoring={"score": scorer}, + cv=cv, + n_jobs=n_jobs, + verbose=verbose, + fit_params=fit_params, + params=params, + pre_dispatch=pre_dispatch, + error_score=error_score, + ) + return cv_results["test_score"] + + +def _fit_and_score( + estimator, + X, + y, + *, + scorer, + train, + test, + verbose, + parameters, + fit_params, + score_params, + return_train_score=False, + return_parameters=False, + return_n_test_samples=False, + return_times=False, + return_estimator=False, + split_progress=None, + candidate_progress=None, + error_score=np.nan, +): + """Fit estimator and compute scores for a given dataset split. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : array-like of shape (n_samples, n_features) + The data to fit. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + scorer : A single callable or dict mapping scorer name to the callable + If it is a single callable, the return value for ``train_scores`` and + ``test_scores`` is a single float. + + For a dict, it should be one mapping the scorer name to the scorer + callable object / function. + + The callable object / fn should have signature + ``scorer(estimator, X, y)``. + + train : array-like of shape (n_train_samples,) + Indices of training samples. + + test : array-like of shape (n_test_samples,) + Indices of test samples. + + verbose : int + The verbosity level. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + parameters : dict or None + Parameters to be set on the estimator. + + fit_params : dict or None + Parameters that will be passed to ``estimator.fit``. + + score_params : dict or None + Parameters that will be passed to the scorer. + + return_train_score : bool, default=False + Compute and return score on training set. + + return_parameters : bool, default=False + Return parameters that has been used for the estimator. + + split_progress : {list, tuple} of int, default=None + A list or tuple of format (, ). + + candidate_progress : {list, tuple} of int, default=None + A list or tuple of format + (, ). + + return_n_test_samples : bool, default=False + Whether to return the ``n_test_samples``. + + return_times : bool, default=False + Whether to return the fit/score times. + + return_estimator : bool, default=False + Whether to return the fitted estimator. + + Returns + ------- + result : dict with the following attributes + train_scores : dict of scorer name -> float + Score on training set (for all the scorers), + returned only if `return_train_score` is `True`. 
+ test_scores : dict of scorer name -> float + Score on testing set (for all the scorers). + n_test_samples : int + Number of test samples. + fit_time : float + Time spent for fitting in seconds. + score_time : float + Time spent for scoring in seconds. + parameters : dict or None + The parameters that have been evaluated. + estimator : estimator object + The fitted estimator. + fit_error : str or None + Traceback str if the fit failed, None if the fit succeeded. + """ + if not isinstance(error_score, numbers.Number) and error_score != "raise": + raise ValueError( + "error_score must be the string 'raise' or a numeric value. " + "(Hint: if using 'raise', please make sure that it has been " + "spelled correctly.)" + ) + + progress_msg = "" + if verbose > 2: + if split_progress is not None: + progress_msg = f" {split_progress[0]+1}/{split_progress[1]}" + if candidate_progress and verbose > 9: + progress_msg += f"; {candidate_progress[0]+1}/{candidate_progress[1]}" + + if verbose > 1: + if parameters is None: + params_msg = "" + else: + sorted_keys = sorted(parameters) # Ensure deterministic o/p + params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys) + if verbose > 9: + start_msg = f"[CV{progress_msg}] START {params_msg}" + print(f"{start_msg}{(80 - len(start_msg)) * '.'}") + + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + fit_params = _check_method_params(X, params=fit_params, indices=train) + score_params = score_params if score_params is not None else {} + score_params_train = _check_method_params(X, params=score_params, indices=train) + score_params_test = _check_method_params(X, params=score_params, indices=test) + + if parameters is not None: + # here we clone the parameters, since sometimes the parameters + # themselves might be estimators, e.g. when we search over different + # estimators in a pipeline. 
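+        # For instance, a search over pipeline steps may pass something like
+        # parameters={"clf": SVC()} (illustrative); copying the dict here keeps
+        # the folds from sharing, and possibly mutating, one estimator instance.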
+ # ref: https://github.com/scikit-learn/scikit-learn/pull/26786 + estimator = estimator.set_params(**clone(parameters, safe=False)) + + start_time = time.time() + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + + result = {} + try: + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + + except Exception: + # Note fit time as time until error + fit_time = time.time() - start_time + score_time = 0.0 + if error_score == "raise": + raise + elif isinstance(error_score, numbers.Number): + if isinstance(scorer, dict): + test_scores = {name: error_score for name in scorer} + if return_train_score: + train_scores = test_scores.copy() + else: + test_scores = error_score + if return_train_score: + train_scores = error_score + result["fit_error"] = format_exc() + else: + result["fit_error"] = None + + fit_time = time.time() - start_time + test_scores = _score( + estimator, X_test, y_test, scorer, score_params_test, error_score + ) + score_time = time.time() - start_time - fit_time + if return_train_score: + train_scores = _score( + estimator, X_train, y_train, scorer, score_params_train, error_score + ) + + if verbose > 1: + total_time = score_time + fit_time + end_msg = f"[CV{progress_msg}] END " + result_msg = params_msg + (";" if params_msg else "") + if verbose > 2: + if isinstance(test_scores, dict): + for scorer_name in sorted(test_scores): + result_msg += f" {scorer_name}: (" + if return_train_score: + scorer_scores = train_scores[scorer_name] + result_msg += f"train={scorer_scores:.3f}, " + result_msg += f"test={test_scores[scorer_name]:.3f})" + else: + result_msg += ", score=" + if return_train_score: + result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})" + else: + result_msg += f"{test_scores:.3f}" + result_msg += f" total time={logger.short_format_time(total_time)}" + + # Right align the result_msg + end_msg += "." * (80 - len(end_msg) - len(result_msg)) + end_msg += result_msg + print(end_msg) + + result["test_scores"] = test_scores + if return_train_score: + result["train_scores"] = train_scores + if return_n_test_samples: + result["n_test_samples"] = _num_samples(X_test) + if return_times: + result["fit_time"] = fit_time + result["score_time"] = score_time + if return_parameters: + result["parameters"] = parameters + if return_estimator: + result["estimator"] = estimator + return result + + +def _score(estimator, X_test, y_test, scorer, score_params, error_score="raise"): + """Compute the score(s) of an estimator on a given test set. + + Will return a dict of floats if `scorer` is a dict, otherwise a single + float is returned. + """ + if isinstance(scorer, dict): + # will cache method calls if needed. scorer() returns a dict + scorer = _MultimetricScorer(scorers=scorer, raise_exc=(error_score == "raise")) + + score_params = {} if score_params is None else score_params + + try: + if y_test is None: + scores = scorer(estimator, X_test, **score_params) + else: + scores = scorer(estimator, X_test, y_test, **score_params) + except Exception: + if isinstance(scorer, _MultimetricScorer): + # If `_MultimetricScorer` raises exception, the `error_score` + # parameter is equal to "raise". + raise + else: + if error_score == "raise": + raise + else: + scores = error_score + warnings.warn( + ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. 
Details: \n" + f"{format_exc()}" + ), + UserWarning, + ) + + # Check non-raised error messages in `_MultimetricScorer` + if isinstance(scorer, _MultimetricScorer): + exception_messages = [ + (name, str_e) for name, str_e in scores.items() if isinstance(str_e, str) + ] + if exception_messages: + # error_score != "raise" + for name, str_e in exception_messages: + scores[name] = error_score + warnings.warn( + ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. Details: \n" + f"{str_e}" + ), + UserWarning, + ) + + error_msg = "scoring must return a number, got %s (%s) instead. (scorer=%s)" + if isinstance(scores, dict): + for name, score in scores.items(): + if hasattr(score, "item"): + with suppress(ValueError): + # e.g. unwrap memmapped scalars + score = score.item() + if not isinstance(score, numbers.Number): + raise ValueError(error_msg % (score, type(score), name)) + scores[name] = score + else: # scalar + if hasattr(scores, "item"): + with suppress(ValueError): + # e.g. unwrap memmapped scalars + scores = scores.item() + if not isinstance(scores, numbers.Number): + raise ValueError(error_msg % (scores, type(scores), scorer)) + return scores + + +@validate_params( + { + "estimator": [HasMethods(["fit", "predict"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str, None], + "method": [ + StrOptions( + { + "predict", + "predict_proba", + "predict_log_proba", + "decision_function", + } + ) + ], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_val_predict( + estimator, + X, + y=None, + *, + groups=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + method="predict", +): + """Generate cross-validated estimates for each input data point. + + The data is split according to the cv parameter. Each sample belongs + to exactly one test set, and its prediction is computed with an + estimator fitted on the corresponding training set. + + Passing these predictions into an evaluation metric may not be a valid + way to measure generalization performance. Results can differ from + :func:`cross_validate` and :func:`cross_val_score` unless all tests sets + have equal size and the metric decomposes over samples. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator + The estimator instance to use to fit the data. It must implement a `fit` + method and the method given by the `method` parameter. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be, for example a list, or an array at least 2d. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. 
E.g.: + ``cross_val_predict(..., params={'groups': groups})``. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + predicting are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit`` and the CV + splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + method : {'predict', 'predict_proba', 'predict_log_proba', \ + 'decision_function'}, default='predict' + The method to be invoked by `estimator`. + + Returns + ------- + predictions : ndarray + This is the result of calling `method`. Shape: + + - When `method` is 'predict' and in special case where `method` is + 'decision_function' and the target is binary: (n_samples,) + - When `method` is one of {'predict_proba', 'predict_log_proba', + 'decision_function'} (unless special case above): + (n_samples, n_classes) + - If `estimator` is :term:`multioutput`, an extra dimension + 'n_outputs' is added to the end of each shape above. + + See Also + -------- + cross_val_score : Calculate score for each CV split. + cross_validate : Calculate one or more scores and timings for each CV + split. + + Notes + ----- + In the case that one or more classes are absent in a training portion, a + default score needs to be assigned to all instances for that class if + ``method`` produces columns per class, as in {'decision_function', + 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is + 0. In order to ensure finite output, we approximate negative infinity by + the minimum finite float value for the dtype in other cases. 
+ + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_val_predict + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + >>> y_pred = cross_val_predict(lasso, X, y, cv=3) + """ + params = _check_params_groups_deprecation(fit_params, params, groups) + X, y = indexable(X, y) + + if _routing_enabled(): + # For estimators, a MetadataRouter is created in get_metadata_routing + # methods. For these router methods, we create the router to use + # `process_routing` on it. + router = ( + MetadataRouter(owner="cross_validate") + .add( + splitter=cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + .add( + estimator=estimator, + # TODO(SLEP6): also pass metadata for the predict method. + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + ) + try: + routed_params = process_routing(router, "fit", **params) + except UnsetMetadataPassedError as e: + # The default exception would mention `fit` since in the above + # `process_routing` code, we pass `fit` as the caller. However, + # the user is not calling `fit` directly, so we change the message + # to make it more suitable for this case. + unrequested_params = sorted(e.unrequested_params) + raise UnsetMetadataPassedError( + message=( + f"{unrequested_params} are passed to `cross_val_predict` but are" + " not explicitly set as requested or not requested for" + f" cross_validate's estimator: {estimator.__class__.__name__} Call" + " `.set_fit_request({{metadata}}=True)` on the estimator for" + f" each metadata in {unrequested_params} that you want to use and" + " `metadata=False` for not using it. See the Metadata Routing User" + " guide " + " for more information." + ), + unrequested_params=e.unrequested_params, + routed_params=e.routed_params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={"groups": groups}) + routed_params.estimator = Bunch(fit=params) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + splits = list(cv.split(X, y, **routed_params.splitter.split)) + + test_indices = np.concatenate([test for _, test in splits]) + if not _check_is_permutation(test_indices, _num_samples(X)): + raise ValueError("cross_val_predict only works for partitions") + + # If classification methods produce multiple columns of output, + # we need to manually encode classes to ensure consistent column ordering. + encode = ( + method in ["decision_function", "predict_proba", "predict_log_proba"] + and y is not None + ) + if encode: + y = np.asarray(y) + if y.ndim == 1: + le = LabelEncoder() + y = le.fit_transform(y) + elif y.ndim == 2: + y_enc = np.zeros_like(y, dtype=int) + for i_label in range(y.shape[1]): + y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label]) + y = y_enc + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. 
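+ # Hedged usage sketch (editorial comment, not executed): mirroring the
+ # docstring example above, a call such as
+ #   lasso = linear_model.Lasso()
+ #   y_pred = cross_val_predict(lasso, X, y, cv=3)
+ # fits one clone per fold below and returns out-of-fold predictions ordered
+ # like the rows of X.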
+ parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) + predictions = parallel( + delayed(_fit_and_predict)( + clone(estimator), + X, + y, + train, + test, + routed_params.estimator.fit, + method, + ) + for train, test in splits + ) + + inv_test_indices = np.empty(len(test_indices), dtype=int) + inv_test_indices[test_indices] = np.arange(len(test_indices)) + + if sp.issparse(predictions[0]): + predictions = sp.vstack(predictions, format=predictions[0].format) + elif encode and isinstance(predictions[0], list): + # `predictions` is a list of method outputs from each fold. + # If each of those is also a list, then treat this as a + # multioutput-multiclass task. We need to separately concatenate + # the method outputs for each label into an `n_labels` long list. + n_labels = y.shape[1] + concat_pred = [] + for i_label in range(n_labels): + label_preds = np.concatenate([p[i_label] for p in predictions]) + concat_pred.append(label_preds) + predictions = concat_pred + else: + predictions = np.concatenate(predictions) + + if isinstance(predictions, list): + return [p[inv_test_indices] for p in predictions] + else: + return predictions[inv_test_indices] + + +def _fit_and_predict(estimator, X, y, train, test, fit_params, method): + """Fit estimator and predict values for a given dataset split. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' and 'predict' + The object to use to fit the data. + + X : array-like of shape (n_samples, n_features) + The data to fit. + + .. versionchanged:: 0.20 + X is only required to be an object with finite length or shape now + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + train : array-like of shape (n_train_samples,) + Indices of training samples. + + test : array-like of shape (n_test_samples,) + Indices of test samples. + + fit_params : dict or None + Parameters that will be passed to ``estimator.fit``. + + method : str + Invokes the passed method name of the passed estimator. 
+ + Returns + ------- + predictions : sequence + Result of calling 'estimator.method' + """ + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + fit_params = _check_method_params(X, params=fit_params, indices=train) + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, _ = _safe_split(estimator, X, y, test, train) + + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + func = getattr(estimator, method) + predictions = func(X_test) + + encode = ( + method in ["decision_function", "predict_proba", "predict_log_proba"] + and y is not None + ) + + if encode: + if isinstance(predictions, list): + predictions = [ + _enforce_prediction_order( + estimator.classes_[i_label], + predictions[i_label], + n_classes=len(set(y[:, i_label])), + method=method, + ) + for i_label in range(len(predictions)) + ] + else: + # A 2D y array should be a binary label indicator matrix + n_classes = len(set(y)) if y.ndim == 1 else y.shape[1] + predictions = _enforce_prediction_order( + estimator.classes_, predictions, n_classes, method + ) + return predictions + + +def _enforce_prediction_order(classes, predictions, n_classes, method): + """Ensure that prediction arrays have correct column order + + When doing cross-validation, if one or more classes are + not present in the subset of data used for training, + then the output prediction array might not have the same + columns as other folds. Use the list of class names + (assumed to be ints) to enforce the correct column order. + + Note that `classes` is the list of classes in this fold + (a subset of the classes in the full training set) + and `n_classes` is the number of classes in the full training set. + """ + if n_classes != len(classes): + recommendation = ( + "To fix this, use a cross-validation " + "technique resulting in properly " + "stratified folds" + ) + warnings.warn( + "Number of classes in training fold ({}) does " + "not match total number of classes ({}). " + "Results may not be appropriate for your use case. " + "{}".format(len(classes), n_classes, recommendation), + RuntimeWarning, + ) + if method == "decision_function": + if predictions.ndim == 2 and predictions.shape[1] != len(classes): + # This handles the case when the shape of predictions + # does not match the number of classes used to train + # it with. This case is found when sklearn.svm.SVC is + # set to `decision_function_shape='ovo'`. + raise ValueError( + "Output shape {} of {} does not match " + "number of classes ({}) in fold. " + "Irregular decision_function outputs " + "are not currently supported by " + "cross_val_predict".format(predictions.shape, method, len(classes)) + ) + if len(classes) <= 2: + # In this special case, `predictions` contains a 1D array. + raise ValueError( + "Only {} class/es in training fold, but {} " + "in overall dataset. This " + "is not supported for decision_function " + "with imbalanced folds. 
{}".format( + len(classes), n_classes, recommendation + ) + ) + + float_min = np.finfo(predictions.dtype).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + predictions_for_all_classes = np.full( + (_num_samples(predictions), n_classes), + default_values[method], + dtype=predictions.dtype, + ) + predictions_for_all_classes[:, classes] = predictions + predictions = predictions_for_all_classes + return predictions + + +def _check_is_permutation(indices, n_samples): + """Check whether indices is a reordering of the array np.arange(n_samples) + + Parameters + ---------- + indices : ndarray + int array to test + n_samples : int + number of expected elements + + Returns + ------- + is_partition : bool + True iff sorted(indices) is np.arange(n) + """ + if len(indices) != n_samples: + return False + hit = np.zeros(n_samples, dtype=bool) + hit[indices] = True + if not np.all(hit): + return False + return True + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "cv": ["cv_object"], + "n_permutations": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "verbose": ["verbose"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def permutation_test_score( + estimator, + X, + y, + *, + groups=None, + cv=None, + n_permutations=100, + n_jobs=None, + random_state=0, + verbose=0, + scoring=None, + fit_params=None, +): + """Evaluate the significance of a cross-validated score with permutations. + + Permutes targets to generate 'randomized data' and compute the empirical + p-value against the null hypothesis that features and targets are + independent. + + The p-value represents the fraction of randomized data sets where the + estimator performed as well or better than in the original data. A small + p-value suggests that there is a real dependency between features and + targets which has been used by the estimator to give good predictions. + A large p-value may be due to lack of real dependency between features + and targets or the estimator was not able to use the dependency to + give good predictions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : array-like of shape at least 2D + The data to fit. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Labels to constrain permutation within groups, i.e. ``y`` values + are permuted among samples with the same group identifier. + When not specified, ``y`` values are permuted among all samples. + + When a grouped cross-validator is used, the group labels are + also passed on to the ``split`` method of the cross-validator. The + cross-validator uses them for grouping the samples while splitting + the dataset into train/test set. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. 
+ Possible inputs for cv are: + + - `None`, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For `int`/`None` inputs, if the estimator is a classifier and `y` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + `cv` default value if `None` changed from 3-fold to 5-fold. + + n_permutations : int, default=100 + Number of times to permute ``y``. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the cross-validated score are parallelized over the permutations. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance or None, default=0 + Pass an int for reproducible output for permutation of + ``y`` values among samples. See :term:`Glossary `. + + verbose : int, default=0 + The verbosity level. + + scoring : str or callable, default=None + A single str (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + + If `None` the estimator's score method is used. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. versionadded:: 0.24 + + Returns + ------- + score : float + The true score without permuting targets. + + permutation_scores : array of shape (n_permutations,) + The scores obtained for each permutations. + + pvalue : float + The p-value, which approximates the probability that the score would + be obtained by chance. This is calculated as: + + `(C + 1) / (n_permutations + 1)` + + Where C is the number of permutations whose score >= the true score. + + The best possible p-value is 1/(n_permutations + 1), the worst is 1.0. + + Notes + ----- + This function implements Test 1 in: + + Ojala and Garriga. `Permutation Tests for Studying Classifier + Performance + `_. The + Journal of Machine Learning Research (2010) vol. 11 + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import permutation_test_score + >>> X, y = make_classification(random_state=0) + >>> estimator = LogisticRegression() + >>> score, permutation_scores, pvalue = permutation_test_score( + ... estimator, X, y, random_state=0 + ... ) + >>> print(f"Original Score: {score:.3f}") + Original Score: 0.810 + >>> print( + ... f"Permutation Scores: {permutation_scores.mean():.3f} +/- " + ... f"{permutation_scores.std():.3f}" + ... ) + Permutation Scores: 0.505 +/- 0.057 + >>> print(f"P-value: {pvalue:.3f}") + P-value: 0.010 + """ + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + scorer = check_scoring(estimator, scoring=scoring) + random_state = check_random_state(random_state) + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. 
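+ # Worked example of the p-value computed below (editorial comment): if none
+ # of the n_permutations=100 permuted scores reach the true score, then
+ #   pvalue = (0 + 1) / (100 + 1) = 1 / 101, i.e. about 0.0099,
+ # the smallest value this test can report, which matches the docstring's
+ # "P-value: 0.010" doctest.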
+ score = _permutation_test_score( + clone(estimator), X, y, groups, cv, scorer, fit_params=fit_params + ) + permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(_permutation_test_score)( + clone(estimator), + X, + _shuffle(y, groups, random_state), + groups, + cv, + scorer, + fit_params=fit_params, + ) + for _ in range(n_permutations) + ) + permutation_scores = np.array(permutation_scores) + pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) + return score, permutation_scores, pvalue + + +def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params): + """Auxiliary function for permutation_test_score""" + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + avg_score = [] + for train, test in cv.split(X, y, groups): + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + fit_params = _check_method_params(X, params=fit_params, indices=train) + estimator.fit(X_train, y_train, **fit_params) + avg_score.append(scorer(estimator, X_test, y_test)) + return np.mean(avg_score) + + +def _shuffle(y, groups, random_state): + """Return a shuffled copy of y eventually shuffle among same groups.""" + if groups is None: + indices = random_state.permutation(len(y)) + else: + indices = np.arange(len(groups)) + for group in np.unique(groups): + this_mask = groups == group + indices[this_mask] = random_state.permutation(indices[this_mask]) + return _safe_indexing(y, indices) + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "train_sizes": ["array-like"], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "exploit_incremental_learning": ["boolean"], + "n_jobs": [Integral, None], + "pre_dispatch": [Integral, str], + "verbose": ["verbose"], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "error_score": [StrOptions({"raise"}), Real], + "return_times": ["boolean"], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def learning_curve( + estimator, + X, + y, + *, + groups=None, + train_sizes=np.linspace(0.1, 1.0, 5), + cv=None, + scoring=None, + exploit_incremental_learning=False, + n_jobs=None, + pre_dispatch="all", + verbose=0, + shuffle=False, + random_state=None, + error_score=np.nan, + return_times=False, + fit_params=None, +): + """Learning curve. + + Determines cross-validated training and test scores for different training + set sizes. + + A cross-validation generator splits the whole dataset k times in training + and test data. Subsets of the training set with varying sizes will be used + to train the estimator and a score for each training subset size and the + test set will be computed. Afterwards, the scores will be averaged over + all k runs for each training subset size. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object type that implements the "fit" method + An object of that type which is cloned for each validation. It must + also implement "predict" unless `scoring` is a callable that doesn't + rely on "predict" to compute a score. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. 
+ + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + train_sizes : array-like of shape (n_ticks,), \ + default=np.linspace(0.1, 1.0, 5) + Relative or absolute numbers of training examples that will be used to + generate the learning curve. If the dtype is float, it is regarded as a + fraction of the maximum size of the training set (that is determined + by the selected validation method), i.e. it has to be within (0, 1]. + Otherwise it is interpreted as absolute sizes of the training sets. + Note that for classification the number of samples usually have to + be big enough to contain at least one sample from each class. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + exploit_incremental_learning : bool, default=False + If the estimator supports incremental learning, this will be + used to speed up fitting for different training set sizes. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the different training and test sets. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + shuffle : bool, default=False + Whether to shuffle training data before taking prefixes of it + based on``train_sizes``. + + random_state : int, RandomState instance or None, default=None + Used when ``shuffle`` is True. Pass an int for reproducible + output across multiple function calls. + See :term:`Glossary `. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + return_times : bool, default=False + Whether to return the fit and score times. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. 
+ + .. versionadded:: 0.24 + + Returns + ------- + train_sizes_abs : array of shape (n_unique_ticks,) + Numbers of training examples that has been used to generate the + learning curve. Note that the number of ticks might be less + than n_ticks because duplicate entries will be removed. + + train_scores : array of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : array of shape (n_ticks, n_cv_folds) + Scores on test set. + + fit_times : array of shape (n_ticks, n_cv_folds) + Times spent for fitting in seconds. Only present if ``return_times`` + is True. + + score_times : array of shape (n_ticks, n_cv_folds) + Times spent for scoring in seconds. Only present if ``return_times`` + is True. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.tree import DecisionTreeClassifier + >>> from sklearn.model_selection import learning_curve + >>> X, y = make_classification(n_samples=100, n_features=10, random_state=42) + >>> tree = DecisionTreeClassifier(max_depth=4, random_state=42) + >>> train_size_abs, train_scores, test_scores = learning_curve( + ... tree, X, y, train_sizes=[0.3, 0.6, 0.9] + ... ) + >>> for train_size, cv_train_scores, cv_test_scores in zip( + ... train_size_abs, train_scores, test_scores + ... ): + ... print(f"{train_size} samples were used to train the model") + ... print(f"The average train accuracy is {cv_train_scores.mean():.2f}") + ... print(f"The average test accuracy is {cv_test_scores.mean():.2f}") + 24 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.85 + 48 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.90 + 72 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.93 + """ + if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): + raise ValueError( + "An estimator must support the partial_fit interface " + "to exploit incremental learning" + ) + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + # Store it as list as we will be iterating over the list multiple times + cv_iter = list(cv.split(X, y, groups)) + + scorer = check_scoring(estimator, scoring=scoring) + + n_max_training_samples = len(cv_iter[0][0]) + # Because the lengths of folds can be significantly different, it is + # not guaranteed that we use all of the available training data when we + # use the first 'n_max_training_samples' samples. 
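+ # Worked example (editorial comment): with the 100-sample docstring example
+ # above and the default 5-fold CV, each training split holds 80 samples, so
+ # n_max_training_samples == 80 and train_sizes=[0.3, 0.6, 0.9] becomes the
+ # absolute sizes [24, 48, 72] reported in that example.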
+ train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) + n_unique_ticks = train_sizes_abs.shape[0] + if verbose > 0: + print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) + + parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) + + if shuffle: + rng = check_random_state(random_state) + cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) + + if exploit_incremental_learning: + classes = np.unique(y) if is_classifier(estimator) else None + out = parallel( + delayed(_incremental_fit_estimator)( + clone(estimator), + X, + y, + classes, + train, + test, + train_sizes_abs, + scorer, + return_times, + error_score=error_score, + fit_params=fit_params, + ) + for train, test in cv_iter + ) + out = np.asarray(out).transpose((2, 1, 0)) + else: + train_test_proportions = [] + for train, test in cv_iter: + for n_train_samples in train_sizes_abs: + train_test_proportions.append((train[:n_train_samples], test)) + + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=fit_params, + # TODO(SLEP6): support score params here + score_params=None, + return_train_score=True, + error_score=error_score, + return_times=return_times, + ) + for train, test in train_test_proportions + ) + _warn_or_raise_about_fit_failures(results, error_score) + results = _aggregate_score_dicts(results) + train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T + test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T + out = [train_scores, test_scores] + + if return_times: + fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T + score_times = results["score_time"].reshape(-1, n_unique_ticks).T + out.extend([fit_times, score_times]) + + ret = train_sizes_abs, out[0], out[1] + + if return_times: + ret = ret + (out[2], out[3]) + + return ret + + +def _translate_train_sizes(train_sizes, n_max_training_samples): + """Determine absolute sizes of training subsets and validate 'train_sizes'. + + Examples: + _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] + _translate_train_sizes([5, 10], 10) -> [5, 10] + + Parameters + ---------- + train_sizes : array-like of shape (n_ticks,) + Numbers of training examples that will be used to generate the + learning curve. If the dtype is float, it is regarded as a + fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. + + n_max_training_samples : int + Maximum number of training samples (upper bound of 'train_sizes'). + + Returns + ------- + train_sizes_abs : array of shape (n_unique_ticks,) + Numbers of training examples that will be used to generate the + learning curve. Note that the number of ticks might be less + than n_ticks because duplicate entries will be removed. + """ + train_sizes_abs = np.asarray(train_sizes) + n_ticks = train_sizes_abs.shape[0] + n_min_required_samples = np.min(train_sizes_abs) + n_max_required_samples = np.max(train_sizes_abs) + if np.issubdtype(train_sizes_abs.dtype, np.floating): + if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: + raise ValueError( + "train_sizes has been interpreted as fractions " + "of the maximum number of training samples and " + "must be within (0, 1], but is within [%f, %f]." 
+ % (n_min_required_samples, n_max_required_samples) + ) + train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype( + dtype=int, copy=False + ) + train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) + else: + if ( + n_min_required_samples <= 0 + or n_max_required_samples > n_max_training_samples + ): + raise ValueError( + "train_sizes has been interpreted as absolute " + "numbers of training samples and must be within " + "(0, %d], but is within [%d, %d]." + % ( + n_max_training_samples, + n_min_required_samples, + n_max_required_samples, + ) + ) + + train_sizes_abs = np.unique(train_sizes_abs) + if n_ticks > train_sizes_abs.shape[0]: + warnings.warn( + "Removed duplicate entries from 'train_sizes'. Number " + "of ticks will be less than the size of " + "'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks), + RuntimeWarning, + ) + + return train_sizes_abs + + +def _incremental_fit_estimator( + estimator, + X, + y, + classes, + train, + test, + train_sizes, + scorer, + return_times, + error_score, + fit_params, +): + """Train estimator on training subsets incrementally and compute scores.""" + train_scores, test_scores, fit_times, score_times = [], [], [], [] + partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) + if fit_params is None: + fit_params = {} + if classes is None: + partial_fit_func = partial(estimator.partial_fit, **fit_params) + else: + partial_fit_func = partial(estimator.partial_fit, classes=classes, **fit_params) + + for n_train_samples, partial_train in partitions: + train_subset = train[:n_train_samples] + X_train, y_train = _safe_split(estimator, X, y, train_subset) + X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) + X_test, y_test = _safe_split(estimator, X, y, test, train_subset) + start_fit = time.time() + if y_partial_train is None: + partial_fit_func(X_partial_train) + else: + partial_fit_func(X_partial_train, y_partial_train) + fit_time = time.time() - start_fit + fit_times.append(fit_time) + + start_score = time.time() + + # TODO(SLEP6): support score params in the following two calls + test_scores.append( + _score( + estimator, + X_test, + y_test, + scorer, + score_params=None, + error_score=error_score, + ) + ) + train_scores.append( + _score( + estimator, + X_train, + y_train, + scorer, + score_params=None, + error_score=error_score, + ) + ) + score_time = time.time() - start_score + score_times.append(score_time) + + ret = ( + (train_scores, test_scores, fit_times, score_times) + if return_times + else (train_scores, test_scores) + ) + + return np.array(ret).T + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "param_name": [str], + "param_range": ["array-like"], + "groups": ["array-like", None], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "n_jobs": [Integral, None], + "pre_dispatch": [Integral, str], + "verbose": ["verbose"], + "error_score": [StrOptions({"raise"}), Real], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def validation_curve( + estimator, + X, + y, + *, + param_name, + param_range, + groups=None, + cv=None, + scoring=None, + n_jobs=None, + pre_dispatch="all", + verbose=0, + error_score=np.nan, + fit_params=None, +): + """Validation curve. + + Determine training and test scores for varying parameter values. 
+ + Compute scores for an estimator with different values of a specified + parameter. This is similar to grid search with one parameter. However, this + will also compute training scores and is merely a utility for plotting the + results. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object type that implements the "fit" method + An object of that type which is cloned for each validation. It must + also implement "predict" unless `scoring` is a callable that doesn't + rely on "predict" to compute a score. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + param_name : str + Name of the parameter that will be varied. + + param_range : array-like of shape (n_values,) + The values of the parameter that will be evaluated. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the combinations of each parameter + value and each cross-validation split. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. versionadded:: 0.24 + + Returns + ------- + train_scores : array of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : array of shape (n_ticks, n_cv_folds) + Scores on test set. 
+ + Notes + ----- + See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import validation_curve + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> train_scores, test_scores = validation_curve( + ... logistic_regression, X, y, param_name=param_name, param_range=param_range + ... ) + >>> print(f"The average train accuracy is {train_scores.mean():.2f}") + The average train accuracy is 0.81 + >>> print(f"The average test accuracy is {test_scores.mean():.2f}") + The average test accuracy is 0.81 + """ + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + scorer = check_scoring(estimator, scoring=scoring) + + parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters={param_name: v}, + fit_params=fit_params, + # TODO(SLEP6): support score params here + score_params=None, + return_train_score=True, + error_score=error_score, + ) + # NOTE do not change order of iteration to allow one time cv splitters + for train, test in cv.split(X, y, groups) + for v in param_range + ) + n_params = len(param_range) + + results = _aggregate_score_dicts(results) + train_scores = results["train_scores"].reshape(-1, n_params).T + test_scores = results["test_scores"].reshape(-1, n_params).T + + return train_scores, test_scores + + +def _aggregate_score_dicts(scores): + """Aggregate the list of dict to dict of np ndarray + + The aggregated output of _aggregate_score_dicts will be a list of dict + of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...] + Convert it to a dict of array {'prec': np.array([0.1 ...]), ...} + + Parameters + ---------- + + scores : list of dict + List of dicts of the scores for all scorers. This is a flat list, + assumed originally to be of row major order. + + Example + ------- + + >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3}, + ... 
{'a': 10, 'b': 10}] # doctest: +SKIP + >>> _aggregate_score_dicts(scores) # doctest: +SKIP + {'a': array([1, 2, 3, 10]), + 'b': array([10, 2, 3, 10])} + """ + return { + key: ( + np.asarray([score[key] for score in scores]) + if isinstance(scores[0][key], numbers.Number) + else [score[key] for score in scores] + ) + for key in scores[0] + } diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e94476addb3e8305952e0d9d1b9270899880355 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aab02888596826c97afb1d75b0412dfa6be0500 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71a6805f85032f015217412b43564adf49d118eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4260537b77aa22857f2c36565cb6db66c4388fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50436e0436972c003a296747e49a26cf780f8044 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aa027f563208a3d4156f38f467c7caa1d73e579 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90948d56545e725d4a44ab75ec58d99e5a5a6a89 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py new file mode 100644 index 0000000000000000000000000000000000000000..54a993db76933a5e710f0ddd20a4efd0118ecf95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py @@ -0,0 +1,24 @@ +""" +Common utilities for testing model selection. +""" + +import numpy as np + +from sklearn.model_selection import KFold + + +class OneTimeSplitter: + """A wrapper to make KFold single entry cv iterator""" + + def __init__(self, n_splits=4, n_samples=99): + self.n_splits = n_splits + self.n_samples = n_samples + self.indices = iter(KFold(n_splits=n_splits).split(np.ones(n_samples))) + + def split(self, X=None, y=None, groups=None): + """Split can be called only once""" + for index in self.indices: + yield index + + def get_n_splits(self, X=None, y=None, groups=None): + return self.n_splits diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..1a7268150fd90ceaa67d18a8455e85941e0c016e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py @@ -0,0 +1,595 @@ +import numpy as np +import pytest + +from sklearn.datasets import load_iris +from sklearn.model_selection import ( + LearningCurveDisplay, + ValidationCurveDisplay, + learning_curve, + validation_curve, +) +from sklearn.tree import DecisionTreeClassifier +from sklearn.utils import shuffle +from sklearn.utils._testing import assert_allclose, assert_array_equal + + +@pytest.fixture +def data(): + return shuffle(*load_iris(return_X_y=True), random_state=0) + + +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + ({"std_display_style": "invalid"}, ValueError, "Unknown std_display_style:"), + ({"score_type": "invalid"}, ValueError, "Unknown score_type:"), + ], +) +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_parameters_validation( + pyplot, data, params, err_type, err_msg, CurveDisplay, specific_params +): + """Check that we raise a proper error when passing invalid parameters.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + with pytest.raises(err_type, match=err_msg): + CurveDisplay.from_estimator(estimator, X, y, **specific_params, **params) + + +def test_learning_curve_display_default_usage(pyplot, data): + """Check the default usage of the LearningCurveDisplay class.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + train_sizes = [0.3, 0.6, 0.9] + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=train_sizes + ) + + import matplotlib as mpl + + assert display.errorbar_ is None + + assert 
isinstance(display.lines_, list) + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + + assert isinstance(display.fill_between_, list) + for fill in display.fill_between_: + assert isinstance(fill, mpl.collections.PolyCollection) + assert fill.get_alpha() == 0.5 + + assert display.score_name == "Score" + assert display.ax_.get_xlabel() == "Number of samples in the training set" + assert display.ax_.get_ylabel() == "Score" + + _, legend_labels = display.ax_.get_legend_handles_labels() + assert legend_labels == ["Train", "Test"] + + train_sizes_abs, train_scores, test_scores = learning_curve( + estimator, X, y, train_sizes=train_sizes + ) + + assert_array_equal(display.train_sizes, train_sizes_abs) + assert_allclose(display.train_scores, train_scores) + assert_allclose(display.test_scores, test_scores) + + +def test_validation_curve_display_default_usage(pyplot, data): + """Check the default usage of the ValidationCurveDisplay class.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name, param_range = "max_depth", [1, 3, 5] + display = ValidationCurveDisplay.from_estimator( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + import matplotlib as mpl + + assert display.errorbar_ is None + + assert isinstance(display.lines_, list) + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + + assert isinstance(display.fill_between_, list) + for fill in display.fill_between_: + assert isinstance(fill, mpl.collections.PolyCollection) + assert fill.get_alpha() == 0.5 + + assert display.score_name == "Score" + assert display.ax_.get_xlabel() == f"{param_name}" + assert display.ax_.get_ylabel() == "Score" + + _, legend_labels = display.ax_.get_legend_handles_labels() + assert legend_labels == ["Train", "Test"] + + train_scores, test_scores = validation_curve( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + assert_array_equal(display.param_range, param_range) + assert_allclose(display.train_scores, train_scores) + assert_allclose(display.test_scores, test_scores) + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_negate_score(pyplot, data, CurveDisplay, specific_params): + """Check the behaviour of the `negate_score` parameter calling `from_estimator` and + `plot`. 
+ """ + X, y = data + estimator = DecisionTreeClassifier(max_depth=1, random_state=0) + + negate_score = False + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + + positive_scores = display.lines_[0].get_data()[1] + assert (positive_scores >= 0).all() + assert display.ax_.get_ylabel() == "Score" + + negate_score = True + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + + negative_scores = display.lines_[0].get_data()[1] + assert (negative_scores <= 0).all() + assert_allclose(negative_scores, -positive_scores) + assert display.ax_.get_ylabel() == "Negative score" + + negate_score = False + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + assert display.ax_.get_ylabel() == "Score" + display.plot(negate_score=not negate_score) + assert display.ax_.get_ylabel() == "Score" + assert (display.lines_[0].get_data()[1] < 0).all() + + +@pytest.mark.parametrize( + "score_name, ylabel", [(None, "Score"), ("Accuracy", "Accuracy")] +) +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_score_name( + pyplot, data, score_name, ylabel, CurveDisplay, specific_params +): + """Check that we can overwrite the default score name shown on the y-axis.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, score_name=score_name + ) + + assert display.ax_.get_ylabel() == ylabel + X, y = data + estimator = DecisionTreeClassifier(max_depth=1, random_state=0) + + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, score_name=score_name + ) + + assert display.score_name == ylabel + + +@pytest.mark.parametrize("std_display_style", (None, "errorbar")) +def test_learning_curve_display_score_type(pyplot, data, std_display_style): + """Check the behaviour of setting the `score_type` parameter.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + train_sizes = [0.3, 0.6, 0.9] + train_sizes_abs, train_scores, test_scores = learning_curve( + estimator, X, y, train_sizes=train_sizes + ) + + score_type = "train" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, train_sizes_abs) + assert_allclose(y_data, train_scores.mean(axis=1)) + + score_type = "test" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Test"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + 
assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, train_sizes_abs) + assert_allclose(y_data, test_scores.mean(axis=1)) + + score_type = "both" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train", "Test"] + + if std_display_style is None: + assert len(display.lines_) == 2 + assert display.errorbar_ is None + x_data_train, y_data_train = display.lines_[0].get_data() + x_data_test, y_data_test = display.lines_[1].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 2 + x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data() + x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data() + + assert_array_equal(x_data_train, train_sizes_abs) + assert_allclose(y_data_train, train_scores.mean(axis=1)) + assert_array_equal(x_data_test, train_sizes_abs) + assert_allclose(y_data_test, test_scores.mean(axis=1)) + + +@pytest.mark.parametrize("std_display_style", (None, "errorbar")) +def test_validation_curve_display_score_type(pyplot, data, std_display_style): + """Check the behaviour of setting the `score_type` parameter.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name, param_range = "max_depth", [1, 3, 5] + train_scores, test_scores = validation_curve( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + score_type = "train" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, param_range) + assert_allclose(y_data, train_scores.mean(axis=1)) + + score_type = "test" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Test"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, param_range) + assert_allclose(y_data, test_scores.mean(axis=1)) + + score_type = "both" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train", "Test"] + + if std_display_style is None: + assert len(display.lines_) == 2 + assert display.errorbar_ is None + x_data_train, y_data_train = display.lines_[0].get_data() + x_data_test, y_data_test = display.lines_[1].get_data() + 
else: + assert display.lines_ is None + assert len(display.errorbar_) == 2 + x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data() + x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data() + + assert_array_equal(x_data_train, param_range) + assert_allclose(y_data_train, train_scores.mean(axis=1)) + assert_array_equal(x_data_test, param_range) + assert_allclose(y_data_test, test_scores.mean(axis=1)) + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params, expected_xscale", + [ + ( + ValidationCurveDisplay, + {"param_name": "max_depth", "param_range": np.arange(1, 5)}, + "linear", + ), + (LearningCurveDisplay, {"train_sizes": np.linspace(0.1, 0.9, num=5)}, "linear"), + ( + ValidationCurveDisplay, + { + "param_name": "max_depth", + "param_range": np.round(np.logspace(0, 2, num=5)).astype(np.int64), + }, + "log", + ), + (LearningCurveDisplay, {"train_sizes": np.logspace(-1, 0, num=5)}, "log"), + ], +) +def test_curve_display_xscale_auto( + pyplot, data, CurveDisplay, specific_params, expected_xscale +): + """Check the behaviour of the x-axis scaling depending on the data provided.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + display = CurveDisplay.from_estimator(estimator, X, y, **specific_params) + assert display.ax_.get_xscale() == expected_xscale + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_std_display_style(pyplot, data, CurveDisplay, specific_params): + """Check the behaviour of the parameter `std_display_style`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + import matplotlib as mpl + + std_display_style = None + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert len(display.lines_) == 2 + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + assert display.errorbar_ is None + assert display.fill_between_ is None + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + std_display_style = "fill_between" + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert len(display.lines_) == 2 + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + assert display.errorbar_ is None + assert len(display.fill_between_) == 2 + for fill_between in display.fill_between_: + assert isinstance(fill_between, mpl.collections.PolyCollection) + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + std_display_style = "errorbar" + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert display.lines_ is None + assert len(display.errorbar_) == 2 + for errorbar in display.errorbar_: + assert isinstance(errorbar, mpl.container.ErrorbarContainer) + assert display.fill_between_ is None + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_plot_kwargs(pyplot, data, CurveDisplay, 
specific_params): + """Check the behaviour of the different plotting keyword arguments: `line_kw`, + `fill_between_kw`, and `errorbar_kw`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + std_display_style = "fill_between" + line_kw = {"color": "red"} + fill_between_kw = {"color": "red", "alpha": 1.0} + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + ) + + assert display.lines_[0].get_color() == "red" + assert_allclose( + display.fill_between_[0].get_facecolor(), + [[1.0, 0.0, 0.0, 1.0]], # trust me, it's red + ) + + std_display_style = "errorbar" + errorbar_kw = {"color": "red"} + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + errorbar_kw=errorbar_kw, + ) + + assert display.errorbar_[0].lines[0].get_color() == "red" + + +# TODO(1.5): to be removed +def test_learning_curve_display_deprecate_log_scale(data, pyplot): + """Check that we warn for the deprecated parameter `log_scale`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"): + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=True + ) + + assert display.ax_.get_xscale() == "log" + assert display.ax_.get_yscale() == "linear" + + with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"): + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=False + ) + + assert display.ax_.get_xscale() == "linear" + assert display.ax_.get_yscale() == "linear" + + +@pytest.mark.parametrize( + "param_range, xscale", + [([5, 10, 15], "linear"), ([-50, 5, 50, 500], "symlog"), ([5, 50, 500], "log")], +) +def test_validation_curve_xscale_from_param_range_provided_as_a_list( + pyplot, data, param_range, xscale +): + """Check the induced xscale from the provided param_range values.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name = "max_depth" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + ) + + assert display.ax_.get_xscale() == xscale + + +@pytest.mark.parametrize( + "Display, params", + [ + (LearningCurveDisplay, {}), + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + ], +) +def test_subclassing_displays(pyplot, data, Display, params): + """Check that named constructors return the correct type when subclassed. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + class SubclassOfDisplay(Display): + pass + + display = SubclassOfDisplay.from_estimator(estimator, X, y, **params) + assert isinstance(display, SubclassOfDisplay) diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py new file mode 100644 index 0000000000000000000000000000000000000000..c0db76c5c6ef654340742eeaf3f744637a06fd8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py @@ -0,0 +1,2537 @@ +"""Test the search module""" + +import pickle +import re +import sys +from collections.abc import Iterable, Sized +from functools import partial +from io import StringIO +from itertools import chain, product +from types import GeneratorType + +import numpy as np +import pytest +from scipy.stats import bernoulli, expon, uniform + +from sklearn.base import BaseEstimator, ClassifierMixin, is_classifier +from sklearn.cluster import KMeans +from sklearn.datasets import ( + make_blobs, + make_classification, + make_multilabel_classification, +) +from sklearn.ensemble import HistGradientBoostingClassifier +from sklearn.exceptions import FitFailedWarning +from sklearn.experimental import enable_halving_search_cv # noqa +from sklearn.impute import SimpleImputer +from sklearn.linear_model import ( + LinearRegression, + Ridge, + SGDClassifier, +) +from sklearn.metrics import ( + accuracy_score, + confusion_matrix, + f1_score, + make_scorer, + r2_score, + recall_score, + roc_auc_score, +) +from sklearn.metrics.pairwise import euclidean_distances +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + HalvingGridSearchCV, + KFold, + LeaveOneGroupOut, + LeavePGroupsOut, + ParameterGrid, + ParameterSampler, + RandomizedSearchCV, + StratifiedKFold, + StratifiedShuffleSplit, + train_test_split, +) +from sklearn.model_selection._search import BaseSearchCV +from sklearn.model_selection.tests.common import OneTimeSplitter +from sklearn.neighbors import KernelDensity, KNeighborsClassifier, LocalOutlierFactor +from sklearn.pipeline import Pipeline +from sklearn.svm import SVC, LinearSVC +from sklearn.tests.metadata_routing_common import ( + ConsumingScorer, + _Registry, + check_recorded_metadata, +) +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._mocking import CheckingClassifier, MockDataFrame +from sklearn.utils._testing import ( + MinimalClassifier, + MinimalRegressor, + MinimalTransformer, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + + +# Neither of the following two estimators inherit from BaseEstimator, +# to test hyperparameter search on user-defined classifiers. 
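+# MockClassifier implements just enough of the estimator API (fit, predict,
+# score, get_params, set_params) for the parameter searches to clone and drive it.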
+class MockClassifier: + """Dummy classifier to test the parameter search algorithms""" + + def __init__(self, foo_param=0): + self.foo_param = foo_param + + def fit(self, X, Y): + assert len(X) == len(Y) + self.classes_ = np.unique(Y) + return self + + def predict(self, T): + return T.shape[0] + + def transform(self, X): + return X + self.foo_param + + def inverse_transform(self, X): + return X - self.foo_param + + predict_proba = predict + predict_log_proba = predict + decision_function = predict + + def score(self, X=None, Y=None): + if self.foo_param > 1: + score = 1.0 + else: + score = 0.0 + return score + + def get_params(self, deep=False): + return {"foo_param": self.foo_param} + + def set_params(self, **params): + self.foo_param = params["foo_param"] + return self + + +class LinearSVCNoScore(LinearSVC): + """A LinearSVC classifier that has no score method.""" + + @property + def score(self): + raise AttributeError + + +X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) +y = np.array([1, 1, 2, 2]) + + +def assert_grid_iter_equals_getitem(grid): + assert list(grid) == [grid[i] for i in range(len(grid))] + + +@pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) +@pytest.mark.parametrize( + "input, error_type, error_message", + [ + (0, TypeError, r"Parameter .* a dict or a list, got: 0 of type int"), + ([{"foo": [0]}, 0], TypeError, r"Parameter .* is not a dict \(0\)"), + ( + {"foo": 0}, + TypeError, + r"Parameter (grid|distribution) for parameter 'foo' (is not|needs to be) " + r"(a list or a numpy array|iterable or a distribution).*", + ), + ], +) +def test_validate_parameter_input(klass, input, error_type, error_message): + with pytest.raises(error_type, match=error_message): + klass(input) + + +def test_parameter_grid(): + # Test basic properties of ParameterGrid. 
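+    # A dict of lists expands to the cross-product of the values, while a list
+    # of dicts yields the union of the individual sub-grids.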
+ params1 = {"foo": [1, 2, 3]} + grid1 = ParameterGrid(params1) + assert isinstance(grid1, Iterable) + assert isinstance(grid1, Sized) + assert len(grid1) == 3 + assert_grid_iter_equals_getitem(grid1) + + params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} + grid2 = ParameterGrid(params2) + assert len(grid2) == 6 + + # loop to assert we can iterate over the grid multiple times + for i in range(2): + # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) + points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) + assert points == set( + ("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]) + ) + assert_grid_iter_equals_getitem(grid2) + + # Special case: empty grid (useful to get default estimator settings) + empty = ParameterGrid({}) + assert len(empty) == 1 + assert list(empty) == [{}] + assert_grid_iter_equals_getitem(empty) + with pytest.raises(IndexError): + empty[1] + + has_empty = ParameterGrid([{"C": [1, 10]}, {}, {"C": [0.5]}]) + assert len(has_empty) == 4 + assert list(has_empty) == [{"C": 1}, {"C": 10}, {}, {"C": 0.5}] + assert_grid_iter_equals_getitem(has_empty) + + +def test_grid_search(): + # Test that the best estimator contains the right value for foo_param + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3) + # make sure it selects the smallest parameter in case of ties + old_stdout = sys.stdout + sys.stdout = StringIO() + grid_search.fit(X, y) + sys.stdout = old_stdout + assert grid_search.best_estimator_.foo_param == 2 + + assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) + + # Smoke test the score etc: + grid_search.score(X, y) + grid_search.predict_proba(X) + grid_search.decision_function(X) + grid_search.transform(X) + + # Test exception handling on scoring + grid_search.scoring = "sklearn" + with pytest.raises(ValueError): + grid_search.fit(X, y) + + +def test_grid_search_pipeline_steps(): + # check that parameters that are estimators are cloned before fitting + pipe = Pipeline([("regressor", LinearRegression())]) + param_grid = {"regressor": [LinearRegression(), Ridge()]} + grid_search = GridSearchCV(pipe, param_grid, cv=2) + grid_search.fit(X, y) + regressor_results = grid_search.cv_results_["param_regressor"] + assert isinstance(regressor_results[0], LinearRegression) + assert isinstance(regressor_results[1], Ridge) + assert not hasattr(regressor_results[0], "coef_") + assert not hasattr(regressor_results[1], "coef_") + assert regressor_results[0] is not grid_search.best_estimator_ + assert regressor_results[1] is not grid_search.best_estimator_ + # check that we didn't modify the parameter grid that was passed + assert not hasattr(param_grid["regressor"][0], "coef_") + assert not hasattr(param_grid["regressor"][1], "coef_") + + +@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) +def test_SearchCV_with_fit_params(SearchCV): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_fit_params=["spam", "eggs"]) + searcher = SearchCV(clf, {"foo_param": [1, 2, 3]}, cv=2, error_score="raise") + + # The CheckingClassifier generates an assertion error if + # a parameter is missing or has length != len(X). + err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." 
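+    # Only `spam` is forwarded below, so the expected `eggs` fit parameter is
+    # reported as missing.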
+ with pytest.raises(AssertionError, match=err_msg): + searcher.fit(X, y, spam=np.ones(10)) + + err_msg = "Fit parameter spam has length 1; expected" + with pytest.raises(AssertionError, match=err_msg): + searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) + searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) + + +@ignore_warnings +def test_grid_search_no_score(): + # Test grid-search on classifier that has no score function. + clf = LinearSVC(dual="auto", random_state=0) + X, y = make_blobs(random_state=0, centers=2) + Cs = [0.1, 1, 10] + clf_no_score = LinearSVCNoScore(dual="auto", random_state=0) + grid_search = GridSearchCV(clf, {"C": Cs}, scoring="accuracy") + grid_search.fit(X, y) + + grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}, scoring="accuracy") + # smoketest grid search + grid_search_no_score.fit(X, y) + + # check that best params are equal + assert grid_search_no_score.best_params_ == grid_search.best_params_ + # check that we can call score and that it gives the correct result + assert grid_search.score(X, y) == grid_search_no_score.score(X, y) + + # giving no scoring function raises an error + grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}) + with pytest.raises(TypeError, match="no scoring"): + grid_search_no_score.fit([[1]]) + + +def test_grid_search_score_method(): + X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [0.1]} + + search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) + search_accuracy = GridSearchCV(clf, grid, scoring="accuracy").fit(X, y) + search_no_score_method_auc = GridSearchCV( + LinearSVCNoScore(dual="auto"), grid, scoring="roc_auc" + ).fit(X, y) + search_auc = GridSearchCV(clf, grid, scoring="roc_auc").fit(X, y) + + # Check warning only occurs in situation where behavior changed: + # estimator requires score method to compete with scoring parameter + score_no_scoring = search_no_scoring.score(X, y) + score_accuracy = search_accuracy.score(X, y) + score_no_score_auc = search_no_score_method_auc.score(X, y) + score_auc = search_auc.score(X, y) + + # ensure the test is sane + assert score_auc < 1.0 + assert score_accuracy < 1.0 + assert score_auc != score_accuracy + + assert_almost_equal(score_accuracy, score_no_scoring) + assert_almost_equal(score_auc, score_no_score_auc) + + +def test_grid_search_groups(): + # Check if ValueError (when groups is None) propagates to GridSearchCV + # And also check if groups is correctly passed to the cv object + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=15, n_classes=2, random_state=0) + groups = rng.randint(0, 3, 15) + + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [1]} + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(n_splits=3), + GroupShuffleSplit(), + ] + error_msg = "The 'groups' parameter should not be None." 
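+    # Group-aware splitters must receive `groups` at fit time; omitting them
+    # should raise, while passing them explicitly should work.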
+ for cv in group_cvs: + gs = GridSearchCV(clf, grid, cv=cv) + with pytest.raises(ValueError, match=error_msg): + gs.fit(X, y) + gs.fit(X, y, groups=groups) + + non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()] + for cv in non_group_cvs: + gs = GridSearchCV(clf, grid, cv=cv) + # Should not raise an error + gs.fit(X, y) + + +def test_classes__property(): + # Test that classes_ property matches best_estimator_.classes_ + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + Cs = [0.1, 1, 10] + + grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs}) + grid_search.fit(X, y) + assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) + + # Test that regressors do not have a classes_ attribute + grid_search = GridSearchCV(Ridge(), {"alpha": [1.0, 2.0]}) + grid_search.fit(X, y) + assert not hasattr(grid_search, "classes_") + + # Test that the grid searcher has no classes_ attribute before it's fit + grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs}) + assert not hasattr(grid_search, "classes_") + + # Test that the grid searcher has no classes_ attribute without a refit + grid_search = GridSearchCV( + LinearSVC(dual="auto", random_state=0), {"C": Cs}, refit=False + ) + grid_search.fit(X, y) + assert not hasattr(grid_search, "classes_") + + +def test_trivial_cv_results_attr(): + # Test search over a "grid" with only one point. + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1]}, cv=3) + grid_search.fit(X, y) + assert hasattr(grid_search, "cv_results_") + + random_search = RandomizedSearchCV(clf, {"foo_param": [0]}, n_iter=1, cv=3) + random_search.fit(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_no_refit(): + # Test that GSCV can be used for model selection alone without refitting + clf = MockClassifier() + for scoring in [None, ["accuracy", "precision"]]: + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=False, cv=3) + grid_search.fit(X, y) + assert ( + not hasattr(grid_search, "best_estimator_") + and hasattr(grid_search, "best_index_") + and hasattr(grid_search, "best_params_") + ) + + # Make sure the functions predict/transform etc. raise meaningful + # error messages + for fn_name in ( + "predict", + "predict_proba", + "predict_log_proba", + "transform", + "inverse_transform", + ): + outer_msg = f"has no attribute '{fn_name}'" + inner_msg = ( + f"`refit=False`. 
{fn_name} is available only after " + "refitting on the best parameters" + ) + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + getattr(grid_search, fn_name)(X) + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + # Test that an invalid refit param raises appropriate error messages + error_msg = ( + "For multi-metric scoring, the parameter refit must be set to a scorer key" + ) + for refit in [True, "recall", "accuracy"]: + with pytest.raises(ValueError, match=error_msg): + GridSearchCV( + clf, {}, refit=refit, scoring={"acc": "accuracy", "prec": "precision"} + ).fit(X, y) + + +def test_grid_search_error(): + # Test that grid search will capture errors on data with different length + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + with pytest.raises(ValueError): + cv.fit(X_[:180], y_) + + +def test_grid_search_one_grid_point(): + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} + + clf = SVC(gamma="auto") + cv = GridSearchCV(clf, param_dict) + cv.fit(X_, y_) + + clf = SVC(C=1.0, kernel="rbf", gamma=0.1) + clf.fit(X_, y_) + + assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) + + +def test_grid_search_when_param_grid_includes_range(): + # Test that the best estimator contains the right value for foo_param + clf = MockClassifier() + grid_search = None + grid_search = GridSearchCV(clf, {"foo_param": range(1, 4)}, cv=3) + grid_search.fit(X, y) + assert grid_search.best_estimator_.foo_param == 2 + + +def test_grid_search_bad_param_grid(): + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + param_dict = {"C": 1} + clf = SVC(gamma="auto") + error_msg = re.escape( + "Parameter grid for parameter 'C' needs to be a list or " + "a numpy array, but got 1 (of type int) instead. Single " + "values need to be wrapped in a list with one element." + ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(TypeError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": []} + clf = SVC() + error_msg = re.escape( + "Parameter grid for parameter 'C' need to be a non-empty sequence, got: []" + ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(ValueError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": "1,2,3"} + clf = SVC(gamma="auto") + error_msg = re.escape( + "Parameter grid for parameter 'C' needs to be a list or a numpy array, " + "but got '1,2,3' (of type str) instead. Single values need to be " + "wrapped in a list with one element." 
+ ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(TypeError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": np.ones((3, 2))} + clf = SVC() + search = GridSearchCV(clf, param_dict) + with pytest.raises(ValueError): + search.fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_grid_search_sparse(csr_container): + # Test that grid search works with both dense and sparse matrices + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(X_[:180], y_[:180]) + y_pred = cv.predict(X_[180:]) + C = cv.best_estimator_.C + + X_ = csr_container(X_) + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(X_[:180].tocoo(), y_[:180]) + y_pred2 = cv.predict(X_[180:]) + C2 = cv.best_estimator_.C + + assert np.mean(y_pred == y_pred2) >= 0.9 + assert C == C2 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_grid_search_sparse_scoring(csr_container): + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1") + cv.fit(X_[:180], y_[:180]) + y_pred = cv.predict(X_[180:]) + C = cv.best_estimator_.C + + X_ = csr_container(X_) + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1") + cv.fit(X_[:180], y_[:180]) + y_pred2 = cv.predict(X_[180:]) + C2 = cv.best_estimator_.C + + assert_array_equal(y_pred, y_pred2) + assert C == C2 + # Smoke test the score + # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), + # cv.score(X_[:180], y[:180])) + + # test loss where greater is worse + def f1_loss(y_true_, y_pred_): + return -f1_score(y_true_, y_pred_) + + F1Loss = make_scorer(f1_loss, greater_is_better=False) + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring=F1Loss) + cv.fit(X_[:180], y_[:180]) + y_pred3 = cv.predict(X_[180:]) + C3 = cv.best_estimator_.C + + assert C == C3 + assert_array_equal(y_pred, y_pred3) + + +def test_grid_search_precomputed_kernel(): + # Test that grid search works when the input features are given in the + # form of a precomputed kernel matrix + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + # compute the training kernel matrix corresponding to the linear kernel + K_train = np.dot(X_[:180], X_[:180].T) + y_train = y_[:180] + + clf = SVC(kernel="precomputed") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(K_train, y_train) + + assert cv.best_score_ >= 0 + + # compute the test kernel matrix + K_test = np.dot(X_[180:], X_[:180].T) + y_test = y_[180:] + + y_pred = cv.predict(K_test) + + assert np.mean(y_pred == y_test) >= 0 + + # test error is raised when the precomputed kernel is not array-like + # or sparse + with pytest.raises(ValueError): + cv.fit(K_train.tolist(), y_train) + + +def test_grid_search_precomputed_kernel_error_nonsquare(): + # Test that grid search returns an error with a non-square precomputed + # training kernel matrix + K_train = np.zeros((10, 20)) + y_train = np.ones((10,)) + clf = SVC(kernel="precomputed") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + with pytest.raises(ValueError): + cv.fit(K_train, y_train) + + +class BrokenClassifier(BaseEstimator): + """Broken classifier that cannot be fit twice""" + + def __init__(self, parameter=None): + self.parameter = parameter + + def fit(self, X, y): + assert not hasattr(self, "has_been_fit_") + self.has_been_fit_ = True + + 
def predict(self, X): + return np.zeros(X.shape[0]) + + +@ignore_warnings +def test_refit(): + # Regression test for bug in refitting + # Simulates re-fitting a broken estimator; this used to break with + # sparse SVMs. + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = GridSearchCV( + BrokenClassifier(), [{"parameter": [0, 1]}], scoring="precision", refit=True + ) + clf.fit(X, y) + + +def test_refit_callable(): + """ + Test refit=callable, which adds flexibility in identifying the + "best" estimator. + """ + + def refit_callable(cv_results): + """ + A dummy function tests `refit=callable` interface. + Return the index of a model that has the least + `mean_test_score`. + """ + # Fit a dummy clf with `refit=True` to get a list of keys in + # clf.cv_results_. + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring="precision", + refit=True, + ) + clf.fit(X, y) + # Ensure that `best_index_ != 0` for this dummy clf + assert clf.best_index_ != 0 + + # Assert every key matches those in `cv_results` + for key in clf.cv_results_.keys(): + assert key in cv_results + + return cv_results["mean_test_score"].argmin() + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring="precision", + refit=refit_callable, + ) + clf.fit(X, y) + + assert clf.best_index_ == 0 + # Ensure `best_score_` is disabled when using `refit=callable` + assert not hasattr(clf, "best_score_") + + +def test_refit_callable_invalid_type(): + """ + Test implementation catches the errors when 'best_index_' returns an + invalid result. + """ + + def refit_callable_invalid_type(cv_results): + """ + A dummy function tests when returned 'best_index_' is not integer. + """ + return None + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring="precision", + refit=refit_callable_invalid_type, + ) + with pytest.raises(TypeError, match="best_index_ returned is not an integer"): + clf.fit(X, y) + + +@pytest.mark.parametrize("out_bound_value", [-1, 2]) +@pytest.mark.parametrize("search_cv", [RandomizedSearchCV, GridSearchCV]) +def test_refit_callable_out_bound(out_bound_value, search_cv): + """ + Test implementation catches the errors when 'best_index_' returns an + out of bound result. + """ + + def refit_callable_out_bound(cv_results): + """ + A dummy function tests when returned 'best_index_' is out of bounds. + """ + return out_bound_value + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + + clf = search_cv( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring="precision", + refit=refit_callable_out_bound, + ) + with pytest.raises(IndexError, match="best_index_ index out of range"): + clf.fit(X, y) + + +def test_refit_callable_multi_metric(): + """ + Test refit=callable in multiple metric evaluation setting + """ + + def refit_callable(cv_results): + """ + A dummy function tests `refit=callable` interface. + Return the index of a model that has the least + `mean_test_prec`. 
+ """ + assert "mean_test_prec" in cv_results + return cv_results["mean_test_prec"].argmin() + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + scoring = {"Accuracy": make_scorer(accuracy_score), "prec": "precision"} + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring=scoring, + refit=refit_callable, + ) + clf.fit(X, y) + + assert clf.best_index_ == 0 + # Ensure `best_score_` is disabled when using `refit=callable` + assert not hasattr(clf, "best_score_") + + +def test_gridsearch_nd(): + # Pass X as list in GridSearchCV + X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) + y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) + + def check_X(x): + return x.shape[1:] == (5, 3, 2) + + def check_y(x): + return x.shape[1:] == (7, 11) + + clf = CheckingClassifier( + check_X=check_X, + check_y=check_y, + methods_to_check=["fit"], + ) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}) + grid_search.fit(X_4d, y_3d).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_X_as_list(): + # Pass X as list in GridSearchCV + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = CheckingClassifier( + check_X=lambda x: isinstance(x, list), + methods_to_check=["fit"], + ) + cv = KFold(n_splits=3) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv) + grid_search.fit(X.tolist(), y).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_y_as_list(): + # Pass y as list in GridSearchCV + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = CheckingClassifier( + check_y=lambda x: isinstance(x, list), + methods_to_check=["fit"], + ) + cv = KFold(n_splits=3) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv) + grid_search.fit(X, y.tolist()).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +@ignore_warnings +def test_pandas_input(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((DataFrame, Series)) + except ImportError: + pass + + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + for InputFeatureType, TargetType in types: + # X dataframe, y series + X_df, y_ser = InputFeatureType(X), TargetType(y) + + def check_df(x): + return isinstance(x, InputFeatureType) + + def check_series(x): + return isinstance(x, TargetType) + + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}) + grid_search.fit(X_df, y_ser).score(X_df, y_ser) + grid_search.predict(X_df) + assert hasattr(grid_search, "cv_results_") + + +def test_unsupervised_grid_search(): + # test grid-search with unsupervised estimator + X, y = make_blobs(n_samples=50, random_state=0) + km = KMeans(random_state=0, init="random", n_init=1) + + # Multi-metric evaluation unsupervised + scoring = ["adjusted_rand_score", "fowlkes_mallows_score"] + for refit in ["adjusted_rand_score", "fowlkes_mallows_score"]: + grid_search = GridSearchCV( + km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit + ) + grid_search.fit(X, y) + # Both ARI and FMS can find the right number :) + assert grid_search.best_params_["n_clusters"] == 3 + + # Single metric evaluation unsupervised + grid_search = GridSearchCV( + km, param_grid=dict(n_clusters=[2, 3, 4]), scoring="fowlkes_mallows_score" + ) + grid_search.fit(X, y) + assert 
grid_search.best_params_["n_clusters"] == 3 + + # Now without a score, and without y + grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) + grid_search.fit(X) + assert grid_search.best_params_["n_clusters"] == 4 + + +def test_gridsearch_no_predict(): + # test grid-search with an estimator without predict. + # slight duplication of a test from KDE + def custom_scoring(estimator, X): + return 42 if estimator.bandwidth == 0.1 else 0 + + X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) + search = GridSearchCV( + KernelDensity(), + param_grid=dict(bandwidth=[0.01, 0.1, 1]), + scoring=custom_scoring, + ) + search.fit(X) + assert search.best_params_["bandwidth"] == 0.1 + assert search.best_score_ == 42 + + +def test_param_sampler(): + # test basic properties of param sampler + param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=10, random_state=0 + ) + samples = [x for x in sampler] + assert len(samples) == 10 + for sample in samples: + assert sample["kernel"] in ["rbf", "linear"] + assert 0 <= sample["C"] <= 1 + + # test that repeated calls yield identical parameters + param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=3, random_state=0 + ) + assert [x for x in sampler] == [x for x in sampler] + + param_distributions = {"C": uniform(0, 1)} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=10, random_state=0 + ) + assert [x for x in sampler] == [x for x in sampler] + + +def check_cv_results_array_types(search, param_keys, score_keys): + # Check if the search `cv_results`'s array are of correct types + cv_results = search.cv_results_ + assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) + assert all(cv_results[key].dtype == object for key in param_keys) + assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) + assert all( + cv_results[key].dtype == np.float64 + for key in score_keys + if not key.startswith("rank") + ) + + scorer_keys = search.scorer_.keys() if search.multimetric_ else ["score"] + + for key in scorer_keys: + assert cv_results["rank_test_%s" % key].dtype == np.int32 + + +def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand, extra_keys=()): + # Test the search.cv_results_ contains all the required results + all_keys = param_keys + score_keys + extra_keys + assert_array_equal(sorted(cv_results.keys()), sorted(all_keys + ("params",))) + assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) + + +def test_grid_search_cv_results(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_grid_points = 6 + params = [ + dict( + kernel=[ + "rbf", + ], + C=[1, 10], + gamma=[0.1, 1], + ), + dict( + kernel=[ + "poly", + ], + degree=[1, 2], + ), + ] + + param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel") + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + n_candidates = n_grid_points + + search = GridSearchCV(SVC(), cv=3, param_grid=params, return_train_score=True) + search.fit(X, y) + 
cv_results = search.cv_results_ + # Check if score and timing are reasonable + assert all(cv_results["rank_test_score"] >= 1) + assert (all(cv_results[k] >= 0) for k in score_keys if k != "rank_test_score") + assert ( + all(cv_results[k] <= 1) + for k in score_keys + if "time" not in k and k != "rank_test_score" + ) + # Check cv_results structure + check_cv_results_array_types(search, param_keys, score_keys) + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) + # Check masking + cv_results = search.cv_results_ + + poly_results = [ + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ] + assert all(poly_results) + assert len(poly_results) == 2 + + rbf_results = [ + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ] + assert all(rbf_results) + assert len(rbf_results) == 4 + + +def test_random_search_cv_results(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_search_iter = 30 + + params = [ + {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)}, + {"kernel": ["poly"], "degree": [2, 3]}, + ] + param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel") + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + n_candidates = n_search_iter + + search = RandomizedSearchCV( + SVC(), + n_iter=n_search_iter, + cv=3, + param_distributions=params, + return_train_score=True, + ) + search.fit(X, y) + cv_results = search.cv_results_ + # Check results structure + check_cv_results_array_types(search, param_keys, score_keys) + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) + assert all( + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ) + assert all( + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ) + + +@pytest.mark.parametrize( + "SearchCV, specialized_params", + [ + (GridSearchCV, {"param_grid": {"C": [1, 10]}}), + (RandomizedSearchCV, {"param_distributions": {"C": [1, 10]}, "n_iter": 2}), + ], +) +def test_search_default_iid(SearchCV, specialized_params): + # Test the IID parameter TODO: Clearly this test does something else??? + # noise-free simple 2d-data + X, y = make_blobs( + centers=[[0, 0], [1, 0], [0, 1], [1, 1]], + random_state=0, + cluster_std=0.1, + shuffle=False, + n_samples=80, + ) + # split dataset into two folds that are not iid + # first one contains data of all 4 blobs, second only from two. 
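+    # Two complementary boolean masks are used directly as the (train, test)
+    # index pairs of a hand-crafted 2-split CV.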
+ mask = np.ones(X.shape[0], dtype=bool) + mask[np.where(y == 1)[0][::2]] = 0 + mask[np.where(y == 2)[0][::2]] = 0 + # this leads to perfect classification on one fold and a score of 1/3 on + # the other + # create "cv" for splits + cv = [[mask, ~mask], [~mask, mask]] + + common_params = {"estimator": SVC(), "cv": cv, "return_train_score": True} + search = SearchCV(**common_params, **specialized_params) + search.fit(X, y) + + test_cv_scores = np.array( + [ + search.cv_results_["split%d_test_score" % s][0] + for s in range(search.n_splits_) + ] + ) + test_mean = search.cv_results_["mean_test_score"][0] + test_std = search.cv_results_["std_test_score"][0] + + train_cv_scores = np.array( + [ + search.cv_results_["split%d_train_score" % s][0] + for s in range(search.n_splits_) + ] + ) + train_mean = search.cv_results_["mean_train_score"][0] + train_std = search.cv_results_["std_train_score"][0] + + assert search.cv_results_["param_C"][0] == 1 + # scores are the same as above + assert_allclose(test_cv_scores, [1, 1.0 / 3.0]) + assert_allclose(train_cv_scores, [1, 1]) + # Unweighted mean/std is used + assert test_mean == pytest.approx(np.mean(test_cv_scores)) + assert test_std == pytest.approx(np.std(test_cv_scores)) + + # For the train scores, we do not take a weighted mean irrespective of + # i.i.d. or not + assert train_mean == pytest.approx(1) + assert train_std == pytest.approx(0) + + +def test_grid_search_cv_results_multimetric(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_splits = 3 + params = [ + dict( + kernel=[ + "rbf", + ], + C=[1, 10], + gamma=[0.1, 1], + ), + dict( + kernel=[ + "poly", + ], + degree=[1, 2], + ), + ] + + grid_searches = [] + for scoring in ( + {"accuracy": make_scorer(accuracy_score), "recall": make_scorer(recall_score)}, + "accuracy", + "recall", + ): + grid_search = GridSearchCV( + SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False + ) + grid_search.fit(X, y) + grid_searches.append(grid_search) + + compare_cv_results_multimetric_with_single(*grid_searches) + + +def test_random_search_cv_results_multimetric(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_splits = 3 + n_search_iter = 30 + + # Scipy 0.12's stats dists do not accept seed, hence we use param grid + params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) + for refit in (True, False): + random_searches = [] + for scoring in (("accuracy", "recall"), "accuracy", "recall"): + # If True, for multi-metric pass refit='accuracy' + if refit: + probability = True + refit = "accuracy" if isinstance(scoring, tuple) else refit + else: + probability = False + clf = SVC(probability=probability, random_state=42) + random_search = RandomizedSearchCV( + clf, + n_iter=n_search_iter, + cv=n_splits, + param_distributions=params, + scoring=scoring, + refit=refit, + random_state=0, + ) + random_search.fit(X, y) + random_searches.append(random_search) + + compare_cv_results_multimetric_with_single(*random_searches) + compare_refit_methods_when_refit_with_acc( + random_searches[0], random_searches[1], refit + ) + + +def compare_cv_results_multimetric_with_single(search_multi, search_acc, search_rec): + """Compare multi-metric cv_results with the ensemble of multiple + single metric cv_results from single metric grid/random search""" + + assert search_multi.multimetric_ + assert_array_equal(sorted(search_multi.scorer_), ("accuracy", "recall")) + + cv_results_multi = search_multi.cv_results_ + cv_results_acc_rec = { + 
re.sub("_score$", "_accuracy", k): v for k, v in search_acc.cv_results_.items() + } + cv_results_acc_rec.update( + {re.sub("_score$", "_recall", k): v for k, v in search_rec.cv_results_.items()} + ) + + # Check if score and timing are reasonable, also checks if the keys + # are present + assert all( + ( + np.all(cv_results_multi[k] <= 1) + for k in ( + "mean_score_time", + "std_score_time", + "mean_fit_time", + "std_fit_time", + ) + ) + ) + + # Compare the keys, other than time keys, among multi-metric and + # single metric grid search results. np.testing.assert_equal performs a + # deep nested comparison of the two cv_results dicts + np.testing.assert_equal( + {k: v for k, v in cv_results_multi.items() if not k.endswith("_time")}, + {k: v for k, v in cv_results_acc_rec.items() if not k.endswith("_time")}, + ) + + +def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit): + """Compare refit multi-metric search methods with single metric methods""" + assert search_acc.refit == refit + if refit: + assert search_multi.refit == "accuracy" + else: + assert not search_multi.refit + return # search cannot predict/score without refit + + X, y = make_blobs(n_samples=100, n_features=4, random_state=42) + for method in ("predict", "predict_proba", "predict_log_proba"): + assert_almost_equal( + getattr(search_multi, method)(X), getattr(search_acc, method)(X) + ) + assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y)) + for key in ("best_index_", "best_score_", "best_params_"): + assert getattr(search_multi, key) == getattr(search_acc, key) + + +@pytest.mark.parametrize( + "search_cv", + [ + RandomizedSearchCV( + estimator=DecisionTreeClassifier(), + param_distributions={"max_depth": [5, 10]}, + ), + GridSearchCV( + estimator=DecisionTreeClassifier(), param_grid={"max_depth": [5, 10]} + ), + ], +) +def test_search_cv_score_samples_error(search_cv): + X, y = make_blobs(n_samples=100, n_features=4, random_state=42) + search_cv.fit(X, y) + + # Make sure to error out when underlying estimator does not implement + # the method `score_samples` + outer_msg = f"'{search_cv.__class__.__name__}' has no attribute 'score_samples'" + inner_msg = "'DecisionTreeClassifier' object has no attribute 'score_samples'" + + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + search_cv.score_samples(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg == str(exec_info.value.__cause__) + + +@pytest.mark.parametrize( + "search_cv", + [ + RandomizedSearchCV( + estimator=LocalOutlierFactor(novelty=True), + param_distributions={"n_neighbors": [5, 10]}, + scoring="precision", + ), + GridSearchCV( + estimator=LocalOutlierFactor(novelty=True), + param_grid={"n_neighbors": [5, 10]}, + scoring="precision", + ), + ], +) +def test_search_cv_score_samples_method(search_cv): + # Set parameters + rng = np.random.RandomState(42) + n_samples = 300 + outliers_fraction = 0.15 + n_outliers = int(outliers_fraction * n_samples) + n_inliers = n_samples - n_outliers + + # Create dataset + X = make_blobs( + n_samples=n_inliers, + n_features=2, + centers=[[0, 0], [0, 0]], + cluster_std=0.5, + random_state=0, + )[0] + # Add some noisy points + X = np.concatenate([X, rng.uniform(low=-6, high=6, size=(n_outliers, 2))], axis=0) + + # Define labels to be able to score the estimator with `search_cv` + y_true = np.array([1] * n_samples) + y_true[-n_outliers:] = -1 + + # Fit on data + search_cv.fit(X, y_true) + + # Verify that the stand alone estimator yields the same 
results + # as the ones obtained with *SearchCV + assert_allclose( + search_cv.score_samples(X), search_cv.best_estimator_.score_samples(X) + ) + + +def test_search_cv_results_rank_tie_breaking(): + X, y = make_blobs(n_samples=50, random_state=42) + + # The two C values are close enough to give similar models + # which would result in a tie of their mean cv-scores + param_grid = {"C": [1, 1.001, 0.001]} + + grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True) + random_search = RandomizedSearchCV( + SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True + ) + + for search in (grid_search, random_search): + search.fit(X, y) + cv_results = search.cv_results_ + # Check tie breaking strategy - + # Check that there is a tie in the mean scores between + # candidates 1 and 2 alone + assert_almost_equal( + cv_results["mean_test_score"][0], cv_results["mean_test_score"][1] + ) + assert_almost_equal( + cv_results["mean_train_score"][0], cv_results["mean_train_score"][1] + ) + assert not np.allclose( + cv_results["mean_test_score"][1], cv_results["mean_test_score"][2] + ) + assert not np.allclose( + cv_results["mean_train_score"][1], cv_results["mean_train_score"][2] + ) + # 'min' rank should be assigned to the tied candidates + assert_almost_equal(search.cv_results_["rank_test_score"], [1, 1, 3]) + + +def test_search_cv_results_none_param(): + X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1] + estimators = (DecisionTreeRegressor(), DecisionTreeClassifier()) + est_parameters = {"random_state": [0, None]} + cv = KFold() + + for est in estimators: + grid_search = GridSearchCV( + est, + est_parameters, + cv=cv, + ).fit(X, y) + assert_array_equal(grid_search.cv_results_["param_random_state"], [0, None]) + + +@ignore_warnings() +def test_search_cv_timing(): + svc = LinearSVC(dual="auto", random_state=0) + + X = [ + [ + 1, + ], + [ + 2, + ], + [ + 3, + ], + [ + 4, + ], + ] + y = [0, 1, 1, 0] + + gs = GridSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0) + rs = RandomizedSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0, n_iter=2) + + for search in (gs, rs): + search.fit(X, y) + for key in ["mean_fit_time", "std_fit_time"]: + # NOTE The precision of time.time in windows is not high + # enough for the fit/score times to be non-zero for trivial X and y + assert np.all(search.cv_results_[key] >= 0) + assert np.all(search.cv_results_[key] < 1) + + for key in ["mean_score_time", "std_score_time"]: + assert search.cv_results_[key][1] >= 0 + assert search.cv_results_[key][0] == 0.0 + assert np.all(search.cv_results_[key] < 1) + + assert hasattr(search, "refit_time_") + assert isinstance(search.refit_time_, float) + assert search.refit_time_ >= 0 + + +def test_grid_search_correct_score_results(): + # test that correct scores are used + n_splits = 3 + clf = LinearSVC(dual="auto", random_state=0) + X, y = make_blobs(random_state=0, centers=2) + Cs = [0.1, 1, 10] + for score in ["f1", "roc_auc"]: + grid_search = GridSearchCV(clf, {"C": Cs}, scoring=score, cv=n_splits) + cv_results = grid_search.fit(X, y).cv_results_ + + # Test scorer names + result_keys = list(cv_results.keys()) + expected_keys = ("mean_test_score", "rank_test_score") + tuple( + "split%d_test_score" % cv_i for cv_i in range(n_splits) + ) + assert all(np.isin(expected_keys, result_keys)) + + cv = StratifiedKFold(n_splits=n_splits) + n_splits = grid_search.n_splits_ + for candidate_i, C in enumerate(Cs): + clf.set_params(C=C) + cv_scores = np.array( + [ + grid_search.cv_results_["split%d_test_score" % 
s][candidate_i] + for s in range(n_splits) + ] + ) + for i, (train, test) in enumerate(cv.split(X, y)): + clf.fit(X[train], y[train]) + if score == "f1": + correct_score = f1_score(y[test], clf.predict(X[test])) + elif score == "roc_auc": + dec = clf.decision_function(X[test]) + correct_score = roc_auc_score(y[test], dec) + assert_almost_equal(correct_score, cv_scores[i]) + + +def test_pickle(): + # Test that a fit search can be pickled + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=True, cv=3) + grid_search.fit(X, y) + grid_search_pickled = pickle.loads(pickle.dumps(grid_search)) + assert_array_almost_equal(grid_search.predict(X), grid_search_pickled.predict(X)) + + random_search = RandomizedSearchCV( + clf, {"foo_param": [1, 2, 3]}, refit=True, n_iter=3, cv=3 + ) + random_search.fit(X, y) + random_search_pickled = pickle.loads(pickle.dumps(random_search)) + assert_array_almost_equal( + random_search.predict(X), random_search_pickled.predict(X) + ) + + +def test_grid_search_with_multioutput_data(): + # Test search with multi-output estimator + + X, y = make_multilabel_classification(return_indicator=True, random_state=0) + + est_parameters = {"max_depth": [1, 2, 3, 4]} + cv = KFold() + + estimators = [ + DecisionTreeRegressor(random_state=0), + DecisionTreeClassifier(random_state=0), + ] + + # Test with grid search cv + for est in estimators: + grid_search = GridSearchCV(est, est_parameters, cv=cv) + grid_search.fit(X, y) + res_params = grid_search.cv_results_["params"] + for cand_i in range(len(res_params)): + est.set_params(**res_params[cand_i]) + + for i, (train, test) in enumerate(cv.split(X, y)): + est.fit(X[train], y[train]) + correct_score = est.score(X[test], y[test]) + assert_almost_equal( + correct_score, + grid_search.cv_results_["split%d_test_score" % i][cand_i], + ) + + # Test with a randomized search + for est in estimators: + random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) + random_search.fit(X, y) + res_params = random_search.cv_results_["params"] + for cand_i in range(len(res_params)): + est.set_params(**res_params[cand_i]) + + for i, (train, test) in enumerate(cv.split(X, y)): + est.fit(X[train], y[train]) + correct_score = est.score(X[test], y[test]) + assert_almost_equal( + correct_score, + random_search.cv_results_["split%d_test_score" % i][cand_i], + ) + + +def test_predict_proba_disabled(): + # Test predict_proba when disabled on estimator. 
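+    # With probability=False, SVC exposes no usable predict_proba, so the
+    # fitted search should not expose the method either.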
+ X = np.arange(20).reshape(5, -1) + y = [0, 0, 1, 1, 1] + clf = SVC(probability=False) + gs = GridSearchCV(clf, {}, cv=2).fit(X, y) + assert not hasattr(gs, "predict_proba") + + +def test_grid_search_allows_nans(): + # Test GridSearchCV with SimpleImputer + X = np.arange(20, dtype=np.float64).reshape(5, -1) + X[2, :] = np.nan + y = [0, 0, 1, 1, 1] + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + GridSearchCV(p, {"classifier__foo_param": [1, 2, 3]}, cv=2).fit(X, y) + + +class FailingClassifier(BaseEstimator): + """Classifier that raises a ValueError on fit()""" + + FAILING_PARAMETER = 2 + + def __init__(self, parameter=None): + self.parameter = parameter + + def fit(self, X, y=None): + if self.parameter == FailingClassifier.FAILING_PARAMETER: + raise ValueError("Failing classifier failed as required") + + def predict(self, X): + return np.zeros(X.shape[0]) + + def score(self, X=None, Y=None): + return 0.0 + + +def test_grid_search_failing_classifier(): + # GridSearchCV with on_error != 'raise' + # Ensures that a warning is raised and score reset where appropriate. + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + # refit=False because we only want to check that errors caused by fits + # to individual folds will be caught and warnings raised instead. If + # refit was done, then an exception would be raised on refit and not + # caught by grid_search (expected behavior), and this would cause an + # error in this test. + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score=0.0, + ) + + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to 0\.0.+" + "5 fits failed with the following error.+ValueError.+Failing classifier failed" + " as required", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + n_candidates = len(gs.cv_results_["params"]) + + # Ensure that grid scores were set to zero as required for those fits + # that are expected to fail. 
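+    # Helper returning the per-split test scores of candidate `i`.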
+ def get_cand_scores(i): + return np.array( + [gs.cv_results_["split%d_test_score" % s][i] for s in range(gs.n_splits_)] + ) + + assert all( + ( + np.all(get_cand_scores(cand_i) == 0.0) + for cand_i in range(n_candidates) + if gs.cv_results_["param_parameter"][cand_i] + == FailingClassifier.FAILING_PARAMETER + ) + ) + + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score=float("nan"), + ) + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to nan.+" + "5 fits failed with the following error.+ValueError.+Failing classifier failed" + " as required", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + n_candidates = len(gs.cv_results_["params"]) + assert all( + np.all(np.isnan(get_cand_scores(cand_i))) + for cand_i in range(n_candidates) + if gs.cv_results_["param_parameter"][cand_i] + == FailingClassifier.FAILING_PARAMETER + ) + + ranks = gs.cv_results_["rank_test_score"] + + # Check that succeeded estimators have lower ranks + assert ranks[0] <= 2 and ranks[1] <= 2 + # Check that failed estimator has the highest rank + assert ranks[clf.FAILING_PARAMETER] == 3 + assert gs.best_index_ != clf.FAILING_PARAMETER + + +def test_grid_search_classifier_all_fits_fail(): + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + gs = GridSearchCV( + clf, + [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}], + error_score=0.0, + ) + + warning_message = re.compile( + ( + "All the 15 fits failed.+15 fits failed with the following" + " error.+ValueError.+Failing classifier failed as required" + ), + flags=re.DOTALL, + ) + with pytest.raises(ValueError, match=warning_message): + gs.fit(X, y) + + +def test_grid_search_failing_classifier_raise(): + # GridSearchCV with on_error == 'raise' raises the error + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + # refit=False because we want to test the behaviour of the grid search part + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score="raise", + ) + + # FailingClassifier issues a ValueError so this is what we look for. + with pytest.raises(ValueError): + gs.fit(X, y) + + +def test_parameters_sampler_replacement(): + # raise warning if n_iter is bigger than total parameter space + params = [ + {"first": [0, 1], "second": ["a", "b", "c"]}, + {"third": ["two", "values"]}, + ] + sampler = ParameterSampler(params, n_iter=9) + n_iter = 9 + grid_size = 8 + expected_warning = ( + "The total space of parameters %d is smaller " + "than n_iter=%d. Running %d iterations. For " + "exhaustive searches, use GridSearchCV." 
% (grid_size, n_iter, grid_size) + ) + with pytest.warns(UserWarning, match=expected_warning): + list(sampler) + + # degenerates to GridSearchCV if n_iter the same as grid_size + sampler = ParameterSampler(params, n_iter=8) + samples = list(sampler) + assert len(samples) == 8 + for values in ParameterGrid(params): + assert values in samples + assert len(ParameterSampler(params, n_iter=1000)) == 8 + + # test sampling without replacement in a large grid + params = {"a": range(10), "b": range(10), "c": range(10)} + sampler = ParameterSampler(params, n_iter=99, random_state=42) + samples = list(sampler) + assert len(samples) == 99 + hashable_samples = ["a%db%dc%d" % (p["a"], p["b"], p["c"]) for p in samples] + assert len(set(hashable_samples)) == 99 + + # doesn't go into infinite loops + params_distribution = {"first": bernoulli(0.5), "second": ["a", "b", "c"]} + sampler = ParameterSampler(params_distribution, n_iter=7) + samples = list(sampler) + assert len(samples) == 7 + + +def test_stochastic_gradient_loss_param(): + # Make sure the predict_proba works when loss is specified + # as one of the parameters in the param_grid. + param_grid = { + "loss": ["log_loss"], + } + X = np.arange(24).reshape(6, -1) + y = [0, 0, 0, 1, 1, 1] + clf = GridSearchCV( + estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3 + ) + + # When the estimator is not fitted, `predict_proba` is not available as the + # loss is 'hinge'. + assert not hasattr(clf, "predict_proba") + clf.fit(X, y) + clf.predict_proba(X) + clf.predict_log_proba(X) + + # Make sure `predict_proba` is not available when setting loss=['hinge'] + # in param_grid + param_grid = { + "loss": ["hinge"], + } + clf = GridSearchCV( + estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3 + ) + assert not hasattr(clf, "predict_proba") + clf.fit(X, y) + assert not hasattr(clf, "predict_proba") + + +def test_search_train_scores_set_to_false(): + X = np.arange(6).reshape(6, -1) + y = [0, 0, 0, 1, 1, 1] + clf = LinearSVC(dual="auto", random_state=0) + + gs = GridSearchCV(clf, param_grid={"C": [0.1, 0.2]}, cv=3) + gs.fit(X, y) + + +def test_grid_search_cv_splits_consistency(): + # Check if a one time iterable is accepted as a cv parameter. 
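A minimal sketch of the behaviour the next test exercises with OneTimeSplitter: a plain generator of (train, test) splits can only be consumed once, yet the search still scores every candidate because the splits are materialised a single time up front. The estimator and grid are arbitrary placeholders.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, KFold

X_demo, y_demo = make_classification(n_samples=60, random_state=0)
split_gen = KFold(n_splits=3).split(X_demo, y_demo)  # a one-shot generator
gs_demo = GridSearchCV(LogisticRegression(max_iter=1000),
                       {"C": [0.1, 1.0, 10.0]}, cv=split_gen)
gs_demo.fit(X_demo, y_demo)
print(len(gs_demo.cv_results_["params"]))  # 3 candidates, all evaluated
print(list(split_gen))                     # [], the generator was consumed once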
+ n_samples = 100 + n_splits = 5 + X, y = make_classification(n_samples=n_samples, random_state=0) + + gs = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + return_train_score=True, + ) + gs.fit(X, y) + + gs2 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits), + return_train_score=True, + ) + gs2.fit(X, y) + + # Give generator as a cv parameter + assert isinstance( + KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), + GeneratorType, + ) + gs3 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), + return_train_score=True, + ) + gs3.fit(X, y) + + gs4 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits, shuffle=True, random_state=0), + return_train_score=True, + ) + gs4.fit(X, y) + + def _pop_time_keys(cv_results): + for key in ( + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ): + cv_results.pop(key) + return cv_results + + # Check if generators are supported as cv and + # that the splits are consistent + np.testing.assert_equal( + _pop_time_keys(gs3.cv_results_), _pop_time_keys(gs4.cv_results_) + ) + + # OneTimeSplitter is a non-re-entrant cv where split can be called only + # once if ``cv.split`` is called once per param setting in GridSearchCV.fit + # the 2nd and 3rd parameter will not be evaluated as no train/test indices + # will be generated for the 2nd and subsequent cv.split calls. + # This is a check to make sure cv.split is not called once per param + # setting. 
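A minimal sketch making the comment above concrete, assuming a hypothetical CountingKFold helper that records how often split() is called: the search reuses one set of splits for all parameter settings rather than re-splitting per candidate.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, KFold


class CountingKFold(KFold):
    # Hypothetical helper: counts how many times split() is invoked.
    n_split_calls = 0

    def split(self, X, y=None, groups=None):
        type(self).n_split_calls += 1
        return super().split(X, y, groups)


X_demo, y_demo = make_classification(n_samples=60, random_state=0)
GridSearchCV(LogisticRegression(max_iter=1000), {"C": [0.1, 1.0, 10.0]},
             cv=CountingKFold(n_splits=3)).fit(X_demo, y_demo)
print(CountingKFold.n_split_calls)  # expected: 1, the same splits serve all three candidates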
+ np.testing.assert_equal( + {k: v for k, v in gs.cv_results_.items() if not k.endswith("_time")}, + {k: v for k, v in gs2.cv_results_.items() if not k.endswith("_time")}, + ) + + # Check consistency of folds across the parameters + gs = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.1, 0.2, 0.2]}, + cv=KFold(n_splits=n_splits, shuffle=True), + return_train_score=True, + ) + gs.fit(X, y) + + # As the first two param settings (C=0.1) and the next two param + # settings (C=0.2) are same, the test and train scores must also be + # same as long as the same train/test indices are generated for all + # the cv splits, for both param setting + for score_type in ("train", "test"): + per_param_scores = {} + for param_i in range(4): + per_param_scores[param_i] = [ + gs.cv_results_["split%d_%s_score" % (s, score_type)][param_i] + for s in range(5) + ] + + assert_array_almost_equal(per_param_scores[0], per_param_scores[1]) + assert_array_almost_equal(per_param_scores[2], per_param_scores[3]) + + +def test_transform_inverse_transform_round_trip(): + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3) + + grid_search.fit(X, y) + X_round_trip = grid_search.inverse_transform(grid_search.transform(X)) + assert_array_equal(X, X_round_trip) + + +def test_custom_run_search(): + def check_results(results, gscv): + exp_results = gscv.cv_results_ + assert sorted(results.keys()) == sorted(exp_results) + for k in results: + if not k.endswith("_time"): + # XXX: results['params'] is a list :| + results[k] = np.asanyarray(results[k]) + if results[k].dtype.kind == "O": + assert_array_equal( + exp_results[k], results[k], err_msg="Checking " + k + ) + else: + assert_allclose(exp_results[k], results[k], err_msg="Checking " + k) + + def fit_grid(param_grid): + return GridSearchCV(clf, param_grid, return_train_score=True).fit(X, y) + + class CustomSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + def _run_search(self, evaluate): + results = evaluate([{"max_depth": 1}, {"max_depth": 2}]) + check_results(results, fit_grid({"max_depth": [1, 2]})) + results = evaluate([{"min_samples_split": 5}, {"min_samples_split": 10}]) + check_results( + results, + fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}]), + ) + + # Using regressor to make sure each score differs + clf = DecisionTreeRegressor(random_state=0) + X, y = make_classification(n_samples=100, n_informative=4, random_state=0) + mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y) + gscv = fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}]) + + results = mycv.cv_results_ + check_results(results, gscv) + for attr in dir(gscv): + if ( + attr[0].islower() + and attr[-1:] == "_" + and attr + not in { + "cv_results_", + "best_estimator_", + "refit_time_", + "classes_", + "scorer_", + } + ): + assert getattr(gscv, attr) == getattr(mycv, attr), ( + "Attribute %s not equal" % attr + ) + + +def test__custom_fit_no_run_search(): + class NoRunSearchSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + def fit(self, X, y=None, groups=None, **fit_params): + return self + + # this should not raise any exceptions + NoRunSearchSearchCV(SVC()).fit(X, y) + + class BadSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + with pytest.raises(NotImplementedError, match="_run_search not implemented."): + # 
this should raise a NotImplementedError + BadSearchCV(SVC()).fit(X, y) + + +def test_empty_cv_iterator_error(): + # Use global X, y + + # create cv + cv = KFold(n_splits=3).split(X) + + # pop all of it, this should cause the expected ValueError + [u for u in cv] + # cv is empty now + + train_size = 100 + ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) + + # assert that this raises an error + with pytest.raises( + ValueError, + match=( + "No fits were performed. " + "Was the CV iterator empty\\? " + "Were there no candidates\\?" + ), + ): + ridge.fit(X[:train_size], y[:train_size]) + + +def test_random_search_bad_cv(): + # Use global X, y + + class BrokenKFold(KFold): + def get_n_splits(self, *args, **kw): + return 1 + + # create bad cv + cv = BrokenKFold(n_splits=3) + + train_size = 100 + ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) + + # assert that this raises an error + with pytest.raises( + ValueError, + match=( + "cv.split and cv.get_n_splits returned " + "inconsistent results. Expected \\d+ " + "splits, got \\d+" + ), + ): + ridge.fit(X[:train_size], y[:train_size]) + + +@pytest.mark.parametrize("return_train_score", [False, True]) +@pytest.mark.parametrize( + "SearchCV, specialized_params", + [ + (GridSearchCV, {"param_grid": {"max_depth": [2, 3, 5, 8]}}), + ( + RandomizedSearchCV, + {"param_distributions": {"max_depth": [2, 3, 5, 8]}, "n_iter": 4}, + ), + ], +) +def test_searchcv_raise_warning_with_non_finite_score( + SearchCV, specialized_params, return_train_score +): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/10529 + # Check that we raise a UserWarning when a non-finite score is + # computed in the SearchCV + X, y = make_classification(n_classes=2, random_state=0) + + class FailingScorer: + """Scorer that will fail for some split but not all.""" + + def __init__(self): + self.n_counts = 0 + + def __call__(self, estimator, X, y): + self.n_counts += 1 + if self.n_counts % 5 == 0: + return np.nan + return 1 + + grid = SearchCV( + DecisionTreeClassifier(), + scoring=FailingScorer(), + cv=3, + return_train_score=return_train_score, + **specialized_params, + ) + + with pytest.warns(UserWarning) as warn_msg: + grid.fit(X, y) + + set_with_warning = ["test", "train"] if return_train_score else ["test"] + assert len(warn_msg) == len(set_with_warning) + for msg, dataset in zip(warn_msg, set_with_warning): + assert f"One or more of the {dataset} scores are non-finite" in str(msg.message) + + # all non-finite scores should be equally ranked last + last_rank = grid.cv_results_["rank_test_score"].max() + non_finite_mask = np.isnan(grid.cv_results_["mean_test_score"]) + assert_array_equal(grid.cv_results_["rank_test_score"][non_finite_mask], last_rank) + # all finite scores should be better ranked than the non-finite scores + assert np.all(grid.cv_results_["rank_test_score"][~non_finite_mask] < last_rank) + + +def test_callable_multimetric_confusion_matrix(): + # Test callable with many metrics inserts the correct names and metrics + # into the search cv object + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + cm = confusion_matrix(y, y_pred) + return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search = GridSearchCV(est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="fp") + + search.fit(X, y) + + score_names = ["tn", "fp", 
"fn", "tp"] + for name in score_names: + assert "mean_test_{}".format(name) in search.cv_results_ + + y_pred = search.predict(X) + cm = confusion_matrix(y, y_pred) + assert search.score(X, y) == pytest.approx(cm[0, 1]) + + +def test_callable_multimetric_same_as_list_of_strings(): + # Test callable multimetric is the same as a list of strings + def custom_scorer(est, X, y): + y_pred = est.predict(X) + return { + "recall": recall_score(y, y_pred), + "accuracy": accuracy_score(y, y_pred), + } + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search_callable = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="recall" + ) + search_str = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=["recall", "accuracy"], refit="recall" + ) + + search_callable.fit(X, y) + search_str.fit(X, y) + + assert search_callable.best_score_ == pytest.approx(search_str.best_score_) + assert search_callable.best_index_ == search_str.best_index_ + assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y)) + + +def test_callable_single_metric_same_as_single_string(): + # Tests callable scorer is the same as scoring with a single string + def custom_scorer(est, X, y): + y_pred = est.predict(X) + return recall_score(y, y_pred) + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search_callable = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=custom_scorer, refit=True + ) + search_str = GridSearchCV(est, {"C": [0.1, 1]}, scoring="recall", refit="recall") + search_list_str = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=["recall"], refit="recall" + ) + search_callable.fit(X, y) + search_str.fit(X, y) + search_list_str.fit(X, y) + + assert search_callable.best_score_ == pytest.approx(search_str.best_score_) + assert search_callable.best_index_ == search_str.best_index_ + assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y)) + + assert search_list_str.best_score_ == pytest.approx(search_str.best_score_) + assert search_list_str.best_index_ == search_str.best_index_ + assert search_list_str.score(X, y) == pytest.approx(search_str.score(X, y)) + + +def test_callable_multimetric_error_on_invalid_key(): + # Raises when the callable scorer does not return a dict with `refit` key. 
+ def bad_scorer(est, X, y): + return {"bad_name": 1} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring=bad_scorer, + refit="good_name", + ) + + msg = ( + "For multi-metric scoring, the parameter refit must be set to a " + "scorer key or a callable to refit" + ) + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_callable_multimetric_error_failing_clf(): + # Warns when there is an estimator the fails to fit with a float + # error_score + def custom_scorer(est, X, y): + return {"acc": 1} + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring=custom_scorer, + refit=False, + error_score=0.1, + ) + + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to 0\.1", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + + assert_allclose(gs.cv_results_["mean_test_acc"], [1, 1, 0.1]) + + +def test_callable_multimetric_clf_all_fits_fail(): + # Warns and raises when all estimator fails to fit. + def custom_scorer(est, X, y): + return {"acc": 1} + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + gs = GridSearchCV( + clf, + [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}], + scoring=custom_scorer, + refit=False, + error_score=0.1, + ) + + individual_fit_error_message = "ValueError: Failing classifier failed as required" + error_message = re.compile( + ( + "All the 15 fits failed.+your model is misconfigured.+" + f"{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.raises(ValueError, match=error_message): + gs.fit(X, y) + + +def test_n_features_in(): + # make sure grid search and random search delegate n_features_in to the + # best estimator + n_features = 4 + X, y = make_classification(n_features=n_features) + gbdt = HistGradientBoostingClassifier() + param_grid = {"max_iter": [3, 4]} + gs = GridSearchCV(gbdt, param_grid) + rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1) + assert not hasattr(gs, "n_features_in_") + assert not hasattr(rs, "n_features_in_") + gs.fit(X, y) + rs.fit(X, y) + assert gs.n_features_in_ == n_features + assert rs.n_features_in_ == n_features + + +@pytest.mark.parametrize("pairwise", [True, False]) +def test_search_cv_pairwise_property_delegated_to_base_estimator(pairwise): + """ + Test implementation of BaseSearchCV has the pairwise tag + which matches the pairwise tag of its estimator. + This test make sure pairwise tag is delegated to the base estimator. + + Non-regression test for issue #13920. + """ + + class TestEstimator(BaseEstimator): + def _more_tags(self): + return {"pairwise": pairwise} + + est = TestEstimator() + attr_message = "BaseSearchCV pairwise tag must match estimator" + cv = GridSearchCV(est, {"n_neighbors": [10]}) + assert pairwise == cv._get_tags()["pairwise"], attr_message + + +def test_search_cv__pairwise_property_delegated_to_base_estimator(): + """ + Test implementation of BaseSearchCV has the pairwise property + which matches the pairwise tag of its estimator. + This test make sure pairwise tag is delegated to the base estimator. + + Non-regression test for issue #13920. 
+ """ + + class EstimatorPairwise(BaseEstimator): + def __init__(self, pairwise=True): + self.pairwise = pairwise + + def _more_tags(self): + return {"pairwise": self.pairwise} + + est = EstimatorPairwise() + attr_message = "BaseSearchCV _pairwise property must match estimator" + + for _pairwise_setting in [True, False]: + est.set_params(pairwise=_pairwise_setting) + cv = GridSearchCV(est, {"n_neighbors": [10]}) + assert _pairwise_setting == cv._get_tags()["pairwise"], attr_message + + +def test_search_cv_pairwise_property_equivalence_of_precomputed(): + """ + Test implementation of BaseSearchCV has the pairwise tag + which matches the pairwise tag of its estimator. + This test ensures the equivalence of 'precomputed'. + + Non-regression test for issue #13920. + """ + n_samples = 50 + n_splits = 2 + X, y = make_classification(n_samples=n_samples, random_state=0) + grid_params = {"n_neighbors": [10]} + + # defaults to euclidean metric (minkowski p = 2) + clf = KNeighborsClassifier() + cv = GridSearchCV(clf, grid_params, cv=n_splits) + cv.fit(X, y) + preds_original = cv.predict(X) + + # precompute euclidean metric to validate pairwise is working + X_precomputed = euclidean_distances(X) + clf = KNeighborsClassifier(metric="precomputed") + cv = GridSearchCV(clf, grid_params, cv=n_splits) + cv.fit(X_precomputed, y) + preds_precomputed = cv.predict(X_precomputed) + + attr_message = "GridSearchCV not identical with precomputed metric" + assert (preds_original == preds_precomputed).all(), attr_message + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [(GridSearchCV, {"a": [0.1, 0.01]}), (RandomizedSearchCV, {"a": uniform(1, 3)})], +) +def test_scalar_fit_param(SearchCV, param_search): + # unofficially sanctioned tolerance for scalar values in fit_params + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15805 + class TestEstimator(ClassifierMixin, BaseEstimator): + def __init__(self, a=None): + self.a = a + + def fit(self, X, y, r=None): + self.r_ = r + + def predict(self, X): + return np.zeros(shape=(len(X))) + + model = SearchCV(TestEstimator(), param_search) + X, y = make_classification(random_state=42) + model.fit(X, y, r=42) + assert model.best_estimator_.r_ == 42 + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, {"alpha": [0.1, 0.01]}), + (RandomizedSearchCV, {"alpha": uniform(0.01, 0.1)}), + ], +) +def test_scalar_fit_param_compat(SearchCV, param_search): + # check support for scalar values in fit_params, for instance in LightGBM + # that do not exactly respect the scikit-learn API contract but that we do + # not want to break without an explicit deprecation cycle and API + # recommendations for implementing early stopping with a user provided + # validation set. non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15805 + X_train, X_valid, y_train, y_valid = train_test_split( + *make_classification(random_state=42), random_state=42 + ) + + class _FitParamClassifier(SGDClassifier): + def fit( + self, + X, + y, + sample_weight=None, + tuple_of_arrays=None, + scalar_param=None, + callable_param=None, + ): + super().fit(X, y, sample_weight=sample_weight) + assert scalar_param > 0 + assert callable(callable_param) + + # The tuple of arrays should be preserved as tuple. 
+ assert isinstance(tuple_of_arrays, tuple) + assert tuple_of_arrays[0].ndim == 2 + assert tuple_of_arrays[1].ndim == 1 + return self + + def _fit_param_callable(): + pass + + model = SearchCV(_FitParamClassifier(), param_search) + + # NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which + # is not the case for the following parameters. But this abuse is common in + # popular third-party libraries and we should tolerate this behavior for + # now and be careful not to break support for those without following + # proper deprecation cycle. + fit_params = { + "tuple_of_arrays": (X_valid, y_valid), + "callable_param": _fit_param_callable, + "scalar_param": 42, + } + model.fit(X_train, y_train, **fit_params) + + +# FIXME: Replace this test with a full `check_estimator` once we have API only +# checks. +@pytest.mark.filterwarnings("ignore:The total space of parameters 4 is") +@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) +@pytest.mark.parametrize("Predictor", [MinimalRegressor, MinimalClassifier]) +def test_search_cv_using_minimal_compatible_estimator(SearchCV, Predictor): + # Check that third-party library can run tests without inheriting from + # BaseEstimator. + rng = np.random.RandomState(0) + X, y = rng.randn(25, 2), np.array([0] * 5 + [1] * 20) + + model = Pipeline( + [("transformer", MinimalTransformer()), ("predictor", Predictor())] + ) + + params = { + "transformer__param": [1, 10], + "predictor__parama": [1, 10], + } + search = SearchCV(model, params, error_score="raise") + search.fit(X, y) + + assert search.best_params_.keys() == params.keys() + + y_pred = search.predict(X) + if is_classifier(search): + assert_array_equal(y_pred, 1) + assert search.score(X, y) == pytest.approx(accuracy_score(y, y_pred)) + else: + assert_allclose(y_pred, y.mean()) + assert search.score(X, y) == pytest.approx(r2_score(y, y_pred)) + + +@pytest.mark.parametrize("return_train_score", [True, False]) +def test_search_cv_verbose_3(capsys, return_train_score): + """Check that search cv with verbose>2 shows the score for single + metrics. 
non-regression test for #19658.""" + X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [0.1]} + + GridSearchCV( + clf, + grid, + scoring="accuracy", + verbose=3, + cv=3, + return_train_score=return_train_score, + ).fit(X, y) + captured = capsys.readouterr().out + if return_train_score: + match = re.findall(r"score=\(train=[\d\.]+, test=[\d.]+\)", captured) + else: + match = re.findall(r"score=[\d\.]+", captured) + assert len(match) == 3 + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + (HalvingGridSearchCV, "param_grid"), + ], +) +def test_search_estimator_param(SearchCV, param_search): + # test that SearchCV object doesn't change the object given in the parameter grid + X, y = make_classification(random_state=42) + + params = {"clf": [LinearSVC(dual="auto")], "clf__C": [0.01]} + orig_C = params["clf"][0].C + + pipe = Pipeline([("trs", MinimalTransformer()), ("clf", None)]) + + param_grid_search = {param_search: params} + gs = SearchCV(pipe, refit=True, cv=2, scoring="accuracy", **param_grid_search).fit( + X, y + ) + + # testing that the original object in params is not changed + assert params["clf"][0].C == orig_C + # testing that the GS is setting the parameter of the step correctly + assert gs.best_estimator_.named_steps["clf"].C == 0.01 + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + ], +) +def test_multi_metric_search_forwards_metadata(SearchCV, param_search): + """Test that *SearchCV forwards metadata correctly when passed multiple metrics.""" + X, y = make_classification(random_state=42) + n_samples = _num_samples(X) + rng = np.random.RandomState(0) + score_weights = rng.rand(n_samples) + score_metadata = rng.rand(n_samples) + + est = LinearSVC(dual="auto") + param_grid_search = {param_search: {"C": [1]}} + + scorer_registry = _Registry() + scorer = ConsumingScorer(registry=scorer_registry).set_score_request( + sample_weight="score_weights", metadata="score_metadata" + ) + scoring = dict(my_scorer=scorer, accuracy="accuracy") + SearchCV(est, refit="accuracy", cv=2, scoring=scoring, **param_grid_search).fit( + X, y, score_weights=score_weights, score_metadata=score_metadata + ) + assert len(scorer_registry) + for _scorer in scorer_registry: + check_recorded_metadata( + obj=_scorer, + method="score", + split_params=("sample_weight", "metadata"), + sample_weight=score_weights, + metadata=score_metadata, + ) + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + (HalvingGridSearchCV, "param_grid"), + ], +) +def test_score_rejects_params_with_no_routing_enabled(SearchCV, param_search): + """*SearchCV should reject **params when metadata routing is not enabled + since this is added only when routing is enabled.""" + X, y = make_classification(random_state=42) + est = LinearSVC(dual="auto") + param_grid_search = {param_search: {"C": [1]}} + + gs = SearchCV(est, cv=2, **param_grid_search).fit(X, y) + + with pytest.raises(ValueError, match="is only supported if"): + gs.score(X, y, metadata=1) + + +# End of Metadata Routing Tests +# ============================= diff --git 
a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py new file mode 100644 index 0000000000000000000000000000000000000000..57bc6b22351b9403c1939da22c31b6da3241886f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py @@ -0,0 +1,2025 @@ +"""Test the split module""" +import re +import warnings +from itertools import combinations, combinations_with_replacement, permutations + +import numpy as np +import pytest +from scipy import stats +from scipy.sparse import issparse +from scipy.special import comb + +from sklearn import config_context +from sklearn.datasets import load_digits, make_classification +from sklearn.dummy import DummyClassifier +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + LeavePOut, + PredefinedSplit, + RepeatedKFold, + RepeatedStratifiedKFold, + ShuffleSplit, + StratifiedGroupKFold, + StratifiedKFold, + StratifiedShuffleSplit, + TimeSeriesSplit, + check_cv, + cross_val_score, + train_test_split, +) +from sklearn.model_selection._split import ( + _build_repr, + _validate_shuffle_split, + _yields_constant_splits, +) +from sklearn.svm import SVC +from sklearn.tests.metadata_routing_common import assert_request_is_empty +from sklearn.utils._array_api import ( + _convert_to_numpy, + get_namespace, + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._array_api import ( + device as array_api_device, +) +from sklearn.utils._mocking import MockDataFrame +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.estimator_checks import ( + _array_api_for_tests, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + +NO_GROUP_SPLITTERS = [ + KFold(), + StratifiedKFold(), + TimeSeriesSplit(), + LeaveOneOut(), + LeavePOut(p=2), + ShuffleSplit(), + StratifiedShuffleSplit(test_size=0.5), + PredefinedSplit([1, 1, 2, 2]), + RepeatedKFold(), + RepeatedStratifiedKFold(), +] + +GROUP_SPLITTERS = [ + GroupKFold(), + LeavePGroupsOut(n_groups=1), + StratifiedGroupKFold(), + LeaveOneGroupOut(), + GroupShuffleSplit(), +] + +ALL_SPLITTERS = NO_GROUP_SPLITTERS + GROUP_SPLITTERS # type: ignore + +X = np.ones(10) +y = np.arange(10) // 2 +test_groups = ( + np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), + np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), + np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), + [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], + ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"], +) +digits = load_digits() + + +@ignore_warnings +def test_cross_validator_with_default_params(): + n_samples = 4 + n_unique_groups = 4 + n_splits = 2 + p = 2 + n_shuffle_splits = 10 # (the default value) + + X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + X_1d = np.array([1, 2, 3, 4]) + y = np.array([1, 1, 2, 2]) + groups = np.array([1, 2, 3, 4]) + loo = LeaveOneOut() + lpo = LeavePOut(p) + kf = KFold(n_splits) + skf = StratifiedKFold(n_splits) + lolo = LeaveOneGroupOut() + lopo = LeavePGroupsOut(p) + ss = ShuffleSplit(random_state=0) + ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = np of unique folds = 2 + sgkf = StratifiedGroupKFold(n_splits) + + loo_repr = "LeaveOneOut()" + lpo_repr = 
"LeavePOut(p=2)" + kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)" + skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)" + lolo_repr = "LeaveOneGroupOut()" + lopo_repr = "LeavePGroupsOut(n_groups=2)" + ss_repr = ( + "ShuffleSplit(n_splits=10, random_state=0, test_size=None, train_size=None)" + ) + ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))" + sgkf_repr = "StratifiedGroupKFold(n_splits=2, random_state=None, shuffle=False)" + + n_splits_expected = [ + n_samples, + comb(n_samples, p), + n_splits, + n_splits, + n_unique_groups, + comb(n_unique_groups, p), + n_shuffle_splits, + 2, + n_splits, + ] + + for i, (cv, cv_repr) in enumerate( + zip( + [loo, lpo, kf, skf, lolo, lopo, ss, ps, sgkf], + [ + loo_repr, + lpo_repr, + kf_repr, + skf_repr, + lolo_repr, + lopo_repr, + ss_repr, + ps_repr, + sgkf_repr, + ], + ) + ): + # Test if get_n_splits works correctly + assert n_splits_expected[i] == cv.get_n_splits(X, y, groups) + + # Test if the cross-validator works as expected even if + # the data is 1d + np.testing.assert_equal( + list(cv.split(X, y, groups)), list(cv.split(X_1d, y, groups)) + ) + # Test that train, test indices returned are integers + for train, test in cv.split(X, y, groups): + assert np.asarray(train).dtype.kind == "i" + assert np.asarray(test).dtype.kind == "i" + + # Test if the repr works without any errors + assert cv_repr == repr(cv) + + # ValueError for get_n_splits methods + msg = "The 'X' parameter should not be None." + with pytest.raises(ValueError, match=msg): + loo.get_n_splits(None, y, groups) + with pytest.raises(ValueError, match=msg): + lpo.get_n_splits(None, y, groups) + + +def test_2d_y(): + # smoke test for 2d y and multi-label + n_samples = 30 + rng = np.random.RandomState(1) + X = rng.randint(0, 3, size=(n_samples, 2)) + y = rng.randint(0, 3, size=(n_samples,)) + y_2d = y.reshape(-1, 1) + y_multilabel = rng.randint(0, 2, size=(n_samples, 3)) + groups = rng.randint(0, 3, size=(n_samples,)) + splitters = [ + LeaveOneOut(), + LeavePOut(p=2), + KFold(), + StratifiedKFold(), + RepeatedKFold(), + RepeatedStratifiedKFold(), + StratifiedGroupKFold(), + ShuffleSplit(), + StratifiedShuffleSplit(test_size=0.5), + GroupShuffleSplit(), + LeaveOneGroupOut(), + LeavePGroupsOut(n_groups=2), + GroupKFold(n_splits=3), + TimeSeriesSplit(), + PredefinedSplit(test_fold=groups), + ] + for splitter in splitters: + list(splitter.split(X, y, groups)) + list(splitter.split(X, y_2d, groups)) + try: + list(splitter.split(X, y_multilabel, groups)) + except ValueError as e: + allowed_target_types = ("binary", "multiclass") + msg = "Supported target types are: {}. 
Got 'multilabel".format( + allowed_target_types + ) + assert msg in str(e) + + +def check_valid_split(train, test, n_samples=None): + # Use python sets to get more informative assertion failure messages + train, test = set(train), set(test) + + # Train and test split should not overlap + assert train.intersection(test) == set() + + if n_samples is not None: + # Check that the union of train an test split cover all the indices + assert train.union(test) == set(range(n_samples)) + + +def check_cv_coverage(cv, X, y, groups, expected_n_splits): + n_samples = _num_samples(X) + # Check that a all the samples appear at least once in a test fold + assert cv.get_n_splits(X, y, groups) == expected_n_splits + + collected_test_samples = set() + iterations = 0 + for train, test in cv.split(X, y, groups): + check_valid_split(train, test, n_samples=n_samples) + iterations += 1 + collected_test_samples.update(test) + + # Check that the accumulated test samples cover the whole dataset + assert iterations == expected_n_splits + if n_samples is not None: + assert collected_test_samples == set(range(n_samples)) + + +def test_kfold_valueerrors(): + X1 = np.array([[1, 2], [3, 4], [5, 6]]) + X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) + # Check that errors are raised if there is not enough samples + (ValueError, next, KFold(4).split(X1)) + + # Check that a warning is raised if the least populated class has too few + # members. + y = np.array([3, 3, -1, -1, 3]) + + skf_3 = StratifiedKFold(3) + with pytest.warns(Warning, match="The least populated class"): + next(skf_3.split(X2, y)) + + sgkf_3 = StratifiedGroupKFold(3) + naive_groups = np.arange(len(y)) + with pytest.warns(Warning, match="The least populated class"): + next(sgkf_3.split(X2, y, naive_groups)) + + # Check that despite the warning the folds are still computed even + # though all the classes are not necessarily represented at on each + # side of the split at each split + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + check_cv_coverage(sgkf_3, X2, y, groups=naive_groups, expected_n_splits=3) + + # Check that errors are raised if all n_groups for individual + # classes are less than n_splits. 
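A minimal sketch of the two regimes the surrounding test distinguishes: StratifiedKFold only warns when the least populated class is smaller than n_splits, but refuses to split once every class is smaller than n_splits. The toy labels mirror the ones used above.

import warnings
import numpy as np
from sklearn.model_selection import StratifiedKFold

X_demo = np.zeros((5, 2))
skf_demo = StratifiedKFold(n_splits=3)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    list(skf_demo.split(X_demo, np.array([3, 3, -1, -1, 3])))  # one class has 2 < 3 members
print(caught[0].message)  # "The least populated class in y has only 2 members..."

try:
    list(skf_demo.split(X_demo, np.array([3, 3, -1, -1, 2])))  # every class has < 3 members
except ValueError as exc:
    print(exc)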
+ y = np.array([3, 3, -1, -1, 2]) + + with pytest.raises(ValueError): + next(skf_3.split(X2, y)) + with pytest.raises(ValueError): + next(sgkf_3.split(X2, y)) + + # Error when number of folds is <= 1 + with pytest.raises(ValueError): + KFold(0) + with pytest.raises(ValueError): + KFold(1) + error_string = "k-fold cross-validation requires at least one train/test split" + with pytest.raises(ValueError, match=error_string): + StratifiedKFold(0) + with pytest.raises(ValueError, match=error_string): + StratifiedKFold(1) + with pytest.raises(ValueError, match=error_string): + StratifiedGroupKFold(0) + with pytest.raises(ValueError, match=error_string): + StratifiedGroupKFold(1) + + # When n_splits is not integer: + with pytest.raises(ValueError): + KFold(1.5) + with pytest.raises(ValueError): + KFold(2.0) + with pytest.raises(ValueError): + StratifiedKFold(1.5) + with pytest.raises(ValueError): + StratifiedKFold(2.0) + with pytest.raises(ValueError): + StratifiedGroupKFold(1.5) + with pytest.raises(ValueError): + StratifiedGroupKFold(2.0) + + # When shuffle is not a bool: + with pytest.raises(TypeError): + KFold(n_splits=4, shuffle=None) + + +def test_kfold_indices(): + # Check all indices are returned in the test folds + X1 = np.ones(18) + kf = KFold(3) + check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3) + + # Check all indices are returned in the test folds even when equal-sized + # folds are not possible + X2 = np.ones(17) + kf = KFold(3) + check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3) + + # Check if get_n_splits returns the number of folds + assert 5 == KFold(5).get_n_splits(X2) + + +def test_kfold_no_shuffle(): + # Manually check that KFold preserves the data ordering on toy datasets + X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + + splits = KFold(2).split(X2[:-1]) + train, test = next(splits) + assert_array_equal(test, [0, 1]) + assert_array_equal(train, [2, 3]) + + train, test = next(splits) + assert_array_equal(test, [2, 3]) + assert_array_equal(train, [0, 1]) + + splits = KFold(2).split(X2) + train, test = next(splits) + assert_array_equal(test, [0, 1, 2]) + assert_array_equal(train, [3, 4]) + + train, test = next(splits) + assert_array_equal(test, [3, 4]) + assert_array_equal(train, [0, 1, 2]) + + +def test_stratified_kfold_no_shuffle(): + # Manually check that StratifiedKFold preserves the data ordering as much + # as possible on toy datasets in order to avoid hiding sample dependencies + # when possible + X, y = np.ones(4), [1, 1, 0, 0] + splits = StratifiedKFold(2).split(X, y) + train, test = next(splits) + assert_array_equal(test, [0, 2]) + assert_array_equal(train, [1, 3]) + + train, test = next(splits) + assert_array_equal(test, [1, 3]) + assert_array_equal(train, [0, 2]) + + X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0] + splits = StratifiedKFold(2).split(X, y) + train, test = next(splits) + assert_array_equal(test, [0, 1, 3, 4]) + assert_array_equal(train, [2, 5, 6]) + + train, test = next(splits) + assert_array_equal(test, [2, 5, 6]) + assert_array_equal(train, [0, 1, 3, 4]) + + # Check if get_n_splits returns the number of folds + assert 5 == StratifiedKFold(5).get_n_splits(X, y) + + # Make sure string labels are also supported + X = np.ones(7) + y1 = ["1", "1", "1", "0", "0", "0", "0"] + y2 = [1, 1, 1, 0, 0, 0, 0] + np.testing.assert_equal( + list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2)) + ) + + # Check equivalence to KFold + y = [0, 1, 0, 1, 0, 1, 0, 1] + X = np.ones_like(y) + np.testing.assert_equal( + 
list(StratifiedKFold(3).split(X, y)), list(KFold(3).split(X, y)) + ) + + +@pytest.mark.parametrize("shuffle", [False, True]) +@pytest.mark.parametrize("k", [4, 5, 6, 7, 8, 9, 10]) +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratified_kfold_ratios(k, shuffle, kfold): + # Check that stratified kfold preserves class ratios in individual splits + # Repeat with shuffling turned off and on + n_samples = 1000 + X = np.ones(n_samples) + y = np.array( + [4] * int(0.10 * n_samples) + + [0] * int(0.89 * n_samples) + + [1] * int(0.01 * n_samples) + ) + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + distr = np.bincount(y) / len(y) + + test_sizes = [] + random_state = None if not shuffle else 0 + skf = kfold(k, random_state=random_state, shuffle=shuffle) + for train, test in skf.split(X, y, groups=groups): + assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02) + assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +@pytest.mark.parametrize("shuffle", [False, True]) +@pytest.mark.parametrize("k", [4, 6, 7]) +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratified_kfold_label_invariance(k, shuffle, kfold): + # Check that stratified kfold gives the same indices regardless of labels + n_samples = 100 + y = np.array( + [2] * int(0.10 * n_samples) + + [0] * int(0.89 * n_samples) + + [1] * int(0.01 * n_samples) + ) + X = np.ones(len(y)) + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + + def get_splits(y): + random_state = None if not shuffle else 0 + return [ + (list(train), list(test)) + for train, test in kfold( + k, random_state=random_state, shuffle=shuffle + ).split(X, y, groups=groups) + ] + + splits_base = get_splits(y) + for perm in permutations([0, 1, 2]): + y_perm = np.take(perm, y) + splits_perm = get_splits(y_perm) + assert splits_perm == splits_base + + +def test_kfold_balance(): + # Check that KFold returns folds with balanced sizes + for i in range(11, 17): + kf = KFold(5).split(X=np.ones(i)) + sizes = [len(test) for _, test in kf] + + assert (np.max(sizes) - np.min(sizes)) <= 1 + assert np.sum(sizes) == i + + +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratifiedkfold_balance(kfold): + # Check that KFold returns folds with balanced sizes (only when + # stratification is possible) + # Repeat with shuffling turned off and on + X = np.ones(17) + y = [0] * 3 + [1] * 14 + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + + for shuffle in (True, False): + cv = kfold(3, shuffle=shuffle) + for i in range(11, 17): + skf = cv.split(X[:i], y[:i], groups[:i]) + sizes = [len(test) for _, test in skf] + + assert (np.max(sizes) - np.min(sizes)) <= 1 + assert np.sum(sizes) == i + + +def test_shuffle_kfold(): + # Check the indices are shuffled properly + kf = KFold(3) + kf2 = KFold(3, shuffle=True, random_state=0) + kf3 = KFold(3, shuffle=True, random_state=1) + + X = np.ones(300) + + all_folds = np.zeros(300) + for (tr1, te1), (tr2, te2), (tr3, te3) in zip( + kf.split(X), kf2.split(X), kf3.split(X) + ): + for tr_a, tr_b in combinations((tr1, tr2, tr3), 2): + # Assert that there is no complete overlap + assert len(np.intersect1d(tr_a, tr_b)) != len(tr1) + + # Set all test indices in successive iterations of kf2 to 1 + all_folds[te2] = 1 + + # Check that all indices are 
returned in the different test folds + assert sum(all_folds) == 300 + + +@pytest.mark.parametrize("kfold", [KFold, StratifiedKFold, StratifiedGroupKFold]) +def test_shuffle_kfold_stratifiedkfold_reproducibility(kfold): + X = np.ones(15) # Divisible by 3 + y = [0] * 7 + [1] * 8 + groups_1 = np.arange(len(y)) + X2 = np.ones(16) # Not divisible by 3 + y2 = [0] * 8 + [1] * 8 + groups_2 = np.arange(len(y2)) + + # Check that when the shuffle is True, multiple split calls produce the + # same split when random_state is int + kf = kfold(3, shuffle=True, random_state=0) + + np.testing.assert_equal( + list(kf.split(X, y, groups_1)), list(kf.split(X, y, groups_1)) + ) + + # Check that when the shuffle is True, multiple split calls often + # (not always) produce different splits when random_state is + # RandomState instance or None + kf = kfold(3, shuffle=True, random_state=np.random.RandomState(0)) + for data in zip((X, X2), (y, y2), (groups_1, groups_2)): + # Test if the two splits are different cv + for (_, test_a), (_, test_b) in zip(kf.split(*data), kf.split(*data)): + # cv.split(...) returns an array of tuples, each tuple + # consisting of an array with train indices and test indices + # Ensure that the splits for data are not same + # when random state is not set + with pytest.raises(AssertionError): + np.testing.assert_array_equal(test_a, test_b) + + +def test_shuffle_stratifiedkfold(): + # Check that shuffling is happening when requested, and for proper + # sample coverage + X_40 = np.ones(40) + y = [0] * 20 + [1] * 20 + kf0 = StratifiedKFold(5, shuffle=True, random_state=0) + kf1 = StratifiedKFold(5, shuffle=True, random_state=1) + for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)): + assert set(test0) != set(test1) + check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5) + + # Ensure that we shuffle each class's samples with different + # random_state in StratifiedKFold + # See https://github.com/scikit-learn/scikit-learn/pull/13124 + X = np.arange(10) + y = [0] * 5 + [1] * 5 + kf1 = StratifiedKFold(5, shuffle=True, random_state=0) + kf2 = StratifiedKFold(5, shuffle=True, random_state=1) + test_set1 = sorted([tuple(s[1]) for s in kf1.split(X, y)]) + test_set2 = sorted([tuple(s[1]) for s in kf2.split(X, y)]) + assert test_set1 != test_set2 + + +def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 + # The digits samples are dependent: they are apparently grouped by authors + # although we don't have any information on the groups segment locations + # for this data. We can highlight this fact by computing k-fold cross- + # validation with and without shuffling: we observe that the shuffling case + # wrongly makes the IID assumption and is therefore too optimistic: it + # estimates a much higher accuracy (around 0.93) than that the non + # shuffling variant (around 0.81). 
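A minimal synthetic sketch of the dependency issue described in the comment above: when the members of a group are near-duplicates, shuffled KFold places the same group on both sides of the split, whereas GroupKFold keeps each group on one side only. The data is made up purely for illustration.

import numpy as np
from sklearn.model_selection import GroupKFold, KFold

rng = np.random.RandomState(0)
groups_demo = np.repeat(np.arange(20), 5)  # 20 groups of 5 near-duplicate samples
X_demo = rng.randn(20, 3).repeat(5, axis=0) + 0.01 * rng.randn(100, 3)
y_demo = (X_demo[:, 0] > 0).astype(int)

for cv in (KFold(n_splits=5, shuffle=True, random_state=0), GroupKFold(n_splits=5)):
    leaked = 0
    for train, test in cv.split(X_demo, y_demo, groups_demo):
        leaked += len(np.intersect1d(groups_demo[train], groups_demo[test]))
    print(type(cv).__name__, "groups seen on both sides:", leaked)
# Shuffled KFold typically leaks most groups across folds; GroupKFold leaks none.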
+ + X, y = digits.data[:600], digits.target[:600] + model = SVC(C=10, gamma=0.005) + + n_splits = 3 + + cv = KFold(n_splits=n_splits, shuffle=False) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert 0.92 > mean_score + assert mean_score > 0.80 + + # Shuffling the data artificially breaks the dependency and hides the + # overfitting of the model with regards to the writing style of the authors + # by yielding a seriously overestimated score: + + cv = KFold(n_splits, shuffle=True, random_state=0) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert mean_score > 0.92 + + cv = KFold(n_splits, shuffle=True, random_state=1) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert mean_score > 0.92 + + # Similarly, StratifiedKFold should try to shuffle the data as little + # as possible (while respecting the balanced class constraints) + # and thus be able to detect the dependency by not overestimating + # the CV score either. As the digits dataset is approximately balanced + # the estimated mean score is close to the score measured with + # non-shuffled KFold + + cv = StratifiedKFold(n_splits) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert 0.94 > mean_score + assert mean_score > 0.80 + + +def test_stratified_group_kfold_trivial(): + sgkf = StratifiedGroupKFold(n_splits=3) + # Trivial example - groups with the same distribution + y = np.array([1] * 6 + [0] * 12) + X = np.ones_like(y).reshape(-1, 1) + groups = np.asarray((1, 2, 3, 4, 5, 6, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6)) + distr = np.bincount(y) / len(y) + test_sizes = [] + for train, test in sgkf.split(X, y, groups): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + # check y distribution + assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02) + assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +def test_stratified_group_kfold_approximate(): + # Not perfect stratification (even though it is possible) because of + # iteration over groups + sgkf = StratifiedGroupKFold(n_splits=3) + y = np.array([1] * 6 + [0] * 12) + X = np.ones_like(y).reshape(-1, 1) + groups = np.array([1, 2, 3, 3, 4, 4, 1, 1, 2, 2, 3, 4, 5, 5, 5, 6, 6, 6]) + expected = np.asarray([[0.833, 0.166], [0.666, 0.333], [0.5, 0.5]]) + test_sizes = [] + for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + split_dist = np.bincount(y[test]) / len(test) + assert_allclose(split_dist, expect_dist, atol=0.001) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +@pytest.mark.parametrize( + "y, groups, expected", + [ + ( + np.array([0] * 6 + [1] * 6), + np.array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]), + np.asarray([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]), + ), + ( + np.array([0] * 9 + [1] * 3), + np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6]), + np.asarray([[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]), + ), + ], +) +def test_stratified_group_kfold_homogeneous_groups(y, groups, expected): + sgkf = StratifiedGroupKFold(n_splits=3) + X = np.ones_like(y).reshape(-1, 1) + for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + split_dist = np.bincount(y[test]) / len(test) + assert_allclose(split_dist, expect_dist, atol=0.001) + + 
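A minimal sketch recapping what the StratifiedGroupKFold checks above assert, reusing the toy layout from test_stratified_group_kfold_trivial: folds are group-disjoint while each test fold's class balance stays close to the overall distribution.

import numpy as np
from sklearn.model_selection import StratifiedGroupKFold

y_demo = np.array([1] * 6 + [0] * 12)
X_demo = np.ones((len(y_demo), 1))
groups_demo = np.array([1, 2, 3, 4, 5, 6, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])

for train, test in StratifiedGroupKFold(n_splits=3).split(X_demo, y_demo, groups_demo):
    # Groups never straddle the train/test boundary.
    assert np.intersect1d(groups_demo[train], groups_demo[test]).size == 0
    print("test fold class balance:", np.bincount(y_demo[test]) / len(test))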
+@pytest.mark.parametrize("cls_distr", [(0.4, 0.6), (0.3, 0.7), (0.2, 0.8), (0.8, 0.2)]) +@pytest.mark.parametrize("n_groups", [5, 30, 70]) +def test_stratified_group_kfold_against_group_kfold(cls_distr, n_groups): + # Check that given sufficient amount of samples StratifiedGroupKFold + # produces better stratified folds than regular GroupKFold + n_splits = 5 + sgkf = StratifiedGroupKFold(n_splits=n_splits) + gkf = GroupKFold(n_splits=n_splits) + rng = np.random.RandomState(0) + n_points = 1000 + y = rng.choice(2, size=n_points, p=cls_distr) + X = np.ones_like(y).reshape(-1, 1) + g = rng.choice(n_groups, n_points) + sgkf_folds = sgkf.split(X, y, groups=g) + gkf_folds = gkf.split(X, y, groups=g) + sgkf_entr = 0 + gkf_entr = 0 + for (sgkf_train, sgkf_test), (_, gkf_test) in zip(sgkf_folds, gkf_folds): + # check group constraint + assert np.intersect1d(g[sgkf_train], g[sgkf_test]).size == 0 + sgkf_distr = np.bincount(y[sgkf_test]) / len(sgkf_test) + gkf_distr = np.bincount(y[gkf_test]) / len(gkf_test) + sgkf_entr += stats.entropy(sgkf_distr, qk=cls_distr) + gkf_entr += stats.entropy(gkf_distr, qk=cls_distr) + sgkf_entr /= n_splits + gkf_entr /= n_splits + assert sgkf_entr <= gkf_entr + + +def test_shuffle_split(): + ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X) + ss2 = ShuffleSplit(test_size=2, random_state=0).split(X) + ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X) + ss4 = ShuffleSplit(test_size=int(2), random_state=0).split(X) + for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): + assert_array_equal(t1[0], t2[0]) + assert_array_equal(t2[0], t3[0]) + assert_array_equal(t3[0], t4[0]) + assert_array_equal(t1[1], t2[1]) + assert_array_equal(t2[1], t3[1]) + assert_array_equal(t3[1], t4[1]) + + +@pytest.mark.parametrize("split_class", [ShuffleSplit, StratifiedShuffleSplit]) +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 9, 1), (8, 8, 2), (0.8, 8, 2)] +) +def test_shuffle_split_default_test_size(split_class, train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. 0.1 if both + # unspecified or complement train_size unless both are specified. + X = np.ones(10) + y = np.ones(10) + + X_train, X_test = next(split_class(train_size=train_size).split(X, y)) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 8, 2), (7, 7, 3), (0.7, 7, 3)] +) +def test_group_shuffle_split_default_test_size(train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. 0.2 if both + # unspecified or complement train_size unless both are specified. 
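A minimal sketch of the defaults pinned down by the two parametrized tests above: ShuffleSplit holds out 10% of the samples by default, while GroupShuffleSplit holds out 20% of the groups.

import numpy as np
from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit

X_demo = np.ones(10)
train, test = next(ShuffleSplit(random_state=0).split(X_demo))
print(len(train), len(test))  # 9 1

groups_demo = np.arange(10)  # ten singleton groups
train, test = next(GroupShuffleSplit(random_state=0).split(X_demo, groups=groups_demo))
print(len(train), len(test))  # 8 2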
+ X = np.ones(10) + y = np.ones(10) + groups = range(10) + + X_train, X_test = next(GroupShuffleSplit(train_size=train_size).split(X, y, groups)) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@ignore_warnings +def test_stratified_shuffle_split_init(): + X = np.arange(7) + y = np.asarray([0, 1, 1, 1, 2, 2, 2]) + # Check that error is raised if there is a class with only one sample + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=0.2).split(X, y)) + + # Check that error is raised if the test set size is smaller than n_classes + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=2).split(X, y)) + # Check that error is raised if the train set size is smaller than + # n_classes + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=3, train_size=2).split(X, y)) + + X = np.arange(9) + y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) + + # Train size or test size too small + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(train_size=2).split(X, y)) + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(test_size=2).split(X, y)) + + +def test_stratified_shuffle_split_respects_test_size(): + y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]) + test_size = 5 + train_size = 10 + sss = StratifiedShuffleSplit( + 6, test_size=test_size, train_size=train_size, random_state=0 + ).split(np.ones(len(y)), y) + for train, test in sss: + assert len(train) == train_size + assert len(test) == test_size + + +def test_stratified_shuffle_split_iter(): + ys = [ + np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), + np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), + np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), + np.array([-1] * 800 + [1] * 50), + np.concatenate([[i] * (100 + i) for i in range(11)]), + [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], + ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"], + ] + + for y in ys: + sss = StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split( + np.ones(len(y)), y + ) + y = np.asanyarray(y) # To make it indexable for y[train] + # this is how test-size is computed internally + # in _validate_shuffle_split + test_size = np.ceil(0.33 * len(y)) + train_size = len(y) - test_size + for train, test in sss: + assert_array_equal(np.unique(y[train]), np.unique(y[test])) + # Checks if folds keep classes proportions + p_train = np.bincount(np.unique(y[train], return_inverse=True)[1]) / float( + len(y[train]) + ) + p_test = np.bincount(np.unique(y[test], return_inverse=True)[1]) / float( + len(y[test]) + ) + assert_array_almost_equal(p_train, p_test, 1) + assert len(train) + len(test) == y.size + assert len(train) == train_size + assert len(test) == test_size + assert_array_equal(np.intersect1d(train, test), []) + + +def test_stratified_shuffle_split_even(): + # Test the StratifiedShuffleSplit, indices are drawn with a + # equal chance + n_folds = 5 + n_splits = 1000 + + def assert_counts_are_ok(idx_counts, p): + # Here we test that the distribution of the counts + # per index is close enough to a binomial + threshold = 0.05 / n_splits + bf = stats.binom(n_splits, p) + for count in idx_counts: + prob = bf.pmf(count) + assert ( + prob > threshold + ), "An index is not drawn with chance corresponding to even draws" + + for n_samples in (6, 22): + groups = np.array((n_samples // 2) * [0, 1]) + splits = StratifiedShuffleSplit( + n_splits=n_splits, test_size=1.0 / n_folds, 
random_state=0 + ) + + train_counts = [0] * n_samples + test_counts = [0] * n_samples + n_splits_actual = 0 + for train, test in splits.split(X=np.ones(n_samples), y=groups): + n_splits_actual += 1 + for counter, ids in [(train_counts, train), (test_counts, test)]: + for id in ids: + counter[id] += 1 + assert n_splits_actual == n_splits + + n_train, n_test = _validate_shuffle_split( + n_samples, test_size=1.0 / n_folds, train_size=1.0 - (1.0 / n_folds) + ) + + assert len(train) == n_train + assert len(test) == n_test + assert len(set(train).intersection(test)) == 0 + + group_counts = np.unique(groups) + assert splits.test_size == 1.0 / n_folds + assert n_train + n_test == len(groups) + assert len(group_counts) == 2 + ex_test_p = float(n_test) / n_samples + ex_train_p = float(n_train) / n_samples + + assert_counts_are_ok(train_counts, ex_train_p) + assert_counts_are_ok(test_counts, ex_test_p) + + +def test_stratified_shuffle_split_overlap_train_test_bug(): + # See https://github.com/scikit-learn/scikit-learn/issues/6121 for + # the original bug report + y = [0, 1, 2, 3] * 3 + [4, 5] * 5 + X = np.ones_like(y) + + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + + train, test = next(sss.split(X=X, y=y)) + + # no overlap + assert_array_equal(np.intersect1d(train, test), []) + + # complete partition + assert_array_equal(np.union1d(train, test), np.arange(len(y))) + + +def test_stratified_shuffle_split_multilabel(): + # fix for issue 9037 + for y in [ + np.array([[0, 1], [1, 0], [1, 0], [0, 1]]), + np.array([[0, 1], [1, 1], [1, 1], [0, 1]]), + ]: + X = np.ones_like(y) + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + train, test = next(sss.split(X=X, y=y)) + y_train = y[train] + y_test = y[test] + + # no overlap + assert_array_equal(np.intersect1d(train, test), []) + + # complete partition + assert_array_equal(np.union1d(train, test), np.arange(len(y))) + + # correct stratification of entire rows + # (by design, here y[:, 0] uniquely determines the entire row of y) + expected_ratio = np.mean(y[:, 0]) + assert expected_ratio == np.mean(y_train[:, 0]) + assert expected_ratio == np.mean(y_test[:, 0]) + + +def test_stratified_shuffle_split_multilabel_many_labels(): + # fix in PR #9922: for multilabel data with > 1000 labels, str(row) + # truncates with an ellipsis for elements in positions 4 through + # len(row) - 4, so labels were not being correctly split using the powerset + # method for transforming a multilabel problem to a multiclass one; this + # test checks that this problem is fixed. + row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1] + row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1] + y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100) + X = np.ones_like(y) + + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + train, test = next(sss.split(X=X, y=y)) + y_train = y[train] + y_test = y[test] + + # correct stratification of entire rows + # (by design, here y[:, 4] uniquely determines the entire row of y) + expected_ratio = np.mean(y[:, 4]) + assert expected_ratio == np.mean(y_train[:, 4]) + assert expected_ratio == np.mean(y_test[:, 4]) + + +def test_predefinedsplit_with_kfold_split(): + # Check that PredefinedSplit can reproduce a split generated by Kfold. 
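+    # Background for the check below: PredefinedSplit takes a test_fold array
+    # in which test_fold[i] is the index of the fold where sample i is used as
+    # a test sample (a value of -1 would keep sample i out of every test set).
+    # Filling that array from KFold's test indices should therefore reproduce
+    # the exact same train/test partitions.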
+ folds = np.full(10, -1.0) + kf_train = [] + kf_test = [] + for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)): + kf_train.append(train_ind) + kf_test.append(test_ind) + folds[test_ind] = i + ps = PredefinedSplit(folds) + # n_splits is simply the no of unique folds + assert len(np.unique(folds)) == ps.get_n_splits() + ps_train, ps_test = zip(*ps.split()) + assert_array_equal(ps_train, kf_train) + assert_array_equal(ps_test, kf_test) + + +def test_group_shuffle_split(): + for groups_i in test_groups: + X = y = np.ones(len(groups_i)) + n_splits = 6 + test_size = 1.0 / 3 + slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0) + + # Make sure the repr works + repr(slo) + + # Test that the length is correct + assert slo.get_n_splits(X, y, groups=groups_i) == n_splits + + l_unique = np.unique(groups_i) + l = np.asarray(groups_i) + + for train, test in slo.split(X, y, groups=groups_i): + # First test: no train group is in the test set and vice versa + l_train_unique = np.unique(l[train]) + l_test_unique = np.unique(l[test]) + assert not np.any(np.isin(l[train], l_test_unique)) + assert not np.any(np.isin(l[test], l_train_unique)) + + # Second test: train and test add up to all the data + assert l[train].size + l[test].size == l.size + + # Third test: train and test are disjoint + assert_array_equal(np.intersect1d(train, test), []) + + # Fourth test: + # unique train and test groups are correct, +- 1 for rounding error + assert abs(len(l_test_unique) - round(test_size * len(l_unique))) <= 1 + assert ( + abs(len(l_train_unique) - round((1.0 - test_size) * len(l_unique))) <= 1 + ) + + +def test_leave_one_p_group_out(): + logo = LeaveOneGroupOut() + lpgo_1 = LeavePGroupsOut(n_groups=1) + lpgo_2 = LeavePGroupsOut(n_groups=2) + + # Make sure the repr works + assert repr(logo) == "LeaveOneGroupOut()" + assert repr(lpgo_1) == "LeavePGroupsOut(n_groups=1)" + assert repr(lpgo_2) == "LeavePGroupsOut(n_groups=2)" + assert repr(LeavePGroupsOut(n_groups=3)) == "LeavePGroupsOut(n_groups=3)" + + for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1), (lpgo_2, 2))): + for i, groups_i in enumerate(test_groups): + n_groups = len(np.unique(groups_i)) + n_splits = n_groups if p_groups_out == 1 else n_groups * (n_groups - 1) / 2 + X = y = np.ones(len(groups_i)) + + # Test that the length is correct + assert cv.get_n_splits(X, y, groups=groups_i) == n_splits + + groups_arr = np.asarray(groups_i) + + # Split using the original list / array / list of string groups_i + for train, test in cv.split(X, y, groups=groups_i): + # First test: no train group is in the test set and vice versa + assert_array_equal( + np.intersect1d(groups_arr[train], groups_arr[test]).tolist(), [] + ) + + # Second test: train and test add up to all the data + assert len(train) + len(test) == len(groups_i) + + # Third test: + # The number of groups in test must be equal to p_groups_out + assert np.unique(groups_arr[test]).shape[0], p_groups_out + + # check get_n_splits() with dummy parameters + assert logo.get_n_splits(None, None, ["a", "b", "c", "b", "c"]) == 3 + assert logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]) == 3 + assert lpgo_2.get_n_splits(None, None, np.arange(4)) == 6 + assert lpgo_1.get_n_splits(groups=np.arange(4)) == 4 + + # raise ValueError if a `groups` parameter is illegal + with pytest.raises(ValueError): + logo.get_n_splits(None, None, [0.0, np.nan, 0.0]) + with pytest.raises(ValueError): + lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0]) + + msg = "The 'groups' parameter 
should not be None." + with pytest.raises(ValueError, match=msg): + logo.get_n_splits(None, None, None) + with pytest.raises(ValueError, match=msg): + lpgo_1.get_n_splits(None, None, None) + + +def test_leave_group_out_changing_groups(): + # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if + # the groups variable is changed before calling split + groups = np.array([0, 1, 2, 1, 1, 2, 0, 0]) + X = np.ones(len(groups)) + groups_changing = np.array(groups, copy=True) + lolo = LeaveOneGroupOut().split(X, groups=groups) + lolo_changing = LeaveOneGroupOut().split(X, groups=groups) + lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups) + lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups) + groups_changing[:] = 0 + for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: + for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): + assert_array_equal(train, train_chan) + assert_array_equal(test, test_chan) + + # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3 + assert 3 == LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X, groups=groups) + # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups) + assert 3 == LeaveOneGroupOut().get_n_splits(X, y=X, groups=groups) + + +def test_leave_group_out_order_dependence(): + # Check that LeaveOneGroupOut orders the splits according to the index + # of the group left out. + groups = np.array([2, 2, 0, 0, 1, 1]) + X = np.ones(len(groups)) + + splits = iter(LeaveOneGroupOut().split(X, groups=groups)) + + expected_indices = [ + ([0, 1, 4, 5], [2, 3]), + ([0, 1, 2, 3], [4, 5]), + ([2, 3, 4, 5], [0, 1]), + ] + + for expected_train, expected_test in expected_indices: + train, test = next(splits) + assert_array_equal(train, expected_train) + assert_array_equal(test, expected_test) + + +def test_leave_one_p_group_out_error_on_fewer_number_of_groups(): + X = y = groups = np.ones(0) + msg = re.escape("Found array with 0 sample(s)") + with pytest.raises(ValueError, match=msg): + next(LeaveOneGroupOut().split(X, y, groups)) + + X = y = groups = np.ones(1) + msg = re.escape( + f"The groups parameter contains fewer than 2 unique groups ({groups})." + " LeaveOneGroupOut expects at least 2." + ) + with pytest.raises(ValueError, match=msg): + next(LeaveOneGroupOut().split(X, y, groups)) + + X = y = groups = np.ones(1) + msg = re.escape( + "The groups parameter contains fewer than (or equal to) n_groups " + f"(3) numbers of unique groups ({groups}). LeavePGroupsOut expects " + "that at least n_groups + 1 (4) unique groups " + "be present" + ) + with pytest.raises(ValueError, match=msg): + next(LeavePGroupsOut(n_groups=3).split(X, y, groups)) + + X = y = groups = np.arange(3) + msg = re.escape( + "The groups parameter contains fewer than (or equal to) n_groups " + f"(3) numbers of unique groups ({groups}). 
LeavePGroupsOut expects " + "that at least n_groups + 1 (4) unique groups " + "be present" + ) + with pytest.raises(ValueError, match=msg): + next(LeavePGroupsOut(n_groups=3).split(X, y, groups)) + + +@ignore_warnings +def test_repeated_cv_value_errors(): + # n_repeats is not integer or <= 0 + for cv in (RepeatedKFold, RepeatedStratifiedKFold): + with pytest.raises(ValueError): + cv(n_repeats=0) + with pytest.raises(ValueError): + cv(n_repeats=1.5) + + +@pytest.mark.parametrize("RepeatedCV", [RepeatedKFold, RepeatedStratifiedKFold]) +def test_repeated_cv_repr(RepeatedCV): + n_splits, n_repeats = 2, 6 + repeated_cv = RepeatedCV(n_splits=n_splits, n_repeats=n_repeats) + repeated_cv_repr = "{}(n_repeats=6, n_splits=2, random_state=None)".format( + repeated_cv.__class__.__name__ + ) + assert repeated_cv_repr == repr(repeated_cv) + + +def test_repeated_kfold_determinstic_split(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + random_state = 258173307 + rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=random_state) + + # split should produce same and deterministic splits on + # each call + for _ in range(3): + splits = rkf.split(X) + train, test = next(splits) + assert_array_equal(train, [2, 4]) + assert_array_equal(test, [0, 1, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 3]) + assert_array_equal(test, [2, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [2, 3, 4]) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4]) + assert_array_equal(test, [0, 1]) + + with pytest.raises(StopIteration): + next(splits) + + +def test_get_n_splits_for_repeated_kfold(): + n_splits = 3 + n_repeats = 4 + rkf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats) + expected_n_splits = n_splits * n_repeats + assert expected_n_splits == rkf.get_n_splits() + + +def test_get_n_splits_for_repeated_stratified_kfold(): + n_splits = 3 + n_repeats = 4 + rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats) + expected_n_splits = n_splits * n_repeats + assert expected_n_splits == rskf.get_n_splits() + + +def test_repeated_stratified_kfold_determinstic_split(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + y = [1, 1, 1, 0, 0] + random_state = 1944695409 + rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, random_state=random_state) + + # split should produce same and deterministic splits on + # each call + for _ in range(3): + splits = rskf.split(X, y) + train, test = next(splits) + assert_array_equal(train, [1, 4]) + assert_array_equal(test, [0, 2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 2, 3]) + assert_array_equal(test, [1, 4]) + + train, test = next(splits) + assert_array_equal(train, [2, 3]) + assert_array_equal(test, [0, 1, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 4]) + assert_array_equal(test, [2, 3]) + + with pytest.raises(StopIteration): + next(splits) + + +def test_train_test_split_errors(): + pytest.raises(ValueError, train_test_split) + + pytest.raises(ValueError, train_test_split, range(3), train_size=1.1) + + pytest.raises(ValueError, train_test_split, range(3), test_size=0.6, train_size=0.6) + pytest.raises( + ValueError, + train_test_split, + range(3), + test_size=np.float32(0.6), + train_size=np.float32(0.6), + ) + pytest.raises(ValueError, train_test_split, range(3), test_size="wrong_type") + pytest.raises(ValueError, train_test_split, range(3), test_size=2, train_size=4) + pytest.raises(TypeError, train_test_split, range(3), 
some_argument=1.1) + pytest.raises(ValueError, train_test_split, range(3), range(42)) + pytest.raises(ValueError, train_test_split, range(10), shuffle=False, stratify=True) + + with pytest.raises( + ValueError, + match=r"train_size=11 should be either positive and " + r"smaller than the number of samples 10 or a " + r"float in the \(0, 1\) range", + ): + train_test_split(range(10), train_size=11, test_size=1) + + +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 7, 3), (8, 8, 2), (0.8, 8, 2)] +) +def test_train_test_split_default_test_size(train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. complement + # train_size unless both are specified. + X_train, X_test = train_test_split(X, train_size=train_size) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "shuffle,stratify", + ( + (True, None), + (True, np.hstack((np.ones(6), np.zeros(4)))), + # stratification only works with shuffling + (False, None), + ), +) +def test_array_api_train_test_split( + shuffle, stratify, array_namespace, device, dtype_name +): + xp = _array_api_for_tests(array_namespace, device) + + X = np.arange(100).reshape((10, 10)) + y = np.arange(10) + + X_np = X.astype(dtype_name) + X_xp = xp.asarray(X_np, device=device) + + y_np = y.astype(dtype_name) + y_xp = xp.asarray(y_np, device=device) + + X_train_np, X_test_np, y_train_np, y_test_np = train_test_split( + X_np, y, random_state=0, shuffle=shuffle, stratify=stratify + ) + with config_context(array_api_dispatch=True): + if stratify is not None: + stratify_xp = xp.asarray(stratify) + else: + stratify_xp = stratify + X_train_xp, X_test_xp, y_train_xp, y_test_xp = train_test_split( + X_xp, y_xp, shuffle=shuffle, stratify=stratify_xp, random_state=0 + ) + + # Check that namespace is preserved, has to happen with + # array_api_dispatch enabled. 
+ assert get_namespace(X_train_xp)[0] == get_namespace(X_xp)[0] + assert get_namespace(X_test_xp)[0] == get_namespace(X_xp)[0] + assert get_namespace(y_train_xp)[0] == get_namespace(y_xp)[0] + assert get_namespace(y_test_xp)[0] == get_namespace(y_xp)[0] + + # Check device and dtype is preserved on output + assert array_api_device(X_train_xp) == array_api_device(X_xp) + assert array_api_device(y_train_xp) == array_api_device(y_xp) + assert array_api_device(X_test_xp) == array_api_device(X_xp) + assert array_api_device(y_test_xp) == array_api_device(y_xp) + + assert X_train_xp.dtype == X_xp.dtype + assert y_train_xp.dtype == y_xp.dtype + assert X_test_xp.dtype == X_xp.dtype + assert y_test_xp.dtype == y_xp.dtype + + assert_allclose( + _convert_to_numpy(X_train_xp, xp=xp), + X_train_np, + ) + assert_allclose( + _convert_to_numpy(X_test_xp, xp=xp), + X_test_np, + ) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_train_test_split(coo_container): + X = np.arange(100).reshape((10, 10)) + X_s = coo_container(X) + y = np.arange(10) + + # simple test + split = train_test_split(X, y, test_size=None, train_size=0.5) + X_train, X_test, y_train, y_test = split + assert len(y_test) == len(y_train) + # test correspondence of X and y + assert_array_equal(X_train[:, 0], y_train * 10) + assert_array_equal(X_test[:, 0], y_test * 10) + + # don't convert lists to anything else by default + split = train_test_split(X, X_s, y.tolist()) + X_train, X_test, X_s_train, X_s_test, y_train, y_test = split + assert isinstance(y_train, list) + assert isinstance(y_test, list) + + # allow nd-arrays + X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) + y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) + split = train_test_split(X_4d, y_3d) + assert split[0].shape == (7, 5, 3, 2) + assert split[1].shape == (3, 5, 3, 2) + assert split[2].shape == (7, 7, 11) + assert split[3].shape == (3, 7, 11) + + # test stratification option + y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) + for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): + train, test = train_test_split( + y, test_size=test_size, stratify=y, random_state=0 + ) + assert len(test) == exp_test_size + assert len(test) + len(train) == len(y) + # check the 1:1 ratio of ones and twos in the data is preserved + assert np.sum(train == 1) == np.sum(train == 2) + + # test unshuffled split + y = np.arange(10) + for test_size in [2, 0.2]: + train, test = train_test_split(y, shuffle=False, test_size=test_size) + assert_array_equal(test, [8, 9]) + assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7]) + + +def test_train_test_split_32bit_overflow(): + """Check for integer overflow on 32-bit platforms. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20774 + """ + + # A number 'n' big enough for expression 'n * n * train_size' to cause + # an overflow for signed 32-bit integer + big_number = 100000 + + # Definition of 'y' is a part of reproduction - population for at least + # one class should be in the same order of magnitude as size of X + X = np.arange(big_number) + y = X > (0.99 * big_number) + + split = train_test_split(X, y, stratify=y, train_size=0.25) + X_train, X_test, y_train, y_test = split + + assert X_train.size + X_test.size == big_number + assert y_train.size + y_test.size == big_number + + +@ignore_warnings +def test_train_test_split_pandas(): + # check train_test_split doesn't destroy pandas dataframe + types = [MockDataFrame] + try: + from pandas import DataFrame + + types.append(DataFrame) + except ImportError: + pass + for InputFeatureType in types: + # X dataframe + X_df = InputFeatureType(X) + X_train, X_test = train_test_split(X_df) + assert isinstance(X_train, InputFeatureType) + assert isinstance(X_test, InputFeatureType) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_train_test_split_sparse(sparse_container): + # check that train_test_split converts scipy sparse matrices + # to csr, as stated in the documentation + X = np.arange(100).reshape((10, 10)) + X_s = sparse_container(X) + X_train, X_test = train_test_split(X_s) + assert issparse(X_train) and X_train.format == "csr" + assert issparse(X_test) and X_test.format == "csr" + + +def test_train_test_split_mock_pandas(): + # X mock dataframe + X_df = MockDataFrame(X) + X_train, X_test = train_test_split(X_df) + assert isinstance(X_train, MockDataFrame) + assert isinstance(X_test, MockDataFrame) + X_train_arr, X_test_arr = train_test_split(X_df) + + +def test_train_test_split_list_input(): + # Check that when y is a list / list of string labels, it works. + X = np.ones(7) + y1 = ["1"] * 4 + ["0"] * 3 + y2 = np.hstack((np.ones(4), np.zeros(3))) + y3 = y2.tolist() + + for stratify in (True, False): + X_train1, X_test1, y_train1, y_test1 = train_test_split( + X, y1, stratify=y1 if stratify else None, random_state=0 + ) + X_train2, X_test2, y_train2, y_test2 = train_test_split( + X, y2, stratify=y2 if stratify else None, random_state=0 + ) + X_train3, X_test3, y_train3, y_test3 = train_test_split( + X, y3, stratify=y3 if stratify else None, random_state=0 + ) + + np.testing.assert_equal(X_train1, X_train2) + np.testing.assert_equal(y_train2, y_train3) + np.testing.assert_equal(X_test1, X_test3) + np.testing.assert_equal(y_test3, y_test2) + + +@pytest.mark.parametrize( + "test_size, train_size", + [(2.0, None), (1.0, None), (0.1, 0.95), (None, 1j), (11, None), (10, None), (8, 3)], +) +def test_shufflesplit_errors(test_size, train_size): + with pytest.raises(ValueError): + next(ShuffleSplit(test_size=test_size, train_size=train_size).split(X)) + + +def test_shufflesplit_reproducible(): + # Check that iterating twice on the ShuffleSplit gives the same + # sequence of train-test when the random_state is given + ss = ShuffleSplit(random_state=21) + assert_array_equal([a for a, b in ss.split(X)], [a for a, b in ss.split(X)]) + + +def test_stratifiedshufflesplit_list_input(): + # Check that when y is a list / list of string labels, it works. 
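+    # The three label containers below (list of str, float ndarray, list of
+    # float) encode the same class structure: four samples of one class and
+    # three of the other. With a fixed random_state the resulting index splits
+    # should therefore match element-wise regardless of how y is represented.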
+ sss = StratifiedShuffleSplit(test_size=2, random_state=42) + X = np.ones(7) + y1 = ["1"] * 4 + ["0"] * 3 + y2 = np.hstack((np.ones(4), np.zeros(3))) + y3 = y2.tolist() + + np.testing.assert_equal(list(sss.split(X, y1)), list(sss.split(X, y2))) + np.testing.assert_equal(list(sss.split(X, y3)), list(sss.split(X, y2))) + + +def test_train_test_split_allow_nans(): + # Check that train_test_split allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + train_test_split(X, y, test_size=0.2, random_state=42) + + +def test_check_cv(): + X = np.ones(9) + cv = check_cv(3, classifier=False) + # Use numpy.testing.assert_equal which recursively compares + # lists of lists + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) + cv = check_cv(3, y_binary, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_binary)), list(cv.split(X, y_binary)) + ) + + y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) + cv = check_cv(3, y_multiclass, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_multiclass)), list(cv.split(X, y_multiclass)) + ) + # also works with 2d multiclass + y_multiclass_2d = y_multiclass.reshape(-1, 1) + cv = check_cv(3, y_multiclass_2d, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_multiclass_2d)), + list(cv.split(X, y_multiclass_2d)), + ) + + assert not np.all( + next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] + == next(KFold(3).split(X, y_multiclass_2d))[0] + ) + + X = np.ones(5) + y_multilabel = np.array( + [[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 1], [0, 0, 1, 0]] + ) + cv = check_cv(3, y_multilabel, classifier=True) + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) + cv = check_cv(3, y_multioutput, classifier=True) + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + with pytest.raises(ValueError): + check_cv(cv="lolo") + + +def test_cv_iterable_wrapper(): + kf_iter = KFold().split(X, y) + kf_iter_wrapped = check_cv(kf_iter) + # Since the wrapped iterable is enlisted and stored, + # split can be called any number of times to produce + # consistent results. 
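+    # (Put differently: check_cv appears to materialize a bare (train, test)
+    # iterator into a list inside its wrapper, so the wrapped object replays
+    # the cached folds on every call to split, whereas the raw generator
+    # kf_iter would be exhausted after a single pass.)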
+ np.testing.assert_equal( + list(kf_iter_wrapped.split(X, y)), list(kf_iter_wrapped.split(X, y)) + ) + # If the splits are randomized, successive calls to split yields different + # results + kf_randomized_iter = KFold(shuffle=True, random_state=0).split(X, y) + kf_randomized_iter_wrapped = check_cv(kf_randomized_iter) + # numpy's assert_array_equal properly compares nested lists + np.testing.assert_equal( + list(kf_randomized_iter_wrapped.split(X, y)), + list(kf_randomized_iter_wrapped.split(X, y)), + ) + + try: + splits_are_equal = True + np.testing.assert_equal( + list(kf_iter_wrapped.split(X, y)), + list(kf_randomized_iter_wrapped.split(X, y)), + ) + except AssertionError: + splits_are_equal = False + assert not splits_are_equal, ( + "If the splits are randomized, " + "successive calls to split should yield different results" + ) + + +@pytest.mark.parametrize("kfold", [GroupKFold, StratifiedGroupKFold]) +def test_group_kfold(kfold): + rng = np.random.RandomState(0) + + # Parameters of the test + n_groups = 15 + n_samples = 1000 + n_splits = 5 + + X = y = np.ones(n_samples) + + # Construct the test data + tolerance = 0.05 * n_samples # 5 percent error allowed + groups = rng.randint(0, n_groups, n_samples) + + ideal_n_groups_per_fold = n_samples // n_splits + + len(np.unique(groups)) + # Get the test fold indices from the test set indices of each fold + folds = np.zeros(n_samples) + lkf = kfold(n_splits=n_splits) + for i, (_, test) in enumerate(lkf.split(X, y, groups)): + folds[test] = i + + # Check that folds have approximately the same size + assert len(folds) == len(groups) + for i in np.unique(folds): + assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold) + + # Check that each group appears only in 1 fold + for group in np.unique(groups): + assert len(np.unique(folds[groups == group])) == 1 + + # Check that no group is on both sides of the split + groups = np.asarray(groups, dtype=object) + for train, test in lkf.split(X, y, groups): + assert len(np.intersect1d(groups[train], groups[test])) == 0 + + # Construct the test data + groups = np.array( + [ + "Albert", + "Jean", + "Bertrand", + "Michel", + "Jean", + "Francis", + "Robert", + "Michel", + "Rachel", + "Lois", + "Michelle", + "Bernard", + "Marion", + "Laura", + "Jean", + "Rachel", + "Franck", + "John", + "Gael", + "Anna", + "Alix", + "Robert", + "Marion", + "David", + "Tony", + "Abel", + "Becky", + "Madmood", + "Cary", + "Mary", + "Alexandre", + "David", + "Francis", + "Barack", + "Abdoul", + "Rasha", + "Xi", + "Silvia", + ] + ) + + n_groups = len(np.unique(groups)) + n_samples = len(groups) + n_splits = 5 + tolerance = 0.05 * n_samples # 5 percent error allowed + ideal_n_groups_per_fold = n_samples // n_splits + + X = y = np.ones(n_samples) + + # Get the test fold indices from the test set indices of each fold + folds = np.zeros(n_samples) + for i, (_, test) in enumerate(lkf.split(X, y, groups)): + folds[test] = i + + # Check that folds have approximately the same size + assert len(folds) == len(groups) + for i in np.unique(folds): + assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold) + + # Check that each group appears only in 1 fold + with warnings.catch_warnings(): + warnings.simplefilter("ignore", FutureWarning) + for group in np.unique(groups): + assert len(np.unique(folds[groups == group])) == 1 + + # Check that no group is on both sides of the split + groups = np.asarray(groups, dtype=object) + for train, test in lkf.split(X, y, groups): + assert len(np.intersect1d(groups[train], 
groups[test])) == 0 + + # groups can also be a list + cv_iter = list(lkf.split(X, y, groups.tolist())) + for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups), cv_iter): + assert_array_equal(train1, train2) + assert_array_equal(test1, test2) + + # Should fail if there are more folds than groups + groups = np.array([1, 1, 1, 2, 2]) + X = y = np.ones(len(groups)) + with pytest.raises(ValueError, match="Cannot have number of splits.*greater"): + next(GroupKFold(n_splits=3).split(X, y, groups)) + + +def test_time_series_cv(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]] + + # Should fail if there are more folds than samples + with pytest.raises(ValueError, match="Cannot have number of folds.*greater"): + next(TimeSeriesSplit(n_splits=7).split(X)) + + tscv = TimeSeriesSplit(2) + + # Manually check that Time Series CV preserves the data + # ordering on toy datasets + splits = tscv.split(X[:-1]) + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [4, 5]) + + splits = TimeSeriesSplit(2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2]) + assert_array_equal(test, [3, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [5, 6]) + + # Check get_n_splits returns the correct number of splits + splits = TimeSeriesSplit(2).split(X) + n_splits_actual = len(list(splits)) + assert n_splits_actual == tscv.get_n_splits() + assert n_splits_actual == 2 + + +def _check_time_series_max_train_size(splits, check_splits, max_train_size): + for (train, test), (check_train, check_test) in zip(splits, check_splits): + assert_array_equal(test, check_test) + assert len(check_train) <= max_train_size + suffix_start = max(len(train) - max_train_size, 0) + assert_array_equal(check_train, train[suffix_start:]) + + +def test_time_series_max_train_size(): + X = np.zeros((6, 1)) + splits = TimeSeriesSplit(n_splits=3).split(X) + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=3) + + # Test for the case where the size of a fold is greater than max_train_size + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=2) + + # Test for the case where the size of each fold is less than max_train_size + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=2) + + +def test_time_series_test_size(): + X = np.zeros((10, 1)) + + # Test alone + splits = TimeSeriesSplit(n_splits=3, test_size=3).split(X) + + train, test = next(splits) + assert_array_equal(train, [0]) + assert_array_equal(test, [1, 2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6]) + assert_array_equal(test, [7, 8, 9]) + + # Test with max_train_size + splits = TimeSeriesSplit(n_splits=2, test_size=2, max_train_size=4).split(X) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4, 5]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [4, 5, 6, 7]) + assert_array_equal(test, [8, 9]) + + # Should fail with not enough data points for configuration 
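+    # (Quick sanity check of the numbers: 5 splits with test_size=2 would need
+    # 5 * 2 = 10 test samples out of the 10 available, leaving the first
+    # training window empty, hence the error.)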
+ with pytest.raises(ValueError, match="Too many splits.*with test_size"): + splits = TimeSeriesSplit(n_splits=5, test_size=2).split(X) + next(splits) + + +def test_time_series_gap(): + X = np.zeros((10, 1)) + + # Test alone + splits = TimeSeriesSplit(n_splits=2, gap=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [7, 8, 9]) + + # Test with max_train_size + splits = TimeSeriesSplit(n_splits=3, gap=2, max_train_size=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5]) + + train, test = next(splits) + assert_array_equal(train, [2, 3]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [4, 5]) + assert_array_equal(test, [8, 9]) + + # Test with test_size + splits = TimeSeriesSplit(n_splits=2, gap=2, max_train_size=4, test_size=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4, 5]) + assert_array_equal(test, [8, 9]) + + # Test with additional test_size + splits = TimeSeriesSplit(n_splits=2, gap=2, test_size=3).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [7, 8, 9]) + + # Verify proper error is thrown + with pytest.raises(ValueError, match="Too many splits.*and gap"): + splits = TimeSeriesSplit(n_splits=4, gap=2).split(X) + next(splits) + + +def test_nested_cv(): + # Test if nested cross validation works with different combinations of cv + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=15, n_classes=2, random_state=0) + groups = rng.randint(0, 5, 15) + + cvs = [ + LeaveOneGroupOut(), + StratifiedKFold(n_splits=2), + LeaveOneOut(), + GroupKFold(n_splits=3), + StratifiedKFold(), + StratifiedGroupKFold(), + StratifiedShuffleSplit(n_splits=3, random_state=0), + ] + + for inner_cv, outer_cv in combinations_with_replacement(cvs, 2): + gs = GridSearchCV( + DummyClassifier(), + param_grid={"strategy": ["stratified", "most_frequent"]}, + cv=inner_cv, + error_score="raise", + ) + cross_val_score( + gs, X=X, y=y, groups=groups, cv=outer_cv, params={"groups": groups} + ) + + +def test_build_repr(): + class MockSplitter: + def __init__(self, a, b=0, c=None): + self.a = a + self.b = b + self.c = c + + def __repr__(self): + return _build_repr(self) + + assert repr(MockSplitter(5, 6)) == "MockSplitter(a=5, b=6, c=None)" + + +@pytest.mark.parametrize( + "CVSplitter", (ShuffleSplit, GroupShuffleSplit, StratifiedShuffleSplit) +) +def test_shuffle_split_empty_trainset(CVSplitter): + cv = CVSplitter(test_size=0.99) + X, y = [[1]], [0] # 1 sample + with pytest.raises( + ValueError, + match=( + "With n_samples=1, test_size=0.99 and train_size=None, " + "the resulting train set will be empty" + ), + ): + next(cv.split(X, y, groups=[1])) + + +def test_train_test_split_empty_trainset(): + (X,) = [[1]] # 1 sample + with pytest.raises( + ValueError, + match=( + "With n_samples=1, test_size=0.99 and train_size=None, " + "the resulting train set will be empty" + ), + ): + train_test_split(X, test_size=0.99) + + X = [[1], [1], [1]] # 3 samples, ask for more than 2 thirds + with pytest.raises( + ValueError, + match=( + "With 
n_samples=3, test_size=0.67 and train_size=None, " + "the resulting train set will be empty" + ), + ): + train_test_split(X, test_size=0.67) + + +def test_leave_one_out_empty_trainset(): + # LeaveOneGroup out expect at least 2 groups so no need to check + cv = LeaveOneOut() + X, y = [[1]], [0] # 1 sample + with pytest.raises(ValueError, match="Cannot perform LeaveOneOut with n_samples=1"): + next(cv.split(X, y)) + + +def test_leave_p_out_empty_trainset(): + # No need to check LeavePGroupsOut + cv = LeavePOut(p=2) + X, y = [[1], [2]], [0, 3] # 2 samples + with pytest.raises( + ValueError, match="p=2 must be strictly less than the number of samples=2" + ): + next(cv.split(X, y, groups=[1, 2])) + + +@pytest.mark.parametrize("Klass", (KFold, StratifiedKFold, StratifiedGroupKFold)) +def test_random_state_shuffle_false(Klass): + # passing a non-default random_state when shuffle=False makes no sense + with pytest.raises(ValueError, match="has no effect since shuffle is False"): + Klass(3, shuffle=False, random_state=0) + + +@pytest.mark.parametrize( + "cv, expected", + [ + (KFold(), True), + (KFold(shuffle=True, random_state=123), True), + (StratifiedKFold(), True), + (StratifiedKFold(shuffle=True, random_state=123), True), + (StratifiedGroupKFold(shuffle=True, random_state=123), True), + (StratifiedGroupKFold(), True), + (RepeatedKFold(random_state=123), True), + (RepeatedStratifiedKFold(random_state=123), True), + (ShuffleSplit(random_state=123), True), + (GroupShuffleSplit(random_state=123), True), + (StratifiedShuffleSplit(random_state=123), True), + (GroupKFold(), True), + (TimeSeriesSplit(), True), + (LeaveOneOut(), True), + (LeaveOneGroupOut(), True), + (LeavePGroupsOut(n_groups=2), True), + (LeavePOut(p=2), True), + (KFold(shuffle=True, random_state=None), False), + (KFold(shuffle=True, random_state=None), False), + (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), + (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), + (RepeatedKFold(random_state=None), False), + (RepeatedKFold(random_state=np.random.RandomState(0)), False), + (RepeatedStratifiedKFold(random_state=None), False), + (RepeatedStratifiedKFold(random_state=np.random.RandomState(0)), False), + (ShuffleSplit(random_state=None), False), + (ShuffleSplit(random_state=np.random.RandomState(0)), False), + (GroupShuffleSplit(random_state=None), False), + (GroupShuffleSplit(random_state=np.random.RandomState(0)), False), + (StratifiedShuffleSplit(random_state=None), False), + (StratifiedShuffleSplit(random_state=np.random.RandomState(0)), False), + ], +) +def test_yields_constant_splits(cv, expected): + assert _yields_constant_splits(cv) == expected + + +@pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS]) +def test_splitter_get_metadata_routing(cv): + """Check get_metadata_routing returns the correct MetadataRouter.""" + assert hasattr(cv, "get_metadata_routing") + metadata = cv.get_metadata_routing() + if cv in GROUP_SPLITTERS: + assert metadata.split.requests["groups"] is True + elif cv in NO_GROUP_SPLITTERS: + assert not metadata.split.requests + + assert_request_is_empty(metadata, exclude=["split"]) + + +@pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS]) +def test_splitter_set_split_request(cv): + """Check set_split_request is defined for group splitters and not for others.""" + if cv in GROUP_SPLITTERS: + assert hasattr(cv, "set_split_request") + elif cv in NO_GROUP_SPLITTERS: + assert not hasattr(cv, 
"set_split_request") diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py new file mode 100644 index 0000000000000000000000000000000000000000..6c89f89afa68481c761e700bb231a7dafb452c65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py @@ -0,0 +1,848 @@ +from math import ceil + +import numpy as np +import pytest +from scipy.stats import expon, norm, randint + +from sklearn.datasets import make_classification +from sklearn.dummy import DummyClassifier +from sklearn.experimental import enable_halving_search_cv # noqa +from sklearn.model_selection import ( + GroupKFold, + GroupShuffleSplit, + HalvingGridSearchCV, + HalvingRandomSearchCV, + KFold, + LeaveOneGroupOut, + LeavePGroupsOut, + ShuffleSplit, + StratifiedKFold, + StratifiedShuffleSplit, +) +from sklearn.model_selection._search_successive_halving import ( + _SubsampleMetaSplitter, + _top_k, +) +from sklearn.model_selection.tests.test_search import ( + check_cv_results_array_types, + check_cv_results_keys, +) +from sklearn.svm import SVC, LinearSVC + + +class FastClassifier(DummyClassifier): + """Dummy classifier that accepts parameters a, b, ... z. + + These parameter don't affect the predictions and are useful for fast + grid searching.""" + + # update the constraints such that we accept all parameters from a to z + _parameter_constraints: dict = { + **DummyClassifier._parameter_constraints, + **{ + chr(key): "no_validation" # type: ignore + for key in range(ord("a"), ord("z") + 1) + }, + } + + def __init__( + self, strategy="stratified", random_state=None, constant=None, **kwargs + ): + super().__init__( + strategy=strategy, random_state=random_state, constant=constant + ) + + def get_params(self, deep=False): + params = super().get_params(deep=deep) + for char in range(ord("a"), ord("z") + 1): + params[chr(char)] = "whatever" + return params + + +class SometimesFailClassifier(DummyClassifier): + def __init__( + self, + strategy="stratified", + random_state=None, + constant=None, + n_estimators=10, + fail_fit=False, + fail_predict=False, + a=0, + ): + self.fail_fit = fail_fit + self.fail_predict = fail_predict + self.n_estimators = n_estimators + self.a = a + + super().__init__( + strategy=strategy, random_state=random_state, constant=constant + ) + + def fit(self, X, y): + if self.fail_fit: + raise Exception("fitting failed") + return super().fit(X, y) + + def predict(self, X): + if self.fail_predict: + raise Exception("predict failed") + return super().predict(X) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.FitFailedWarning") +@pytest.mark.filterwarnings("ignore:Scoring failed:UserWarning") +@pytest.mark.filterwarnings("ignore:One or more of the:UserWarning") +@pytest.mark.parametrize("HalvingSearch", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize("fail_at", ("fit", "predict")) +def test_nan_handling(HalvingSearch, fail_at): + """Check the selection of the best scores in presence of failure represented by + NaN values.""" + n_samples = 1_000 + X, y = make_classification(n_samples=n_samples, random_state=0) + + search = HalvingSearch( + SometimesFailClassifier(), + {f"fail_{fail_at}": [False, True], "a": range(3)}, + resource="n_estimators", + max_resources=6, + min_resources=1, + factor=2, + ) + + search.fit(X, y) + + # estimators that failed during fit/predict should always rank lower + # 
than ones where the fit/predict succeeded + assert not search.best_params_[f"fail_{fail_at}"] + scores = search.cv_results_["mean_test_score"] + ranks = search.cv_results_["rank_test_score"] + + # some scores should be NaN + assert np.isnan(scores).any() + + unique_nan_ranks = np.unique(ranks[np.isnan(scores)]) + # all NaN scores should have the same rank + assert unique_nan_ranks.shape[0] == 1 + # NaNs should have the lowest rank + assert (unique_nan_ranks[0] >= ranks).all() + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + ( + "aggressive_elimination," + "max_resources," + "expected_n_iterations," + "expected_n_required_iterations," + "expected_n_possible_iterations," + "expected_n_remaining_candidates," + "expected_n_candidates," + "expected_n_resources," + ), + [ + # notice how it loops at the beginning + # also, the number of candidates evaluated at the last iteration is + # <= factor + (True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]), + # no aggressive elimination: we end up with less iterations, and + # the number of candidates at the last iter is > factor, which isn't + # ideal + (False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]), + # # When the amount of resource isn't limited, aggressive_elimination + # # has no effect. Here the default min_resources='exhaust' will take + # # over. + (True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), + (False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), + ], +) +def test_aggressive_elimination( + Est, + aggressive_elimination, + max_resources, + expected_n_iterations, + expected_n_required_iterations, + expected_n_possible_iterations, + expected_n_remaining_candidates, + expected_n_candidates, + expected_n_resources, +): + # Test the aggressive_elimination parameter. 
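+    # Rough derivation of the expectations above (under the default
+    # min_resources for a 2-class problem with 5 CV folds, i.e. 20 samples):
+    # the 60 candidates need ceil(log3(60)) = 4 iterations to get down to a
+    # single one, but with max_resources=180 only 3 resource levels
+    # (20, 60, 180) fit. aggressive_elimination therefore replays the smallest
+    # level once (resources [20, 20, 60, 180]) and ends with 1 candidate,
+    # while the non-aggressive run stops after 3 iterations with 3 left.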
+ + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifier() + + if max_resources == "limited": + max_resources = 180 + else: + max_resources = n_samples + + sh = Est( + base_estimator, + param_grid, + aggressive_elimination=aggressive_elimination, + max_resources=max_resources, + factor=3, + ) + sh.set_params(verbose=True) # just for test coverage + + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + assert sh.n_iterations_ == expected_n_iterations + assert sh.n_required_iterations_ == expected_n_required_iterations + assert sh.n_possible_iterations_ == expected_n_possible_iterations + assert sh.n_resources_ == expected_n_resources + assert sh.n_candidates_ == expected_n_candidates + assert sh.n_remaining_candidates_ == expected_n_remaining_candidates + assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_ + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + ( + "min_resources," + "max_resources," + "expected_n_iterations," + "expected_n_possible_iterations," + "expected_n_resources," + ), + [ + # with enough resources + ("smallest", "auto", 2, 4, [20, 60]), + # with enough resources but min_resources set manually + (50, "auto", 2, 3, [50, 150]), + # without enough resources, only one iteration can be done + ("smallest", 30, 1, 1, [20]), + # with exhaust: use as much resources as possible at the last iter + ("exhaust", "auto", 2, 2, [333, 999]), + ("exhaust", 1000, 2, 2, [333, 999]), + ("exhaust", 999, 2, 2, [333, 999]), + ("exhaust", 600, 2, 2, [200, 600]), + ("exhaust", 599, 2, 2, [199, 597]), + ("exhaust", 300, 2, 2, [100, 300]), + ("exhaust", 60, 2, 2, [20, 60]), + ("exhaust", 50, 1, 1, [20]), + ("exhaust", 20, 1, 1, [20]), + ], +) +def test_min_max_resources( + Est, + min_resources, + max_resources, + expected_n_iterations, + expected_n_possible_iterations, + expected_n_resources, +): + # Test the min_resources and max_resources parameters, and how they affect + # the number of resources used at each iteration + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": [1, 2], "b": [1, 2, 3]} + base_estimator = FastClassifier() + + sh = Est( + base_estimator, + param_grid, + factor=3, + min_resources=min_resources, + max_resources=max_resources, + ) + if Est is HalvingRandomSearchCV: + sh.set_params(n_candidates=6) # same number as with the grid + + sh.fit(X, y) + + expected_n_required_iterations = 2 # given 6 combinations and factor = 3 + assert sh.n_iterations_ == expected_n_iterations + assert sh.n_required_iterations_ == expected_n_required_iterations + assert sh.n_possible_iterations_ == expected_n_possible_iterations + assert sh.n_resources_ == expected_n_resources + if min_resources == "exhaust": + assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_) + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +@pytest.mark.parametrize( + "max_resources, n_iterations, n_possible_iterations", + [ + ("auto", 5, 9), # all resources are used + (1024, 5, 9), + (700, 5, 8), + (512, 5, 8), + (511, 5, 7), + (32, 4, 4), + (31, 3, 3), + (16, 3, 3), + (4, 1, 1), # max_resources == min_resources, only one iteration is + # possible + ], +) +def test_n_iterations(Est, max_resources, 
n_iterations, n_possible_iterations): + # test the number of actual iterations that were run depending on + # max_resources + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=1) + param_grid = {"a": [1, 2], "b": list(range(10))} + base_estimator = FastClassifier() + factor = 2 + + sh = Est( + base_estimator, + param_grid, + cv=2, + factor=factor, + max_resources=max_resources, + min_resources=4, + ) + if Est is HalvingRandomSearchCV: + sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV + sh.fit(X, y) + assert sh.n_required_iterations_ == 5 + assert sh.n_iterations_ == n_iterations + assert sh.n_possible_iterations_ == n_possible_iterations + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +def test_resource_parameter(Est): + # Test the resource parameter + + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": [1, 2], "b": list(range(10))} + base_estimator = FastClassifier() + sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3) + sh.fit(X, y) + assert set(sh.n_resources_) == set([1, 3, 9]) + for r_i, params, param_c in zip( + sh.cv_results_["n_resources"], + sh.cv_results_["params"], + sh.cv_results_["param_c"], + ): + assert r_i == params["c"] == param_c + + with pytest.raises( + ValueError, match="Cannot use resource=1234 which is not supported " + ): + sh = HalvingGridSearchCV( + base_estimator, param_grid, cv=2, resource="1234", max_resources=10 + ) + sh.fit(X, y) + + with pytest.raises( + ValueError, + match=( + "Cannot use parameter c as the resource since it is part " + "of the searched parameters." + ), + ): + param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]} + sh = HalvingGridSearchCV( + base_estimator, param_grid, cv=2, resource="c", max_resources=10 + ) + sh.fit(X, y) + + +@pytest.mark.parametrize( + "max_resources, n_candidates, expected_n_candidates", + [ + (512, "exhaust", 128), # generate exactly as much as needed + (32, "exhaust", 8), + (32, 8, 8), + (32, 7, 7), # ask for less than what we could + (32, 9, 9), # ask for more than 'reasonable' + ], +) +def test_random_search(max_resources, n_candidates, expected_n_candidates): + # Test random search and make sure the number of generated candidates is + # as expected + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": norm, "b": norm} + base_estimator = FastClassifier() + sh = HalvingRandomSearchCV( + base_estimator, + param_grid, + n_candidates=n_candidates, + cv=2, + max_resources=max_resources, + factor=2, + min_resources=4, + ) + sh.fit(X, y) + assert sh.n_candidates_[0] == expected_n_candidates + if n_candidates == "exhaust": + # Make sure 'exhaust' makes the last iteration use as much resources as + # we can + assert sh.n_resources_[-1] == max_resources + + +@pytest.mark.parametrize( + "param_distributions, expected_n_candidates", + [ + ({"a": [1, 2]}, 2), # all lists, sample less than n_candidates + ({"a": randint(1, 3)}, 10), # not all list, respect n_candidates + ], +) +def test_random_search_discrete_distributions( + param_distributions, expected_n_candidates +): + # Make sure random search samples the appropriate number of candidates when + # we ask for more than what's possible. How many parameters are sampled + # depends whether the distributions are 'all lists' or not (see + # ParameterSampler for details). 
This is somewhat redundant with the checks + # in ParameterSampler but interaction bugs were discovered during + # development of SH + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=0) + base_estimator = FastClassifier() + sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10) + sh.fit(X, y) + assert sh.n_candidates_[0] == expected_n_candidates + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + "params, expected_error_message", + [ + ( + {"resource": "not_a_parameter"}, + "Cannot use resource=not_a_parameter which is not supported", + ), + ( + {"resource": "a", "max_resources": 100}, + "Cannot use parameter a as the resource since it is part of", + ), + ( + {"max_resources": "auto", "resource": "b"}, + "resource can only be 'n_samples' when max_resources='auto'", + ), + ( + {"min_resources": 15, "max_resources": 14}, + "min_resources_=15 is greater than max_resources_=14", + ), + ({"cv": KFold(shuffle=True)}, "must yield consistent folds"), + ({"cv": ShuffleSplit()}, "must yield consistent folds"), + ], +) +def test_input_errors(Est, params, expected_error_message): + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X, y = make_classification(100) + + sh = Est(base_estimator, param_grid, **params) + + with pytest.raises(ValueError, match=expected_error_message): + sh.fit(X, y) + + +@pytest.mark.parametrize( + "params, expected_error_message", + [ + ( + {"n_candidates": "exhaust", "min_resources": "exhaust"}, + "cannot be both set to 'exhaust'", + ), + ], +) +def test_input_errors_randomized(params, expected_error_message): + # tests specific to HalvingRandomSearchCV + + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X, y = make_classification(100) + + sh = HalvingRandomSearchCV(base_estimator, param_grid, **params) + + with pytest.raises(ValueError, match=expected_error_message): + sh.fit(X, y) + + +@pytest.mark.parametrize( + "fraction, subsample_test, expected_train_size, expected_test_size", + [ + (0.5, True, 40, 10), + (0.5, False, 40, 20), + (0.2, True, 16, 4), + (0.2, False, 16, 20), + ], +) +def test_subsample_splitter_shapes( + fraction, subsample_test, expected_train_size, expected_test_size +): + # Make sure splits returned by SubsampleMetaSplitter are of appropriate + # size + + n_samples = 100 + X, y = make_classification(n_samples) + cv = _SubsampleMetaSplitter( + base_cv=KFold(5), + fraction=fraction, + subsample_test=subsample_test, + random_state=None, + ) + + for train, test in cv.split(X, y): + assert train.shape[0] == expected_train_size + assert test.shape[0] == expected_test_size + if subsample_test: + assert train.shape[0] + test.shape[0] == int(n_samples * fraction) + else: + assert test.shape[0] == n_samples // cv.base_cv.get_n_splits() + + +@pytest.mark.parametrize("subsample_test", (True, False)) +def test_subsample_splitter_determinism(subsample_test): + # Make sure _SubsampleMetaSplitter is consistent across calls to split(): + # - we're OK having training sets differ (they're always sampled with a + # different fraction anyway) + # - when we don't subsample the test set, we want it to be always the same. + # This check is the most important. This is ensured by the determinism + # of the base_cv. 
+ + # Note: we could force both train and test splits to be always the same if + # we drew an int seed in _SubsampleMetaSplitter.__init__ + + n_samples = 100 + X, y = make_classification(n_samples) + cv = _SubsampleMetaSplitter( + base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None + ) + + folds_a = list(cv.split(X, y, groups=None)) + folds_b = list(cv.split(X, y, groups=None)) + + for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b): + assert not np.all(train_a == train_b) + + if subsample_test: + assert not np.all(test_a == test_b) + else: + assert np.all(test_a == test_b) + assert np.all(X[test_a] == X[test_b]) + + +@pytest.mark.parametrize( + "k, itr, expected", + [ + (1, 0, ["c"]), + (2, 0, ["a", "c"]), + (4, 0, ["d", "b", "a", "c"]), + (10, 0, ["d", "b", "a", "c"]), + (1, 1, ["e"]), + (2, 1, ["f", "e"]), + (10, 1, ["f", "e"]), + (1, 2, ["i"]), + (10, 2, ["g", "h", "i"]), + ], +) +def test_top_k(k, itr, expected): + results = { # this isn't a 'real world' result dict + "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2], + "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9], + "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"], + } + got = _top_k(results, k=k, itr=itr) + assert np.all(got == expected) + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +def test_cv_results(Est): + # test that the cv_results_ matches correctly the logic of the + # tournament: in particular that the candidates continued in each + # successive iteration are those that were best in the previous iteration + pd = pytest.importorskip("pandas") + + rng = np.random.RandomState(0) + + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifier() + + # generate random scores: we want to avoid ties, which would otherwise + # mess with the ordering and make testing harder + def scorer(est, X, y): + return rng.rand() + + sh = Est(base_estimator, param_grid, factor=2, scoring=scorer) + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + # non-regression check for + # https://github.com/scikit-learn/scikit-learn/issues/19203 + assert isinstance(sh.cv_results_["iter"], np.ndarray) + assert isinstance(sh.cv_results_["n_resources"], np.ndarray) + + cv_results_df = pd.DataFrame(sh.cv_results_) + + # just make sure we don't have ties + assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df) + + cv_results_df["params_str"] = cv_results_df["params"].apply(str) + table = cv_results_df.pivot( + index="params_str", columns="iter", values="mean_test_score" + ) + + # table looks like something like this: + # iter 0 1 2 3 4 5 + # params_str + # {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN + # {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN + # {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN + # {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN + # {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN + # ... + + # where a NaN indicates that the candidate wasn't evaluated at a given + # iteration, because it wasn't part of the top-K at some previous + # iteration. We here make sure that candidates that aren't in the top-k at + # any given iteration are indeed not evaluated at the subsequent + # iterations. 
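+    # In other words: with factor=2 roughly the best half of the candidates
+    # survives each round (n_candidates_[i + 1] == ceil(n_i / 2)), so a NaN at
+    # iteration i must stay NaN at every later iteration, the number of
+    # surviving rows must match n_candidates_[i + 1], and every kept candidate
+    # must have outscored every candidate discarded at that round. The loop
+    # below checks exactly that.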
+ nan_mask = pd.isna(table) + n_iter = sh.n_iterations_ + for it in range(n_iter - 1): + already_discarded_mask = nan_mask[it] + + # make sure that if a candidate is already discarded, we don't evaluate + # it later + assert ( + already_discarded_mask & nan_mask[it + 1] == already_discarded_mask + ).all() + + # make sure that the number of discarded candidate is correct + discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1] + kept_mask = ~already_discarded_mask & ~discarded_now_mask + assert kept_mask.sum() == sh.n_candidates_[it + 1] + + # make sure that all discarded candidates have a lower score than the + # kept candidates + discarded_max_score = table[it].where(discarded_now_mask).max() + kept_min_score = table[it].where(kept_mask).min() + assert discarded_max_score < kept_min_score + + # We now make sure that the best candidate is chosen only from the last + # iteration. + # We also make sure this is true even if there were higher scores in + # earlier rounds (this isn't generally the case, but worth ensuring it's + # possible). + + last_iter = cv_results_df["iter"].max() + idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][ + "mean_test_score" + ].idxmax() + idx_best_all_iters = cv_results_df["mean_test_score"].idxmax() + + assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"] + assert ( + cv_results_df.iloc[idx_best_last_iter]["mean_test_score"] + < cv_results_df.iloc[idx_best_all_iters]["mean_test_score"] + ) + assert ( + cv_results_df.iloc[idx_best_last_iter]["params"] + != cv_results_df.iloc[idx_best_all_iters]["params"] + ) + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +def test_base_estimator_inputs(Est): + # make sure that the base estimators are passed the correct parameters and + # number of samples at each iteration. + pd = pytest.importorskip("pandas") + + passed_n_samples_fit = [] + passed_n_samples_predict = [] + passed_params = [] + + class FastClassifierBookKeeping(FastClassifier): + def fit(self, X, y): + passed_n_samples_fit.append(X.shape[0]) + return super().fit(X, y) + + def predict(self, X): + passed_n_samples_predict.append(X.shape[0]) + return super().predict(X) + + def set_params(self, **params): + passed_params.append(params) + return super().set_params(**params) + + n_samples = 1024 + n_splits = 2 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifierBookKeeping() + + sh = Est( + base_estimator, + param_grid, + factor=2, + cv=n_splits, + return_train_score=False, + refit=False, + ) + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + assert len(passed_n_samples_fit) == len(passed_n_samples_predict) + passed_n_samples = [ + x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict) + ] + + # Lists are of length n_splits * n_iter * n_candidates_at_i. + # Each chunk of size n_splits corresponds to the n_splits folds for the + # same candidate at the same iteration, so they contain equal values. 
We + # subsample such that the lists are of length n_iter * n_candidates_at_it + passed_n_samples = passed_n_samples[::n_splits] + passed_params = passed_params[::n_splits] + + cv_results_df = pd.DataFrame(sh.cv_results_) + + assert len(passed_params) == len(passed_n_samples) == len(cv_results_df) + + uniques, counts = np.unique(passed_n_samples, return_counts=True) + assert (sh.n_resources_ == uniques).all() + assert (sh.n_candidates_ == counts).all() + + assert (cv_results_df["params"] == passed_params).all() + assert (cv_results_df["n_resources"] == passed_n_samples).all() + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +def test_groups_support(Est): + # Check if ValueError (when groups is None) propagates to + # HalvingGridSearchCV and HalvingRandomSearchCV + # And also check if groups is correctly passed to the cv object + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=50, n_classes=2, random_state=0) + groups = rng.randint(0, 3, 50) + + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [1]} + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(n_splits=3), + GroupShuffleSplit(random_state=0), + ] + error_msg = "The 'groups' parameter should not be None." + for cv in group_cvs: + gs = Est(clf, grid, cv=cv, random_state=0) + with pytest.raises(ValueError, match=error_msg): + gs.fit(X, y) + gs.fit(X, y, groups=groups) + + non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)] + for cv in non_group_cvs: + gs = Est(clf, grid, cv=cv) + # Should not raise an error + gs.fit(X, y) + + +@pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV]) +def test_min_resources_null(SearchCV): + """Check that we raise an error if the minimum resources is set to 0.""" + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X = np.empty(0).reshape(0, 3) + + search = SearchCV(base_estimator, param_grid, min_resources="smallest") + + err_msg = "min_resources_=0: you might have passed an empty dataset X." + with pytest.raises(ValueError, match=err_msg): + search.fit(X, []) + + +@pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV]) +def test_select_best_index(SearchCV): + """Check the selection strategy of the halving search.""" + results = { # this isn't a 'real world' result dict + "iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]), + "mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]), + "params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]), + } + + # we expect the index of 'i' + best_index = SearchCV._select_best_index(None, None, results) + assert best_index == 8 + + +def test_halving_random_search_list_of_dicts(): + """Check the behaviour of the `HalvingRandomSearchCV` with `param_distribution` + being a list of dictionary. 
+ """ + X, y = make_classification(n_samples=150, n_features=4, random_state=42) + + params = [ + {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)}, + {"kernel": ["poly"], "degree": [2, 3]}, + ] + param_keys = ( + "param_C", + "param_degree", + "param_gamma", + "param_kernel", + ) + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + extra_keys = ("n_resources", "iter") + + search = HalvingRandomSearchCV( + SVC(), cv=3, param_distributions=params, return_train_score=True, random_state=0 + ) + search.fit(X, y) + n_candidates = sum(search.n_candidates_) + cv_results = search.cv_results_ + # Check results structure + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates, extra_keys) + check_cv_results_array_types(search, param_keys, score_keys) + + assert all( + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ) + assert all( + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ecfd14f45a3a75f24a9d0258bcf768a9365704 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py @@ -0,0 +1,2621 @@ +"""Test the validation module""" +import os +import re +import sys +import tempfile +import warnings +from functools import partial +from io import StringIO +from time import sleep + +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn.base import BaseEstimator, clone +from sklearn.cluster import KMeans +from sklearn.datasets import ( + load_diabetes, + load_digits, + load_iris, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.ensemble import RandomForestClassifier +from sklearn.exceptions import FitFailedWarning +from sklearn.impute import SimpleImputer +from sklearn.linear_model import ( + LogisticRegression, + PassiveAggressiveClassifier, + Ridge, + RidgeClassifier, + SGDClassifier, +) +from sklearn.metrics import ( + accuracy_score, + check_scoring, + confusion_matrix, + explained_variance_score, + make_scorer, + mean_squared_error, + precision_recall_fscore_support, + precision_score, + r2_score, +) +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + ShuffleSplit, + StratifiedKFold, + cross_val_predict, + cross_val_score, + cross_validate, + learning_curve, + permutation_test_score, + validation_curve, +) +from sklearn.model_selection._validation import ( + _check_is_permutation, + _fit_and_score, + _score, +) +from sklearn.model_selection.tests.common import OneTimeSplitter +from sklearn.model_selection.tests.test_search import FailingClassifier +from sklearn.multiclass import OneVsRestClassifier +from 
sklearn.neighbors import KNeighborsClassifier +from sklearn.neural_network import MLPRegressor +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import LabelEncoder, scale +from sklearn.svm import SVC, LinearSVC +from sklearn.tests.metadata_routing_common import ( + ConsumingClassifier, + ConsumingScorer, + ConsumingSplitter, + _Registry, + check_recorded_metadata, +) +from sklearn.utils import shuffle +from sklearn.utils._mocking import CheckingClassifier, MockDataFrame +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + + +class MockImprovingEstimator(BaseEstimator): + """Dummy classifier to test the learning curve""" + + def __init__(self, n_max_train_sizes): + self.n_max_train_sizes = n_max_train_sizes + self.train_sizes = 0 + self.X_subset = None + + def fit(self, X_subset, y_subset=None): + self.X_subset = X_subset + self.train_sizes = X_subset.shape[0] + return self + + def predict(self, X): + raise NotImplementedError + + def score(self, X=None, Y=None): + # training score becomes worse (2 -> 1), test error better (0 -> 1) + if self._is_training_data(X): + return 2.0 - float(self.train_sizes) / self.n_max_train_sizes + else: + return float(self.train_sizes) / self.n_max_train_sizes + + def _is_training_data(self, X): + return X is self.X_subset + + +class MockIncrementalImprovingEstimator(MockImprovingEstimator): + """Dummy classifier that provides partial_fit""" + + def __init__(self, n_max_train_sizes, expected_fit_params=None): + super().__init__(n_max_train_sizes) + self.x = None + self.expected_fit_params = expected_fit_params + + def _is_training_data(self, X): + return self.x in X + + def partial_fit(self, X, y=None, **params): + self.train_sizes += X.shape[0] + self.x = X[0] + if self.expected_fit_params: + missing = set(self.expected_fit_params) - set(params) + if missing: + raise AssertionError( + f"Expected fit parameter(s) {list(missing)} not seen." + ) + for key, value in params.items(): + if key in self.expected_fit_params and _num_samples( + value + ) != _num_samples(X): + raise AssertionError( + f"Fit parameter {key} has length {_num_samples(value)}" + f"; expected {_num_samples(X)}." 
+ ) + + +class MockEstimatorWithParameter(BaseEstimator): + """Dummy classifier to test the validation curve""" + + def __init__(self, param=0.5): + self.X_subset = None + self.param = param + + def fit(self, X_subset, y_subset): + self.X_subset = X_subset + self.train_sizes = X_subset.shape[0] + return self + + def predict(self, X): + raise NotImplementedError + + def score(self, X=None, y=None): + return self.param if self._is_training_data(X) else 1 - self.param + + def _is_training_data(self, X): + return X is self.X_subset + + +class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter): + """Dummy classifier that disallows repeated calls of fit method""" + + def fit(self, X_subset, y_subset): + assert not hasattr(self, "fit_called_"), "fit is called the second time" + self.fit_called_ = True + return super().fit(X_subset, y_subset) + + def predict(self, X): + raise NotImplementedError + + +class MockClassifier: + """Dummy classifier to test the cross-validation""" + + def __init__(self, a=0, allow_nd=False): + self.a = a + self.allow_nd = allow_nd + + def fit( + self, + X, + Y=None, + sample_weight=None, + class_prior=None, + sparse_sample_weight=None, + sparse_param=None, + dummy_int=None, + dummy_str=None, + dummy_obj=None, + callback=None, + ): + """The dummy arguments are to test that this fit function can + accept non-array arguments through cross-validation, such as: + - int + - str (this is actually array-like) + - object + - function + """ + self.dummy_int = dummy_int + self.dummy_str = dummy_str + self.dummy_obj = dummy_obj + if callback is not None: + callback(self) + + if self.allow_nd: + X = X.reshape(len(X), -1) + if X.ndim >= 3 and not self.allow_nd: + raise ValueError("X cannot be d") + if sample_weight is not None: + assert sample_weight.shape[0] == X.shape[0], ( + "MockClassifier extra fit_param " + "sample_weight.shape[0] is {0}, should be {1}".format( + sample_weight.shape[0], X.shape[0] + ) + ) + if class_prior is not None: + assert class_prior.shape[0] == len(np.unique(y)), ( + "MockClassifier extra fit_param class_prior.shape[0]" + " is {0}, should be {1}".format(class_prior.shape[0], len(np.unique(y))) + ) + if sparse_sample_weight is not None: + fmt = ( + "MockClassifier extra fit_param sparse_sample_weight" + ".shape[0] is {0}, should be {1}" + ) + assert sparse_sample_weight.shape[0] == X.shape[0], fmt.format( + sparse_sample_weight.shape[0], X.shape[0] + ) + if sparse_param is not None: + fmt = ( + "MockClassifier extra fit_param sparse_param.shape " + "is ({0}, {1}), should be ({2}, {3})" + ) + assert sparse_param.shape == P.shape, fmt.format( + sparse_param.shape[0], + sparse_param.shape[1], + P.shape[0], + P.shape[1], + ) + return self + + def predict(self, T): + if self.allow_nd: + T = T.reshape(len(T), -1) + return T[:, 0] + + def predict_proba(self, T): + return T + + def score(self, X=None, Y=None): + return 1.0 / (1 + np.abs(self.a)) + + def get_params(self, deep=False): + return {"a": self.a, "allow_nd": self.allow_nd} + + +# XXX: use 2D array, since 1D X is being detected as a single sample in +# check_consistent_length +X = np.ones((10, 2)) +y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]) +# The number of samples per class needs to be > n_splits, +# for StratifiedKFold(n_splits=3) +y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3]) +P = np.eye(5) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score(coo_container): + clf = MockClassifier() + X_sparse = coo_container(X) + + for a in range(-10, 10): + clf.a = 
a + # Smoke test + scores = cross_val_score(clf, X, y2) + assert_array_equal(scores, clf.score(X, y2)) + + # test with multioutput y + multioutput_y = np.column_stack([y2, y2[::-1]]) + scores = cross_val_score(clf, X_sparse, multioutput_y) + assert_array_equal(scores, clf.score(X_sparse, multioutput_y)) + + scores = cross_val_score(clf, X_sparse, y2) + assert_array_equal(scores, clf.score(X_sparse, y2)) + + # test with multioutput y + scores = cross_val_score(clf, X_sparse, multioutput_y) + assert_array_equal(scores, clf.score(X_sparse, multioutput_y)) + + # test with X and y as list + list_check = lambda x: isinstance(x, list) + clf = CheckingClassifier(check_X=list_check) + scores = cross_val_score(clf, X.tolist(), y2.tolist(), cv=3) + + clf = CheckingClassifier(check_y=list_check) + scores = cross_val_score(clf, X, y2.tolist(), cv=3) + + # test with 3d X and + X_3d = X[:, :, np.newaxis] + clf = MockClassifier(allow_nd=True) + scores = cross_val_score(clf, X_3d, y2) + + clf = MockClassifier(allow_nd=False) + with pytest.raises(ValueError): + cross_val_score(clf, X_3d, y2, error_score="raise") + + +def test_cross_validate_many_jobs(): + # regression test for #12154: cv='warn' with n_jobs>1 trigger a copy of + # the parameters leading to a failure in check_cv due to cv is 'warn' + # instead of cv == 'warn'. + X, y = load_iris(return_X_y=True) + clf = SVC(gamma="auto") + grid = GridSearchCV(clf, param_grid={"C": [1, 10]}) + cross_validate(grid, X, y, n_jobs=2) + + +def test_cross_validate_invalid_scoring_param(): + X, y = make_classification(random_state=0) + estimator = MockClassifier() + + # Test the errors + error_message_regexp = ".*must be unique strings.*" + + # List/tuple of callables should raise a message advising users to use + # dict of names to callables mapping + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate( + estimator, + X, + y, + scoring=(make_scorer(precision_score), make_scorer(accuracy_score)), + ) + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate(estimator, X, y, scoring=(make_scorer(precision_score),)) + + # So should empty lists/tuples + with pytest.raises(ValueError, match=error_message_regexp + "Empty list.*"): + cross_validate(estimator, X, y, scoring=()) + + # So should duplicated entries + with pytest.raises(ValueError, match=error_message_regexp + "Duplicate.*"): + cross_validate(estimator, X, y, scoring=("f1_micro", "f1_micro")) + + # Nested Lists should raise a generic error message + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate(estimator, X, y, scoring=[[make_scorer(precision_score)]]) + + # Empty dict should raise invalid scoring error + with pytest.raises(ValueError, match="An empty dict"): + cross_validate(estimator, X, y, scoring=(dict())) + + multiclass_scorer = make_scorer(precision_recall_fscore_support) + + # Multiclass Scorers that return multiple values are not supported yet + # the warning message we're expecting to see + warning_message = ( + "Scoring failed. The score on this train-test " + f"partition for these parameters will be set to {np.nan}. 
" + "Details: \n" + ) + + with pytest.warns(UserWarning, match=warning_message): + cross_validate(estimator, X, y, scoring=multiclass_scorer) + + with pytest.warns(UserWarning, match=warning_message): + cross_validate(estimator, X, y, scoring={"foo": multiclass_scorer}) + + +def test_cross_validate_nested_estimator(): + # Non-regression test to ensure that nested + # estimators are properly returned in a list + # https://github.com/scikit-learn/scikit-learn/pull/17745 + (X, y) = load_iris(return_X_y=True) + pipeline = Pipeline( + [ + ("imputer", SimpleImputer()), + ("classifier", MockClassifier()), + ] + ) + + results = cross_validate(pipeline, X, y, return_estimator=True) + estimators = results["estimator"] + + assert isinstance(estimators, list) + assert all(isinstance(estimator, Pipeline) for estimator in estimators) + + +@pytest.mark.parametrize("use_sparse", [False, True]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cross_validate(use_sparse: bool, csr_container): + # Compute train and test mse/r2 scores + cv = KFold() + + # Regression + X_reg, y_reg = make_regression(n_samples=30, random_state=0) + reg = Ridge(random_state=0) + + # Classification + X_clf, y_clf = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + + if use_sparse: + X_reg = csr_container(X_reg) + X_clf = csr_container(X_clf) + + for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)): + # It's okay to evaluate regression metrics on classification too + mse_scorer = check_scoring(est, scoring="neg_mean_squared_error") + r2_scorer = check_scoring(est, scoring="r2") + train_mse_scores = [] + test_mse_scores = [] + train_r2_scores = [] + test_r2_scores = [] + fitted_estimators = [] + + for train, test in cv.split(X, y): + est = clone(est).fit(X[train], y[train]) + train_mse_scores.append(mse_scorer(est, X[train], y[train])) + train_r2_scores.append(r2_scorer(est, X[train], y[train])) + test_mse_scores.append(mse_scorer(est, X[test], y[test])) + test_r2_scores.append(r2_scorer(est, X[test], y[test])) + fitted_estimators.append(est) + + train_mse_scores = np.array(train_mse_scores) + test_mse_scores = np.array(test_mse_scores) + train_r2_scores = np.array(train_r2_scores) + test_r2_scores = np.array(test_r2_scores) + fitted_estimators = np.array(fitted_estimators) + + scores = ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) + + # To ensure that the test does not suffer from + # large statistical fluctuations due to slicing small datasets, + # we pass the cross-validation instance + check_cross_validate_single_metric(est, X, y, scores, cv) + check_cross_validate_multi_metric(est, X, y, scores, cv) + + +def check_cross_validate_single_metric(clf, X, y, scores, cv): + ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) = scores + # Test single metric evaluation when scoring is string or singleton list + for return_train_score, dict_len in ((True, 4), (False, 3)): + # Single metric passed as a string + if return_train_score: + mse_scores_dict = cross_validate( + clf, + X, + y, + scoring="neg_mean_squared_error", + return_train_score=True, + cv=cv, + ) + assert_array_almost_equal(mse_scores_dict["train_score"], train_mse_scores) + else: + mse_scores_dict = cross_validate( + clf, + X, + y, + scoring="neg_mean_squared_error", + return_train_score=False, + cv=cv, + ) + assert isinstance(mse_scores_dict, dict) + assert len(mse_scores_dict) == dict_len + 
assert_array_almost_equal(mse_scores_dict["test_score"], test_mse_scores) + + # Single metric passed as a list + if return_train_score: + # It must be True by default - deprecated + r2_scores_dict = cross_validate( + clf, X, y, scoring=["r2"], return_train_score=True, cv=cv + ) + assert_array_almost_equal(r2_scores_dict["train_r2"], train_r2_scores, True) + else: + r2_scores_dict = cross_validate( + clf, X, y, scoring=["r2"], return_train_score=False, cv=cv + ) + assert isinstance(r2_scores_dict, dict) + assert len(r2_scores_dict) == dict_len + assert_array_almost_equal(r2_scores_dict["test_r2"], test_r2_scores) + + # Test return_estimator option + mse_scores_dict = cross_validate( + clf, X, y, scoring="neg_mean_squared_error", return_estimator=True, cv=cv + ) + for k, est in enumerate(mse_scores_dict["estimator"]): + est_coef = est.coef_.copy() + if issparse(est_coef): + est_coef = est_coef.toarray() + + fitted_est_coef = fitted_estimators[k].coef_.copy() + if issparse(fitted_est_coef): + fitted_est_coef = fitted_est_coef.toarray() + + assert_almost_equal(est_coef, fitted_est_coef) + assert_almost_equal(est.intercept_, fitted_estimators[k].intercept_) + + +def check_cross_validate_multi_metric(clf, X, y, scores, cv): + # Test multimetric evaluation when scoring is a list / dict + ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) = scores + + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + return { + "r2": r2_score(y, y_pred), + "neg_mean_squared_error": -mean_squared_error(y, y_pred), + } + + all_scoring = ( + ("r2", "neg_mean_squared_error"), + { + "r2": make_scorer(r2_score), + "neg_mean_squared_error": "neg_mean_squared_error", + }, + custom_scorer, + ) + + keys_sans_train = { + "test_r2", + "test_neg_mean_squared_error", + "fit_time", + "score_time", + } + keys_with_train = keys_sans_train.union( + {"train_r2", "train_neg_mean_squared_error"} + ) + + for return_train_score in (True, False): + for scoring in all_scoring: + if return_train_score: + # return_train_score must be True by default - deprecated + cv_results = cross_validate( + clf, X, y, scoring=scoring, return_train_score=True, cv=cv + ) + assert_array_almost_equal(cv_results["train_r2"], train_r2_scores) + assert_array_almost_equal( + cv_results["train_neg_mean_squared_error"], train_mse_scores + ) + else: + cv_results = cross_validate( + clf, X, y, scoring=scoring, return_train_score=False, cv=cv + ) + assert isinstance(cv_results, dict) + assert set(cv_results.keys()) == ( + keys_with_train if return_train_score else keys_sans_train + ) + assert_array_almost_equal(cv_results["test_r2"], test_r2_scores) + assert_array_almost_equal( + cv_results["test_neg_mean_squared_error"], test_mse_scores + ) + + # Make sure all the arrays are of np.ndarray type + assert type(cv_results["test_r2"]) == np.ndarray + assert type(cv_results["test_neg_mean_squared_error"]) == np.ndarray + assert type(cv_results["fit_time"]) == np.ndarray + assert type(cv_results["score_time"]) == np.ndarray + + # Ensure all the times are within sane limits + assert np.all(cv_results["fit_time"] >= 0) + assert np.all(cv_results["fit_time"] < 10) + assert np.all(cv_results["score_time"] >= 0) + assert np.all(cv_results["score_time"] < 10) + + +def test_cross_val_score_predict_groups(): + # Check if ValueError (when groups is None) propagates to cross_val_score + # and cross_val_predict + # And also check if groups is correctly passed to the cv object + X, y = make_classification(n_samples=20, 
n_classes=2, random_state=0) + + clf = SVC(kernel="linear") + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(), + GroupShuffleSplit(), + ] + error_message = "The 'groups' parameter should not be None." + for cv in group_cvs: + with pytest.raises(ValueError, match=error_message): + cross_val_score(estimator=clf, X=X, y=y, cv=cv) + with pytest.raises(ValueError, match=error_message): + cross_val_predict(estimator=clf, X=X, y=y, cv=cv) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +def test_cross_val_score_pandas(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + # 3 fold cross val is used so we need at least 3 samples per class + X_df, y_ser = InputFeatureType(X), TargetType(y2) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + cross_val_score(clf, X_df, y_ser, cv=3) + + +def test_cross_val_score_mask(): + # test that cross_val_score works with boolean masks + svm = SVC(kernel="linear") + iris = load_iris() + X, y = iris.data, iris.target + kfold = KFold(5) + scores_indices = cross_val_score(svm, X, y, cv=kfold) + kfold = KFold(5) + cv_masks = [] + for train, test in kfold.split(X, y): + mask_train = np.zeros(len(y), dtype=bool) + mask_test = np.zeros(len(y), dtype=bool) + mask_train[train] = 1 + mask_test[test] = 1 + cv_masks.append((train, test)) + scores_masks = cross_val_score(svm, X, y, cv=cv_masks) + assert_array_equal(scores_indices, scores_masks) + + +def test_cross_val_score_precomputed(): + # test for svm with precomputed kernel + svm = SVC(kernel="precomputed") + iris = load_iris() + X, y = iris.data, iris.target + linear_kernel = np.dot(X, X.T) + score_precomputed = cross_val_score(svm, linear_kernel, y) + svm = SVC(kernel="linear") + score_linear = cross_val_score(svm, X, y) + assert_array_almost_equal(score_precomputed, score_linear) + + # test with callable + svm = SVC(kernel=lambda x, y: np.dot(x, y.T)) + score_callable = cross_val_score(svm, X, y) + assert_array_almost_equal(score_precomputed, score_callable) + + # Error raised for non-square X + svm = SVC(kernel="precomputed") + with pytest.raises(ValueError): + cross_val_score(svm, X, y) + + # test error is raised when the precomputed kernel is not array-like + # or sparse + with pytest.raises(ValueError): + cross_val_score(svm, linear_kernel.tolist(), y) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score_fit_params(coo_container): + clf = MockClassifier() + n_samples = X.shape[0] + n_classes = len(np.unique(y)) + + W_sparse = coo_container( + (np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1) + ) + P_sparse = coo_container(np.eye(5)) + + DUMMY_INT = 42 + DUMMY_STR = "42" + DUMMY_OBJ = object() + + def assert_fit_params(clf): + # Function to test that the values are passed correctly to the + # classifier arguments for non-array type + + assert clf.dummy_int == DUMMY_INT + assert clf.dummy_str == DUMMY_STR + assert clf.dummy_obj == DUMMY_OBJ + + fit_params = { + "sample_weight": np.ones(n_samples), + "class_prior": np.full(n_classes, 1.0 / n_classes), + "sparse_sample_weight": W_sparse, + "sparse_param": P_sparse, + "dummy_int": DUMMY_INT, + "dummy_str": 
DUMMY_STR, + "dummy_obj": DUMMY_OBJ, + "callback": assert_fit_params, + } + cross_val_score(clf, X, y, params=fit_params) + + +def test_cross_val_score_score_func(): + clf = MockClassifier() + _score_func_args = [] + + def score_func(y_test, y_predict): + _score_func_args.append((y_test, y_predict)) + return 1.0 + + with warnings.catch_warnings(record=True): + scoring = make_scorer(score_func) + score = cross_val_score(clf, X, y, scoring=scoring, cv=3) + assert_array_equal(score, [1.0, 1.0, 1.0]) + # Test that score function is called only 3 times (for cv=3) + assert len(_score_func_args) == 3 + + +def test_cross_val_score_with_score_func_classification(): + iris = load_iris() + clf = SVC(kernel="linear") + + # Default score (should be the accuracy score) + scores = cross_val_score(clf, iris.data, iris.target) + assert_array_almost_equal(scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + # Correct classification score (aka. zero / one score) - should be the + # same as the default estimator score + zo_scores = cross_val_score(clf, iris.data, iris.target, scoring="accuracy") + assert_array_almost_equal(zo_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + # F1 score (class are balanced so f1_score should be equal to zero/one + # score + f1_scores = cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted") + assert_array_almost_equal(f1_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + +def test_cross_val_score_with_score_func_regression(): + X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) + reg = Ridge() + + # Default score of the Ridge regression estimator + scores = cross_val_score(reg, X, y) + assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + # R2 score (aka. determination coefficient) - should be the + # same as the default estimator score + r2_scores = cross_val_score(reg, X, y, scoring="r2") + assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + # Mean squared error; this is a loss function, so "scores" are negative + neg_mse_scores = cross_val_score(reg, X, y, scoring="neg_mean_squared_error") + expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) + assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2) + + # Explained variance + scoring = make_scorer(explained_variance_score) + ev_scores = cross_val_score(reg, X, y, scoring=scoring) + assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_permutation_score(coo_container): + iris = load_iris() + X = iris.data + X_sparse = coo_container(X) + y = iris.target + svm = SVC(kernel="linear") + cv = StratifiedKFold(2) + + score, scores, pvalue = permutation_test_score( + svm, X, y, n_permutations=30, cv=cv, scoring="accuracy" + ) + assert score > 0.9 + assert_almost_equal(pvalue, 0.0, 1) + + score_group, _, pvalue_group = permutation_test_score( + svm, + X, + y, + n_permutations=30, + cv=cv, + scoring="accuracy", + groups=np.ones(y.size), + random_state=0, + ) + assert score_group == score + assert pvalue_group == pvalue + + # check that we obtain the same results with a sparse representation + svm_sparse = SVC(kernel="linear") + cv_sparse = StratifiedKFold(2) + score_group, _, pvalue_group = permutation_test_score( + svm_sparse, + X_sparse, + y, + n_permutations=30, + cv=cv_sparse, + scoring="accuracy", + groups=np.ones(y.size), + random_state=0, + ) + + assert score_group == score + assert pvalue_group == pvalue + + # test with custom scoring 
object + def custom_score(y_true, y_pred): + return ((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0] + + scorer = make_scorer(custom_score) + score, _, pvalue = permutation_test_score( + svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0 + ) + assert_almost_equal(score, 0.93, 2) + assert_almost_equal(pvalue, 0.01, 3) + + # set random y + y = np.mod(np.arange(len(y)), 3) + + score, scores, pvalue = permutation_test_score( + svm, X, y, n_permutations=30, cv=cv, scoring="accuracy" + ) + + assert score < 0.5 + assert pvalue > 0.2 + + +def test_permutation_test_score_allow_nans(): + # Check that permutation_test_score allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + permutation_test_score(p, X, y) + + +def test_permutation_test_score_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + permutation_test_score(clf, X, y) + + err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!" + with pytest.raises(ValueError, match=err_msg): + permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(1)}) + permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(10)}) + + +def test_cross_val_score_allow_nans(): + # Check that cross_val_score allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + cross_val_score(p, X, y) + + +def test_cross_val_score_multilabel(): + X = np.array( + [ + [-3, 4], + [2, 4], + [3, 3], + [0, 2], + [-3, 1], + [-2, 1], + [0, 0], + [-2, -1], + [-1, -2], + [1, -2], + ] + ) + y = np.array( + [[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]] + ) + clf = KNeighborsClassifier(n_neighbors=1) + scoring_micro = make_scorer(precision_score, average="micro") + scoring_macro = make_scorer(precision_score, average="macro") + scoring_samples = make_scorer(precision_score, average="samples") + score_micro = cross_val_score(clf, X, y, scoring=scoring_micro) + score_macro = cross_val_score(clf, X, y, scoring=scoring_macro) + score_samples = cross_val_score(clf, X, y, scoring=scoring_samples) + assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) + assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) + assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_predict(coo_container): + X, y = load_diabetes(return_X_y=True) + cv = KFold() + + est = Ridge() + + # Naive loop (should be same as cross_val_predict): + preds2 = np.zeros_like(y) + for train, test in cv.split(X, y): + est.fit(X[train], y[train]) + preds2[test] = est.predict(X[test]) + + preds = cross_val_predict(est, X, y, cv=cv) + assert_array_almost_equal(preds, preds2) + + preds = cross_val_predict(est, X, y) + assert len(preds) == len(y) + + cv = LeaveOneOut() + preds = cross_val_predict(est, X, y, cv=cv) + assert len(preds) == len(y) + + Xsp = X.copy() + Xsp *= Xsp > np.median(Xsp) + 
Xsp = coo_container(Xsp) + preds = cross_val_predict(est, Xsp, y) + assert_array_almost_equal(len(preds), len(y)) + + preds = cross_val_predict(KMeans(n_init="auto"), X) + assert len(preds) == len(y) + + class BadCV: + def split(self, X, y=None, groups=None): + for i in range(4): + yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) + + with pytest.raises(ValueError): + cross_val_predict(est, X, y, cv=BadCV()) + + X, y = load_iris(return_X_y=True) + + warning_message = ( + r"Number of classes in training fold \(2\) does " + r"not match total number of classes \(3\). " + "Results may not be appropriate for your use case." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + cross_val_predict( + LogisticRegression(solver="liblinear"), + X, + y, + method="predict_proba", + cv=KFold(2), + ) + + +def test_cross_val_predict_decision_function_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="decision_function" + ) + assert preds.shape == (50,) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="decision_function" + ) + assert preds.shape == (150, 3) + + # This specifically tests imbalanced splits for binary + # classification with decision_function. This is only + # applicable to classifiers that can be fit on a single + # class. + X = X[:100] + y = y[:100] + error_message = ( + "Only 1 class/es in training fold," + " but 2 in overall dataset. This" + " is not supported for decision_function" + " with imbalanced folds. To fix " + "this, use a cross-validation technique " + "resulting in properly stratified folds" + ) + with pytest.raises(ValueError, match=error_message): + cross_val_predict( + RidgeClassifier(), X, y, method="decision_function", cv=KFold(2) + ) + + X, y = load_digits(return_X_y=True) + est = SVC(kernel="linear", decision_function_shape="ovo") + + preds = cross_val_predict(est, X, y, method="decision_function") + assert preds.shape == (1797, 45) + + ind = np.argsort(y) + X, y = X[ind], y[ind] + error_message_regexp = ( + r"Output shape \(599L?, 21L?\) of " + "decision_function does not match number of " + r"classes \(7\) in fold. 
Irregular " + "decision_function .*" + ) + with pytest.raises(ValueError, match=error_message_regexp): + cross_val_predict(est, X, y, cv=KFold(n_splits=3), method="decision_function") + + +def test_cross_val_predict_predict_proba_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_proba" + ) + assert preds.shape == (50, 2) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_proba" + ) + assert preds.shape == (150, 3) + + +def test_cross_val_predict_predict_log_proba_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba" + ) + assert preds.shape == (50, 2) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba" + ) + assert preds.shape == (150, 3) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_predict_input_types(coo_container): + iris = load_iris() + X, y = iris.data, iris.target + X_sparse = coo_container(X) + multioutput_y = np.column_stack([y, y[::-1]]) + + clf = Ridge(fit_intercept=False, random_state=0) + # 3 fold cv is used --> at least 3 samples per class + # Smoke test + predictions = cross_val_predict(clf, X, y) + assert predictions.shape == (150,) + + # test with multioutput y + predictions = cross_val_predict(clf, X_sparse, multioutput_y) + assert predictions.shape == (150, 2) + + predictions = cross_val_predict(clf, X_sparse, y) + assert_array_equal(predictions.shape, (150,)) + + # test with multioutput y + predictions = cross_val_predict(clf, X_sparse, multioutput_y) + assert_array_equal(predictions.shape, (150, 2)) + + # test with X and y as list + list_check = lambda x: isinstance(x, list) + clf = CheckingClassifier(check_X=list_check) + predictions = cross_val_predict(clf, X.tolist(), y.tolist()) + + clf = CheckingClassifier(check_y=list_check) + predictions = cross_val_predict(clf, X, y.tolist()) + + # test with X and y as list and non empty method + predictions = cross_val_predict( + LogisticRegression(solver="liblinear"), + X.tolist(), + y.tolist(), + method="decision_function", + ) + predictions = cross_val_predict( + LogisticRegression(solver="liblinear"), + X, + y.tolist(), + method="decision_function", + ) + + # test with 3d X and + X_3d = X[:, :, np.newaxis] + check_3d = lambda x: x.ndim == 3 + clf = CheckingClassifier(check_X=check_3d) + predictions = cross_val_predict(clf, X_3d, y) + assert_array_equal(predictions.shape, (150,)) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +# python3.7 deprecation warnings in pandas via matplotlib :-/ +def test_cross_val_predict_pandas(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + X_df, y_ser = InputFeatureType(X), TargetType(y2) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + cross_val_predict(clf, X_df, y_ser, cv=3) + + +def test_cross_val_predict_unbalanced(): + X, y = 
make_classification( + n_samples=100, + n_features=2, + n_redundant=0, + n_informative=2, + n_clusters_per_class=1, + random_state=1, + ) + # Change the first sample to a new class + y[0] = 2 + clf = LogisticRegression(random_state=1, solver="liblinear") + cv = StratifiedKFold(n_splits=2) + train, test = list(cv.split(X, y)) + yhat_proba = cross_val_predict(clf, X, y, cv=cv, method="predict_proba") + assert y[test[0]][0] == 2 # sanity check for further assertions + assert np.all(yhat_proba[test[0]][:, 2] == 0) + assert np.all(yhat_proba[test[0]][:, 0:1] > 0) + assert np.all(yhat_proba[test[1]] > 0) + assert_array_almost_equal(yhat_proba.sum(axis=1), np.ones(y.shape), decimal=12) + + +def test_cross_val_predict_y_none(): + # ensure that cross_val_predict works when y is None + mock_classifier = MockClassifier() + rng = np.random.RandomState(42) + X = rng.rand(100, 10) + y_hat = cross_val_predict(mock_classifier, X, y=None, cv=5, method="predict") + assert_allclose(X[:, 0], y_hat) + y_hat_proba = cross_val_predict( + mock_classifier, X, y=None, cv=5, method="predict_proba" + ) + assert_allclose(X, y_hat_proba) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score_sparse_fit_params(coo_container): + iris = load_iris() + X, y = iris.data, iris.target + clf = MockClassifier() + fit_params = {"sparse_sample_weight": coo_container(np.eye(X.shape[0]))} + a = cross_val_score(clf, X, y, params=fit_params, cv=3) + assert_array_equal(a, np.ones(3)) + + +def test_learning_curve(): + n_samples = 30 + n_splits = 3 + X, y = make_classification( + n_samples=n_samples, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits)) + for shuffle_train in [False, True]: + with warnings.catch_warnings(record=True) as w: + ( + train_sizes, + train_scores, + test_scores, + fit_times, + score_times, + ) = learning_curve( + estimator, + X, + y, + cv=KFold(n_splits=n_splits), + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + return_times=True, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + assert train_scores.shape == (10, 3) + assert test_scores.shape == (10, 3) + assert fit_times.shape == (10, 3) + assert score_times.shape == (10, 3) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + # Cannot use assert_array_almost_equal for fit and score times because + # the values are hardware-dependant + assert fit_times.dtype == "float64" + assert score_times.dtype == "float64" + + # Test a custom cv splitter that can iterate only once + with warnings.catch_warnings(record=True) as w: + train_sizes2, train_scores2, test_scores2 = learning_curve( + estimator, + X, + y, + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + assert_array_almost_equal(train_scores2, train_scores) + assert_array_almost_equal(test_scores2, test_scores) + + +def test_learning_curve_unsupervised(): + X, _ = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + 
train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10) + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_verbose(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y, cv=3, verbose=1 + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + assert "[learning_curve]" in out + + +def test_learning_curve_incremental_learning_not_possible(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + # The mockup does not have partial_fit() + estimator = MockImprovingEstimator(1) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, exploit_incremental_learning=True) + + +def test_learning_curve_incremental_learning(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20) + for shuffle_train in [False, True]: + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_incremental_learning_unsupervised(): + X, _ = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20) + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y=None, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_batch_and_incremental_learning_are_equal(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + train_sizes = np.linspace(0.2, 1.0, 5) + estimator = PassiveAggressiveClassifier(max_iter=1, tol=None, shuffle=False) + + train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve( + estimator, + X, + y, + train_sizes=train_sizes, + cv=3, + exploit_incremental_learning=True, + ) + train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve( + estimator, + X, + y, + cv=3, + train_sizes=train_sizes, + exploit_incremental_learning=False, + ) + + assert_array_equal(train_sizes_inc, train_sizes_batch) + assert_array_almost_equal( + train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1) + ) + 
assert_array_almost_equal( + test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1) + ) + + +def test_learning_curve_n_sample_range_out_of_bounds(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0, 1]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0.0, 1.0]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0.1, 1.1]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0, 20]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[1, 21]) + + +def test_learning_curve_remove_duplicate_sample_sizes(): + X, y = make_classification( + n_samples=3, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(2) + warning_message = ( + "Removed duplicate entries from 'train_sizes'. Number of ticks " + "will be less than the size of 'train_sizes': 2 instead of 3." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + train_sizes, _, _ = learning_curve( + estimator, X, y, cv=3, train_sizes=np.linspace(0.33, 1.0, 3) + ) + assert_array_equal(train_sizes, [1, 2]) + + +def test_learning_curve_with_boolean_indices(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + cv = KFold(n_splits=3) + train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10) + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_with_shuffle(): + # Following test case was designed this way to verify the code + # changes made in pull request: #7506. + X = np.array( + [ + [1, 2], + [3, 4], + [5, 6], + [7, 8], + [11, 12], + [13, 14], + [15, 16], + [17, 18], + [19, 20], + [7, 8], + [9, 10], + [11, 12], + [13, 14], + [15, 16], + [17, 18], + ] + ) + y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4]) + groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4]) + # Splits on these groups fail without shuffle as the first iteration + # of the learning curve doesn't contain label 4 in the training set. 
+ estimator = PassiveAggressiveClassifier(max_iter=5, tol=None, shuffle=False) + + cv = GroupKFold(n_splits=2) + train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + shuffle=True, + random_state=2, + ) + assert_array_almost_equal( + train_scores_batch.mean(axis=1), np.array([0.75, 0.3, 0.36111111]) + ) + assert_array_almost_equal( + test_scores_batch.mean(axis=1), np.array([0.36111111, 0.25, 0.25]) + ) + with pytest.raises(ValueError): + learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + error_score="raise", + ) + + train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + shuffle=True, + random_state=2, + exploit_incremental_learning=True, + ) + assert_array_almost_equal( + train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1) + ) + assert_array_almost_equal( + test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1) + ) + + +def test_learning_curve_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + learning_curve(clf, X, y, error_score="raise") + + err_msg = r"sample_weight.shape == \(1,\), expected \(2,\)!" + with pytest.raises(ValueError, match=err_msg): + learning_curve( + clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(1)} + ) + learning_curve( + clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(10)} + ) + + +def test_learning_curve_incremental_learning_fit_params(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20, ["sample_weight"]) + err_msg = r"Expected fit parameter\(s\) \['sample_weight'\] not seen." 
+ with pytest.raises(AssertionError, match=err_msg): + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + ) + + err_msg = "Fit parameter sample_weight has length 3; expected" + with pytest.raises(AssertionError, match=err_msg): + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + fit_params={"sample_weight": np.ones(3)}, + ) + + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + fit_params={"sample_weight": np.ones(2)}, + ) + + +def test_validation_curve(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + param_range = np.linspace(0, 1, 10) + with warnings.catch_warnings(record=True) as w: + train_scores, test_scores = validation_curve( + MockEstimatorWithParameter(), + X, + y, + param_name="param", + param_range=param_range, + cv=2, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + + assert_array_almost_equal(train_scores.mean(axis=1), param_range) + assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range) + + +def test_validation_curve_clone_estimator(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + + param_range = np.linspace(1, 0, 10) + _, _ = validation_curve( + MockEstimatorWithSingleFitCallAllowed(), + X, + y, + param_name="param", + param_range=param_range, + cv=2, + ) + + +def test_validation_curve_cv_splits_consistency(): + n_samples = 100 + n_splits = 5 + X, y = make_classification(n_samples=100, random_state=0) + + scores1 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + ) + # The OneTimeSplitter is a non-re-entrant cv splitter. Unless, the + # `split` is called for each parameter, the following should produce + # identical results for param setting 1 and param setting 2 as both have + # the same C value. + assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :], 2)) + + scores2 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=KFold(n_splits=n_splits, shuffle=True), + ) + + # For scores2, compare the 1st and 2nd parameter's scores + # (Since the C value for 1st two param setting is 0.1, they must be + # consistent unless the train test folds differ between the param settings) + assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :], 2)) + + scores3 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=KFold(n_splits=n_splits), + ) + + # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check. 
+ assert_array_almost_equal(np.array(scores3), np.array(scores1)) + + +def test_validation_curve_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + ) + + err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!" + with pytest.raises(ValueError, match=err_msg): + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + fit_params={"sample_weight": np.ones(1)}, + ) + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + fit_params={"sample_weight": np.ones(10)}, + ) + + +def test_check_is_permutation(): + rng = np.random.RandomState(0) + p = np.arange(100) + rng.shuffle(p) + assert _check_is_permutation(p, 100) + assert not _check_is_permutation(np.delete(p, 23), 100) + + p[0] = 23 + assert not _check_is_permutation(p, 100) + + # Check if the additional duplicate indices are caught + assert not _check_is_permutation(np.hstack((p, 0)), 100) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cross_val_predict_sparse_prediction(csr_container): + # check that cross_val_predict gives same result for sparse and dense input + X, y = make_multilabel_classification( + n_classes=2, + n_labels=1, + allow_unlabeled=False, + return_indicator=True, + random_state=1, + ) + X_sparse = csr_container(X) + y_sparse = csr_container(y) + classif = OneVsRestClassifier(SVC(kernel="linear")) + preds = cross_val_predict(classif, X, y, cv=10) + preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10) + preds_sparse = preds_sparse.toarray() + assert_array_almost_equal(preds_sparse, preds) + + +def check_cross_val_predict_binary(est, X, y, method): + """Helper for tests of cross_val_predict with binary classification""" + cv = KFold(n_splits=3, shuffle=False) + + # Generate expected outputs + if y.ndim == 1: + exp_shape = (len(X),) if method == "decision_function" else (len(X), 2) + else: + exp_shape = y.shape + expected_predictions = np.zeros(exp_shape) + for train, test in cv.split(X, y): + est = clone(est).fit(X[train], y[train]) + expected_predictions[test] = getattr(est, method)(X[test]) + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + assert_allclose( + cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions + ) + + +def check_cross_val_predict_multiclass(est, X, y, method): + """Helper for tests of cross_val_predict with multiclass classification""" + cv = KFold(n_splits=3, shuffle=False) + + # Generate expected outputs + float_min = np.finfo(np.float64).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + expected_predictions = np.full( + (len(X), len(set(y))), default_values[method], dtype=np.float64 + ) + _, y_enc = np.unique(y, return_inverse=True) + for train, test in cv.split(X, y_enc): + est = clone(est).fit(X[train], y_enc[train]) + fold_preds = getattr(est, method)(X[test]) + i_cols_fit = np.unique(y_enc[train]) + expected_predictions[np.ix_(test, i_cols_fit)] = fold_preds + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + assert_allclose( + 
cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions + ) + + +def check_cross_val_predict_multilabel(est, X, y, method): + """Check the output of cross_val_predict for 2D targets using + Estimators which provide a predictions as a list with one + element per class. + """ + cv = KFold(n_splits=3, shuffle=False) + + # Create empty arrays of the correct size to hold outputs + float_min = np.finfo(np.float64).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + n_targets = y.shape[1] + expected_preds = [] + for i_col in range(n_targets): + n_classes_in_label = len(set(y[:, i_col])) + if n_classes_in_label == 2 and method == "decision_function": + exp_shape = (len(X),) + else: + exp_shape = (len(X), n_classes_in_label) + expected_preds.append( + np.full(exp_shape, default_values[method], dtype=np.float64) + ) + + # Generate expected outputs + y_enc_cols = [ + np.unique(y[:, i], return_inverse=True)[1][:, np.newaxis] + for i in range(y.shape[1]) + ] + y_enc = np.concatenate(y_enc_cols, axis=1) + for train, test in cv.split(X, y_enc): + est = clone(est).fit(X[train], y_enc[train]) + fold_preds = getattr(est, method)(X[test]) + for i_col in range(n_targets): + fold_cols = np.unique(y_enc[train][:, i_col]) + if expected_preds[i_col].ndim == 1: + # Decision function with <=2 classes + expected_preds[i_col][test] = fold_preds[i_col] + else: + idx = np.ix_(test, fold_cols) + expected_preds[i_col][idx] = fold_preds[i_col] + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + cv_predict_output = cross_val_predict(est, X, tg, method=method, cv=cv) + assert len(cv_predict_output) == len(expected_preds) + for i in range(len(cv_predict_output)): + assert_allclose(cv_predict_output[i], expected_preds[i]) + + +def check_cross_val_predict_with_method_binary(est): + # This test includes the decision_function with two classes. + # This is a special case: it has only one column of output. + X, y = make_classification(n_classes=2, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_binary(est, X, y, method) + + +def check_cross_val_predict_with_method_multiclass(est): + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method(): + check_cross_val_predict_with_method_binary(LogisticRegression(solver="liblinear")) + check_cross_val_predict_with_method_multiclass( + LogisticRegression(solver="liblinear") + ) + + +def test_cross_val_predict_method_checking(): + # Regression test for issue #9639. Tests that cross_val_predict does not + # check estimator methods (e.g. 
predict_proba) before fitting + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + est = SGDClassifier(loss="log_loss", random_state=2) + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_gridsearchcv_cross_val_predict_with_method(): + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + est = GridSearchCV( + LogisticRegression(random_state=42, solver="liblinear"), {"C": [0.1, 1]}, cv=2 + ) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method_multilabel_ovr(): + # OVR does multilabel predictions, but only arrays of + # binary indicator columns. The output of predict_proba + # is a 2D array with shape (n_samples, n_classes). + n_samp = 100 + n_classes = 4 + X, y = make_multilabel_classification( + n_samples=n_samp, n_labels=3, n_classes=n_classes, n_features=5, random_state=42 + ) + est = OneVsRestClassifier(LogisticRegression(solver="liblinear", random_state=0)) + for method in ["predict_proba", "decision_function"]: + check_cross_val_predict_binary(est, X, y, method=method) + + +class RFWithDecisionFunction(RandomForestClassifier): + # None of the current multioutput-multiclass estimators have + # decision function methods. Create a mock decision function + # to test the cross_val_predict function's handling of this case. + def decision_function(self, X): + probs = self.predict_proba(X) + msg = "This helper should only be used on multioutput-multiclass tasks" + assert isinstance(probs, list), msg + probs = [p[:, -1] if p.shape[1] == 2 else p for p in probs] + return probs + + +def test_cross_val_predict_with_method_multilabel_rf(): + # The RandomForest allows multiple classes in each label. + # Output of predict_proba is a list of outputs of predict_proba + # for each individual label. + n_classes = 4 + X, y = make_multilabel_classification( + n_samples=100, n_labels=3, n_classes=n_classes, n_features=5, random_state=42 + ) + y[:, 0] += y[:, 1] # Put three classes in the first column + for method in ["predict_proba", "predict_log_proba", "decision_function"]: + est = RFWithDecisionFunction(n_estimators=5, random_state=0) + with warnings.catch_warnings(): + # Suppress "RuntimeWarning: divide by zero encountered in log" + warnings.simplefilter("ignore") + check_cross_val_predict_multilabel(est, X, y, method=method) + + +def test_cross_val_predict_with_method_rare_class(): + # Test a multiclass problem where one class will be missing from + # one of the CV training sets. + rng = np.random.RandomState(0) + X = rng.normal(0, 1, size=(14, 10)) + y = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 3]) + est = LogisticRegression(solver="liblinear") + for method in ["predict_proba", "predict_log_proba", "decision_function"]: + with warnings.catch_warnings(): + # Suppress warning about too few examples of a class + warnings.simplefilter("ignore") + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method_multilabel_rf_rare_class(): + # The RandomForest allows anything for the contents of the labels. + # Output of predict_proba is a list of outputs of predict_proba + # for each individual label. + # In this test, the first label has a class with a single example. + # We'll have one CV fold where the training data don't include it. 
+ rng = np.random.RandomState(0) + X = rng.normal(0, 1, size=(5, 10)) + y = np.array([[0, 0], [1, 1], [2, 1], [0, 1], [1, 0]]) + for method in ["predict_proba", "predict_log_proba"]: + est = RFWithDecisionFunction(n_estimators=5, random_state=0) + with warnings.catch_warnings(): + # Suppress "RuntimeWarning: divide by zero encountered in log" + warnings.simplefilter("ignore") + check_cross_val_predict_multilabel(est, X, y, method=method) + + +def get_expected_predictions(X, y, cv, classes, est, method): + expected_predictions = np.zeros([len(y), classes]) + func = getattr(est, method) + + for train, test in cv.split(X, y): + est.fit(X[train], y[train]) + expected_predictions_ = func(X[test]) + # To avoid 2 dimensional indexing + if method == "predict_proba": + exp_pred_test = np.zeros((len(test), classes)) + else: + exp_pred_test = np.full( + (len(test), classes), np.finfo(expected_predictions.dtype).min + ) + exp_pred_test[:, est.classes_] = expected_predictions_ + expected_predictions[test] = exp_pred_test + + return expected_predictions + + +def test_cross_val_predict_class_subset(): + X = np.arange(200).reshape(100, 2) + y = np.array([x // 10 for x in range(100)]) + classes = 10 + + kfold3 = KFold(n_splits=3) + kfold4 = KFold(n_splits=4) + + le = LabelEncoder() + + methods = ["decision_function", "predict_proba", "predict_log_proba"] + for method in methods: + est = LogisticRegression(solver="liblinear") + + # Test with n_splits=3 + predictions = cross_val_predict(est, X, y, method=method, cv=kfold3) + + # Runs a naive loop (should be same as cross_val_predict): + expected_predictions = get_expected_predictions( + X, y, kfold3, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + # Test with n_splits=4 + predictions = cross_val_predict(est, X, y, method=method, cv=kfold4) + expected_predictions = get_expected_predictions( + X, y, kfold4, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + # Testing unordered labels + y = shuffle(np.repeat(range(10), 10), random_state=0) + predictions = cross_val_predict(est, X, y, method=method, cv=kfold3) + y = le.fit_transform(y) + expected_predictions = get_expected_predictions( + X, y, kfold3, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + +def test_score_memmap(): + # Ensure a scalar score of memmap type is accepted + iris = load_iris() + X, y = iris.data, iris.target + clf = MockClassifier() + tf = tempfile.NamedTemporaryFile(mode="wb", delete=False) + tf.write(b"Hello world!!!!!") + tf.close() + scores = np.memmap(tf.name, dtype=np.float64) + score = np.memmap(tf.name, shape=(), mode="r", dtype=np.float64) + try: + cross_val_score(clf, X, y, scoring=lambda est, X, y: score) + with pytest.raises(ValueError): + cross_val_score(clf, X, y, scoring=lambda est, X, y: scores) + finally: + # Best effort to release the mmap file handles before deleting the + # backing file under Windows + scores, score = None, None + for _ in range(3): + try: + os.unlink(tf.name) + break + except OSError: + sleep(1.0) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +def test_permutation_test_score_pandas(): + # check permutation_test_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + iris = load_iris() + X, 
y = iris.data, iris.target + X_df, y_ser = InputFeatureType(X), TargetType(y) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + permutation_test_score(clf, X_df, y_ser) + + +def test_fit_and_score_failing(): + # Create a failing classifier to deliberately fail + failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER) + # dummy X data + X = np.arange(1, 10) + fit_and_score_args = dict( + estimator=failing_clf, + X=X, + y=None, + scorer=dict(), + train=None, + test=None, + verbose=0, + parameters=None, + fit_params=None, + score_params=None, + ) + # passing error score to trigger the warning message + fit_and_score_args["error_score"] = "raise" + # check if exception was raised, with default error_score='raise' + with pytest.raises(ValueError, match="Failing classifier failed as required"): + _fit_and_score(**fit_and_score_args) + + assert failing_clf.score() == 0.0 # FailingClassifier coverage + + +def test_fit_and_score_working(): + X, y = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + train, test = next(ShuffleSplit().split(X)) + # Test return_parameters option + fit_and_score_args = dict( + estimator=clf, + X=X, + y=y, + scorer=dict(), + train=train, + test=test, + verbose=0, + parameters={"max_iter": 100, "tol": 0.1}, + fit_params=None, + score_params=None, + return_parameters=True, + ) + result = _fit_and_score(**fit_and_score_args) + assert result["parameters"] == fit_and_score_args["parameters"] + + +class DataDependentFailingClassifier(BaseEstimator): + def __init__(self, max_x_value=None): + self.max_x_value = max_x_value + + def fit(self, X, y=None): + num_values_too_high = (X > self.max_x_value).sum() + if num_values_too_high: + raise ValueError( + f"Classifier fit failed with {num_values_too_high} values too high" + ) + + def score(self, X=None, Y=None): + return 0.0 + + +@pytest.mark.parametrize("error_score", [np.nan, 0]) +def test_cross_validate_some_failing_fits_warning(error_score): + # Create a failing classifier to deliberately fail + failing_clf = DataDependentFailingClassifier(max_x_value=8) + # dummy X data + X = np.arange(1, 10) + y = np.ones(9) + # passing error score to trigger the warning message + cross_validate_args = [failing_clf, X, y] + cross_validate_kwargs = {"cv": 3, "error_score": error_score} + # check if the warning message type is as expected + + individual_fit_error_message = ( + "ValueError: Classifier fit failed with 1 values too high" + ) + warning_message = re.compile( + ( + "2 fits failed.+total of 3.+The score on these" + " train-test partitions for these parameters will be set to" + f" {cross_validate_kwargs['error_score']}.+{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.warns(FitFailedWarning, match=warning_message): + cross_validate(*cross_validate_args, **cross_validate_kwargs) + + +@pytest.mark.parametrize("error_score", [np.nan, 0]) +def test_cross_validate_all_failing_fits_error(error_score): + # Create a failing classifier to deliberately fail + failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER) + # dummy X data + X = np.arange(1, 10) + y = np.ones(9) + + cross_validate_args = [failing_clf, X, y] + cross_validate_kwargs = {"cv": 7, "error_score": error_score} + + individual_fit_error_message = "ValueError: Failing classifier failed as required" + error_message = re.compile( + ( + "All the 7 fits 
failed.+your model is misconfigured.+" + f"{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.raises(ValueError, match=error_message): + cross_validate(*cross_validate_args, **cross_validate_kwargs) + + +def _failing_scorer(estimator, X, y, error_msg): + raise ValueError(error_msg) + + +@pytest.mark.filterwarnings("ignore:lbfgs failed to converge") +@pytest.mark.parametrize("error_score", [np.nan, 0, "raise"]) +def test_cross_val_score_failing_scorer(error_score): + # check that an estimator can fail during scoring in `cross_val_score` and + # that we can optionally replaced it with `error_score` + X, y = load_iris(return_X_y=True) + clf = LogisticRegression(max_iter=5).fit(X, y) + + error_msg = "This scorer is supposed to fail!!!" + failing_scorer = partial(_failing_scorer, error_msg=error_msg) + + if error_score == "raise": + with pytest.raises(ValueError, match=error_msg): + cross_val_score( + clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score + ) + else: + warning_msg = ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}" + ) + with pytest.warns(UserWarning, match=warning_msg): + scores = cross_val_score( + clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score + ) + assert_allclose(scores, error_score) + + +@pytest.mark.filterwarnings("ignore:lbfgs failed to converge") +@pytest.mark.parametrize("error_score", [np.nan, 0, "raise"]) +@pytest.mark.parametrize("return_train_score", [True, False]) +@pytest.mark.parametrize("with_multimetric", [False, True]) +def test_cross_validate_failing_scorer( + error_score, return_train_score, with_multimetric +): + # Check that an estimator can fail during scoring in `cross_validate` and + # that we can optionally replace it with `error_score`. In the multimetric + # case also check the result of a non-failing scorer where the other scorers + # are failing. + X, y = load_iris(return_X_y=True) + clf = LogisticRegression(max_iter=5).fit(X, y) + + error_msg = "This scorer is supposed to fail!!!" + failing_scorer = partial(_failing_scorer, error_msg=error_msg) + if with_multimetric: + non_failing_scorer = make_scorer(mean_squared_error) + scoring = { + "score_1": failing_scorer, + "score_2": non_failing_scorer, + "score_3": failing_scorer, + } + else: + scoring = failing_scorer + + if error_score == "raise": + with pytest.raises(ValueError, match=error_msg): + cross_validate( + clf, + X, + y, + cv=3, + scoring=scoring, + return_train_score=return_train_score, + error_score=error_score, + ) + else: + warning_msg = ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}" + ) + with pytest.warns(UserWarning, match=warning_msg): + results = cross_validate( + clf, + X, + y, + cv=3, + scoring=scoring, + return_train_score=return_train_score, + error_score=error_score, + ) + for key in results: + if "_score" in key: + if "_score_2" in key: + # check the test (and optionally train) score for the + # scorer that should be non-failing + for i in results[key]: + assert isinstance(i, float) + else: + # check the test (and optionally train) score for all + # scorers that should be assigned to `error_score`. 
+ assert_allclose(results[key], error_score) + + +def three_params_scorer(i, j, k): + return 3.4213 + + +@pytest.mark.parametrize( + "train_score, scorer, verbose, split_prg, cdt_prg, expected", + [ + ( + False, + three_params_scorer, + 2, + (1, 3), + (0, 1), + r"\[CV\] END ...................................................." + r" total time= 0.\ds", + ), + ( + True, + {"sc1": three_params_scorer, "sc2": three_params_scorer}, + 3, + (1, 3), + (0, 1), + r"\[CV 2/3\] END sc1: \(train=3.421, test=3.421\) sc2: " + r"\(train=3.421, test=3.421\) total time= 0.\ds", + ), + ( + False, + {"sc1": three_params_scorer, "sc2": three_params_scorer}, + 10, + (1, 3), + (0, 1), + r"\[CV 2/3; 1/1\] END ....... sc1: \(test=3.421\) sc2: \(test=3.421\)" + r" total time= 0.\ds", + ), + ], +) +def test_fit_and_score_verbosity( + capsys, train_score, scorer, verbose, split_prg, cdt_prg, expected +): + X, y = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + train, test = next(ShuffleSplit().split(X)) + + # test print without train score + fit_and_score_args = dict( + estimator=clf, + X=X, + y=y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=None, + score_params=None, + return_train_score=train_score, + split_progress=split_prg, + candidate_progress=cdt_prg, + ) + _fit_and_score(**fit_and_score_args) + out, _ = capsys.readouterr() + outlines = out.split("\n") + if len(outlines) > 2: + assert re.match(expected, outlines[1]) + else: + assert re.match(expected, outlines[0]) + + +def test_score(): + error_message = "scoring must return a number, got None" + + def two_params_scorer(estimator, X_test): + return None + + with pytest.raises(ValueError, match=error_message): + _score( + estimator=None, + X_test=None, + y_test=None, + scorer=two_params_scorer, + score_params=None, + error_score=np.nan, + ) + + +def test_callable_multimetric_confusion_matrix_cross_validate(): + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + cm = confusion_matrix(y, y_pred) + return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + est.fit(X, y) + cv_results = cross_validate(est, X, y, cv=5, scoring=custom_scorer) + + score_names = ["tn", "fp", "fn", "tp"] + for name in score_names: + assert "test_{}".format(name) in cv_results + + +def test_learning_curve_partial_fit_regressors(): + """Check that regressors with partial_fit is supported. + + Non-regression test for #22981. 
+ """ + X, y = make_regression(random_state=42) + + # Does not error + learning_curve(MLPRegressor(), X, y, exploit_incremental_learning=True, cv=2) + + +def test_learning_curve_some_failing_fits_warning(global_random_seed): + """Checks for fit failures in `learning_curve` and raises the required warning""" + + X, y = make_classification( + n_samples=30, + n_classes=3, + n_informative=6, + shuffle=False, + random_state=global_random_seed, + ) + # sorting the target to trigger SVC error on the 2 first splits because a single + # class is present + sorted_idx = np.argsort(y) + X, y = X[sorted_idx], y[sorted_idx] + + svc = SVC() + warning_message = "10 fits failed out of a total of 25" + + with pytest.warns(FitFailedWarning, match=warning_message): + _, train_score, test_score, *_ = learning_curve( + svc, X, y, cv=5, error_score=np.nan + ) + + # the first 2 splits should lead to warnings and thus np.nan scores + for idx in range(2): + assert np.isnan(train_score[idx]).all() + assert np.isnan(test_score[idx]).all() + + for idx in range(2, train_score.shape[0]): + assert not np.isnan(train_score[idx]).any() + assert not np.isnan(test_score[idx]).any() + + +def test_cross_validate_return_indices(global_random_seed): + """Check the behaviour of `return_indices` in `cross_validate`.""" + X, y = load_iris(return_X_y=True) + X = scale(X) # scale features for better convergence + estimator = LogisticRegression() + + cv = KFold(n_splits=3, shuffle=True, random_state=global_random_seed) + cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=False) + assert "indices" not in cv_results + + cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=True) + assert "indices" in cv_results + train_indices = cv_results["indices"]["train"] + test_indices = cv_results["indices"]["test"] + assert len(train_indices) == cv.n_splits + assert len(test_indices) == cv.n_splits + + assert_array_equal([indices.size for indices in train_indices], 100) + assert_array_equal([indices.size for indices in test_indices], 50) + + for split_idx, (expected_train_idx, expected_test_idx) in enumerate(cv.split(X, y)): + assert_array_equal(train_indices[split_idx], expected_train_idx) + assert_array_equal(test_indices[split_idx], expected_test_idx) + + +# Tests for metadata routing in cross_val* +# ======================================== + + +# TODO(1.6): remove this test in 1.6 +def test_cross_validate_fit_param_deprecation(): + """Check that we warn about deprecating `fit_params`.""" + with pytest.warns(FutureWarning, match="`fit_params` is deprecated"): + cross_validate(estimator=ConsumingClassifier(), X=X, y=y, cv=2, fit_params={}) + + with pytest.raises( + ValueError, match="`params` and `fit_params` cannot both be provided" + ): + cross_validate( + estimator=ConsumingClassifier(), X=X, y=y, fit_params={}, params={} + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_groups_with_routing_validation(cv_method): + """Check that we raise an error if `groups` are passed to the cv method instead + of `params` when metadata routing is enabled. 
+ """ + with pytest.raises(ValueError, match="`groups` can only be passed if"): + cv_method( + estimator=ConsumingClassifier(), + X=X, + y=y, + groups=[], + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_passed_unrequested_metadata(cv_method): + """Check that we raise an error when passing metadata that is not + requested.""" + err_msg = re.escape("but are not explicitly set as requested or not requested") + with pytest.raises(ValueError, match=err_msg): + cv_method( + estimator=ConsumingClassifier(), + X=X, + y=y, + params=dict(metadata=[]), + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_cross_validate_routing(cv_method): + """Check that the respective cv method is properly dispatching the metadata + to the consumer.""" + scorer_registry = _Registry() + scorer = ConsumingScorer(registry=scorer_registry).set_score_request( + sample_weight="score_weights", metadata="score_metadata" + ) + splitter_registry = _Registry() + splitter = ConsumingSplitter(registry=splitter_registry).set_split_request( + groups="split_groups", metadata="split_metadata" + ) + estimator_registry = _Registry() + estimator = ConsumingClassifier(registry=estimator_registry).set_fit_request( + sample_weight="fit_sample_weight", metadata="fit_metadata" + ) + n_samples = _num_samples(X) + rng = np.random.RandomState(0) + score_weights = rng.rand(n_samples) + score_metadata = rng.rand(n_samples) + split_groups = rng.randint(0, 3, n_samples) + split_metadata = rng.rand(n_samples) + fit_sample_weight = rng.rand(n_samples) + fit_metadata = rng.rand(n_samples) + + extra_params = { + cross_validate: dict(scoring=dict(my_scorer=scorer, accuracy="accuracy")), + # cross_val_score doesn't support multiple scorers + cross_val_score: dict(scoring=scorer), + # cross_val_predict doesn't need a scorer + cross_val_predict: dict(), + } + + params = dict( + split_groups=split_groups, + split_metadata=split_metadata, + fit_sample_weight=fit_sample_weight, + fit_metadata=fit_metadata, + ) + + if cv_method is not cross_val_predict: + params.update( + score_weights=score_weights, + score_metadata=score_metadata, + ) + + cv_method( + estimator, + X=X, + y=y, + cv=splitter, + **extra_params[cv_method], + params=params, + ) + + if cv_method is not cross_val_predict: + # cross_val_predict doesn't need a scorer + assert len(scorer_registry) + for _scorer in scorer_registry: + check_recorded_metadata( + obj=_scorer, + method="score", + split_params=("sample_weight", "metadata"), + sample_weight=score_weights, + metadata=score_metadata, + ) + + assert len(splitter_registry) + for _splitter in splitter_registry: + check_recorded_metadata( + obj=_splitter, + method="split", + groups=split_groups, + metadata=split_metadata, + ) + + assert len(estimator_registry) + for _estimator in estimator_registry: + check_recorded_metadata( + obj=_estimator, + method="fit", + split_params=("sample_weight", "metadata"), + sample_weight=fit_sample_weight, + metadata=fit_metadata, + ) + + +# End of metadata routing tests +# ============================= diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/__init__.py b/venv/lib/python3.10/site-packages/sklearn/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8cfb42c73e11818e0f1d08190d49457531242a85 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/sklearn/tree/__init__.py @@ -0,0 +1,24 @@ +""" +The :mod:`sklearn.tree` module includes decision tree-based models for +classification and regression. +""" + +from ._classes import ( + BaseDecisionTree, + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) +from ._export import export_graphviz, export_text, plot_tree + +__all__ = [ + "BaseDecisionTree", + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", + "export_graphviz", + "plot_tree", + "export_text", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45621f55c4a7a64a2d0836531390c324c8a80963 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_classes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_classes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5422bd5d935f14ba1f6e9db6924003b8cb5d360 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_classes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_export.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_export.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..196b24b5c127fdb8ee0a08ae0e078a22f9e3d8fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_export.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_reingold_tilford.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_reingold_tilford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9900b7412eb98776d84f5c18075cac92ac164e8e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/__pycache__/_reingold_tilford.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_classes.py b/venv/lib/python3.10/site-packages/sklearn/tree/_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..00d61f184731dbb21e8146c8500ba43743fc576d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/_classes.py @@ -0,0 +1,1938 @@ +""" +This module gathers tree-based methods, including decision, regression and +randomized trees. Single and multi-output problems are both handled. 
+""" + +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Noel Dawe +# Satrajit Gosh +# Joly Arnaud +# Fares Hedayati +# Nelson Liu +# +# License: BSD 3 clause + +import copy +import numbers +from abc import ABCMeta, abstractmethod +from math import ceil +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import issparse + +from ..base import ( + BaseEstimator, + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, + clone, + is_classifier, +) +from ..utils import Bunch, check_random_state, compute_sample_weight +from ..utils._param_validation import Hidden, Interval, RealNotInt, StrOptions +from ..utils.multiclass import check_classification_targets +from ..utils.validation import ( + _assert_all_finite_element_wise, + _check_sample_weight, + assert_all_finite, + check_is_fitted, +) +from . import _criterion, _splitter, _tree +from ._criterion import Criterion +from ._splitter import Splitter +from ._tree import ( + BestFirstTreeBuilder, + DepthFirstTreeBuilder, + Tree, + _build_pruned_tree_ccp, + ccp_pruning_path, +) +from ._utils import _any_isnan_axis0 + +__all__ = [ + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", +] + + +# ============================================================================= +# Types and constants +# ============================================================================= + +DTYPE = _tree.DTYPE +DOUBLE = _tree.DOUBLE + +CRITERIA_CLF = { + "gini": _criterion.Gini, + "log_loss": _criterion.Entropy, + "entropy": _criterion.Entropy, +} +CRITERIA_REG = { + "squared_error": _criterion.MSE, + "friedman_mse": _criterion.FriedmanMSE, + "absolute_error": _criterion.MAE, + "poisson": _criterion.Poisson, +} + +DENSE_SPLITTERS = {"best": _splitter.BestSplitter, "random": _splitter.RandomSplitter} + +SPARSE_SPLITTERS = { + "best": _splitter.BestSparseSplitter, + "random": _splitter.RandomSparseSplitter, +} + +# ============================================================================= +# Base decision tree +# ============================================================================= + + +class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for decision trees. + + Warning: This class should not be used directly. + Use derived classes instead. 
+ """ + + _parameter_constraints: dict = { + "splitter": [StrOptions({"best", "random"})], + "max_depth": [Interval(Integral, 1, None, closed="left"), None], + "min_samples_split": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0.0, 1.0, closed="right"), + ], + "min_samples_leaf": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0.0, 1.0, closed="neither"), + ], + "min_weight_fraction_leaf": [Interval(Real, 0.0, 0.5, closed="both")], + "max_features": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0.0, 1.0, closed="right"), + StrOptions({"sqrt", "log2"}), + None, + ], + "random_state": ["random_state"], + "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None], + "min_impurity_decrease": [Interval(Real, 0.0, None, closed="left")], + "ccp_alpha": [Interval(Real, 0.0, None, closed="left")], + "monotonic_cst": ["array-like", None], + } + + @abstractmethod + def __init__( + self, + *, + criterion, + splitter, + max_depth, + min_samples_split, + min_samples_leaf, + min_weight_fraction_leaf, + max_features, + max_leaf_nodes, + random_state, + min_impurity_decrease, + class_weight=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + self.criterion = criterion + self.splitter = splitter + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.max_leaf_nodes = max_leaf_nodes + self.random_state = random_state + self.min_impurity_decrease = min_impurity_decrease + self.class_weight = class_weight + self.ccp_alpha = ccp_alpha + self.monotonic_cst = monotonic_cst + + def get_depth(self): + """Return the depth of the decision tree. + + The depth of a tree is the maximum distance between the root + and any leaf. + + Returns + ------- + self.tree_.max_depth : int + The maximum depth of the tree. + """ + check_is_fitted(self) + return self.tree_.max_depth + + def get_n_leaves(self): + """Return the number of leaves of the decision tree. + + Returns + ------- + self.tree_.n_leaves : int + Number of leaves. + """ + check_is_fitted(self) + return self.tree_.n_leaves + + def _support_missing_values(self, X): + return ( + not issparse(X) + and self._get_tags()["allow_nan"] + and self.monotonic_cst is None + ) + + def _compute_missing_values_in_feature_mask(self, X, estimator_name=None): + """Return boolean mask denoting if there are missing values for each feature. + + This method also ensures that X is finite. + + Parameter + --------- + X : array-like of shape (n_samples, n_features), dtype=DOUBLE + Input data. + + estimator_name : str or None, default=None + Name to use when raising an error. Defaults to the class name. + + Returns + ------- + missing_values_in_feature_mask : ndarray of shape (n_features,), or None + Missing value mask. If missing values are not supported or there + are no missing values, return None. + """ + estimator_name = estimator_name or self.__class__.__name__ + common_kwargs = dict(estimator_name=estimator_name, input_name="X") + + if not self._support_missing_values(X): + assert_all_finite(X, **common_kwargs) + return None + + with np.errstate(over="ignore"): + overall_sum = np.sum(X) + + if not np.isfinite(overall_sum): + # Raise a ValueError in case of the presence of an infinite element. 
+ _assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs) + + # If the sum is not nan, then there are no missing values + if not np.isnan(overall_sum): + return None + + missing_values_in_feature_mask = _any_isnan_axis0(X) + return missing_values_in_feature_mask + + def _fit( + self, + X, + y, + sample_weight=None, + check_input=True, + missing_values_in_feature_mask=None, + ): + random_state = check_random_state(self.random_state) + + if check_input: + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. + + # _compute_missing_values_in_feature_mask will check for finite values and + # compute the missing mask if the tree supports missing values + check_X_params = dict( + dtype=DTYPE, accept_sparse="csc", force_all_finite=False + ) + check_y_params = dict(ensure_2d=False, dtype=None) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) + + missing_values_in_feature_mask = ( + self._compute_missing_values_in_feature_mask(X) + ) + if issparse(X): + X.sort_indices() + + if X.indices.dtype != np.intc or X.indptr.dtype != np.intc: + raise ValueError( + "No support for np.int64 index based sparse matrices" + ) + + if self.criterion == "poisson": + if np.any(y < 0): + raise ValueError( + "Some value(s) of y are negative which is" + " not allowed for Poisson regression." + ) + if np.sum(y) <= 0: + raise ValueError( + "Sum of y is not positive which is " + "necessary for Poisson regression." + ) + + # Determine output settings + n_samples, self.n_features_in_ = X.shape + is_classification = is_classifier(self) + + y = np.atleast_1d(y) + expanded_class_weight = None + + if y.ndim == 1: + # reshape is necessary to preserve the data contiguity against vs + # [:, np.newaxis] that does not. 
+ y = np.reshape(y, (-1, 1)) + + self.n_outputs_ = y.shape[1] + + if is_classification: + check_classification_targets(y) + y = np.copy(y) + + self.classes_ = [] + self.n_classes_ = [] + + if self.class_weight is not None: + y_original = np.copy(y) + + y_encoded = np.zeros(y.shape, dtype=int) + for k in range(self.n_outputs_): + classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True) + self.classes_.append(classes_k) + self.n_classes_.append(classes_k.shape[0]) + y = y_encoded + + if self.class_weight is not None: + expanded_class_weight = compute_sample_weight( + self.class_weight, y_original + ) + + self.n_classes_ = np.array(self.n_classes_, dtype=np.intp) + + if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: + y = np.ascontiguousarray(y, dtype=DOUBLE) + + max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth + + if isinstance(self.min_samples_leaf, numbers.Integral): + min_samples_leaf = self.min_samples_leaf + else: # float + min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples)) + + if isinstance(self.min_samples_split, numbers.Integral): + min_samples_split = self.min_samples_split + else: # float + min_samples_split = int(ceil(self.min_samples_split * n_samples)) + min_samples_split = max(2, min_samples_split) + + min_samples_split = max(min_samples_split, 2 * min_samples_leaf) + + if isinstance(self.max_features, str): + if self.max_features == "sqrt": + max_features = max(1, int(np.sqrt(self.n_features_in_))) + elif self.max_features == "log2": + max_features = max(1, int(np.log2(self.n_features_in_))) + elif self.max_features is None: + max_features = self.n_features_in_ + elif isinstance(self.max_features, numbers.Integral): + max_features = self.max_features + else: # float + if self.max_features > 0.0: + max_features = max(1, int(self.max_features * self.n_features_in_)) + else: + max_features = 0 + + self.max_features_ = max_features + + max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes + + if len(y) != n_samples: + raise ValueError( + "Number of labels=%d does not match number of samples=%d" + % (len(y), n_samples) + ) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, DOUBLE) + + if expanded_class_weight is not None: + if sample_weight is not None: + sample_weight = sample_weight * expanded_class_weight + else: + sample_weight = expanded_class_weight + + # Set min_weight_leaf from min_weight_fraction_leaf + if sample_weight is None: + min_weight_leaf = self.min_weight_fraction_leaf * n_samples + else: + min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight) + + # Build tree + criterion = self.criterion + if not isinstance(criterion, Criterion): + if is_classification: + criterion = CRITERIA_CLF[self.criterion]( + self.n_outputs_, self.n_classes_ + ) + else: + criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples) + else: + # Make a deepcopy in case the criterion has mutable attributes that + # might be shared and modified concurrently during parallel fitting + criterion = copy.deepcopy(criterion) + + SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS + + splitter = self.splitter + if self.monotonic_cst is None: + monotonic_cst = None + else: + if self.n_outputs_ > 1: + raise ValueError( + "Monotonicity constraints are not supported with multiple outputs." 
+ ) + # Check to correct monotonicity constraint' specification, + # by applying element-wise logical conjunction + # Note: we do not cast `np.asarray(self.monotonic_cst, dtype=np.int8)` + # straight away here so as to generate error messages for invalid + # values using the original values prior to any dtype related conversion. + monotonic_cst = np.asarray(self.monotonic_cst) + if monotonic_cst.shape[0] != X.shape[1]: + raise ValueError( + "monotonic_cst has shape {} but the input data " + "X has {} features.".format(monotonic_cst.shape[0], X.shape[1]) + ) + valid_constraints = np.isin(monotonic_cst, (-1, 0, 1)) + if not np.all(valid_constraints): + unique_constaints_value = np.unique(monotonic_cst) + raise ValueError( + "monotonic_cst must be None or an array-like of -1, 0 or 1, but" + f" got {unique_constaints_value}" + ) + monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8) + if is_classifier(self): + if self.n_classes_[0] > 2: + raise ValueError( + "Monotonicity constraints are not supported with multiclass " + "classification" + ) + # Binary classification trees are built by constraining probabilities + # of the *negative class* in order to make the implementation similar + # to regression trees. + # Since self.monotonic_cst encodes constraints on probabilities of the + # *positive class*, all signs must be flipped. + monotonic_cst *= -1 + + if not isinstance(self.splitter, Splitter): + splitter = SPLITTERS[self.splitter]( + criterion, + self.max_features_, + min_samples_leaf, + min_weight_leaf, + random_state, + monotonic_cst, + ) + + if is_classifier(self): + self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_) + else: + self.tree_ = Tree( + self.n_features_in_, + # TODO: tree shouldn't need this in this case + np.array([1] * self.n_outputs_, dtype=np.intp), + self.n_outputs_, + ) + + # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise + if max_leaf_nodes < 0: + builder = DepthFirstTreeBuilder( + splitter, + min_samples_split, + min_samples_leaf, + min_weight_leaf, + max_depth, + self.min_impurity_decrease, + ) + else: + builder = BestFirstTreeBuilder( + splitter, + min_samples_split, + min_samples_leaf, + min_weight_leaf, + max_depth, + max_leaf_nodes, + self.min_impurity_decrease, + ) + + builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask) + + if self.n_outputs_ == 1 and is_classifier(self): + self.n_classes_ = self.n_classes_[0] + self.classes_ = self.classes_[0] + + self._prune_tree() + + return self + + def _validate_X_predict(self, X, check_input): + """Validate the training data on predict (probabilities).""" + if check_input: + if self._support_missing_values(X): + force_all_finite = "allow-nan" + else: + force_all_finite = True + X = self._validate_data( + X, + dtype=DTYPE, + accept_sparse="csr", + reset=False, + force_all_finite=force_all_finite, + ) + if issparse(X) and ( + X.indices.dtype != np.intc or X.indptr.dtype != np.intc + ): + raise ValueError("No support for np.int64 index based sparse matrices") + else: + # The number of features is checked regardless of `check_input` + self._check_n_features(X, reset=False) + return X + + def predict(self, X, check_input=True): + """Predict class or regression value for X. + + For a classification model, the predicted class for each sample in X is + returned. For a regression model, the predicted value based on X is + returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. 
Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The predicted classes, or the predict values. + """ + check_is_fitted(self) + X = self._validate_X_predict(X, check_input) + proba = self.tree_.predict(X) + n_samples = X.shape[0] + + # Classification + if is_classifier(self): + if self.n_outputs_ == 1: + return self.classes_.take(np.argmax(proba, axis=1), axis=0) + + else: + class_type = self.classes_[0].dtype + predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type) + for k in range(self.n_outputs_): + predictions[:, k] = self.classes_[k].take( + np.argmax(proba[:, k], axis=1), axis=0 + ) + + return predictions + + # Regression + else: + if self.n_outputs_ == 1: + return proba[:, 0] + + else: + return proba[:, :, 0] + + def apply(self, X, check_input=True): + """Return the index of the leaf that each sample is predicted as. + + .. versionadded:: 0.17 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + X_leaves : array-like of shape (n_samples,) + For each datapoint x in X, return the index of the leaf x + ends up in. Leaves are numbered within + ``[0; self.tree_.node_count)``, possibly with gaps in the + numbering. + """ + check_is_fitted(self) + X = self._validate_X_predict(X, check_input) + return self.tree_.apply(X) + + def decision_path(self, X, check_input=True): + """Return the decision path in the tree. + + .. versionadded:: 0.18 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + indicator : sparse matrix of shape (n_samples, n_nodes) + Return a node indicator CSR matrix where non zero elements + indicates that the samples goes through the nodes. + """ + X = self._validate_X_predict(X, check_input) + return self.tree_.decision_path(X) + + def _prune_tree(self): + """Prune tree using Minimal Cost-Complexity Pruning.""" + check_is_fitted(self) + + if self.ccp_alpha == 0.0: + return + + # build pruned tree + if is_classifier(self): + n_classes = np.atleast_1d(self.n_classes_) + pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_) + else: + pruned_tree = Tree( + self.n_features_in_, + # TODO: the tree shouldn't need this param + np.array([1] * self.n_outputs_, dtype=np.intp), + self.n_outputs_, + ) + _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha) + + self.tree_ = pruned_tree + + def cost_complexity_pruning_path(self, X, y, sample_weight=None): + """Compute the pruning path during Minimal Cost-Complexity Pruning. + + See :ref:`minimal_cost_complexity_pruning` for details on the pruning + process. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csc_matrix``. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The target values (class labels) as integers or strings. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. Splits are also + ignored if they would result in any single class carrying a + negative weight in either child node. + + Returns + ------- + ccp_path : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + ccp_alphas : ndarray + Effective alphas of subtree during pruning. + + impurities : ndarray + Sum of the impurities of the subtree leaves for the + corresponding alpha value in ``ccp_alphas``. + """ + est = clone(self).set_params(ccp_alpha=0.0) + est.fit(X, y, sample_weight=sample_weight) + return Bunch(**ccp_pruning_path(est.tree_)) + + @property + def feature_importances_(self): + """Return the feature importances. + + The importance of a feature is computed as the (normalized) total + reduction of the criterion brought by that feature. + It is also known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + Returns + ------- + feature_importances_ : ndarray of shape (n_features,) + Normalized total reduction of criteria by feature + (Gini importance). + """ + check_is_fitted(self) + + return self.tree_.compute_feature_importances() + + +# ============================================================================= +# Public estimators +# ============================================================================= + + +class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree): + """A decision tree classifier. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"gini", "entropy", "log_loss"}, default="gini" + The function to measure the quality of a split. Supported criteria are + "gini" for the Gini impurity and "log_loss" and "entropy" both for the + Shannon information gain, see :ref:`tree_mathematical_formulation`. + + splitter : {"best", "random"}, default="best" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. 
+ A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float or {"sqrt", "log2"}, default=None + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at + each split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the estimator. The features are always + randomly permuted at each split, even if ``splitter`` is set to + ``"best"``. When ``max_features < n_features``, the algorithm will + select ``max_features`` at random at each split before finding the best + split among them. But the best found split may vary across different + runs, even if ``max_features=n_features``. That is the case, if the + improvement of the criterion is identical for several splits and one + split has to be selected at random. To obtain a deterministic behaviour + during fitting, ``random_state`` has to be fixed to an integer. + See :term:`Glossary ` for details. + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + class_weight : dict, list of dict or "balanced", default=None + Weights associated with classes in the form ``{class_label: weight}``. + If None, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. + + Note that for multioutput (including multilabel) weights should be + defined for each class of every column in its own dict. 
For example, + for four-class multilabel classification weights should be + [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of + [{1:1}, {2:5}, {3:1}, {4:1}]. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + For multi-output, the weights of each column of y will be multiplied. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multiclass classifications (i.e. when `n_classes > 2`), + - multioutput classifications (i.e. when `n_outputs_ > 1`), + - classifications trained on data with missing values. + + The constraints hold over the probability of the positive class. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) or list of ndarray + The classes labels (single output problem), + or a list of arrays of class labels (multi-output problem). + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance [4]_. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + max_features_ : int + The inferred value of max_features. + + n_classes_ : int or list of int + The number of classes (for single output problems), + or a list containing the number of classes for each + output (for multi-output problems). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + DecisionTreeRegressor : A decision tree regressor. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. 
To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + The :meth:`predict` method operates using the :func:`numpy.argmax` + function on the outputs of :meth:`predict_proba`. This means that in + case the highest predicted probabilities are tied, the classifier will + predict the tied class with the lowest index in :term:`classes_`. + + References + ---------- + + .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning + + .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification + and Regression Trees", Wadsworth, Belmont, CA, 1984. + + .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical + Learning", Springer, 2009. + + .. [4] L. Breiman, and A. Cutler, "Random Forests", + https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import cross_val_score + >>> from sklearn.tree import DecisionTreeClassifier + >>> clf = DecisionTreeClassifier(random_state=0) + >>> iris = load_iris() + >>> cross_val_score(clf, iris.data, iris.target, cv=10) + ... # doctest: +SKIP + ... + array([ 1. , 0.93..., 0.86..., 0.93..., 0.93..., + 0.93..., 0.93..., 1. , 0.93..., 1. ]) + """ + + _parameter_constraints: dict = { + **BaseDecisionTree._parameter_constraints, + "criterion": [StrOptions({"gini", "entropy", "log_loss"}), Hidden(Criterion)], + "class_weight": [dict, list, StrOptions({"balanced"}), None], + } + + def __init__( + self, + *, + criterion="gini", + splitter="best", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=None, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + class_weight=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + class_weight=class_weight, + random_state=random_state, + min_impurity_decrease=min_impurity_decrease, + monotonic_cst=monotonic_cst, + ccp_alpha=ccp_alpha, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, check_input=True): + """Build a decision tree classifier from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csc_matrix``. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The target values (class labels) as integers or strings. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. Splits are also + ignored if they would result in any single class carrying a + negative weight in either child node. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + self : DecisionTreeClassifier + Fitted estimator. 
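# Illustrative sketch (editor's aside, not part of the upstream source):
# `cost_complexity_pruning_path` and the `ccp_alpha` parameter documented
# earlier in this class are typically combined by refitting one tree per
# effective alpha and keeping the alpha that cross-validates best. Iris and
# cv=5 are assumptions chosen only for demonstration.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X, y)

# One candidate tree per effective alpha; larger alphas prune more aggressively.
scores = [
    cross_val_score(
        DecisionTreeClassifier(random_state=0, ccp_alpha=alpha), X, y, cv=5
    ).mean()
    for alpha in path.ccp_alphas
]
best_alpha = path.ccp_alphas[int(np.argmax(scores))]
print(f"chosen ccp_alpha={best_alpha:.4f}")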
+ """ + + super()._fit( + X, + y, + sample_weight=sample_weight, + check_input=check_input, + ) + return self + + def predict_proba(self, X, check_input=True): + """Predict class probabilities of the input samples X. + + The predicted class probability is the fraction of samples of the same + class in a leaf. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \ + such arrays if n_outputs > 1 + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + X = self._validate_X_predict(X, check_input) + proba = self.tree_.predict(X) + + if self.n_outputs_ == 1: + return proba[:, : self.n_classes_] + else: + all_proba = [] + for k in range(self.n_outputs_): + proba_k = proba[:, k, : self.n_classes_[k]] + all_proba.append(proba_k) + return all_proba + + def predict_log_proba(self, X): + """Predict class log-probabilities of the input samples X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \ + such arrays if n_outputs > 1 + The class log-probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + proba = self.predict_proba(X) + + if self.n_outputs_ == 1: + return np.log(proba) + + else: + for k in range(self.n_outputs_): + proba[k] = np.log(proba[k]) + + return proba + + def _more_tags(self): + # XXX: nan is only support for dense arrays, but we set this for common test to + # pass, specifically: check_estimators_nan_inf + allow_nan = self.splitter == "best" and self.criterion in { + "gini", + "log_loss", + "entropy", + } + return {"multilabel": True, "allow_nan": allow_nan} + + +class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree): + """A decision tree regressor. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"squared_error", "friedman_mse", "absolute_error", \ + "poisson"}, default="squared_error" + The function to measure the quality of a split. Supported criteria + are "squared_error" for the mean squared error, which is equal to + variance reduction as feature selection criterion and minimizes the L2 + loss using the mean of each terminal node, "friedman_mse", which uses + mean squared error with Friedman's improvement score for potential + splits, "absolute_error" for the mean absolute error, which minimizes + the L1 loss using the median of each terminal node, and "poisson" which + uses reduction in Poisson deviance to find splits. + + .. versionadded:: 0.18 + Mean Absolute Error (MAE) criterion. + + .. versionadded:: 0.24 + Poisson deviance criterion. + + splitter : {"best", "random"}, default="best" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. 
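# Illustrative sketch (editor's aside, not part of the upstream source):
# `predict_proba` and `predict_log_proba` above return per-leaf class
# fractions and their natural log, and, per the Notes earlier, `predict` is
# the argmax of `predict_proba` (ties go to the lowest class index). Iris and
# max_depth=2 are assumptions for demonstration only.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

proba = clf.predict_proba(X[:5])
assert np.array_equal(clf.predict(X[:5]), clf.classes_[np.argmax(proba, axis=1)])
with np.errstate(divide="ignore"):  # log(0) -> -inf for zero-probability classes
    assert np.allclose(clf.predict_log_proba(X[:5]), np.log(proba))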
+ + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float or {"sqrt", "log2"}, default=None + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the estimator. The features are always + randomly permuted at each split, even if ``splitter`` is set to + ``"best"``. When ``max_features < n_features``, the algorithm will + select ``max_features`` at random at each split before finding the best + split among them. But the best found split may vary across different + runs, even if ``max_features=n_features``. That is the case, if the + improvement of the criterion is identical for several splits and one + split has to be selected at random. To obtain a deterministic behaviour + during fitting, ``random_state`` has to be fixed to an integer. + See :term:`Glossary ` for details. + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. 
+ + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multioutput regressions (i.e. when `n_outputs_ > 1`), + - regressions trained on data with missing values. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + feature_importances_ : ndarray of shape (n_features,) + The feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the + (normalized) total reduction of the criterion brought + by that feature. It is also known as the Gini importance [4]_. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + max_features_ : int + The inferred value of max_features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + DecisionTreeClassifier : A decision tree classifier. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + + .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning + + .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification + and Regression Trees", Wadsworth, Belmont, CA, 1984. + + .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical + Learning", Springer, 2009. + + .. [4] L. Breiman, and A. 
Cutler, "Random Forests", + https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.model_selection import cross_val_score + >>> from sklearn.tree import DecisionTreeRegressor + >>> X, y = load_diabetes(return_X_y=True) + >>> regressor = DecisionTreeRegressor(random_state=0) + >>> cross_val_score(regressor, X, y, cv=10) + ... # doctest: +SKIP + ... + array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50..., + 0.16..., 0.11..., -0.73..., -0.30..., -0.00...]) + """ + + _parameter_constraints: dict = { + **BaseDecisionTree._parameter_constraints, + "criterion": [ + StrOptions({"squared_error", "friedman_mse", "absolute_error", "poisson"}), + Hidden(Criterion), + ], + } + + def __init__( + self, + *, + criterion="squared_error", + splitter="best", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=None, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + random_state=random_state, + min_impurity_decrease=min_impurity_decrease, + ccp_alpha=ccp_alpha, + monotonic_cst=monotonic_cst, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, check_input=True): + """Build a decision tree regressor from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csc_matrix``. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + The target values (real numbers). Use ``dtype=np.float64`` and + ``order='C'`` for maximum efficiency. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. Splits + that would create child nodes with net zero or negative weight are + ignored while searching for a split in each node. + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you're doing. + + Returns + ------- + self : DecisionTreeRegressor + Fitted estimator. + """ + + super()._fit( + X, + y, + sample_weight=sample_weight, + check_input=check_input, + ) + return self + + def _compute_partial_dependence_recursion(self, grid, target_features): + """Fast partial dependence computation. + + Parameters + ---------- + grid : ndarray of shape (n_samples, n_target_features) + The grid points on which the partial dependence should be + evaluated. + target_features : ndarray of shape (n_target_features) + The set of target features for which the partial dependence + should be evaluated. + + Returns + ------- + averaged_predictions : ndarray of shape (n_samples,) + The value of the partial dependence function on each grid point. 
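# Illustrative sketch (editor's aside, not part of the upstream source): the
# private `_compute_partial_dependence_recursion` above backs the public
# `sklearn.inspection.partial_dependence` helper, which is the supported way
# to get these numbers; with method="recursion" it walks the fitted tree
# instead of re-predicting on a modified copy of X for every grid point.
# The diabetes data and feature index 2 are assumptions for demonstration.
from sklearn.datasets import load_diabetes
from sklearn.inspection import partial_dependence
from sklearn.tree import DecisionTreeRegressor

X, y = load_diabetes(return_X_y=True)
reg = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X, y)

pd_result = partial_dependence(reg, X, features=[2], method="recursion")
print(pd_result["average"].shape)  # (1, n_grid_points)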
+ """ + grid = np.asarray(grid, dtype=DTYPE, order="C") + averaged_predictions = np.zeros( + shape=grid.shape[0], dtype=np.float64, order="C" + ) + + self.tree_.compute_partial_dependence( + grid, target_features, averaged_predictions + ) + return averaged_predictions + + def _more_tags(self): + # XXX: nan is only support for dense arrays, but we set this for common test to + # pass, specifically: check_estimators_nan_inf + allow_nan = self.splitter == "best" and self.criterion in { + "squared_error", + "friedman_mse", + "poisson", + } + return {"allow_nan": allow_nan} + + +class ExtraTreeClassifier(DecisionTreeClassifier): + """An extremely randomized tree classifier. + + Extra-trees differ from classic decision trees in the way they are built. + When looking for the best split to separate the samples of a node into two + groups, random splits are drawn for each of the `max_features` randomly + selected features and the best split among those is chosen. When + `max_features` is set 1, this amounts to building a totally random + decision tree. + + Warning: Extra-trees should only be used within ensemble methods. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"gini", "entropy", "log_loss"}, default="gini" + The function to measure the quality of a split. Supported criteria are + "gini" for the Gini impurity and "log_loss" and "entropy" both for the + Shannon information gain, see :ref:`tree_mathematical_formulation`. + + splitter : {"random", "best"}, default="random" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float, {"sqrt", "log2"} or None, default="sqrt" + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at + each split. + - If "sqrt", then `max_features=sqrt(n_features)`. 
+ - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to `"sqrt"`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Used to pick randomly the `max_features` used at each split. + See :term:`Glossary ` for details. + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + class_weight : dict, list of dict or "balanced", default=None + Weights associated with classes in the form ``{class_label: weight}``. + If None, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. + + Note that for multioutput (including multilabel) weights should be + defined for each class of every column in its own dict. For example, + for four-class multilabel classification weights should be + [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of + [{1:1}, {2:5}, {3:1}, {4:1}]. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + For multi-output, the weights of each column of y will be multiplied. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multiclass classifications (i.e. when `n_classes > 2`), + - multioutput classifications (i.e. when `n_outputs_ > 1`), + - classifications trained on data with missing values. + + The constraints hold over the probability of the positive class. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 1.4 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) or list of ndarray + The classes labels (single output problem), + or a list of arrays of class labels (multi-output problem). + + max_features_ : int + The inferred value of max_features. + + n_classes_ : int or list of int + The number of classes (for single output problems), + or a list containing the number of classes for each + output (for multi-output problems). + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances. + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + ExtraTreeRegressor : An extremely randomized tree regressor. + sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier. + sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor. + sklearn.ensemble.RandomForestClassifier : A random forest classifier. + sklearn.ensemble.RandomForestRegressor : A random forest regressor. + sklearn.ensemble.RandomTreesEmbedding : An ensemble of + totally random trees. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + + .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", + Machine Learning, 63(1), 3-42, 2006. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.ensemble import BaggingClassifier + >>> from sklearn.tree import ExtraTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> extra_tree = ExtraTreeClassifier(random_state=0) + >>> cls = BaggingClassifier(extra_tree, random_state=0).fit( + ... X_train, y_train) + >>> cls.score(X_test, y_test) + 0.8947... 
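# Illustrative sketch (editor's aside, not part of the upstream source): as the
# docstring above notes, `max_features=1` makes each extra tree totally random,
# and a single extra tree is intended to be used inside an ensemble. Iris and
# 100 bagged trees are assumptions chosen only to show the intended usage
# pattern of one weak randomized tree versus an averaged ensemble of them.
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import ExtraTreeClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

random_tree = ExtraTreeClassifier(max_features=1, random_state=0)
single = random_tree.fit(X_train, y_train).score(X_test, y_test)
bagged = BaggingClassifier(random_tree, n_estimators=100, random_state=0)
ensemble = bagged.fit(X_train, y_train).score(X_test, y_test)
print(f"single random tree: {single:.2f}, bagged ensemble: {ensemble:.2f}")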
+ """ + + def __init__( + self, + *, + criterion="gini", + splitter="random", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="sqrt", + random_state=None, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + class_weight=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + class_weight=class_weight, + min_impurity_decrease=min_impurity_decrease, + random_state=random_state, + ccp_alpha=ccp_alpha, + monotonic_cst=monotonic_cst, + ) + + +class ExtraTreeRegressor(DecisionTreeRegressor): + """An extremely randomized tree regressor. + + Extra-trees differ from classic decision trees in the way they are built. + When looking for the best split to separate the samples of a node into two + groups, random splits are drawn for each of the `max_features` randomly + selected features and the best split among those is chosen. When + `max_features` is set 1, this amounts to building a totally random + decision tree. + + Warning: Extra-trees should only be used within ensemble methods. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {"squared_error", "friedman_mse", "absolute_error", "poisson"}, \ + default="squared_error" + The function to measure the quality of a split. Supported criteria + are "squared_error" for the mean squared error, which is equal to + variance reduction as feature selection criterion and minimizes the L2 + loss using the mean of each terminal node, "friedman_mse", which uses + mean squared error with Friedman's improvement score for potential + splits, "absolute_error" for the mean absolute error, which minimizes + the L1 loss using the median of each terminal node, and "poisson" which + uses reduction in Poisson deviance to find splits. + + .. versionadded:: 0.18 + Mean Absolute Error (MAE) criterion. + + .. versionadded:: 0.24 + Poisson deviance criterion. + + splitter : {"random", "best"}, default="random" + The strategy used to choose the split at each node. Supported + strategies are "best" to choose the best split and "random" to choose + the best random split. + + max_depth : int, default=None + The maximum depth of the tree. If None, then nodes are expanded until + all leaves are pure or until all leaves contain less than + min_samples_split samples. + + min_samples_split : int or float, default=2 + The minimum number of samples required to split an internal node: + + - If int, then consider `min_samples_split` as the minimum number. + - If float, then `min_samples_split` is a fraction and + `ceil(min_samples_split * n_samples)` are the minimum + number of samples for each split. + + .. versionchanged:: 0.18 + Added float values for fractions. + + min_samples_leaf : int or float, default=1 + The minimum number of samples required to be at a leaf node. + A split point at any depth will only be considered if it leaves at + least ``min_samples_leaf`` training samples in each of the left and + right branches. This may have the effect of smoothing the model, + especially in regression. + + - If int, then consider `min_samples_leaf` as the minimum number. + - If float, then `min_samples_leaf` is a fraction and + `ceil(min_samples_leaf * n_samples)` are the minimum + number of samples for each node. + + .. 
versionchanged:: 0.18 + Added float values for fractions. + + min_weight_fraction_leaf : float, default=0.0 + The minimum weighted fraction of the sum total of weights (of all + the input samples) required to be at a leaf node. Samples have + equal weight when sample_weight is not provided. + + max_features : int, float, {"sqrt", "log2"} or None, default=1.0 + The number of features to consider when looking for the best split: + + - If int, then consider `max_features` features at each split. + - If float, then `max_features` is a fraction and + `max(1, int(max_features * n_features_in_))` features are considered at each + split. + - If "sqrt", then `max_features=sqrt(n_features)`. + - If "log2", then `max_features=log2(n_features)`. + - If None, then `max_features=n_features`. + + .. versionchanged:: 1.1 + The default of `max_features` changed from `"auto"` to `1.0`. + + Note: the search for a split does not stop until at least one + valid partition of the node samples is found, even if it requires to + effectively inspect more than ``max_features`` features. + + random_state : int, RandomState instance or None, default=None + Used to pick randomly the `max_features` used at each split. + See :term:`Glossary ` for details. + + min_impurity_decrease : float, default=0.0 + A node will be split if this split induces a decrease of the impurity + greater than or equal to this value. + + The weighted impurity decrease equation is the following:: + + N_t / N * (impurity - N_t_R / N_t * right_impurity + - N_t_L / N_t * left_impurity) + + where ``N`` is the total number of samples, ``N_t`` is the number of + samples at the current node, ``N_t_L`` is the number of samples in the + left child, and ``N_t_R`` is the number of samples in the right child. + + ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, + if ``sample_weight`` is passed. + + .. versionadded:: 0.19 + + max_leaf_nodes : int, default=None + Grow a tree with ``max_leaf_nodes`` in best-first fashion. + Best nodes are defined as relative reduction in impurity. + If None then unlimited number of leaf nodes. + + ccp_alpha : non-negative float, default=0.0 + Complexity parameter used for Minimal Cost-Complexity Pruning. The + subtree with the largest cost complexity that is smaller than + ``ccp_alpha`` will be chosen. By default, no pruning is performed. See + :ref:`minimal_cost_complexity_pruning` for details. + + .. versionadded:: 0.22 + + monotonic_cst : array-like of int of shape (n_features), default=None + Indicates the monotonicity constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If monotonic_cst is None, no constraints are applied. + + Monotonicity constraints are not supported for: + - multioutput regressions (i.e. when `n_outputs_ > 1`), + - regressions trained on data with missing values. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Attributes + ---------- + max_features_ : int + The inferred value of max_features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + feature_importances_ : ndarray of shape (n_features,) + Return impurity-based feature importances (the higher, the more + important the feature). 
+ + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_outputs_ : int + The number of outputs when ``fit`` is performed. + + tree_ : Tree instance + The underlying Tree object. Please refer to + ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and + :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` + for basic usage of these attributes. + + See Also + -------- + ExtraTreeClassifier : An extremely randomized tree classifier. + sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier. + sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor. + + Notes + ----- + The default values for the parameters controlling the size of the trees + (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and + unpruned trees which can potentially be very large on some data sets. To + reduce memory consumption, the complexity and size of the trees should be + controlled by setting those parameter values. + + References + ---------- + + .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", + Machine Learning, 63(1), 3-42, 2006. + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.ensemble import BaggingRegressor + >>> from sklearn.tree import ExtraTreeRegressor + >>> X, y = load_diabetes(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> extra_tree = ExtraTreeRegressor(random_state=0) + >>> reg = BaggingRegressor(extra_tree, random_state=0).fit( + ... X_train, y_train) + >>> reg.score(X_test, y_test) + 0.33... + """ + + def __init__( + self, + *, + criterion="squared_error", + splitter="random", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=1.0, + random_state=None, + min_impurity_decrease=0.0, + max_leaf_nodes=None, + ccp_alpha=0.0, + monotonic_cst=None, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + min_impurity_decrease=min_impurity_decrease, + random_state=random_state, + ccp_alpha=ccp_alpha, + monotonic_cst=monotonic_cst, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_criterion.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/tree/_criterion.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..73f2a4422d58fc8b58e4caa6d51ccd6bc4031d4b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/_criterion.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_criterion.pxd b/venv/lib/python3.10/site-packages/sklearn/tree/_criterion.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6538b9b824a79ee00b955415c516df56fcfe2797 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/_criterion.pxd @@ -0,0 +1,117 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Joel Nothman +# Arnaud Joly +# Jacob Schreiber +# +# License: BSD 3 clause + +# See _criterion.pyx for implementation details. 
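# Illustrative sketch (editor's aside, not part of the upstream source): a
# pure-Python, unit-sample-weight analogue of what the Cython
# `Criterion.impurity_improvement` declared below computes, using the weighted
# impurity-decrease formula quoted in the estimator docstrings:
#     N_t / N * (impurity - N_t_R / N_t * right_impurity
#                         - N_t_L / N_t * left_impurity)
# The Gini helper and the toy split are assumptions for illustration only.
import numpy as np


def gini(labels):
    """Gini impurity of a 1-D array of non-negative integer class labels."""
    p = np.bincount(labels) / len(labels)
    return 1.0 - np.sum(p**2)


def impurity_improvement(parent, left, right, n_total):
    """Weighted impurity decrease of splitting `parent` into `left`/`right`."""
    n_t, n_l, n_r = len(parent), len(left), len(right)
    children = (n_r / n_t) * gini(right) + (n_l / n_t) * gini(left)
    return (n_t / n_total) * (gini(parent) - children)


parent = np.array([0, 0, 0, 1, 1, 1])
left, right = parent[:3], parent[3:]  # a perfect split of the toy node
print(impurity_improvement(parent, left, right, n_total=len(parent)))  # 0.5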
+cimport numpy as cnp + +from ..utils._typedefs cimport float64_t, intp_t + + +cdef class Criterion: + # The criterion computes the impurity of a node and the reduction of + # impurity of a split on that node. It also computes the output statistics + # such as the mean in regression and class probabilities in classification. + + # Internal structures + cdef const float64_t[:, ::1] y # Values of y + cdef const float64_t[:] sample_weight # Sample weights + + cdef const intp_t[:] sample_indices # Sample indices in X, y + cdef intp_t start # samples[start:pos] are the samples in the left node + cdef intp_t pos # samples[pos:end] are the samples in the right node + cdef intp_t end + cdef intp_t n_missing # Number of missing values for the feature being evaluated + cdef bint missing_go_to_left # Whether missing values go to the left node + + cdef intp_t n_outputs # Number of outputs + cdef intp_t n_samples # Number of samples + cdef intp_t n_node_samples # Number of samples in the node (end-start) + cdef float64_t weighted_n_samples # Weighted number of samples (in total) + cdef float64_t weighted_n_node_samples # Weighted number of samples in the node + cdef float64_t weighted_n_left # Weighted number of samples in the left node + cdef float64_t weighted_n_right # Weighted number of samples in the right node + cdef float64_t weighted_n_missing # Weighted number of samples that are missing + + # The criterion object is maintained such that left and right collected + # statistics correspond to samples[start:pos] and samples[pos:end]. + + # Methods + cdef int init( + self, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight, + float64_t weighted_n_samples, + const intp_t[:] sample_indices, + intp_t start, + intp_t end + ) except -1 nogil + cdef void init_sum_missing(self) + cdef void init_missing(self, intp_t n_missing) noexcept nogil + cdef int reset(self) except -1 nogil + cdef int reverse_reset(self) except -1 nogil + cdef int update(self, intp_t new_pos) except -1 nogil + cdef float64_t node_impurity(self) noexcept nogil + cdef void children_impurity( + self, + float64_t* impurity_left, + float64_t* impurity_right + ) noexcept nogil + cdef void node_value( + self, + float64_t* dest + ) noexcept nogil + cdef void clip_node_value( + self, + float64_t* dest, + float64_t lower_bound, + float64_t upper_bound + ) noexcept nogil + cdef float64_t middle_value(self) noexcept nogil + cdef float64_t impurity_improvement( + self, + float64_t impurity_parent, + float64_t impurity_left, + float64_t impurity_right + ) noexcept nogil + cdef float64_t proxy_impurity_improvement(self) noexcept nogil + cdef bint check_monotonicity( + self, + cnp.int8_t monotonic_cst, + float64_t lower_bound, + float64_t upper_bound, + ) noexcept nogil + cdef inline bint _check_monotonicity( + self, + cnp.int8_t monotonic_cst, + float64_t lower_bound, + float64_t upper_bound, + float64_t sum_left, + float64_t sum_right, + ) noexcept nogil + +cdef class ClassificationCriterion(Criterion): + """Abstract criterion for classification.""" + + cdef intp_t[::1] n_classes + cdef intp_t max_n_classes + + cdef float64_t[:, ::1] sum_total # The sum of the weighted count of each label. 
+ cdef float64_t[:, ::1] sum_left # Same as above, but for the left side of the split + cdef float64_t[:, ::1] sum_right # Same as above, but for the right side of the split + cdef float64_t[:, ::1] sum_missing # Same as above, but for missing values in X + +cdef class RegressionCriterion(Criterion): + """Abstract regression criterion.""" + + cdef float64_t sq_sum_total + + cdef float64_t[::1] sum_total # The sum of w*y. + cdef float64_t[::1] sum_left # Same as above, but for the left side of the split + cdef float64_t[::1] sum_right # Same as above, but for the right side of the split + cdef float64_t[::1] sum_missing # Same as above, but for missing values in X diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_export.py b/venv/lib/python3.10/site-packages/sklearn/tree/_export.py new file mode 100644 index 0000000000000000000000000000000000000000..f6492cf6a821f60fff8cb76da2e53ebe45ff1faa --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/_export.py @@ -0,0 +1,1135 @@ +""" +This module defines export functions for decision trees. +""" + +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Noel Dawe +# Satrajit Gosh +# Trevor Stephens +# Li Li +# Giuseppe Vettigli +# License: BSD 3 clause +from collections.abc import Iterable +from io import StringIO +from numbers import Integral + +import numpy as np + +from ..base import is_classifier +from ..utils._param_validation import HasMethods, Interval, StrOptions, validate_params +from ..utils.validation import check_array, check_is_fitted +from . import DecisionTreeClassifier, DecisionTreeRegressor, _criterion, _tree +from ._reingold_tilford import Tree, buchheim + + +def _color_brew(n): + """Generate n colors with equally spaced hues. + + Parameters + ---------- + n : int + The number of colors required. + + Returns + ------- + color_list : list, length n + List of n tuples of form (R, G, B) being the components of each color. 
+ """ + color_list = [] + + # Initialize saturation & value; calculate chroma & value shift + s, v = 0.75, 0.9 + c = s * v + m = v - c + + for h in np.arange(25, 385, 360.0 / n).astype(int): + # Calculate some intermediate values + h_bar = h / 60.0 + x = c * (1 - abs((h_bar % 2) - 1)) + # Initialize RGB with same hue & chroma as our color + rgb = [ + (c, x, 0), + (x, c, 0), + (0, c, x), + (0, x, c), + (x, 0, c), + (c, 0, x), + (c, x, 0), + ] + r, g, b = rgb[int(h_bar)] + # Shift the initial RGB values to match value and store + rgb = [(int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))] + color_list.append(rgb) + + return color_list + + +class Sentinel: + def __repr__(self): + return '"tree.dot"' + + +SENTINEL = Sentinel() + + +@validate_params( + { + "decision_tree": [DecisionTreeClassifier, DecisionTreeRegressor], + "max_depth": [Interval(Integral, 0, None, closed="left"), None], + "feature_names": ["array-like", None], + "class_names": ["array-like", "boolean", None], + "label": [StrOptions({"all", "root", "none"})], + "filled": ["boolean"], + "impurity": ["boolean"], + "node_ids": ["boolean"], + "proportion": ["boolean"], + "rounded": ["boolean"], + "precision": [Interval(Integral, 0, None, closed="left"), None], + "ax": "no_validation", # delegate validation to matplotlib + "fontsize": [Interval(Integral, 0, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def plot_tree( + decision_tree, + *, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + impurity=True, + node_ids=False, + proportion=False, + rounded=False, + precision=3, + ax=None, + fontsize=None, +): + """Plot a decision tree. + + The sample counts that are shown are weighted with any sample_weights that + might be present. + + The visualization is fit automatically to the size of the axis. + Use the ``figsize`` or ``dpi`` arguments of ``plt.figure`` to control + the size of the rendering. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.21 + + Parameters + ---------- + decision_tree : decision tree regressor or classifier + The decision tree to be plotted. + + max_depth : int, default=None + The maximum depth of the representation. If None, the tree is fully + generated. + + feature_names : array-like of str, default=None + Names of each of the features. + If None, generic names will be used ("x[0]", "x[1]", ...). + + class_names : array-like of str or True, default=None + Names of each of the target classes in ascending numerical order. + Only relevant for classification and not supported for multi-output. + If ``True``, shows a symbolic representation of the class name. + + label : {'all', 'root', 'none'}, default='all' + Whether to show informative labels for impurity, etc. + Options include 'all' to show at every node, 'root' to show only at + the top root node, or 'none' to not show at any node. + + filled : bool, default=False + When set to ``True``, paint nodes to indicate majority class for + classification, extremity of values for regression, or purity of node + for multi-output. + + impurity : bool, default=True + When set to ``True``, show the impurity at each node. + + node_ids : bool, default=False + When set to ``True``, show the ID number on each node. + + proportion : bool, default=False + When set to ``True``, change the display of 'values' and/or 'samples' + to be proportions and percentages respectively. 
+ + rounded : bool, default=False + When set to ``True``, draw node boxes with rounded corners and use + Helvetica fonts instead of Times-Roman. + + precision : int, default=3 + Number of digits of precision for floating point in the values of + impurity, threshold and value attributes of each node. + + ax : matplotlib axis, default=None + Axes to plot to. If None, use current axis. Any previous content + is cleared. + + fontsize : int, default=None + Size of text font. If None, determined automatically to fit figure. + + Returns + ------- + annotations : list of artists + List containing the artists for the annotation boxes making up the + tree. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn import tree + + >>> clf = tree.DecisionTreeClassifier(random_state=0) + >>> iris = load_iris() + + >>> clf = clf.fit(iris.data, iris.target) + >>> tree.plot_tree(clf) + [...] + """ + + check_is_fitted(decision_tree) + + exporter = _MPLTreeExporter( + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rounded=rounded, + precision=precision, + fontsize=fontsize, + ) + return exporter.export(decision_tree, ax=ax) + + +class _BaseTreeExporter: + def __init__( + self, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + impurity=True, + node_ids=False, + proportion=False, + rounded=False, + precision=3, + fontsize=None, + ): + self.max_depth = max_depth + self.feature_names = feature_names + self.class_names = class_names + self.label = label + self.filled = filled + self.impurity = impurity + self.node_ids = node_ids + self.proportion = proportion + self.rounded = rounded + self.precision = precision + self.fontsize = fontsize + + def get_color(self, value): + # Find the appropriate color & intensity for a node + if self.colors["bounds"] is None: + # Classification tree + color = list(self.colors["rgb"][np.argmax(value)]) + sorted_values = sorted(value, reverse=True) + if len(sorted_values) == 1: + alpha = 0.0 + else: + alpha = (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1]) + else: + # Regression tree or multi-output + color = list(self.colors["rgb"][0]) + alpha = (value - self.colors["bounds"][0]) / ( + self.colors["bounds"][1] - self.colors["bounds"][0] + ) + # compute the color as alpha against white + color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color] + # Return html color code in #RRGGBB format + return "#%2x%2x%2x" % tuple(color) + + def get_fill_color(self, tree, node_id): + # Fetch appropriate color for node + if "rgb" not in self.colors: + # Initialize colors and bounds if required + self.colors["rgb"] = _color_brew(tree.n_classes[0]) + if tree.n_outputs != 1: + # Find max and min impurities for multi-output + self.colors["bounds"] = (np.min(-tree.impurity), np.max(-tree.impurity)) + elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1: + # Find max and min values in leaf nodes for regression + self.colors["bounds"] = (np.min(tree.value), np.max(tree.value)) + if tree.n_outputs == 1: + node_val = tree.value[node_id][0, :] + if ( + tree.n_classes[0] == 1 + and isinstance(node_val, Iterable) + and self.colors["bounds"] is not None + ): + # Unpack the float only for the regression tree case. + # Classification tree requires an Iterable in `get_color`. 
+ node_val = node_val.item() + else: + # If multi-output color node by impurity + node_val = -tree.impurity[node_id] + return self.get_color(node_val) + + def node_to_str(self, tree, node_id, criterion): + # Generate the node content string + if tree.n_outputs == 1: + value = tree.value[node_id][0, :] + else: + value = tree.value[node_id] + + # Should labels be shown? + labels = (self.label == "root" and node_id == 0) or self.label == "all" + + characters = self.characters + node_string = characters[-1] + + # Write node ID + if self.node_ids: + if labels: + node_string += "node " + node_string += characters[0] + str(node_id) + characters[4] + + # Write decision criteria + if tree.children_left[node_id] != _tree.TREE_LEAF: + # Always write node decision criteria, except for leaves + if self.feature_names is not None: + feature = self.feature_names[tree.feature[node_id]] + else: + feature = "x%s%s%s" % ( + characters[1], + tree.feature[node_id], + characters[2], + ) + node_string += "%s %s %s%s" % ( + feature, + characters[3], + round(tree.threshold[node_id], self.precision), + characters[4], + ) + + # Write impurity + if self.impurity: + if isinstance(criterion, _criterion.FriedmanMSE): + criterion = "friedman_mse" + elif isinstance(criterion, _criterion.MSE) or criterion == "squared_error": + criterion = "squared_error" + elif not isinstance(criterion, str): + criterion = "impurity" + if labels: + node_string += "%s = " % criterion + node_string += ( + str(round(tree.impurity[node_id], self.precision)) + characters[4] + ) + + # Write node sample count + if labels: + node_string += "samples = " + if self.proportion: + percent = ( + 100.0 * tree.n_node_samples[node_id] / float(tree.n_node_samples[0]) + ) + node_string += str(round(percent, 1)) + "%" + characters[4] + else: + node_string += str(tree.n_node_samples[node_id]) + characters[4] + + # Write node class distribution / regression value + if not self.proportion and tree.n_classes[0] != 1: + # For classification this will show the proportion of samples + value = value * tree.weighted_n_node_samples[node_id] + if labels: + node_string += "value = " + if tree.n_classes[0] == 1: + # Regression + value_text = np.around(value, self.precision) + elif self.proportion: + # Classification + value_text = np.around(value, self.precision) + elif np.all(np.equal(np.mod(value, 1), 0)): + # Classification without floating-point weights + value_text = value.astype(int) + else: + # Classification with floating-point weights + value_text = np.around(value, self.precision) + # Strip whitespace + value_text = str(value_text.astype("S32")).replace("b'", "'") + value_text = value_text.replace("' '", ", ").replace("'", "") + if tree.n_classes[0] == 1 and tree.n_outputs == 1: + value_text = value_text.replace("[", "").replace("]", "") + value_text = value_text.replace("\n ", characters[4]) + node_string += value_text + characters[4] + + # Write node majority class + if ( + self.class_names is not None + and tree.n_classes[0] != 1 + and tree.n_outputs == 1 + ): + # Only done for single-output classification trees + if labels: + node_string += "class = " + if self.class_names is not True: + class_name = self.class_names[np.argmax(value)] + else: + class_name = "y%s%s%s" % ( + characters[1], + np.argmax(value), + characters[2], + ) + node_string += class_name + + # Clean up any trailing newlines + if node_string.endswith(characters[4]): + node_string = node_string[: -len(characters[4])] + + return node_string + characters[5] + + +class 
_DOTTreeExporter(_BaseTreeExporter): + def __init__( + self, + out_file=SENTINEL, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + leaves_parallel=False, + impurity=True, + node_ids=False, + proportion=False, + rotate=False, + rounded=False, + special_characters=False, + precision=3, + fontname="helvetica", + ): + super().__init__( + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rounded=rounded, + precision=precision, + ) + self.leaves_parallel = leaves_parallel + self.out_file = out_file + self.special_characters = special_characters + self.fontname = fontname + self.rotate = rotate + + # PostScript compatibility for special characters + if special_characters: + self.characters = ["#", "", "", "≤", "
", ">", "<"] + else: + self.characters = ["#", "[", "]", "<=", "\\n", '"', '"'] + + # The depth of each node for plotting with 'leaf' option + self.ranks = {"leaves": []} + # The colors to render each node with + self.colors = {"bounds": None} + + def export(self, decision_tree): + # Check length of feature_names before getting into the tree node + # Raise error if length of feature_names does not match + # n_features_in_ in the decision_tree + if self.feature_names is not None: + if len(self.feature_names) != decision_tree.n_features_in_: + raise ValueError( + "Length of feature_names, %d does not match number of features, %d" + % (len(self.feature_names), decision_tree.n_features_in_) + ) + # each part writes to out_file + self.head() + # Now recurse the tree and add node & edge attributes + if isinstance(decision_tree, _tree.Tree): + self.recurse(decision_tree, 0, criterion="impurity") + else: + self.recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion) + + self.tail() + + def tail(self): + # If required, draw leaf nodes at same depth as each other + if self.leaves_parallel: + for rank in sorted(self.ranks): + self.out_file.write( + "{rank=same ; " + "; ".join(r for r in self.ranks[rank]) + "} ;\n" + ) + self.out_file.write("}") + + def head(self): + self.out_file.write("digraph Tree {\n") + + # Specify node aesthetics + self.out_file.write("node [shape=box") + rounded_filled = [] + if self.filled: + rounded_filled.append("filled") + if self.rounded: + rounded_filled.append("rounded") + if len(rounded_filled) > 0: + self.out_file.write( + ', style="%s", color="black"' % ", ".join(rounded_filled) + ) + + self.out_file.write(', fontname="%s"' % self.fontname) + self.out_file.write("] ;\n") + + # Specify graph & edge aesthetics + if self.leaves_parallel: + self.out_file.write("graph [ranksep=equally, splines=polyline] ;\n") + + self.out_file.write('edge [fontname="%s"] ;\n' % self.fontname) + + if self.rotate: + self.out_file.write("rankdir=LR ;\n") + + def recurse(self, tree, node_id, criterion, parent=None, depth=0): + if node_id == _tree.TREE_LEAF: + raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) + + left_child = tree.children_left[node_id] + right_child = tree.children_right[node_id] + + # Add node with description + if self.max_depth is None or depth <= self.max_depth: + # Collect ranks for 'leaf' option in plot_options + if left_child == _tree.TREE_LEAF: + self.ranks["leaves"].append(str(node_id)) + elif str(depth) not in self.ranks: + self.ranks[str(depth)] = [str(node_id)] + else: + self.ranks[str(depth)].append(str(node_id)) + + self.out_file.write( + "%d [label=%s" % (node_id, self.node_to_str(tree, node_id, criterion)) + ) + + if self.filled: + self.out_file.write( + ', fillcolor="%s"' % self.get_fill_color(tree, node_id) + ) + self.out_file.write("] ;\n") + + if parent is not None: + # Add edge to parent + self.out_file.write("%d -> %d" % (parent, node_id)) + if parent == 0: + # Draw True/False labels if parent is root node + angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2) + self.out_file.write(" [labeldistance=2.5, labelangle=") + if node_id == 1: + self.out_file.write('%d, headlabel="True"]' % angles[0]) + else: + self.out_file.write('%d, headlabel="False"]' % angles[1]) + self.out_file.write(" ;\n") + + if left_child != _tree.TREE_LEAF: + self.recurse( + tree, + left_child, + criterion=criterion, + parent=node_id, + depth=depth + 1, + ) + self.recurse( + tree, + right_child, + criterion=criterion, + parent=node_id, + depth=depth + 1, + ) + + 
else: + self.ranks["leaves"].append(str(node_id)) + + self.out_file.write('%d [label="(...)"' % node_id) + if self.filled: + # color cropped nodes grey + self.out_file.write(', fillcolor="#C0C0C0"') + self.out_file.write("] ;\n" % node_id) + + if parent is not None: + # Add edge to parent + self.out_file.write("%d -> %d ;\n" % (parent, node_id)) + + +class _MPLTreeExporter(_BaseTreeExporter): + def __init__( + self, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + impurity=True, + node_ids=False, + proportion=False, + rounded=False, + precision=3, + fontsize=None, + ): + super().__init__( + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rounded=rounded, + precision=precision, + ) + self.fontsize = fontsize + + # The depth of each node for plotting with 'leaf' option + self.ranks = {"leaves": []} + # The colors to render each node with + self.colors = {"bounds": None} + + self.characters = ["#", "[", "]", "<=", "\n", "", ""] + self.bbox_args = dict() + if self.rounded: + self.bbox_args["boxstyle"] = "round" + + self.arrow_args = dict(arrowstyle="<-") + + def _make_tree(self, node_id, et, criterion, depth=0): + # traverses _tree.Tree recursively, builds intermediate + # "_reingold_tilford.Tree" object + name = self.node_to_str(et, node_id, criterion=criterion) + if et.children_left[node_id] != _tree.TREE_LEAF and ( + self.max_depth is None or depth <= self.max_depth + ): + children = [ + self._make_tree( + et.children_left[node_id], et, criterion, depth=depth + 1 + ), + self._make_tree( + et.children_right[node_id], et, criterion, depth=depth + 1 + ), + ] + else: + return Tree(name, node_id) + return Tree(name, node_id, *children) + + def export(self, decision_tree, ax=None): + import matplotlib.pyplot as plt + from matplotlib.text import Annotation + + if ax is None: + ax = plt.gca() + ax.clear() + ax.set_axis_off() + my_tree = self._make_tree(0, decision_tree.tree_, decision_tree.criterion) + draw_tree = buchheim(my_tree) + + # important to make sure we're still + # inside the axis after drawing the box + # this makes sense because the width of a box + # is about the same as the distance between boxes + max_x, max_y = draw_tree.max_extents() + 1 + ax_width = ax.get_window_extent().width + ax_height = ax.get_window_extent().height + + scale_x = ax_width / max_x + scale_y = ax_height / max_y + self.recurse(draw_tree, decision_tree.tree_, ax, max_x, max_y) + + anns = [ann for ann in ax.get_children() if isinstance(ann, Annotation)] + + # update sizes of all bboxes + renderer = ax.figure.canvas.get_renderer() + + for ann in anns: + ann.update_bbox_position_size(renderer) + + if self.fontsize is None: + # get figure to data transform + # adjust fontsize to avoid overlap + # get max box width and height + extents = [ann.get_bbox_patch().get_window_extent() for ann in anns] + max_width = max([extent.width for extent in extents]) + max_height = max([extent.height for extent in extents]) + # width should be around scale_x in axis coordinates + size = anns[0].get_fontsize() * min( + scale_x / max_width, scale_y / max_height + ) + for ann in anns: + ann.set_fontsize(size) + + return anns + + def recurse(self, node, tree, ax, max_x, max_y, depth=0): + import matplotlib.pyplot as plt + + kwargs = dict( + bbox=self.bbox_args.copy(), + ha="center", + va="center", + zorder=100 - 10 * depth, + xycoords="axes fraction", + 
arrowprops=self.arrow_args.copy(), + ) + kwargs["arrowprops"]["edgecolor"] = plt.rcParams["text.color"] + + if self.fontsize is not None: + kwargs["fontsize"] = self.fontsize + + # offset things by .5 to center them in plot + xy = ((node.x + 0.5) / max_x, (max_y - node.y - 0.5) / max_y) + + if self.max_depth is None or depth <= self.max_depth: + if self.filled: + kwargs["bbox"]["fc"] = self.get_fill_color(tree, node.tree.node_id) + else: + kwargs["bbox"]["fc"] = ax.get_facecolor() + + if node.parent is None: + # root + ax.annotate(node.tree.label, xy, **kwargs) + else: + xy_parent = ( + (node.parent.x + 0.5) / max_x, + (max_y - node.parent.y - 0.5) / max_y, + ) + ax.annotate(node.tree.label, xy_parent, xy, **kwargs) + for child in node.children: + self.recurse(child, tree, ax, max_x, max_y, depth=depth + 1) + + else: + xy_parent = ( + (node.parent.x + 0.5) / max_x, + (max_y - node.parent.y - 0.5) / max_y, + ) + kwargs["bbox"]["fc"] = "grey" + ax.annotate("\n (...) \n", xy_parent, xy, **kwargs) + + +@validate_params( + { + "decision_tree": "no_validation", + "out_file": [str, None, HasMethods("write")], + "max_depth": [Interval(Integral, 0, None, closed="left"), None], + "feature_names": ["array-like", None], + "class_names": ["array-like", "boolean", None], + "label": [StrOptions({"all", "root", "none"})], + "filled": ["boolean"], + "leaves_parallel": ["boolean"], + "impurity": ["boolean"], + "node_ids": ["boolean"], + "proportion": ["boolean"], + "rotate": ["boolean"], + "rounded": ["boolean"], + "special_characters": ["boolean"], + "precision": [Interval(Integral, 0, None, closed="left"), None], + "fontname": [str], + }, + prefer_skip_nested_validation=True, +) +def export_graphviz( + decision_tree, + out_file=None, + *, + max_depth=None, + feature_names=None, + class_names=None, + label="all", + filled=False, + leaves_parallel=False, + impurity=True, + node_ids=False, + proportion=False, + rotate=False, + rounded=False, + special_characters=False, + precision=3, + fontname="helvetica", +): + """Export a decision tree in DOT format. + + This function generates a GraphViz representation of the decision tree, + which is then written into `out_file`. Once exported, graphical renderings + can be generated using, for example:: + + $ dot -Tps tree.dot -o tree.ps (PostScript format) + $ dot -Tpng tree.dot -o tree.png (PNG format) + + The sample counts that are shown are weighted with any sample_weights that + might be present. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + decision_tree : object + The decision tree estimator to be exported to GraphViz. + + out_file : object or str, default=None + Handle or name of the output file. If ``None``, the result is + returned as a string. + + .. versionchanged:: 0.20 + Default of out_file changed from "tree.dot" to None. + + max_depth : int, default=None + The maximum depth of the representation. If None, the tree is fully + generated. + + feature_names : array-like of shape (n_features,), default=None + An array containing the feature names. + If None, generic names will be used ("x[0]", "x[1]", ...). + + class_names : array-like of shape (n_classes,) or bool, default=None + Names of each of the target classes in ascending numerical order. + Only relevant for classification and not supported for multi-output. + If ``True``, shows a symbolic representation of the class name. + + label : {'all', 'root', 'none'}, default='all' + Whether to show informative labels for impurity, etc. 
+ Options include 'all' to show at every node, 'root' to show only at + the top root node, or 'none' to not show at any node. + + filled : bool, default=False + When set to ``True``, paint nodes to indicate majority class for + classification, extremity of values for regression, or purity of node + for multi-output. + + leaves_parallel : bool, default=False + When set to ``True``, draw all leaf nodes at the bottom of the tree. + + impurity : bool, default=True + When set to ``True``, show the impurity at each node. + + node_ids : bool, default=False + When set to ``True``, show the ID number on each node. + + proportion : bool, default=False + When set to ``True``, change the display of 'values' and/or 'samples' + to be proportions and percentages respectively. + + rotate : bool, default=False + When set to ``True``, orient tree left to right rather than top-down. + + rounded : bool, default=False + When set to ``True``, draw node boxes with rounded corners. + + special_characters : bool, default=False + When set to ``False``, ignore special characters for PostScript + compatibility. + + precision : int, default=3 + Number of digits of precision for floating point in the values of + impurity, threshold and value attributes of each node. + + fontname : str, default='helvetica' + Name of font used to render text. + + Returns + ------- + dot_data : str + String representation of the input tree in GraphViz dot format. + Only returned if ``out_file`` is None. + + .. versionadded:: 0.18 + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn import tree + + >>> clf = tree.DecisionTreeClassifier() + >>> iris = load_iris() + + >>> clf = clf.fit(iris.data, iris.target) + >>> tree.export_graphviz(clf) + 'digraph Tree {... + """ + if feature_names is not None: + feature_names = check_array( + feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + if class_names is not None and not isinstance(class_names, bool): + class_names = check_array( + class_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + + check_is_fitted(decision_tree) + own_file = False + return_string = False + try: + if isinstance(out_file, str): + out_file = open(out_file, "w", encoding="utf-8") + own_file = True + + if out_file is None: + return_string = True + out_file = StringIO() + + exporter = _DOTTreeExporter( + out_file=out_file, + max_depth=max_depth, + feature_names=feature_names, + class_names=class_names, + label=label, + filled=filled, + leaves_parallel=leaves_parallel, + impurity=impurity, + node_ids=node_ids, + proportion=proportion, + rotate=rotate, + rounded=rounded, + special_characters=special_characters, + precision=precision, + fontname=fontname, + ) + exporter.export(decision_tree) + + if return_string: + return exporter.out_file.getvalue() + + finally: + if own_file: + out_file.close() + + +def _compute_depth(tree, node): + """ + Returns the depth of the subtree rooted in node. 
+ """ + + def compute_depth_( + current_node, current_depth, children_left, children_right, depths + ): + depths += [current_depth] + left = children_left[current_node] + right = children_right[current_node] + if left != -1 and right != -1: + compute_depth_( + left, current_depth + 1, children_left, children_right, depths + ) + compute_depth_( + right, current_depth + 1, children_left, children_right, depths + ) + + depths = [] + compute_depth_(node, 1, tree.children_left, tree.children_right, depths) + return max(depths) + + +@validate_params( + { + "decision_tree": [DecisionTreeClassifier, DecisionTreeRegressor], + "feature_names": ["array-like", None], + "class_names": ["array-like", None], + "max_depth": [Interval(Integral, 0, None, closed="left"), None], + "spacing": [Interval(Integral, 1, None, closed="left"), None], + "decimals": [Interval(Integral, 0, None, closed="left"), None], + "show_weights": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def export_text( + decision_tree, + *, + feature_names=None, + class_names=None, + max_depth=10, + spacing=3, + decimals=2, + show_weights=False, +): + """Build a text report showing the rules of a decision tree. + + Note that backwards compatibility may not be supported. + + Parameters + ---------- + decision_tree : object + The decision tree estimator to be exported. + It can be an instance of + DecisionTreeClassifier or DecisionTreeRegressor. + + feature_names : array-like of shape (n_features,), default=None + An array containing the feature names. + If None generic names will be used ("feature_0", "feature_1", ...). + + class_names : array-like of shape (n_classes,), default=None + Names of each of the target classes in ascending numerical order. + Only relevant for classification and not supported for multi-output. + + - if `None`, the class names are delegated to `decision_tree.classes_`; + - otherwise, `class_names` will be used as class names instead of + `decision_tree.classes_`. The length of `class_names` must match + the length of `decision_tree.classes_`. + + .. versionadded:: 1.3 + + max_depth : int, default=10 + Only the first max_depth levels of the tree are exported. + Truncated branches will be marked with "...". + + spacing : int, default=3 + Number of spaces between edges. The higher it is, the wider the result. + + decimals : int, default=2 + Number of decimal digits to display. + + show_weights : bool, default=False + If true the classification weights will be exported on each leaf. + The classification weights are the number of samples each class. + + Returns + ------- + report : str + Text summary of all the rules in the decision tree. 
+ + Examples + -------- + + >>> from sklearn.datasets import load_iris + >>> from sklearn.tree import DecisionTreeClassifier + >>> from sklearn.tree import export_text + >>> iris = load_iris() + >>> X = iris['data'] + >>> y = iris['target'] + >>> decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2) + >>> decision_tree = decision_tree.fit(X, y) + >>> r = export_text(decision_tree, feature_names=iris['feature_names']) + >>> print(r) + |--- petal width (cm) <= 0.80 + | |--- class: 0 + |--- petal width (cm) > 0.80 + | |--- petal width (cm) <= 1.75 + | | |--- class: 1 + | |--- petal width (cm) > 1.75 + | | |--- class: 2 + """ + if feature_names is not None: + feature_names = check_array( + feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + if class_names is not None: + class_names = check_array( + class_names, ensure_2d=False, dtype=None, ensure_min_samples=0 + ) + + check_is_fitted(decision_tree) + tree_ = decision_tree.tree_ + if is_classifier(decision_tree): + if class_names is None: + class_names = decision_tree.classes_ + elif len(class_names) != len(decision_tree.classes_): + raise ValueError( + "When `class_names` is an array, it should contain as" + " many items as `decision_tree.classes_`. Got" + f" {len(class_names)} while the tree was fitted with" + f" {len(decision_tree.classes_)} classes." + ) + right_child_fmt = "{} {} <= {}\n" + left_child_fmt = "{} {} > {}\n" + truncation_fmt = "{} {}\n" + + if feature_names is not None and len(feature_names) != tree_.n_features: + raise ValueError( + "feature_names must contain %d elements, got %d" + % (tree_.n_features, len(feature_names)) + ) + + if isinstance(decision_tree, DecisionTreeClassifier): + value_fmt = "{}{} weights: {}\n" + if not show_weights: + value_fmt = "{}{}{}\n" + else: + value_fmt = "{}{} value: {}\n" + + if feature_names is not None: + feature_names_ = [ + feature_names[i] if i != _tree.TREE_UNDEFINED else None + for i in tree_.feature + ] + else: + feature_names_ = ["feature_{}".format(i) for i in tree_.feature] + + export_text.report = "" + + def _add_leaf(value, weighted_n_node_samples, class_name, indent): + val = "" + if isinstance(decision_tree, DecisionTreeClassifier): + if show_weights: + val = [ + "{1:.{0}f}, ".format(decimals, v * weighted_n_node_samples) + for v in value + ] + val = "[" + "".join(val)[:-2] + "]" + weighted_n_node_samples + val += " class: " + str(class_name) + else: + val = ["{1:.{0}f}, ".format(decimals, v) for v in value] + val = "[" + "".join(val)[:-2] + "]" + export_text.report += value_fmt.format(indent, "", val) + + def print_tree_recurse(node, depth): + indent = ("|" + (" " * spacing)) * depth + indent = indent[:-spacing] + "-" * spacing + + value = None + if tree_.n_outputs == 1: + value = tree_.value[node][0] + else: + value = tree_.value[node].T[0] + class_name = np.argmax(value) + + if tree_.n_classes[0] != 1 and tree_.n_outputs == 1: + class_name = class_names[class_name] + + weighted_n_node_samples = tree_.weighted_n_node_samples[node] + + if depth <= max_depth + 1: + info_fmt = "" + info_fmt_left = info_fmt + info_fmt_right = info_fmt + + if tree_.feature[node] != _tree.TREE_UNDEFINED: + name = feature_names_[node] + threshold = tree_.threshold[node] + threshold = "{1:.{0}f}".format(decimals, threshold) + export_text.report += right_child_fmt.format(indent, name, threshold) + export_text.report += info_fmt_left + print_tree_recurse(tree_.children_left[node], depth + 1) + + export_text.report += left_child_fmt.format(indent, name, threshold) + 
export_text.report += info_fmt_right + print_tree_recurse(tree_.children_right[node], depth + 1) + else: # leaf + _add_leaf(value, weighted_n_node_samples, class_name, indent) + else: + subtree_depth = _compute_depth(tree_, node) + if subtree_depth == 1: + _add_leaf(value, weighted_n_node_samples, class_name, indent) + else: + trunc_report = "truncated branch of depth %d" % subtree_depth + export_text.report += truncation_fmt.format(indent, trunc_report) + + print_tree_recurse(0, 1) + return export_text.report diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_reingold_tilford.py b/venv/lib/python3.10/site-packages/sklearn/tree/_reingold_tilford.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0b6af08bd517b3cd5f9450ebe155d52c6f24eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/_reingold_tilford.py @@ -0,0 +1,188 @@ +# Authors: William Mill (bill@billmill.org) +# License: BSD 3 clause + +import numpy as np + + +class DrawTree: + def __init__(self, tree, parent=None, depth=0, number=1): + self.x = -1.0 + self.y = depth + self.tree = tree + self.children = [ + DrawTree(c, self, depth + 1, i + 1) for i, c in enumerate(tree.children) + ] + self.parent = parent + self.thread = None + self.mod = 0 + self.ancestor = self + self.change = self.shift = 0 + self._lmost_sibling = None + # this is the number of the node in its group of siblings 1..n + self.number = number + + def left(self): + return self.thread or len(self.children) and self.children[0] + + def right(self): + return self.thread or len(self.children) and self.children[-1] + + def lbrother(self): + n = None + if self.parent: + for node in self.parent.children: + if node == self: + return n + else: + n = node + return n + + def get_lmost_sibling(self): + if not self._lmost_sibling and self.parent and self != self.parent.children[0]: + self._lmost_sibling = self.parent.children[0] + return self._lmost_sibling + + lmost_sibling = property(get_lmost_sibling) + + def __str__(self): + return "%s: x=%s mod=%s" % (self.tree, self.x, self.mod) + + def __repr__(self): + return self.__str__() + + def max_extents(self): + extents = [c.max_extents() for c in self.children] + extents.append((self.x, self.y)) + return np.max(extents, axis=0) + + +def buchheim(tree): + dt = first_walk(DrawTree(tree)) + min = second_walk(dt) + if min < 0: + third_walk(dt, -min) + return dt + + +def third_walk(tree, n): + tree.x += n + for c in tree.children: + third_walk(c, n) + + +def first_walk(v, distance=1.0): + if len(v.children) == 0: + if v.lmost_sibling: + v.x = v.lbrother().x + distance + else: + v.x = 0.0 + else: + default_ancestor = v.children[0] + for w in v.children: + first_walk(w) + default_ancestor = apportion(w, default_ancestor, distance) + # print("finished v =", v.tree, "children") + execute_shifts(v) + + midpoint = (v.children[0].x + v.children[-1].x) / 2 + + w = v.lbrother() + if w: + v.x = w.x + distance + v.mod = v.x - midpoint + else: + v.x = midpoint + return v + + +def apportion(v, default_ancestor, distance): + w = v.lbrother() + if w is not None: + # in buchheim notation: + # i == inner; o == outer; r == right; l == left; r = +; l = - + vir = vor = v + vil = w + vol = v.lmost_sibling + sir = sor = v.mod + sil = vil.mod + sol = vol.mod + while vil.right() and vir.left(): + vil = vil.right() + vir = vir.left() + vol = vol.left() + vor = vor.right() + vor.ancestor = v + shift = (vil.x + sil) - (vir.x + sir) + distance + if shift > 0: + move_subtree(ancestor(vil, v, default_ancestor), v, 
shift) + sir = sir + shift + sor = sor + shift + sil += vil.mod + sir += vir.mod + sol += vol.mod + sor += vor.mod + if vil.right() and not vor.right(): + vor.thread = vil.right() + vor.mod += sil - sor + else: + if vir.left() and not vol.left(): + vol.thread = vir.left() + vol.mod += sir - sol + default_ancestor = v + return default_ancestor + + +def move_subtree(wl, wr, shift): + subtrees = wr.number - wl.number + # print(wl.tree, "is conflicted with", wr.tree, 'moving', subtrees, + # 'shift', shift) + # print wl, wr, wr.number, wl.number, shift, subtrees, shift/subtrees + wr.change -= shift / subtrees + wr.shift += shift + wl.change += shift / subtrees + wr.x += shift + wr.mod += shift + + +def execute_shifts(v): + shift = change = 0 + for w in v.children[::-1]: + # print("shift:", w, shift, w.change) + w.x += shift + w.mod += shift + change += w.change + shift += w.shift + change + + +def ancestor(vil, v, default_ancestor): + # the relevant text is at the bottom of page 7 of + # "Improving Walker's Algorithm to Run in Linear Time" by Buchheim et al, + # (2002) + # https://citeseerx.ist.psu.edu/doc_view/pid/1f41c3c2a4880dc49238e46d555f16d28da2940d + if vil.ancestor in v.parent.children: + return vil.ancestor + else: + return default_ancestor + + +def second_walk(v, m=0, depth=0, min=None): + v.x += m + v.y = depth + + if min is None or v.x < min: + min = v.x + + for w in v.children: + min = second_walk(w, m + v.mod, depth + 1, min) + + return min + + +class Tree: + def __init__(self, label="", node_id=-1, *children): + self.label = label + self.node_id = node_id + if children: + self.children = children + else: + self.children = [] diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_splitter.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/tree/_splitter.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1e32804b63c41f0d08a0af09061b762ca5ca45b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/_splitter.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_splitter.pxd b/venv/lib/python3.10/site-packages/sklearn/tree/_splitter.pxd new file mode 100644 index 0000000000000000000000000000000000000000..adc14011cb7a2f54b67ef96f469329b57d022e59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/_splitter.pxd @@ -0,0 +1,114 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Joel Nothman +# Arnaud Joly +# Jacob Schreiber +# +# License: BSD 3 clause + +# See _splitter.pyx for details. +cimport numpy as cnp + +from ._criterion cimport Criterion + +from ..utils._typedefs cimport float32_t, float64_t, intp_t, int32_t, uint32_t + + +cdef struct SplitRecord: + # Data to track sample split + intp_t feature # Which feature to split on. + intp_t pos # Split samples array at the given position, + # # i.e. count of samples below threshold for feature. + # # pos is >= end if the node is a leaf. + float64_t threshold # Threshold to split at. + float64_t improvement # Impurity improvement given parent node. + float64_t impurity_left # Impurity of the left split. + float64_t impurity_right # Impurity of the right split. + float64_t lower_bound # Lower bound on value of both children for monotonicity + float64_t upper_bound # Upper bound on value of both children for monotonicity + unsigned char missing_go_to_left # Controls if missing values go to the left node. 
+ intp_t n_missing # Number of missing values for the feature being split on + +cdef class Splitter: + # The splitter searches in the input space for a feature and a threshold + # to split the samples samples[start:end]. + # + # The impurity computations are delegated to a criterion object. + + # Internal structures + cdef public Criterion criterion # Impurity criterion + cdef public intp_t max_features # Number of features to test + cdef public intp_t min_samples_leaf # Min samples in a leaf + cdef public float64_t min_weight_leaf # Minimum weight in a leaf + + cdef object random_state # Random state + cdef uint32_t rand_r_state # sklearn_rand_r random number state + + cdef intp_t[::1] samples # Sample indices in X, y + cdef intp_t n_samples # X.shape[0] + cdef float64_t weighted_n_samples # Weighted number of samples + cdef intp_t[::1] features # Feature indices in X + cdef intp_t[::1] constant_features # Constant features indices + cdef intp_t n_features # X.shape[1] + cdef float32_t[::1] feature_values # temp. array holding feature values + + cdef intp_t start # Start position for the current node + cdef intp_t end # End position for the current node + + cdef const float64_t[:, ::1] y + # Monotonicity constraints for each feature. + # The encoding is as follows: + # -1: monotonic decrease + # 0: no constraint + # +1: monotonic increase + cdef const cnp.int8_t[:] monotonic_cst + cdef bint with_monotonic_cst + cdef const float64_t[:] sample_weight + + # The samples vector `samples` is maintained by the Splitter object such + # that the samples contained in a node are contiguous. With this setting, + # `node_split` reorganizes the node samples `samples[start:end]` in two + # subsets `samples[start:pos]` and `samples[pos:end]`. + + # The 1-d `features` array of size n_features contains the features + # indices and allows fast sampling without replacement of features. + + # The 1-d `constant_features` array of size n_features holds in + # `constant_features[:n_constant_features]` the feature ids with + # constant values for all the samples that reached a specific node. + # The value `n_constant_features` is given by the parent node to its + # child nodes. The content of the range `[n_constant_features:]` is left + # undefined, but preallocated for performance reasons + # This allows optimization with depth-based tree building. 
+ + # Methods + cdef int init( + self, + object X, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight, + const unsigned char[::1] missing_values_in_feature_mask, + ) except -1 + + cdef int node_reset( + self, + intp_t start, + intp_t end, + float64_t* weighted_n_node_samples + ) except -1 nogil + + cdef int node_split( + self, + float64_t impurity, # Impurity of the node + SplitRecord* split, + intp_t* n_constant_features, + float64_t lower_bound, + float64_t upper_bound, + ) except -1 nogil + + cdef void node_value(self, float64_t* dest) noexcept nogil + + cdef void clip_node_value(self, float64_t* dest, float64_t lower_bound, float64_t upper_bound) noexcept nogil + + cdef float64_t node_impurity(self) noexcept nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0ca6365fb24e8bd419bdcb4759c778f7c35af6af Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_tree.pxd b/venv/lib/python3.10/site-packages/sklearn/tree/_tree.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e4081921f40f972859c086301f19132724b1d867 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/_tree.pxd @@ -0,0 +1,114 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Brian Holt +# Joel Nothman +# Arnaud Joly +# Jacob Schreiber +# Nelson Liu +# +# License: BSD 3 clause + +# See _tree.pyx for details. + +import numpy as np +cimport numpy as cnp + +from ..utils._typedefs cimport float32_t, float64_t, intp_t, int32_t, uint32_t + +from ._splitter cimport Splitter +from ._splitter cimport SplitRecord + +cdef struct Node: + # Base storage structure for the nodes in a Tree object + + intp_t left_child # id of the left child of the node + intp_t right_child # id of the right child of the node + intp_t feature # Feature used for splitting the node + float64_t threshold # Threshold value at the node + float64_t impurity # Impurity of the node (i.e., the value of the criterion) + intp_t n_node_samples # Number of samples at the node + float64_t weighted_n_node_samples # Weighted number of samples at the node + unsigned char missing_go_to_left # Whether features have missing values + + +cdef class Tree: + # The Tree object is a binary tree structure constructed by the + # TreeBuilder. The tree structure is used for predictions and + # feature importances. + + # Input/Output layout + cdef public intp_t n_features # Number of features in X + cdef intp_t* n_classes # Number of classes in y[:, k] + cdef public intp_t n_outputs # Number of outputs in y + cdef public intp_t max_n_classes # max(n_classes) + + # Inner structures: values are stored separately from node structure, + # since size is determined at runtime. 
+ cdef public intp_t max_depth # Max depth of the tree + cdef public intp_t node_count # Counter for node IDs + cdef public intp_t capacity # Capacity of tree, in terms of nodes + cdef Node* nodes # Array of nodes + cdef float64_t* value # (capacity, n_outputs, max_n_classes) array of values + cdef intp_t value_stride # = n_outputs * max_n_classes + + # Methods + cdef intp_t _add_node(self, intp_t parent, bint is_left, bint is_leaf, + intp_t feature, float64_t threshold, float64_t impurity, + intp_t n_node_samples, + float64_t weighted_n_node_samples, + unsigned char missing_go_to_left) except -1 nogil + cdef int _resize(self, intp_t capacity) except -1 nogil + cdef int _resize_c(self, intp_t capacity=*) except -1 nogil + + cdef cnp.ndarray _get_value_ndarray(self) + cdef cnp.ndarray _get_node_ndarray(self) + + cpdef cnp.ndarray predict(self, object X) + + cpdef cnp.ndarray apply(self, object X) + cdef cnp.ndarray _apply_dense(self, object X) + cdef cnp.ndarray _apply_sparse_csr(self, object X) + + cpdef object decision_path(self, object X) + cdef object _decision_path_dense(self, object X) + cdef object _decision_path_sparse_csr(self, object X) + + cpdef compute_node_depths(self) + cpdef compute_feature_importances(self, normalize=*) + + +# ============================================================================= +# Tree builder +# ============================================================================= + +cdef class TreeBuilder: + # The TreeBuilder recursively builds a Tree object from training samples, + # using a Splitter object for splitting internal nodes and assigning + # values to leaves. + # + # This class controls the various stopping criteria and the node splitting + # evaluation order, e.g. depth-first or best-first. + + cdef Splitter splitter # Splitting algorithm + + cdef intp_t min_samples_split # Minimum number of samples in an internal node + cdef intp_t min_samples_leaf # Minimum number of samples in a leaf + cdef float64_t min_weight_leaf # Minimum weight in a leaf + cdef intp_t max_depth # Maximal tree depth + cdef float64_t min_impurity_decrease # Impurity threshold for early stopping + + cpdef build( + self, + Tree tree, + object X, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight=*, + const unsigned char[::1] missing_values_in_feature_mask=*, + ) + + cdef _check_input( + self, + object X, + const float64_t[:, ::1] y, + const float64_t[:] sample_weight, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_utils.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/tree/_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f8922b78077aec5fccbf71b4dc297e7f9af29b84 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/_utils.pxd b/venv/lib/python3.10/site-packages/sklearn/tree/_utils.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b59d18879ca9437aa1c860131dfe7cd1e5dfcc78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/_utils.pxd @@ -0,0 +1,104 @@ +# Authors: Gilles Louppe +# Peter Prettenhofer +# Arnaud Joly +# Jacob Schreiber +# Nelson Liu +# +# License: BSD 3 clause + +# See _utils.pyx for details. 
+ +cimport numpy as cnp +from ._tree cimport Node +from ..neighbors._quad_tree cimport Cell +from ..utils._typedefs cimport float32_t, float64_t, intp_t, int32_t, uint32_t + +cdef enum: + # Max value for our rand_r replacement (near the bottom). + # We don't use RAND_MAX because it's different across platforms and + # particularly tiny on Windows/MSVC. + # It corresponds to the maximum representable value for + # 32-bit signed integers (i.e. 2^31 - 1). + RAND_R_MAX = 2147483647 + + +# safe_realloc(&p, n) resizes the allocation of p to n * sizeof(*p) bytes or +# raises a MemoryError. It never calls free, since that's __dealloc__'s job. +# cdef float32_t *p = NULL +# safe_realloc(&p, n) +# is equivalent to p = malloc(n * sizeof(*p)) with error checking. +ctypedef fused realloc_ptr: + # Add pointer types here as needed. + (float32_t*) + (intp_t*) + (unsigned char*) + (WeightedPQueueRecord*) + (float64_t*) + (float64_t**) + (Node*) + (Cell*) + (Node**) + +cdef int safe_realloc(realloc_ptr* p, size_t nelems) except -1 nogil + + +cdef cnp.ndarray sizet_ptr_to_ndarray(intp_t* data, intp_t size) + + +cdef intp_t rand_int(intp_t low, intp_t high, + uint32_t* random_state) noexcept nogil + + +cdef float64_t rand_uniform(float64_t low, float64_t high, + uint32_t* random_state) noexcept nogil + + +cdef float64_t log(float64_t x) noexcept nogil + +# ============================================================================= +# WeightedPQueue data structure +# ============================================================================= + +# A record stored in the WeightedPQueue +cdef struct WeightedPQueueRecord: + float64_t data + float64_t weight + +cdef class WeightedPQueue: + cdef intp_t capacity + cdef intp_t array_ptr + cdef WeightedPQueueRecord* array_ + + cdef bint is_empty(self) noexcept nogil + cdef int reset(self) except -1 nogil + cdef intp_t size(self) noexcept nogil + cdef int push(self, float64_t data, float64_t weight) except -1 nogil + cdef int remove(self, float64_t data, float64_t weight) noexcept nogil + cdef int pop(self, float64_t* data, float64_t* weight) noexcept nogil + cdef int peek(self, float64_t* data, float64_t* weight) noexcept nogil + cdef float64_t get_weight_from_index(self, intp_t index) noexcept nogil + cdef float64_t get_value_from_index(self, intp_t index) noexcept nogil + + +# ============================================================================= +# WeightedMedianCalculator data structure +# ============================================================================= + +cdef class WeightedMedianCalculator: + cdef intp_t initial_capacity + cdef WeightedPQueue samples + cdef float64_t total_weight + cdef intp_t k + cdef float64_t sum_w_0_k # represents sum(weights[0:k]) = w[0] + w[1] + ... 
+ w[k-1] + cdef intp_t size(self) noexcept nogil + cdef int push(self, float64_t data, float64_t weight) except -1 nogil + cdef int reset(self) except -1 nogil + cdef int update_median_parameters_post_push( + self, float64_t data, float64_t weight, + float64_t original_median) noexcept nogil + cdef int remove(self, float64_t data, float64_t weight) noexcept nogil + cdef int pop(self, float64_t* data, float64_t* weight) noexcept nogil + cdef int update_median_parameters_post_remove( + self, float64_t data, float64_t weight, + float64_t original_median) noexcept nogil + cdef float64_t get_median(self) noexcept nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..750392882684c74be337c3602808f1f9047d8e49 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_export.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_export.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f95552d52fd04bb58161027f8f13ecf7c0f56a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_export.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_monotonic_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_monotonic_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbeba9a7df090d6b50c8ed3ba47a690cb408db30 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_monotonic_tree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_reingold_tilford.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_reingold_tilford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3ce5064a8bb0898bc1b9699ebfdda9c3f96c649 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_reingold_tilford.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbc422c1872c643ad0769334f5e3d870fcbeba42 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tree/tests/__pycache__/test_tree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_export.py b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_export.py new file mode 100644 index 0000000000000000000000000000000000000000..169c667b4ff3ff86fb42b3434741098fe4dd1213 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_export.py @@ -0,0 +1,519 @@ +""" +Testing for export functions of decision trees 
(sklearn.tree.export). +""" +from io import StringIO +from re import finditer, search +from textwrap import dedent + +import numpy as np +import pytest +from numpy.random import RandomState + +from sklearn.base import is_classifier +from sklearn.ensemble import GradientBoostingClassifier +from sklearn.exceptions import NotFittedError +from sklearn.tree import ( + DecisionTreeClassifier, + DecisionTreeRegressor, + export_graphviz, + export_text, + plot_tree, +) + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]] +w = [1, 1, 1, 0.5, 0.5, 0.5] +y_degraded = [1, 1, 1, 1, 1, 1] + + +def test_graphviz_toy(): + # Check correctness of export_graphviz + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="gini", random_state=2 + ) + clf.fit(X, y) + + # Test export code + contents1 = export_graphviz(clf, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]"] ;\n' + '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + # Test plot_options + contents1 = export_graphviz( + clf, + filled=True, + impurity=False, + proportion=True, + special_characters=True, + rounded=True, + out_file=None, + fontname="sans", + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled, rounded", color="black", ' + 'fontname="sans"] ;\n' + 'edge [fontname="sans"] ;\n' + "0 [label=0 ≤ 0.0
samples = 100.0%
" + 'value = [0.5, 0.5]>, fillcolor="#ffffff"] ;\n' + "1 [label=value = [1.0, 0.0]>, " + 'fillcolor="#e58139"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + "2 [label=value = [0.0, 1.0]>, " + 'fillcolor="#399de5"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + # Test max_depth + contents1 = export_graphviz(clf, max_depth=0, class_names=True, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]\\nclass = y[0]"] ;\n' + '1 [label="(...)"] ;\n' + "0 -> 1 ;\n" + '2 [label="(...)"] ;\n' + "0 -> 2 ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test max_depth with plot_options + contents1 = export_graphviz( + clf, max_depth=0, filled=True, out_file=None, node_ids=True + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled", color="black", ' + 'fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="node #0\\nx[0] <= 0.0\\ngini = 0.5\\n' + 'samples = 6\\nvalue = [3, 3]", fillcolor="#ffffff"] ;\n' + '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' + "0 -> 1 ;\n" + '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' + "0 -> 2 ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test multi-output with weighted samples + clf = DecisionTreeClassifier( + max_depth=2, min_samples_split=2, criterion="gini", random_state=2 + ) + clf = clf.fit(X, y2, sample_weight=w) + + contents1 = export_graphviz(clf, filled=True, impurity=False, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled", color="black", ' + 'fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\nsamples = 6\\n' + "value = [[3.0, 1.5, 0.0]\\n" + '[3.0, 1.0, 0.5]]", fillcolor="#ffffff"] ;\n' + '1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' + '[3, 0, 0]]", fillcolor="#e58139"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="x[0] <= 1.5\\nsamples = 3\\n' + "value = [[0.0, 1.5, 0.0]\\n" + '[0.0, 1.0, 0.5]]", fillcolor="#f1bd97"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + '3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' + '[0, 1, 0]]", fillcolor="#e58139"] ;\n' + "2 -> 3 ;\n" + '4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' + '[0.0, 0.0, 0.5]]", fillcolor="#e58139"] ;\n' + "2 -> 4 ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test regression output with plot_options + clf = DecisionTreeRegressor( + max_depth=3, min_samples_split=2, criterion="squared_error", random_state=2 + ) + clf.fit(X, y) + + contents1 = export_graphviz( + clf, + filled=True, + leaves_parallel=True, + out_file=None, + rotate=True, + rounded=True, + fontname="sans", + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled, rounded", color="black", ' + 'fontname="sans"] ;\n' + "graph [ranksep=equally, splines=polyline] ;\n" + 'edge [fontname="sans"] ;\n' + "rankdir=LR ;\n" + '0 [label="x[0] <= 0.0\\nsquared_error = 1.0\\nsamples = 6\\n' + 'value = 0.0", fillcolor="#f2c09c"] ;\n' + '1 [label="squared_error = 0.0\\nsamples = 3\\' + 'nvalue = -1.0", ' + 'fillcolor="#ffffff"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=-45, " + 'headlabel="True"] ;\n' + '2 [label="squared_error = 0.0\\nsamples = 3\\nvalue = 1.0", ' + 'fillcolor="#e58139"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=45, " + 
'headlabel="False"] ;\n' + "{rank=same ; 0} ;\n" + "{rank=same ; 1; 2} ;\n" + "}" + ) + + assert contents1 == contents2 + + # Test classifier with degraded learning set + clf = DecisionTreeClassifier(max_depth=3) + clf.fit(X, y_degraded) + + contents1 = export_graphviz(clf, filled=True, out_file=None) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, style="filled", color="black", ' + 'fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", ' + 'fillcolor="#ffffff"] ;\n' + "}" + ) + + +@pytest.mark.parametrize("constructor", [list, np.array]) +def test_graphviz_feature_class_names_array_support(constructor): + # Check that export_graphviz treats feature names + # and class names correctly and supports arrays + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="gini", random_state=2 + ) + clf.fit(X, y) + + # Test with feature_names + contents1 = export_graphviz( + clf, feature_names=constructor(["feature0", "feature1"]), out_file=None + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]"] ;\n' + '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + # Test with class_names + contents1 = export_graphviz( + clf, class_names=constructor(["yes", "no"]), out_file=None + ) + contents2 = ( + "digraph Tree {\n" + 'node [shape=box, fontname="helvetica"] ;\n' + 'edge [fontname="helvetica"] ;\n' + '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' + 'value = [3, 3]\\nclass = yes"] ;\n' + '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' + 'class = yes"] ;\n' + "0 -> 1 [labeldistance=2.5, labelangle=45, " + 'headlabel="True"] ;\n' + '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' + 'class = no"] ;\n' + "0 -> 2 [labeldistance=2.5, labelangle=-45, " + 'headlabel="False"] ;\n' + "}" + ) + + assert contents1 == contents2 + + +def test_graphviz_errors(): + # Check for errors of export_graphviz + clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2) + + # Check not-fitted decision tree error + out = StringIO() + with pytest.raises(NotFittedError): + export_graphviz(clf, out) + + clf.fit(X, y) + + # Check if it errors when length of feature_names + # mismatches with number of features + message = "Length of feature_names, 1 does not match number of features, 2" + with pytest.raises(ValueError, match=message): + export_graphviz(clf, None, feature_names=["a"]) + + message = "Length of feature_names, 3 does not match number of features, 2" + with pytest.raises(ValueError, match=message): + export_graphviz(clf, None, feature_names=["a", "b", "c"]) + + # Check error when argument is not an estimator + message = "is not an estimator instance" + with pytest.raises(TypeError, match=message): + export_graphviz(clf.fit(X, y).tree_) + + # Check class_names error + out = StringIO() + with pytest.raises(IndexError): + export_graphviz(clf, out, class_names=[]) + + +def test_friedman_mse_in_graphviz(): + clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0) + clf.fit(X, y) + dot_data = StringIO() + export_graphviz(clf, out_file=dot_data) + + clf = 
GradientBoostingClassifier(n_estimators=2, random_state=0) + clf.fit(X, y) + for estimator in clf.estimators_: + export_graphviz(estimator[0], out_file=dot_data) + + for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()): + assert "friedman_mse" in finding.group() + + +def test_precision(): + rng_reg = RandomState(2) + rng_clf = RandomState(8) + for X, y, clf in zip( + (rng_reg.random_sample((5, 2)), rng_clf.random_sample((1000, 4))), + (rng_reg.random_sample((5,)), rng_clf.randint(2, size=(1000,))), + ( + DecisionTreeRegressor( + criterion="friedman_mse", random_state=0, max_depth=1 + ), + DecisionTreeClassifier(max_depth=1, random_state=0), + ), + ): + clf.fit(X, y) + for precision in (4, 3): + dot_data = export_graphviz( + clf, out_file=None, precision=precision, proportion=True + ) + + # With the current random state, the impurity and the threshold + # will have the number of precision set in the export_graphviz + # function. We will check the number of precision with a strict + # equality. The value reported will have only 2 precision and + # therefore, only a less equal comparison will be done. + + # check value + for finding in finditer(r"value = \d+\.\d+", dot_data): + assert len(search(r"\.\d+", finding.group()).group()) <= precision + 1 + # check impurity + if is_classifier(clf): + pattern = r"gini = \d+\.\d+" + else: + pattern = r"friedman_mse = \d+\.\d+" + + # check impurity + for finding in finditer(pattern, dot_data): + assert len(search(r"\.\d+", finding.group()).group()) == precision + 1 + # check threshold + for finding in finditer(r"<= \d+\.\d+", dot_data): + assert len(search(r"\.\d+", finding.group()).group()) == precision + 1 + + +def test_export_text_errors(): + clf = DecisionTreeClassifier(max_depth=2, random_state=0) + clf.fit(X, y) + err_msg = "feature_names must contain 2 elements, got 1" + with pytest.raises(ValueError, match=err_msg): + export_text(clf, feature_names=["a"]) + err_msg = ( + "When `class_names` is an array, it should contain as" + " many items as `decision_tree.classes_`. Got 1 while" + " the tree was fitted with 2 classes." 
+ ) + with pytest.raises(ValueError, match=err_msg): + export_text(clf, class_names=["a"]) + + +def test_export_text(): + clf = DecisionTreeClassifier(max_depth=2, random_state=0) + clf.fit(X, y) + + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- class: -1 + |--- feature_1 > 0.00 + | |--- class: 1 + """).lstrip() + + assert export_text(clf) == expected_report + # testing that leaves at level 1 are not truncated + assert export_text(clf, max_depth=0) == expected_report + # testing that the rest of the tree is truncated + assert export_text(clf, max_depth=10) == expected_report + + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- weights: [3.00, 0.00] class: -1 + |--- feature_1 > 0.00 + | |--- weights: [0.00, 3.00] class: 1 + """).lstrip() + assert export_text(clf, show_weights=True) == expected_report + + expected_report = dedent(""" + |- feature_1 <= 0.00 + | |- class: -1 + |- feature_1 > 0.00 + | |- class: 1 + """).lstrip() + assert export_text(clf, spacing=1) == expected_report + + X_l = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, 1]] + y_l = [-1, -1, -1, 1, 1, 1, 2] + clf = DecisionTreeClassifier(max_depth=4, random_state=0) + clf.fit(X_l, y_l) + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- class: -1 + |--- feature_1 > 0.00 + | |--- truncated branch of depth 2 + """).lstrip() + assert export_text(clf, max_depth=0) == expected_report + + X_mo = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] + y_mo = [[-1, -1], [-1, -1], [-1, -1], [1, 1], [1, 1], [1, 1]] + + reg = DecisionTreeRegressor(max_depth=2, random_state=0) + reg.fit(X_mo, y_mo) + + expected_report = dedent(""" + |--- feature_1 <= 0.0 + | |--- value: [-1.0, -1.0] + |--- feature_1 > 0.0 + | |--- value: [1.0, 1.0] + """).lstrip() + assert export_text(reg, decimals=1) == expected_report + assert export_text(reg, decimals=1, show_weights=True) == expected_report + + X_single = [[-2], [-1], [-1], [1], [1], [2]] + reg = DecisionTreeRegressor(max_depth=2, random_state=0) + reg.fit(X_single, y_mo) + + expected_report = dedent(""" + |--- first <= 0.0 + | |--- value: [-1.0, -1.0] + |--- first > 0.0 + | |--- value: [1.0, 1.0] + """).lstrip() + assert export_text(reg, decimals=1, feature_names=["first"]) == expected_report + assert ( + export_text(reg, decimals=1, show_weights=True, feature_names=["first"]) + == expected_report + ) + + +@pytest.mark.parametrize("constructor", [list, np.array]) +def test_export_text_feature_class_names_array_support(constructor): + # Check that export_graphviz treats feature names + # and class names correctly and supports arrays + clf = DecisionTreeClassifier(max_depth=2, random_state=0) + clf.fit(X, y) + + expected_report = dedent(""" + |--- b <= 0.00 + | |--- class: -1 + |--- b > 0.00 + | |--- class: 1 + """).lstrip() + assert export_text(clf, feature_names=constructor(["a", "b"])) == expected_report + + expected_report = dedent(""" + |--- feature_1 <= 0.00 + | |--- class: cat + |--- feature_1 > 0.00 + | |--- class: dog + """).lstrip() + assert export_text(clf, class_names=constructor(["cat", "dog"])) == expected_report + + +def test_plot_tree_entropy(pyplot): + # mostly smoke tests + # Check correctness of export_graphviz for criterion = entropy + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="entropy", random_state=2 + ) + clf.fit(X, y) + + # Test export code + feature_names = ["first feat", "sepal_width"] + nodes = plot_tree(clf, feature_names=feature_names) + assert len(nodes) == 3 + assert ( + 
nodes[0].get_text() + == "first feat <= 0.0\nentropy = 1.0\nsamples = 6\nvalue = [3, 3]" + ) + assert nodes[1].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [3, 0]" + assert nodes[2].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [0, 3]" + + +def test_plot_tree_gini(pyplot): + # mostly smoke tests + # Check correctness of export_graphviz for criterion = gini + clf = DecisionTreeClassifier( + max_depth=3, min_samples_split=2, criterion="gini", random_state=2 + ) + clf.fit(X, y) + + # Test export code + feature_names = ["first feat", "sepal_width"] + nodes = plot_tree(clf, feature_names=feature_names) + assert len(nodes) == 3 + assert ( + nodes[0].get_text() + == "first feat <= 0.0\ngini = 0.5\nsamples = 6\nvalue = [3, 3]" + ) + assert nodes[1].get_text() == "gini = 0.0\nsamples = 3\nvalue = [3, 0]" + assert nodes[2].get_text() == "gini = 0.0\nsamples = 3\nvalue = [0, 3]" + + +def test_not_fitted_tree(pyplot): + # Testing if not fitted tree throws the correct error + clf = DecisionTreeRegressor() + with pytest.raises(NotFittedError): + plot_tree(clf) diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_monotonic_tree.py b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_monotonic_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..6478c2e2dfd85cec5f931578806f305b6167305c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_monotonic_tree.py @@ -0,0 +1,508 @@ +import numpy as np +import pytest + +from sklearn.datasets import make_classification, make_regression +from sklearn.ensemble import ( + ExtraTreesClassifier, + ExtraTreesRegressor, + RandomForestClassifier, + RandomForestRegressor, +) +from sklearn.tree import ( + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) +from sklearn.utils._testing import assert_allclose +from sklearn.utils.fixes import CSC_CONTAINERS + +TREE_CLASSIFIER_CLASSES = [DecisionTreeClassifier, ExtraTreeClassifier] +TREE_REGRESSOR_CLASSES = [DecisionTreeRegressor, ExtraTreeRegressor] +TREE_BASED_CLASSIFIER_CLASSES = TREE_CLASSIFIER_CLASSES + [ + RandomForestClassifier, + ExtraTreesClassifier, +] +TREE_BASED_REGRESSOR_CLASSES = TREE_REGRESSOR_CLASSES + [ + RandomForestRegressor, + ExtraTreesRegressor, +] + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("sparse_splitter", (True, False)) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_monotonic_constraints_classifications( + TreeClassifier, + depth_first_builder, + sparse_splitter, + global_random_seed, + csc_container, +): + n_samples = 1000 + n_samples_train = 900 + X, y = make_classification( + n_samples=n_samples, + n_classes=2, + n_features=5, + n_informative=5, + n_redundant=0, + random_state=global_random_seed, + ) + X_train, y_train = X[:n_samples_train], y[:n_samples_train] + X_test, _ = X[n_samples_train:], y[n_samples_train:] + + X_test_0incr, X_test_0decr = np.copy(X_test), np.copy(X_test) + X_test_1incr, X_test_1decr = np.copy(X_test), np.copy(X_test) + X_test_0incr[:, 0] += 10 + X_test_0decr[:, 0] -= 10 + X_test_1incr[:, 1] += 10 + X_test_1decr[:, 1] -= 10 + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = 1 + monotonic_cst[1] = -1 + + if depth_first_builder: + est = TreeClassifier(max_depth=None, monotonic_cst=monotonic_cst) + else: + est = TreeClassifier( + max_depth=None, + monotonic_cst=monotonic_cst, + 
max_leaf_nodes=n_samples_train, + ) + if hasattr(est, "random_state"): + est.set_params(**{"random_state": global_random_seed}) + if hasattr(est, "n_estimators"): + est.set_params(**{"n_estimators": 5}) + if sparse_splitter: + X_train = csc_container(X_train) + est.fit(X_train, y_train) + proba_test = est.predict_proba(X_test) + + assert np.logical_and( + proba_test >= 0.0, proba_test <= 1.0 + ).all(), "Probability should always be in [0, 1] range." + assert_allclose(proba_test.sum(axis=1), 1.0) + + # Monotonic increase constraint, it applies to the positive class + assert np.all(est.predict_proba(X_test_0incr)[:, 1] >= proba_test[:, 1]) + assert np.all(est.predict_proba(X_test_0decr)[:, 1] <= proba_test[:, 1]) + + # Monotonic decrease constraint, it applies to the positive class + assert np.all(est.predict_proba(X_test_1incr)[:, 1] <= proba_test[:, 1]) + assert np.all(est.predict_proba(X_test_1decr)[:, 1] >= proba_test[:, 1]) + + +@pytest.mark.parametrize("TreeRegressor", TREE_BASED_REGRESSOR_CLASSES) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("sparse_splitter", (True, False)) +@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error")) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_monotonic_constraints_regressions( + TreeRegressor, + depth_first_builder, + sparse_splitter, + criterion, + global_random_seed, + csc_container, +): + n_samples = 1000 + n_samples_train = 900 + # Build a regression task using 5 informative features + X, y = make_regression( + n_samples=n_samples, + n_features=5, + n_informative=5, + random_state=global_random_seed, + ) + train = np.arange(n_samples_train) + test = np.arange(n_samples_train, n_samples) + X_train = X[train] + y_train = y[train] + X_test = np.copy(X[test]) + X_test_incr = np.copy(X_test) + X_test_decr = np.copy(X_test) + X_test_incr[:, 0] += 10 + X_test_decr[:, 1] += 10 + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = 1 + monotonic_cst[1] = -1 + + if depth_first_builder: + est = TreeRegressor( + max_depth=None, + monotonic_cst=monotonic_cst, + criterion=criterion, + ) + else: + est = TreeRegressor( + max_depth=8, + monotonic_cst=monotonic_cst, + criterion=criterion, + max_leaf_nodes=n_samples_train, + ) + if hasattr(est, "random_state"): + est.set_params(random_state=global_random_seed) + if hasattr(est, "n_estimators"): + est.set_params(**{"n_estimators": 5}) + if sparse_splitter: + X_train = csc_container(X_train) + est.fit(X_train, y_train) + y = est.predict(X_test) + # Monotonic increase constraint + y_incr = est.predict(X_test_incr) + # y_incr should always be greater than y + assert np.all(y_incr >= y) + + # Monotonic decrease constraint + y_decr = est.predict(X_test_decr) + # y_decr should always be lower than y + assert np.all(y_decr <= y) + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +def test_multiclass_raises(TreeClassifier): + X, y = make_classification( + n_samples=100, n_features=5, n_classes=3, n_informative=3, random_state=0 + ) + y[0] = 0 + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = -1 + monotonic_cst[1] = 1 + est = TreeClassifier(max_depth=None, monotonic_cst=monotonic_cst, random_state=0) + + msg = "Monotonicity constraints are not supported with multiclass classification" + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +def test_multiple_output_raises(TreeClassifier): + X = [[1, 2, 3, 4, 5], [6, 
7, 8, 9, 10]] + y = [[1, 0, 1, 0, 1], [1, 0, 1, 0, 1]] + + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-1, 1]), random_state=0 + ) + msg = "Monotonicity constraints are not supported with multiple output" + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize( + "DecisionTreeEstimator", [DecisionTreeClassifier, DecisionTreeRegressor] +) +def test_missing_values_raises(DecisionTreeEstimator): + X, y = make_classification( + n_samples=100, n_features=5, n_classes=2, n_informative=3, random_state=0 + ) + X[0, 0] = np.nan + monotonic_cst = np.zeros(X.shape[1]) + monotonic_cst[0] = 1 + est = DecisionTreeEstimator( + max_depth=None, monotonic_cst=monotonic_cst, random_state=0 + ) + + msg = "Input X contains NaN" + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES) +def test_bad_monotonic_cst_raises(TreeClassifier): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + y = [1, 0, 1, 0, 1] + + msg = "monotonic_cst has shape 3 but the input data X has 2 features." + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-1, 1, 0]), random_state=0 + ) + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + msg = "monotonic_cst must be None or an array-like of -1, 0 or 1." + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-2, 2]), random_state=0 + ) + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + est = TreeClassifier( + max_depth=None, monotonic_cst=np.array([-1, 0.8]), random_state=0 + ) + with pytest.raises(ValueError, match=msg + "(.*)0.8]"): + est.fit(X, y) + + +def assert_1d_reg_tree_children_monotonic_bounded(tree_, monotonic_sign): + values = tree_.value + for i in range(tree_.node_count): + if tree_.children_left[i] > i and tree_.children_right[i] > i: + # Check monotonicity on children + i_left = tree_.children_left[i] + i_right = tree_.children_right[i] + if monotonic_sign == 1: + assert values[i_left] <= values[i_right] + elif monotonic_sign == -1: + assert values[i_left] >= values[i_right] + val_middle = (values[i_left] + values[i_right]) / 2 + # Check bounds on grand-children, filtering out leaf nodes + if tree_.feature[i_left] >= 0: + i_left_right = tree_.children_right[i_left] + if monotonic_sign == 1: + assert values[i_left_right] <= val_middle + elif monotonic_sign == -1: + assert values[i_left_right] >= val_middle + if tree_.feature[i_right] >= 0: + i_right_left = tree_.children_left[i_right] + if monotonic_sign == 1: + assert val_middle <= values[i_right_left] + elif monotonic_sign == -1: + assert val_middle >= values[i_right_left] + + +def test_assert_1d_reg_tree_children_monotonic_bounded(): + X = np.linspace(-1, 1, 7).reshape(-1, 1) + y = np.sin(2 * np.pi * X.ravel()) + + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y) + + with pytest.raises(AssertionError): + assert_1d_reg_tree_children_monotonic_bounded(reg.tree_, 1) + + with pytest.raises(AssertionError): + assert_1d_reg_tree_children_monotonic_bounded(reg.tree_, -1) + + +def assert_1d_reg_monotonic(clf, monotonic_sign, min_x, max_x, n_steps): + X_grid = np.linspace(min_x, max_x, n_steps).reshape(-1, 1) + y_pred_grid = clf.predict(X_grid) + if monotonic_sign == 1: + assert (np.diff(y_pred_grid) >= 0.0).all() + elif monotonic_sign == -1: + assert (np.diff(y_pred_grid) <= 0.0).all() + + +@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES) +def test_1d_opposite_monotonicity_cst_data(TreeRegressor): + # 
Check that positive monotonic data with negative monotonic constraint + # yield constant predictions, equal to the average of target values + X = np.linspace(-2, 2, 10).reshape(-1, 1) + y = X.ravel() + clf = TreeRegressor(monotonic_cst=[-1]) + clf.fit(X, y) + assert clf.tree_.node_count == 1 + assert clf.tree_.value[0] == 0.0 + + # Swap monotonicity + clf = TreeRegressor(monotonic_cst=[1]) + clf.fit(X, -y) + assert clf.tree_.node_count == 1 + assert clf.tree_.value[0] == 0.0 + + +@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES) +@pytest.mark.parametrize("monotonic_sign", (-1, 1)) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error")) +def test_1d_tree_nodes_values( + TreeRegressor, monotonic_sign, depth_first_builder, criterion, global_random_seed +): + # Adaptation from test_nodes_values in test_monotonic_constraints.py + # in sklearn.ensemble._hist_gradient_boosting + # Build a single tree with only one feature, and make sure the node + # values respect the monotonicity constraints. + + # Considering the following tree with a monotonic +1 constraint, we + # should have: + # + # root + # / \ + # a b + # / \ / \ + # c d e f + # + # a <= root <= b + # c <= d <= (a + b) / 2 <= e <= f + + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + n_features = 1 + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + if depth_first_builder: + # No max_leaf_nodes, default depth first tree builder + clf = TreeRegressor( + monotonic_cst=[monotonic_sign], + criterion=criterion, + random_state=global_random_seed, + ) + else: + # max_leaf_nodes triggers best first tree builder + clf = TreeRegressor( + monotonic_cst=[monotonic_sign], + max_leaf_nodes=n_samples, + criterion=criterion, + random_state=global_random_seed, + ) + clf.fit(X, y) + + assert_1d_reg_tree_children_monotonic_bounded(clf.tree_, monotonic_sign) + assert_1d_reg_monotonic(clf, monotonic_sign, np.min(X), np.max(X), 100) + + +def assert_nd_reg_tree_children_monotonic_bounded(tree_, monotonic_cst): + upper_bound = np.full(tree_.node_count, np.inf) + lower_bound = np.full(tree_.node_count, -np.inf) + for i in range(tree_.node_count): + feature = tree_.feature[i] + node_value = tree_.value[i][0][0] # unpack value from nx1x1 array + # While building the tree, the computed middle value is slightly + # different from the average of the siblings values, because + # sum_right / weighted_n_right + # is slightly different from the value of the right sibling. + # This can cause a discrepancy up to numerical noise when clipping, + # which is resolved by comparing with some loss of precision. + assert np.float32(node_value) <= np.float32(upper_bound[i]) + assert np.float32(node_value) >= np.float32(lower_bound[i]) + + if feature < 0: + # Leaf: nothing to do + continue + + # Split node: check and update bounds for the children. + i_left = tree_.children_left[i] + i_right = tree_.children_right[i] + # unpack value from nx1x1 array + middle_value = (tree_.value[i_left][0][0] + tree_.value[i_right][0][0]) / 2 + + if monotonic_cst[feature] == 0: + # Feature without monotonicity constraint: propagate bounds + # down the tree to both children. 
+ # Otherwise, with 2 features and a monotonic increase constraint + # (encoded by +1) on feature 0, the following tree can be accepted, + # although it does not respect the monotonic increase constraint: + # + # X[0] <= 0 + # value = 100 + # / \ + # X[0] <= -1 X[1] <= 0 + # value = 50 value = 150 + # / \ / \ + # leaf leaf leaf leaf + # value = 25 value = 75 value = 50 value = 250 + + lower_bound[i_left] = lower_bound[i] + upper_bound[i_left] = upper_bound[i] + lower_bound[i_right] = lower_bound[i] + upper_bound[i_right] = upper_bound[i] + + elif monotonic_cst[feature] == 1: + # Feature with constraint: check monotonicity + assert tree_.value[i_left] <= tree_.value[i_right] + + # Propagate bounds down the tree to both children. + lower_bound[i_left] = lower_bound[i] + upper_bound[i_left] = middle_value + lower_bound[i_right] = middle_value + upper_bound[i_right] = upper_bound[i] + + elif monotonic_cst[feature] == -1: + # Feature with constraint: check monotonicity + assert tree_.value[i_left] >= tree_.value[i_right] + + # Update and propagate bounds down the tree to both children. + lower_bound[i_left] = middle_value + upper_bound[i_left] = upper_bound[i] + lower_bound[i_right] = lower_bound[i] + upper_bound[i_right] = middle_value + + else: # pragma: no cover + raise ValueError(f"monotonic_cst[{feature}]={monotonic_cst[feature]}") + + +def test_assert_nd_reg_tree_children_monotonic_bounded(): + # Check that assert_nd_reg_tree_children_monotonic_bounded can detect + # non-monotonic tree predictions. + X = np.linspace(0, 2 * np.pi, 30).reshape(-1, 1) + y = np.sin(X).ravel() + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [1]) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [-1]) + + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [0]) + + # Check that assert_nd_reg_tree_children_monotonic_bounded raises + # when the data (and therefore the model) is naturally monotonic in the + # opposite direction. + X = np.linspace(-5, 5, 5).reshape(-1, 1) + y = X.ravel() ** 3 + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [-1]) + + # For completeness, check that the converse holds when swapping the sign. + reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, -y) + + with pytest.raises(AssertionError): + assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [1]) + + +@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES) +@pytest.mark.parametrize("monotonic_sign", (-1, 1)) +@pytest.mark.parametrize("depth_first_builder", (True, False)) +@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error")) +def test_nd_tree_nodes_values( + TreeRegressor, monotonic_sign, depth_first_builder, criterion, global_random_seed +): + # Build tree with several features, and make sure the nodes + # values respect the monotonicity constraints. + + # Considering the following tree with a monotonic increase constraint on X[0], + # we should have: + # + # root + # X[0]<=t + # / \ + # a b + # X[0]<=u X[1]<=v + # / \ / \ + # c d e f + # + # i) a <= root <= b + # ii) c <= a <= d <= (a+b)/2 + # iii) (a+b)/2 <= min(e,f) + # For iii) we check that each node value is within the proper lower and + # upper bounds. 
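+    # The assert_nd_reg_tree_children_monotonic_bounded helper above performs
+    # exactly that check: it walks the fitted tree and verifies every node
+    # value against [lower, upper] bounds propagated from the root, tightening
+    # them with the siblings' middle value only along the constrained feature.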
+ + rng = np.random.RandomState(global_random_seed) + n_samples = 1000 + n_features = 2 + monotonic_cst = [monotonic_sign, 0] + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + if depth_first_builder: + # No max_leaf_nodes, default depth first tree builder + clf = TreeRegressor( + monotonic_cst=monotonic_cst, + criterion=criterion, + random_state=global_random_seed, + ) + else: + # max_leaf_nodes triggers best first tree builder + clf = TreeRegressor( + monotonic_cst=monotonic_cst, + max_leaf_nodes=n_samples, + criterion=criterion, + random_state=global_random_seed, + ) + clf.fit(X, y) + assert_nd_reg_tree_children_monotonic_bounded(clf.tree_, monotonic_cst) diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_reingold_tilford.py b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_reingold_tilford.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0ce3ce2cffc2792db28858bff69acb8eb4d45a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_reingold_tilford.py @@ -0,0 +1,49 @@ +import numpy as np +import pytest + +from sklearn.tree._reingold_tilford import Tree, buchheim + +simple_tree = Tree("", 0, Tree("", 1), Tree("", 2)) + +bigger_tree = Tree( + "", + 0, + Tree( + "", + 1, + Tree("", 3), + Tree("", 4, Tree("", 7), Tree("", 8)), + ), + Tree("", 2, Tree("", 5), Tree("", 6)), +) + + +@pytest.mark.parametrize("tree, n_nodes", [(simple_tree, 3), (bigger_tree, 9)]) +def test_buchheim(tree, n_nodes): + def walk_tree(draw_tree): + res = [(draw_tree.x, draw_tree.y)] + for child in draw_tree.children: + # parents higher than children: + assert child.y == draw_tree.y + 1 + res.extend(walk_tree(child)) + if len(draw_tree.children): + # these trees are always binary + # parents are centered above children + assert ( + draw_tree.x == (draw_tree.children[0].x + draw_tree.children[1].x) / 2 + ) + return res + + layout = buchheim(tree) + coordinates = walk_tree(layout) + assert len(coordinates) == n_nodes + # test that x values are unique per depth / level + # we could also do it quicker using defaultdicts.. + depth = 0 + while True: + x_at_this_depth = [node[0] for node in coordinates if node[1] == depth] + if not x_at_this_depth: + # reached all leafs + break + assert len(np.unique(x_at_this_depth)) == len(x_at_this_depth) + depth += 1 diff --git a/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_tree.py b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..000e684d6a488d459993508188008a38cdfd40d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tree/tests/test_tree.py @@ -0,0 +1,2717 @@ +""" +Testing for the tree module (sklearn.tree). 
+""" + +import copy +import copyreg +import io +import pickle +import struct +from itertools import chain, product + +import joblib +import numpy as np +import pytest +from joblib.numpy_pickle import NumpyPickler +from numpy.testing import assert_allclose + +from sklearn import clone, datasets, tree +from sklearn.dummy import DummyRegressor +from sklearn.exceptions import NotFittedError +from sklearn.impute import SimpleImputer +from sklearn.metrics import accuracy_score, mean_poisson_deviance, mean_squared_error +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.random_projection import _sparse_random_matrix +from sklearn.tree import ( + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) +from sklearn.tree._classes import ( + CRITERIA_CLF, + CRITERIA_REG, + DENSE_SPLITTERS, + SPARSE_SPLITTERS, +) +from sklearn.tree._tree import ( + NODE_DTYPE, + TREE_LEAF, + TREE_UNDEFINED, + _check_n_classes, + _check_node_ndarray, + _check_value_ndarray, +) +from sklearn.tree._tree import Tree as CythonTree +from sklearn.utils import _IS_32BIT, compute_sample_weight +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + create_memmap_backed_data, + ignore_warnings, + skip_if_32bit, +) +from sklearn.utils.estimator_checks import check_sample_weights_invariance +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import check_random_state + +CLF_CRITERIONS = ("gini", "log_loss") +REG_CRITERIONS = ("squared_error", "absolute_error", "friedman_mse", "poisson") + +CLF_TREES = { + "DecisionTreeClassifier": DecisionTreeClassifier, + "ExtraTreeClassifier": ExtraTreeClassifier, +} + +REG_TREES = { + "DecisionTreeRegressor": DecisionTreeRegressor, + "ExtraTreeRegressor": ExtraTreeRegressor, +} + +ALL_TREES: dict = dict() +ALL_TREES.update(CLF_TREES) +ALL_TREES.update(REG_TREES) + +SPARSE_TREES = [ + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", +] + + +X_small = np.array( + [ + [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0], + [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1], + [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1], + [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1], + [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1], + [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1], + [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1], + [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1], + [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1], + [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0], + [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0], + [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0], + [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0], + [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0], + [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1], + [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1], + [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1], + [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1], + [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1], + [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1], + [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1], + [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1], + [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0], + ] +) + +y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] +y_small_reg = [ + 1.0, + 2.1, + 1.2, + 0.05, + 10, + 2.4, + 3.1, + 1.01, + 0.01, + 
2.98, + 3.1, + 1.1, + 0.0, + 1.2, + 2, + 11, + 0, + 0, + 4.5, + 0.201, + 1.06, + 0.9, + 0, +] + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [-1, 1, 1] + +# also load the iris dataset +# and randomly permute it +iris = datasets.load_iris() +rng = np.random.RandomState(1) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + +# also load the diabetes dataset +# and randomly permute it +diabetes = datasets.load_diabetes() +perm = rng.permutation(diabetes.target.size) +diabetes.data = diabetes.data[perm] +diabetes.target = diabetes.target[perm] + +digits = datasets.load_digits() +perm = rng.permutation(digits.target.size) +digits.data = digits.data[perm] +digits.target = digits.target[perm] + +random_state = check_random_state(0) +X_multilabel, y_multilabel = datasets.make_multilabel_classification( + random_state=0, n_samples=30, n_features=10 +) + +# NB: despite their names X_sparse_* are numpy arrays (and not sparse matrices) +X_sparse_pos = random_state.uniform(size=(20, 5)) +X_sparse_pos[X_sparse_pos <= 0.8] = 0.0 +y_random = random_state.randint(0, 4, size=(20,)) +X_sparse_mix = _sparse_random_matrix(20, 10, density=0.25, random_state=0).toarray() + + +DATASETS = { + "iris": {"X": iris.data, "y": iris.target}, + "diabetes": {"X": diabetes.data, "y": diabetes.target}, + "digits": {"X": digits.data, "y": digits.target}, + "toy": {"X": X, "y": y}, + "clf_small": {"X": X_small, "y": y_small}, + "reg_small": {"X": X_small, "y": y_small_reg}, + "multilabel": {"X": X_multilabel, "y": y_multilabel}, + "sparse-pos": {"X": X_sparse_pos, "y": y_random}, + "sparse-neg": {"X": -X_sparse_pos, "y": y_random}, + "sparse-mix": {"X": X_sparse_mix, "y": y_random}, + "zeros": {"X": np.zeros((20, 3)), "y": y_random}, +} + + +def assert_tree_equal(d, s, message): + assert ( + s.node_count == d.node_count + ), "{0}: inequal number of node ({1} != {2})".format( + message, s.node_count, d.node_count + ) + + assert_array_equal( + d.children_right, s.children_right, message + ": inequal children_right" + ) + assert_array_equal( + d.children_left, s.children_left, message + ": inequal children_left" + ) + + external = d.children_right == TREE_LEAF + internal = np.logical_not(external) + + assert_array_equal( + d.feature[internal], s.feature[internal], message + ": inequal features" + ) + assert_array_equal( + d.threshold[internal], s.threshold[internal], message + ": inequal threshold" + ) + assert_array_equal( + d.n_node_samples.sum(), + s.n_node_samples.sum(), + message + ": inequal sum(n_node_samples)", + ) + assert_array_equal( + d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples" + ) + + assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") + + assert_array_almost_equal( + d.value[external], s.value[external], err_msg=message + ": inequal value" + ) + + +def test_classification_toy(): + # Check classification on a toy dataset. + for name, Tree in CLF_TREES.items(): + clf = Tree(random_state=0) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) + + clf = Tree(max_features=1, random_state=1) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) + + +def test_weighted_classification_toy(): + # Check classification on a weighted toy dataset. 
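+    # Uniform sample weights (all ones, or all 0.5) only rescale the weighted
+    # node statistics, so the fitted splits and predictions are expected to
+    # match the unweighted fit on this toy problem.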
+ for name, Tree in CLF_TREES.items(): + clf = Tree(random_state=0) + + clf.fit(X, y, sample_weight=np.ones(len(X))) + assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) + + clf.fit(X, y, sample_weight=np.full(len(X), 0.5)) + assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) + + +@pytest.mark.parametrize("Tree", REG_TREES.values()) +@pytest.mark.parametrize("criterion", REG_CRITERIONS) +def test_regression_toy(Tree, criterion): + # Check regression on a toy dataset. + if criterion == "poisson": + # make target positive while not touching the original y and + # true_result + a = np.abs(np.min(y)) + 1 + y_train = np.array(y) + a + y_test = np.array(true_result) + a + else: + y_train = y + y_test = true_result + + reg = Tree(criterion=criterion, random_state=1) + reg.fit(X, y_train) + assert_allclose(reg.predict(T), y_test) + + clf = Tree(criterion=criterion, max_features=1, random_state=1) + clf.fit(X, y_train) + assert_allclose(reg.predict(T), y_test) + + +def test_xor(): + # Check on a XOR problem + y = np.zeros((10, 10)) + y[:5, :5] = 1 + y[5:, 5:] = 1 + + gridx, gridy = np.indices(y.shape) + + X = np.vstack([gridx.ravel(), gridy.ravel()]).T + y = y.ravel() + + for name, Tree in CLF_TREES.items(): + clf = Tree(random_state=0) + clf.fit(X, y) + assert clf.score(X, y) == 1.0, "Failed with {0}".format(name) + + clf = Tree(random_state=0, max_features=1) + clf.fit(X, y) + assert clf.score(X, y) == 1.0, "Failed with {0}".format(name) + + +def test_iris(): + # Check consistency on dataset iris. + for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): + clf = Tree(criterion=criterion, random_state=0) + clf.fit(iris.data, iris.target) + score = accuracy_score(clf.predict(iris.data), iris.target) + assert score > 0.9, "Failed with {0}, criterion = {1} and score = {2}".format( + name, criterion, score + ) + + clf = Tree(criterion=criterion, max_features=2, random_state=0) + clf.fit(iris.data, iris.target) + score = accuracy_score(clf.predict(iris.data), iris.target) + assert score > 0.5, "Failed with {0}, criterion = {1} and score = {2}".format( + name, criterion, score + ) + + +@pytest.mark.parametrize("name, Tree", REG_TREES.items()) +@pytest.mark.parametrize("criterion", REG_CRITERIONS) +def test_diabetes_overfit(name, Tree, criterion): + # check consistency of overfitted trees on the diabetes dataset + # since the trees will overfit, we expect an MSE of 0 + reg = Tree(criterion=criterion, random_state=0) + reg.fit(diabetes.data, diabetes.target) + score = mean_squared_error(diabetes.target, reg.predict(diabetes.data)) + assert score == pytest.approx( + 0 + ), f"Failed with {name}, criterion = {criterion} and score = {score}" + + +@skip_if_32bit +@pytest.mark.parametrize("name, Tree", REG_TREES.items()) +@pytest.mark.parametrize( + "criterion, max_depth, metric, max_loss", + [ + ("squared_error", 15, mean_squared_error, 60), + ("absolute_error", 20, mean_squared_error, 60), + ("friedman_mse", 15, mean_squared_error, 60), + ("poisson", 15, mean_poisson_deviance, 30), + ], +) +def test_diabetes_underfit(name, Tree, criterion, max_depth, metric, max_loss): + # check consistency of trees when the depth and the number of features are + # limited + + reg = Tree(criterion=criterion, max_depth=max_depth, max_features=6, random_state=0) + reg.fit(diabetes.data, diabetes.target) + loss = metric(diabetes.target, reg.predict(diabetes.data)) + assert 0 < loss < max_loss + + +def test_probability(): + # Predict probabilities using 
DecisionTreeClassifier. + + for name, Tree in CLF_TREES.items(): + clf = Tree(max_depth=1, max_features=1, random_state=42) + clf.fit(iris.data, iris.target) + + prob_predict = clf.predict_proba(iris.data) + assert_array_almost_equal( + np.sum(prob_predict, 1), + np.ones(iris.data.shape[0]), + err_msg="Failed with {0}".format(name), + ) + assert_array_equal( + np.argmax(prob_predict, 1), + clf.predict(iris.data), + err_msg="Failed with {0}".format(name), + ) + assert_almost_equal( + clf.predict_proba(iris.data), + np.exp(clf.predict_log_proba(iris.data)), + 8, + err_msg="Failed with {0}".format(name), + ) + + +def test_arrayrepr(): + # Check the array representation. + # Check resize + X = np.arange(10000)[:, np.newaxis] + y = np.arange(10000) + + for name, Tree in REG_TREES.items(): + reg = Tree(max_depth=None, random_state=0) + reg.fit(X, y) + + +def test_pure_set(): + # Check when y is pure. + X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] + y = [1, 1, 1, 1, 1, 1] + + for name, TreeClassifier in CLF_TREES.items(): + clf = TreeClassifier(random_state=0) + clf.fit(X, y) + assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) + + for name, TreeRegressor in REG_TREES.items(): + reg = TreeRegressor(random_state=0) + reg.fit(X, y) + assert_almost_equal(reg.predict(X), y, err_msg="Failed with {0}".format(name)) + + +def test_numerical_stability(): + # Check numerical stability. + X = np.array( + [ + [152.08097839, 140.40744019, 129.75102234, 159.90493774], + [142.50700378, 135.81935120, 117.82884979, 162.75781250], + [127.28772736, 140.40744019, 129.75102234, 159.90493774], + [132.37025452, 143.71923828, 138.35694885, 157.84558105], + [103.10237122, 143.71928406, 138.35696411, 157.84559631], + [127.71276855, 143.71923828, 138.35694885, 157.84558105], + [120.91514587, 140.40744019, 129.75102234, 159.90493774], + ] + ) + + y = np.array([1.0, 0.70209277, 0.53896582, 0.0, 0.90914464, 0.48026916, 0.49622521]) + + with np.errstate(all="raise"): + for name, Tree in REG_TREES.items(): + reg = Tree(random_state=0) + reg.fit(X, y) + reg.fit(X, -y) + reg.fit(-X, y) + reg.fit(-X, -y) + + +def test_importances(): + # Check variable importances. + X, y = datasets.make_classification( + n_samples=5000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + for name, Tree in CLF_TREES.items(): + clf = Tree(random_state=0) + + clf.fit(X, y) + importances = clf.feature_importances_ + n_important = np.sum(importances > 0.1) + + assert importances.shape[0] == 10, "Failed with {0}".format(name) + assert n_important == 3, "Failed with {0}".format(name) + + # Check on iris that importances are the same for all builders + clf = DecisionTreeClassifier(random_state=0) + clf.fit(iris.data, iris.target) + clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) + clf2.fit(iris.data, iris.target) + + assert_array_equal(clf.feature_importances_, clf2.feature_importances_) + + +def test_importances_raises(): + # Check if variable importance before fit raises ValueError. 
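+    # feature_importances_ is computed from the fitted tree_ (mean decrease
+    # in impurity), so accessing it on an unfitted estimator has to raise.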
+ clf = DecisionTreeClassifier() + with pytest.raises(ValueError): + getattr(clf, "feature_importances_") + + +def test_importances_gini_equal_squared_error(): + # Check that gini is equivalent to squared_error for binary output variable + + X, y = datasets.make_classification( + n_samples=2000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, + ) + + # The gini index and the mean square error (variance) might differ due + # to numerical instability. Since those instabilities mainly occurs at + # high tree depth, we restrict this maximal depth. + clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit( + X, y + ) + reg = DecisionTreeRegressor( + criterion="squared_error", max_depth=5, random_state=0 + ).fit(X, y) + + assert_almost_equal(clf.feature_importances_, reg.feature_importances_) + assert_array_equal(clf.tree_.feature, reg.tree_.feature) + assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) + assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) + assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) + + +def test_max_features(): + # Check max_features. + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(max_features="sqrt") + est.fit(iris.data, iris.target) + assert est.max_features_ == int(np.sqrt(iris.data.shape[1])) + + est = TreeEstimator(max_features="log2") + est.fit(iris.data, iris.target) + assert est.max_features_ == int(np.log2(iris.data.shape[1])) + + est = TreeEstimator(max_features=1) + est.fit(iris.data, iris.target) + assert est.max_features_ == 1 + + est = TreeEstimator(max_features=3) + est.fit(iris.data, iris.target) + assert est.max_features_ == 3 + + est = TreeEstimator(max_features=0.01) + est.fit(iris.data, iris.target) + assert est.max_features_ == 1 + + est = TreeEstimator(max_features=0.5) + est.fit(iris.data, iris.target) + assert est.max_features_ == int(0.5 * iris.data.shape[1]) + + est = TreeEstimator(max_features=1.0) + est.fit(iris.data, iris.target) + assert est.max_features_ == iris.data.shape[1] + + est = TreeEstimator(max_features=None) + est.fit(iris.data, iris.target) + assert est.max_features_ == iris.data.shape[1] + + +def test_error(): + # Test that it gives proper exception on deficient input. + for name, TreeEstimator in CLF_TREES.items(): + # predict before fit + est = TreeEstimator() + with pytest.raises(NotFittedError): + est.predict_proba(X) + + est.fit(X, y) + X2 = [[-2, -1, 1]] # wrong feature shape for sample + with pytest.raises(ValueError): + est.predict_proba(X2) + + # Wrong dimensions + est = TreeEstimator() + y2 = y[:-1] + with pytest.raises(ValueError): + est.fit(X, y2) + + # Test with arrays that are non-contiguous. 
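+    # np.asfortranarray returns a column-major (hence non C-contiguous) copy;
+    # fit and predict should still work and give the expected result.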
+ Xf = np.asfortranarray(X) + est = TreeEstimator() + est.fit(Xf, y) + assert_almost_equal(est.predict(T), true_result) + + # predict before fitting + est = TreeEstimator() + with pytest.raises(NotFittedError): + est.predict(T) + + # predict on vector with different dims + est.fit(X, y) + t = np.asarray(T) + with pytest.raises(ValueError): + est.predict(t[:, 1:]) + + # wrong sample shape + Xt = np.array(X).T + + est = TreeEstimator() + est.fit(np.dot(X, Xt), y) + with pytest.raises(ValueError): + est.predict(X) + with pytest.raises(ValueError): + est.apply(X) + + clf = TreeEstimator() + clf.fit(X, y) + with pytest.raises(ValueError): + clf.predict(Xt) + with pytest.raises(ValueError): + clf.apply(Xt) + + # apply before fitting + est = TreeEstimator() + with pytest.raises(NotFittedError): + est.apply(T) + + # non positive target for Poisson splitting Criterion + est = DecisionTreeRegressor(criterion="poisson") + with pytest.raises(ValueError, match="y is not positive.*Poisson"): + est.fit([[0, 1, 2]], [0, 0, 0]) + with pytest.raises(ValueError, match="Some.*y are negative.*Poisson"): + est.fit([[0, 1, 2]], [5, -0.1, 2]) + + +def test_min_samples_split(): + """Test min_samples_split parameter""" + X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE) + y = iris.target + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): + TreeEstimator = ALL_TREES[name] + + # test for integer parameter + est = TreeEstimator( + min_samples_split=10, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + # count samples on nodes, -1 means it is a leaf + node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] + + assert np.min(node_samples) > 9, "Failed with {0}".format(name) + + # test for float parameter + est = TreeEstimator( + min_samples_split=0.2, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + # count samples on nodes, -1 means it is a leaf + node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] + + assert np.min(node_samples) > 9, "Failed with {0}".format(name) + + +def test_min_samples_leaf(): + # Test if leaves contain more than leaf_count training examples + X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE) + y = iris.target + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): + TreeEstimator = ALL_TREES[name] + + # test integer parameter + est = TreeEstimator( + min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + out = est.tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > 4, "Failed with {0}".format(name) + + # test float parameter + est = TreeEstimator( + min_samples_leaf=0.1, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + out = est.tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > 4, "Failed with {0}".format(name) + + +def check_min_weight_fraction_leaf(name, datasets, sparse_container=None): + """Test if leaves contain at least min_weight_fraction_leaf of the + training set""" + X = DATASETS[datasets]["X"].astype(np.float32) + if sparse_container is not None: + X = sparse_container(X) + y = DATASETS[datasets]["y"] + + weights = rng.rand(X.shape[0]) + total_weight = 
np.sum(weights) + + TreeEstimator = ALL_TREES[name] + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): + est = TreeEstimator( + min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y, sample_weight=weights) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out, weights=weights) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert ( + np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf + ), "Failed with {0} min_weight_fraction_leaf={1}".format( + name, est.min_weight_fraction_leaf + ) + + # test case with no weights passed in + total_weight = X.shape[0] + + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): + est = TreeEstimator( + min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0 + ) + est.fit(X, y) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert ( + np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf + ), "Failed with {0} min_weight_fraction_leaf={1}".format( + name, est.min_weight_fraction_leaf + ) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_min_weight_fraction_leaf_on_dense_input(name): + check_min_weight_fraction_leaf(name, "iris") + + +@pytest.mark.parametrize("name", SPARSE_TREES) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_min_weight_fraction_leaf_on_sparse_input(name, csc_container): + check_min_weight_fraction_leaf(name, "multilabel", sparse_container=csc_container) + + +def check_min_weight_fraction_leaf_with_min_samples_leaf( + name, datasets, sparse_container=None +): + """Test the interaction between min_weight_fraction_leaf and + min_samples_leaf when sample_weights is not provided in fit.""" + X = DATASETS[datasets]["X"].astype(np.float32) + if sparse_container is not None: + X = sparse_container(X) + y = DATASETS[datasets]["y"] + + total_weight = X.shape[0] + TreeEstimator = ALL_TREES[name] + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)): + # test integer min_samples_leaf + est = TreeEstimator( + min_weight_fraction_leaf=frac, + max_leaf_nodes=max_leaf_nodes, + min_samples_leaf=5, + random_state=0, + ) + est.fit(X, y) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert np.min(leaf_weights) >= max( + (total_weight * est.min_weight_fraction_leaf), 5 + ), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format( + name, est.min_weight_fraction_leaf, est.min_samples_leaf + ) + for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)): + # test float min_samples_leaf + est = TreeEstimator( + min_weight_fraction_leaf=frac, + max_leaf_nodes=max_leaf_nodes, + min_samples_leaf=0.1, + random_state=0, + ) + est.fit(X, y) + + if sparse_container is not None: + out = est.tree_.apply(X.tocsr()) + else: + out = est.tree_.apply(X) + + node_weights = np.bincount(out) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert np.min(leaf_weights) >= max( + (total_weight * 
est.min_weight_fraction_leaf), + (total_weight * est.min_samples_leaf), + ), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format( + name, est.min_weight_fraction_leaf, est.min_samples_leaf + ) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_min_weight_fraction_leaf_with_min_samples_leaf_on_dense_input(name): + check_min_weight_fraction_leaf_with_min_samples_leaf(name, "iris") + + +@pytest.mark.parametrize("name", SPARSE_TREES) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_min_weight_fraction_leaf_with_min_samples_leaf_on_sparse_input( + name, csc_container +): + check_min_weight_fraction_leaf_with_min_samples_leaf( + name, "multilabel", sparse_container=csc_container + ) + + +def test_min_impurity_decrease(global_random_seed): + # test if min_impurity_decrease ensure that a split is made only if + # if the impurity decrease is at least that value + X, y = datasets.make_classification(n_samples=100, random_state=global_random_seed) + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): + TreeEstimator = ALL_TREES[name] + + # Check default value of min_impurity_decrease, 1e-7 + est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0) + # Check with explicit value of 0.05 + est2 = TreeEstimator( + max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.05, random_state=0 + ) + # Check with a much lower value of 0.0001 + est3 = TreeEstimator( + max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.0001, random_state=0 + ) + # Check with a much lower value of 0.1 + est4 = TreeEstimator( + max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.1, random_state=0 + ) + + for est, expected_decrease in ( + (est1, 1e-7), + (est2, 0.05), + (est3, 0.0001), + (est4, 0.1), + ): + assert ( + est.min_impurity_decrease <= expected_decrease + ), "Failed, min_impurity_decrease = {0} > {1}".format( + est.min_impurity_decrease, expected_decrease + ) + est.fit(X, y) + for node in range(est.tree_.node_count): + # If current node is a not leaf node, check if the split was + # justified w.r.t the min_impurity_decrease + if est.tree_.children_left[node] != TREE_LEAF: + imp_parent = est.tree_.impurity[node] + wtd_n_node = est.tree_.weighted_n_node_samples[node] + + left = est.tree_.children_left[node] + wtd_n_left = est.tree_.weighted_n_node_samples[left] + imp_left = est.tree_.impurity[left] + wtd_imp_left = wtd_n_left * imp_left + + right = est.tree_.children_right[node] + wtd_n_right = est.tree_.weighted_n_node_samples[right] + imp_right = est.tree_.impurity[right] + wtd_imp_right = wtd_n_right * imp_right + + wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left + wtd_avg_left_right_imp /= wtd_n_node + + fractional_node_weight = ( + est.tree_.weighted_n_node_samples[node] / X.shape[0] + ) + + actual_decrease = fractional_node_weight * ( + imp_parent - wtd_avg_left_right_imp + ) + + assert ( + actual_decrease >= expected_decrease + ), "Failed with {0} expected min_impurity_decrease={1}".format( + actual_decrease, expected_decrease + ) + + +def test_pickle(): + """Test pickling preserves Tree properties and performance.""" + for name, TreeEstimator in ALL_TREES.items(): + if "Classifier" in name: + X, y = iris.data, iris.target + else: + X, y = diabetes.data, diabetes.target + + est = TreeEstimator(random_state=0) + est.fit(X, y) + score = est.score(X, y) + + # test that all class properties are maintained + attributes = [ + "max_depth", + 
"node_count", + "capacity", + "n_classes", + "children_left", + "children_right", + "n_leaves", + "feature", + "threshold", + "impurity", + "n_node_samples", + "weighted_n_node_samples", + "value", + ] + fitted_attribute = { + attribute: getattr(est.tree_, attribute) for attribute in attributes + } + + serialized_object = pickle.dumps(est) + est2 = pickle.loads(serialized_object) + assert type(est2) == est.__class__ + + score2 = est2.score(X, y) + assert ( + score == score2 + ), "Failed to generate same score after pickling with {0}".format(name) + for attribute in fitted_attribute: + assert_array_equal( + getattr(est2.tree_, attribute), + fitted_attribute[attribute], + err_msg=( + f"Failed to generate same attribute {attribute} after pickling with" + f" {name}" + ), + ) + + +def test_multioutput(): + # Check estimators on multi-output problems. + X = [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-2, 1], + [-1, 1], + [-1, 2], + [2, -1], + [1, -1], + [1, -2], + ] + + y = [ + [-1, 0], + [-1, 0], + [-1, 0], + [1, 1], + [1, 1], + [1, 1], + [-1, 2], + [-1, 2], + [-1, 2], + [1, 3], + [1, 3], + [1, 3], + ] + + T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] + y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] + + # toy classification problem + for name, TreeClassifier in CLF_TREES.items(): + clf = TreeClassifier(random_state=0) + y_hat = clf.fit(X, y).predict(T) + assert_array_equal(y_hat, y_true) + assert y_hat.shape == (4, 2) + + proba = clf.predict_proba(T) + assert len(proba) == 2 + assert proba[0].shape == (4, 2) + assert proba[1].shape == (4, 4) + + log_proba = clf.predict_log_proba(T) + assert len(log_proba) == 2 + assert log_proba[0].shape == (4, 2) + assert log_proba[1].shape == (4, 4) + + # toy regression problem + for name, TreeRegressor in REG_TREES.items(): + reg = TreeRegressor(random_state=0) + y_hat = reg.fit(X, y).predict(T) + assert_almost_equal(y_hat, y_true) + assert y_hat.shape == (4, 2) + + +def test_classes_shape(): + # Test that n_classes_ and classes_ have proper shape. + for name, TreeClassifier in CLF_TREES.items(): + # Classification, single output + clf = TreeClassifier(random_state=0) + clf.fit(X, y) + + assert clf.n_classes_ == 2 + assert_array_equal(clf.classes_, [-1, 1]) + + # Classification, multi-output + _y = np.vstack((y, np.array(y) * 2)).T + clf = TreeClassifier(random_state=0) + clf.fit(X, _y) + assert len(clf.n_classes_) == 2 + assert len(clf.classes_) == 2 + assert_array_equal(clf.n_classes_, [2, 2]) + assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) + + +def test_unbalanced_iris(): + # Check class rebalancing. 
+ unbalanced_X = iris.data[:125] + unbalanced_y = iris.target[:125] + sample_weight = compute_sample_weight("balanced", unbalanced_y) + + for name, TreeClassifier in CLF_TREES.items(): + clf = TreeClassifier(random_state=0) + clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) + assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) + + +def test_memory_layout(): + # Check that it works no matter the memory layout + for (name, TreeEstimator), dtype in product( + ALL_TREES.items(), [np.float64, np.float32] + ): + est = TreeEstimator(random_state=0) + + # Nothing + X = np.asarray(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # C-order + X = np.asarray(iris.data, order="C", dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # F-order + X = np.asarray(iris.data, order="F", dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # Contiguous + X = np.ascontiguousarray(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # csr + for csr_container in CSR_CONTAINERS: + X = csr_container(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # csc + for csc_container in CSC_CONTAINERS: + X = csc_container(iris.data, dtype=dtype) + y = iris.target + assert_array_equal(est.fit(X, y).predict(X), y) + + # Strided + X = np.asarray(iris.data[::3], dtype=dtype) + y = iris.target[::3] + assert_array_equal(est.fit(X, y).predict(X), y) + + +def test_sample_weight(): + # Check sample weighting. + # Test that zero-weighted samples are not taken into account + X = np.arange(100)[:, np.newaxis] + y = np.ones(100) + y[:50] = 0.0 + + sample_weight = np.ones(100) + sample_weight[y == 0] = 0.0 + + clf = DecisionTreeClassifier(random_state=0) + clf.fit(X, y, sample_weight=sample_weight) + assert_array_equal(clf.predict(X), np.ones(100)) + + # Test that low weighted samples are not taken into account at low depth + X = np.arange(200)[:, np.newaxis] + y = np.zeros(200) + y[50:100] = 1 + y[100:200] = 2 + X[100:200, 0] = 200 + + sample_weight = np.ones(200) + + sample_weight[y == 2] = 0.51 # Samples of class '2' are still weightier + clf = DecisionTreeClassifier(max_depth=1, random_state=0) + clf.fit(X, y, sample_weight=sample_weight) + assert clf.tree_.threshold[0] == 149.5 + + sample_weight[y == 2] = 0.5 # Samples of class '2' are no longer weightier + clf = DecisionTreeClassifier(max_depth=1, random_state=0) + clf.fit(X, y, sample_weight=sample_weight) + assert clf.tree_.threshold[0] == 49.5 # Threshold should have moved + + # Test that sample weighting is the same as having duplicates + X = iris.data + y = iris.target + + duplicates = rng.randint(0, X.shape[0], 100) + + clf = DecisionTreeClassifier(random_state=1) + clf.fit(X[duplicates], y[duplicates]) + + sample_weight = np.bincount(duplicates, minlength=X.shape[0]) + clf2 = DecisionTreeClassifier(random_state=1) + clf2.fit(X, y, sample_weight=sample_weight) + + internal = clf.tree_.children_left != tree._tree.TREE_LEAF + assert_array_almost_equal( + clf.tree_.threshold[internal], clf2.tree_.threshold[internal] + ) + + +def test_sample_weight_invalid(): + # Check sample weighting raises errors. 
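+    # sample_weight is validated to be a 1-D array-like with one entry per
+    # sample; a (n_samples, 1) array or a 0-d scalar array must be rejected.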
+ X = np.arange(100)[:, np.newaxis] + y = np.ones(100) + y[:50] = 0.0 + + clf = DecisionTreeClassifier(random_state=0) + + sample_weight = np.random.rand(100, 1) + with pytest.raises(ValueError): + clf.fit(X, y, sample_weight=sample_weight) + + sample_weight = np.array(0) + expected_err = r"Singleton.* cannot be considered a valid collection" + with pytest.raises(TypeError, match=expected_err): + clf.fit(X, y, sample_weight=sample_weight) + + +@pytest.mark.parametrize("name", CLF_TREES) +def test_class_weights(name): + # Test that class_weights resemble sample_weights behavior. + TreeClassifier = CLF_TREES[name] + + # Iris is balanced, so no effect expected for using 'balanced' weights + clf1 = TreeClassifier(random_state=0) + clf1.fit(iris.data, iris.target) + clf2 = TreeClassifier(class_weight="balanced", random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Make a multi-output problem with three copies of Iris + iris_multi = np.vstack((iris.target, iris.target, iris.target)).T + # Create user-defined weights that should balance over the outputs + clf3 = TreeClassifier( + class_weight=[ + {0: 2.0, 1: 2.0, 2: 1.0}, + {0: 2.0, 1: 1.0, 2: 2.0}, + {0: 1.0, 1: 2.0, 2: 2.0}, + ], + random_state=0, + ) + clf3.fit(iris.data, iris_multi) + assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) + # Check against multi-output "auto" which should also have no effect + clf4 = TreeClassifier(class_weight="balanced", random_state=0) + clf4.fit(iris.data, iris_multi) + assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) + + # Inflate importance of class 1, check against user-defined weights + sample_weight = np.ones(iris.target.shape) + sample_weight[iris.target == 1] *= 100 + class_weight = {0: 1.0, 1: 100.0, 2: 1.0} + clf1 = TreeClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight) + clf2 = TreeClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Check that sample_weight and class_weight are multiplicative + clf1 = TreeClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight**2) + clf2 = TreeClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target, sample_weight) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + +@pytest.mark.parametrize("name", CLF_TREES) +def test_class_weight_errors(name): + # Test if class_weight raises errors and warnings when expected. + TreeClassifier = CLF_TREES[name] + _y = np.vstack((y, np.array(y) * 2)).T + + # Incorrect length list for multi-output + clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0) + err_msg = "number of elements in class_weight should match number of outputs." + with pytest.raises(ValueError, match=err_msg): + clf.fit(X, _y) + + +def test_max_leaf_nodes(): + # Test greedy trees with max_depth + 1 leafs. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + k = 4 + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) + assert est.get_n_leaves() == k + 1 + + +def test_max_leaf_nodes_max_depth(): + # Test precedence of max_leaf_nodes over max_depth. 
+ X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + k = 4 + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) + assert est.get_depth() == 1 + + +def test_arrays_persist(): + # Ensure property arrays' memory stays alive when tree disappears + # non-regression for #2726 + for attr in [ + "n_classes", + "value", + "children_left", + "children_right", + "threshold", + "impurity", + "feature", + "n_node_samples", + ]: + value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr) + # if pointing to freed memory, contents may be arbitrary + assert -3 <= value.flat[0] < 3, "Array points to arbitrary memory" + + +def test_only_constant_features(): + random_state = check_random_state(0) + X = np.zeros((10, 20)) + y = random_state.randint(0, 2, (10,)) + for name, TreeEstimator in ALL_TREES.items(): + est = TreeEstimator(random_state=0) + est.fit(X, y) + assert est.tree_.max_depth == 0 + + +def test_behaviour_constant_feature_after_splits(): + X = np.transpose( + np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]], np.zeros((4, 11)))) + ) + y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3] + for name, TreeEstimator in ALL_TREES.items(): + # do not check extra random trees + if "ExtraTree" not in name: + est = TreeEstimator(random_state=0, max_features=1) + est.fit(X, y) + assert est.tree_.max_depth == 2 + assert est.tree_.node_count == 5 + + +def test_with_only_one_non_constant_features(): + X = np.hstack([np.array([[1.0], [1.0], [0.0], [0.0]]), np.zeros((4, 1000))]) + + y = np.array([0.0, 1.0, 0.0, 1.0]) + for name, TreeEstimator in CLF_TREES.items(): + est = TreeEstimator(random_state=0, max_features=1) + est.fit(X, y) + assert est.tree_.max_depth == 1 + assert_array_equal(est.predict_proba(X), np.full((4, 2), 0.5)) + + for name, TreeEstimator in REG_TREES.items(): + est = TreeEstimator(random_state=0, max_features=1) + est.fit(X, y) + assert est.tree_.max_depth == 1 + assert_array_equal(est.predict(X), np.full((4,), 0.5)) + + +def test_big_input(): + # Test if the warning for too large inputs is appropriate. + X = np.repeat(10**40.0, 4).astype(np.float64).reshape(-1, 1) + clf = DecisionTreeClassifier() + with pytest.raises(ValueError, match="float32"): + clf.fit(X, [0, 1, 0, 1]) + + +def test_realloc(): + from sklearn.tree._utils import _realloc_test + + with pytest.raises(MemoryError): + _realloc_test() + + +def test_huge_allocations(): + n_bits = 8 * struct.calcsize("P") + + X = np.random.randn(10, 2) + y = np.random.randint(0, 2, 10) + + # Sanity check: we cannot request more memory than the size of the address + # space. Currently raises OverflowError. + huge = 2 ** (n_bits + 1) + clf = DecisionTreeClassifier(splitter="best", max_leaf_nodes=huge) + with pytest.raises(Exception): + clf.fit(X, y) + + # Non-regression test: MemoryError used to be dropped by Cython + # because of missing "except *". 
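+    # 2 ** (n_bits - 1) - 1 is the largest value a signed pointer-sized
+    # integer can hold, so this request passes the overflow check and the
+    # allocation itself fails, which must surface as a MemoryError.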
+ huge = 2 ** (n_bits - 1) - 1 + clf = DecisionTreeClassifier(splitter="best", max_leaf_nodes=huge) + with pytest.raises(MemoryError): + clf.fit(X, y) + + +def check_sparse_input(tree, dataset, max_depth=None): + TreeEstimator = ALL_TREES[tree] + X = DATASETS[dataset]["X"] + y = DATASETS[dataset]["y"] + + # Gain testing time + if dataset in ["digits", "diabetes"]: + n_samples = X.shape[0] // 5 + X = X[:n_samples] + y = y[:n_samples] + + for sparse_container in COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS: + X_sparse = sparse_container(X) + + # Check the default (depth first search) + d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) + s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) + + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree), + ) + + y_pred = d.predict(X) + if tree in CLF_TREES: + y_proba = d.predict_proba(X) + y_log_proba = d.predict_log_proba(X) + + for sparse_container_test in COO_CONTAINERS + CSR_CONTAINERS + CSC_CONTAINERS: + X_sparse_test = sparse_container_test(X_sparse, dtype=np.float32) + + assert_array_almost_equal(s.predict(X_sparse_test), y_pred) + + if tree in CLF_TREES: + assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) + assert_array_almost_equal( + s.predict_log_proba(X_sparse_test), y_log_proba + ) + + +@pytest.mark.parametrize("tree_type", SPARSE_TREES) +@pytest.mark.parametrize( + "dataset", + ( + "clf_small", + "toy", + "digits", + "multilabel", + "sparse-pos", + "sparse-neg", + "sparse-mix", + "zeros", + ), +) +def test_sparse_input(tree_type, dataset): + max_depth = 3 if dataset == "digits" else None + check_sparse_input(tree_type, dataset, max_depth) + + +@pytest.mark.parametrize("tree_type", sorted(set(SPARSE_TREES).intersection(REG_TREES))) +@pytest.mark.parametrize("dataset", ["diabetes", "reg_small"]) +def test_sparse_input_reg_trees(tree_type, dataset): + # Due to numerical instability of MSE and too strict test, we limit the + # maximal depth + check_sparse_input(tree_type, dataset, 2) + + +@pytest.mark.parametrize("tree_type", SPARSE_TREES) +@pytest.mark.parametrize("dataset", ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_parameters(tree_type, dataset, csc_container): + TreeEstimator = ALL_TREES[tree_type] + X = DATASETS[dataset]["X"] + X_sparse = csc_container(X) + y = DATASETS[dataset]["y"] + + # Check max_features + d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) + s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + # Check min_samples_split + d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) + s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit( + X_sparse, y + ) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + # Check min_samples_leaf + d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) + s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit( + X_sparse, y + ) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different 
trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + # Check best-first search + d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) + s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + +@pytest.mark.parametrize( + "tree_type, criterion", + list(product([tree for tree in SPARSE_TREES if tree in REG_TREES], REG_CRITERIONS)) + + list( + product([tree for tree in SPARSE_TREES if tree in CLF_TREES], CLF_CRITERIONS) + ), +) +@pytest.mark.parametrize("dataset", ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_sparse_criteria(tree_type, dataset, csc_container, criterion): + TreeEstimator = ALL_TREES[tree_type] + X = DATASETS[dataset]["X"] + X_sparse = csc_container(X) + y = DATASETS[dataset]["y"] + + d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) + s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) + + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree_type), + ) + assert_array_almost_equal(s.predict(X), d.predict(X)) + + +@pytest.mark.parametrize("tree_type", SPARSE_TREES) +@pytest.mark.parametrize( + "csc_container,csr_container", zip(CSC_CONTAINERS, CSR_CONTAINERS) +) +def test_explicit_sparse_zeros(tree_type, csc_container, csr_container): + TreeEstimator = ALL_TREES[tree_type] + max_depth = 3 + n_features = 10 + + # n_samples set n_feature to ease construction of a simultaneous + # construction of a csr and csc matrix + n_samples = n_features + samples = np.arange(n_samples) + + # Generate X, y + random_state = check_random_state(0) + indices = [] + data = [] + offset = 0 + indptr = [offset] + for i in range(n_features): + n_nonzero_i = random_state.binomial(n_samples, 0.5) + indices_i = random_state.permutation(samples)[:n_nonzero_i] + indices.append(indices_i) + data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i,)) - 1 + data.append(data_i) + offset += n_nonzero_i + indptr.append(offset) + + indices = np.concatenate(indices).astype(np.int32) + indptr = np.array(indptr, dtype=np.int32) + data = np.array(np.concatenate(data), dtype=np.float32) + X_sparse = csc_container((data, indices, indptr), shape=(n_samples, n_features)) + X = X_sparse.toarray() + X_sparse_test = csr_container( + (data, indices, indptr), shape=(n_samples, n_features) + ) + X_test = X_sparse_test.toarray() + y = random_state.randint(0, 3, size=(n_samples,)) + + # Ensure that X_sparse_test owns its data, indices and indptr array + X_sparse_test = X_sparse_test.copy() + + # Ensure that we have explicit zeros + assert (X_sparse.data == 0.0).sum() > 0 + assert (X_sparse_test.data == 0.0).sum() > 0 + + # Perform the comparison + d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) + s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) + + assert_tree_equal( + d.tree_, + s.tree_, + "{0} with dense and sparse format gave different trees".format(tree), + ) + + Xs = (X_test, X_sparse_test) + for X1, X2 in product(Xs, Xs): + assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) + assert_array_almost_equal(s.apply(X1), d.apply(X2)) + assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) + + assert_array_almost_equal( + 
s.tree_.decision_path(X1).toarray(), d.tree_.decision_path(X2).toarray() + ) + assert_array_almost_equal( + s.decision_path(X1).toarray(), d.decision_path(X2).toarray() + ) + assert_array_almost_equal( + s.decision_path(X1).toarray(), s.tree_.decision_path(X1).toarray() + ) + + assert_array_almost_equal(s.predict(X1), d.predict(X2)) + + if tree in CLF_TREES: + assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) + + +@ignore_warnings +def check_raise_error_on_1d_input(name): + TreeEstimator = ALL_TREES[name] + + X = iris.data[:, 0].ravel() + X_2d = iris.data[:, 0].reshape((-1, 1)) + y = iris.target + + with pytest.raises(ValueError): + TreeEstimator(random_state=0).fit(X, y) + + est = TreeEstimator(random_state=0) + est.fit(X_2d, y) + with pytest.raises(ValueError): + est.predict([X]) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_1d_input(name): + with ignore_warnings(): + check_raise_error_on_1d_input(name) + + +@pytest.mark.parametrize("name", ALL_TREES) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_min_weight_leaf_split_level(name, sparse_container): + TreeEstimator = ALL_TREES[name] + + X = np.array([[0], [0], [0], [0], [1]]) + y = [0, 0, 0, 0, 1] + sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] + if sparse_container is not None: + X = sparse_container(X) + + est = TreeEstimator(random_state=0) + est.fit(X, y, sample_weight=sample_weight) + assert est.tree_.max_depth == 1 + + est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) + est.fit(X, y, sample_weight=sample_weight) + assert est.tree_.max_depth == 0 + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_public_apply_all_trees(name): + X_small32 = X_small.astype(tree._tree.DTYPE, copy=False) + + est = ALL_TREES[name]() + est.fit(X_small, y_small) + assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) + + +@pytest.mark.parametrize("name", SPARSE_TREES) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_public_apply_sparse_trees(name, csr_container): + X_small32 = csr_container(X_small.astype(tree._tree.DTYPE, copy=False)) + + est = ALL_TREES[name]() + est.fit(X_small, y_small) + assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) + + +def test_decision_path_hardcoded(): + X = iris.data + y = iris.target + est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y) + node_indicator = est.decision_path(X[:2]).toarray() + assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]]) + + +@pytest.mark.parametrize("name", ALL_TREES) +def test_decision_path(name): + X = iris.data + y = iris.target + n_samples = X.shape[0] + + TreeEstimator = ALL_TREES[name] + est = TreeEstimator(random_state=0, max_depth=2) + est.fit(X, y) + + node_indicator_csr = est.decision_path(X) + node_indicator = node_indicator_csr.toarray() + assert node_indicator.shape == (n_samples, est.tree_.node_count) + + # Assert that leaves index are correct + leaves = est.apply(X) + leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)] + assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) + + # Ensure only one leave node per sample + all_leaves = est.tree_.children_left == TREE_LEAF + assert_array_almost_equal( + np.dot(node_indicator, all_leaves), np.ones(shape=n_samples) + ) + + # Ensure max depth is consistent with sum of indicator + max_depth = node_indicator.sum(axis=1).max() + assert est.tree_.max_depth <= max_depth + + +@pytest.mark.parametrize("name", ALL_TREES) +@pytest.mark.parametrize("csr_container", 
CSR_CONTAINERS) +def test_no_sparse_y_support(name, csr_container): + # Currently we don't support sparse y + X, y = X_multilabel, csr_container(y_multilabel) + TreeEstimator = ALL_TREES[name] + with pytest.raises(TypeError): + TreeEstimator(random_state=0).fit(X, y) + + +def test_mae(): + """Check MAE criterion produces correct results on small toy dataset: + + ------------------ + | X | y | weight | + ------------------ + | 3 | 3 | 0.1 | + | 5 | 3 | 0.3 | + | 8 | 4 | 1.0 | + | 3 | 6 | 0.6 | + | 5 | 7 | 0.3 | + ------------------ + |sum wt:| 2.3 | + ------------------ + + Because we are dealing with sample weights, we cannot find the median by + simply choosing/averaging the centre value(s), instead we consider the + median where 50% of the cumulative weight is found (in a y sorted data set) + . Therefore with regards to this test data, the cumulative weight is >= 50% + when y = 4. Therefore: + Median = 4 + + For all the samples, we can get the total error by summing: + Absolute(Median - y) * weight + + I.e., total error = (Absolute(4 - 3) * 0.1) + + (Absolute(4 - 3) * 0.3) + + (Absolute(4 - 4) * 1.0) + + (Absolute(4 - 6) * 0.6) + + (Absolute(4 - 7) * 0.3) + = 2.5 + + Impurity = Total error / total weight + = 2.5 / 2.3 + = 1.08695652173913 + ------------------ + + From this root node, the next best split is between X values of 3 and 5. + Thus, we have left and right child nodes: + + LEFT RIGHT + ------------------ ------------------ + | X | y | weight | | X | y | weight | + ------------------ ------------------ + | 3 | 3 | 0.1 | | 5 | 3 | 0.3 | + | 3 | 6 | 0.6 | | 8 | 4 | 1.0 | + ------------------ | 5 | 7 | 0.3 | + |sum wt:| 0.7 | ------------------ + ------------------ |sum wt:| 1.6 | + ------------------ + + Impurity is found in the same way: + Left node Median = 6 + Total error = (Absolute(6 - 3) * 0.1) + + (Absolute(6 - 6) * 0.6) + = 0.3 + + Left Impurity = Total error / total weight + = 0.3 / 0.7 + = 0.428571428571429 + ------------------- + + Likewise for Right node: + Right node Median = 4 + Total error = (Absolute(4 - 3) * 0.3) + + (Absolute(4 - 4) * 1.0) + + (Absolute(4 - 7) * 0.3) + = 1.2 + + Right Impurity = Total error / total weight + = 1.2 / 1.6 + = 0.75 + ------ + """ + dt_mae = DecisionTreeRegressor( + random_state=0, criterion="absolute_error", max_leaf_nodes=2 + ) + + # Test MAE where sample weights are non-uniform (as illustrated above): + dt_mae.fit( + X=[[3], [5], [3], [8], [5]], + y=[6, 7, 3, 4, 3], + sample_weight=[0.6, 0.3, 0.1, 1.0, 0.3], + ) + assert_allclose(dt_mae.tree_.impurity, [2.5 / 2.3, 0.3 / 0.7, 1.2 / 1.6]) + assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0]) + + # Test MAE where all sample weights are uniform: + dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3], sample_weight=np.ones(5)) + assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0]) + assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0]) + + # Test MAE where a `sample_weight` is not explicitly provided. 
+ # This is equivalent to providing uniform sample weights, though + # the internal logic is different: + dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3]) + assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0]) + assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0]) + + +def test_criterion_copy(): + # Let's check whether copy of our criterion has the same type + # and properties as original + n_outputs = 3 + n_classes = np.arange(3, dtype=np.intp) + n_samples = 100 + + def _pickle_copy(obj): + return pickle.loads(pickle.dumps(obj)) + + for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]: + for _, typename in CRITERIA_CLF.items(): + criteria = typename(n_outputs, n_classes) + result = copy_func(criteria).__reduce__() + typename_, (n_outputs_, n_classes_), _ = result + assert typename == typename_ + assert n_outputs == n_outputs_ + assert_array_equal(n_classes, n_classes_) + + for _, typename in CRITERIA_REG.items(): + criteria = typename(n_outputs, n_samples) + result = copy_func(criteria).__reduce__() + typename_, (n_outputs_, n_samples_), _ = result + assert typename == typename_ + assert n_outputs == n_outputs_ + assert n_samples == n_samples_ + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_empty_leaf_infinite_threshold(sparse_container): + # try to make empty leaf by using near infinite value. + data = np.random.RandomState(0).randn(100, 11) * 2e38 + data = np.nan_to_num(data.astype("float32")) + X = data[:, :-1] + if sparse_container is not None: + X = sparse_container(X) + y = data[:, -1] + + tree = DecisionTreeRegressor(random_state=0).fit(X, y) + terminal_regions = tree.apply(X) + left_leaf = set(np.where(tree.tree_.children_left == TREE_LEAF)[0]) + empty_leaf = left_leaf.difference(terminal_regions) + infinite_threshold = np.where(~np.isfinite(tree.tree_.threshold))[0] + assert len(infinite_threshold) == 0 + assert len(empty_leaf) == 0 + + +@pytest.mark.parametrize( + "dataset", sorted(set(DATASETS.keys()) - {"reg_small", "diabetes"}) +) +@pytest.mark.parametrize("tree_cls", [DecisionTreeClassifier, ExtraTreeClassifier]) +def test_prune_tree_classifier_are_subtrees(dataset, tree_cls): + dataset = DATASETS[dataset] + X, y = dataset["X"], dataset["y"] + est = tree_cls(max_leaf_nodes=20, random_state=0) + info = est.cost_complexity_pruning_path(X, y) + + pruning_path = info.ccp_alphas + impurities = info.impurities + assert np.all(np.diff(pruning_path) >= 0) + assert np.all(np.diff(impurities) >= 0) + + assert_pruning_creates_subtree(tree_cls, X, y, pruning_path) + + +@pytest.mark.parametrize("dataset", DATASETS.keys()) +@pytest.mark.parametrize("tree_cls", [DecisionTreeRegressor, ExtraTreeRegressor]) +def test_prune_tree_regression_are_subtrees(dataset, tree_cls): + dataset = DATASETS[dataset] + X, y = dataset["X"], dataset["y"] + + est = tree_cls(max_leaf_nodes=20, random_state=0) + info = est.cost_complexity_pruning_path(X, y) + + pruning_path = info.ccp_alphas + impurities = info.impurities + assert np.all(np.diff(pruning_path) >= 0) + assert np.all(np.diff(impurities) >= 0) + + assert_pruning_creates_subtree(tree_cls, X, y, pruning_path) + + +def test_prune_single_node_tree(): + # single node tree + clf1 = DecisionTreeClassifier(random_state=0) + clf1.fit([[0], [1]], [0, 0]) + + # pruned single node tree + clf2 = DecisionTreeClassifier(random_state=0, ccp_alpha=10) + clf2.fit([[0], [1]], [0, 0]) + + assert_is_subtree(clf1.tree_, clf2.tree_) + + +def assert_pruning_creates_subtree(estimator_cls, X, y, 
pruning_path): + # generate trees with increasing alphas + estimators = [] + for ccp_alpha in pruning_path: + est = estimator_cls(max_leaf_nodes=20, ccp_alpha=ccp_alpha, random_state=0).fit( + X, y + ) + estimators.append(est) + + # A pruned tree must be a subtree of the previous tree (which had a + # smaller ccp_alpha) + for prev_est, next_est in zip(estimators, estimators[1:]): + assert_is_subtree(prev_est.tree_, next_est.tree_) + + +def assert_is_subtree(tree, subtree): + assert tree.node_count >= subtree.node_count + assert tree.max_depth >= subtree.max_depth + + tree_c_left = tree.children_left + tree_c_right = tree.children_right + subtree_c_left = subtree.children_left + subtree_c_right = subtree.children_right + + stack = [(0, 0)] + while stack: + tree_node_idx, subtree_node_idx = stack.pop() + assert_array_almost_equal( + tree.value[tree_node_idx], subtree.value[subtree_node_idx] + ) + assert_almost_equal( + tree.impurity[tree_node_idx], subtree.impurity[subtree_node_idx] + ) + assert_almost_equal( + tree.n_node_samples[tree_node_idx], subtree.n_node_samples[subtree_node_idx] + ) + assert_almost_equal( + tree.weighted_n_node_samples[tree_node_idx], + subtree.weighted_n_node_samples[subtree_node_idx], + ) + + if subtree_c_left[subtree_node_idx] == subtree_c_right[subtree_node_idx]: + # is a leaf + assert_almost_equal(TREE_UNDEFINED, subtree.threshold[subtree_node_idx]) + else: + # not a leaf + assert_almost_equal( + tree.threshold[tree_node_idx], subtree.threshold[subtree_node_idx] + ) + stack.append((tree_c_left[tree_node_idx], subtree_c_left[subtree_node_idx])) + stack.append( + (tree_c_right[tree_node_idx], subtree_c_right[subtree_node_idx]) + ) + + +@pytest.mark.parametrize("name", ALL_TREES) +@pytest.mark.parametrize("splitter", ["best", "random"]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_apply_path_readonly_all_trees(name, splitter, sparse_container): + dataset = DATASETS["clf_small"] + X_small = dataset["X"].astype(tree._tree.DTYPE, copy=False) + if sparse_container is None: + X_readonly = create_memmap_backed_data(X_small) + else: + X_readonly = sparse_container(dataset["X"]) + + X_readonly.data = np.array(X_readonly.data, dtype=tree._tree.DTYPE) + ( + X_readonly.data, + X_readonly.indices, + X_readonly.indptr, + ) = create_memmap_backed_data( + (X_readonly.data, X_readonly.indices, X_readonly.indptr) + ) + + y_readonly = create_memmap_backed_data(np.array(y_small, dtype=tree._tree.DTYPE)) + est = ALL_TREES[name](splitter=splitter) + est.fit(X_readonly, y_readonly) + assert_array_equal(est.predict(X_readonly), est.predict(X_small)) + assert_array_equal( + est.decision_path(X_readonly).todense(), est.decision_path(X_small).todense() + ) + + +@pytest.mark.parametrize("criterion", ["squared_error", "friedman_mse", "poisson"]) +@pytest.mark.parametrize("Tree", REG_TREES.values()) +def test_balance_property(criterion, Tree): + # Test that sum(y_pred)=sum(y_true) on training set. + # This works if the mean is predicted (should even be true for each leaf). + # MAE predicts the median and is therefore excluded from this test. + + # Choose a training set with non-negative targets (for poisson) + X, y = diabetes.data, diabetes.target + reg = Tree(criterion=criterion) + reg.fit(X, y) + assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y)) + + +@pytest.mark.parametrize("seed", range(3)) +def test_poisson_zero_nodes(seed): + # Test that sum(y)=0 and therefore y_pred=0 is forbidden on nodes. 
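+ # The Poisson deviance contains a log(y_pred) term, so a leaf predicting
+ # the mean of an all-zero target would give an infinite deviance; the
+ # "poisson" criterion is therefore expected to reject splits that would
+ # isolate a child node with sum(y) == 0.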
+ X = [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 2], [1, 2], [1, 3]] + y = [0, 0, 0, 0, 1, 2, 3, 4] + # Note that X[:, 0] == 0 is a 100% indicator for y == 0. The tree can + # easily learn that: + reg = DecisionTreeRegressor(criterion="squared_error", random_state=seed) + reg.fit(X, y) + assert np.amin(reg.predict(X)) == 0 + # whereas Poisson must predict strictly positive numbers + reg = DecisionTreeRegressor(criterion="poisson", random_state=seed) + reg.fit(X, y) + assert np.all(reg.predict(X) > 0) + + # Test additional dataset where something could go wrong. + n_features = 10 + X, y = datasets.make_regression( + effective_rank=n_features * 2 // 3, + tail_strength=0.6, + n_samples=1_000, + n_features=n_features, + n_informative=n_features * 2 // 3, + random_state=seed, + ) + # some excess zeros + y[(-1 < y) & (y < 0)] = 0 + # make sure the target is positive + y = np.abs(y) + reg = DecisionTreeRegressor(criterion="poisson", random_state=seed) + reg.fit(X, y) + assert np.all(reg.predict(X) > 0) + + +def test_poisson_vs_mse(): + # For a Poisson distributed target, Poisson loss should give better results + # than squared error measured in Poisson deviance as metric. + # We have a similar test, test_poisson(), in + # sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 500, 10 + X = datasets.make_low_rank_matrix( + n_samples=n_train + n_test, n_features=n_features, random_state=rng + ) + # We create a log-linear Poisson model and downscale coef as it will get + # exponentiated. + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + y = rng.poisson(lam=np.exp(X @ coef)) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=rng + ) + # We prevent some overfitting by setting min_samples_split=10. + tree_poi = DecisionTreeRegressor( + criterion="poisson", min_samples_split=10, random_state=rng + ) + tree_mse = DecisionTreeRegressor( + criterion="squared_error", min_samples_split=10, random_state=rng + ) + + tree_poi.fit(X_train, y_train) + tree_mse.fit(X_train, y_train) + dummy = DummyRegressor(strategy="mean").fit(X_train, y_train) + + for X, y, val in [(X_train, y_train, "train"), (X_test, y_test, "test")]: + metric_poi = mean_poisson_deviance(y, tree_poi.predict(X)) + # squared_error might produce non-positive predictions => clip + metric_mse = mean_poisson_deviance(y, np.clip(tree_mse.predict(X), 1e-15, None)) + metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) + # As squared_error might correctly predict 0 in train set, its train + # score can be better than Poisson. This is no longer the case for the + # test set. 
+ if val == "test": + assert metric_poi < 0.5 * metric_mse + assert metric_poi < 0.75 * metric_dummy + + +@pytest.mark.parametrize("criterion", REG_CRITERIONS) +def test_decision_tree_regressor_sample_weight_consistency(criterion): + """Test that the impact of sample_weight is consistent.""" + tree_params = dict(criterion=criterion) + tree = DecisionTreeRegressor(**tree_params, random_state=42) + for kind in ["zeros", "ones"]: + check_sample_weights_invariance( + "DecisionTreeRegressor_" + criterion, tree, kind="zeros" + ) + + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = np.mean(X, axis=1) + rng.rand(n_samples) + # make it positive in order to work also for poisson criterion + y += np.min(y) + 0.1 + + # check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = np.ones(len(y)) + sample_weight_1[: n_samples // 2] = 2 + + tree1 = DecisionTreeRegressor(**tree_params).fit( + X, y, sample_weight=sample_weight_1 + ) + + tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) + + assert tree1.tree_.node_count == tree2.tree_.node_count + # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not + # exactly the same, but on the training set, those differences do not + # matter and thus predictions are the same. + assert_allclose(tree1.predict(X), tree2.predict(X)) + + +@pytest.mark.parametrize("Tree", [DecisionTreeClassifier, ExtraTreeClassifier]) +@pytest.mark.parametrize("n_classes", [2, 4]) +def test_criterion_entropy_same_as_log_loss(Tree, n_classes): + """Test that criterion=entropy gives same as log_loss.""" + n_samples, n_features = 50, 5 + X, y = datasets.make_classification( + n_classes=n_classes, + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + n_redundant=0, + random_state=42, + ) + tree_log_loss = Tree(criterion="log_loss", random_state=43).fit(X, y) + tree_entropy = Tree(criterion="entropy", random_state=43).fit(X, y) + + assert_tree_equal( + tree_log_loss.tree_, + tree_entropy.tree_, + f"{Tree!r} with criterion 'entropy' and 'log_loss' gave different trees.", + ) + assert_allclose(tree_log_loss.predict(X), tree_entropy.predict(X)) + + +def test_different_endianness_pickle(): + X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def reduce_ndarray(arr): + return arr.byteswap().view(arr.dtype.newbyteorder()).__reduce__() + + def get_pickle_non_native_endianness(): + f = io.BytesIO() + p = pickle.Pickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[np.ndarray] = reduce_ndarray + + p.dump(clf) + f.seek(0) + return f + + new_clf = pickle.load(get_pickle_non_native_endianness()) + new_score = new_clf.score(X, y) + assert np.isclose(score, new_score) + + +def test_different_endianness_joblib_pickle(): + X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + class NonNativeEndiannessNumpyPickler(NumpyPickler): + def save(self, obj): + if isinstance(obj, np.ndarray): + obj = obj.byteswap().view(obj.dtype.newbyteorder()) + super().save(obj) + + def get_joblib_pickle_non_native_endianness(): + f = io.BytesIO() + p = NonNativeEndiannessNumpyPickler(f) + + p.dump(clf) + 
f.seek(0) + return f + + new_clf = joblib.load(get_joblib_pickle_non_native_endianness()) + new_score = new_clf.score(X, y) + assert np.isclose(score, new_score) + + +def get_different_bitness_node_ndarray(node_ndarray): + new_dtype_for_indexing_fields = np.int64 if _IS_32BIT else np.int32 + + # field names in Node struct with SIZE_t types (see sklearn/tree/_tree.pxd) + indexing_field_names = ["left_child", "right_child", "feature", "n_node_samples"] + + new_dtype_dict = { + name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items() + } + for name in indexing_field_names: + new_dtype_dict[name] = new_dtype_for_indexing_fields + + new_dtype = np.dtype( + {"names": list(new_dtype_dict.keys()), "formats": list(new_dtype_dict.values())} + ) + return node_ndarray.astype(new_dtype, casting="same_kind") + + +def get_different_alignment_node_ndarray(node_ndarray): + new_dtype_dict = { + name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items() + } + offsets = [offset for dtype, offset in node_ndarray.dtype.fields.values()] + shifted_offsets = [8 + offset for offset in offsets] + + new_dtype = np.dtype( + { + "names": list(new_dtype_dict.keys()), + "formats": list(new_dtype_dict.values()), + "offsets": shifted_offsets, + } + ) + return node_ndarray.astype(new_dtype, casting="same_kind") + + +def reduce_tree_with_different_bitness(tree): + new_dtype = np.int64 if _IS_32BIT else np.int32 + tree_cls, (n_features, n_classes, n_outputs), state = tree.__reduce__() + new_n_classes = n_classes.astype(new_dtype, casting="same_kind") + + new_state = state.copy() + new_state["nodes"] = get_different_bitness_node_ndarray(new_state["nodes"]) + + return (tree_cls, (n_features, new_n_classes, n_outputs), new_state) + + +def test_different_bitness_pickle(): + X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def pickle_dump_with_different_bitness(): + f = io.BytesIO() + p = pickle.Pickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[CythonTree] = reduce_tree_with_different_bitness + + p.dump(clf) + f.seek(0) + return f + + new_clf = pickle.load(pickle_dump_with_different_bitness()) + new_score = new_clf.score(X, y) + assert score == pytest.approx(new_score) + + +def test_different_bitness_joblib_pickle(): + # Make sure that a platform specific pickle generated on a 64 bit + # platform can be converted at pickle load time into an estimator + # with Cython code that works with the host's native integer precision + # to index nodes in the tree data structure when the host is a 32 bit + # platform (and vice versa). 
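+ # The dispatch table entry below reuses reduce_tree_with_different_bitness,
+ # which rewrites the SIZE_t fields of the node array (left_child,
+ # right_child, feature, n_node_samples) to the other bitness via
+ # get_different_bitness_node_ndarray; loading should then cast them back
+ # to the host's native index dtype.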
+ X, y = datasets.make_classification(random_state=0) + + clf = DecisionTreeClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def joblib_dump_with_different_bitness(): + f = io.BytesIO() + p = NumpyPickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[CythonTree] = reduce_tree_with_different_bitness + + p.dump(clf) + f.seek(0) + return f + + new_clf = joblib.load(joblib_dump_with_different_bitness()) + new_score = new_clf.score(X, y) + assert score == pytest.approx(new_score) + + +def test_check_n_classes(): + expected_dtype = np.dtype(np.int32) if _IS_32BIT else np.dtype(np.int64) + allowed_dtypes = [np.dtype(np.int32), np.dtype(np.int64)] + allowed_dtypes += [dt.newbyteorder() for dt in allowed_dtypes] + + n_classes = np.array([0, 1], dtype=expected_dtype) + for dt in allowed_dtypes: + _check_n_classes(n_classes.astype(dt), expected_dtype) + + with pytest.raises(ValueError, match="Wrong dimensions.+n_classes"): + wrong_dim_n_classes = np.array([[0, 1]], dtype=expected_dtype) + _check_n_classes(wrong_dim_n_classes, expected_dtype) + + with pytest.raises(ValueError, match="n_classes.+incompatible dtype"): + wrong_dtype_n_classes = n_classes.astype(np.float64) + _check_n_classes(wrong_dtype_n_classes, expected_dtype) + + +def test_check_value_ndarray(): + expected_dtype = np.dtype(np.float64) + expected_shape = (5, 1, 2) + value_ndarray = np.zeros(expected_shape, dtype=expected_dtype) + + allowed_dtypes = [expected_dtype, expected_dtype.newbyteorder()] + + for dt in allowed_dtypes: + _check_value_ndarray( + value_ndarray, expected_dtype=dt, expected_shape=expected_shape + ) + + with pytest.raises(ValueError, match="Wrong shape.+value array"): + _check_value_ndarray( + value_ndarray, expected_dtype=expected_dtype, expected_shape=(1, 2) + ) + + for problematic_arr in [value_ndarray[:, :, :1], np.asfortranarray(value_ndarray)]: + with pytest.raises(ValueError, match="value array.+C-contiguous"): + _check_value_ndarray( + problematic_arr, + expected_dtype=expected_dtype, + expected_shape=problematic_arr.shape, + ) + + with pytest.raises(ValueError, match="value array.+incompatible dtype"): + _check_value_ndarray( + value_ndarray.astype(np.float32), + expected_dtype=expected_dtype, + expected_shape=expected_shape, + ) + + +def test_check_node_ndarray(): + expected_dtype = NODE_DTYPE + + node_ndarray = np.zeros((5,), dtype=expected_dtype) + + valid_node_ndarrays = [ + node_ndarray, + get_different_bitness_node_ndarray(node_ndarray), + get_different_alignment_node_ndarray(node_ndarray), + ] + valid_node_ndarrays += [ + arr.astype(arr.dtype.newbyteorder()) for arr in valid_node_ndarrays + ] + + for arr in valid_node_ndarrays: + _check_node_ndarray(node_ndarray, expected_dtype=expected_dtype) + + with pytest.raises(ValueError, match="Wrong dimensions.+node array"): + problematic_node_ndarray = np.zeros((5, 2), dtype=expected_dtype) + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + with pytest.raises(ValueError, match="node array.+C-contiguous"): + problematic_node_ndarray = node_ndarray[::2] + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + dtype_dict = {name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items()} + + # array with wrong 'threshold' field dtype (int64 rather than float64) + new_dtype_dict = dtype_dict.copy() + new_dtype_dict["threshold"] = np.int64 + + new_dtype = np.dtype( + {"names": list(new_dtype_dict.keys()), "formats": 
list(new_dtype_dict.values())} + ) + problematic_node_ndarray = node_ndarray.astype(new_dtype) + + with pytest.raises(ValueError, match="node array.+incompatible dtype"): + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + # array with wrong 'left_child' field dtype (float64 rather than int64 or int32) + new_dtype_dict = dtype_dict.copy() + new_dtype_dict["left_child"] = np.float64 + new_dtype = np.dtype( + {"names": list(new_dtype_dict.keys()), "formats": list(new_dtype_dict.values())} + ) + + problematic_node_ndarray = node_ndarray.astype(new_dtype) + + with pytest.raises(ValueError, match="node array.+incompatible dtype"): + _check_node_ndarray(problematic_node_ndarray, expected_dtype=expected_dtype) + + +@pytest.mark.parametrize( + "Splitter", chain(DENSE_SPLITTERS.values(), SPARSE_SPLITTERS.values()) +) +def test_splitter_serializable(Splitter): + """Check that splitters are serializable.""" + rng = np.random.RandomState(42) + max_features = 10 + n_outputs, n_classes = 2, np.array([3, 2], dtype=np.intp) + + criterion = CRITERIA_CLF["gini"](n_outputs, n_classes) + splitter = Splitter(criterion, max_features, 5, 0.5, rng, monotonic_cst=None) + splitter_serialize = pickle.dumps(splitter) + + splitter_back = pickle.loads(splitter_serialize) + assert splitter_back.max_features == max_features + assert isinstance(splitter_back, Splitter) + + +def test_tree_deserialization_from_read_only_buffer(tmpdir): + """Check that Trees can be deserialized with read only buffers. + + Non-regression test for gh-25584. + """ + pickle_path = str(tmpdir.join("clf.joblib")) + clf = DecisionTreeClassifier(random_state=0) + clf.fit(X_small, y_small) + + joblib.dump(clf, pickle_path) + loaded_clf = joblib.load(pickle_path, mmap_mode="r") + + assert_tree_equal( + loaded_clf.tree_, + clf.tree_, + "The trees of the original and loaded classifiers are not equal.", + ) + + +@pytest.mark.parametrize("Tree", ALL_TREES.values()) +def test_min_sample_split_1_error(Tree): + """Check that an error is raised when min_sample_split=1. + + non-regression test for issue gh-25481. 
+ """ + X = np.array([[0, 0], [1, 1]]) + y = np.array([0, 1]) + + # min_samples_split=1.0 is valid + Tree(min_samples_split=1.0).fit(X, y) + + # min_samples_split=1 is invalid + tree = Tree(min_samples_split=1) + msg = ( + r"'min_samples_split' .* must be an int in the range \[2, inf\) " + r"or a float in the range \(0.0, 1.0\]" + ) + with pytest.raises(ValueError, match=msg): + tree.fit(X, y) + + +@pytest.mark.parametrize("criterion", ["squared_error", "friedman_mse"]) +def test_missing_values_on_equal_nodes_no_missing(criterion): + """Check missing values goes to correct node during predictions""" + X = np.array([[0, 1, 2, 3, 8, 9, 11, 12, 15]]).T + y = np.array([0.1, 0.2, 0.3, 0.2, 1.4, 1.4, 1.5, 1.6, 2.6]) + + dtc = DecisionTreeRegressor(random_state=42, max_depth=1, criterion=criterion) + dtc.fit(X, y) + + # Goes to right node because it has the most data points + y_pred = dtc.predict([[np.nan]]) + assert_allclose(y_pred, [np.mean(y[-5:])]) + + # equal number of elements in both nodes + X_equal = X[:-1] + y_equal = y[:-1] + + dtc = DecisionTreeRegressor(random_state=42, max_depth=1, criterion=criterion) + dtc.fit(X_equal, y_equal) + + # Goes to right node because the implementation sets: + # missing_go_to_left = n_left > n_right, which is False + y_pred = dtc.predict([[np.nan]]) + assert_allclose(y_pred, [np.mean(y_equal[-4:])]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_best_splitter_three_classes(criterion): + """Test when missing values are uniquely present in a class among 3 classes.""" + missing_values_class = 0 + X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 8, 9, 11, 12]]).T + y = np.array([missing_values_class] * 4 + [1] * 4 + [2] * 4) + dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion) + dtc.fit(X, y) + + X_test = np.array([[np.nan, 3, 12]]).T + y_nan_pred = dtc.predict(X_test) + # Missing values necessarily are associated to the observed class. 
+ assert_array_equal(y_nan_pred, [missing_values_class, 1, 2]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_best_splitter_to_left(criterion): + """Missing values spanning only one class at fit-time must make missing + values at predict-time be classified has belonging to this class.""" + X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 4, 5]]).T + y = np.array([0] * 4 + [1] * 6) + + dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion) + dtc.fit(X, y) + + X_test = np.array([[np.nan, 5, np.nan]]).T + y_pred = dtc.predict(X_test) + + assert_array_equal(y_pred, [0, 1, 0]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_best_splitter_to_right(criterion): + """Missing values and non-missing values sharing one class at fit-time + must make missing values at predict-time be classified has belonging + to this class.""" + X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 4, 5]]).T + y = np.array([1] * 4 + [0] * 4 + [1] * 2) + + dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion) + dtc.fit(X, y) + + X_test = np.array([[np.nan, 1.2, 4.8]]).T + y_pred = dtc.predict(X_test) + + assert_array_equal(y_pred, [1, 0, 1]) + + +@pytest.mark.parametrize("criterion", ["entropy", "gini"]) +def test_missing_values_missing_both_classes_has_nan(criterion): + """Check behavior of missing value when there is one missing value in each class.""" + X = np.array([[1, 2, 3, 5, np.nan, 10, 20, 30, 60, np.nan]]).T + y = np.array([0] * 5 + [1] * 5) + + dtc = DecisionTreeClassifier(random_state=42, max_depth=1, criterion=criterion) + dtc.fit(X, y) + X_test = np.array([[np.nan, 2.3, 34.2]]).T + y_pred = dtc.predict(X_test) + + # Missing value goes to the class at the right (here 1) because the implementation + # searches right first. 
+ assert_array_equal(y_pred, [1, 0, 1]) + + +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +@pytest.mark.parametrize( + "tree", + [ + DecisionTreeClassifier(splitter="random"), + DecisionTreeRegressor(criterion="absolute_error"), + ], +) +def test_missing_value_errors(sparse_container, tree): + """Check unsupported configurations for missing values.""" + + X = np.array([[1, 2, 3, 5, np.nan, 10, 20, 30, 60, np.nan]]).T + y = np.array([0] * 5 + [1] * 5) + + if sparse_container is not None: + X = sparse_container(X) + + with pytest.raises(ValueError, match="Input X contains NaN"): + tree.fit(X, y) + + +def test_missing_values_poisson(): + """Smoke test for poisson regression and missing values.""" + X, y = diabetes.data.copy(), diabetes.target + + # Set some values missing + X[::5, 0] = np.nan + X[::6, -1] = np.nan + + reg = DecisionTreeRegressor(criterion="poisson", random_state=42) + reg.fit(X, y) + + y_pred = reg.predict(X) + assert (y_pred >= 0.0).all() + + +def make_friedman1_classification(*args, **kwargs): + X, y = datasets.make_friedman1(*args, **kwargs) + y = y > 14 + return X, y + + +@pytest.mark.parametrize( + "make_data,Tree", + [ + (datasets.make_friedman1, DecisionTreeRegressor), + (make_friedman1_classification, DecisionTreeClassifier), + ], +) +@pytest.mark.parametrize("sample_weight_train", [None, "ones"]) +def test_missing_values_is_resilience( + make_data, Tree, sample_weight_train, global_random_seed +): + """Check that trees can deal with missing values have decent performance.""" + n_samples, n_features = 5_000, 10 + X, y = make_data( + n_samples=n_samples, n_features=n_features, random_state=global_random_seed + ) + + X_missing = X.copy() + rng = np.random.RandomState(global_random_seed) + X_missing[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan + X_missing_train, X_missing_test, y_train, y_test = train_test_split( + X_missing, y, random_state=global_random_seed + ) + if sample_weight_train == "ones": + sample_weight = np.ones(X_missing_train.shape[0]) + else: + sample_weight = None + + native_tree = Tree(max_depth=10, random_state=global_random_seed) + native_tree.fit(X_missing_train, y_train, sample_weight=sample_weight) + score_native_tree = native_tree.score(X_missing_test, y_test) + + tree_with_imputer = make_pipeline( + SimpleImputer(), Tree(max_depth=10, random_state=global_random_seed) + ) + tree_with_imputer.fit(X_missing_train, y_train) + score_tree_with_imputer = tree_with_imputer.score(X_missing_test, y_test) + + assert ( + score_native_tree > score_tree_with_imputer + ), f"{score_native_tree=} should be strictly greater than {score_tree_with_imputer}" + + +def test_missing_value_is_predictive(): + """Check the tree learns when only the missing value is predictive.""" + rng = np.random.RandomState(0) + n_samples = 1000 + + X = rng.standard_normal(size=(n_samples, 10)) + y = rng.randint(0, high=2, size=n_samples) + + # Create a predictive feature using `y` and with some noise + X_random_mask = rng.choice([False, True], size=n_samples, p=[0.95, 0.05]) + y_mask = y.copy().astype(bool) + y_mask[X_random_mask] = ~y_mask[X_random_mask] + + X_predictive = rng.standard_normal(size=n_samples) + X_predictive[y_mask] = np.nan + + X[:, 5] = X_predictive + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng) + tree = DecisionTreeClassifier(random_state=rng).fit(X_train, y_train) + + assert tree.score(X_train, y_train) >= 0.85 + assert tree.score(X_test, y_test) >= 0.85 + + +@pytest.mark.parametrize( + 
"make_data, Tree", + [ + (datasets.make_regression, DecisionTreeRegressor), + (datasets.make_classification, DecisionTreeClassifier), + ], +) +def test_sample_weight_non_uniform(make_data, Tree): + """Check sample weight is correctly handled with missing values.""" + rng = np.random.RandomState(0) + n_samples, n_features = 1000, 10 + X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng) + + # Create dataset with missing values + X[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan + + # Zero sample weight is the same as removing the sample + sample_weight = np.ones(X.shape[0]) + sample_weight[::2] = 0.0 + + tree_with_sw = Tree(random_state=0) + tree_with_sw.fit(X, y, sample_weight=sample_weight) + + tree_samples_removed = Tree(random_state=0) + tree_samples_removed.fit(X[1::2, :], y[1::2]) + + assert_allclose(tree_samples_removed.predict(X), tree_with_sw.predict(X)) + + +def test_deterministic_pickle(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/27268 + # Uninitialised memory would lead to the two pickle strings being different. + tree1 = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target) + tree2 = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target) + + pickle1 = pickle.dumps(tree1) + pickle2 = pickle.dumps(tree2) + + assert pickle1 == pickle2 + + +@pytest.mark.parametrize( + "X", + [ + # missing values will go left for greedy splits + np.array([np.nan, 2, np.nan, 4, 5, 6]), + np.array([np.nan, np.nan, 3, 4, 5, 6]), + # missing values will go right for greedy splits + np.array([1, 2, 3, 4, np.nan, np.nan]), + np.array([1, 2, 3, np.nan, 6, np.nan]), + ], +) +@pytest.mark.parametrize("criterion", ["squared_error", "friedman_mse"]) +def test_regression_tree_missing_values_toy(X, criterion): + """Check that we properly handle missing values in regression trees using a toy + dataset. + + The regression targeted by this test was that we were not reinitializing the + criterion when it comes to the number of missing values. Therefore, the value + of the critetion (i.e. MSE) was completely wrong. + + This test check that the MSE is null when there is a single sample in the leaf. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28254 + https://github.com/scikit-learn/scikit-learn/issues/28316 + """ + X = X.reshape(-1, 1) + y = np.arange(6) + + tree = DecisionTreeRegressor(criterion=criterion, random_state=0).fit(X, y) + tree_ref = clone(tree).fit(y.reshape(-1, 1), y) + assert all(tree.tree_.impurity >= 0) # MSE should always be positive + # Check the impurity match after the first split + assert_allclose(tree.tree_.impurity[:2], tree_ref.tree_.impurity[:2]) + + # Find the leaves with a single sample where the MSE should be 0 + leaves_idx = np.flatnonzero( + (tree.tree_.children_left == -1) & (tree.tree_.n_node_samples == 1) + ) + assert_allclose(tree.tree_.impurity[leaves_idx], 0.0) + + +def test_classification_tree_missing_values_toy(): + """Check that we properly handle missing values in clasification trees using a toy + dataset. + + The test is more involved because we use a case where we detected a regression + in a random forest. We therefore define the seed and bootstrap indices to detect + one of the non-frequent regression. + + Here, we check that the impurity is null or positive in the leaves. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28254 + """ + X, y = datasets.load_iris(return_X_y=True) + + rng = np.random.RandomState(42) + X_missing = X.copy() + mask = rng.binomial( + n=np.ones(shape=(1, 4), dtype=np.int32), p=X[:, [2]] / 8 + ).astype(bool) + X_missing[mask] = np.nan + X_train, _, y_train, _ = train_test_split(X_missing, y, random_state=13) + + # fmt: off + # no black reformatting for this specific array + indices = np.array([ + 2, 81, 39, 97, 91, 38, 46, 31, 101, 13, 89, 82, 100, 42, 69, 27, 81, 16, 73, 74, + 51, 47, 107, 17, 75, 110, 20, 15, 104, 57, 26, 15, 75, 79, 35, 77, 90, 51, 46, + 13, 94, 91, 23, 8, 93, 93, 73, 77, 12, 13, 74, 109, 110, 24, 10, 23, 104, 27, + 92, 52, 20, 109, 8, 8, 28, 27, 35, 12, 12, 7, 43, 0, 30, 31, 78, 12, 24, 105, + 50, 0, 73, 12, 102, 105, 13, 31, 1, 69, 11, 32, 75, 90, 106, 94, 60, 56, 35, 17, + 62, 85, 81, 39, 80, 16, 63, 6, 80, 84, 3, 3, 76, 78 + ], dtype=np.int32) + # fmt: on + + tree = DecisionTreeClassifier( + max_depth=3, max_features="sqrt", random_state=1857819720 + ) + tree.fit(X_train[indices], y_train[indices]) + assert all(tree.tree_.impurity >= 0) + + leaves_idx = np.flatnonzero( + (tree.tree_.children_left == -1) & (tree.tree_.n_node_samples == 1) + ) + assert_allclose(tree.tree_.impurity[leaves_idx], 0.0)