applied-ai-018 committed on
Commit 2792309 · verified · 1 Parent(s): 7df0933

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full changeset.
Files changed (50)
  1. ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step40/zero/22.input_layernorm.weight/fp32.pt +3 -0
  7. venv/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py +115 -0
  8. venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py +123 -0
  14. venv/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py +73 -0
  15. venv/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py +57 -0
  16. venv/lib/python3.10/site-packages/sklearn/_build_utils/version.py +14 -0
  17. venv/lib/python3.10/site-packages/sklearn/compose/__init__.py +20 -0
  18. venv/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py +1463 -0
  19. venv/lib/python3.10/site-packages/sklearn/compose/_target.py +342 -0
  20. venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/conftest.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__init__.py +0 -0
  23. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__init__.py +0 -0
  25. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__init__.py +1 -0
  27. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/_laplacian.py +557 -0
  30. venv/lib/python3.10/site-packages/sklearn/model_selection/__init__.py +88 -0
  31. venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/sklearn/model_selection/_plot.py +907 -0
  38. venv/lib/python3.10/site-packages/sklearn/model_selection/_search.py +1918 -0
  39. venv/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py +1079 -0
  40. venv/lib/python3.10/site-packages/sklearn/model_selection/_split.py +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/model_selection/_validation.py +2360 -0
  42. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py +0 -0
  43. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py +24 -0
ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abdbb6e5879602ced860d8749aae3e59dc1d3279503c862d79d0a45f0799aac8
+ size 33555612
ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b91267d51d0a7708191247e92c23bd9e3e3da1637f74347e0b8405b2032d84f0
+ size 33555627
ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e214d708817677d080df793018a70ebfcb45824fdfb5b4d6aeffccd1376d9bc5
+ size 33555533
ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9def264e9fa9726d9c0d521ebc4cfb6f9add5248c53f6a40e7df2420a7316881
+ size 9372
ckpts/universal/global_step40/zero/22.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0f660069a4b9e01c9a6abe669b43072ddd0add8ff5668f1cac8a94f71da23b8
+ size 9387
ckpts/universal/global_step40/zero/22.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61d211221b0b9dc9c67d7fc07a8da293905b0c719d41af55533a9300564bdff0
+ size 9293
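Each of the six checkpoint files above is committed as a Git LFS pointer: only the spec version, the SHA-256 object id and the payload size are stored in Git, while the tensor itself lives in LFS storage. Below is a minimal sketch (not part of this commit) of verifying a locally downloaded object against such a pointer; the local filename is a placeholder.

import hashlib
import os

def verify_lfs_object(path, expected_oid, expected_size):
    """Compare a local file against the oid/size recorded in a Git LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    sha256 = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha256.update(chunk)
    return sha256.hexdigest() == expected_oid

# Hypothetical local copy of the first checkpoint shard listed above.
print(verify_lfs_object(
    "exp_avg.pt",
    "abdbb6e5879602ced860d8749aae3e59dc1d3279503c862d79d0a45f0799aac8",
    33555612,
))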
venv/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py ADDED
@@ -0,0 +1,115 @@
+ """
+ Utilities useful during the build.
+ """
+ # author: Andy Mueller, Gael Varoquaux
+ # license: BSD
+
+
+ import contextlib
+ import os
+
+ import sklearn
+
+ from .._min_dependencies import CYTHON_MIN_VERSION
+ from ..externals._packaging.version import parse
+ from .openmp_helpers import check_openmp_support
+ from .pre_build_helpers import basic_check_build
+
+ DEFAULT_ROOT = "sklearn"
+
+
+ def _check_cython_version():
+     message = (
+         "Please install Cython with a version >= {0} in order "
+         "to build a scikit-learn from source."
+     ).format(CYTHON_MIN_VERSION)
+     try:
+         import Cython
+     except ModuleNotFoundError as e:
+         # Re-raise with more informative error message instead:
+         raise ModuleNotFoundError(message) from e
+
+     if parse(Cython.__version__) < parse(CYTHON_MIN_VERSION):
+         message += " The current version of Cython is {} installed in {}.".format(
+             Cython.__version__, Cython.__path__
+         )
+         raise ValueError(message)
+
+
+ def cythonize_extensions(extension):
+     """Check that a recent Cython is available and cythonize extensions"""
+     _check_cython_version()
+     from Cython.Build import cythonize
+
+     # Fast fail before cythonization if compiler fails compiling basic test
+     # code even without OpenMP
+     basic_check_build()
+
+     # check simple compilation with OpenMP. If it fails scikit-learn will be
+     # built without OpenMP and the test test_openmp_supported in the test suite
+     # will fail.
+     # `check_openmp_support` compiles a small test program to see if the
+     # compilers are properly configured to build with OpenMP. This is expensive
+     # and we only want to call this function once.
+     # The result of this check is cached as a private attribute on the sklearn
+     # module (only at build-time) to be used in the build_ext subclass defined
+     # in the top-level setup.py file to actually build the compiled extensions
+     # with OpenMP flags if needed.
+     sklearn._OPENMP_SUPPORTED = check_openmp_support()
+
+     n_jobs = 1
+     with contextlib.suppress(ImportError):
+         import joblib
+
+         n_jobs = joblib.cpu_count()
+
+     # Additional checks for Cython
+     cython_enable_debug_directives = (
+         os.environ.get("SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES", "0") != "0"
+     )
+
+     compiler_directives = {
+         "language_level": 3,
+         "boundscheck": cython_enable_debug_directives,
+         "wraparound": False,
+         "initializedcheck": False,
+         "nonecheck": False,
+         "cdivision": True,
+         "profile": False,
+     }
+
+     return cythonize(
+         extension,
+         nthreads=n_jobs,
+         compiler_directives=compiler_directives,
+         annotate=False,
+     )
+
+
+ def gen_from_templates(templates):
+     """Generate cython files from a list of templates"""
+     # Lazy import because cython is not a runtime dependency.
+     from Cython import Tempita
+
+     for template in templates:
+         outfile = template.replace(".tp", "")
+
+         # if the template is not updated, no need to output the cython file
+         if not (
+             os.path.exists(outfile)
+             and os.stat(template).st_mtime < os.stat(outfile).st_mtime
+         ):
+             with open(template, "r") as f:
+                 tmpl = f.read()
+
+             tmpl_ = Tempita.sub(tmpl)
+
+             warn_msg = (
+                 "# WARNING: Do not edit this file directly.\n"
+                 f"# It is automatically generated from {template!r}.\n"
+                 "# Changes must be made there.\n\n"
+             )
+
+             with open(outfile, "w") as f:
+                 f.write(warn_msg)
+                 f.write(tmpl_)
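These helpers are only exercised at build time. As an illustrative, hedged sketch of how a setup script could drive them (the template path and extension name below are assumptions for illustration, not taken from this commit):

# Hypothetical build-time driver; assumes a scikit-learn source checkout.
from setuptools import Extension
from sklearn._build_utils import cythonize_extensions, gen_from_templates

# Regenerate a .pyx source from its Tempita template (skipped if up to date).
gen_from_templates(["sklearn/utils/_example.pyx.tp"])  # hypothetical template

extensions = [Extension("sklearn.utils._example", ["sklearn/utils/_example.pyx"])]
# Checks the Cython version, probes the compiler (with and without OpenMP),
# then returns the cythonized extension modules for setup().
ext_modules = cythonize_extensions(extensions)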
venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.51 kB).
 
venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc ADDED
Binary file (2.8 kB).
 
venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc ADDED
Binary file (1.77 kB).
 
venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc ADDED
Binary file (1.63 kB).
 
venv/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc ADDED
Binary file (670 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py ADDED
@@ -0,0 +1,123 @@
+ """Helpers for OpenMP support during the build."""
+
+ # This code is adapted for a large part from the astropy openmp helpers, which
+ # can be found at: https://github.com/astropy/extension-helpers/blob/master/extension_helpers/_openmp_helpers.py # noqa
+
+
+ import os
+ import sys
+ import textwrap
+ import warnings
+
+ from .pre_build_helpers import compile_test_program
+
+
+ def get_openmp_flag():
+     if sys.platform == "win32":
+         return ["/openmp"]
+     elif sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""):
+         # -fopenmp can't be passed as compile flag when using Apple-clang.
+         # OpenMP support has to be enabled during preprocessing.
+         #
+         # For example, our macOS wheel build jobs use the following environment
+         # variables to build with Apple-clang and the brew installed "libomp":
+         #
+         # export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp"
+         # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
+         # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
+         # export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib
+         #                          -L/usr/local/opt/libomp/lib -lomp"
+         return []
+     # Default flag for GCC and clang:
+     return ["-fopenmp"]
+
+
+ def check_openmp_support():
+     """Check whether OpenMP test code can be compiled and run"""
+     if "PYODIDE_PACKAGE_ABI" in os.environ:
+         # Pyodide doesn't support OpenMP
+         return False
+
+     code = textwrap.dedent("""\
+         #include <omp.h>
+         #include <stdio.h>
+         int main(void) {
+         #pragma omp parallel
+         printf("nthreads=%d\\n", omp_get_num_threads());
+         return 0;
+         }
+         """)
+
+     extra_preargs = os.getenv("LDFLAGS", None)
+     if extra_preargs is not None:
+         extra_preargs = extra_preargs.strip().split(" ")
+         # FIXME: temporary fix to link against system libraries on linux
+         # "-Wl,--sysroot=/" should be removed
+         extra_preargs = [
+             flag
+             for flag in extra_preargs
+             if flag.startswith(("-L", "-Wl,-rpath", "-l", "-Wl,--sysroot=/"))
+         ]
+
+     extra_postargs = get_openmp_flag()
+
+     openmp_exception = None
+     try:
+         output = compile_test_program(
+             code, extra_preargs=extra_preargs, extra_postargs=extra_postargs
+         )
+
+         if output and "nthreads=" in output[0]:
+             nthreads = int(output[0].strip().split("=")[1])
+             openmp_supported = len(output) == nthreads
+         elif "PYTHON_CROSSENV" in os.environ:
+             # Since we can't run the test program when cross-compiling
+             # assume that openmp is supported if the program can be
+             # compiled.
+             openmp_supported = True
+         else:
+             openmp_supported = False
+
+     except Exception as exception:
+         # We could be more specific and only catch: CompileError, LinkError,
+         # and subprocess.CalledProcessError.
+         # setuptools introduced CompileError and LinkError, but that requires
+         # version 61.1. Even the latest version of Ubuntu (22.04LTS) only
+         # ships with 59.6. So for now we catch all exceptions and reraise a
+         # generic exception with the original error message instead:
+         openmp_supported = False
+         openmp_exception = exception
+
+     if not openmp_supported:
+         if os.getenv("SKLEARN_FAIL_NO_OPENMP"):
+             raise Exception(
+                 "Failed to build scikit-learn with OpenMP support"
+             ) from openmp_exception
+         else:
+             message = textwrap.dedent("""
+
+                 ***********
+                 * WARNING *
+                 ***********
+
+                 It seems that scikit-learn cannot be built with OpenMP.
+
+                 - Make sure you have followed the installation instructions:
+
+                     https://scikit-learn.org/dev/developers/advanced_installation.html
+
+                 - If your compiler supports OpenMP but you still see this
+                   message, please submit a bug report at:
+
+                     https://github.com/scikit-learn/scikit-learn/issues
+
+                 - The build will continue with OpenMP-based parallelism
+                   disabled. Note however that some estimators will run in
+                   sequential mode instead of leveraging thread-based
+                   parallelism.
+
+                 ***
+                 """)
+             warnings.warn(message)
+
+     return openmp_supported
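For context, the cached `sklearn._OPENMP_SUPPORTED` flag is read by the build_ext subclass in the top-level setup.py, but the helper can also be probed directly. A hedged sketch, assuming a C compiler that setuptools can discover on the host:

# Illustrative sketch: probe OpenMP support the same way the scikit-learn build does.
from sklearn._build_utils.openmp_helpers import check_openmp_support, get_openmp_flag

if check_openmp_support():
    # On most Linux/GCC setups this is ["-fopenmp"]; on MSVC it is ["/openmp"].
    print("OpenMP available, compiler flag(s):", get_openmp_flag())
else:
    # Export SKLEARN_FAIL_NO_OPENMP=1 before building to turn the warning
    # shown above into a hard error instead.
    print("Building without OpenMP-based parallelism")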
venv/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py ADDED
@@ -0,0 +1,73 @@
+ """Helpers to check build environment before actual build of scikit-learn"""
+
+ import glob
+ import os
+ import subprocess
+ import sys
+ import tempfile
+ import textwrap
+
+ from setuptools.command.build_ext import customize_compiler, new_compiler
+
+
+ def compile_test_program(code, extra_preargs=None, extra_postargs=None):
+     """Check that some C code can be compiled and run"""
+     ccompiler = new_compiler()
+     customize_compiler(ccompiler)
+
+     start_dir = os.path.abspath(".")
+
+     with tempfile.TemporaryDirectory() as tmp_dir:
+         try:
+             os.chdir(tmp_dir)
+
+             # Write test program
+             with open("test_program.c", "w") as f:
+                 f.write(code)
+
+             os.mkdir("objects")
+
+             # Compile, test program
+             ccompiler.compile(
+                 ["test_program.c"], output_dir="objects", extra_postargs=extra_postargs
+             )
+
+             # Link test program
+             objects = glob.glob(os.path.join("objects", "*" + ccompiler.obj_extension))
+             ccompiler.link_executable(
+                 objects,
+                 "test_program",
+                 extra_preargs=extra_preargs,
+                 extra_postargs=extra_postargs,
+             )
+
+             if "PYTHON_CROSSENV" not in os.environ:
+                 # Run test program if not cross compiling
+                 # will raise a CalledProcessError if return code was non-zero
+                 output = subprocess.check_output("./test_program")
+                 output = output.decode(sys.stdout.encoding or "utf-8").splitlines()
+             else:
+                 # Return an empty output if we are cross compiling
+                 # as we cannot run the test_program
+                 output = []
+         except Exception:
+             raise
+         finally:
+             os.chdir(start_dir)
+
+     return output
+
+
+ def basic_check_build():
+     """Check basic compilation and linking of C code"""
+     if "PYODIDE_PACKAGE_ABI" in os.environ:
+         # The following check won't work in pyodide
+         return
+
+     code = textwrap.dedent("""\
+         #include <stdio.h>
+         int main(void) {
+             return 0;
+         }
+         """)
+     compile_test_program(code)
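The same helper can be exercised on its own to sanity-check a toolchain. A minimal sketch, again assuming a working C compiler in the environment:

import textwrap

from sklearn._build_utils.pre_build_helpers import compile_test_program

# Compile, link and run a trivial C program; raises if the toolchain is broken.
code = textwrap.dedent("""\
    #include <stdio.h>
    int main(void) {
        printf("hello from C\\n");
        return 0;
    }
    """)
print(compile_test_program(code))  # e.g. ['hello from C'] when not cross-compiling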
venv/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py ADDED
@@ -0,0 +1,57 @@
+ import argparse
+ import os
+
+ from Cython import Tempita as tempita
+
+ # XXX: If this import ever fails (does it really?), vendor either
+ # cython.tempita or numpy/npy_tempita.
+
+
+ def process_tempita(fromfile, outfile=None):
+     """Process tempita templated file and write out the result.
+
+     The template file is expected to end in `.c.tp` or `.pyx.tp`:
+     E.g. processing `template.c.tp` generates `template.c`.
+
+     """
+     with open(fromfile, "r", encoding="utf-8") as f:
+         template_content = f.read()
+
+     template = tempita.Template(template_content)
+     content = template.substitute()
+
+     with open(outfile, "w", encoding="utf-8") as f:
+         f.write(content)
+
+
+ def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("infile", type=str, help="Path to the input file")
+     parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
+     parser.add_argument(
+         "-i",
+         "--ignore",
+         type=str,
+         help=(
+             "An ignored input - may be useful to add a "
+             "dependency between custom targets"
+         ),
+     )
+     args = parser.parse_args()
+
+     if not args.infile.endswith(".tp"):
+         raise ValueError(f"Unexpected extension: {args.infile}")
+
+     if not args.outdir:
+         raise ValueError("Missing `--outdir` argument to tempita.py")
+
+     outdir_abs = os.path.join(os.getcwd(), args.outdir)
+     outfile = os.path.join(
+         outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0]
+     )
+
+     process_tempita(args.infile, outfile)
+
+
+ if __name__ == "__main__":
+     main()
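Besides the argparse entry point, `process_tempita` can be called directly. A short illustrative sketch; the template filename is hypothetical:

from sklearn._build_utils.tempita import process_tempita

# Expand a Tempita template into its generated source file.
# "example.pyx.tp" is a hypothetical template; the output name drops the ".tp".
process_tempita("example.pyx.tp", outfile="example.pyx")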
venv/lib/python3.10/site-packages/sklearn/_build_utils/version.py ADDED
@@ -0,0 +1,14 @@
+ #!/usr/bin/env python
+ """ Extract version number from __init__.py
+ """
+
+ import os
+
+ sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py")
+
+ data = open(sklearn_init).readlines()
+ version_line = next(line for line in data if line.startswith("__version__"))
+
+ version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")
+
+ print(version)
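This script is meant to be run standalone during packaging. A small, hedged sketch of invoking the vendored copy programmatically (the printed value below is illustrative, not taken from this commit):

import subprocess
import sys

# Run the helper and capture the version string it prints.
out = subprocess.run(
    [sys.executable, "venv/lib/python3.10/site-packages/sklearn/_build_utils/version.py"],
    capture_output=True,
    text=True,
    check=True,
)
print(out.stdout.strip())  # e.g. "1.4.2" (illustrative)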
venv/lib/python3.10/site-packages/sklearn/compose/__init__.py ADDED
@@ -0,0 +1,20 @@
+ """Meta-estimators for building composite models with transformers
+
+ In addition to its current contents, this module will eventually be home to
+ refurbished versions of Pipeline and FeatureUnion.
+
+ """
+
+ from ._column_transformer import (
+     ColumnTransformer,
+     make_column_selector,
+     make_column_transformer,
+ )
+ from ._target import TransformedTargetRegressor
+
+ __all__ = [
+     "ColumnTransformer",
+     "make_column_transformer",
+     "TransformedTargetRegressor",
+     "make_column_selector",
+ ]
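For context, the three re-exported names are typically combined as below. This is a hedged usage sketch with made-up data, not code from this commit:

import pandas as pd
from sklearn.compose import make_column_selector, make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# Hypothetical heterogeneous frame: one numeric and one categorical column.
X = pd.DataFrame({"age": [25, 32, 47], "city": ["Oslo", "Lima", "Pune"]})

ct = make_column_transformer(
    (StandardScaler(), make_column_selector(dtype_include="number")),
    (OneHotEncoder(), make_column_selector(dtype_include=object)),
    remainder="drop",
)
print(ct.fit_transform(X))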
venv/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py ADDED
@@ -0,0 +1,1463 @@
1
+ """
2
+ The :mod:`sklearn.compose._column_transformer` module implements utilities
3
+ to work with heterogeneous data and to apply different transformers to
4
+ different columns.
5
+ """
6
+
7
+ # Author: Andreas Mueller
8
+ # Joris Van den Bossche
9
+ # License: BSD
10
+ import warnings
11
+ from collections import Counter
12
+ from itertools import chain
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ from scipy import sparse
17
+
18
+ from ..base import TransformerMixin, _fit_context, clone
19
+ from ..pipeline import _fit_transform_one, _name_estimators, _transform_one
20
+ from ..preprocessing import FunctionTransformer
21
+ from ..utils import Bunch, _get_column_indices, _safe_indexing
22
+ from ..utils._estimator_html_repr import _VisualBlock
23
+ from ..utils._metadata_requests import METHODS
24
+ from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions
25
+ from ..utils._set_output import (
26
+ _get_container_adapter,
27
+ _get_output_config,
28
+ _safe_set_output,
29
+ )
30
+ from ..utils.metadata_routing import (
31
+ MetadataRouter,
32
+ MethodMapping,
33
+ _raise_for_params,
34
+ _routing_enabled,
35
+ process_routing,
36
+ )
37
+ from ..utils.metaestimators import _BaseComposition
38
+ from ..utils.parallel import Parallel, delayed
39
+ from ..utils.validation import (
40
+ _check_feature_names_in,
41
+ _get_feature_names,
42
+ _is_pandas_df,
43
+ _num_samples,
44
+ check_array,
45
+ check_is_fitted,
46
+ )
47
+
48
+ __all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"]
49
+
50
+
51
+ _ERR_MSG_1DCOLUMN = (
52
+ "1D data passed to a transformer that expects 2D data. "
53
+ "Try to specify the column selection as a list of one "
54
+ "item instead of a scalar."
55
+ )
56
+
57
+
58
+ class ColumnTransformer(TransformerMixin, _BaseComposition):
59
+ """Applies transformers to columns of an array or pandas DataFrame.
60
+
61
+ This estimator allows different columns or column subsets of the input
62
+ to be transformed separately and the features generated by each transformer
63
+ will be concatenated to form a single feature space.
64
+ This is useful for heterogeneous or columnar data, to combine several
65
+ feature extraction mechanisms or transformations into a single transformer.
66
+
67
+ Read more in the :ref:`User Guide <column_transformer>`.
68
+
69
+ .. versionadded:: 0.20
70
+
71
+ Parameters
72
+ ----------
73
+ transformers : list of tuples
74
+ List of (name, transformer, columns) tuples specifying the
75
+ transformer objects to be applied to subsets of the data.
76
+
77
+ name : str
78
+ Like in Pipeline and FeatureUnion, this allows the transformer and
79
+ its parameters to be set using ``set_params`` and searched in grid
80
+ search.
81
+ transformer : {'drop', 'passthrough'} or estimator
82
+ Estimator must support :term:`fit` and :term:`transform`.
83
+ Special-cased strings 'drop' and 'passthrough' are accepted as
84
+ well, to indicate to drop the columns or to pass them through
85
+ untransformed, respectively.
86
+ columns : str, array-like of str, int, array-like of int, \
87
+ array-like of bool, slice or callable
88
+ Indexes the data on its second axis. Integers are interpreted as
89
+ positional columns, while strings can reference DataFrame columns
90
+ by name. A scalar string or int should be used where
91
+ ``transformer`` expects X to be a 1d array-like (vector),
92
+ otherwise a 2d array will be passed to the transformer.
93
+ A callable is passed the input data `X` and can return any of the
94
+ above. To select multiple columns by name or dtype, you can use
95
+ :obj:`make_column_selector`.
96
+
97
+ remainder : {'drop', 'passthrough'} or estimator, default='drop'
98
+ By default, only the specified columns in `transformers` are
99
+ transformed and combined in the output, and the non-specified
100
+ columns are dropped. (default of ``'drop'``).
101
+ By specifying ``remainder='passthrough'``, all remaining columns that
102
+ were not specified in `transformers`, but present in the data passed
103
+ to `fit` will be automatically passed through. This subset of columns
104
+ is concatenated with the output of the transformers. For dataframes,
105
+ extra columns not seen during `fit` will be excluded from the output
106
+ of `transform`.
107
+ By setting ``remainder`` to be an estimator, the remaining
108
+ non-specified columns will use the ``remainder`` estimator. The
109
+ estimator must support :term:`fit` and :term:`transform`.
110
+ Note that using this feature requires that the DataFrame columns
111
+ input at :term:`fit` and :term:`transform` have identical order.
112
+
113
+ sparse_threshold : float, default=0.3
114
+ If the output of the different transformers contains sparse matrices,
115
+ these will be stacked as a sparse matrix if the overall density is
116
+ lower than this value. Use ``sparse_threshold=0`` to always return
117
+ dense. When the transformed output consists of all dense data, the
118
+ stacked result will be dense, and this keyword will be ignored.
119
+
120
+ n_jobs : int, default=None
121
+ Number of jobs to run in parallel.
122
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
123
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
124
+ for more details.
125
+
126
+ transformer_weights : dict, default=None
127
+ Multiplicative weights for features per transformer. The output of the
128
+ transformer is multiplied by these weights. Keys are transformer names,
129
+ values the weights.
130
+
131
+ verbose : bool, default=False
132
+ If True, the time elapsed while fitting each transformer will be
133
+ printed as it is completed.
134
+
135
+ verbose_feature_names_out : bool, default=True
136
+ If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
137
+ all feature names with the name of the transformer that generated that
138
+ feature.
139
+ If False, :meth:`ColumnTransformer.get_feature_names_out` will not
140
+ prefix any feature names and will error if feature names are not
141
+ unique.
142
+
143
+ .. versionadded:: 1.0
144
+
145
+ Attributes
146
+ ----------
147
+ transformers_ : list
148
+ The collection of fitted transformers as tuples of (name,
149
+ fitted_transformer, column). `fitted_transformer` can be an estimator,
150
+ or `'drop'`; `'passthrough'` is replaced with an equivalent
151
+ :class:`~sklearn.preprocessing.FunctionTransformer`. In case there were
152
+ no columns selected, this will be the unfitted transformer. If there
153
+ are remaining columns, the final element is a tuple of the form:
154
+ ('remainder', transformer, remaining_columns) corresponding to the
155
+ ``remainder`` parameter. If there are remaining columns, then
156
+ ``len(transformers_)==len(transformers)+1``, otherwise
157
+ ``len(transformers_)==len(transformers)``.
158
+
159
+ named_transformers_ : :class:`~sklearn.utils.Bunch`
160
+ Read-only attribute to access any transformer by given name.
161
+ Keys are transformer names and values are the fitted transformer
162
+ objects.
163
+
164
+ sparse_output_ : bool
165
+ Boolean flag indicating whether the output of ``transform`` is a
166
+ sparse matrix or a dense numpy array, which depends on the output
167
+ of the individual transformers and the `sparse_threshold` keyword.
168
+
169
+ output_indices_ : dict
170
+ A dictionary from each transformer name to a slice, where the slice
171
+ corresponds to indices in the transformed output. This is useful to
172
+ inspect which transformer is responsible for which transformed
173
+ feature(s).
174
+
175
+ .. versionadded:: 1.0
176
+
177
+ n_features_in_ : int
178
+ Number of features seen during :term:`fit`. Only defined if the
179
+ underlying transformers expose such an attribute when fit.
180
+
181
+ .. versionadded:: 0.24
182
+
183
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
184
+ Names of features seen during :term:`fit`. Defined only when `X`
185
+ has feature names that are all strings.
186
+
187
+ .. versionadded:: 1.0
188
+
189
+ See Also
190
+ --------
191
+ make_column_transformer : Convenience function for
192
+ combining the outputs of multiple transformer objects applied to
193
+ column subsets of the original feature space.
194
+ make_column_selector : Convenience function for selecting
195
+ columns based on datatype or the columns name with a regex pattern.
196
+
197
+ Notes
198
+ -----
199
+ The order of the columns in the transformed feature matrix follows the
200
+ order of how the columns are specified in the `transformers` list.
201
+ Columns of the original feature matrix that are not specified are
202
+ dropped from the resulting transformed feature matrix, unless specified
203
+ in the `passthrough` keyword. Those columns specified with `passthrough`
204
+ are added at the right to the output of the transformers.
205
+
206
+ Examples
207
+ --------
208
+ >>> import numpy as np
209
+ >>> from sklearn.compose import ColumnTransformer
210
+ >>> from sklearn.preprocessing import Normalizer
211
+ >>> ct = ColumnTransformer(
212
+ ... [("norm1", Normalizer(norm='l1'), [0, 1]),
213
+ ... ("norm2", Normalizer(norm='l1'), slice(2, 4))])
214
+ >>> X = np.array([[0., 1., 2., 2.],
215
+ ... [1., 1., 0., 1.]])
216
+ >>> # Normalizer scales each row of X to unit norm. A separate scaling
217
+ >>> # is applied for the two first and two last elements of each
218
+ >>> # row independently.
219
+ >>> ct.fit_transform(X)
220
+ array([[0. , 1. , 0.5, 0.5],
221
+ [0.5, 0.5, 0. , 1. ]])
222
+
223
+ :class:`ColumnTransformer` can be configured with a transformer that requires
224
+ a 1d array by setting the column to a string:
225
+
226
+ >>> from sklearn.feature_extraction.text import CountVectorizer
227
+ >>> from sklearn.preprocessing import MinMaxScaler
228
+ >>> import pandas as pd # doctest: +SKIP
229
+ >>> X = pd.DataFrame({
230
+ ... "documents": ["First item", "second one here", "Is this the last?"],
231
+ ... "width": [3, 4, 5],
232
+ ... }) # doctest: +SKIP
233
+ >>> # "documents" is a string which configures ColumnTransformer to
234
+ >>> # pass the documents column as a 1d array to the CountVectorizer
235
+ >>> ct = ColumnTransformer(
236
+ ... [("text_preprocess", CountVectorizer(), "documents"),
237
+ ... ("num_preprocess", MinMaxScaler(), ["width"])])
238
+ >>> X_trans = ct.fit_transform(X) # doctest: +SKIP
239
+
240
+ For a more detailed example of usage, see
241
+ :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
242
+ """
243
+
244
+ _required_parameters = ["transformers"]
245
+
246
+ _parameter_constraints: dict = {
247
+ "transformers": [list, Hidden(tuple)],
248
+ "remainder": [
249
+ StrOptions({"drop", "passthrough"}),
250
+ HasMethods(["fit", "transform"]),
251
+ HasMethods(["fit_transform", "transform"]),
252
+ ],
253
+ "sparse_threshold": [Interval(Real, 0, 1, closed="both")],
254
+ "n_jobs": [Integral, None],
255
+ "transformer_weights": [dict, None],
256
+ "verbose": ["verbose"],
257
+ "verbose_feature_names_out": ["boolean"],
258
+ }
259
+
260
+ def __init__(
261
+ self,
262
+ transformers,
263
+ *,
264
+ remainder="drop",
265
+ sparse_threshold=0.3,
266
+ n_jobs=None,
267
+ transformer_weights=None,
268
+ verbose=False,
269
+ verbose_feature_names_out=True,
270
+ ):
271
+ self.transformers = transformers
272
+ self.remainder = remainder
273
+ self.sparse_threshold = sparse_threshold
274
+ self.n_jobs = n_jobs
275
+ self.transformer_weights = transformer_weights
276
+ self.verbose = verbose
277
+ self.verbose_feature_names_out = verbose_feature_names_out
278
+
279
+ @property
280
+ def _transformers(self):
281
+ """
282
+ Internal list of transformer only containing the name and
283
+ transformers, dropping the columns.
284
+
285
+ DO NOT USE: This is for the implementation of get_params via
286
+ BaseComposition._get_params which expects lists of tuples of len 2.
287
+
288
+ To iterate through the transformers, use ``self._iter`` instead.
289
+ """
290
+ try:
291
+ return [(name, trans) for name, trans, _ in self.transformers]
292
+ except (TypeError, ValueError):
293
+ return self.transformers
294
+
295
+ @_transformers.setter
296
+ def _transformers(self, value):
297
+ """DO NOT USE: This is for the implementation of set_params via
298
+ BaseComposition._get_params which gives lists of tuples of len 2.
299
+ """
300
+ try:
301
+ self.transformers = [
302
+ (name, trans, col)
303
+ for ((name, trans), (_, _, col)) in zip(value, self.transformers)
304
+ ]
305
+ except (TypeError, ValueError):
306
+ self.transformers = value
307
+
308
+ def set_output(self, *, transform=None):
309
+ """Set the output container when `"transform"` and `"fit_transform"` are called.
310
+
311
+ Calling `set_output` will set the output of all estimators in `transformers`
312
+ and `transformers_`.
313
+
314
+ Parameters
315
+ ----------
316
+ transform : {"default", "pandas"}, default=None
317
+ Configure output of `transform` and `fit_transform`.
318
+
319
+ - `"default"`: Default output format of a transformer
320
+ - `"pandas"`: DataFrame output
321
+ - `"polars"`: Polars output
322
+ - `None`: Transform configuration is unchanged
323
+
324
+ .. versionadded:: 1.4
325
+ `"polars"` option was added.
326
+
327
+ Returns
328
+ -------
329
+ self : estimator instance
330
+ Estimator instance.
331
+ """
332
+ super().set_output(transform=transform)
333
+
334
+ transformers = (
335
+ trans
336
+ for _, trans, _ in chain(
337
+ self.transformers, getattr(self, "transformers_", [])
338
+ )
339
+ if trans not in {"passthrough", "drop"}
340
+ )
341
+ for trans in transformers:
342
+ _safe_set_output(trans, transform=transform)
343
+
344
+ if self.remainder not in {"passthrough", "drop"}:
345
+ _safe_set_output(self.remainder, transform=transform)
346
+
347
+ return self
348
+
349
+ def get_params(self, deep=True):
350
+ """Get parameters for this estimator.
351
+
352
+ Returns the parameters given in the constructor as well as the
353
+ estimators contained within the `transformers` of the
354
+ `ColumnTransformer`.
355
+
356
+ Parameters
357
+ ----------
358
+ deep : bool, default=True
359
+ If True, will return the parameters for this estimator and
360
+ contained subobjects that are estimators.
361
+
362
+ Returns
363
+ -------
364
+ params : dict
365
+ Parameter names mapped to their values.
366
+ """
367
+ return self._get_params("_transformers", deep=deep)
368
+
369
+ def set_params(self, **kwargs):
370
+ """Set the parameters of this estimator.
371
+
372
+ Valid parameter keys can be listed with ``get_params()``. Note that you
373
+ can directly set the parameters of the estimators contained in
374
+ `transformers` of `ColumnTransformer`.
375
+
376
+ Parameters
377
+ ----------
378
+ **kwargs : dict
379
+ Estimator parameters.
380
+
381
+ Returns
382
+ -------
383
+ self : ColumnTransformer
384
+ This estimator.
385
+ """
386
+ self._set_params("_transformers", **kwargs)
387
+ return self
388
+
389
+ def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns):
390
+ """
391
+ Generate (name, trans, column, weight) tuples.
392
+
393
+
394
+ Parameters
395
+ ----------
396
+ fitted : bool
397
+ If True, use the fitted transformers (``self.transformers_``) to
398
+ iterate through transformers, else use the transformers passed by
399
+ the user (``self.transformers``).
400
+
401
+ column_as_labels : bool
402
+ If True, columns are returned as string labels. If False, columns
403
+ are returned as they were given by the user. This can only be True
404
+ if the ``ColumnTransformer`` is already fitted.
405
+
406
+ skip_drop : bool
407
+ If True, 'drop' transformers are filtered out.
408
+
409
+ skip_empty_columns : bool
410
+ If True, transformers with empty selected columns are filtered out.
411
+
412
+ Yields
413
+ ------
414
+ A generator of tuples containing:
415
+ - name : the name of the transformer
416
+ - transformer : the transformer object
417
+ - columns : the columns for that transformer
418
+ - weight : the weight of the transformer
419
+ """
420
+ if fitted:
421
+ transformers = self.transformers_
422
+ else:
423
+ # interleave the validated column specifiers
424
+ transformers = [
425
+ (name, trans, column)
426
+ for (name, trans, _), column in zip(self.transformers, self._columns)
427
+ ]
428
+ # add transformer tuple for remainder
429
+ if self._remainder[2]:
430
+ transformers = chain(transformers, [self._remainder])
431
+ get_weight = (self.transformer_weights or {}).get
432
+
433
+ for name, trans, columns in transformers:
434
+ if skip_drop and trans == "drop":
435
+ continue
436
+ if skip_empty_columns and _is_empty_column_selection(columns):
437
+ continue
438
+
439
+ if column_as_labels:
440
+ # Convert all columns to using their string labels
441
+ columns_is_scalar = np.isscalar(columns)
442
+
443
+ indices = self._transformer_to_input_indices[name]
444
+ columns = self.feature_names_in_[indices]
445
+
446
+ if columns_is_scalar:
447
+ # selection is done with one dimension
448
+ columns = columns[0]
449
+
450
+ yield (name, trans, columns, get_weight(name))
451
+
452
+ def _validate_transformers(self):
453
+ """Validate names of transformers and the transformers themselves.
454
+
455
+ This checks whether given transformers have the required methods, i.e.
456
+ `fit` or `fit_transform` and `transform` implemented.
457
+ """
458
+ if not self.transformers:
459
+ return
460
+
461
+ names, transformers, _ = zip(*self.transformers)
462
+
463
+ # validate names
464
+ self._validate_names(names)
465
+
466
+ # validate estimators
467
+ for t in transformers:
468
+ if t in ("drop", "passthrough"):
469
+ continue
470
+ if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
471
+ t, "transform"
472
+ ):
473
+ # Used to validate the transformers in the `transformers` list
474
+ raise TypeError(
475
+ "All estimators should implement fit and "
476
+ "transform, or can be 'drop' or 'passthrough' "
477
+ "specifiers. '%s' (type %s) doesn't." % (t, type(t))
478
+ )
479
+
480
+ def _validate_column_callables(self, X):
481
+ """
482
+ Converts callable column specifications.
483
+
484
+ This stores a dictionary of the form `{step_name: column_indices}` and
485
+ calls the `columns` on `X` if `columns` is a callable for a given
486
+ transformer.
487
+
488
+ The results are then stored in `self._transformer_to_input_indices`.
489
+ """
490
+ all_columns = []
491
+ transformer_to_input_indices = {}
492
+ for name, _, columns in self.transformers:
493
+ if callable(columns):
494
+ columns = columns(X)
495
+ all_columns.append(columns)
496
+ transformer_to_input_indices[name] = _get_column_indices(X, columns)
497
+
498
+ self._columns = all_columns
499
+ self._transformer_to_input_indices = transformer_to_input_indices
500
+
501
+ def _validate_remainder(self, X):
502
+ """
503
+ Validates ``remainder`` and defines ``_remainder`` targeting
504
+ the remaining columns.
505
+ """
506
+ cols = set(chain(*self._transformer_to_input_indices.values()))
507
+ remaining = sorted(set(range(self.n_features_in_)) - cols)
508
+ self._remainder = ("remainder", self.remainder, remaining)
509
+ self._transformer_to_input_indices["remainder"] = remaining
510
+
511
+ @property
512
+ def named_transformers_(self):
513
+ """Access the fitted transformer by name.
514
+
515
+ Read-only attribute to access any transformer by given name.
516
+ Keys are transformer names and values are the fitted transformer
517
+ objects.
518
+ """
519
+ # Use Bunch object to improve autocomplete
520
+ return Bunch(**{name: trans for name, trans, _ in self.transformers_})
521
+
522
+ def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in):
523
+ """Gets feature names of transformer.
524
+
525
+ Used in conjunction with self._iter(fitted=True) in get_feature_names_out.
526
+ """
527
+ column_indices = self._transformer_to_input_indices[name]
528
+ names = feature_names_in[column_indices]
529
+ # An actual transformer
530
+ if not hasattr(trans, "get_feature_names_out"):
531
+ raise AttributeError(
532
+ f"Transformer {name} (type {type(trans).__name__}) does "
533
+ "not provide get_feature_names_out."
534
+ )
535
+ return trans.get_feature_names_out(names)
536
+
537
+ def get_feature_names_out(self, input_features=None):
538
+ """Get output feature names for transformation.
539
+
540
+ Parameters
541
+ ----------
542
+ input_features : array-like of str or None, default=None
543
+ Input features.
544
+
545
+ - If `input_features` is `None`, then `feature_names_in_` is
546
+ used as feature names in. If `feature_names_in_` is not defined,
547
+ then the following input feature names are generated:
548
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
549
+ - If `input_features` is an array-like, then `input_features` must
550
+ match `feature_names_in_` if `feature_names_in_` is defined.
551
+
552
+ Returns
553
+ -------
554
+ feature_names_out : ndarray of str objects
555
+ Transformed feature names.
556
+ """
557
+ check_is_fitted(self)
558
+ input_features = _check_feature_names_in(self, input_features)
559
+
560
+ # List of tuples (name, feature_names_out)
561
+ transformer_with_feature_names_out = []
562
+ for name, trans, *_ in self._iter(
563
+ fitted=True,
564
+ column_as_labels=False,
565
+ skip_empty_columns=True,
566
+ skip_drop=True,
567
+ ):
568
+ feature_names_out = self._get_feature_name_out_for_transformer(
569
+ name, trans, input_features
570
+ )
571
+ if feature_names_out is None:
572
+ continue
573
+ transformer_with_feature_names_out.append((name, feature_names_out))
574
+
575
+ if not transformer_with_feature_names_out:
576
+ # No feature names
577
+ return np.array([], dtype=object)
578
+
579
+ return self._add_prefix_for_feature_names_out(
580
+ transformer_with_feature_names_out
581
+ )
582
+
583
+ def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):
584
+ """Add prefix for feature names out that includes the transformer names.
585
+
586
+ Parameters
587
+ ----------
588
+ transformer_with_feature_names_out : list of tuples of (str, array-like of str)
589
+ The tuple consistent of the transformer's name and its feature names out.
590
+
591
+ Returns
592
+ -------
593
+ feature_names_out : ndarray of shape (n_features,), dtype=str
594
+ Transformed feature names.
595
+ """
596
+ if self.verbose_feature_names_out:
597
+ # Prefix the feature names out with the transformers name
598
+ names = list(
599
+ chain.from_iterable(
600
+ (f"{name}__{i}" for i in feature_names_out)
601
+ for name, feature_names_out in transformer_with_feature_names_out
602
+ )
603
+ )
604
+ return np.asarray(names, dtype=object)
605
+
606
+ # verbose_feature_names_out is False
607
+ # Check that names are all unique without a prefix
608
+ feature_names_count = Counter(
609
+ chain.from_iterable(s for _, s in transformer_with_feature_names_out)
610
+ )
611
+ top_6_overlap = [
612
+ name for name, count in feature_names_count.most_common(6) if count > 1
613
+ ]
614
+ top_6_overlap.sort()
615
+ if top_6_overlap:
616
+ if len(top_6_overlap) == 6:
617
+ # There are more than 5 overlapping names, we only show the 5
618
+ # of the feature names
619
+ names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
620
+ else:
621
+ names_repr = str(top_6_overlap)
622
+ raise ValueError(
623
+ f"Output feature names: {names_repr} are not unique. Please set "
624
+ "verbose_feature_names_out=True to add prefixes to feature names"
625
+ )
626
+
627
+ return np.concatenate(
628
+ [name for _, name in transformer_with_feature_names_out],
629
+ )
630
+
631
+ def _update_fitted_transformers(self, transformers):
632
+ """Set self.transformers_ from given transformers.
633
+
634
+ Parameters
635
+ ----------
636
+ transformers : list of estimators
637
+ The fitted estimators as the output of
638
+ `self._call_func_on_transformers(func=_fit_transform_one, ...)`.
639
+ That function doesn't include 'drop' or transformers for which no
640
+ column is selected. 'drop' is kept as is, and for the no-column
641
+ transformers the unfitted transformer is put in
642
+ `self.transformers_`.
643
+ """
644
+ # transformers are fitted; excludes 'drop' cases
645
+ fitted_transformers = iter(transformers)
646
+ transformers_ = []
647
+
648
+ for name, old, column, _ in self._iter(
649
+ fitted=False,
650
+ column_as_labels=False,
651
+ skip_drop=False,
652
+ skip_empty_columns=False,
653
+ ):
654
+ if old == "drop":
655
+ trans = "drop"
656
+ elif _is_empty_column_selection(column):
657
+ trans = old
658
+ else:
659
+ trans = next(fitted_transformers)
660
+ transformers_.append((name, trans, column))
661
+
662
+ # sanity check that transformers is exhausted
663
+ assert not list(fitted_transformers)
664
+ self.transformers_ = transformers_
665
+
666
+ def _validate_output(self, result):
667
+ """
668
+ Ensure that the output of each transformer is 2D. Otherwise
669
+ hstack can raise an error or produce incorrect results.
670
+ """
671
+ names = [
672
+ name
673
+ for name, _, _, _ in self._iter(
674
+ fitted=True,
675
+ column_as_labels=False,
676
+ skip_drop=True,
677
+ skip_empty_columns=True,
678
+ )
679
+ ]
680
+ for Xs, name in zip(result, names):
681
+ if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"):
682
+ raise ValueError(
683
+ "The output of the '{0}' transformer should be 2D (numpy array, "
684
+ "scipy sparse array, dataframe).".format(name)
685
+ )
686
+ if _get_output_config("transform", self)["dense"] == "pandas":
687
+ return
688
+ try:
689
+ import pandas as pd
690
+ except ImportError:
691
+ return
692
+ for Xs, name in zip(result, names):
693
+ if not _is_pandas_df(Xs):
694
+ continue
695
+ for col_name, dtype in Xs.dtypes.to_dict().items():
696
+ if getattr(dtype, "na_value", None) is not pd.NA:
697
+ continue
698
+ if pd.NA not in Xs[col_name].values:
699
+ continue
700
+ class_name = self.__class__.__name__
701
+ # TODO(1.6): replace warning with ValueError
702
+ warnings.warn(
703
+ (
704
+ f"The output of the '{name}' transformer for column"
705
+ f" '{col_name}' has dtype {dtype} and uses pandas.NA to"
706
+ " represent null values. Storing this output in a numpy array"
707
+ " can cause errors in downstream scikit-learn estimators, and"
708
+ " inefficiencies. Starting with scikit-learn version 1.6, this"
709
+ " will raise a ValueError. To avoid this problem you can (i)"
710
+ " store the output in a pandas DataFrame by using"
711
+ f" {class_name}.set_output(transform='pandas') or (ii) modify"
712
+ f" the input data or the '{name}' transformer to avoid the"
713
+ " presence of pandas.NA (for example by using"
714
+ " pandas.DataFrame.astype)."
715
+ ),
716
+ FutureWarning,
717
+ )
718
+
719
+ def _record_output_indices(self, Xs):
720
+ """
721
+ Record which transformer produced which column.
722
+ """
723
+ idx = 0
724
+ self.output_indices_ = {}
725
+
726
+ for transformer_idx, (name, _, _, _) in enumerate(
727
+ self._iter(
728
+ fitted=True,
729
+ column_as_labels=False,
730
+ skip_drop=True,
731
+ skip_empty_columns=True,
732
+ )
733
+ ):
734
+ n_columns = Xs[transformer_idx].shape[1]
735
+ self.output_indices_[name] = slice(idx, idx + n_columns)
736
+ idx += n_columns
737
+
738
+ # `_iter` only generates transformers that have a non empty
739
+ # selection. Here we set empty slices for transformers that
740
+ # generate no output, which are safe for indexing
741
+ all_names = [t[0] for t in self.transformers] + ["remainder"]
742
+ for name in all_names:
743
+ if name not in self.output_indices_:
744
+ self.output_indices_[name] = slice(0, 0)
745
+
746
+ def _log_message(self, name, idx, total):
747
+ if not self.verbose:
748
+ return None
749
+ return "(%d of %d) Processing %s" % (idx, total, name)
750
+
751
+ def _call_func_on_transformers(self, X, y, func, column_as_labels, routed_params):
752
+ """
753
+ Private function to fit and/or transform on demand.
754
+
755
+ Parameters
756
+ ----------
757
+ X : {array-like, dataframe} of shape (n_samples, n_features)
758
+ The data to be used in fit and/or transform.
759
+
760
+ y : array-like of shape (n_samples,)
761
+ Targets.
762
+
763
+ func : callable
764
+ Function to call, which can be _fit_transform_one or
765
+ _transform_one.
766
+
767
+ column_as_labels : bool
768
+ Used to iterate through transformers. If True, columns are returned
769
+ as strings. If False, columns are returned as they were given by
770
+ the user. Can be True only if the ``ColumnTransformer`` is already
771
+ fitted.
772
+
773
+ routed_params : dict
774
+ The routed parameters as the output from ``process_routing``.
775
+
776
+ Returns
777
+ -------
778
+ Return value (transformers and/or transformed X data) depends
779
+ on the passed function.
780
+ """
781
+ if func is _fit_transform_one:
782
+ fitted = False
783
+ else: # func is _transform_one
784
+ fitted = True
785
+
786
+ transformers = list(
787
+ self._iter(
788
+ fitted=fitted,
789
+ column_as_labels=column_as_labels,
790
+ skip_drop=True,
791
+ skip_empty_columns=True,
792
+ )
793
+ )
794
+ try:
795
+ jobs = []
796
+ for idx, (name, trans, column, weight) in enumerate(transformers, start=1):
797
+ if func is _fit_transform_one:
798
+ if trans == "passthrough":
799
+ output_config = _get_output_config("transform", self)
800
+ trans = FunctionTransformer(
801
+ accept_sparse=True,
802
+ check_inverse=False,
803
+ feature_names_out="one-to-one",
804
+ ).set_output(transform=output_config["dense"])
805
+
806
+ extra_args = dict(
807
+ message_clsname="ColumnTransformer",
808
+ message=self._log_message(name, idx, len(transformers)),
809
+ )
810
+ else: # func is _transform_one
811
+ extra_args = {}
812
+ jobs.append(
813
+ delayed(func)(
814
+ transformer=clone(trans) if not fitted else trans,
815
+ X=_safe_indexing(X, column, axis=1),
816
+ y=y,
817
+ weight=weight,
818
+ **extra_args,
819
+ params=routed_params[name],
820
+ )
821
+ )
822
+
823
+ return Parallel(n_jobs=self.n_jobs)(jobs)
824
+
825
+ except ValueError as e:
826
+ if "Expected 2D array, got 1D array instead" in str(e):
827
+ raise ValueError(_ERR_MSG_1DCOLUMN) from e
828
+ else:
829
+ raise
830
+
831
+ def fit(self, X, y=None, **params):
832
+ """Fit all transformers using X.
833
+
834
+ Parameters
835
+ ----------
836
+ X : {array-like, dataframe} of shape (n_samples, n_features)
837
+ Input data, of which specified subsets are used to fit the
838
+ transformers.
839
+
840
+ y : array-like of shape (n_samples,...), default=None
841
+ Targets for supervised learning.
842
+
843
+ **params : dict, default=None
844
+ Parameters to be passed to the underlying transformers' ``fit`` and
845
+ ``transform`` methods.
846
+
847
+ You can only pass this if metadata routing is enabled, which you
848
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
849
+
850
+ .. versionadded:: 1.4
851
+
852
+ Returns
853
+ -------
854
+ self : ColumnTransformer
855
+ This estimator.
856
+ """
857
+ _raise_for_params(params, self, "fit")
858
+ # we use fit_transform to make sure to set sparse_output_ (for which we
859
+ # need the transformed data) to have consistent output type in predict
860
+ self.fit_transform(X, y=y, **params)
861
+ return self
862
+
863
+ @_fit_context(
864
+ # estimators in ColumnTransformer.transformers are not validated yet
865
+ prefer_skip_nested_validation=False
866
+ )
867
+ def fit_transform(self, X, y=None, **params):
868
+ """Fit all transformers, transform the data and concatenate results.
869
+
870
+ Parameters
871
+ ----------
872
+ X : {array-like, dataframe} of shape (n_samples, n_features)
873
+ Input data, of which specified subsets are used to fit the
874
+ transformers.
875
+
876
+ y : array-like of shape (n_samples,), default=None
877
+ Targets for supervised learning.
878
+
879
+ **params : dict, default=None
880
+ Parameters to be passed to the underlying transformers' ``fit`` and
881
+ ``transform`` methods.
882
+
883
+ You can only pass this if metadata routing is enabled, which you
884
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
885
+
886
+ .. versionadded:: 1.4
887
+
888
+ Returns
889
+ -------
890
+ X_t : {array-like, sparse matrix} of \
891
+ shape (n_samples, sum_n_components)
892
+ Horizontally stacked results of transformers. sum_n_components is the
893
+ sum of n_components (output dimension) over transformers. If
894
+ any result is a sparse matrix, everything will be converted to
895
+ sparse matrices.
896
+ """
897
+ _raise_for_params(params, self, "fit_transform")
898
+ self._check_feature_names(X, reset=True)
899
+
900
+ X = _check_X(X)
901
+ # set n_features_in_ attribute
902
+ self._check_n_features(X, reset=True)
903
+ self._validate_transformers()
904
+ n_samples = _num_samples(X)
905
+
906
+ self._validate_column_callables(X)
907
+ self._validate_remainder(X)
908
+
909
+ if _routing_enabled():
910
+ routed_params = process_routing(self, "fit_transform", **params)
911
+ else:
912
+ routed_params = self._get_empty_routing()
913
+
914
+ result = self._call_func_on_transformers(
915
+ X,
916
+ y,
917
+ _fit_transform_one,
918
+ column_as_labels=False,
919
+ routed_params=routed_params,
920
+ )
921
+
922
+ if not result:
923
+ self._update_fitted_transformers([])
924
+ # All transformers are None
925
+ return np.zeros((n_samples, 0))
926
+
927
+ Xs, transformers = zip(*result)
928
+
929
+ # determine if concatenated output will be sparse or not
930
+ if any(sparse.issparse(X) for X in Xs):
931
+ nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
932
+ total = sum(
933
+ X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs
934
+ )
935
+ density = nnz / total
936
+ self.sparse_output_ = density < self.sparse_threshold
937
+ else:
938
+ self.sparse_output_ = False
939
+
940
+ self._update_fitted_transformers(transformers)
941
+ self._validate_output(Xs)
942
+ self._record_output_indices(Xs)
943
+
944
+ return self._hstack(list(Xs), n_samples=n_samples)
945
+
946
+ def transform(self, X, **params):
947
+ """Transform X separately by each transformer, concatenate results.
948
+
949
+ Parameters
950
+ ----------
951
+ X : {array-like, dataframe} of shape (n_samples, n_features)
952
+ The data to be transformed by subset.
953
+
954
+ **params : dict, default=None
955
+ Parameters to be passed to the underlying transformers' ``transform``
956
+ method.
957
+
958
+ You can only pass this if metadata routing is enabled, which you
959
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
960
+
961
+ .. versionadded:: 1.4
962
+
963
+ Returns
964
+ -------
965
+ X_t : {array-like, sparse matrix} of \
966
+ shape (n_samples, sum_n_components)
967
+ Horizontally stacked results of transformers. sum_n_components is the
968
+ sum of n_components (output dimension) over transformers. If
969
+ any result is a sparse matrix, everything will be converted to
970
+ sparse matrices.
971
+ """
972
+ _raise_for_params(params, self, "transform")
973
+ check_is_fitted(self)
974
+ X = _check_X(X)
975
+
976
+ # If ColumnTransformer is fit using a dataframe, and now a dataframe is
977
+ # passed to be transformed, we select columns by name instead. This
978
+ # enables the user to pass X at transform time with extra columns which
979
+ # were not present at fit time, and the order of the columns doesn't
980
+ # matter.
981
+ fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and (
982
+ _is_pandas_df(X) or hasattr(X, "__dataframe__")
983
+ )
984
+
985
+ n_samples = _num_samples(X)
986
+ column_names = _get_feature_names(X)
987
+
988
+ if fit_dataframe_and_transform_dataframe:
989
+ named_transformers = self.named_transformers_
990
+ # check that all names seen in fit are in transform, unless
991
+ # they were dropped
992
+ non_dropped_indices = [
993
+ ind
994
+ for name, ind in self._transformer_to_input_indices.items()
995
+ if name in named_transformers and named_transformers[name] != "drop"
996
+ ]
997
+
998
+ all_indices = set(chain(*non_dropped_indices))
999
+ all_names = set(self.feature_names_in_[ind] for ind in all_indices)
1000
+
1001
+ diff = all_names - set(column_names)
1002
+ if diff:
1003
+ raise ValueError(f"columns are missing: {diff}")
1004
+ else:
1005
+ # ndarray was used for fitting or transforming, thus we only
1006
+ # check that n_features_in_ is consistent
1007
+ self._check_n_features(X, reset=False)
1008
+
1009
+ if _routing_enabled():
1010
+ routed_params = process_routing(self, "transform", **params)
1011
+ else:
1012
+ routed_params = self._get_empty_routing()
1013
+
1014
+ Xs = self._call_func_on_transformers(
1015
+ X,
1016
+ None,
1017
+ _transform_one,
1018
+ column_as_labels=fit_dataframe_and_transform_dataframe,
1019
+ routed_params=routed_params,
1020
+ )
1021
+ self._validate_output(Xs)
1022
+
1023
+ if not Xs:
1024
+ # All transformers are None
1025
+ return np.zeros((n_samples, 0))
1026
+
1027
+ return self._hstack(list(Xs), n_samples=n_samples)
1028
+
1029
+ def _hstack(self, Xs, *, n_samples):
1030
+ """Stacks Xs horizontally.
1031
+
1032
+ This allows subclasses to control the stacking behavior, while reusing
1033
+ everything else from ColumnTransformer.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ Xs : list of {array-like, sparse matrix, dataframe}
1038
+ The container to concatenate.
1039
+ n_samples : int
1040
+ The number of samples in the input data, used to check the
1041
+ consistency of the transformed output.
1042
+ """
1043
+ if self.sparse_output_:
1044
+ try:
1045
+ # since all columns should be numeric before stacking them
1046
+ # in a sparse matrix, `check_array` is used for the
1047
+ # dtype conversion if necessary.
1048
+ converted_Xs = [
1049
+ check_array(X, accept_sparse=True, force_all_finite=False)
1050
+ for X in Xs
1051
+ ]
1052
+ except ValueError as e:
1053
+ raise ValueError(
1054
+ "For a sparse output, all columns should "
1055
+ "be a numeric or convertible to a numeric."
1056
+ ) from e
1057
+
1058
+ return sparse.hstack(converted_Xs).tocsr()
1059
+ else:
1060
+ Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
1061
+ adapter = _get_container_adapter("transform", self)
1062
+ if adapter and all(adapter.is_supported_container(X) for X in Xs):
1063
+ # rename before stacking, as this avoids errors on temporarily duplicated
1064
+ # columns
1065
+ transformer_names = [
1066
+ t[0]
1067
+ for t in self._iter(
1068
+ fitted=True,
1069
+ column_as_labels=False,
1070
+ skip_drop=True,
1071
+ skip_empty_columns=True,
1072
+ )
1073
+ ]
1074
+ feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0]
1075
+ if self.verbose_feature_names_out:
1076
+ # `_add_prefix_for_feature_names_out` takes care of raising
1077
+ # an error if there are duplicated columns.
1078
+ feature_names_outs = self._add_prefix_for_feature_names_out(
1079
+ list(zip(transformer_names, feature_names_outs))
1080
+ )
1081
+ else:
1082
+ # check for duplicated columns and raise if any
1083
+ feature_names_outs = list(chain.from_iterable(feature_names_outs))
1084
+ feature_names_count = Counter(feature_names_outs)
1085
+ if any(count > 1 for count in feature_names_count.values()):
1086
+ duplicated_feature_names = sorted(
1087
+ name
1088
+ for name, count in feature_names_count.items()
1089
+ if count > 1
1090
+ )
1091
+ err_msg = (
1092
+ "Duplicated feature names found before concatenating the"
1093
+ " outputs of the transformers:"
1094
+ f" {duplicated_feature_names}.\n"
1095
+ )
1096
+ for transformer_name, X in zip(transformer_names, Xs):
1097
+ if X.shape[1] == 0:
1098
+ continue
1099
+ dup_cols_in_transformer = sorted(
1100
+ set(X.columns).intersection(duplicated_feature_names)
1101
+ )
1102
+ if len(dup_cols_in_transformer):
1103
+ err_msg += (
1104
+ f"Transformer {transformer_name} has conflicting "
1105
+ f"columns names: {dup_cols_in_transformer}.\n"
1106
+ )
1107
+ raise ValueError(
1108
+ err_msg
1109
+ + "Either make sure that the transformers named above "
1110
+ "do not generate columns with conflicting names or set "
1111
+ "verbose_feature_names_out=True to automatically "
1112
+ "prefix to the output feature names with the name "
1113
+ "of the transformer to prevent any conflicting "
1114
+ "names."
1115
+ )
1116
+
1117
+ names_idx = 0
1118
+ for X in Xs:
1119
+ if X.shape[1] == 0:
1120
+ continue
1121
+ names_out = feature_names_outs[names_idx : names_idx + X.shape[1]]
1122
+ adapter.rename_columns(X, names_out)
1123
+ names_idx += X.shape[1]
1124
+
1125
+ output = adapter.hstack(Xs)
1126
+ output_samples = output.shape[0]
1127
+ if output_samples != n_samples:
1128
+ raise ValueError(
1129
+ "Concatenating DataFrames from the transformer's output lead to"
1130
+ " an inconsistent number of samples. The output may have Pandas"
1131
+ " Indexes that do not match, or that transformers are returning"
1132
+ " number of samples which are not the same as the number input"
1133
+ " samples."
1134
+ )
1135
+
1136
+ return output
1137
+
1138
+ return np.hstack(Xs)
1139
+
1140
+ def _sk_visual_block_(self):
1141
+ if isinstance(self.remainder, str) and self.remainder == "drop":
1142
+ transformers = self.transformers
1143
+ elif hasattr(self, "_remainder"):
1144
+ remainder_columns = self._remainder[2]
1145
+ if (
1146
+ hasattr(self, "feature_names_in_")
1147
+ and remainder_columns
1148
+ and not all(isinstance(col, str) for col in remainder_columns)
1149
+ ):
1150
+ remainder_columns = self.feature_names_in_[remainder_columns].tolist()
1151
+ transformers = chain(
1152
+ self.transformers, [("remainder", self.remainder, remainder_columns)]
1153
+ )
1154
+ else:
1155
+ transformers = chain(self.transformers, [("remainder", self.remainder, "")])
1156
+
1157
+ names, transformers, name_details = zip(*transformers)
1158
+ return _VisualBlock(
1159
+ "parallel", transformers, names=names, name_details=name_details
1160
+ )
1161
+
1162
+ def _get_empty_routing(self):
1163
+ """Return empty routing.
1164
+
1165
+ Used while routing can be disabled.
1166
+
1167
+ TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no
1168
+ more an option.
1169
+ """
1170
+ return Bunch(
1171
+ **{
1172
+ name: Bunch(**{method: {} for method in METHODS})
1173
+ for name, step, _, _ in self._iter(
1174
+ fitted=False,
1175
+ column_as_labels=False,
1176
+ skip_drop=True,
1177
+ skip_empty_columns=True,
1178
+ )
1179
+ }
1180
+ )
1181
+
1182
+ def get_metadata_routing(self):
1183
+ """Get metadata routing of this object.
1184
+
1185
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1186
+ mechanism works.
1187
+
1188
+ .. versionadded:: 1.4
1189
+
1190
+ Returns
1191
+ -------
1192
+ routing : MetadataRouter
1193
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
1194
+ routing information.
1195
+ """
1196
+ router = MetadataRouter(owner=self.__class__.__name__)
1197
+ # Here we don't care about which columns are used for which
1198
+ # transformers, and whether or not a transformer is used at all, which
1199
+ # might happen if no columns are selected for that transformer. We
1200
+ # request all metadata requested by all transformers.
1201
+ transformers = chain(self.transformers, [("remainder", self.remainder, None)])
1202
+ for name, step, _ in transformers:
1203
+ method_mapping = MethodMapping()
1204
+ if hasattr(step, "fit_transform"):
1205
+ (
1206
+ method_mapping.add(caller="fit", callee="fit_transform").add(
1207
+ caller="fit_transform", callee="fit_transform"
1208
+ )
1209
+ )
1210
+ else:
1211
+ (
1212
+ method_mapping.add(caller="fit", callee="fit")
1213
+ .add(caller="fit", callee="transform")
1214
+ .add(caller="fit_transform", callee="fit")
1215
+ .add(caller="fit_transform", callee="transform")
1216
+ )
1217
+ method_mapping.add(caller="transform", callee="transform")
1218
+ router.add(method_mapping=method_mapping, **{name: step})
1219
+
1220
+ return router
1221
+
1222
+
1223
+ def _check_X(X):
1224
+ """Use check_array only when necessary, e.g. on lists and other non-array-likes."""
1225
+ if hasattr(X, "__array__") or hasattr(X, "__dataframe__") or sparse.issparse(X):
1226
+ return X
1227
+ return check_array(X, force_all_finite="allow-nan", dtype=object)
1228
+
1229
+
1230
+ def _is_empty_column_selection(column):
1231
+ """
1232
+ Return True if the column selection is empty (empty list or all-False
1233
+ boolean array).
1234
+
1235
+ """
1236
+ if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_):
1237
+ return not column.any()
1238
+ elif hasattr(column, "__len__"):
1239
+ return (
1240
+ len(column) == 0
1241
+ or all(isinstance(col, bool) for col in column)
1242
+ and not any(column)
1243
+ )
1244
+ else:
1245
+ return False
1246
+
1247
+
1248
+ def _get_transformer_list(estimators):
1249
+ """
1250
+ Construct (name, trans, column) tuples from list
1251
+
1252
+ """
1253
+ transformers, columns = zip(*estimators)
1254
+ names, _ = zip(*_name_estimators(transformers))
1255
+
1256
+ transformer_list = list(zip(names, transformers, columns))
1257
+ return transformer_list
1258
+
1259
+
1260
+ # This function is not validated using validate_params because
1261
+ # it's just a factory for ColumnTransformer.
1262
+ def make_column_transformer(
1263
+ *transformers,
1264
+ remainder="drop",
1265
+ sparse_threshold=0.3,
1266
+ n_jobs=None,
1267
+ verbose=False,
1268
+ verbose_feature_names_out=True,
1269
+ ):
1270
+ """Construct a ColumnTransformer from the given transformers.
1271
+
1272
+ This is a shorthand for the ColumnTransformer constructor; it does not
1273
+ require, and does not permit, naming the transformers. Instead, they will
1274
+ be given names automatically based on their types. It also does not allow
1275
+ weighting with ``transformer_weights``.
1276
+
1277
+ Read more in the :ref:`User Guide <make_column_transformer>`.
1278
+
1279
+ Parameters
1280
+ ----------
1281
+ *transformers : tuples
1282
+ Tuples of the form (transformer, columns) specifying the
1283
+ transformer objects to be applied to subsets of the data.
1284
+
1285
+ transformer : {'drop', 'passthrough'} or estimator
1286
+ Estimator must support :term:`fit` and :term:`transform`.
1287
+ Special-cased strings 'drop' and 'passthrough' are accepted as
1288
+ well, to indicate to drop the columns or to pass them through
1289
+ untransformed, respectively.
1290
+ columns : str, array-like of str, int, array-like of int, slice, \
1291
+ array-like of bool or callable
1292
+ Indexes the data on its second axis. Integers are interpreted as
1293
+ positional columns, while strings can reference DataFrame columns
1294
+ by name. A scalar string or int should be used where
1295
+ ``transformer`` expects X to be a 1d array-like (vector),
1296
+ otherwise a 2d array will be passed to the transformer.
1297
+ A callable is passed the input data `X` and can return any of the
1298
+ above. To select multiple columns by name or dtype, you can use
1299
+ :obj:`make_column_selector`.
1300
+
1301
+ remainder : {'drop', 'passthrough'} or estimator, default='drop'
1302
+ By default, only the specified columns in `transformers` are
1303
+ transformed and combined in the output, and the non-specified
1304
+ columns are dropped. (default of ``'drop'``).
1305
+ By specifying ``remainder='passthrough'``, all remaining columns that
1306
+ were not specified in `transformers` will be automatically passed
1307
+ through. This subset of columns is concatenated with the output of
1308
+ the transformers.
1309
+ By setting ``remainder`` to be an estimator, the remaining
1310
+ non-specified columns will use the ``remainder`` estimator. The
1311
+ estimator must support :term:`fit` and :term:`transform`.
1312
+
1313
+ sparse_threshold : float, default=0.3
1314
+ If the transformed output consists of a mix of sparse and dense data,
1315
+ it will be stacked as a sparse matrix if the density is lower than this
1316
+ value. Use ``sparse_threshold=0`` to always return dense.
1317
+ When the transformed output consists of all sparse or all dense data,
1318
+ the stacked result will be sparse or dense, respectively, and this
1319
+ keyword will be ignored.
1320
+
1321
+ n_jobs : int, default=None
1322
+ Number of jobs to run in parallel.
1323
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1324
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1325
+ for more details.
1326
+
1327
+ verbose : bool, default=False
1328
+ If True, the time elapsed while fitting each transformer will be
1329
+ printed as it is completed.
1330
+
1331
+ verbose_feature_names_out : bool, default=True
1332
+ If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
1333
+ all feature names with the name of the transformer that generated that
1334
+ feature.
1335
+ If False, :meth:`ColumnTransformer.get_feature_names_out` will not
1336
+ prefix any feature names and will error if feature names are not
1337
+ unique.
1338
+
1339
+ .. versionadded:: 1.0
1340
+
1341
+ Returns
1342
+ -------
1343
+ ct : ColumnTransformer
1344
+ Returns a :class:`ColumnTransformer` object.
1345
+
1346
+ See Also
1347
+ --------
1348
+ ColumnTransformer : Class that allows combining the
1349
+ outputs of multiple transformer objects used on column subsets
1350
+ of the data into a single feature space.
1351
+
1352
+ Examples
1353
+ --------
1354
+ >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
1355
+ >>> from sklearn.compose import make_column_transformer
1356
+ >>> make_column_transformer(
1357
+ ... (StandardScaler(), ['numerical_column']),
1358
+ ... (OneHotEncoder(), ['categorical_column']))
1359
+ ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
1360
+ ['numerical_column']),
1361
+ ('onehotencoder', OneHotEncoder(...),
1362
+ ['categorical_column'])])
1363
+ """
1364
+ # transformer_weights keyword is not passed through because the user
1365
+ # would need to know the automatically generated names of the transformers
1366
+ transformer_list = _get_transformer_list(transformers)
1367
+ return ColumnTransformer(
1368
+ transformer_list,
1369
+ n_jobs=n_jobs,
1370
+ remainder=remainder,
1371
+ sparse_threshold=sparse_threshold,
1372
+ verbose=verbose,
1373
+ verbose_feature_names_out=verbose_feature_names_out,
1374
+ )
1375
+
1376
+
1377
+ class make_column_selector:
1378
+ """Create a callable to select columns to be used with
1379
+ :class:`ColumnTransformer`.
1380
+
1381
+ :func:`make_column_selector` can select columns based on datatype or the
1382
+ column names with a regex. When using multiple selection criteria, **all**
1383
+ criteria must match for a column to be selected.
1384
+
1385
+ For an example of how to use :func:`make_column_selector` within a
1386
+ :class:`ColumnTransformer` to select columns based on data type (i.e.
1387
+ `dtype`), refer to
1388
+ :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
1389
+
1390
+ Parameters
1391
+ ----------
1392
+ pattern : str, default=None
1393
+ Names of columns containing this regex pattern will be included. If
1394
+ None, columns will not be selected based on a pattern.
1395
+
1396
+ dtype_include : column dtype or list of column dtypes, default=None
1397
+ A selection of dtypes to include. For more details, see
1398
+ :meth:`pandas.DataFrame.select_dtypes`.
1399
+
1400
+ dtype_exclude : column dtype or list of column dtypes, default=None
1401
+ A selection of dtypes to exclude. For more details, see
1402
+ :meth:`pandas.DataFrame.select_dtypes`.
1403
+
1404
+ Returns
1405
+ -------
1406
+ selector : callable
1407
+ Callable for column selection to be used by a
1408
+ :class:`ColumnTransformer`.
1409
+
1410
+ See Also
1411
+ --------
1412
+ ColumnTransformer : Class that allows combining the
1413
+ outputs of multiple transformer objects used on column subsets
1414
+ of the data into a single feature space.
1415
+
1416
+ Examples
1417
+ --------
1418
+ >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
1419
+ >>> from sklearn.compose import make_column_transformer
1420
+ >>> from sklearn.compose import make_column_selector
1421
+ >>> import numpy as np
1422
+ >>> import pandas as pd # doctest: +SKIP
1423
+ >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
1424
+ ... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP
1425
+ >>> ct = make_column_transformer(
1426
+ ... (StandardScaler(),
1427
+ ... make_column_selector(dtype_include=np.number)), # rating
1428
+ ... (OneHotEncoder(),
1429
+ ... make_column_selector(dtype_include=object))) # city
1430
+ >>> ct.fit_transform(X) # doctest: +SKIP
1431
+ array([[ 0.90453403, 1. , 0. , 0. ],
1432
+ [-1.50755672, 1. , 0. , 0. ],
1433
+ [-0.30151134, 0. , 1. , 0. ],
1434
+ [ 0.90453403, 0. , 0. , 1. ]])
1435
+ """
1436
+
1437
+ def __init__(self, pattern=None, *, dtype_include=None, dtype_exclude=None):
1438
+ self.pattern = pattern
1439
+ self.dtype_include = dtype_include
1440
+ self.dtype_exclude = dtype_exclude
1441
+
1442
+ def __call__(self, df):
1443
+ """Callable for column selection to be used by a
1444
+ :class:`ColumnTransformer`.
1445
+
1446
+ Parameters
1447
+ ----------
1448
+ df : dataframe of shape (n_samples, n_features)
1449
+ DataFrame to select columns from.
1450
+ """
1451
+ if not hasattr(df, "iloc"):
1452
+ raise ValueError(
1453
+ "make_column_selector can only be applied to pandas dataframes"
1454
+ )
1455
+ df_row = df.iloc[:1]
1456
+ if self.dtype_include is not None or self.dtype_exclude is not None:
1457
+ df_row = df_row.select_dtypes(
1458
+ include=self.dtype_include, exclude=self.dtype_exclude
1459
+ )
1460
+ cols = df_row.columns
1461
+ if self.pattern is not None:
1462
+ cols = cols[cols.str.contains(self.pattern, regex=True)]
1463
+ return cols.tolist()
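A minimal usage sketch of the two helpers defined above, pairing `make_column_selector` with `make_column_transformer`. The column names and data are made up for illustration, pandas is assumed to be installed, and this snippet is not part of the vendored sklearn source.

import numpy as np
import pandas as pd
from sklearn.compose import make_column_selector, make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame(
    {"city": ["London", "Paris", "Paris", "Sallisaw"], "rating": [5, 3, 4, 5]}
)
ct = make_column_transformer(
    (StandardScaler(), make_column_selector(dtype_include=np.number)),  # 'rating'
    (OneHotEncoder(sparse_output=False), make_column_selector(dtype_include=object)),  # 'city'
    remainder="drop",
    verbose_feature_names_out=True,
)
Xt = ct.fit_transform(X)
print(Xt.shape)                    # (4, 4): one scaled column plus three one-hot columns
print(ct.get_feature_names_out())  # names prefixed with 'standardscaler__' / 'onehotencoder__'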
venv/lib/python3.10/site-packages/sklearn/compose/_target.py ADDED
@@ -0,0 +1,342 @@
1
+ # Authors: Andreas Mueller <[email protected]>
2
+ # Guillaume Lemaitre <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import warnings
6
+
7
+ import numpy as np
8
+
9
+ from ..base import BaseEstimator, RegressorMixin, _fit_context, clone
10
+ from ..exceptions import NotFittedError
11
+ from ..preprocessing import FunctionTransformer
12
+ from ..utils import _safe_indexing, check_array
13
+ from ..utils._param_validation import HasMethods
14
+ from ..utils._tags import _safe_tags
15
+ from ..utils.metadata_routing import (
16
+ _raise_for_unsupported_routing,
17
+ _RoutingNotSupportedMixin,
18
+ )
19
+ from ..utils.validation import check_is_fitted
20
+
21
+ __all__ = ["TransformedTargetRegressor"]
22
+
23
+
24
+ class TransformedTargetRegressor(
25
+ _RoutingNotSupportedMixin, RegressorMixin, BaseEstimator
26
+ ):
27
+ """Meta-estimator to regress on a transformed target.
28
+
29
+ Useful for applying a non-linear transformation to the target `y` in
30
+ regression problems. This transformation can be given as a Transformer
31
+ such as the :class:`~sklearn.preprocessing.QuantileTransformer` or as a
32
+ function and its inverse such as `np.log` and `np.exp`.
33
+
34
+ The computation during :meth:`fit` is::
35
+
36
+ regressor.fit(X, func(y))
37
+
38
+ or::
39
+
40
+ regressor.fit(X, transformer.transform(y))
41
+
42
+ The computation during :meth:`predict` is::
43
+
44
+ inverse_func(regressor.predict(X))
45
+
46
+ or::
47
+
48
+ transformer.inverse_transform(regressor.predict(X))
49
+
50
+ Read more in the :ref:`User Guide <transformed_target_regressor>`.
51
+
52
+ .. versionadded:: 0.20
53
+
54
+ Parameters
55
+ ----------
56
+ regressor : object, default=None
57
+ Regressor object such as derived from
58
+ :class:`~sklearn.base.RegressorMixin`. This regressor will
59
+ automatically be cloned each time prior to fitting. If `regressor is
60
+ None`, :class:`~sklearn.linear_model.LinearRegression` is created and used.
61
+
62
+ transformer : object, default=None
63
+ Estimator object such as derived from
64
+ :class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time
65
+ as `func` and `inverse_func`. If `transformer is None` as well as
66
+ `func` and `inverse_func`, the transformer will be an identity
67
+ transformer. Note that the transformer will be cloned during fitting.
68
+ Also, the transformer restricts `y` to be a numpy array.
69
+
70
+ func : function, default=None
71
+ Function to apply to `y` before passing to :meth:`fit`. Cannot be set
72
+ at the same time as `transformer`. The function needs to return a
73
+ 2-dimensional array. If `func is None`, the function used will be the
74
+ identity function.
75
+
76
+ inverse_func : function, default=None
77
+ Function to apply to the prediction of the regressor. Cannot be set at
78
+ the same time as `transformer`. The function needs to return a
79
+ 2-dimensional array. The inverse function is used to return
80
+ predictions to the same space of the original training labels.
81
+
82
+ check_inverse : bool, default=True
83
+ Whether to check that `transform` followed by `inverse_transform`
84
+ or `func` followed by `inverse_func` leads to the original targets.
85
+
86
+ Attributes
87
+ ----------
88
+ regressor_ : object
89
+ Fitted regressor.
90
+
91
+ transformer_ : object
92
+ Transformer used in :meth:`fit` and :meth:`predict`.
93
+
94
+ n_features_in_ : int
95
+ Number of features seen during :term:`fit`. Only defined if the
96
+ underlying regressor exposes such an attribute when fit.
97
+
98
+ .. versionadded:: 0.24
99
+
100
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
101
+ Names of features seen during :term:`fit`. Defined only when `X`
102
+ has feature names that are all strings.
103
+
104
+ .. versionadded:: 1.0
105
+
106
+ See Also
107
+ --------
108
+ sklearn.preprocessing.FunctionTransformer : Construct a transformer from an
109
+ arbitrary callable.
110
+
111
+ Notes
112
+ -----
113
+ Internally, the target `y` is always converted into a 2-dimensional array
114
+ to be used by scikit-learn transformers. At the time of prediction, the
115
+ output will be reshaped to have the same number of dimensions as `y`.
116
+
117
+ Examples
118
+ --------
119
+ >>> import numpy as np
120
+ >>> from sklearn.linear_model import LinearRegression
121
+ >>> from sklearn.compose import TransformedTargetRegressor
122
+ >>> tt = TransformedTargetRegressor(regressor=LinearRegression(),
123
+ ... func=np.log, inverse_func=np.exp)
124
+ >>> X = np.arange(4).reshape(-1, 1)
125
+ >>> y = np.exp(2 * X).ravel()
126
+ >>> tt.fit(X, y)
127
+ TransformedTargetRegressor(...)
128
+ >>> tt.score(X, y)
129
+ 1.0
130
+ >>> tt.regressor_.coef_
131
+ array([2.])
132
+
133
+ For a more detailed example use case refer to
134
+ :ref:`sphx_glr_auto_examples_compose_plot_transformed_target.py`.
135
+ """
136
+
137
+ _parameter_constraints: dict = {
138
+ "regressor": [HasMethods(["fit", "predict"]), None],
139
+ "transformer": [HasMethods("transform"), None],
140
+ "func": [callable, None],
141
+ "inverse_func": [callable, None],
142
+ "check_inverse": ["boolean"],
143
+ }
144
+
145
+ def __init__(
146
+ self,
147
+ regressor=None,
148
+ *,
149
+ transformer=None,
150
+ func=None,
151
+ inverse_func=None,
152
+ check_inverse=True,
153
+ ):
154
+ self.regressor = regressor
155
+ self.transformer = transformer
156
+ self.func = func
157
+ self.inverse_func = inverse_func
158
+ self.check_inverse = check_inverse
159
+
160
+ def _fit_transformer(self, y):
161
+ """Check transformer and fit transformer.
162
+
163
+ Create the default transformer, fit it, and make an additional inverse
164
+ check on a subset (optional).
165
+
166
+ """
167
+ if self.transformer is not None and (
168
+ self.func is not None or self.inverse_func is not None
169
+ ):
170
+ raise ValueError(
171
+ "'transformer' and functions 'func'/'inverse_func' cannot both be set."
172
+ )
173
+ elif self.transformer is not None:
174
+ self.transformer_ = clone(self.transformer)
175
+ else:
176
+ if self.func is not None and self.inverse_func is None:
177
+ raise ValueError(
178
+ "When 'func' is provided, 'inverse_func' must also be provided"
179
+ )
180
+ self.transformer_ = FunctionTransformer(
181
+ func=self.func,
182
+ inverse_func=self.inverse_func,
183
+ validate=True,
184
+ check_inverse=self.check_inverse,
185
+ )
186
+ # XXX: sample_weight is not currently passed to the
187
+ # transformer. However, if transformer starts using sample_weight, the
188
+ # code should be modified accordingly. At the time to consider the
189
+ # sample_prop feature, it is also a good use case to be considered.
190
+ self.transformer_.fit(y)
191
+ if self.check_inverse:
192
+ idx_selected = slice(None, None, max(1, y.shape[0] // 10))
193
+ y_sel = _safe_indexing(y, idx_selected)
194
+ y_sel_t = self.transformer_.transform(y_sel)
195
+ if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)):
196
+ warnings.warn(
197
+ (
198
+ "The provided functions or transformer are"
199
+ " not strictly inverse of each other. If"
200
+ " you are sure you want to proceed regardless"
201
+ ", set 'check_inverse=False'"
202
+ ),
203
+ UserWarning,
204
+ )
205
+
206
+ @_fit_context(
207
+ # TransformedTargetRegressor.regressor/transformer are not validated yet.
208
+ prefer_skip_nested_validation=False
209
+ )
210
+ def fit(self, X, y, **fit_params):
211
+ """Fit the model according to the given training data.
212
+
213
+ Parameters
214
+ ----------
215
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
216
+ Training vector, where `n_samples` is the number of samples and
217
+ `n_features` is the number of features.
218
+
219
+ y : array-like of shape (n_samples,)
220
+ Target values.
221
+
222
+ **fit_params : dict
223
+ Parameters passed to the `fit` method of the underlying
224
+ regressor.
225
+
226
+ Returns
227
+ -------
228
+ self : object
229
+ Fitted estimator.
230
+ """
231
+ _raise_for_unsupported_routing(self, "fit", **fit_params)
232
+ if y is None:
233
+ raise ValueError(
234
+ f"This {self.__class__.__name__} estimator "
235
+ "requires y to be passed, but the target y is None."
236
+ )
237
+ y = check_array(
238
+ y,
239
+ input_name="y",
240
+ accept_sparse=False,
241
+ force_all_finite=True,
242
+ ensure_2d=False,
243
+ dtype="numeric",
244
+ allow_nd=True,
245
+ )
246
+
247
+ # store the number of dimensions of the target so that an array of
248
+ # similar shape can be returned at predict time
249
+ self._training_dim = y.ndim
250
+
251
+ # transformers are designed to modify X, which is 2-dimensional; we
252
+ # need to modify y accordingly.
253
+ if y.ndim == 1:
254
+ y_2d = y.reshape(-1, 1)
255
+ else:
256
+ y_2d = y
257
+ self._fit_transformer(y_2d)
258
+
259
+ # transform y and convert back to 1d array if needed
260
+ y_trans = self.transformer_.transform(y_2d)
261
+ # FIXME: a FunctionTransformer can return a 1D array even when validate
262
+ # is set to True. Therefore, we need to check the number of dimension
263
+ # first.
264
+ if y_trans.ndim == 2 and y_trans.shape[1] == 1:
265
+ y_trans = y_trans.squeeze(axis=1)
266
+
267
+ if self.regressor is None:
268
+ from ..linear_model import LinearRegression
269
+
270
+ self.regressor_ = LinearRegression()
271
+ else:
272
+ self.regressor_ = clone(self.regressor)
273
+
274
+ self.regressor_.fit(X, y_trans, **fit_params)
275
+
276
+ if hasattr(self.regressor_, "feature_names_in_"):
277
+ self.feature_names_in_ = self.regressor_.feature_names_in_
278
+
279
+ return self
280
+
281
+ def predict(self, X, **predict_params):
282
+ """Predict using the base regressor, applying inverse.
283
+
284
+ The regressor is used to predict and the `inverse_func` or
285
+ `inverse_transform` is applied before returning the prediction.
286
+
287
+ Parameters
288
+ ----------
289
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
290
+ Samples.
291
+
292
+ **predict_params : dict of str -> object
293
+ Parameters passed to the `predict` method of the underlying
294
+ regressor.
295
+
296
+ Returns
297
+ -------
298
+ y_hat : ndarray of shape (n_samples,)
299
+ Predicted values.
300
+ """
301
+ check_is_fitted(self)
302
+ pred = self.regressor_.predict(X, **predict_params)
303
+ if pred.ndim == 1:
304
+ pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1))
305
+ else:
306
+ pred_trans = self.transformer_.inverse_transform(pred)
307
+ if (
308
+ self._training_dim == 1
309
+ and pred_trans.ndim == 2
310
+ and pred_trans.shape[1] == 1
311
+ ):
312
+ pred_trans = pred_trans.squeeze(axis=1)
313
+
314
+ return pred_trans
315
+
316
+ def _more_tags(self):
317
+ regressor = self.regressor
318
+ if regressor is None:
319
+ from ..linear_model import LinearRegression
320
+
321
+ regressor = LinearRegression()
322
+
323
+ return {
324
+ "poor_score": True,
325
+ "multioutput": _safe_tags(regressor, key="multioutput"),
326
+ }
327
+
328
+ @property
329
+ def n_features_in_(self):
330
+ """Number of features seen during :term:`fit`."""
331
+ # For consistency with other estimators we raise an AttributeError so
332
+ # that hasattr() returns False when the estimator isn't fitted.
333
+ try:
334
+ check_is_fitted(self)
335
+ except NotFittedError as nfe:
336
+ raise AttributeError(
337
+ "{} object has no n_features_in_ attribute.".format(
338
+ self.__class__.__name__
339
+ )
340
+ ) from nfe
341
+
342
+ return self.regressor_.n_features_in_
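As a complement to the `func`/`inverse_func` example in the class docstring above, here is a minimal sketch using the `transformer` parameter instead; the data is synthetic and the estimator choices are illustrative only, not part of the vendored source.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import Ridge
from sklearn.preprocessing import QuantileTransformer

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = np.exp(X @ np.array([0.5, -0.2, 0.1]) + 0.1 * rng.normal(size=100))

reg = TransformedTargetRegressor(
    regressor=Ridge(),
    transformer=QuantileTransformer(n_quantiles=50, output_distribution="normal"),
    check_inverse=False,  # the quantile transform is only approximately invertible
)
reg.fit(X, y)
print(reg.predict(X[:3]).shape)  # (3,): predictions are mapped back to the original target space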
venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc ADDED
Binary file (33.3 kB).
 
venv/lib/python3.10/site-packages/sklearn/externals/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (308 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from ._laplacian import laplacian
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (251 Bytes).
 
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc ADDED
Binary file (16.7 kB).
 
venv/lib/python3.10/site-packages/sklearn/externals/_scipy/sparse/csgraph/_laplacian.py ADDED
@@ -0,0 +1,557 @@
1
+ """
2
+ This file is a copy of the scipy.sparse.csgraph._laplacian module from SciPy 1.12
3
+
4
+ scipy.sparse.csgraph.laplacian supports sparse arrays only starting from Scipy 1.12,
5
+ see https://github.com/scipy/scipy/pull/19156. This vendored file can be removed as
6
+ soon as Scipy 1.12 becomes the minimum supported version.
7
+
8
+ Laplacian of a compressed-sparse graph
9
+ """
10
+
11
+ # License: BSD 3 clause
12
+
13
+ import numpy as np
14
+ from scipy.sparse import issparse
15
+ from scipy.sparse.linalg import LinearOperator
16
+
17
+
18
+ ###############################################################################
19
+ # Graph laplacian
20
+ def laplacian(
21
+ csgraph,
22
+ normed=False,
23
+ return_diag=False,
24
+ use_out_degree=False,
25
+ *,
26
+ copy=True,
27
+ form="array",
28
+ dtype=None,
29
+ symmetrized=False,
30
+ ):
31
+ """
32
+ Return the Laplacian of a directed graph.
33
+
34
+ Parameters
35
+ ----------
36
+ csgraph : array_like or sparse matrix, 2 dimensions
37
+ Compressed-sparse graph, with shape (N, N).
38
+ normed : bool, optional
39
+ If True, then compute symmetrically normalized Laplacian.
40
+ Default: False.
41
+ return_diag : bool, optional
42
+ If True, then also return an array related to vertex degrees.
43
+ Default: False.
44
+ use_out_degree : bool, optional
45
+ If True, then use out-degree instead of in-degree.
46
+ This distinction matters only if the graph is asymmetric.
47
+ Default: False.
48
+ copy : bool, optional
49
+ If False, then change `csgraph` in place if possible,
50
+ avoiding doubling the memory use.
51
+ Default: True, for backward compatibility.
52
+ form : 'array', or 'function', or 'lo'
53
+ Determines the format of the output Laplacian:
54
+
55
+ * 'array' is a numpy array;
56
+ * 'function' is a pointer to evaluating the Laplacian-vector
57
+ or Laplacian-matrix product;
58
+ * 'lo' results in the format of the `LinearOperator`.
59
+
60
+ Choosing 'function' or 'lo' always avoids doubling
61
+ the memory use, ignoring `copy` value.
62
+ Default: 'array', for backward compatibility.
63
+ dtype : None or one of numeric numpy dtypes, optional
64
+ The dtype of the output. If ``dtype=None``, the dtype of the
65
+ output matches the dtype of the input csgraph, except for
66
+ the case ``normed=True`` and integer-like csgraph, where
67
+ the output dtype is 'float' allowing accurate normalization,
68
+ but dramatically increasing the memory use.
69
+ Default: None, for backward compatibility.
70
+ symmetrized : bool, optional
71
+ If True, then the output Laplacian is symmetric/Hermitian.
72
+ The symmetrization is done by ``csgraph + csgraph.T.conj``
73
+ without dividing by 2 to preserve integer dtypes if possible
74
+ prior to the construction of the Laplacian.
75
+ The symmetrization will increase the memory footprint of
76
+ sparse matrices unless the sparsity pattern is symmetric or
77
+ `form` is 'function' or 'lo'.
78
+ Default: False, for backward compatibility.
79
+
80
+ Returns
81
+ -------
82
+ lap : ndarray, or sparse matrix, or `LinearOperator`
83
+ The N x N Laplacian of csgraph. It will be a NumPy array (dense)
84
+ if the input was dense, or a sparse matrix otherwise, or
85
+ the format of a function or `LinearOperator` if
86
+ `form` equals 'function' or 'lo', respectively.
87
+ diag : ndarray, optional
88
+ The length-N main diagonal of the Laplacian matrix.
89
+ For the normalized Laplacian, this is the array of square roots
90
+ of vertex degrees or 1 if the degree is zero.
91
+
92
+ Notes
93
+ -----
94
+ The Laplacian matrix of a graph is sometimes referred to as the
95
+ "Kirchhoff matrix" or just the "Laplacian", and is useful in many
96
+ parts of spectral graph theory.
97
+ In particular, the eigen-decomposition of the Laplacian can give
98
+ insight into many properties of the graph, and it
99
+ is commonly used for spectral data embedding and clustering.
100
+
101
+ The constructed Laplacian doubles the memory use if ``copy=True`` and
102
+ ``form="array"`` which is the default.
103
+ Choosing ``copy=False`` has no effect unless ``form="array"``
104
+ or the matrix is sparse in the ``coo`` format, or dense array, except
105
+ for the integer input with ``normed=True`` that forces the float output.
106
+
107
+ Sparse input is reformatted into ``coo`` if ``form="array"``,
108
+ which is the default.
109
+
110
+ If the input adjacency matrix is not symmetric, the Laplacian is
111
+ also non-symmetric unless ``symmetrized=True`` is used.
112
+
113
+ Diagonal entries of the input adjacency matrix are ignored and
114
+ replaced with zeros for the purpose of normalization where ``normed=True``.
115
+ The normalization uses the inverse square roots of row-sums of the input
116
+ adjacency matrix, and thus may fail if the row-sums contain
117
+ negative values or complex values with a non-zero imaginary part.
118
+
119
+ The normalization is symmetric, making the normalized Laplacian also
120
+ symmetric if the input csgraph was symmetric.
121
+
122
+ References
123
+ ----------
124
+ .. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
125
+
126
+ Examples
127
+ --------
128
+ >>> import numpy as np
129
+ >>> from scipy.sparse import csgraph
130
+
131
+ Our first illustration is the symmetric graph
132
+
133
+ >>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
134
+ >>> G
135
+ array([[0, 0, 0, 0],
136
+ [0, 1, 2, 3],
137
+ [0, 2, 4, 6],
138
+ [0, 3, 6, 9]])
139
+
140
+ and its symmetric Laplacian matrix
141
+
142
+ >>> csgraph.laplacian(G)
143
+ array([[ 0, 0, 0, 0],
144
+ [ 0, 5, -2, -3],
145
+ [ 0, -2, 8, -6],
146
+ [ 0, -3, -6, 9]])
147
+
148
+ The non-symmetric graph
149
+
150
+ >>> G = np.arange(9).reshape(3, 3)
151
+ >>> G
152
+ array([[0, 1, 2],
153
+ [3, 4, 5],
154
+ [6, 7, 8]])
155
+
156
+ has different row- and column sums, resulting in two varieties
157
+ of the Laplacian matrix, using an in-degree, which is the default
158
+
159
+ >>> L_in_degree = csgraph.laplacian(G)
160
+ >>> L_in_degree
161
+ array([[ 9, -1, -2],
162
+ [-3, 8, -5],
163
+ [-6, -7, 7]])
164
+
165
+ or alternatively an out-degree
166
+
167
+ >>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
168
+ >>> L_out_degree
169
+ array([[ 3, -1, -2],
170
+ [-3, 8, -5],
171
+ [-6, -7, 13]])
172
+
173
+ Constructing a symmetric Laplacian matrix, one can add the two as
174
+
175
+ >>> L_in_degree + L_out_degree.T
176
+ array([[ 12, -4, -8],
177
+ [ -4, 16, -12],
178
+ [ -8, -12, 20]])
179
+
180
+ or use the ``symmetrized=True`` option
181
+
182
+ >>> csgraph.laplacian(G, symmetrized=True)
183
+ array([[ 12, -4, -8],
184
+ [ -4, 16, -12],
185
+ [ -8, -12, 20]])
186
+
187
+ that is equivalent to symmetrizing the original graph
188
+
189
+ >>> csgraph.laplacian(G + G.T)
190
+ array([[ 12, -4, -8],
191
+ [ -4, 16, -12],
192
+ [ -8, -12, 20]])
193
+
194
+ The goal of normalization is to make the non-zero diagonal entries
195
+ of the Laplacian matrix all equal to one, also scaling off-diagonal
196
+ entries correspondingly. The normalization can be done manually, e.g.,
197
+
198
+ >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
199
+ >>> L, d = csgraph.laplacian(G, return_diag=True)
200
+ >>> L
201
+ array([[ 2, -1, -1],
202
+ [-1, 2, -1],
203
+ [-1, -1, 2]])
204
+ >>> d
205
+ array([2, 2, 2])
206
+ >>> scaling = np.sqrt(d)
207
+ >>> scaling
208
+ array([1.41421356, 1.41421356, 1.41421356])
209
+ >>> (1/scaling)*L*(1/scaling)
210
+ array([[ 1. , -0.5, -0.5],
211
+ [-0.5, 1. , -0.5],
212
+ [-0.5, -0.5, 1. ]])
213
+
214
+ Or using ``normed=True`` option
215
+
216
+ >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
217
+ >>> L
218
+ array([[ 1. , -0.5, -0.5],
219
+ [-0.5, 1. , -0.5],
220
+ [-0.5, -0.5, 1. ]])
221
+
222
+ which now instead of the diagonal returns the scaling coefficients
223
+
224
+ >>> d
225
+ array([1.41421356, 1.41421356, 1.41421356])
226
+
227
+ Zero scaling coefficients are substituted with 1s, where scaling
228
+ has thus no effect, e.g.,
229
+
230
+ >>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
231
+ >>> G
232
+ array([[0, 0, 0],
233
+ [0, 0, 1],
234
+ [0, 1, 0]])
235
+ >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
236
+ >>> L
237
+ array([[ 0., -0., -0.],
238
+ [-0., 1., -1.],
239
+ [-0., -1., 1.]])
240
+ >>> d
241
+ array([1., 1., 1.])
242
+
243
+ Only the symmetric normalization is implemented, resulting
244
+ in a symmetric Laplacian matrix if and only if its graph is symmetric
245
+ and has all non-negative degrees, like in the examples above.
246
+
247
+ The output Laplacian matrix is by default a dense array or a sparse matrix
248
+ inferring its shape, format, and dtype from the input graph matrix:
249
+
250
+ >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
251
+ >>> G
252
+ array([[0., 1., 1.],
253
+ [1., 0., 1.],
254
+ [1., 1., 0.]], dtype=float32)
255
+ >>> csgraph.laplacian(G)
256
+ array([[ 2., -1., -1.],
257
+ [-1., 2., -1.],
258
+ [-1., -1., 2.]], dtype=float32)
259
+
260
+ but can alternatively be generated matrix-free as a LinearOperator:
261
+
262
+ >>> L = csgraph.laplacian(G, form="lo")
263
+ >>> L
264
+ <3x3 _CustomLinearOperator with dtype=float32>
265
+ >>> L(np.eye(3))
266
+ array([[ 2., -1., -1.],
267
+ [-1., 2., -1.],
268
+ [-1., -1., 2.]])
269
+
270
+ or as a lambda-function:
271
+
272
+ >>> L = csgraph.laplacian(G, form="function")
273
+ >>> L
274
+ <function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
275
+ >>> L(np.eye(3))
276
+ array([[ 2., -1., -1.],
277
+ [-1., 2., -1.],
278
+ [-1., -1., 2.]])
279
+
280
+ The Laplacian matrix is used for
281
+ spectral data clustering and embedding
282
+ as well as for spectral graph partitioning.
283
+ Our final example illustrates the latter
284
+ for a noisy directed linear graph.
285
+
286
+ >>> from scipy.sparse import diags, random
287
+ >>> from scipy.sparse.linalg import lobpcg
288
+
289
+ Create a directed linear graph with ``N=35`` vertices
290
+ using a sparse adjacency matrix ``G``:
291
+
292
+ >>> N = 35
293
+ >>> G = diags(np.ones(N-1), 1, format="csr")
294
+
295
+ Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``:
296
+
297
+ >>> rng = np.random.default_rng()
298
+ >>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
299
+
300
+ Set initial approximations for eigenvectors:
301
+
302
+ >>> X = rng.random((N, 2))
303
+
304
+ The constant vector of ones is always a trivial eigenvector
305
+ of the non-normalized Laplacian to be filtered out:
306
+
307
+ >>> Y = np.ones((N, 1))
308
+
309
+ Alternating (1) the sign of the graph weights allows determining
310
+ labels for spectral max- and min- cuts in a single loop.
311
+ Since the graph is undirected, the option ``symmetrized=True``
312
+ must be used in the construction of the Laplacian.
313
+ The option ``normed=True`` cannot be used in (2) for the negative weights
314
+ here as the symmetric normalization evaluates square roots.
315
+ The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
316
+ a fixed memory footprint and read-only access to the graph.
317
+ Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
318
+ that determines the labels as the signs of its components in (5).
319
+ Since the sign in an eigenvector is not deterministic and can flip,
320
+ we fix the sign of the first component to be always +1 in (4).
321
+
322
+ >>> for cut in ["max", "min"]:
323
+ ... G = -G # 1.
324
+ ... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2.
325
+ ... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3.
326
+ ... eves *= np.sign(eves[0, 0]) # 4.
327
+ ... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5.
328
+ max-cut labels:
329
+ [1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
330
+ min-cut labels:
331
+ [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
332
+
333
+ As anticipated for a (slightly noisy) linear graph,
334
+ the max-cut strips all the edges of the graph coloring all
335
+ odd vertices into one color and all even vertices into another one,
336
+ while the balanced min-cut partitions the graph
337
+ in the middle by deleting a single edge.
338
+ Both determined partitions are optimal.
339
+ """
340
+ if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
341
+ raise ValueError("csgraph must be a square matrix or array")
342
+
343
+ if normed and (
344
+ np.issubdtype(csgraph.dtype, np.signedinteger)
345
+ or np.issubdtype(csgraph.dtype, np.uint)
346
+ ):
347
+ csgraph = csgraph.astype(np.float64)
348
+
349
+ if form == "array":
350
+ create_lap = _laplacian_sparse if issparse(csgraph) else _laplacian_dense
351
+ else:
352
+ create_lap = (
353
+ _laplacian_sparse_flo if issparse(csgraph) else _laplacian_dense_flo
354
+ )
355
+
356
+ degree_axis = 1 if use_out_degree else 0
357
+
358
+ lap, d = create_lap(
359
+ csgraph,
360
+ normed=normed,
361
+ axis=degree_axis,
362
+ copy=copy,
363
+ form=form,
364
+ dtype=dtype,
365
+ symmetrized=symmetrized,
366
+ )
367
+ if return_diag:
368
+ return lap, d
369
+ return lap
370
+
371
+
372
+ def _setdiag_dense(m, d):
373
+ step = len(d) + 1
374
+ m.flat[::step] = d
375
+
376
+
377
+ def _laplace(m, d):
378
+ return lambda v: v * d[:, np.newaxis] - m @ v
379
+
380
+
381
+ def _laplace_normed(m, d, nd):
382
+ laplace = _laplace(m, d)
383
+ return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
384
+
385
+
386
+ def _laplace_sym(m, d):
387
+ return (
388
+ lambda v: v * d[:, np.newaxis]
389
+ - m @ v
390
+ - np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
391
+ )
392
+
393
+
394
+ def _laplace_normed_sym(m, d, nd):
395
+ laplace_sym = _laplace_sym(m, d)
396
+ return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
397
+
398
+
399
+ def _linearoperator(mv, shape, dtype):
400
+ return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
401
+
402
+
403
+ def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
404
+ # The keyword argument `copy` is unused and has no effect here.
405
+ del copy
406
+
407
+ if dtype is None:
408
+ dtype = graph.dtype
409
+
410
+ graph_sum = np.asarray(graph.sum(axis=axis)).ravel()
411
+ graph_diagonal = graph.diagonal()
412
+ diag = graph_sum - graph_diagonal
413
+ if symmetrized:
414
+ graph_sum += np.asarray(graph.sum(axis=1 - axis)).ravel()
415
+ diag = graph_sum - graph_diagonal - graph_diagonal
416
+
417
+ if normed:
418
+ isolated_node_mask = diag == 0
419
+ w = np.where(isolated_node_mask, 1, np.sqrt(diag))
420
+ if symmetrized:
421
+ md = _laplace_normed_sym(graph, graph_sum, 1.0 / w)
422
+ else:
423
+ md = _laplace_normed(graph, graph_sum, 1.0 / w)
424
+ if form == "function":
425
+ return md, w.astype(dtype, copy=False)
426
+ elif form == "lo":
427
+ m = _linearoperator(md, shape=graph.shape, dtype=dtype)
428
+ return m, w.astype(dtype, copy=False)
429
+ else:
430
+ raise ValueError(f"Invalid form: {form!r}")
431
+ else:
432
+ if symmetrized:
433
+ md = _laplace_sym(graph, graph_sum)
434
+ else:
435
+ md = _laplace(graph, graph_sum)
436
+ if form == "function":
437
+ return md, diag.astype(dtype, copy=False)
438
+ elif form == "lo":
439
+ m = _linearoperator(md, shape=graph.shape, dtype=dtype)
440
+ return m, diag.astype(dtype, copy=False)
441
+ else:
442
+ raise ValueError(f"Invalid form: {form!r}")
443
+
444
+
445
+ def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
446
+ # The keyword argument `form` is unused and has no effect here.
447
+ del form
448
+
449
+ if dtype is None:
450
+ dtype = graph.dtype
451
+
452
+ needs_copy = False
453
+ if graph.format in ("lil", "dok"):
454
+ m = graph.tocoo()
455
+ else:
456
+ m = graph
457
+ if copy:
458
+ needs_copy = True
459
+
460
+ if symmetrized:
461
+ m += m.T.conj()
462
+
463
+ w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal()
464
+ if normed:
465
+ m = m.tocoo(copy=needs_copy)
466
+ isolated_node_mask = w == 0
467
+ w = np.where(isolated_node_mask, 1, np.sqrt(w))
468
+ m.data /= w[m.row]
469
+ m.data /= w[m.col]
470
+ m.data *= -1
471
+ m.setdiag(1 - isolated_node_mask)
472
+ else:
473
+ if m.format == "dia":
474
+ m = m.copy()
475
+ else:
476
+ m = m.tocoo(copy=needs_copy)
477
+ m.data *= -1
478
+ m.setdiag(w)
479
+
480
+ return m.astype(dtype, copy=False), w.astype(dtype)
481
+
482
+
483
+ def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
484
+ if copy:
485
+ m = np.array(graph)
486
+ else:
487
+ m = np.asarray(graph)
488
+
489
+ if dtype is None:
490
+ dtype = m.dtype
491
+
492
+ graph_sum = m.sum(axis=axis)
493
+ graph_diagonal = m.diagonal()
494
+ diag = graph_sum - graph_diagonal
495
+ if symmetrized:
496
+ graph_sum += m.sum(axis=1 - axis)
497
+ diag = graph_sum - graph_diagonal - graph_diagonal
498
+
499
+ if normed:
500
+ isolated_node_mask = diag == 0
501
+ w = np.where(isolated_node_mask, 1, np.sqrt(diag))
502
+ if symmetrized:
503
+ md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
504
+ else:
505
+ md = _laplace_normed(m, graph_sum, 1.0 / w)
506
+ if form == "function":
507
+ return md, w.astype(dtype, copy=False)
508
+ elif form == "lo":
509
+ m = _linearoperator(md, shape=graph.shape, dtype=dtype)
510
+ return m, w.astype(dtype, copy=False)
511
+ else:
512
+ raise ValueError(f"Invalid form: {form!r}")
513
+ else:
514
+ if symmetrized:
515
+ md = _laplace_sym(m, graph_sum)
516
+ else:
517
+ md = _laplace(m, graph_sum)
518
+ if form == "function":
519
+ return md, diag.astype(dtype, copy=False)
520
+ elif form == "lo":
521
+ m = _linearoperator(md, shape=graph.shape, dtype=dtype)
522
+ return m, diag.astype(dtype, copy=False)
523
+ else:
524
+ raise ValueError(f"Invalid form: {form!r}")
525
+
526
+
527
+ def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
528
+ if form != "array":
529
+ raise ValueError(f'{form!r} must be "array"')
530
+
531
+ if dtype is None:
532
+ dtype = graph.dtype
533
+
534
+ if copy:
535
+ m = np.array(graph)
536
+ else:
537
+ m = np.asarray(graph)
538
+
539
+ if dtype is None:
540
+ dtype = m.dtype
541
+
542
+ if symmetrized:
543
+ m += m.T.conj()
544
+ np.fill_diagonal(m, 0)
545
+ w = m.sum(axis=axis)
546
+ if normed:
547
+ isolated_node_mask = w == 0
548
+ w = np.where(isolated_node_mask, 1, np.sqrt(w))
549
+ m /= w
550
+ m /= w[:, np.newaxis]
551
+ m *= -1
552
+ _setdiag_dense(m, 1 - isolated_node_mask)
553
+ else:
554
+ m *= -1
555
+ _setdiag_dense(m, w)
556
+
557
+ return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
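A quick sanity check of the unnormalized Laplacian L = D - A on a small symmetric graph, importing the vendored function through the package __init__ added above. This is illustrative only; application code would normally call scipy.sparse.csgraph.laplacian rather than this private copy.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.externals._scipy.sparse.csgraph import laplacian

A = csr_matrix(np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float))
L, d = laplacian(A, return_diag=True)

D = np.diag(np.asarray(A.sum(axis=1)).ravel())   # degree matrix
assert np.allclose(L.toarray(), D - A.toarray())  # L = D - A
assert np.allclose(d, [2.0, 2.0, 2.0])            # vertex degrees on the diagonal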
venv/lib/python3.10/site-packages/sklearn/model_selection/__init__.py ADDED
@@ -0,0 +1,88 @@
+import typing
+
+from ._plot import LearningCurveDisplay, ValidationCurveDisplay
+from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV
+from ._split import (
+    BaseCrossValidator,
+    BaseShuffleSplit,
+    GroupKFold,
+    GroupShuffleSplit,
+    KFold,
+    LeaveOneGroupOut,
+    LeaveOneOut,
+    LeavePGroupsOut,
+    LeavePOut,
+    PredefinedSplit,
+    RepeatedKFold,
+    RepeatedStratifiedKFold,
+    ShuffleSplit,
+    StratifiedGroupKFold,
+    StratifiedKFold,
+    StratifiedShuffleSplit,
+    TimeSeriesSplit,
+    check_cv,
+    train_test_split,
+)
+from ._validation import (
+    cross_val_predict,
+    cross_val_score,
+    cross_validate,
+    learning_curve,
+    permutation_test_score,
+    validation_curve,
+)
+
+if typing.TYPE_CHECKING:
+    # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
+    # TODO: remove this check once the estimator is no longer experimental.
+    from ._search_successive_halving import (  # noqa
+        HalvingGridSearchCV,
+        HalvingRandomSearchCV,
+    )
+
+
+__all__ = [
+    "BaseCrossValidator",
+    "BaseShuffleSplit",
+    "GridSearchCV",
+    "TimeSeriesSplit",
+    "KFold",
+    "GroupKFold",
+    "GroupShuffleSplit",
+    "LeaveOneGroupOut",
+    "LeaveOneOut",
+    "LeavePGroupsOut",
+    "LeavePOut",
+    "RepeatedKFold",
+    "RepeatedStratifiedKFold",
+    "ParameterGrid",
+    "ParameterSampler",
+    "PredefinedSplit",
+    "RandomizedSearchCV",
+    "ShuffleSplit",
+    "StratifiedKFold",
+    "StratifiedGroupKFold",
+    "StratifiedShuffleSplit",
+    "check_cv",
+    "cross_val_predict",
+    "cross_val_score",
+    "cross_validate",
+    "learning_curve",
+    "LearningCurveDisplay",
+    "permutation_test_score",
+    "train_test_split",
+    "validation_curve",
+    "ValidationCurveDisplay",
+]
+
+
+# TODO: remove this check once the estimator is no longer experimental.
+def __getattr__(name):
+    if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}:
+        raise ImportError(
+            f"{name} is experimental and the API might change without any "
+            "deprecation cycle. To use it, you need to explicitly import "
+            "enable_halving_search_cv:\n"
+            "from sklearn.experimental import enable_halving_search_cv"
+        )
+    raise AttributeError(f"module {__name__} has no attribute {name}")
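Note on the `__getattr__` hook above: the halving searches stay hidden behind the experimental flag, so importing them directly from `sklearn.model_selection` raises the ImportError shown. A minimal usage sketch, assuming this vendored scikit-learn is importable; the dataset and parameter grid are illustrative only, not part of this commit:

    # Importing enable_halving_search_cv first is what satisfies the guard above.
    from sklearn.experimental import enable_halving_search_cv  # noqa: F401
    from sklearn.model_selection import HalvingGridSearchCV
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    param_grid = {"C": [0.1, 1.0, 10.0]}  # illustrative values
    search = HalvingGridSearchCV(LogisticRegression(max_iter=1000), param_grid, cv=3)
    search.fit(X, y)
    print(search.best_params_)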
venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.85 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc ADDED
Binary file (30.9 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc ADDED
Binary file (64.7 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc ADDED
Binary file (37.7 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc ADDED
Binary file (84.4 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc ADDED
Binary file (69.4 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/_plot.py ADDED
@@ -0,0 +1,907 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+
5
+ from ..utils import check_matplotlib_support
6
+ from ..utils._plotting import _interval_max_min_ratio, _validate_score_name
7
+ from ._validation import learning_curve, validation_curve
8
+
9
+
10
+ class _BaseCurveDisplay:
11
+ def _plot_curve(
12
+ self,
13
+ x_data,
14
+ *,
15
+ ax=None,
16
+ negate_score=False,
17
+ score_name=None,
18
+ score_type="test",
19
+ log_scale="deprecated",
20
+ std_display_style="fill_between",
21
+ line_kw=None,
22
+ fill_between_kw=None,
23
+ errorbar_kw=None,
24
+ ):
25
+ check_matplotlib_support(f"{self.__class__.__name__}.plot")
26
+
27
+ import matplotlib.pyplot as plt
28
+
29
+ if ax is None:
30
+ _, ax = plt.subplots()
31
+
32
+ if negate_score:
33
+ train_scores, test_scores = -self.train_scores, -self.test_scores
34
+ else:
35
+ train_scores, test_scores = self.train_scores, self.test_scores
36
+
37
+ if std_display_style not in ("errorbar", "fill_between", None):
38
+ raise ValueError(
39
+ f"Unknown std_display_style: {std_display_style}. Should be one of"
40
+ " 'errorbar', 'fill_between', or None."
41
+ )
42
+
43
+ if score_type not in ("test", "train", "both"):
44
+ raise ValueError(
45
+ f"Unknown score_type: {score_type}. Should be one of 'test', "
46
+ "'train', or 'both'."
47
+ )
48
+
49
+ if score_type == "train":
50
+ scores = {"Train": train_scores}
51
+ elif score_type == "test":
52
+ scores = {"Test": test_scores}
53
+ else: # score_type == "both"
54
+ scores = {"Train": train_scores, "Test": test_scores}
55
+
56
+ if std_display_style in ("fill_between", None):
57
+ # plot the mean score
58
+ if line_kw is None:
59
+ line_kw = {}
60
+
61
+ self.lines_ = []
62
+ for line_label, score in scores.items():
63
+ self.lines_.append(
64
+ *ax.plot(
65
+ x_data,
66
+ score.mean(axis=1),
67
+ label=line_label,
68
+ **line_kw,
69
+ )
70
+ )
71
+ self.errorbar_ = None
72
+ self.fill_between_ = None # overwritten below by fill_between
73
+
74
+ if std_display_style == "errorbar":
75
+ if errorbar_kw is None:
76
+ errorbar_kw = {}
77
+
78
+ self.errorbar_ = []
79
+ for line_label, score in scores.items():
80
+ self.errorbar_.append(
81
+ ax.errorbar(
82
+ x_data,
83
+ score.mean(axis=1),
84
+ score.std(axis=1),
85
+ label=line_label,
86
+ **errorbar_kw,
87
+ )
88
+ )
89
+ self.lines_, self.fill_between_ = None, None
90
+ elif std_display_style == "fill_between":
91
+ if fill_between_kw is None:
92
+ fill_between_kw = {}
93
+ default_fill_between_kw = {"alpha": 0.5}
94
+ fill_between_kw = {**default_fill_between_kw, **fill_between_kw}
95
+
96
+ self.fill_between_ = []
97
+ for line_label, score in scores.items():
98
+ self.fill_between_.append(
99
+ ax.fill_between(
100
+ x_data,
101
+ score.mean(axis=1) - score.std(axis=1),
102
+ score.mean(axis=1) + score.std(axis=1),
103
+ **fill_between_kw,
104
+ )
105
+ )
106
+
107
+ score_name = self.score_name if score_name is None else score_name
108
+
109
+ ax.legend()
110
+
111
+ # TODO(1.5): to be removed
112
+ if log_scale != "deprecated":
113
+ warnings.warn(
114
+ (
115
+ "The `log_scale` parameter is deprecated as of version 1.3 "
116
+ "and will be removed in 1.5. You can use display.ax_.set_xscale "
117
+ "and display.ax_.set_yscale instead."
118
+ ),
119
+ FutureWarning,
120
+ )
121
+ xscale = "log" if log_scale else "linear"
122
+ else:
123
+ # We found that a ratio, smaller or bigger than 5, between the largest and
124
+ # smallest gap of the x values is a good indicator to choose between linear
125
+ # and log scale.
126
+ if _interval_max_min_ratio(x_data) > 5:
127
+ xscale = "symlog" if x_data.min() <= 0 else "log"
128
+ else:
129
+ xscale = "linear"
130
+ ax.set_xscale(xscale)
131
+ ax.set_ylabel(f"{score_name}")
132
+
133
+ self.ax_ = ax
134
+ self.figure_ = ax.figure
135
+
136
+
137
+ class LearningCurveDisplay(_BaseCurveDisplay):
138
+ """Learning Curve visualization.
139
+
140
+ It is recommended to use
141
+ :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to
142
+ create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance.
143
+ All parameters are stored as attributes.
144
+
145
+ Read more in the :ref:`User Guide <visualizations>` for general information
146
+ about the visualization API and
147
+ :ref:`detailed documentation <learning_curve>` regarding the learning
148
+ curve visualization.
149
+
150
+ .. versionadded:: 1.2
151
+
152
+ Parameters
153
+ ----------
154
+ train_sizes : ndarray of shape (n_unique_ticks,)
155
+ Numbers of training examples that have been used to generate the
156
+ learning curve.
157
+
158
+ train_scores : ndarray of shape (n_ticks, n_cv_folds)
159
+ Scores on training sets.
160
+
161
+ test_scores : ndarray of shape (n_ticks, n_cv_folds)
162
+ Scores on test set.
163
+
164
+ score_name : str, default=None
165
+ The name of the score used in `learning_curve`. It will override the name
166
+ inferred from the `scoring` parameter. If `scoring` is `None`, we use `"Score"` if
167
+ `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
168
+ string or a callable, we infer the name. We replace `_` by spaces and capitalize
169
+ the first letter. We remove `neg_` and replace it by `"Negative"` if
170
+ `negate_score` is `False` or just remove it otherwise.
171
+
172
+ Attributes
173
+ ----------
174
+ ax_ : matplotlib Axes
175
+ Axes with the learning curve.
176
+
177
+ figure_ : matplotlib Figure
178
+ Figure containing the learning curve.
179
+
180
+ errorbar_ : list of matplotlib Artist or None
181
+ When the `std_display_style` is `"errorbar"`, this is a list of
182
+ `matplotlib.container.ErrorbarContainer` objects. If another style is
183
+ used, `errorbar_` is `None`.
184
+
185
+ lines_ : list of matplotlib Artist or None
186
+ When the `std_display_style` is `"fill_between"`, this is a list of
187
+ `matplotlib.lines.Line2D` objects corresponding to the mean train and
188
+ test scores. If another style is used, `line_` is `None`.
189
+
190
+ fill_between_ : list of matplotlib Artist or None
191
+ When the `std_display_style` is `"fill_between"`, this is a list of
192
+ `matplotlib.collections.PolyCollection` objects. If another style is
193
+ used, `fill_between_` is `None`.
194
+
195
+ See Also
196
+ --------
197
+ sklearn.model_selection.learning_curve : Compute the learning curve.
198
+
199
+ Examples
200
+ --------
201
+ >>> import matplotlib.pyplot as plt
202
+ >>> from sklearn.datasets import load_iris
203
+ >>> from sklearn.model_selection import LearningCurveDisplay, learning_curve
204
+ >>> from sklearn.tree import DecisionTreeClassifier
205
+ >>> X, y = load_iris(return_X_y=True)
206
+ >>> tree = DecisionTreeClassifier(random_state=0)
207
+ >>> train_sizes, train_scores, test_scores = learning_curve(
208
+ ... tree, X, y)
209
+ >>> display = LearningCurveDisplay(train_sizes=train_sizes,
210
+ ... train_scores=train_scores, test_scores=test_scores, score_name="Score")
211
+ >>> display.plot()
212
+ <...>
213
+ >>> plt.show()
214
+ """
215
+
216
+ def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None):
217
+ self.train_sizes = train_sizes
218
+ self.train_scores = train_scores
219
+ self.test_scores = test_scores
220
+ self.score_name = score_name
221
+
222
+ def plot(
223
+ self,
224
+ ax=None,
225
+ *,
226
+ negate_score=False,
227
+ score_name=None,
228
+ score_type="both",
229
+ log_scale="deprecated",
230
+ std_display_style="fill_between",
231
+ line_kw=None,
232
+ fill_between_kw=None,
233
+ errorbar_kw=None,
234
+ ):
235
+ """Plot visualization.
236
+
237
+ Parameters
238
+ ----------
239
+ ax : matplotlib Axes, default=None
240
+ Axes object to plot on. If `None`, a new figure and axes is
241
+ created.
242
+
243
+ negate_score : bool, default=False
244
+ Whether or not to negate the scores obtained through
245
+ :func:`~sklearn.model_selection.learning_curve`. This is
246
+ particularly useful when using the error denoted by `neg_*` in
247
+ `scikit-learn`.
248
+
249
+ score_name : str, default=None
250
+ The name of the score used to decorate the y-axis of the plot. It will
251
+ override the name inferred from the `scoring` parameter. If `scoring` is
252
+ `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
253
+ otherwise. If `scoring` is a string or a callable, we infer the name. We
254
+ replace `_` by spaces and capitalize the first letter. We remove `neg_` and
255
+ replace it by `"Negative"` if `negate_score` is
256
+ `False` or just remove it otherwise.
257
+
258
+ score_type : {"test", "train", "both"}, default="both"
259
+ The type of score to plot. Can be one of `"test"`, `"train"`, or
260
+ `"both"`.
261
+
262
+ log_scale : bool, default="deprecated"
263
+ Whether or not to use a logarithmic scale for the x-axis.
264
+
265
+ .. deprecated:: 1.3
266
+ `log_scale` is deprecated in 1.3 and will be removed in 1.5.
267
+ Use `display.ax_.set_xscale` and `display.ax_.set_yscale` instead.
268
+
269
+ std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
270
+ The style used to display the score standard deviation around the
271
+ mean score. If None, no standard deviation representation is
272
+ displayed.
273
+
274
+ line_kw : dict, default=None
275
+ Additional keyword arguments passed to the `plt.plot` used to draw
276
+ the mean score.
277
+
278
+ fill_between_kw : dict, default=None
279
+ Additional keyword arguments passed to the `plt.fill_between` used
280
+ to draw the score standard deviation.
281
+
282
+ errorbar_kw : dict, default=None
283
+ Additional keyword arguments passed to the `plt.errorbar` used to
284
+ draw mean score and standard deviation score.
285
+
286
+ Returns
287
+ -------
288
+ display : :class:`~sklearn.model_selection.LearningCurveDisplay`
289
+ Object that stores computed values.
290
+ """
291
+ self._plot_curve(
292
+ self.train_sizes,
293
+ ax=ax,
294
+ negate_score=negate_score,
295
+ score_name=score_name,
296
+ score_type=score_type,
297
+ log_scale=log_scale,
298
+ std_display_style=std_display_style,
299
+ line_kw=line_kw,
300
+ fill_between_kw=fill_between_kw,
301
+ errorbar_kw=errorbar_kw,
302
+ )
303
+ self.ax_.set_xlabel("Number of samples in the training set")
304
+ return self
305
+
306
+ @classmethod
307
+ def from_estimator(
308
+ cls,
309
+ estimator,
310
+ X,
311
+ y,
312
+ *,
313
+ groups=None,
314
+ train_sizes=np.linspace(0.1, 1.0, 5),
315
+ cv=None,
316
+ scoring=None,
317
+ exploit_incremental_learning=False,
318
+ n_jobs=None,
319
+ pre_dispatch="all",
320
+ verbose=0,
321
+ shuffle=False,
322
+ random_state=None,
323
+ error_score=np.nan,
324
+ fit_params=None,
325
+ ax=None,
326
+ negate_score=False,
327
+ score_name=None,
328
+ score_type="both",
329
+ log_scale="deprecated",
330
+ std_display_style="fill_between",
331
+ line_kw=None,
332
+ fill_between_kw=None,
333
+ errorbar_kw=None,
334
+ ):
335
+ """Create a learning curve display from an estimator.
336
+
337
+ Read more in the :ref:`User Guide <visualizations>` for general
338
+ information about the visualization API and :ref:`detailed
339
+ documentation <learning_curve>` regarding the learning curve
340
+ visualization.
341
+
342
+ Parameters
343
+ ----------
344
+ estimator : object type that implements the "fit" and "predict" methods
345
+ An object of that type which is cloned for each validation.
346
+
347
+ X : array-like of shape (n_samples, n_features)
348
+ Training data, where `n_samples` is the number of samples and
349
+ `n_features` is the number of features.
350
+
351
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
352
+ Target relative to X for classification or regression;
353
+ None for unsupervised learning.
354
+
355
+ groups : array-like of shape (n_samples,), default=None
356
+ Group labels for the samples used while splitting the dataset into
357
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
358
+ instance (e.g., :class:`GroupKFold`).
359
+
360
+ train_sizes : array-like of shape (n_ticks,), \
361
+ default=np.linspace(0.1, 1.0, 5)
362
+ Relative or absolute numbers of training examples that will be used
363
+ to generate the learning curve. If the dtype is float, it is
364
+ regarded as a fraction of the maximum size of the training set
365
+ (that is determined by the selected validation method), i.e. it has
366
+ to be within (0, 1]. Otherwise it is interpreted as absolute sizes
367
+ of the training sets. Note that for classification the number of
368
+ samples usually has to be big enough to contain at least one
369
+ sample from each class.
370
+
371
+ cv : int, cross-validation generator or an iterable, default=None
372
+ Determines the cross-validation splitting strategy.
373
+ Possible inputs for cv are:
374
+
375
+ - None, to use the default 5-fold cross validation,
376
+ - int, to specify the number of folds in a `(Stratified)KFold`,
377
+ - :term:`CV splitter`,
378
+ - An iterable yielding (train, test) splits as arrays of indices.
379
+
380
+ For int/None inputs, if the estimator is a classifier and `y` is
381
+ either binary or multiclass,
382
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all
383
+ other cases, :class:`~sklearn.model_selection.KFold` is used. These
384
+ splitters are instantiated with `shuffle=False` so the splits will
385
+ be the same across calls.
386
+
387
+ Refer to the :ref:`User Guide <cross_validation>` for the various
388
+ cross-validation strategies that can be used here.
389
+
390
+ scoring : str or callable, default=None
391
+ A string (see :ref:`scoring_parameter`) or
392
+ a scorer callable object / function with signature
393
+ `scorer(estimator, X, y)` (see :ref:`scoring`).
394
+
395
+ exploit_incremental_learning : bool, default=False
396
+ If the estimator supports incremental learning, this will be
397
+ used to speed up fitting for different training set sizes.
398
+
399
+ n_jobs : int, default=None
400
+ Number of jobs to run in parallel. Training the estimator and
401
+ computing the score are parallelized over the different training
402
+ and test sets. `None` means 1 unless in a
403
+ :obj:`joblib.parallel_backend` context. `-1` means using all
404
+ processors. See :term:`Glossary <n_jobs>` for more details.
405
+
406
+ pre_dispatch : int or str, default='all'
407
+ Number of predispatched jobs for parallel execution (default is
408
+ all). The option can reduce the allocated memory. The str can
409
+ be an expression like '2*n_jobs'.
410
+
411
+ verbose : int, default=0
412
+ Controls the verbosity: the higher, the more messages.
413
+
414
+ shuffle : bool, default=False
415
+ Whether to shuffle training data before taking prefixes of it
416
+ based on `train_sizes`.
417
+
418
+ random_state : int, RandomState instance or None, default=None
419
+ Used when `shuffle` is True. Pass an int for reproducible
420
+ output across multiple function calls.
421
+ See :term:`Glossary <random_state>`.
422
+
423
+ error_score : 'raise' or numeric, default=np.nan
424
+ Value to assign to the score if an error occurs in estimator
425
+ fitting. If set to 'raise', the error is raised. If a numeric value
426
+ is given, FitFailedWarning is raised.
427
+
428
+ fit_params : dict, default=None
429
+ Parameters to pass to the fit method of the estimator.
430
+
431
+ ax : matplotlib Axes, default=None
432
+ Axes object to plot on. If `None`, a new figure and axes is
433
+ created.
434
+
435
+ negate_score : bool, default=False
436
+ Whether or not to negate the scores obtained through
437
+ :func:`~sklearn.model_selection.learning_curve`. This is
438
+ particularly useful when using the error denoted by `neg_*` in
439
+ `scikit-learn`.
440
+
441
+ score_name : str, default=None
442
+ The name of the score used to decorate the y-axis of the plot. It will
443
+ override the name inferred from the `scoring` parameter. If `scoring` is
444
+ `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
445
+ otherwise. If `scoring` is a string or a callable, we infer the name. We
446
+ replace `_` by spaces and capitalize the first letter. We remove `neg_` and
447
+ replace it by `"Negative"` if `negate_score` is
448
+ `False` or just remove it otherwise.
449
+
450
+ score_type : {"test", "train", "both"}, default="both"
451
+ The type of score to plot. Can be one of `"test"`, `"train"`, or
452
+ `"both"`.
453
+
454
+ log_scale : bool, default="deprecated"
455
+ Whether or not to use a logarithmic scale for the x-axis.
456
+
457
+ .. deprecated:: 1.3
458
+ `log_scale` is deprecated in 1.3 and will be removed in 1.5.
459
+ Use `display.ax_.set_xscale` and `display.ax_.set_yscale` instead.
460
+
461
+ std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
462
+ The style used to display the score standard deviation around the
463
+ mean score. If `None`, no representation of the standard deviation
464
+ is displayed.
465
+
466
+ line_kw : dict, default=None
467
+ Additional keyword arguments passed to the `plt.plot` used to draw
468
+ the mean score.
469
+
470
+ fill_between_kw : dict, default=None
471
+ Additional keyword arguments passed to the `plt.fill_between` used
472
+ to draw the score standard deviation.
473
+
474
+ errorbar_kw : dict, default=None
475
+ Additional keyword arguments passed to the `plt.errorbar` used to
476
+ draw mean score and standard deviation score.
477
+
478
+ Returns
479
+ -------
480
+ display : :class:`~sklearn.model_selection.LearningCurveDisplay`
481
+ Object that stores computed values.
482
+
483
+ Examples
484
+ --------
485
+ >>> import matplotlib.pyplot as plt
486
+ >>> from sklearn.datasets import load_iris
487
+ >>> from sklearn.model_selection import LearningCurveDisplay
488
+ >>> from sklearn.tree import DecisionTreeClassifier
489
+ >>> X, y = load_iris(return_X_y=True)
490
+ >>> tree = DecisionTreeClassifier(random_state=0)
491
+ >>> LearningCurveDisplay.from_estimator(tree, X, y)
492
+ <...>
493
+ >>> plt.show()
494
+ """
495
+ check_matplotlib_support(f"{cls.__name__}.from_estimator")
496
+
497
+ score_name = _validate_score_name(score_name, scoring, negate_score)
498
+
499
+ train_sizes, train_scores, test_scores = learning_curve(
500
+ estimator,
501
+ X,
502
+ y,
503
+ groups=groups,
504
+ train_sizes=train_sizes,
505
+ cv=cv,
506
+ scoring=scoring,
507
+ exploit_incremental_learning=exploit_incremental_learning,
508
+ n_jobs=n_jobs,
509
+ pre_dispatch=pre_dispatch,
510
+ verbose=verbose,
511
+ shuffle=shuffle,
512
+ random_state=random_state,
513
+ error_score=error_score,
514
+ return_times=False,
515
+ fit_params=fit_params,
516
+ )
517
+
518
+ viz = cls(
519
+ train_sizes=train_sizes,
520
+ train_scores=train_scores,
521
+ test_scores=test_scores,
522
+ score_name=score_name,
523
+ )
524
+ return viz.plot(
525
+ ax=ax,
526
+ negate_score=negate_score,
527
+ score_type=score_type,
528
+ log_scale=log_scale,
529
+ std_display_style=std_display_style,
530
+ line_kw=line_kw,
531
+ fill_between_kw=fill_between_kw,
532
+ errorbar_kw=errorbar_kw,
533
+ )
534
+
535
+
536
+ class ValidationCurveDisplay(_BaseCurveDisplay):
537
+ """Validation Curve visualization.
538
+
539
+ It is recommended to use
540
+ :meth:`~sklearn.model_selection.ValidationCurveDisplay.from_estimator` to
541
+ create a :class:`~sklearn.model_selection.ValidationCurveDisplay` instance.
542
+ All parameters are stored as attributes.
543
+
544
+ Read more in the :ref:`User Guide <visualizations>` for general information
545
+ about the visualization API and :ref:`detailed documentation
546
+ <validation_curve>` regarding the validation curve visualization.
547
+
548
+ .. versionadded:: 1.3
549
+
550
+ Parameters
551
+ ----------
552
+ param_name : str
553
+ Name of the parameter that has been varied.
554
+
555
+ param_range : array-like of shape (n_ticks,)
556
+ The values of the parameter that have been evaluated.
557
+
558
+ train_scores : ndarray of shape (n_ticks, n_cv_folds)
559
+ Scores on training sets.
560
+
561
+ test_scores : ndarray of shape (n_ticks, n_cv_folds)
562
+ Scores on test set.
563
+
564
+ score_name : str, default=None
565
+ The name of the score used in `validation_curve`. It will override the name
566
+ inferred from the `scoring` parameter. If `scoring` is `None`, we use `"Score"` if
567
+ `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
568
+ string or a callable, we infer the name. We replace `_` by spaces and capitalize
569
+ the first letter. We remove `neg_` and replace it by `"Negative"` if
570
+ `negate_score` is `False` or just remove it otherwise.
571
+
572
+ Attributes
573
+ ----------
574
+ ax_ : matplotlib Axes
575
+ Axes with the validation curve.
576
+
577
+ figure_ : matplotlib Figure
578
+ Figure containing the validation curve.
579
+
580
+ errorbar_ : list of matplotlib Artist or None
581
+ When the `std_display_style` is `"errorbar"`, this is a list of
582
+ `matplotlib.container.ErrorbarContainer` objects. If another style is
583
+ used, `errorbar_` is `None`.
584
+
585
+ lines_ : list of matplotlib Artist or None
586
+ When the `std_display_style` is `"fill_between"`, this is a list of
587
+ `matplotlib.lines.Line2D` objects corresponding to the mean train and
588
+ test scores. If another style is used, `line_` is `None`.
589
+
590
+ fill_between_ : list of matplotlib Artist or None
591
+ When the `std_display_style` is `"fill_between"`, this is a list of
592
+ `matplotlib.collections.PolyCollection` objects. If another style is
593
+ used, `fill_between_` is `None`.
594
+
595
+ See Also
596
+ --------
597
+ sklearn.model_selection.validation_curve : Compute the validation curve.
598
+
599
+ Examples
600
+ --------
601
+ >>> import numpy as np
602
+ >>> import matplotlib.pyplot as plt
603
+ >>> from sklearn.datasets import make_classification
604
+ >>> from sklearn.model_selection import ValidationCurveDisplay, validation_curve
605
+ >>> from sklearn.linear_model import LogisticRegression
606
+ >>> X, y = make_classification(n_samples=1_000, random_state=0)
607
+ >>> logistic_regression = LogisticRegression()
608
+ >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
609
+ >>> train_scores, test_scores = validation_curve(
610
+ ... logistic_regression, X, y, param_name=param_name, param_range=param_range
611
+ ... )
612
+ >>> display = ValidationCurveDisplay(
613
+ ... param_name=param_name, param_range=param_range,
614
+ ... train_scores=train_scores, test_scores=test_scores, score_name="Score"
615
+ ... )
616
+ >>> display.plot()
617
+ <...>
618
+ >>> plt.show()
619
+ """
620
+
621
+ def __init__(
622
+ self, *, param_name, param_range, train_scores, test_scores, score_name=None
623
+ ):
624
+ self.param_name = param_name
625
+ self.param_range = param_range
626
+ self.train_scores = train_scores
627
+ self.test_scores = test_scores
628
+ self.score_name = score_name
629
+
630
+ def plot(
631
+ self,
632
+ ax=None,
633
+ *,
634
+ negate_score=False,
635
+ score_name=None,
636
+ score_type="both",
637
+ std_display_style="fill_between",
638
+ line_kw=None,
639
+ fill_between_kw=None,
640
+ errorbar_kw=None,
641
+ ):
642
+ """Plot visualization.
643
+
644
+ Parameters
645
+ ----------
646
+ ax : matplotlib Axes, default=None
647
+ Axes object to plot on. If `None`, a new figure and axes is
648
+ created.
649
+
650
+ negate_score : bool, default=False
651
+ Whether or not to negate the scores obtained through
652
+ :func:`~sklearn.model_selection.validation_curve`. This is
653
+ particularly useful when using the error denoted by `neg_*` in
654
+ `scikit-learn`.
655
+
656
+ score_name : str, default=None
657
+ The name of the score used to decorate the y-axis of the plot. It will
658
+ override the name inferred from the `scoring` parameter. If `scoring` is
659
+ `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
660
+ otherwise. If `scoring` is a string or a callable, we infer the name. We
661
+ replace `_` by spaces and capitalize the first letter. We remove `neg_` and
662
+ replace it by `"Negative"` if `negate_score` is
663
+ `False` or just remove it otherwise.
664
+
665
+ score_type : {"test", "train", "both"}, default="both"
666
+ The type of score to plot. Can be one of `"test"`, `"train"`, or
667
+ `"both"`.
668
+
669
+ std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
670
+ The style used to display the score standard deviation around the
671
+ mean score. If None, no standard deviation representation is
672
+ displayed.
673
+
674
+ line_kw : dict, default=None
675
+ Additional keyword arguments passed to the `plt.plot` used to draw
676
+ the mean score.
677
+
678
+ fill_between_kw : dict, default=None
679
+ Additional keyword arguments passed to the `plt.fill_between` used
680
+ to draw the score standard deviation.
681
+
682
+ errorbar_kw : dict, default=None
683
+ Additional keyword arguments passed to the `plt.errorbar` used to
684
+ draw mean score and standard deviation score.
685
+
686
+ Returns
687
+ -------
688
+ display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
689
+ Object that stores computed values.
690
+ """
691
+ self._plot_curve(
692
+ self.param_range,
693
+ ax=ax,
694
+ negate_score=negate_score,
695
+ score_name=score_name,
696
+ score_type=score_type,
697
+ log_scale="deprecated",
698
+ std_display_style=std_display_style,
699
+ line_kw=line_kw,
700
+ fill_between_kw=fill_between_kw,
701
+ errorbar_kw=errorbar_kw,
702
+ )
703
+ self.ax_.set_xlabel(f"{self.param_name}")
704
+ return self
705
+
706
+ @classmethod
707
+ def from_estimator(
708
+ cls,
709
+ estimator,
710
+ X,
711
+ y,
712
+ *,
713
+ param_name,
714
+ param_range,
715
+ groups=None,
716
+ cv=None,
717
+ scoring=None,
718
+ n_jobs=None,
719
+ pre_dispatch="all",
720
+ verbose=0,
721
+ error_score=np.nan,
722
+ fit_params=None,
723
+ ax=None,
724
+ negate_score=False,
725
+ score_name=None,
726
+ score_type="both",
727
+ std_display_style="fill_between",
728
+ line_kw=None,
729
+ fill_between_kw=None,
730
+ errorbar_kw=None,
731
+ ):
732
+ """Create a validation curve display from an estimator.
733
+
734
+ Read more in the :ref:`User Guide <visualizations>` for general
735
+ information about the visualization API and :ref:`detailed
736
+ documentation <validation_curve>` regarding the validation curve
737
+ visualization.
738
+
739
+ Parameters
740
+ ----------
741
+ estimator : object type that implements the "fit" and "predict" methods
742
+ An object of that type which is cloned for each validation.
743
+
744
+ X : array-like of shape (n_samples, n_features)
745
+ Training data, where `n_samples` is the number of samples and
746
+ `n_features` is the number of features.
747
+
748
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
749
+ Target relative to X for classification or regression;
750
+ None for unsupervised learning.
751
+
752
+ param_name : str
753
+ Name of the parameter that will be varied.
754
+
755
+ param_range : array-like of shape (n_values,)
756
+ The values of the parameter that will be evaluated.
757
+
758
+ groups : array-like of shape (n_samples,), default=None
759
+ Group labels for the samples used while splitting the dataset into
760
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
761
+ instance (e.g., :class:`GroupKFold`).
762
+
763
+ cv : int, cross-validation generator or an iterable, default=None
764
+ Determines the cross-validation splitting strategy.
765
+ Possible inputs for cv are:
766
+
767
+ - None, to use the default 5-fold cross validation,
768
+ - int, to specify the number of folds in a `(Stratified)KFold`,
769
+ - :term:`CV splitter`,
770
+ - An iterable yielding (train, test) splits as arrays of indices.
771
+
772
+ For int/None inputs, if the estimator is a classifier and `y` is
773
+ either binary or multiclass,
774
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all
775
+ other cases, :class:`~sklearn.model_selection.KFold` is used. These
776
+ splitters are instantiated with `shuffle=False` so the splits will
777
+ be the same across calls.
778
+
779
+ Refer to the :ref:`User Guide <cross_validation>` for the various
780
+ cross-validation strategies that can be used here.
781
+
782
+ scoring : str or callable, default=None
783
+ A string (see :ref:`scoring_parameter`) or
784
+ a scorer callable object / function with signature
785
+ `scorer(estimator, X, y)` (see :ref:`scoring`).
786
+
787
+ n_jobs : int, default=None
788
+ Number of jobs to run in parallel. Training the estimator and
789
+ computing the score are parallelized over the different training
790
+ and test sets. `None` means 1 unless in a
791
+ :obj:`joblib.parallel_backend` context. `-1` means using all
792
+ processors. See :term:`Glossary <n_jobs>` for more details.
793
+
794
+ pre_dispatch : int or str, default='all'
795
+ Number of predispatched jobs for parallel execution (default is
796
+ all). The option can reduce the allocated memory. The str can
797
+ be an expression like '2*n_jobs'.
798
+
799
+ verbose : int, default=0
800
+ Controls the verbosity: the higher, the more messages.
801
+
802
+ error_score : 'raise' or numeric, default=np.nan
803
+ Value to assign to the score if an error occurs in estimator
804
+ fitting. If set to 'raise', the error is raised. If a numeric value
805
+ is given, FitFailedWarning is raised.
806
+
807
+ fit_params : dict, default=None
808
+ Parameters to pass to the fit method of the estimator.
809
+
810
+ ax : matplotlib Axes, default=None
811
+ Axes object to plot on. If `None`, a new figure and axes is
812
+ created.
813
+
814
+ negate_score : bool, default=False
815
+ Whether or not to negate the scores obtained through
816
+ :func:`~sklearn.model_selection.validation_curve`. This is
817
+ particularly useful when using the error denoted by `neg_*` in
818
+ `scikit-learn`.
819
+
820
+ score_name : str, default=None
821
+ The name of the score used to decorate the y-axis of the plot. It will
822
+ override the name inferred from the `scoring` parameter. If `scoring` is
823
+ `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
824
+ otherwise. If `scoring` is a string or a callable, we infer the name. We
825
+ replace `_` by spaces and capitalize the first letter. We remove `neg_` and
826
+ replace it by `"Negative"` if `negate_score` is
827
+ `False` or just remove it otherwise.
828
+
829
+ score_type : {"test", "train", "both"}, default="both"
830
+ The type of score to plot. Can be one of `"test"`, `"train"`, or
831
+ `"both"`.
832
+
833
+ std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
834
+ The style used to display the score standard deviation around the
835
+ mean score. If `None`, no representation of the standard deviation
836
+ is displayed.
837
+
838
+ line_kw : dict, default=None
839
+ Additional keyword arguments passed to the `plt.plot` used to draw
840
+ the mean score.
841
+
842
+ fill_between_kw : dict, default=None
843
+ Additional keyword arguments passed to the `plt.fill_between` used
844
+ to draw the score standard deviation.
845
+
846
+ errorbar_kw : dict, default=None
847
+ Additional keyword arguments passed to the `plt.errorbar` used to
848
+ draw mean score and standard deviation score.
849
+
850
+ Returns
851
+ -------
852
+ display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
853
+ Object that stores computed values.
854
+
855
+ Examples
856
+ --------
857
+ >>> import numpy as np
858
+ >>> import matplotlib.pyplot as plt
859
+ >>> from sklearn.datasets import make_classification
860
+ >>> from sklearn.model_selection import ValidationCurveDisplay
861
+ >>> from sklearn.linear_model import LogisticRegression
862
+ >>> X, y = make_classification(n_samples=1_000, random_state=0)
863
+ >>> logistic_regression = LogisticRegression()
864
+ >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
865
+ >>> ValidationCurveDisplay.from_estimator(
866
+ ... logistic_regression, X, y, param_name=param_name,
867
+ ... param_range=param_range,
868
+ ... )
869
+ <...>
870
+ >>> plt.show()
871
+ """
872
+ check_matplotlib_support(f"{cls.__name__}.from_estimator")
873
+
874
+ score_name = _validate_score_name(score_name, scoring, negate_score)
875
+
876
+ train_scores, test_scores = validation_curve(
877
+ estimator,
878
+ X,
879
+ y,
880
+ param_name=param_name,
881
+ param_range=param_range,
882
+ groups=groups,
883
+ cv=cv,
884
+ scoring=scoring,
885
+ n_jobs=n_jobs,
886
+ pre_dispatch=pre_dispatch,
887
+ verbose=verbose,
888
+ error_score=error_score,
889
+ fit_params=fit_params,
890
+ )
891
+
892
+ viz = cls(
893
+ param_name=param_name,
894
+ param_range=np.asarray(param_range),
895
+ train_scores=train_scores,
896
+ test_scores=test_scores,
897
+ score_name=score_name,
898
+ )
899
+ return viz.plot(
900
+ ax=ax,
901
+ negate_score=negate_score,
902
+ score_type=score_type,
903
+ std_display_style=std_display_style,
904
+ line_kw=line_kw,
905
+ fill_between_kw=fill_between_kw,
906
+ errorbar_kw=errorbar_kw,
907
+ )
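Both display classes above funnel into `_BaseCurveDisplay._plot_curve`, so they accept the same rendering keywords. A short sketch, assuming matplotlib is installed; the estimator, scorer, and keyword choices are illustrative, showing `std_display_style="errorbar"` together with a negated `neg_*` metric:

    import matplotlib.pyplot as plt
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    from sklearn.model_selection import LearningCurveDisplay

    X, y = make_regression(n_samples=200, random_state=0)
    # negate_score=True flips neg_mean_absolute_error back into an error,
    # and the per-fold spread is drawn as error bars instead of a filled band.
    LearningCurveDisplay.from_estimator(
        Ridge(),
        X,
        y,
        scoring="neg_mean_absolute_error",
        negate_score=True,
        score_name="Mean absolute error",
        std_display_style="errorbar",
    )
    plt.show()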
venv/lib/python3.10/site-packages/sklearn/model_selection/_search.py ADDED
@@ -0,0 +1,1918 @@
1
+ """
2
+ The :mod:`sklearn.model_selection._search` module includes utilities to fine-tune the
3
+ parameters of an estimator.
4
+ """
5
+
6
+ # Author: Alexandre Gramfort <[email protected]>,
7
+ # Gael Varoquaux <[email protected]>
8
+ # Andreas Mueller <[email protected]>
9
+ # Olivier Grisel <[email protected]>
10
+ # Raghav RV <[email protected]>
11
+ # License: BSD 3 clause
12
+
13
+ import numbers
14
+ import operator
15
+ import time
16
+ import warnings
17
+ from abc import ABCMeta, abstractmethod
18
+ from collections import defaultdict
19
+ from collections.abc import Iterable, Mapping, Sequence
20
+ from functools import partial, reduce
21
+ from itertools import product
22
+
23
+ import numpy as np
24
+ from numpy.ma import MaskedArray
25
+ from scipy.stats import rankdata
26
+
27
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
28
+ from ..exceptions import NotFittedError
29
+ from ..metrics import check_scoring
30
+ from ..metrics._scorer import (
31
+ _check_multimetric_scoring,
32
+ _MultimetricScorer,
33
+ get_scorer_names,
34
+ )
35
+ from ..utils import Bunch, check_random_state
36
+ from ..utils._param_validation import HasMethods, Interval, StrOptions
37
+ from ..utils._tags import _safe_tags
38
+ from ..utils.metadata_routing import (
39
+ MetadataRouter,
40
+ MethodMapping,
41
+ _raise_for_params,
42
+ _routing_enabled,
43
+ process_routing,
44
+ )
45
+ from ..utils.metaestimators import available_if
46
+ from ..utils.parallel import Parallel, delayed
47
+ from ..utils.random import sample_without_replacement
48
+ from ..utils.validation import _check_method_params, check_is_fitted, indexable
49
+ from ._split import check_cv
50
+ from ._validation import (
51
+ _aggregate_score_dicts,
52
+ _fit_and_score,
53
+ _insert_error_scores,
54
+ _normalize_score_results,
55
+ _warn_or_raise_about_fit_failures,
56
+ )
57
+
58
+ __all__ = ["GridSearchCV", "ParameterGrid", "ParameterSampler", "RandomizedSearchCV"]
59
+
60
+
61
+ class ParameterGrid:
62
+ """Grid of parameters with a discrete number of values for each.
63
+
64
+ Can be used to iterate over parameter value combinations with the
65
+ Python built-in function iter.
66
+ The order of the generated parameter combinations is deterministic.
67
+
68
+ Read more in the :ref:`User Guide <grid_search>`.
69
+
70
+ Parameters
71
+ ----------
72
+ param_grid : dict of str to sequence, or sequence of such
73
+ The parameter grid to explore, as a dictionary mapping estimator
74
+ parameters to sequences of allowed values.
75
+
76
+ An empty dict signifies default parameters.
77
+
78
+ A sequence of dicts signifies a sequence of grids to search, and is
79
+ useful to avoid exploring parameter combinations that make no sense
80
+ or have no effect. See the examples below.
81
+
82
+ Examples
83
+ --------
84
+ >>> from sklearn.model_selection import ParameterGrid
85
+ >>> param_grid = {'a': [1, 2], 'b': [True, False]}
86
+ >>> list(ParameterGrid(param_grid)) == (
87
+ ... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
88
+ ... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
89
+ True
90
+
91
+ >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
92
+ >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
93
+ ... {'kernel': 'rbf', 'gamma': 1},
94
+ ... {'kernel': 'rbf', 'gamma': 10}]
95
+ True
96
+ >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
97
+ True
98
+
99
+ See Also
100
+ --------
101
+ GridSearchCV : Uses :class:`ParameterGrid` to perform a full parallelized
102
+ parameter search.
103
+ """
104
+
105
+ def __init__(self, param_grid):
106
+ if not isinstance(param_grid, (Mapping, Iterable)):
107
+ raise TypeError(
108
+ f"Parameter grid should be a dict or a list, got: {param_grid!r} of"
109
+ f" type {type(param_grid).__name__}"
110
+ )
111
+
112
+ if isinstance(param_grid, Mapping):
113
+ # wrap dictionary in a singleton list to support either dict
114
+ # or list of dicts
115
+ param_grid = [param_grid]
116
+
117
+ # check if all entries are dictionaries of lists
118
+ for grid in param_grid:
119
+ if not isinstance(grid, dict):
120
+ raise TypeError(f"Parameter grid is not a dict ({grid!r})")
121
+ for key, value in grid.items():
122
+ if isinstance(value, np.ndarray) and value.ndim > 1:
123
+ raise ValueError(
124
+ f"Parameter array for {key!r} should be one-dimensional, got:"
125
+ f" {value!r} with shape {value.shape}"
126
+ )
127
+ if isinstance(value, str) or not isinstance(
128
+ value, (np.ndarray, Sequence)
129
+ ):
130
+ raise TypeError(
131
+ f"Parameter grid for parameter {key!r} needs to be a list or a"
132
+ f" numpy array, but got {value!r} (of type "
133
+ f"{type(value).__name__}) instead. Single values "
134
+ "need to be wrapped in a list with one element."
135
+ )
136
+ if len(value) == 0:
137
+ raise ValueError(
138
+ f"Parameter grid for parameter {key!r} need "
139
+ f"to be a non-empty sequence, got: {value!r}"
140
+ )
141
+
142
+ self.param_grid = param_grid
143
+
144
+ def __iter__(self):
145
+ """Iterate over the points in the grid.
146
+
147
+ Returns
148
+ -------
149
+ params : iterator over dict of str to any
150
+ Yields dictionaries mapping each estimator parameter to one of its
151
+ allowed values.
152
+ """
153
+ for p in self.param_grid:
154
+ # Always sort the keys of a dictionary, for reproducibility
155
+ items = sorted(p.items())
156
+ if not items:
157
+ yield {}
158
+ else:
159
+ keys, values = zip(*items)
160
+ for v in product(*values):
161
+ params = dict(zip(keys, v))
162
+ yield params
163
+
164
+ def __len__(self):
165
+ """Number of points on the grid."""
166
+ # Product function that can handle iterables (np.prod can't).
167
+ product = partial(reduce, operator.mul)
168
+ return sum(
169
+ product(len(v) for v in p.values()) if p else 1 for p in self.param_grid
170
+ )
171
+
172
+ def __getitem__(self, ind):
173
+ """Get the parameters that would be ``ind``th in iteration
174
+
175
+ Parameters
176
+ ----------
177
+ ind : int
178
+ The iteration index
179
+
180
+ Returns
181
+ -------
182
+ params : dict of str to any
183
+ Equal to list(self)[ind]
184
+ """
185
+ # This is used to make discrete sampling without replacement memory
186
+ # efficient.
187
+ for sub_grid in self.param_grid:
188
+ # XXX: could memoize information used here
189
+ if not sub_grid:
190
+ if ind == 0:
191
+ return {}
192
+ else:
193
+ ind -= 1
194
+ continue
195
+
196
+ # Reverse so most frequent cycling parameter comes first
197
+ keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
198
+ sizes = [len(v_list) for v_list in values_lists]
199
+ total = np.prod(sizes)
200
+
201
+ if ind >= total:
202
+ # Try the next grid
203
+ ind -= total
204
+ else:
205
+ out = {}
206
+ for key, v_list, n in zip(keys, values_lists, sizes):
207
+ ind, offset = divmod(ind, n)
208
+ out[key] = v_list[offset]
209
+ return out
210
+
211
+ raise IndexError("ParameterGrid index out of range")
212
+
213
+
214
+ class ParameterSampler:
215
+ """Generator on parameters sampled from given distributions.
216
+
217
+ Non-deterministic iterable over random candidate combinations for hyper-
218
+ parameter search. If all parameters are presented as a list,
219
+ sampling without replacement is performed. If at least one parameter
220
+ is given as a distribution, sampling with replacement is used.
221
+ It is highly recommended to use continuous distributions for continuous
222
+ parameters.
223
+
224
+ Read more in the :ref:`User Guide <grid_search>`.
225
+
226
+ Parameters
227
+ ----------
228
+ param_distributions : dict
229
+ Dictionary with parameter names (`str`) as keys and distributions
230
+ or lists of parameters to try. Distributions must provide a ``rvs``
231
+ method for sampling (such as those from scipy.stats.distributions).
232
+ If a list is given, it is sampled uniformly.
233
+ If a list of dicts is given, first a dict is sampled uniformly, and
234
+ then a parameter is sampled using that dict as above.
235
+
236
+ n_iter : int
237
+ Number of parameter settings that are produced.
238
+
239
+ random_state : int, RandomState instance or None, default=None
240
+ Pseudo random number generator state used for random uniform sampling
241
+ from lists of possible values instead of scipy.stats distributions.
242
+ Pass an int for reproducible output across multiple
243
+ function calls.
244
+ See :term:`Glossary <random_state>`.
245
+
246
+ Returns
247
+ -------
248
+ params : dict of str to any
249
+ **Yields** dictionaries mapping each estimator parameter to
250
+ a sampled value.
251
+
252
+ Examples
253
+ --------
254
+ >>> from sklearn.model_selection import ParameterSampler
255
+ >>> from scipy.stats.distributions import expon
256
+ >>> import numpy as np
257
+ >>> rng = np.random.RandomState(0)
258
+ >>> param_grid = {'a':[1, 2], 'b': expon()}
259
+ >>> param_list = list(ParameterSampler(param_grid, n_iter=4,
260
+ ... random_state=rng))
261
+ >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
262
+ ... for d in param_list]
263
+ >>> rounded_list == [{'b': 0.89856, 'a': 1},
264
+ ... {'b': 0.923223, 'a': 1},
265
+ ... {'b': 1.878964, 'a': 2},
266
+ ... {'b': 1.038159, 'a': 2}]
267
+ True
268
+ """
269
+
270
+ def __init__(self, param_distributions, n_iter, *, random_state=None):
271
+ if not isinstance(param_distributions, (Mapping, Iterable)):
272
+ raise TypeError(
273
+ "Parameter distribution is not a dict or a list,"
274
+ f" got: {param_distributions!r} of type "
275
+ f"{type(param_distributions).__name__}"
276
+ )
277
+
278
+ if isinstance(param_distributions, Mapping):
279
+ # wrap dictionary in a singleton list to support either dict
280
+ # or list of dicts
281
+ param_distributions = [param_distributions]
282
+
283
+ for dist in param_distributions:
284
+ if not isinstance(dist, dict):
285
+ raise TypeError(
286
+ "Parameter distribution is not a dict ({!r})".format(dist)
287
+ )
288
+ for key in dist:
289
+ if not isinstance(dist[key], Iterable) and not hasattr(
290
+ dist[key], "rvs"
291
+ ):
292
+ raise TypeError(
293
+ f"Parameter grid for parameter {key!r} is not iterable "
294
+ f"or a distribution (value={dist[key]})"
295
+ )
296
+ self.n_iter = n_iter
297
+ self.random_state = random_state
298
+ self.param_distributions = param_distributions
299
+
300
+ def _is_all_lists(self):
301
+ return all(
302
+ all(not hasattr(v, "rvs") for v in dist.values())
303
+ for dist in self.param_distributions
304
+ )
305
+
306
+ def __iter__(self):
307
+ rng = check_random_state(self.random_state)
308
+
309
+ # if all distributions are given as lists, we want to sample without
310
+ # replacement
311
+ if self._is_all_lists():
312
+ # look up sampled parameter settings in parameter grid
313
+ param_grid = ParameterGrid(self.param_distributions)
314
+ grid_size = len(param_grid)
315
+ n_iter = self.n_iter
316
+
317
+ if grid_size < n_iter:
318
+ warnings.warn(
319
+ "The total space of parameters %d is smaller "
320
+ "than n_iter=%d. Running %d iterations. For exhaustive "
321
+ "searches, use GridSearchCV." % (grid_size, self.n_iter, grid_size),
322
+ UserWarning,
323
+ )
324
+ n_iter = grid_size
325
+ for i in sample_without_replacement(grid_size, n_iter, random_state=rng):
326
+ yield param_grid[i]
327
+
328
+ else:
329
+ for _ in range(self.n_iter):
330
+ dist = rng.choice(self.param_distributions)
331
+ # Always sort the keys of a dictionary, for reproducibility
332
+ items = sorted(dist.items())
333
+ params = dict()
334
+ for k, v in items:
335
+ if hasattr(v, "rvs"):
336
+ params[k] = v.rvs(random_state=rng)
337
+ else:
338
+ params[k] = v[rng.randint(len(v))]
339
+ yield params
340
+
341
+ def __len__(self):
342
+ """Number of points that will be sampled."""
343
+ if self._is_all_lists():
344
+ grid_size = len(ParameterGrid(self.param_distributions))
345
+ return min(self.n_iter, grid_size)
346
+ else:
347
+ return self.n_iter
348
+
349
+
350
+ def _check_refit(search_cv, attr):
351
+ if not search_cv.refit:
352
+ raise AttributeError(
353
+ f"This {type(search_cv).__name__} instance was initialized with "
354
+ f"`refit=False`. {attr} is available only after refitting on the best "
355
+ "parameters. You can refit an estimator manually using the "
356
+ "`best_params_` attribute"
357
+ )
358
+
359
+
360
+ def _estimator_has(attr):
361
+ """Check if we can delegate a method to the underlying estimator.
362
+
363
+ Calling a prediction method will only be available if `refit=True`. In
364
+ such case, we check first the fitted best estimator. If it is not
365
+ fitted, we check the unfitted estimator.
366
+
367
+ Checking the unfitted estimator makes it possible to use `hasattr` on the `SearchCV`
368
+ instance even before calling `fit`.
369
+ """
370
+
371
+ def check(self):
372
+ _check_refit(self, attr)
373
+ if hasattr(self, "best_estimator_"):
374
+ # raise an AttributeError if `attr` does not exist
375
+ getattr(self.best_estimator_, attr)
376
+ return True
377
+ # raise an AttributeError if `attr` does not exist
378
+ getattr(self.estimator, attr)
379
+ return True
380
+
381
+ return check
382
+
383
+
384
+ class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
385
+ """Abstract base class for hyper parameter search with cross-validation."""
386
+
387
+ _parameter_constraints: dict = {
388
+ "estimator": [HasMethods(["fit"])],
389
+ "scoring": [
390
+ StrOptions(set(get_scorer_names())),
391
+ callable,
392
+ list,
393
+ tuple,
394
+ dict,
395
+ None,
396
+ ],
397
+ "n_jobs": [numbers.Integral, None],
398
+ "refit": ["boolean", str, callable],
399
+ "cv": ["cv_object"],
400
+ "verbose": ["verbose"],
401
+ "pre_dispatch": [numbers.Integral, str],
402
+ "error_score": [StrOptions({"raise"}), numbers.Real],
403
+ "return_train_score": ["boolean"],
404
+ }
405
+
406
+ @abstractmethod
407
+ def __init__(
408
+ self,
409
+ estimator,
410
+ *,
411
+ scoring=None,
412
+ n_jobs=None,
413
+ refit=True,
414
+ cv=None,
415
+ verbose=0,
416
+ pre_dispatch="2*n_jobs",
417
+ error_score=np.nan,
418
+ return_train_score=True,
419
+ ):
420
+ self.scoring = scoring
421
+ self.estimator = estimator
422
+ self.n_jobs = n_jobs
423
+ self.refit = refit
424
+ self.cv = cv
425
+ self.verbose = verbose
426
+ self.pre_dispatch = pre_dispatch
427
+ self.error_score = error_score
428
+ self.return_train_score = return_train_score
429
+
430
+ @property
431
+ def _estimator_type(self):
432
+ return self.estimator._estimator_type
433
+
434
+ def _more_tags(self):
435
+ # allows cross-validation to see 'precomputed' metrics
436
+ return {
437
+ "pairwise": _safe_tags(self.estimator, "pairwise"),
438
+ "_xfail_checks": {
439
+ "check_supervised_y_2d": "DataConversionWarning not caught"
440
+ },
441
+ }
442
+
443
+ def score(self, X, y=None, **params):
444
+ """Return the score on the given data, if the estimator has been refit.
445
+
446
+ This uses the score defined by ``scoring`` where provided, and the
447
+ ``best_estimator_.score`` method otherwise.
448
+
449
+ Parameters
450
+ ----------
451
+ X : array-like of shape (n_samples, n_features)
452
+ Input data, where `n_samples` is the number of samples and
453
+ `n_features` is the number of features.
454
+
455
+ y : array-like of shape (n_samples, n_output) \
456
+ or (n_samples,), default=None
457
+ Target relative to X for classification or regression;
458
+ None for unsupervised learning.
459
+
460
+ **params : dict
461
+ Parameters to be passed to the underlying scorer(s).
462
+
463
+ .. versionadded:: 1.4
464
+ Only available if `enable_metadata_routing=True`. See
465
+ :ref:`Metadata Routing User Guide <metadata_routing>` for more
466
+ details.
467
+
468
+ Returns
469
+ -------
470
+ score : float
471
+ The score defined by ``scoring`` if provided, and the
472
+ ``best_estimator_.score`` method otherwise.
473
+ """
474
+ _check_refit(self, "score")
475
+ check_is_fitted(self)
476
+
477
+ _raise_for_params(params, self, "score")
478
+
479
+ if _routing_enabled():
480
+ score_params = process_routing(self, "score", **params).scorer["score"]
481
+ else:
482
+ score_params = dict()
483
+
484
+ if self.scorer_ is None:
485
+ raise ValueError(
486
+ "No score function explicitly defined, "
487
+ "and the estimator doesn't provide one %s"
488
+ % self.best_estimator_
489
+ )
490
+ if isinstance(self.scorer_, dict):
491
+ if self.multimetric_:
492
+ scorer = self.scorer_[self.refit]
493
+ else:
494
+ scorer = self.scorer_
495
+ return scorer(self.best_estimator_, X, y, **score_params)
496
+
497
+ # callable
498
+ score = self.scorer_(self.best_estimator_, X, y, **score_params)
499
+ if self.multimetric_:
500
+ score = score[self.refit]
501
+ return score
502
+
503
+ @available_if(_estimator_has("score_samples"))
504
+ def score_samples(self, X):
505
+ """Call score_samples on the estimator with the best found parameters.
506
+
507
+ Only available if ``refit=True`` and the underlying estimator supports
508
+ ``score_samples``.
509
+
510
+ .. versionadded:: 0.24
511
+
512
+ Parameters
513
+ ----------
514
+ X : iterable
515
+ Data to predict on. Must fulfill input requirements
516
+ of the underlying estimator.
517
+
518
+ Returns
519
+ -------
520
+ y_score : ndarray of shape (n_samples,)
521
+ The result of calling ``best_estimator_.score_samples`` on `X`.
522
+ """
523
+ check_is_fitted(self)
524
+ return self.best_estimator_.score_samples(X)
525
+
526
+ @available_if(_estimator_has("predict"))
527
+ def predict(self, X):
528
+ """Call predict on the estimator with the best found parameters.
529
+
530
+ Only available if ``refit=True`` and the underlying estimator supports
531
+ ``predict``.
532
+
533
+ Parameters
534
+ ----------
535
+ X : indexable, length n_samples
536
+ Must fulfill the input assumptions of the
537
+ underlying estimator.
538
+
539
+ Returns
540
+ -------
541
+ y_pred : ndarray of shape (n_samples,)
542
+ The predicted labels or values for `X` based on the estimator with
543
+ the best found parameters.
544
+ """
545
+ check_is_fitted(self)
546
+ return self.best_estimator_.predict(X)
547
+
548
+ @available_if(_estimator_has("predict_proba"))
549
+ def predict_proba(self, X):
550
+ """Call predict_proba on the estimator with the best found parameters.
551
+
552
+ Only available if ``refit=True`` and the underlying estimator supports
553
+ ``predict_proba``.
554
+
555
+ Parameters
556
+ ----------
557
+ X : indexable, length n_samples
558
+ Must fulfill the input assumptions of the
559
+ underlying estimator.
560
+
561
+ Returns
562
+ -------
563
+ y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
564
+ Predicted class probabilities for `X` based on the estimator with
565
+ the best found parameters. The order of the classes corresponds
566
+ to that in the fitted attribute :term:`classes_`.
567
+ """
568
+ check_is_fitted(self)
569
+ return self.best_estimator_.predict_proba(X)
570
+
571
+ @available_if(_estimator_has("predict_log_proba"))
572
+ def predict_log_proba(self, X):
573
+ """Call predict_log_proba on the estimator with the best found parameters.
574
+
575
+ Only available if ``refit=True`` and the underlying estimator supports
576
+ ``predict_log_proba``.
577
+
578
+ Parameters
579
+ ----------
580
+ X : indexable, length n_samples
581
+ Must fulfill the input assumptions of the
582
+ underlying estimator.
583
+
584
+ Returns
585
+ -------
586
+ y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
587
+ Predicted class log-probabilities for `X` based on the estimator
588
+ with the best found parameters. The order of the classes
589
+ corresponds to that in the fitted attribute :term:`classes_`.
590
+ """
591
+ check_is_fitted(self)
592
+ return self.best_estimator_.predict_log_proba(X)
593
+
594
+ @available_if(_estimator_has("decision_function"))
595
+ def decision_function(self, X):
596
+ """Call decision_function on the estimator with the best found parameters.
597
+
598
+ Only available if ``refit=True`` and the underlying estimator supports
599
+ ``decision_function``.
600
+
601
+ Parameters
602
+ ----------
603
+ X : indexable, length n_samples
604
+ Must fulfill the input assumptions of the
605
+ underlying estimator.
606
+
607
+ Returns
608
+ -------
609
+ y_score : ndarray of shape (n_samples,) or (n_samples, n_classes) \
610
+ or (n_samples, n_classes * (n_classes-1) / 2)
611
+ Result of the decision function for `X` based on the estimator with
612
+ the best found parameters.
613
+ """
614
+ check_is_fitted(self)
615
+ return self.best_estimator_.decision_function(X)
616
+
617
+ @available_if(_estimator_has("transform"))
618
+ def transform(self, X):
619
+ """Call transform on the estimator with the best found parameters.
620
+
621
+ Only available if the underlying estimator supports ``transform`` and
622
+ ``refit=True``.
623
+
624
+ Parameters
625
+ ----------
626
+ X : indexable, length n_samples
627
+ Must fulfill the input assumptions of the
628
+ underlying estimator.
629
+
630
+ Returns
631
+ -------
632
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
633
+ `X` transformed in the new space based on the estimator with
634
+ the best found parameters.
635
+ """
636
+ check_is_fitted(self)
637
+ return self.best_estimator_.transform(X)
638
+
639
+ @available_if(_estimator_has("inverse_transform"))
640
+ def inverse_transform(self, Xt):
641
+ """Call inverse_transform on the estimator with the best found params.
642
+
643
+ Only available if the underlying estimator implements
644
+ ``inverse_transform`` and ``refit=True``.
645
+
646
+ Parameters
647
+ ----------
648
+ Xt : indexable, length n_samples
649
+ Must fulfill the input assumptions of the
650
+ underlying estimator.
651
+
652
+ Returns
653
+ -------
654
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
655
+ Result of the `inverse_transform` function for `Xt` based on the
656
+ estimator with the best found parameters.
657
+ """
658
+ check_is_fitted(self)
659
+ return self.best_estimator_.inverse_transform(Xt)
660
+
661
+ @property
662
+ def n_features_in_(self):
663
+ """Number of features seen during :term:`fit`.
664
+
665
+ Only available when `refit=True`.
666
+ """
667
+ # For consistency with other estimators we raise an AttributeError so
668
+ # that hasattr() fails if the search estimator isn't fitted.
669
+ try:
670
+ check_is_fitted(self)
671
+ except NotFittedError as nfe:
672
+ raise AttributeError(
673
+ "{} object has no n_features_in_ attribute.".format(
674
+ self.__class__.__name__
675
+ )
676
+ ) from nfe
677
+
678
+ return self.best_estimator_.n_features_in_
679
+
680
+ @property
681
+ def classes_(self):
682
+ """Class labels.
683
+
684
+ Only available when `refit=True` and the estimator is a classifier.
685
+ """
686
+ _estimator_has("classes_")(self)
687
+ return self.best_estimator_.classes_
688
+
689
+ def _run_search(self, evaluate_candidates):
690
+ """Repeatedly calls `evaluate_candidates` to conduct a search.
691
+
692
+ This method, implemented in sub-classes, makes it possible to
693
+ customize the scheduling of evaluations: GridSearchCV and
694
+ RandomizedSearchCV schedule evaluations for their whole parameter
695
+ search space at once but other more sequential approaches are also
696
+ possible: for instance, it is possible to iteratively schedule evaluations
697
+ for new regions of the parameter search space based on previously
698
+ collected evaluation results. This makes it possible to implement
699
+ Bayesian optimization or more generally sequential model-based
700
+ optimization by deriving from the BaseSearchCV abstract base class.
701
+ For example, Successive Halving is implemented by calling
702
+ `evaluate_candidates` multiples times (once per iteration of the SH
703
+ process), each time passing a different set of candidates with `X`
704
+ and `y` of increasing sizes.
705
+
706
+ Parameters
707
+ ----------
708
+ evaluate_candidates : callable
709
+ This callback accepts:
710
+ - a list of candidates, where each candidate is a dict of
711
+ parameter settings.
712
+ - an optional `cv` parameter which can be used to e.g.
713
+ evaluate candidates on different dataset splits, or
714
+ evaluate candidates on subsampled data (as done in the
715
+ SuccessiveHalving estimators). By default, the original `cv`
716
+ parameter is used, and it is available as a private
717
+ `_checked_cv_orig` attribute.
718
+ - an optional `more_results` dict. Each key will be added to
719
+ the `cv_results_` attribute. Values should be lists of
720
+ length `n_candidates`
721
+
722
+ It returns a dict of all results so far, formatted like
723
+ ``cv_results_``.
724
+
725
+ Important note (relevant whether the default cv is used or not):
726
+ in randomized splitters, and unless the random_state parameter of
727
+ cv was set to an int, calling cv.split() multiple times will
728
+ yield different splits. Since cv.split() is called in
729
+ evaluate_candidates, this means that candidates will be evaluated
730
+ on different splits each time evaluate_candidates is called. This
731
+ might be a methodological issue depending on the search strategy
732
+ that you're implementing. To prevent randomized splitters from
733
+ being used, you may use _split._yields_constant_splits()
734
+
735
+ Examples
736
+ --------
737
+
738
+ ::
739
+
740
+ def _run_search(self, evaluate_candidates):
741
+ 'Try C=0.1 only if C=1 is better than C=10'
742
+ all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
743
+ score = all_results['mean_test_score']
744
+ if score[0] < score[1]:
745
+ evaluate_candidates([{'C': 0.1}])
746
+ """
747
+ raise NotImplementedError("_run_search not implemented.")
748
+
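The docstring above describes how subclasses drive the search by calling `evaluate_candidates`; the following is a minimal sketch (not scikit-learn code; the class name and two-stage strategy are hypothetical, and it assumes an estimator with a `C` parameter such as `SVC`):

    from sklearn.model_selection._search import BaseSearchCV

    class TwoStageSearchCV(BaseSearchCV):  # hypothetical illustration only
        def __init__(self, estimator, *, scoring=None, cv=None, refit=True):
            super().__init__(estimator=estimator, scoring=scoring, cv=cv, refit=refit)

        def _run_search(self, evaluate_candidates):
            # coarse pass first; results accumulate in cv_results_ order
            results = evaluate_candidates([{"C": 1}, {"C": 10}])
            if results["mean_test_score"][0] >= results["mean_test_score"][1]:
                # refine around the smaller C only when it looked at least as good
                evaluate_candidates([{"C": 0.1}, {"C": 0.5}])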
749
+ def _check_refit_for_multimetric(self, scores):
750
+ """Check that `refit` is compatible with the given `scores`."""
751
+ multimetric_refit_msg = (
752
+ "For multi-metric scoring, the parameter refit must be set to a "
753
+ "scorer key or a callable to refit an estimator with the best "
754
+ "parameter setting on the whole data and make the best_* "
755
+ "attributes available for that metric. If this is not needed, "
756
+ f"refit should be set to False explicitly. {self.refit!r} was "
757
+ "passed."
758
+ )
759
+
760
+ valid_refit_dict = isinstance(self.refit, str) and self.refit in scores
761
+
762
+ if (
763
+ self.refit is not False
764
+ and not valid_refit_dict
765
+ and not callable(self.refit)
766
+ ):
767
+ raise ValueError(multimetric_refit_msg)
768
+
769
+ @staticmethod
770
+ def _select_best_index(refit, refit_metric, results):
771
+ """Select the index of the best combination of hyperparameters."""
772
+ if callable(refit):
773
+ # If callable, refit is expected to return the index of the best
774
+ # parameter set.
775
+ best_index = refit(results)
776
+ if not isinstance(best_index, numbers.Integral):
777
+ raise TypeError("best_index_ returned is not an integer")
778
+ if best_index < 0 or best_index >= len(results["params"]):
779
+ raise IndexError("best_index_ index out of range")
780
+ else:
781
+ best_index = results[f"rank_test_{refit_metric}"].argmin()
782
+ return best_index
783
+
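As a hedged illustration of the callable-`refit` contract enforced in `_select_best_index` above (the callable receives `cv_results_` and must return an integer index into it), one might pick the fastest candidate whose mean test score is within one standard deviation of the best; the helper name is purely illustrative:

    import numpy as np

    def refit_fastest_within_one_std(cv_results):  # hypothetical helper
        means = np.asarray(cv_results["mean_test_score"])
        stds = np.asarray(cv_results["std_test_score"])
        threshold = means.max() - stds[means.argmax()]
        eligible = np.flatnonzero(means >= threshold)
        fit_times = np.asarray(cv_results["mean_fit_time"])
        return int(eligible[fit_times[eligible].argmin()])

    # e.g. GridSearchCV(SVC(), param_grid, refit=refit_fastest_within_one_std)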
784
+ def _get_scorers(self, convert_multimetric):
785
+ """Get the scorer(s) to be used.
786
+
787
+ This is used in ``fit`` and ``get_metadata_routing``.
788
+
789
+ Parameters
790
+ ----------
791
+ convert_multimetric : bool
792
+ Whether to convert a dict of scorers to a _MultimetricScorer. This
793
+ is used in ``get_metadata_routing`` to include the routing info for
794
+ multiple scorers.
795
+
796
+ Returns
797
+ -------
798
+ scorers, refit_metric
799
+ """
800
+ refit_metric = "score"
801
+
802
+ if callable(self.scoring):
803
+ scorers = self.scoring
804
+ elif self.scoring is None or isinstance(self.scoring, str):
805
+ scorers = check_scoring(self.estimator, self.scoring)
806
+ else:
807
+ scorers = _check_multimetric_scoring(self.estimator, self.scoring)
808
+ self._check_refit_for_multimetric(scorers)
809
+ refit_metric = self.refit
810
+ if convert_multimetric and isinstance(scorers, dict):
811
+ scorers = _MultimetricScorer(
812
+ scorers=scorers, raise_exc=(self.error_score == "raise")
813
+ )
814
+
815
+ return scorers, refit_metric
816
+
817
+ def _get_routed_params_for_fit(self, params):
818
+ """Get the parameters to be used for routing.
819
+
820
+ This is a method instead of a snippet in ``fit`` since it's used twice,
821
+ here in ``fit``, and in ``HalvingRandomSearchCV.fit``.
822
+ """
823
+ if _routing_enabled():
824
+ routed_params = process_routing(self, "fit", **params)
825
+ else:
826
+ params = params.copy()
827
+ groups = params.pop("groups", None)
828
+ routed_params = Bunch(
829
+ estimator=Bunch(fit=params),
830
+ splitter=Bunch(split={"groups": groups}),
831
+ scorer=Bunch(score={}),
832
+ )
833
+ return routed_params
834
+
835
+ @_fit_context(
836
+ # *SearchCV.estimator is not validated yet
837
+ prefer_skip_nested_validation=False
838
+ )
839
+ def fit(self, X, y=None, **params):
840
+ """Run fit with all sets of parameters.
841
+
842
+ Parameters
843
+ ----------
844
+
845
+ X : array-like of shape (n_samples, n_features)
846
+ Training vector, where `n_samples` is the number of samples and
847
+ `n_features` is the number of features.
848
+
849
+ y : array-like of shape (n_samples, n_output) \
850
+ or (n_samples,), default=None
851
+ Target relative to X for classification or regression;
852
+ None for unsupervised learning.
853
+
854
+ **params : dict of str -> object
855
+ Parameters passed to the ``fit`` method of the estimator, the scorer,
856
+ and the CV splitter.
857
+
858
+ If a fit parameter is an array-like whose length is equal to
859
+ `num_samples` then it will be split across CV groups along with `X`
860
+ and `y`. For example, the :term:`sample_weight` parameter is split
861
+ because `len(sample_weight) == len(X)`.
862
+
863
+ Returns
864
+ -------
865
+ self : object
866
+ Instance of fitted estimator.
867
+ """
868
+ estimator = self.estimator
869
+ # Here we keep a dict of scorers as is, and only convert to a
870
+ # _MultimetricScorer at a later stage. Issue:
871
+ # https://github.com/scikit-learn/scikit-learn/issues/27001
872
+ scorers, refit_metric = self._get_scorers(convert_multimetric=False)
873
+
874
+ X, y = indexable(X, y)
875
+ params = _check_method_params(X, params=params)
876
+
877
+ routed_params = self._get_routed_params_for_fit(params)
878
+
879
+ cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
880
+ n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split)
881
+
882
+ base_estimator = clone(self.estimator)
883
+
884
+ parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
885
+
886
+ fit_and_score_kwargs = dict(
887
+ scorer=scorers,
888
+ fit_params=routed_params.estimator.fit,
889
+ score_params=routed_params.scorer.score,
890
+ return_train_score=self.return_train_score,
891
+ return_n_test_samples=True,
892
+ return_times=True,
893
+ return_parameters=False,
894
+ error_score=self.error_score,
895
+ verbose=self.verbose,
896
+ )
897
+ results = {}
898
+ with parallel:
899
+ all_candidate_params = []
900
+ all_out = []
901
+ all_more_results = defaultdict(list)
902
+
903
+ def evaluate_candidates(candidate_params, cv=None, more_results=None):
904
+ cv = cv or cv_orig
905
+ candidate_params = list(candidate_params)
906
+ n_candidates = len(candidate_params)
907
+
908
+ if self.verbose > 0:
909
+ print(
910
+ "Fitting {0} folds for each of {1} candidates,"
911
+ " totalling {2} fits".format(
912
+ n_splits, n_candidates, n_candidates * n_splits
913
+ )
914
+ )
915
+
916
+ out = parallel(
917
+ delayed(_fit_and_score)(
918
+ clone(base_estimator),
919
+ X,
920
+ y,
921
+ train=train,
922
+ test=test,
923
+ parameters=parameters,
924
+ split_progress=(split_idx, n_splits),
925
+ candidate_progress=(cand_idx, n_candidates),
926
+ **fit_and_score_kwargs,
927
+ )
928
+ for (cand_idx, parameters), (split_idx, (train, test)) in product(
929
+ enumerate(candidate_params),
930
+ enumerate(cv.split(X, y, **routed_params.splitter.split)),
931
+ )
932
+ )
933
+
934
+ if len(out) < 1:
935
+ raise ValueError(
936
+ "No fits were performed. "
937
+ "Was the CV iterator empty? "
938
+ "Were there no candidates?"
939
+ )
940
+ elif len(out) != n_candidates * n_splits:
941
+ raise ValueError(
942
+ "cv.split and cv.get_n_splits returned "
943
+ "inconsistent results. Expected {} "
944
+ "splits, got {}".format(n_splits, len(out) // n_candidates)
945
+ )
946
+
947
+ _warn_or_raise_about_fit_failures(out, self.error_score)
948
+
949
+ # For callable self.scoring, the return type is only known after
950
+ # calling. If the return type is a dictionary, the error scores
951
+ # can now be inserted with the correct key. The type checking
952
+ # of out will be done in `_insert_error_scores`.
953
+ if callable(self.scoring):
954
+ _insert_error_scores(out, self.error_score)
955
+
956
+ all_candidate_params.extend(candidate_params)
957
+ all_out.extend(out)
958
+
959
+ if more_results is not None:
960
+ for key, value in more_results.items():
961
+ all_more_results[key].extend(value)
962
+
963
+ nonlocal results
964
+ results = self._format_results(
965
+ all_candidate_params, n_splits, all_out, all_more_results
966
+ )
967
+
968
+ return results
969
+
970
+ self._run_search(evaluate_candidates)
971
+
972
+ # multimetric is determined here because in the case of a callable
973
+ # self.scoring the return type is only known after calling
974
+ first_test_score = all_out[0]["test_scores"]
975
+ self.multimetric_ = isinstance(first_test_score, dict)
976
+
977
+ # check refit_metric now for a callable scorer that is multimetric
978
+ if callable(self.scoring) and self.multimetric_:
979
+ self._check_refit_for_multimetric(first_test_score)
980
+ refit_metric = self.refit
981
+
982
+ # For multi-metric evaluation, store the best_index_, best_params_ and
983
+ # best_score_ iff refit is one of the scorer names
984
+ # In single metric evaluation, refit_metric is "score"
985
+ if self.refit or not self.multimetric_:
986
+ self.best_index_ = self._select_best_index(
987
+ self.refit, refit_metric, results
988
+ )
989
+ if not callable(self.refit):
990
+ # When refit is not a callable, we can select the best score
991
+ # based on the best index
992
+ self.best_score_ = results[f"mean_test_{refit_metric}"][
993
+ self.best_index_
994
+ ]
995
+ self.best_params_ = results["params"][self.best_index_]
996
+
997
+ if self.refit:
998
+ # here we clone the estimator as well as the parameters, since
999
+ # sometimes the parameters themselves might be estimators, e.g.
1000
+ # when we search over different estimators in a pipeline.
1001
+ # ref: https://github.com/scikit-learn/scikit-learn/pull/26786
1002
+ self.best_estimator_ = clone(base_estimator).set_params(
1003
+ **clone(self.best_params_, safe=False)
1004
+ )
1005
+
1006
+ refit_start_time = time.time()
1007
+ if y is not None:
1008
+ self.best_estimator_.fit(X, y, **routed_params.estimator.fit)
1009
+ else:
1010
+ self.best_estimator_.fit(X, **routed_params.estimator.fit)
1011
+ refit_end_time = time.time()
1012
+ self.refit_time_ = refit_end_time - refit_start_time
1013
+
1014
+ if hasattr(self.best_estimator_, "feature_names_in_"):
1015
+ self.feature_names_in_ = self.best_estimator_.feature_names_in_
1016
+
1017
+ # Store the only scorer not as a dict for single metric evaluation
1018
+ self.scorer_ = scorers
1019
+
1020
+ self.cv_results_ = results
1021
+ self.n_splits_ = n_splits
1022
+
1023
+ return self
1024
+
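The `**params` handling in `fit` above splits any array-like of length `n_samples` across the CV folds; a small sketch of that behaviour with a uniform, purely illustrative `sample_weight`:

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import GridSearchCV

    X, y = load_iris(return_X_y=True)
    weights = np.ones(len(X))  # same length as X, so it is sliced per split
    search = GridSearchCV(LogisticRegression(max_iter=1000), {"C": [0.1, 1.0]})
    search.fit(X, y, sample_weight=weights)
    print(search.best_params_)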
1025
+ def _format_results(self, candidate_params, n_splits, out, more_results=None):
1026
+ n_candidates = len(candidate_params)
1027
+ out = _aggregate_score_dicts(out)
1028
+
1029
+ results = dict(more_results or {})
1030
+ for key, val in results.items():
1031
+ # each value is a list (as per evaluate_candidate's convention)
1032
+ # we convert it to an array for consistency with the other keys
1033
+ results[key] = np.asarray(val)
1034
+
1035
+ def _store(key_name, array, weights=None, splits=False, rank=False):
1036
+ """A small helper to store the scores/times to the cv_results_"""
1037
+ # When iterated first by splits, then by parameters
1038
+ # We want `array` to have `n_candidates` rows and `n_splits` cols.
1039
+ array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
1040
+ if splits:
1041
+ for split_idx in range(n_splits):
1042
+ # Uses closure to alter the results
1043
+ results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx]
1044
+
1045
+ array_means = np.average(array, axis=1, weights=weights)
1046
+ results["mean_%s" % key_name] = array_means
1047
+
1048
+ if key_name.startswith(("train_", "test_")) and np.any(
1049
+ ~np.isfinite(array_means)
1050
+ ):
1051
+ warnings.warn(
1052
+ (
1053
+ f"One or more of the {key_name.split('_')[0]} scores "
1054
+ f"are non-finite: {array_means}"
1055
+ ),
1056
+ category=UserWarning,
1057
+ )
1058
+
1059
+ # Weighted std is not directly available in numpy
1060
+ array_stds = np.sqrt(
1061
+ np.average(
1062
+ (array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights
1063
+ )
1064
+ )
1065
+ results["std_%s" % key_name] = array_stds
1066
+
1067
+ if rank:
1068
+ # When the fit/scoring fails `array_means` contains NaNs, we
1069
+ # will exclude them from the ranking process and consider them
1070
+ # as tied with the worst performers.
1071
+ if np.isnan(array_means).all():
1072
+ # All fit/scoring routines failed.
1073
+ rank_result = np.ones_like(array_means, dtype=np.int32)
1074
+ else:
1075
+ min_array_means = np.nanmin(array_means) - 1
1076
+ array_means = np.nan_to_num(array_means, nan=min_array_means)
1077
+ rank_result = rankdata(-array_means, method="min").astype(
1078
+ np.int32, copy=False
1079
+ )
1080
+ results["rank_%s" % key_name] = rank_result
1081
+
1082
+ _store("fit_time", out["fit_time"])
1083
+ _store("score_time", out["score_time"])
1084
+ # Use one MaskedArray and mask all the places where the param is not
1085
+ # applicable for that candidate. Use defaultdict as each candidate may
1086
+ # not contain all the params
1087
+ param_results = defaultdict(
1088
+ partial(
1089
+ MaskedArray,
1090
+ np.empty(
1091
+ n_candidates,
1092
+ ),
1093
+ mask=True,
1094
+ dtype=object,
1095
+ )
1096
+ )
1097
+ for cand_idx, params in enumerate(candidate_params):
1098
+ for name, value in params.items():
1099
+ # An all masked empty array gets created for the key
1100
+ # `"param_%s" % name` at the first occurrence of `name`.
1101
+ # Setting the value at an index also unmasks that index
1102
+ param_results["param_%s" % name][cand_idx] = value
1103
+
1104
+ results.update(param_results)
1105
+ # Store a list of param dicts at the key 'params'
1106
+ results["params"] = candidate_params
1107
+
1108
+ test_scores_dict = _normalize_score_results(out["test_scores"])
1109
+ if self.return_train_score:
1110
+ train_scores_dict = _normalize_score_results(out["train_scores"])
1111
+
1112
+ for scorer_name in test_scores_dict:
1113
+ # Compute the (weighted) mean and std for the test scores alone
1114
+ _store(
1115
+ "test_%s" % scorer_name,
1116
+ test_scores_dict[scorer_name],
1117
+ splits=True,
1118
+ rank=True,
1119
+ weights=None,
1120
+ )
1121
+ if self.return_train_score:
1122
+ _store(
1123
+ "train_%s" % scorer_name,
1124
+ train_scores_dict[scorer_name],
1125
+ splits=True,
1126
+ )
1127
+
1128
+ return results
1129
+
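A small sketch of the ranking rule implemented in `_store` above: candidates whose mean score is NaN (failed fits) are pushed below every finite score before ranking, so they tie for the worst rank while finite scores keep their usual ordering:

    import numpy as np
    from scipy.stats import rankdata

    means = np.array([0.8, np.nan, 0.9, np.nan])
    filled = np.nan_to_num(means, nan=np.nanmin(means) - 1)
    print(rankdata(-filled, method="min").astype(np.int32))  # [2 3 1 3]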
1130
+ def get_metadata_routing(self):
1131
+ """Get metadata routing of this object.
1132
+
1133
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1134
+ mechanism works.
1135
+
1136
+ .. versionadded:: 1.4
1137
+
1138
+ Returns
1139
+ -------
1140
+ routing : MetadataRouter
1141
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
1142
+ routing information.
1143
+ """
1144
+ router = MetadataRouter(owner=self.__class__.__name__)
1145
+ router.add(
1146
+ estimator=self.estimator,
1147
+ method_mapping=MethodMapping().add(caller="fit", callee="fit"),
1148
+ )
1149
+
1150
+ scorer, _ = self._get_scorers(convert_multimetric=True)
1151
+ router.add(
1152
+ scorer=scorer,
1153
+ method_mapping=MethodMapping()
1154
+ .add(caller="score", callee="score")
1155
+ .add(caller="fit", callee="score"),
1156
+ )
1157
+ router.add(
1158
+ splitter=self.cv,
1159
+ method_mapping=MethodMapping().add(caller="fit", callee="split"),
1160
+ )
1161
+ return router
1162
+
1163
+
1164
+ class GridSearchCV(BaseSearchCV):
1165
+ """Exhaustive search over specified parameter values for an estimator.
1166
+
1167
+ Important members are fit, predict.
1168
+
1169
+ GridSearchCV implements a "fit" and a "score" method.
1170
+ It also implements "score_samples", "predict", "predict_proba",
1171
+ "decision_function", "transform" and "inverse_transform" if they are
1172
+ implemented in the estimator used.
1173
+
1174
+ The parameters of the estimator used to apply these methods are optimized
1175
+ by cross-validated grid-search over a parameter grid.
1176
+
1177
+ Read more in the :ref:`User Guide <grid_search>`.
1178
+
1179
+ Parameters
1180
+ ----------
1181
+ estimator : estimator object
1182
+ This is assumed to implement the scikit-learn estimator interface.
1183
+ Either estimator needs to provide a ``score`` function,
1184
+ or ``scoring`` must be passed.
1185
+
1186
+ param_grid : dict or list of dictionaries
1187
+ Dictionary with parameters names (`str`) as keys and lists of
1188
+ parameter settings to try as values, or a list of such
1189
+ dictionaries, in which case the grids spanned by each dictionary
1190
+ in the list are explored. This enables searching over any sequence
1191
+ of parameter settings.
1192
+
1193
+ scoring : str, callable, list, tuple or dict, default=None
1194
+ Strategy to evaluate the performance of the cross-validated model on
1195
+ the test set.
1196
+
1197
+ If `scoring` represents a single score, one can use:
1198
+
1199
+ - a single string (see :ref:`scoring_parameter`);
1200
+ - a callable (see :ref:`scoring`) that returns a single value.
1201
+
1202
+ If `scoring` represents multiple scores, one can use:
1203
+
1204
+ - a list or tuple of unique strings;
1205
+ - a callable returning a dictionary where the keys are the metric
1206
+ names and the values are the metric scores;
1207
+ - a dictionary with metric names as keys and callables a values.
1208
+
1209
+ See :ref:`multimetric_grid_search` for an example.
1210
+
1211
+ n_jobs : int, default=None
1212
+ Number of jobs to run in parallel.
1213
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1214
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1215
+ for more details.
1216
+
1217
+ .. versionchanged:: v0.20
1218
+ `n_jobs` default changed from 1 to None
1219
+
1220
+ refit : bool, str, or callable, default=True
1221
+ Refit an estimator using the best found parameters on the whole
1222
+ dataset.
1223
+
1224
+ For multiple metric evaluation, this needs to be a `str` denoting the
1225
+ scorer that would be used to find the best parameters for refitting
1226
+ the estimator at the end.
1227
+
1228
+ Where there are considerations other than maximum score in
1229
+ choosing a best estimator, ``refit`` can be set to a function which
1230
+ returns the selected ``best_index_`` given ``cv_results_``. In that
1231
+ case, the ``best_estimator_`` and ``best_params_`` will be set
1232
+ according to the returned ``best_index_`` while the ``best_score_``
1233
+ attribute will not be available.
1234
+
1235
+ The refitted estimator is made available at the ``best_estimator_``
1236
+ attribute and permits using ``predict`` directly on this
1237
+ ``GridSearchCV`` instance.
1238
+
1239
+ Also for multiple metric evaluation, the attributes ``best_index_``,
1240
+ ``best_score_`` and ``best_params_`` will only be available if
1241
+ ``refit`` is set and all of them will be determined w.r.t this specific
1242
+ scorer.
1243
+
1244
+ See ``scoring`` parameter to know more about multiple metric
1245
+ evaluation.
1246
+
1247
+ See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py`
1248
+ to see how to design a custom selection strategy using a callable
1249
+ via `refit`.
1250
+
1251
+ .. versionchanged:: 0.20
1252
+ Support for callable added.
1253
+
1254
+ cv : int, cross-validation generator or an iterable, default=None
1255
+ Determines the cross-validation splitting strategy.
1256
+ Possible inputs for cv are:
1257
+
1258
+ - None, to use the default 5-fold cross validation,
1259
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
1260
+ - :term:`CV splitter`,
1261
+ - An iterable yielding (train, test) splits as arrays of indices.
1262
+
1263
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
1264
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1265
+ other cases, :class:`KFold` is used. These splitters are instantiated
1266
+ with `shuffle=False` so the splits will be the same across calls.
1267
+
1268
+ Refer to the :ref:`User Guide <cross_validation>` for the various
1269
+ cross-validation strategies that can be used here.
1270
+
1271
+ .. versionchanged:: 0.22
1272
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1273
+
1274
+ verbose : int
1275
+ Controls the verbosity: the higher, the more messages.
1276
+
1277
+ - >1 : the computation time for each fold and parameter candidate is
1278
+ displayed;
1279
+ - >2 : the score is also displayed;
1280
+ - >3 : the fold and candidate parameter indexes are also displayed
1281
+ together with the starting time of the computation.
1282
+
1283
+ pre_dispatch : int, or str, default='2*n_jobs'
1284
+ Controls the number of jobs that get dispatched during parallel
1285
+ execution. Reducing this number can be useful to avoid an
1286
+ explosion of memory consumption when more jobs get dispatched
1287
+ than CPUs can process. This parameter can be:
1288
+
1289
+ - None, in which case all the jobs are immediately
1290
+ created and spawned. Use this for lightweight and
1291
+ fast-running jobs, to avoid delays due to on-demand
1292
+ spawning of the jobs
1293
+
1294
+ - An int, giving the exact number of total jobs that are
1295
+ spawned
1296
+
1297
+ - A str, giving an expression as a function of n_jobs,
1298
+ as in '2*n_jobs'
1299
+
1300
+ error_score : 'raise' or numeric, default=np.nan
1301
+ Value to assign to the score if an error occurs in estimator fitting.
1302
+ If set to 'raise', the error is raised. If a numeric value is given,
1303
+ FitFailedWarning is raised. This parameter does not affect the refit
1304
+ step, which will always raise the error.
1305
+
1306
+ return_train_score : bool, default=False
1307
+ If ``False``, the ``cv_results_`` attribute will not include training
1308
+ scores.
1309
+ Computing training scores is used to get insights on how different
1310
+ parameter settings impact the overfitting/underfitting trade-off.
1311
+ However computing the scores on the training set can be computationally
1312
+ expensive and is not strictly required to select the parameters that
1313
+ yield the best generalization performance.
1314
+
1315
+ .. versionadded:: 0.19
1316
+
1317
+ .. versionchanged:: 0.21
1318
+ Default value was changed from ``True`` to ``False``
1319
+
1320
+ Attributes
1321
+ ----------
1322
+ cv_results_ : dict of numpy (masked) ndarrays
1323
+ A dict with keys as column headers and values as columns, that can be
1324
+ imported into a pandas ``DataFrame``.
1325
+
1326
+ For instance, the table below
1327
+
1328
+ +------------+-----------+------------+-----------------+---+---------+
1329
+ |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
1330
+ +============+===========+============+=================+===+=========+
1331
+ | 'poly' | -- | 2 | 0.80 |...| 2 |
1332
+ +------------+-----------+------------+-----------------+---+---------+
1333
+ | 'poly' | -- | 3 | 0.70 |...| 4 |
1334
+ +------------+-----------+------------+-----------------+---+---------+
1335
+ | 'rbf' | 0.1 | -- | 0.80 |...| 3 |
1336
+ +------------+-----------+------------+-----------------+---+---------+
1337
+ | 'rbf' | 0.2 | -- | 0.93 |...| 1 |
1338
+ +------------+-----------+------------+-----------------+---+---------+
1339
+
1340
+ will be represented by a ``cv_results_`` dict of::
1341
+
1342
+ {
1343
+ 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
1344
+ mask = [False False False False]...)
1345
+ 'param_gamma': masked_array(data = [-- -- 0.1 0.2],
1346
+ mask = [ True True False False]...),
1347
+ 'param_degree': masked_array(data = [2.0 3.0 -- --],
1348
+ mask = [False False True True]...),
1349
+ 'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
1350
+ 'split1_test_score' : [0.82, 0.50, 0.70, 0.78],
1351
+ 'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
1352
+ 'std_test_score' : [0.01, 0.10, 0.05, 0.08],
1353
+ 'rank_test_score' : [2, 4, 3, 1],
1354
+ 'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
1355
+ 'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
1356
+ 'mean_train_score' : [0.81, 0.74, 0.70, 0.90],
1357
+ 'std_train_score' : [0.01, 0.19, 0.00, 0.03],
1358
+ 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
1359
+ 'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
1360
+ 'mean_score_time' : [0.01, 0.06, 0.04, 0.04],
1361
+ 'std_score_time' : [0.00, 0.00, 0.00, 0.01],
1362
+ 'params' : [{'kernel': 'poly', 'degree': 2}, ...],
1363
+ }
1364
+
1365
+ NOTE
1366
+
1367
+ The key ``'params'`` is used to store a list of parameter
1368
+ settings dicts for all the parameter candidates.
1369
+
1370
+ The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
1371
+ ``std_score_time`` are all in seconds.
1372
+
1373
+ For multi-metric evaluation, the scores for all the scorers are
1374
+ available in the ``cv_results_`` dict at the keys ending with that
1375
+ scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
1376
+ above. ('split0_test_precision', 'mean_train_precision' etc.)
1377
+
1378
+ best_estimator_ : estimator
1379
+ Estimator that was chosen by the search, i.e. estimator
1380
+ which gave highest score (or smallest loss if specified)
1381
+ on the left out data. Not available if ``refit=False``.
1382
+
1383
+ See ``refit`` parameter for more information on allowed values.
1384
+
1385
+ best_score_ : float
1386
+ Mean cross-validated score of the best_estimator.
1387
+
1388
+ For multi-metric evaluation, this is present only if ``refit`` is
1389
+ specified.
1390
+
1391
+ This attribute is not available if ``refit`` is a function.
1392
+
1393
+ best_params_ : dict
1394
+ Parameter setting that gave the best results on the hold out data.
1395
+
1396
+ For multi-metric evaluation, this is present only if ``refit`` is
1397
+ specified.
1398
+
1399
+ best_index_ : int
1400
+ The index (of the ``cv_results_`` arrays) which corresponds to the best
1401
+ candidate parameter setting.
1402
+
1403
+ The dict at ``search.cv_results_['params'][search.best_index_]`` gives
1404
+ the parameter setting for the best model, that gives the highest
1405
+ mean score (``search.best_score_``).
1406
+
1407
+ For multi-metric evaluation, this is present only if ``refit`` is
1408
+ specified.
1409
+
1410
+ scorer_ : function or a dict
1411
+ Scorer function used on the held out data to choose the best
1412
+ parameters for the model.
1413
+
1414
+ For multi-metric evaluation, this attribute holds the validated
1415
+ ``scoring`` dict which maps the scorer key to the scorer callable.
1416
+
1417
+ n_splits_ : int
1418
+ The number of cross-validation splits (folds/iterations).
1419
+
1420
+ refit_time_ : float
1421
+ Seconds used for refitting the best model on the whole dataset.
1422
+
1423
+ This is present only if ``refit`` is not False.
1424
+
1425
+ .. versionadded:: 0.20
1426
+
1427
+ multimetric_ : bool
1428
+ Whether or not the scorers compute several metrics.
1429
+
1430
+ classes_ : ndarray of shape (n_classes,)
1431
+ The classes labels. This is present only if ``refit`` is specified and
1432
+ the underlying estimator is a classifier.
1433
+
1434
+ n_features_in_ : int
1435
+ Number of features seen during :term:`fit`. Only defined if
1436
+ `best_estimator_` is defined (see the documentation for the `refit`
1437
+ parameter for more details) and that `best_estimator_` exposes
1438
+ `n_features_in_` when fit.
1439
+
1440
+ .. versionadded:: 0.24
1441
+
1442
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1443
+ Names of features seen during :term:`fit`. Only defined if
1444
+ `best_estimator_` is defined (see the documentation for the `refit`
1445
+ parameter for more details) and that `best_estimator_` exposes
1446
+ `feature_names_in_` when fit.
1447
+
1448
+ .. versionadded:: 1.0
1449
+
1450
+ See Also
1451
+ --------
1452
+ ParameterGrid : Generates all the combinations of a hyperparameter grid.
1453
+ train_test_split : Utility function to split the data into a development
1454
+ set usable for fitting a GridSearchCV instance and an evaluation set
1455
+ for its final evaluation.
1456
+ sklearn.metrics.make_scorer : Make a scorer from a performance metric or
1457
+ loss function.
1458
+
1459
+ Notes
1460
+ -----
1461
+ The parameters selected are those that maximize the score of the left out
1462
+ data, unless an explicit score is passed in which case it is used instead.
1463
+
1464
+ If `n_jobs` was set to a value higher than one, the data is copied for each
1465
+ point in the grid (and not `n_jobs` times). This is done for efficiency
1466
+ reasons if individual jobs take very little time, but may raise errors if
1467
+ the dataset is large and not enough memory is available. A workaround in
1468
+ this case is to set `pre_dispatch`. Then, the memory is copied only
1469
+ `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
1470
+ n_jobs`.
1471
+
1472
+ Examples
1473
+ --------
1474
+ >>> from sklearn import svm, datasets
1475
+ >>> from sklearn.model_selection import GridSearchCV
1476
+ >>> iris = datasets.load_iris()
1477
+ >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
1478
+ >>> svc = svm.SVC()
1479
+ >>> clf = GridSearchCV(svc, parameters)
1480
+ >>> clf.fit(iris.data, iris.target)
1481
+ GridSearchCV(estimator=SVC(),
1482
+ param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')})
1483
+ >>> sorted(clf.cv_results_.keys())
1484
+ ['mean_fit_time', 'mean_score_time', 'mean_test_score',...
1485
+ 'param_C', 'param_kernel', 'params',...
1486
+ 'rank_test_score', 'split0_test_score',...
1487
+ 'split2_test_score', ...
1488
+ 'std_fit_time', 'std_score_time', 'std_test_score']
1489
+ """
1490
+
1491
+ _required_parameters = ["estimator", "param_grid"]
1492
+
1493
+ _parameter_constraints: dict = {
1494
+ **BaseSearchCV._parameter_constraints,
1495
+ "param_grid": [dict, list],
1496
+ }
1497
+
1498
+ def __init__(
1499
+ self,
1500
+ estimator,
1501
+ param_grid,
1502
+ *,
1503
+ scoring=None,
1504
+ n_jobs=None,
1505
+ refit=True,
1506
+ cv=None,
1507
+ verbose=0,
1508
+ pre_dispatch="2*n_jobs",
1509
+ error_score=np.nan,
1510
+ return_train_score=False,
1511
+ ):
1512
+ super().__init__(
1513
+ estimator=estimator,
1514
+ scoring=scoring,
1515
+ n_jobs=n_jobs,
1516
+ refit=refit,
1517
+ cv=cv,
1518
+ verbose=verbose,
1519
+ pre_dispatch=pre_dispatch,
1520
+ error_score=error_score,
1521
+ return_train_score=return_train_score,
1522
+ )
1523
+ self.param_grid = param_grid
1524
+
1525
+ def _run_search(self, evaluate_candidates):
1526
+ """Search all candidates in param_grid"""
1527
+ evaluate_candidates(ParameterGrid(self.param_grid))
1528
+
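The list-of-dicts form of `param_grid` documented above keeps incompatible parameters in separate sub-grids; a short sketch of how many candidates that produces:

    from sklearn.model_selection import ParameterGrid

    param_grid = [
        {"kernel": ["linear"], "C": [1, 10]},
        {"kernel": ["rbf"], "C": [1, 10], "gamma": [0.1, 1.0]},
    ]
    print(len(list(ParameterGrid(param_grid))))  # 2 + 4 = 6 candidates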
1529
+
1530
+ class RandomizedSearchCV(BaseSearchCV):
1531
+ """Randomized search on hyper parameters.
1532
+
1533
+ RandomizedSearchCV implements a "fit" and a "score" method.
1534
+ It also implements "score_samples", "predict", "predict_proba",
1535
+ "decision_function", "transform" and "inverse_transform" if they are
1536
+ implemented in the estimator used.
1537
+
1538
+ The parameters of the estimator used to apply these methods are optimized
1539
+ by cross-validated search over parameter settings.
1540
+
1541
+ In contrast to GridSearchCV, not all parameter values are tried out, but
1542
+ rather a fixed number of parameter settings is sampled from the specified
1543
+ distributions. The number of parameter settings that are tried is
1544
+ given by n_iter.
1545
+
1546
+ If all parameters are presented as a list,
1547
+ sampling without replacement is performed. If at least one parameter
1548
+ is given as a distribution, sampling with replacement is used.
1549
+ It is highly recommended to use continuous distributions for continuous
1550
+ parameters.
1551
+
1552
+ Read more in the :ref:`User Guide <randomized_parameter_search>`.
1553
+
1554
+ .. versionadded:: 0.14
1555
+
1556
+ Parameters
1557
+ ----------
1558
+ estimator : estimator object
1559
+ An object of that type is instantiated for each grid point.
1560
+ This is assumed to implement the scikit-learn estimator interface.
1561
+ Either estimator needs to provide a ``score`` function,
1562
+ or ``scoring`` must be passed.
1563
+
1564
+ param_distributions : dict or list of dicts
1565
+ Dictionary with parameters names (`str`) as keys and distributions
1566
+ or lists of parameters to try. Distributions must provide a ``rvs``
1567
+ method for sampling (such as those from scipy.stats.distributions).
1568
+ If a list is given, it is sampled uniformly.
1569
+ If a list of dicts is given, first a dict is sampled uniformly, and
1570
+ then a parameter is sampled using that dict as above.
1571
+
1572
+ n_iter : int, default=10
1573
+ Number of parameter settings that are sampled. n_iter trades
1574
+ off runtime vs quality of the solution.
1575
+
1576
+ scoring : str, callable, list, tuple or dict, default=None
1577
+ Strategy to evaluate the performance of the cross-validated model on
1578
+ the test set.
1579
+
1580
+ If `scoring` represents a single score, one can use:
1581
+
1582
+ - a single string (see :ref:`scoring_parameter`);
1583
+ - a callable (see :ref:`scoring`) that returns a single value.
1584
+
1585
+ If `scoring` represents multiple scores, one can use:
1586
+
1587
+ - a list or tuple of unique strings;
1588
+ - a callable returning a dictionary where the keys are the metric
1589
+ names and the values are the metric scores;
1590
+ - a dictionary with metric names as keys and callables as values.
1591
+
1592
+ See :ref:`multimetric_grid_search` for an example.
1593
+
1594
+ If None, the estimator's score method is used.
1595
+
1596
+ n_jobs : int, default=None
1597
+ Number of jobs to run in parallel.
1598
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1599
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1600
+ for more details.
1601
+
1602
+ .. versionchanged:: v0.20
1603
+ `n_jobs` default changed from 1 to None
1604
+
1605
+ refit : bool, str, or callable, default=True
1606
+ Refit an estimator using the best found parameters on the whole
1607
+ dataset.
1608
+
1609
+ For multiple metric evaluation, this needs to be a `str` denoting the
1610
+ scorer that would be used to find the best parameters for refitting
1611
+ the estimator at the end.
1612
+
1613
+ Where there are considerations other than maximum score in
1614
+ choosing a best estimator, ``refit`` can be set to a function which
1615
+ returns the selected ``best_index_`` given the ``cv_results``. In that
1616
+ case, the ``best_estimator_`` and ``best_params_`` will be set
1617
+ according to the returned ``best_index_`` while the ``best_score_``
1618
+ attribute will not be available.
1619
+
1620
+ The refitted estimator is made available at the ``best_estimator_``
1621
+ attribute and permits using ``predict`` directly on this
1622
+ ``RandomizedSearchCV`` instance.
1623
+
1624
+ Also for multiple metric evaluation, the attributes ``best_index_``,
1625
+ ``best_score_`` and ``best_params_`` will only be available if
1626
+ ``refit`` is set and all of them will be determined w.r.t this specific
1627
+ scorer.
1628
+
1629
+ See ``scoring`` parameter to know more about multiple metric
1630
+ evaluation.
1631
+
1632
+ .. versionchanged:: 0.20
1633
+ Support for callable added.
1634
+
1635
+ cv : int, cross-validation generator or an iterable, default=None
1636
+ Determines the cross-validation splitting strategy.
1637
+ Possible inputs for cv are:
1638
+
1639
+ - None, to use the default 5-fold cross validation,
1640
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
1641
+ - :term:`CV splitter`,
1642
+ - An iterable yielding (train, test) splits as arrays of indices.
1643
+
1644
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
1645
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1646
+ other cases, :class:`KFold` is used. These splitters are instantiated
1647
+ with `shuffle=False` so the splits will be the same across calls.
1648
+
1649
+ Refer to the :ref:`User Guide <cross_validation>` for the various
1650
+ cross-validation strategies that can be used here.
1651
+
1652
+ .. versionchanged:: 0.22
1653
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1654
+
1655
+ verbose : int
1656
+ Controls the verbosity: the higher, the more messages.
1657
+
1658
+ - >1 : the computation time for each fold and parameter candidate is
1659
+ displayed;
1660
+ - >2 : the score is also displayed;
1661
+ - >3 : the fold and candidate parameter indexes are also displayed
1662
+ together with the starting time of the computation.
1663
+
1664
+ pre_dispatch : int, or str, default='2*n_jobs'
1665
+ Controls the number of jobs that get dispatched during parallel
1666
+ execution. Reducing this number can be useful to avoid an
1667
+ explosion of memory consumption when more jobs get dispatched
1668
+ than CPUs can process. This parameter can be:
1669
+
1670
+ - None, in which case all the jobs are immediately
1671
+ created and spawned. Use this for lightweight and
1672
+ fast-running jobs, to avoid delays due to on-demand
1673
+ spawning of the jobs
1674
+
1675
+ - An int, giving the exact number of total jobs that are
1676
+ spawned
1677
+
1678
+ - A str, giving an expression as a function of n_jobs,
1679
+ as in '2*n_jobs'
1680
+
1681
+ random_state : int, RandomState instance or None, default=None
1682
+ Pseudo random number generator state used for random uniform sampling
1683
+ from lists of possible values instead of scipy.stats distributions.
1684
+ Pass an int for reproducible output across multiple
1685
+ function calls.
1686
+ See :term:`Glossary <random_state>`.
1687
+
1688
+ error_score : 'raise' or numeric, default=np.nan
1689
+ Value to assign to the score if an error occurs in estimator fitting.
1690
+ If set to 'raise', the error is raised. If a numeric value is given,
1691
+ FitFailedWarning is raised. This parameter does not affect the refit
1692
+ step, which will always raise the error.
1693
+
1694
+ return_train_score : bool, default=False
1695
+ If ``False``, the ``cv_results_`` attribute will not include training
1696
+ scores.
1697
+ Computing training scores is used to get insights on how different
1698
+ parameter settings impact the overfitting/underfitting trade-off.
1699
+ However computing the scores on the training set can be computationally
1700
+ expensive and is not strictly required to select the parameters that
1701
+ yield the best generalization performance.
1702
+
1703
+ .. versionadded:: 0.19
1704
+
1705
+ .. versionchanged:: 0.21
1706
+ Default value was changed from ``True`` to ``False``
1707
+
1708
+ Attributes
1709
+ ----------
1710
+ cv_results_ : dict of numpy (masked) ndarrays
1711
+ A dict with keys as column headers and values as columns, that can be
1712
+ imported into a pandas ``DataFrame``.
1713
+
1714
+ For instance, the table below
1715
+
1716
+ +--------------+-------------+-------------------+---+---------------+
1717
+ | param_kernel | param_gamma | split0_test_score |...|rank_test_score|
1718
+ +==============+=============+===================+===+===============+
1719
+ | 'rbf' | 0.1 | 0.80 |...| 1 |
1720
+ +--------------+-------------+-------------------+---+---------------+
1721
+ | 'rbf' | 0.2 | 0.84 |...| 3 |
1722
+ +--------------+-------------+-------------------+---+---------------+
1723
+ | 'rbf' | 0.3 | 0.70 |...| 2 |
1724
+ +--------------+-------------+-------------------+---+---------------+
1725
+
1726
+ will be represented by a ``cv_results_`` dict of::
1727
+
1728
+ {
1729
+ 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
1730
+ mask = False),
1731
+ 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
1732
+ 'split0_test_score' : [0.80, 0.84, 0.70],
1733
+ 'split1_test_score' : [0.82, 0.50, 0.70],
1734
+ 'mean_test_score' : [0.81, 0.67, 0.70],
1735
+ 'std_test_score' : [0.01, 0.24, 0.00],
1736
+ 'rank_test_score' : [1, 3, 2],
1737
+ 'split0_train_score' : [0.80, 0.92, 0.70],
1738
+ 'split1_train_score' : [0.82, 0.55, 0.70],
1739
+ 'mean_train_score' : [0.81, 0.74, 0.70],
1740
+ 'std_train_score' : [0.01, 0.19, 0.00],
1741
+ 'mean_fit_time' : [0.73, 0.63, 0.43],
1742
+ 'std_fit_time' : [0.01, 0.02, 0.01],
1743
+ 'mean_score_time' : [0.01, 0.06, 0.04],
1744
+ 'std_score_time' : [0.00, 0.00, 0.00],
1745
+ 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
1746
+ }
1747
+
1748
+ NOTE
1749
+
1750
+ The key ``'params'`` is used to store a list of parameter
1751
+ settings dicts for all the parameter candidates.
1752
+
1753
+ The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
1754
+ ``std_score_time`` are all in seconds.
1755
+
1756
+ For multi-metric evaluation, the scores for all the scorers are
1757
+ available in the ``cv_results_`` dict at the keys ending with that
1758
+ scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
1759
+ above. ('split0_test_precision', 'mean_train_precision' etc.)
1760
+
1761
+ best_estimator_ : estimator
1762
+ Estimator that was chosen by the search, i.e. estimator
1763
+ which gave highest score (or smallest loss if specified)
1764
+ on the left out data. Not available if ``refit=False``.
1765
+
1766
+ For multi-metric evaluation, this attribute is present only if
1767
+ ``refit`` is specified.
1768
+
1769
+ See ``refit`` parameter for more information on allowed values.
1770
+
1771
+ best_score_ : float
1772
+ Mean cross-validated score of the best_estimator.
1773
+
1774
+ For multi-metric evaluation, this is not available if ``refit`` is
1775
+ ``False``. See ``refit`` parameter for more information.
1776
+
1777
+ This attribute is not available if ``refit`` is a function.
1778
+
1779
+ best_params_ : dict
1780
+ Parameter setting that gave the best results on the hold out data.
1781
+
1782
+ For multi-metric evaluation, this is not available if ``refit`` is
1783
+ ``False``. See ``refit`` parameter for more information.
1784
+
1785
+ best_index_ : int
1786
+ The index (of the ``cv_results_`` arrays) which corresponds to the best
1787
+ candidate parameter setting.
1788
+
1789
+ The dict at ``search.cv_results_['params'][search.best_index_]`` gives
1790
+ the parameter setting for the best model, that gives the highest
1791
+ mean score (``search.best_score_``).
1792
+
1793
+ For multi-metric evaluation, this is not available if ``refit`` is
1794
+ ``False``. See ``refit`` parameter for more information.
1795
+
1796
+ scorer_ : function or a dict
1797
+ Scorer function used on the held out data to choose the best
1798
+ parameters for the model.
1799
+
1800
+ For multi-metric evaluation, this attribute holds the validated
1801
+ ``scoring`` dict which maps the scorer key to the scorer callable.
1802
+
1803
+ n_splits_ : int
1804
+ The number of cross-validation splits (folds/iterations).
1805
+
1806
+ refit_time_ : float
1807
+ Seconds used for refitting the best model on the whole dataset.
1808
+
1809
+ This is present only if ``refit`` is not False.
1810
+
1811
+ .. versionadded:: 0.20
1812
+
1813
+ multimetric_ : bool
1814
+ Whether or not the scorers compute several metrics.
1815
+
1816
+ classes_ : ndarray of shape (n_classes,)
1817
+ The classes labels. This is present only if ``refit`` is specified and
1818
+ the underlying estimator is a classifier.
1819
+
1820
+ n_features_in_ : int
1821
+ Number of features seen during :term:`fit`. Only defined if
1822
+ `best_estimator_` is defined (see the documentation for the `refit`
1823
+ parameter for more details) and that `best_estimator_` exposes
1824
+ `n_features_in_` when fit.
1825
+
1826
+ .. versionadded:: 0.24
1827
+
1828
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1829
+ Names of features seen during :term:`fit`. Only defined if
1830
+ `best_estimator_` is defined (see the documentation for the `refit`
1831
+ parameter for more details) and that `best_estimator_` exposes
1832
+ `feature_names_in_` when fit.
1833
+
1834
+ .. versionadded:: 1.0
1835
+
1836
+ See Also
1837
+ --------
1838
+ GridSearchCV : Does exhaustive search over a grid of parameters.
1839
+ ParameterSampler : A generator over parameter settings, constructed from
1840
+ param_distributions.
1841
+
1842
+ Notes
1843
+ -----
1844
+ The parameters selected are those that maximize the score of the held-out
1845
+ data, according to the scoring parameter.
1846
+
1847
+ If `n_jobs` was set to a value higher than one, the data is copied for each
1848
+ parameter setting (and not `n_jobs` times). This is done for efficiency
1849
+ reasons if individual jobs take very little time, but may raise errors if
1850
+ the dataset is large and not enough memory is available. A workaround in
1851
+ this case is to set `pre_dispatch`. Then, the memory is copied only
1852
+ `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
1853
+ n_jobs`.
1854
+
1855
+ Examples
1856
+ --------
1857
+ >>> from sklearn.datasets import load_iris
1858
+ >>> from sklearn.linear_model import LogisticRegression
1859
+ >>> from sklearn.model_selection import RandomizedSearchCV
1860
+ >>> from scipy.stats import uniform
1861
+ >>> iris = load_iris()
1862
+ >>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200,
1863
+ ... random_state=0)
1864
+ >>> distributions = dict(C=uniform(loc=0, scale=4),
1865
+ ... penalty=['l2', 'l1'])
1866
+ >>> clf = RandomizedSearchCV(logistic, distributions, random_state=0)
1867
+ >>> search = clf.fit(iris.data, iris.target)
1868
+ >>> search.best_params_
1869
+ {'C': 2..., 'penalty': 'l1'}
1870
+ """
1871
+
1872
+ _required_parameters = ["estimator", "param_distributions"]
1873
+
1874
+ _parameter_constraints: dict = {
1875
+ **BaseSearchCV._parameter_constraints,
1876
+ "param_distributions": [dict, list],
1877
+ "n_iter": [Interval(numbers.Integral, 1, None, closed="left")],
1878
+ "random_state": ["random_state"],
1879
+ }
1880
+
1881
+ def __init__(
1882
+ self,
1883
+ estimator,
1884
+ param_distributions,
1885
+ *,
1886
+ n_iter=10,
1887
+ scoring=None,
1888
+ n_jobs=None,
1889
+ refit=True,
1890
+ cv=None,
1891
+ verbose=0,
1892
+ pre_dispatch="2*n_jobs",
1893
+ random_state=None,
1894
+ error_score=np.nan,
1895
+ return_train_score=False,
1896
+ ):
1897
+ self.param_distributions = param_distributions
1898
+ self.n_iter = n_iter
1899
+ self.random_state = random_state
1900
+ super().__init__(
1901
+ estimator=estimator,
1902
+ scoring=scoring,
1903
+ n_jobs=n_jobs,
1904
+ refit=refit,
1905
+ cv=cv,
1906
+ verbose=verbose,
1907
+ pre_dispatch=pre_dispatch,
1908
+ error_score=error_score,
1909
+ return_train_score=return_train_score,
1910
+ )
1911
+
1912
+ def _run_search(self, evaluate_candidates):
1913
+ """Search n_iter candidates from param_distributions"""
1914
+ evaluate_candidates(
1915
+ ParameterSampler(
1916
+ self.param_distributions, self.n_iter, random_state=self.random_state
1917
+ )
1918
+ )
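A hedged sketch of the sampling behaviour described in the RandomizedSearchCV docstring: list entries are drawn uniformly, while distribution objects are sampled through their `rvs` method (`loguniform` is only one possible choice for a scale-like parameter such as `C`):

    from scipy.stats import loguniform
    from sklearn.model_selection import ParameterSampler

    param_distributions = {"C": loguniform(1e-3, 1e3), "penalty": ["l1", "l2"]}
    for candidate in ParameterSampler(param_distributions, n_iter=3, random_state=0):
        print(candidate)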
venv/lib/python3.10/site-packages/sklearn/model_selection/_search_successive_halving.py ADDED
@@ -0,0 +1,1079 @@
1
+ from abc import abstractmethod
2
+ from copy import deepcopy
3
+ from math import ceil, floor, log
4
+ from numbers import Integral, Real
5
+
6
+ import numpy as np
7
+
8
+ from ..base import _fit_context, is_classifier
9
+ from ..metrics._scorer import get_scorer_names
10
+ from ..utils import resample
11
+ from ..utils._param_validation import Interval, StrOptions
12
+ from ..utils.multiclass import check_classification_targets
13
+ from ..utils.validation import _num_samples
14
+ from . import ParameterGrid, ParameterSampler
15
+ from ._search import BaseSearchCV
16
+ from ._split import _yields_constant_splits, check_cv
17
+
18
+ __all__ = ["HalvingGridSearchCV", "HalvingRandomSearchCV"]
19
+
20
+
21
+ class _SubsampleMetaSplitter:
22
+ """Splitter that subsamples a given fraction of the dataset"""
23
+
24
+ def __init__(self, *, base_cv, fraction, subsample_test, random_state):
25
+ self.base_cv = base_cv
26
+ self.fraction = fraction
27
+ self.subsample_test = subsample_test
28
+ self.random_state = random_state
29
+
30
+ def split(self, X, y, **kwargs):
31
+ for train_idx, test_idx in self.base_cv.split(X, y, **kwargs):
32
+ train_idx = resample(
33
+ train_idx,
34
+ replace=False,
35
+ random_state=self.random_state,
36
+ n_samples=int(self.fraction * len(train_idx)),
37
+ )
38
+ if self.subsample_test:
39
+ test_idx = resample(
40
+ test_idx,
41
+ replace=False,
42
+ random_state=self.random_state,
43
+ n_samples=int(self.fraction * len(test_idx)),
44
+ )
45
+ yield train_idx, test_idx
46
+
47
+
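A minimal sketch (not part of the diff) of the idea behind `_SubsampleMetaSplitter`: keep the base CV folds but draw only `fraction` of each fold's indices with `resample`. The 20-sample toy data and `fraction=0.5` are illustrative only.

import numpy as np
from sklearn.model_selection import KFold
from sklearn.utils import resample

X, y = np.arange(20).reshape(-1, 1), np.zeros(20)
fraction = 0.5
for train_idx, test_idx in KFold(n_splits=4).split(X, y):
    small_train = resample(train_idx, replace=False, random_state=0,
                           n_samples=int(fraction * len(train_idx)))
    print(len(train_idx), len(small_train))  # 15 7 on each fold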
48
+ def _top_k(results, k, itr):
49
+ # Return the best candidates of a given iteration
50
+ iteration, mean_test_score, params = (
51
+ np.asarray(a)
52
+ for a in (results["iter"], results["mean_test_score"], results["params"])
53
+ )
54
+ iter_indices = np.flatnonzero(iteration == itr)
55
+ scores = mean_test_score[iter_indices]
56
+ # argsort() places NaNs at the end of the array so we move NaNs to the
57
+ # front of the array so the last `k` items are those with the
58
+ # highest scores.
59
+ sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores)))
60
+ return np.array(params[iter_indices][sorted_indices[-k:]])
61
+
62
+
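A minimal sketch (not part of the diff) of the NaN handling in `_top_k`: `argsort` puts NaNs last, and rolling the order by the NaN count moves them to the front, so the last `k` positions hold the highest finite scores.

import numpy as np

scores = np.array([0.2, np.nan, 0.9, 0.5])
order = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores)))
print(order[-2:])  # [3 2] -> the two best finite scores, best last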
63
+ class BaseSuccessiveHalving(BaseSearchCV):
64
+ """Implements successive halving.
65
+
66
+ Ref:
67
+ Almost optimal exploration in multi-armed bandits, ICML 13
68
+ Zohar Karnin, Tomer Koren, Oren Somekh
69
+ """
70
+
71
+ _parameter_constraints: dict = {
72
+ **BaseSearchCV._parameter_constraints,
73
+ # overwrite `scoring` since multi-metrics are not supported
74
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
75
+ "random_state": ["random_state"],
76
+ "max_resources": [
77
+ Interval(Integral, 0, None, closed="neither"),
78
+ StrOptions({"auto"}),
79
+ ],
80
+ "min_resources": [
81
+ Interval(Integral, 0, None, closed="neither"),
82
+ StrOptions({"exhaust", "smallest"}),
83
+ ],
84
+ "resource": [str],
85
+ "factor": [Interval(Real, 0, None, closed="neither")],
86
+ "aggressive_elimination": ["boolean"],
87
+ }
88
+ _parameter_constraints.pop("pre_dispatch") # not used in this class
89
+
90
+ def __init__(
91
+ self,
92
+ estimator,
93
+ *,
94
+ scoring=None,
95
+ n_jobs=None,
96
+ refit=True,
97
+ cv=5,
98
+ verbose=0,
99
+ random_state=None,
100
+ error_score=np.nan,
101
+ return_train_score=True,
102
+ max_resources="auto",
103
+ min_resources="exhaust",
104
+ resource="n_samples",
105
+ factor=3,
106
+ aggressive_elimination=False,
107
+ ):
108
+ super().__init__(
109
+ estimator,
110
+ scoring=scoring,
111
+ n_jobs=n_jobs,
112
+ refit=refit,
113
+ cv=cv,
114
+ verbose=verbose,
115
+ error_score=error_score,
116
+ return_train_score=return_train_score,
117
+ )
118
+
119
+ self.random_state = random_state
120
+ self.max_resources = max_resources
121
+ self.resource = resource
122
+ self.factor = factor
123
+ self.min_resources = min_resources
124
+ self.aggressive_elimination = aggressive_elimination
125
+
126
+ def _check_input_parameters(self, X, y, split_params):
127
+ # We need to enforce that successive calls to cv.split() yield the same
128
+ # splits: see https://github.com/scikit-learn/scikit-learn/issues/15149
129
+ if not _yields_constant_splits(self._checked_cv_orig):
130
+ raise ValueError(
131
+ "The cv parameter must yield consistent folds across "
132
+ "calls to split(). Set its random_state to an int, or set "
133
+ "shuffle=False."
134
+ )
135
+
136
+ if (
137
+ self.resource != "n_samples"
138
+ and self.resource not in self.estimator.get_params()
139
+ ):
140
+ raise ValueError(
141
+ f"Cannot use resource={self.resource} which is not supported "
142
+ f"by estimator {self.estimator.__class__.__name__}"
143
+ )
144
+
145
+ if isinstance(self, HalvingRandomSearchCV):
146
+ if self.min_resources == self.n_candidates == "exhaust":
147
+ # for n_candidates=exhaust to work, we need to know what
148
+ # min_resources is. Similarly min_resources=exhaust needs to
149
+ # know the actual number of candidates.
150
+ raise ValueError(
151
+ "n_candidates and min_resources cannot be both set to 'exhaust'."
152
+ )
153
+
154
+ self.min_resources_ = self.min_resources
155
+ if self.min_resources_ in ("smallest", "exhaust"):
156
+ if self.resource == "n_samples":
157
+ n_splits = self._checked_cv_orig.get_n_splits(X, y, **split_params)
158
+ # please see https://gph.is/1KjihQe for a justification
159
+ magic_factor = 2
160
+ self.min_resources_ = n_splits * magic_factor
161
+ if is_classifier(self.estimator):
162
+ y = self._validate_data(X="no_validation", y=y)
163
+ check_classification_targets(y)
164
+ n_classes = np.unique(y).shape[0]
165
+ self.min_resources_ *= n_classes
166
+ else:
167
+ self.min_resources_ = 1
168
+ # if 'exhaust', min_resources_ might be set to a higher value later
169
+ # in _run_search
170
+
171
+ self.max_resources_ = self.max_resources
172
+ if self.max_resources_ == "auto":
173
+ if not self.resource == "n_samples":
174
+ raise ValueError(
175
+ "resource can only be 'n_samples' when max_resources='auto'"
176
+ )
177
+ self.max_resources_ = _num_samples(X)
178
+
179
+ if self.min_resources_ > self.max_resources_:
180
+ raise ValueError(
181
+ f"min_resources_={self.min_resources_} is greater "
182
+ f"than max_resources_={self.max_resources_}."
183
+ )
184
+
185
+ if self.min_resources_ == 0:
186
+ raise ValueError(
187
+ f"min_resources_={self.min_resources_}: you might have passed "
188
+ "an empty dataset X."
189
+ )
190
+
191
+ @staticmethod
192
+ def _select_best_index(refit, refit_metric, results):
193
+ """Custom refit callable to return the index of the best candidate.
194
+
195
+ We want the best candidate out of the last iteration. By default
196
+ BaseSearchCV would return the best candidate out of all iterations.
197
+
198
+ Currently, we only support a single metric, thus `refit` and
199
+ `refit_metric` are not required.
200
+ """
201
+ last_iter = np.max(results["iter"])
202
+ last_iter_indices = np.flatnonzero(results["iter"] == last_iter)
203
+
204
+ test_scores = results["mean_test_score"][last_iter_indices]
205
+ # If all scores are NaNs there is no way to pick between them,
206
+ # so we (arbitrarily) declare the zero'th entry the best one
207
+ if np.isnan(test_scores).all():
208
+ best_idx = 0
209
+ else:
210
+ best_idx = np.nanargmax(test_scores)
211
+
212
+ return last_iter_indices[best_idx]
213
+
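A minimal sketch (not part of the diff) of the NaN fallback used in `_select_best_index` above; the scores are made up.

import numpy as np

test_scores = np.array([np.nan, 0.7, 0.65])
best_idx = 0 if np.isnan(test_scores).all() else np.nanargmax(test_scores)
print(best_idx)  # 1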
214
+ @_fit_context(
215
+ # Halving*SearchCV.estimator is not validated yet
216
+ prefer_skip_nested_validation=False
217
+ )
218
+ def fit(self, X, y=None, **params):
219
+ """Run fit with all sets of parameters.
220
+
221
+ Parameters
222
+ ----------
223
+
224
+ X : array-like, shape (n_samples, n_features)
225
+ Training vector, where `n_samples` is the number of samples and
226
+ `n_features` is the number of features.
227
+
228
+ y : array-like, shape (n_samples,) or (n_samples, n_output), optional
229
+ Target relative to X for classification or regression;
230
+ None for unsupervised learning.
231
+
232
+ **params : dict of string -> object
233
+ Parameters passed to the ``fit`` method of the estimator.
234
+
235
+ Returns
236
+ -------
237
+ self : object
238
+ Instance of fitted estimator.
239
+ """
240
+ self._checked_cv_orig = check_cv(
241
+ self.cv, y, classifier=is_classifier(self.estimator)
242
+ )
243
+
244
+ routed_params = self._get_routed_params_for_fit(params)
245
+ self._check_input_parameters(
246
+ X=X, y=y, split_params=routed_params.splitter.split
247
+ )
248
+
249
+ self._n_samples_orig = _num_samples(X)
250
+
251
+ super().fit(X, y=y, **params)
252
+
253
+ # Set best_score_: BaseSearchCV does not set it, as refit is a callable
254
+ self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_]
255
+
256
+ return self
257
+
258
+ def _run_search(self, evaluate_candidates):
259
+ candidate_params = self._generate_candidate_params()
260
+
261
+ if self.resource != "n_samples" and any(
262
+ self.resource in candidate for candidate in candidate_params
263
+ ):
264
+ # Can only check this now since we need the candidates list
265
+ raise ValueError(
266
+ f"Cannot use parameter {self.resource} as the resource since "
267
+ "it is part of the searched parameters."
268
+ )
269
+
270
+ # n_required_iterations is the number of iterations needed so that the
271
+ # last iteration evaluates less than `factor` candidates.
272
+ n_required_iterations = 1 + floor(log(len(candidate_params), self.factor))
273
+
274
+ if self.min_resources == "exhaust":
275
+ # To exhaust the resources, we want to start with the biggest
276
+ # min_resources possible so that the last (required) iteration
277
+ # uses as many resources as possible
278
+ last_iteration = n_required_iterations - 1
279
+ self.min_resources_ = max(
280
+ self.min_resources_,
281
+ self.max_resources_ // self.factor**last_iteration,
282
+ )
283
+
284
+ # n_possible_iterations is the number of iterations that we can
285
+ # actually do starting from min_resources and without exceeding
286
+ # max_resources. Depending on max_resources and the number of
287
+ # candidates, this may be higher or smaller than
288
+ # n_required_iterations.
289
+ n_possible_iterations = 1 + floor(
290
+ log(self.max_resources_ // self.min_resources_, self.factor)
291
+ )
292
+
293
+ if self.aggressive_elimination:
294
+ n_iterations = n_required_iterations
295
+ else:
296
+ n_iterations = min(n_possible_iterations, n_required_iterations)
297
+
298
+ if self.verbose:
299
+ print(f"n_iterations: {n_iterations}")
300
+ print(f"n_required_iterations: {n_required_iterations}")
301
+ print(f"n_possible_iterations: {n_possible_iterations}")
302
+ print(f"min_resources_: {self.min_resources_}")
303
+ print(f"max_resources_: {self.max_resources_}")
304
+ print(f"aggressive_elimination: {self.aggressive_elimination}")
305
+ print(f"factor: {self.factor}")
306
+
307
+ self.n_resources_ = []
308
+ self.n_candidates_ = []
309
+
310
+ for itr in range(n_iterations):
311
+ power = itr # default
312
+ if self.aggressive_elimination:
313
+ # this will set n_resources to the initial value (i.e. the
314
+ # value of n_resources at the first iteration) for as many
315
+ # iterations as needed (while candidates are being
316
+ # eliminated), and then go on as usual.
317
+ power = max(0, itr - n_required_iterations + n_possible_iterations)
318
+
319
+ n_resources = int(self.factor**power * self.min_resources_)
320
+ # guard, probably not needed
321
+ n_resources = min(n_resources, self.max_resources_)
322
+ self.n_resources_.append(n_resources)
323
+
324
+ n_candidates = len(candidate_params)
325
+ self.n_candidates_.append(n_candidates)
326
+
327
+ if self.verbose:
328
+ print("-" * 10)
329
+ print(f"iter: {itr}")
330
+ print(f"n_candidates: {n_candidates}")
331
+ print(f"n_resources: {n_resources}")
332
+
333
+ if self.resource == "n_samples":
334
+ # subsampling will be done in cv.split()
335
+ cv = _SubsampleMetaSplitter(
336
+ base_cv=self._checked_cv_orig,
337
+ fraction=n_resources / self._n_samples_orig,
338
+ subsample_test=True,
339
+ random_state=self.random_state,
340
+ )
341
+
342
+ else:
343
+ # Need copy so that the n_resources of next iteration does
344
+ # not overwrite the previous values
345
+ candidate_params = [c.copy() for c in candidate_params]
346
+ for candidate in candidate_params:
347
+ candidate[self.resource] = n_resources
348
+ cv = self._checked_cv_orig
349
+
350
+ more_results = {
351
+ "iter": [itr] * n_candidates,
352
+ "n_resources": [n_resources] * n_candidates,
353
+ }
354
+
355
+ results = evaluate_candidates(
356
+ candidate_params, cv, more_results=more_results
357
+ )
358
+
359
+ n_candidates_to_keep = ceil(n_candidates / self.factor)
360
+ candidate_params = _top_k(results, n_candidates_to_keep, itr)
361
+
362
+ self.n_remaining_candidates_ = len(candidate_params)
363
+ self.n_required_iterations_ = n_required_iterations
364
+ self.n_possible_iterations_ = n_possible_iterations
365
+ self.n_iterations_ = n_iterations
366
+
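A minimal sketch (not part of the diff) of the bookkeeping computed above, for a hypothetical run with 20 candidates, `factor=3`, `min_resources_=20` and `max_resources_=1000`:

from math import ceil, floor, log

n_candidates, factor = 20, 3
min_resources, max_resources = 20, 1000

n_required_iterations = 1 + floor(log(n_candidates, factor))                    # 3
n_possible_iterations = 1 + floor(log(max_resources // min_resources, factor))  # 4
n_iterations = min(n_required_iterations, n_possible_iterations)                # 3

for itr in range(n_iterations):
    n_resources = min(factor**itr * min_resources, max_resources)
    print(itr, n_candidates, n_resources)  # 0 20 20, then 1 7 60, then 2 3 180
    n_candidates = ceil(n_candidates / factor)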
367
+ @abstractmethod
368
+ def _generate_candidate_params(self):
369
+ pass
370
+
371
+ def _more_tags(self):
372
+ tags = deepcopy(super()._more_tags())
373
+ tags["_xfail_checks"].update(
374
+ {
375
+ "check_fit2d_1sample": (
376
+ "Fail during parameter check since min/max resources requires"
377
+ " more samples"
378
+ ),
379
+ }
380
+ )
381
+ return tags
382
+
383
+
384
+ class HalvingGridSearchCV(BaseSuccessiveHalving):
385
+ """Search over specified parameter values with successive halving.
386
+
387
+ The search strategy starts evaluating all the candidates with a small
388
+ amount of resources and iteratively selects the best candidates, using
389
+ more and more resources.
390
+
391
+ Read more in the :ref:`User guide <successive_halving_user_guide>`.
392
+
393
+ .. note::
394
+
395
+ This estimator is still **experimental** for now: the predictions
396
+ and the API might change without any deprecation cycle. To use it,
397
+ you need to explicitly import ``enable_halving_search_cv``::
398
+
399
+ >>> # explicitly require this experimental feature
400
+ >>> from sklearn.experimental import enable_halving_search_cv # noqa
401
+ >>> # now you can import normally from model_selection
402
+ >>> from sklearn.model_selection import HalvingGridSearchCV
403
+
404
+ Parameters
405
+ ----------
406
+ estimator : estimator object
407
+ This is assumed to implement the scikit-learn estimator interface.
408
+ Either estimator needs to provide a ``score`` function,
409
+ or ``scoring`` must be passed.
410
+
411
+ param_grid : dict or list of dictionaries
412
+ Dictionary with parameters names (string) as keys and lists of
413
+ parameter settings to try as values, or a list of such
414
+ dictionaries, in which case the grids spanned by each dictionary
415
+ in the list are explored. This enables searching over any sequence
416
+ of parameter settings.
417
+
418
+ factor : int or float, default=3
419
+ The 'halving' parameter, which determines the proportion of candidates
420
+ that are selected for each subsequent iteration. For example,
421
+ ``factor=3`` means that only one third of the candidates are selected.
422
+
423
+ resource : ``'n_samples'`` or str, default='n_samples'
424
+ Defines the resource that increases with each iteration. By default,
425
+ the resource is the number of samples. It can also be set to any
426
+ parameter of the base estimator that accepts positive integer
427
+ values, e.g. 'n_iterations' or 'n_estimators' for a gradient
428
+ boosting estimator. In this case ``max_resources`` cannot be 'auto'
429
+ and must be set explicitly.
430
+
431
+ max_resources : int, default='auto'
432
+ The maximum amount of resource that any candidate is allowed to use
433
+ for a given iteration. By default, this is set to ``n_samples`` when
434
+ ``resource='n_samples'`` (default), else an error is raised.
435
+
436
+ min_resources : {'exhaust', 'smallest'} or int, default='exhaust'
437
+ The minimum amount of resource that any candidate is allowed to use
438
+ for a given iteration. Equivalently, this defines the amount of
439
+ resources `r0` that are allocated for each candidate at the first
440
+ iteration.
441
+
442
+ - 'smallest' is a heuristic that sets `r0` to a small value:
443
+
444
+ - ``n_splits * 2`` when ``resource='n_samples'`` for a regression
445
+ problem
446
+ - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
447
+ classification problem
448
+ - ``1`` when ``resource != 'n_samples'``
449
+
450
+ - 'exhaust' will set `r0` such that the **last** iteration uses as
451
+ many resources as possible. Namely, the last iteration will use the
452
+ highest value smaller than ``max_resources`` that is a multiple of
453
+ both ``min_resources`` and ``factor``. In general, using 'exhaust'
454
+ leads to a more accurate estimator, but is slightly more time
455
+ consuming.
456
+
457
+ Note that the amount of resources used at each iteration is always a
458
+ multiple of ``min_resources``.
459
+
460
+ aggressive_elimination : bool, default=False
461
+ This is only relevant in cases where there aren't enough resources to
462
+ reduce the remaining candidates to at most `factor` after the last
463
+ iteration. If ``True``, then the search process will 'replay' the
464
+ first iteration for as long as needed until the number of candidates
465
+ is small enough. This is ``False`` by default, which means that the
466
+ last iteration may evaluate more than ``factor`` candidates. See
467
+ :ref:`aggressive_elimination` for more details.
468
+
469
+ cv : int, cross-validation generator or iterable, default=5
470
+ Determines the cross-validation splitting strategy.
471
+ Possible inputs for cv are:
472
+
473
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
474
+ - :term:`CV splitter`,
475
+ - An iterable yielding (train, test) splits as arrays of indices.
476
+
477
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
478
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
479
+ other cases, :class:`KFold` is used. These splitters are instantiated
480
+ with `shuffle=False` so the splits will be the same across calls.
481
+
482
+ Refer to the :ref:`User Guide <cross_validation>` for the various
483
+ cross-validation strategies that can be used here.
484
+
485
+ .. note::
486
+ Due to implementation details, the folds produced by `cv` must be
487
+ the same across multiple calls to `cv.split()`. For
488
+ built-in `scikit-learn` iterators, this can be achieved by
489
+ deactivating shuffling (`shuffle=False`), or by setting the
490
+ `cv`'s `random_state` parameter to an integer.
491
+
492
+ scoring : str, callable, or None, default=None
493
+ A single string (see :ref:`scoring_parameter`) or a callable
494
+ (see :ref:`scoring`) to evaluate the predictions on the test set.
495
+ If None, the estimator's score method is used.
496
+
497
+ refit : bool, default=True
498
+ If True, refit an estimator using the best found parameters on the
499
+ whole dataset.
500
+
501
+ The refitted estimator is made available at the ``best_estimator_``
502
+ attribute and permits using ``predict`` directly on this
503
+ ``HalvingGridSearchCV`` instance.
504
+
505
+ error_score : 'raise' or numeric
506
+ Value to assign to the score if an error occurs in estimator fitting.
507
+ If set to 'raise', the error is raised. If a numeric value is given,
508
+ FitFailedWarning is raised. This parameter does not affect the refit
509
+ step, which will always raise the error. Default is ``np.nan``.
510
+
511
+ return_train_score : bool, default=False
512
+ If ``False``, the ``cv_results_`` attribute will not include training
513
+ scores.
514
+ Computing training scores is used to get insights on how different
515
+ parameter settings impact the overfitting/underfitting trade-off.
516
+ However computing the scores on the training set can be computationally
517
+ expensive and is not strictly required to select the parameters that
518
+ yield the best generalization performance.
519
+
520
+ random_state : int, RandomState instance or None, default=None
521
+ Pseudo random number generator state used for subsampling the dataset
522
+ when `resources != 'n_samples'`. Ignored otherwise.
523
+ Pass an int for reproducible output across multiple function calls.
524
+ See :term:`Glossary <random_state>`.
525
+
526
+ n_jobs : int or None, default=None
527
+ Number of jobs to run in parallel.
528
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
529
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
530
+ for more details.
531
+
532
+ verbose : int
533
+ Controls the verbosity: the higher, the more messages.
534
+
535
+ Attributes
536
+ ----------
537
+ n_resources_ : list of int
538
+ The amount of resources used at each iteration.
539
+
540
+ n_candidates_ : list of int
541
+ The number of candidate parameters that were evaluated at each
542
+ iteration.
543
+
544
+ n_remaining_candidates_ : int
545
+ The number of candidate parameters that are left after the last
546
+ iteration. It corresponds to `ceil(n_candidates[-1] / factor)`
547
+
548
+ max_resources_ : int
549
+ The maximum number of resources that any candidate is allowed to use
550
+ for a given iteration. Note that since the number of resources used
551
+ at each iteration must be a multiple of ``min_resources_``, the
552
+ actual number of resources used at the last iteration may be smaller
553
+ than ``max_resources_``.
554
+
555
+ min_resources_ : int
556
+ The amount of resources that are allocated for each candidate at the
557
+ first iteration.
558
+
559
+ n_iterations_ : int
560
+ The actual number of iterations that were run. This is equal to
561
+ ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
562
+ Else, this is equal to ``min(n_possible_iterations_,
563
+ n_required_iterations_)``.
564
+
565
+ n_possible_iterations_ : int
566
+ The number of iterations that are possible starting with
567
+ ``min_resources_`` resources and without exceeding
568
+ ``max_resources_``.
569
+
570
+ n_required_iterations_ : int
571
+ The number of iterations that are required to end up with less than
572
+ ``factor`` candidates at the last iteration, starting with
573
+ ``min_resources_`` resources. This will be smaller than
574
+ ``n_possible_iterations_`` when there aren't enough resources.
575
+
576
+ cv_results_ : dict of numpy (masked) ndarrays
577
+ A dict with keys as column headers and values as columns, that can be
578
+ imported into a pandas ``DataFrame``. It contains lots of information
579
+ for analysing the results of a search.
580
+ Please refer to the :ref:`User guide<successive_halving_cv_results>`
581
+ for details.
582
+
583
+ best_estimator_ : estimator or dict
584
+ Estimator that was chosen by the search, i.e. estimator
585
+ which gave highest score (or smallest loss if specified)
586
+ on the left out data. Not available if ``refit=False``.
587
+
588
+ best_score_ : float
589
+ Mean cross-validated score of the best_estimator.
590
+
591
+ best_params_ : dict
592
+ Parameter setting that gave the best results on the hold out data.
593
+
594
+ best_index_ : int
595
+ The index (of the ``cv_results_`` arrays) which corresponds to the best
596
+ candidate parameter setting.
597
+
598
+ The dict at ``search.cv_results_['params'][search.best_index_]`` gives
599
+ the parameter setting for the best model, which gives the highest
600
+ mean score (``search.best_score_``).
601
+
602
+ scorer_ : function or a dict
603
+ Scorer function used on the held out data to choose the best
604
+ parameters for the model.
605
+
606
+ n_splits_ : int
607
+ The number of cross-validation splits (folds/iterations).
608
+
609
+ refit_time_ : float
610
+ Seconds used for refitting the best model on the whole dataset.
611
+
612
+ This is present only if ``refit`` is not False.
613
+
614
+ multimetric_ : bool
615
+ Whether or not the scorers compute several metrics.
616
+
617
+ classes_ : ndarray of shape (n_classes,)
618
+ The classes labels. This is present only if ``refit`` is specified and
619
+ the underlying estimator is a classifier.
620
+
621
+ n_features_in_ : int
622
+ Number of features seen during :term:`fit`. Only defined if
623
+ `best_estimator_` is defined (see the documentation for the `refit`
624
+ parameter for more details) and that `best_estimator_` exposes
625
+ `n_features_in_` when fit.
626
+
627
+ .. versionadded:: 0.24
628
+
629
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
630
+ Names of features seen during :term:`fit`. Only defined if
631
+ `best_estimator_` is defined (see the documentation for the `refit`
632
+ parameter for more details) and that `best_estimator_` exposes
633
+ `feature_names_in_` when fit.
634
+
635
+ .. versionadded:: 1.0
636
+
637
+ See Also
638
+ --------
639
+ :class:`HalvingRandomSearchCV`:
640
+ Random search over a set of parameters using successive halving.
641
+
642
+ Notes
643
+ -----
644
+ The parameters selected are those that maximize the score of the held-out
645
+ data, according to the scoring parameter.
646
+
647
+ All parameter combinations scored with a NaN will share the lowest rank.
648
+
649
+ Examples
650
+ --------
651
+
652
+ >>> from sklearn.datasets import load_iris
653
+ >>> from sklearn.ensemble import RandomForestClassifier
654
+ >>> from sklearn.experimental import enable_halving_search_cv # noqa
655
+ >>> from sklearn.model_selection import HalvingGridSearchCV
656
+ ...
657
+ >>> X, y = load_iris(return_X_y=True)
658
+ >>> clf = RandomForestClassifier(random_state=0)
659
+ ...
660
+ >>> param_grid = {"max_depth": [3, None],
661
+ ... "min_samples_split": [5, 10]}
662
+ >>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators',
663
+ ... max_resources=10,
664
+ ... random_state=0).fit(X, y)
665
+ >>> search.best_params_ # doctest: +SKIP
666
+ {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
667
+ """
668
+
669
+ _required_parameters = ["estimator", "param_grid"]
670
+
671
+ _parameter_constraints: dict = {
672
+ **BaseSuccessiveHalving._parameter_constraints,
673
+ "param_grid": [dict, list],
674
+ }
675
+
676
+ def __init__(
677
+ self,
678
+ estimator,
679
+ param_grid,
680
+ *,
681
+ factor=3,
682
+ resource="n_samples",
683
+ max_resources="auto",
684
+ min_resources="exhaust",
685
+ aggressive_elimination=False,
686
+ cv=5,
687
+ scoring=None,
688
+ refit=True,
689
+ error_score=np.nan,
690
+ return_train_score=True,
691
+ random_state=None,
692
+ n_jobs=None,
693
+ verbose=0,
694
+ ):
695
+ super().__init__(
696
+ estimator,
697
+ scoring=scoring,
698
+ n_jobs=n_jobs,
699
+ refit=refit,
700
+ verbose=verbose,
701
+ cv=cv,
702
+ random_state=random_state,
703
+ error_score=error_score,
704
+ return_train_score=return_train_score,
705
+ max_resources=max_resources,
706
+ resource=resource,
707
+ factor=factor,
708
+ min_resources=min_resources,
709
+ aggressive_elimination=aggressive_elimination,
710
+ )
711
+ self.param_grid = param_grid
712
+
713
+ def _generate_candidate_params(self):
714
+ return ParameterGrid(self.param_grid)
715
+
716
+
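Since `_generate_candidate_params` above just wraps the grid, the first-iteration candidate list can be inspected with the public `ParameterGrid`. A minimal sketch (not part of the diff; the grid is the one from the docstring example):

from sklearn.model_selection import ParameterGrid

param_grid = {"max_depth": [3, None], "min_samples_split": [5, 10]}
print(len(ParameterGrid(param_grid)))      # 4 candidates at iteration 0
print(list(ParameterGrid(param_grid))[0])  # the first of the four grid points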
717
+ class HalvingRandomSearchCV(BaseSuccessiveHalving):
718
+ """Randomized search on hyper parameters.
719
+
720
+ The search strategy starts evaluating all the candidates with a small
721
+ amount of resources and iteratively selects the best candidates, using more
722
+ and more resources.
723
+
724
+ The candidates are sampled at random from the parameter space and the
725
+ number of sampled candidates is determined by ``n_candidates``.
726
+
727
+ Read more in the :ref:`User guide<successive_halving_user_guide>`.
728
+
729
+ .. note::
730
+
731
+ This estimator is still **experimental** for now: the predictions
732
+ and the API might change without any deprecation cycle. To use it,
733
+ you need to explicitly import ``enable_halving_search_cv``::
734
+
735
+ >>> # explicitly require this experimental feature
736
+ >>> from sklearn.experimental import enable_halving_search_cv # noqa
737
+ >>> # now you can import normally from model_selection
738
+ >>> from sklearn.model_selection import HalvingRandomSearchCV
739
+
740
+ Parameters
741
+ ----------
742
+ estimator : estimator object
743
+ This is assumed to implement the scikit-learn estimator interface.
744
+ Either estimator needs to provide a ``score`` function,
745
+ or ``scoring`` must be passed.
746
+
747
+ param_distributions : dict or list of dicts
748
+ Dictionary with parameters names (`str`) as keys and distributions
749
+ or lists of parameters to try. Distributions must provide a ``rvs``
750
+ method for sampling (such as those from scipy.stats.distributions).
751
+ If a list is given, it is sampled uniformly.
752
+ If a list of dicts is given, first a dict is sampled uniformly, and
753
+ then a parameter is sampled using that dict as above.
754
+
755
+ n_candidates : "exhaust" or int, default="exhaust"
756
+ The number of candidate parameters to sample, at the first
757
+ iteration. Using 'exhaust' will sample enough candidates so that the
758
+ last iteration uses as many resources as possible, based on
759
+ `min_resources`, `max_resources` and `factor`. In this case,
760
+ `min_resources` cannot be 'exhaust'.
761
+
762
+ factor : int or float, default=3
763
+ The 'halving' parameter, which determines the proportion of candidates
764
+ that are selected for each subsequent iteration. For example,
765
+ ``factor=3`` means that only one third of the candidates are selected.
766
+
767
+ resource : ``'n_samples'`` or str, default='n_samples'
768
+ Defines the resource that increases with each iteration. By default,
769
+ the resource is the number of samples. It can also be set to any
770
+ parameter of the base estimator that accepts positive integer
771
+ values, e.g. 'n_iterations' or 'n_estimators' for a gradient
772
+ boosting estimator. In this case ``max_resources`` cannot be 'auto'
773
+ and must be set explicitly.
774
+
775
+ max_resources : int, default='auto'
776
+ The maximum number of resources that any candidate is allowed to use
777
+ for a given iteration. By default, this is set to ``n_samples`` when
778
+ ``resource='n_samples'`` (default), else an error is raised.
779
+
780
+ min_resources : {'exhaust', 'smallest'} or int, default='smallest'
781
+ The minimum amount of resource that any candidate is allowed to use
782
+ for a given iteration. Equivalently, this defines the amount of
783
+ resources `r0` that are allocated for each candidate at the first
784
+ iteration.
785
+
786
+ - 'smallest' is a heuristic that sets `r0` to a small value:
787
+
788
+ - ``n_splits * 2`` when ``resource='n_samples'`` for a regression
789
+ problem
790
+ - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
791
+ classification problem
792
+ - ``1`` when ``resource != 'n_samples'``
793
+
794
+ - 'exhaust' will set `r0` such that the **last** iteration uses as
795
+ many resources as possible. Namely, the last iteration will use the
796
+ highest value smaller than ``max_resources`` that is a multiple of
797
+ both ``min_resources`` and ``factor``. In general, using 'exhaust'
798
+ leads to a more accurate estimator, but is slightly more time
799
+ consuming. 'exhaust' isn't available when `n_candidates='exhaust'`.
800
+
801
+ Note that the amount of resources used at each iteration is always a
802
+ multiple of ``min_resources``.
803
+
804
+ aggressive_elimination : bool, default=False
805
+ This is only relevant in cases where there aren't enough resources to
806
+ reduce the remaining candidates to at most `factor` after the last
807
+ iteration. If ``True``, then the search process will 'replay' the
808
+ first iteration for as long as needed until the number of candidates
809
+ is small enough. This is ``False`` by default, which means that the
810
+ last iteration may evaluate more than ``factor`` candidates. See
811
+ :ref:`aggressive_elimination` for more details.
812
+
813
+ cv : int, cross-validation generator or an iterable, default=5
814
+ Determines the cross-validation splitting strategy.
815
+ Possible inputs for cv are:
816
+
817
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
818
+ - :term:`CV splitter`,
819
+ - An iterable yielding (train, test) splits as arrays of indices.
820
+
821
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
822
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
823
+ other cases, :class:`KFold` is used. These splitters are instantiated
824
+ with `shuffle=False` so the splits will be the same across calls.
825
+
826
+ Refer to the :ref:`User Guide <cross_validation>` for the various
827
+ cross-validation strategies that can be used here.
828
+
829
+ .. note::
830
+ Due to implementation details, the folds produced by `cv` must be
831
+ the same across multiple calls to `cv.split()`. For
832
+ built-in `scikit-learn` iterators, this can be achieved by
833
+ deactivating shuffling (`shuffle=False`), or by setting the
834
+ `cv`'s `random_state` parameter to an integer.
835
+
836
+ scoring : str, callable, or None, default=None
837
+ A single string (see :ref:`scoring_parameter`) or a callable
838
+ (see :ref:`scoring`) to evaluate the predictions on the test set.
839
+ If None, the estimator's score method is used.
840
+
841
+ refit : bool, default=True
842
+ If True, refit an estimator using the best found parameters on the
843
+ whole dataset.
844
+
845
+ The refitted estimator is made available at the ``best_estimator_``
846
+ attribute and permits using ``predict`` directly on this
847
+ ``HalvingRandomSearchCV`` instance.
848
+
849
+ error_score : 'raise' or numeric
850
+ Value to assign to the score if an error occurs in estimator fitting.
851
+ If set to 'raise', the error is raised. If a numeric value is given,
852
+ FitFailedWarning is raised. This parameter does not affect the refit
853
+ step, which will always raise the error. Default is ``np.nan``.
854
+
855
+ return_train_score : bool, default=False
856
+ If ``False``, the ``cv_results_`` attribute will not include training
857
+ scores.
858
+ Computing training scores is used to get insights on how different
859
+ parameter settings impact the overfitting/underfitting trade-off.
860
+ However computing the scores on the training set can be computationally
861
+ expensive and is not strictly required to select the parameters that
862
+ yield the best generalization performance.
863
+
864
+ random_state : int, RandomState instance or None, default=None
865
+ Pseudo random number generator state used for subsampling the dataset
866
+ when `resources != 'n_samples'`. Also used for random uniform
867
+ sampling from lists of possible values instead of scipy.stats
868
+ distributions.
869
+ Pass an int for reproducible output across multiple function calls.
870
+ See :term:`Glossary <random_state>`.
871
+
872
+ n_jobs : int or None, default=None
873
+ Number of jobs to run in parallel.
874
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
875
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
876
+ for more details.
877
+
878
+ verbose : int
879
+ Controls the verbosity: the higher, the more messages.
880
+
881
+ Attributes
882
+ ----------
883
+ n_resources_ : list of int
884
+ The amount of resources used at each iteration.
885
+
886
+ n_candidates_ : list of int
887
+ The number of candidate parameters that were evaluated at each
888
+ iteration.
889
+
890
+ n_remaining_candidates_ : int
891
+ The number of candidate parameters that are left after the last
892
+ iteration. It corresponds to `ceil(n_candidates[-1] / factor)`
893
+
894
+ max_resources_ : int
895
+ The maximum number of resources that any candidate is allowed to use
896
+ for a given iteration. Note that since the number of resources used at
897
+ each iteration must be a multiple of ``min_resources_``, the actual
898
+ number of resources used at the last iteration may be smaller than
899
+ ``max_resources_``.
900
+
901
+ min_resources_ : int
902
+ The amount of resources that are allocated for each candidate at the
903
+ first iteration.
904
+
905
+ n_iterations_ : int
906
+ The actual number of iterations that were run. This is equal to
907
+ ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
908
+ Else, this is equal to ``min(n_possible_iterations_,
909
+ n_required_iterations_)``.
910
+
911
+ n_possible_iterations_ : int
912
+ The number of iterations that are possible starting with
913
+ ``min_resources_`` resources and without exceeding
914
+ ``max_resources_``.
915
+
916
+ n_required_iterations_ : int
917
+ The number of iterations that are required to end up with less than
918
+ ``factor`` candidates at the last iteration, starting with
919
+ ``min_resources_`` resources. This will be smaller than
920
+ ``n_possible_iterations_`` when there aren't enough resources.
921
+
922
+ cv_results_ : dict of numpy (masked) ndarrays
923
+ A dict with keys as column headers and values as columns, that can be
924
+ imported into a pandas ``DataFrame``. It contains lots of information
925
+ for analysing the results of a search.
926
+ Please refer to the :ref:`User guide<successive_halving_cv_results>`
927
+ for details.
928
+
929
+ best_estimator_ : estimator or dict
930
+ Estimator that was chosen by the search, i.e. estimator
931
+ which gave highest score (or smallest loss if specified)
932
+ on the left out data. Not available if ``refit=False``.
933
+
934
+ best_score_ : float
935
+ Mean cross-validated score of the best_estimator.
936
+
937
+ best_params_ : dict
938
+ Parameter setting that gave the best results on the hold out data.
939
+
940
+ best_index_ : int
941
+ The index (of the ``cv_results_`` arrays) which corresponds to the best
942
+ candidate parameter setting.
943
+
944
+ The dict at ``search.cv_results_['params'][search.best_index_]`` gives
945
+ the parameter setting for the best model, which gives the highest
946
+ mean score (``search.best_score_``).
947
+
948
+ scorer_ : function or a dict
949
+ Scorer function used on the held out data to choose the best
950
+ parameters for the model.
951
+
952
+ n_splits_ : int
953
+ The number of cross-validation splits (folds/iterations).
954
+
955
+ refit_time_ : float
956
+ Seconds used for refitting the best model on the whole dataset.
957
+
958
+ This is present only if ``refit`` is not False.
959
+
960
+ multimetric_ : bool
961
+ Whether or not the scorers compute several metrics.
962
+
963
+ classes_ : ndarray of shape (n_classes,)
964
+ The classes labels. This is present only if ``refit`` is specified and
965
+ the underlying estimator is a classifier.
966
+
967
+ n_features_in_ : int
968
+ Number of features seen during :term:`fit`. Only defined if
969
+ `best_estimator_` is defined (see the documentation for the `refit`
970
+ parameter for more details) and that `best_estimator_` exposes
971
+ `n_features_in_` when fit.
972
+
973
+ .. versionadded:: 0.24
974
+
975
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
976
+ Names of features seen during :term:`fit`. Only defined if
977
+ `best_estimator_` is defined (see the documentation for the `refit`
978
+ parameter for more details) and that `best_estimator_` exposes
979
+ `feature_names_in_` when fit.
980
+
981
+ .. versionadded:: 1.0
982
+
983
+ See Also
984
+ --------
985
+ :class:`HalvingGridSearchCV`:
986
+ Search over a grid of parameters using successive halving.
987
+
988
+ Notes
989
+ -----
990
+ The parameters selected are those that maximize the score of the held-out
991
+ data, according to the scoring parameter.
992
+
993
+ All parameter combinations scored with a NaN will share the lowest rank.
994
+
995
+ Examples
996
+ --------
997
+
998
+ >>> from sklearn.datasets import load_iris
999
+ >>> from sklearn.ensemble import RandomForestClassifier
1000
+ >>> from sklearn.experimental import enable_halving_search_cv # noqa
1001
+ >>> from sklearn.model_selection import HalvingRandomSearchCV
1002
+ >>> from scipy.stats import randint
1003
+ >>> import numpy as np
1004
+ ...
1005
+ >>> X, y = load_iris(return_X_y=True)
1006
+ >>> clf = RandomForestClassifier(random_state=0)
1007
+ >>> np.random.seed(0)
1008
+ ...
1009
+ >>> param_distributions = {"max_depth": [3, None],
1010
+ ... "min_samples_split": randint(2, 11)}
1011
+ >>> search = HalvingRandomSearchCV(clf, param_distributions,
1012
+ ... resource='n_estimators',
1013
+ ... max_resources=10,
1014
+ ... random_state=0).fit(X, y)
1015
+ >>> search.best_params_ # doctest: +SKIP
1016
+ {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
1017
+ """
1018
+
1019
+ _required_parameters = ["estimator", "param_distributions"]
1020
+
1021
+ _parameter_constraints: dict = {
1022
+ **BaseSuccessiveHalving._parameter_constraints,
1023
+ "param_distributions": [dict, list],
1024
+ "n_candidates": [
1025
+ Interval(Integral, 0, None, closed="neither"),
1026
+ StrOptions({"exhaust"}),
1027
+ ],
1028
+ }
1029
+
1030
+ def __init__(
1031
+ self,
1032
+ estimator,
1033
+ param_distributions,
1034
+ *,
1035
+ n_candidates="exhaust",
1036
+ factor=3,
1037
+ resource="n_samples",
1038
+ max_resources="auto",
1039
+ min_resources="smallest",
1040
+ aggressive_elimination=False,
1041
+ cv=5,
1042
+ scoring=None,
1043
+ refit=True,
1044
+ error_score=np.nan,
1045
+ return_train_score=True,
1046
+ random_state=None,
1047
+ n_jobs=None,
1048
+ verbose=0,
1049
+ ):
1050
+ super().__init__(
1051
+ estimator,
1052
+ scoring=scoring,
1053
+ n_jobs=n_jobs,
1054
+ refit=refit,
1055
+ verbose=verbose,
1056
+ cv=cv,
1057
+ random_state=random_state,
1058
+ error_score=error_score,
1059
+ return_train_score=return_train_score,
1060
+ max_resources=max_resources,
1061
+ resource=resource,
1062
+ factor=factor,
1063
+ min_resources=min_resources,
1064
+ aggressive_elimination=aggressive_elimination,
1065
+ )
1066
+ self.param_distributions = param_distributions
1067
+ self.n_candidates = n_candidates
1068
+
1069
+ def _generate_candidate_params(self):
1070
+ n_candidates_first_iter = self.n_candidates
1071
+ if n_candidates_first_iter == "exhaust":
1072
+ # This will generate enough candidates so that the last iteration
1073
+ # uses as many resources as possible
1074
+ n_candidates_first_iter = self.max_resources_ // self.min_resources_
1075
+ return ParameterSampler(
1076
+ self.param_distributions,
1077
+ n_candidates_first_iter,
1078
+ random_state=self.random_state,
1079
+ )
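A minimal sketch (not part of the diff) of the `n_candidates='exhaust'` branch above: the number of sampled candidates is `max_resources_ // min_resources_`. The resource values here are made up.

from scipy.stats import randint
from sklearn.model_selection import ParameterSampler

max_resources_, min_resources_ = 1000, 20
n_candidates_first_iter = max_resources_ // min_resources_   # 50
sampler = ParameterSampler(
    {"max_depth": [3, None], "min_samples_split": randint(2, 11)},
    n_iter=n_candidates_first_iter,
    random_state=0,
)
print(len(list(sampler)))  # 50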
venv/lib/python3.10/site-packages/sklearn/model_selection/_split.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/sklearn/model_selection/_validation.py ADDED
@@ -0,0 +1,2360 @@
1
+ """
2
+ The :mod:`sklearn.model_selection._validation` module includes classes and
3
+ functions to validate the model.
4
+ """
5
+
6
+ # Author: Alexandre Gramfort <[email protected]>
7
+ # Gael Varoquaux <[email protected]>
8
+ # Olivier Grisel <[email protected]>
9
+ # Raghav RV <[email protected]>
10
+ # Michal Karbownik <[email protected]>
11
+ # License: BSD 3 clause
12
+
13
+
14
+ import numbers
15
+ import time
16
+ import warnings
17
+ from collections import Counter
18
+ from contextlib import suppress
19
+ from functools import partial
20
+ from numbers import Real
21
+ from traceback import format_exc
22
+
23
+ import numpy as np
24
+ import scipy.sparse as sp
25
+ from joblib import logger
26
+
27
+ from ..base import clone, is_classifier
28
+ from ..exceptions import FitFailedWarning, UnsetMetadataPassedError
29
+ from ..metrics import check_scoring, get_scorer_names
30
+ from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
31
+ from ..preprocessing import LabelEncoder
32
+ from ..utils import Bunch, _safe_indexing, check_random_state, indexable
33
+ from ..utils._param_validation import (
34
+ HasMethods,
35
+ Integral,
36
+ Interval,
37
+ StrOptions,
38
+ validate_params,
39
+ )
40
+ from ..utils.metadata_routing import (
41
+ MetadataRouter,
42
+ MethodMapping,
43
+ _routing_enabled,
44
+ process_routing,
45
+ )
46
+ from ..utils.metaestimators import _safe_split
47
+ from ..utils.parallel import Parallel, delayed
48
+ from ..utils.validation import _check_method_params, _num_samples
49
+ from ._split import check_cv
50
+
51
+ __all__ = [
52
+ "cross_validate",
53
+ "cross_val_score",
54
+ "cross_val_predict",
55
+ "permutation_test_score",
56
+ "learning_curve",
57
+ "validation_curve",
58
+ ]
59
+
60
+
61
+ def _check_params_groups_deprecation(fit_params, params, groups):
62
+ """A helper function to check deprecations on `groups` and `fit_params`.
63
+
64
+ To be removed when set_config(enable_metadata_routing=False) is not possible.
65
+ """
66
+ if params is not None and fit_params is not None:
67
+ raise ValueError(
68
+ "`params` and `fit_params` cannot both be provided. Pass parameters "
69
+ "via `params`. `fit_params` is deprecated and will be removed in "
70
+ "version 1.6."
71
+ )
72
+ elif fit_params is not None:
73
+ warnings.warn(
74
+ (
75
+ "`fit_params` is deprecated and will be removed in version 1.6. "
76
+ "Pass parameters via `params` instead."
77
+ ),
78
+ FutureWarning,
79
+ )
80
+ params = fit_params
81
+
82
+ params = {} if params is None else params
83
+
84
+ if groups is not None and _routing_enabled():
85
+ raise ValueError(
86
+ "`groups` can only be passed if metadata routing is not enabled via"
87
+ " `sklearn.set_config(enable_metadata_routing=True)`. When routing is"
88
+ " enabled, pass `groups` alongside other metadata via the `params` argument"
89
+ " instead."
90
+ )
91
+
92
+ return params
93
+
94
+
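What the helper above means for callers of `cross_validate`: the deprecated `fit_params` spelling still works but warns, and `params` is the forward-compatible replacement. A minimal sketch (not part of the diff; the sample weights are illustrative):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate

X, y = load_iris(return_X_y=True)
weights = np.ones(len(y))

# deprecated since 1.4 (routes through the helper above and warns):
#   cross_validate(LogisticRegression(max_iter=1000), X, y,
#                  fit_params={"sample_weight": weights})

# preferred spelling:
cv_results = cross_validate(
    LogisticRegression(max_iter=1000), X, y, params={"sample_weight": weights}
)
print(sorted(cv_results.keys()))  # ['fit_time', 'score_time', 'test_score']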
95
+ @validate_params(
96
+ {
97
+ "estimator": [HasMethods("fit")],
98
+ "X": ["array-like", "sparse matrix"],
99
+ "y": ["array-like", None],
100
+ "groups": ["array-like", None],
101
+ "scoring": [
102
+ StrOptions(set(get_scorer_names())),
103
+ callable,
104
+ list,
105
+ tuple,
106
+ dict,
107
+ None,
108
+ ],
109
+ "cv": ["cv_object"],
110
+ "n_jobs": [Integral, None],
111
+ "verbose": ["verbose"],
112
+ "fit_params": [dict, None],
113
+ "params": [dict, None],
114
+ "pre_dispatch": [Integral, str],
115
+ "return_train_score": ["boolean"],
116
+ "return_estimator": ["boolean"],
117
+ "return_indices": ["boolean"],
118
+ "error_score": [StrOptions({"raise"}), Real],
119
+ },
120
+ prefer_skip_nested_validation=False, # estimator is not validated yet
121
+ )
122
+ def cross_validate(
123
+ estimator,
124
+ X,
125
+ y=None,
126
+ *,
127
+ groups=None,
128
+ scoring=None,
129
+ cv=None,
130
+ n_jobs=None,
131
+ verbose=0,
132
+ fit_params=None,
133
+ params=None,
134
+ pre_dispatch="2*n_jobs",
135
+ return_train_score=False,
136
+ return_estimator=False,
137
+ return_indices=False,
138
+ error_score=np.nan,
139
+ ):
140
+ """Evaluate metric(s) by cross-validation and also record fit/score times.
141
+
142
+ Read more in the :ref:`User Guide <multimetric_cross_validation>`.
143
+
144
+ Parameters
145
+ ----------
146
+ estimator : estimator object implementing 'fit'
147
+ The object to use to fit the data.
148
+
149
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
150
+ The data to fit. Can be for example a list, or an array.
151
+
152
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
153
+ The target variable to try to predict in the case of
154
+ supervised learning.
155
+
156
+ groups : array-like of shape (n_samples,), default=None
157
+ Group labels for the samples used while splitting the dataset into
158
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
159
+ instance (e.g., :class:`GroupKFold`).
160
+
161
+ .. versionchanged:: 1.4
162
+ ``groups`` can only be passed if metadata routing is not enabled
163
+ via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
164
+ is enabled, pass ``groups`` alongside other metadata via the ``params``
165
+ argument instead. E.g.:
166
+ ``cross_validate(..., params={'groups': groups})``.
167
+
168
+ scoring : str, callable, list, tuple, or dict, default=None
169
+ Strategy to evaluate the performance of the cross-validated model on
170
+ the test set.
171
+
172
+ If `scoring` represents a single score, one can use:
173
+
174
+ - a single string (see :ref:`scoring_parameter`);
175
+ - a callable (see :ref:`scoring`) that returns a single value.
176
+
177
+ If `scoring` represents multiple scores, one can use:
178
+
179
+ - a list or tuple of unique strings;
180
+ - a callable returning a dictionary where the keys are the metric
181
+ names and the values are the metric scores;
182
+ - a dictionary with metric names as keys and callables as values.
183
+
184
+ See :ref:`multimetric_grid_search` for an example.
185
+
186
+ cv : int, cross-validation generator or an iterable, default=None
187
+ Determines the cross-validation splitting strategy.
188
+ Possible inputs for cv are:
189
+
190
+ - None, to use the default 5-fold cross validation,
191
+ - int, to specify the number of folds in a `(Stratified)KFold`,
192
+ - :term:`CV splitter`,
193
+ - An iterable yielding (train, test) splits as arrays of indices.
194
+
195
+ For int/None inputs, if the estimator is a classifier and ``y`` is
196
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
197
+ other cases, :class:`KFold` is used. These splitters are instantiated
198
+ with `shuffle=False` so the splits will be the same across calls.
199
+
200
+ Refer to the :ref:`User Guide <cross_validation>` for the various
201
+ cross-validation strategies that can be used here.
202
+
203
+ .. versionchanged:: 0.22
204
+ ``cv`` default value if None changed from 3-fold to 5-fold.
205
+
206
+ n_jobs : int, default=None
207
+ Number of jobs to run in parallel. Training the estimator and computing
208
+ the score are parallelized over the cross-validation splits.
209
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
210
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
211
+ for more details.
212
+
213
+ verbose : int, default=0
214
+ The verbosity level.
215
+
216
+ fit_params : dict, default=None
217
+ Parameters to pass to the fit method of the estimator.
218
+
219
+ .. deprecated:: 1.4
220
+ This parameter is deprecated and will be removed in version 1.6. Use
221
+ ``params`` instead.
222
+
223
+ params : dict, default=None
224
+ Parameters to pass to the underlying estimator's ``fit``, the scorer,
225
+ and the CV splitter.
226
+
227
+ .. versionadded:: 1.4
228
+
229
+ pre_dispatch : int or str, default='2*n_jobs'
230
+ Controls the number of jobs that get dispatched during parallel
231
+ execution. Reducing this number can be useful to avoid an
232
+ explosion of memory consumption when more jobs get dispatched
233
+ than CPUs can process. This parameter can be:
234
+
235
+ - An int, giving the exact number of total jobs that are
236
+ spawned
237
+
238
+ - A str, giving an expression as a function of n_jobs,
239
+ as in '2*n_jobs'
240
+
241
+ return_train_score : bool, default=False
242
+ Whether to include train scores.
243
+ Computing training scores is used to get insights on how different
244
+ parameter settings impact the overfitting/underfitting trade-off.
245
+ However computing the scores on the training set can be computationally
246
+ expensive and is not strictly required to select the parameters that
247
+ yield the best generalization performance.
248
+
249
+ .. versionadded:: 0.19
250
+
251
+ .. versionchanged:: 0.21
252
+ Default value was changed from ``True`` to ``False``
253
+
254
+ return_estimator : bool, default=False
255
+ Whether to return the estimators fitted on each split.
256
+
257
+ .. versionadded:: 0.20
258
+
259
+ return_indices : bool, default=False
260
+ Whether to return the train-test indices selected for each split.
261
+
262
+ .. versionadded:: 1.3
263
+
264
+ error_score : 'raise' or numeric, default=np.nan
265
+ Value to assign to the score if an error occurs in estimator fitting.
266
+ If set to 'raise', the error is raised.
267
+ If a numeric value is given, FitFailedWarning is raised.
268
+
269
+ .. versionadded:: 0.20
270
+
271
+ Returns
272
+ -------
273
+ scores : dict of float arrays of shape (n_splits,)
274
+ Array of scores of the estimator for each run of the cross validation.
275
+
276
+ A dict of arrays containing the score/time arrays for each scorer is
277
+ returned. The possible keys for this ``dict`` are:
278
+
279
+ ``test_score``
280
+ The score array for test scores on each cv split.
281
+ Suffix ``_score`` in ``test_score`` changes to a specific
282
+ metric like ``test_r2`` or ``test_auc`` if there are
283
+ multiple scoring metrics in the scoring parameter.
284
+ ``train_score``
285
+ The score array for train scores on each cv split.
286
+ Suffix ``_score`` in ``train_score`` changes to a specific
287
+ metric like ``train_r2`` or ``train_auc`` if there are
288
+ multiple scoring metrics in the scoring parameter.
289
+ This is available only if ``return_train_score`` parameter
290
+ is ``True``.
291
+ ``fit_time``
292
+ The time for fitting the estimator on the train
293
+ set for each cv split.
294
+ ``score_time``
295
+ The time for scoring the estimator on the test set for each
296
+ cv split. (Note time for scoring on the train set is not
297
+ included even if ``return_train_score`` is set to ``True``.)
298
+ ``estimator``
299
+ The estimator objects for each cv split.
300
+ This is available only if ``return_estimator`` parameter
301
+ is set to ``True``.
302
+ ``indices``
303
+ The train/test positional indices for each cv split. A dictionary
304
+ is returned where the keys are either `"train"` or `"test"`
305
+ and the associated values are a list of integer-dtyped NumPy
306
+ arrays with the indices. Available only if `return_indices=True`.
307
+
308
+ See Also
309
+ --------
310
+ cross_val_score : Run cross-validation for single metric evaluation.
311
+
312
+ cross_val_predict : Get predictions from each split of cross-validation for
313
+ diagnostic purposes.
314
+
315
+ sklearn.metrics.make_scorer : Make a scorer from a performance metric or
316
+ loss function.
317
+
318
+ Examples
319
+ --------
320
+ >>> from sklearn import datasets, linear_model
321
+ >>> from sklearn.model_selection import cross_validate
322
+ >>> from sklearn.metrics import make_scorer
323
+ >>> from sklearn.metrics import confusion_matrix
324
+ >>> from sklearn.svm import LinearSVC
325
+ >>> diabetes = datasets.load_diabetes()
326
+ >>> X = diabetes.data[:150]
327
+ >>> y = diabetes.target[:150]
328
+ >>> lasso = linear_model.Lasso()
329
+
330
+ Single metric evaluation using ``cross_validate``
331
+
332
+ >>> cv_results = cross_validate(lasso, X, y, cv=3)
333
+ >>> sorted(cv_results.keys())
334
+ ['fit_time', 'score_time', 'test_score']
335
+ >>> cv_results['test_score']
336
+ array([0.3315057 , 0.08022103, 0.03531816])
337
+
338
+ Multiple metric evaluation using ``cross_validate``
339
+ (please refer to the ``scoring`` parameter doc for more information)
340
+
341
+ >>> scores = cross_validate(lasso, X, y, cv=3,
342
+ ... scoring=('r2', 'neg_mean_squared_error'),
343
+ ... return_train_score=True)
344
+ >>> print(scores['test_neg_mean_squared_error'])
345
+ [-3635.5... -3573.3... -6114.7...]
346
+ >>> print(scores['train_r2'])
347
+ [0.28009951 0.3908844 0.22784907]
348
+ """
349
+ params = _check_params_groups_deprecation(fit_params, params, groups)
350
+
351
+ X, y = indexable(X, y)
352
+
353
+ cv = check_cv(cv, y, classifier=is_classifier(estimator))
354
+
355
+ if callable(scoring):
356
+ scorers = scoring
357
+ elif scoring is None or isinstance(scoring, str):
358
+ scorers = check_scoring(estimator, scoring)
359
+ else:
360
+ scorers = _check_multimetric_scoring(estimator, scoring)
361
+
362
+ if _routing_enabled():
363
+ # `cross_validate` will create a `_MultimetricScorer` if `scoring` is a
364
+ # dict at a later stage. We need the same object for the purpose of
365
+ # routing. However, creating it here and passing it around would create
366
+ # a much larger diff since the dict is used in many places.
367
+ if isinstance(scorers, dict):
368
+ _scorer = _MultimetricScorer(
369
+ scorers=scorers, raise_exc=(error_score == "raise")
370
+ )
371
+ else:
372
+ _scorer = scorers
373
+ # For estimators, a MetadataRouter is created in get_metadata_routing
374
+ # methods. For these router methods, we create the router to use
375
+ # `process_routing` on it.
376
+ router = (
377
+ MetadataRouter(owner="cross_validate")
378
+ .add(
379
+ splitter=cv,
380
+ method_mapping=MethodMapping().add(caller="fit", callee="split"),
381
+ )
382
+ .add(
383
+ estimator=estimator,
384
+ # TODO(SLEP6): also pass metadata to the predict method for
385
+ # scoring?
386
+ method_mapping=MethodMapping().add(caller="fit", callee="fit"),
387
+ )
388
+ .add(
389
+ scorer=_scorer,
390
+ method_mapping=MethodMapping().add(caller="fit", callee="score"),
391
+ )
392
+ )
393
+ try:
394
+ routed_params = process_routing(router, "fit", **params)
395
+ except UnsetMetadataPassedError as e:
396
+ # The default exception would mention `fit` since in the above
397
+ # `process_routing` code, we pass `fit` as the caller. However,
398
+ # the user is not calling `fit` directly, so we change the message
399
+ # to make it more suitable for this case.
400
+ unrequested_params = sorted(e.unrequested_params)
401
+ raise UnsetMetadataPassedError(
402
+ message=(
403
+ f"{unrequested_params} are passed to cross validation but are not"
404
+ " explicitly set as requested or not requested for cross_validate's"
405
+ f" estimator: {estimator.__class__.__name__}. Call"
406
+ " `.set_fit_request({{metadata}}=True)` on the estimator for"
407
+ f" each metadata in {unrequested_params} that you"
408
+ " want to use and `metadata=False` for not using it. See the"
409
+ " Metadata Routing User guide"
410
+ " <https://scikit-learn.org/stable/metadata_routing.html> for more"
411
+ " information."
412
+ ),
413
+ unrequested_params=e.unrequested_params,
414
+ routed_params=e.routed_params,
415
+ )
416
+ else:
417
+ routed_params = Bunch()
418
+ routed_params.splitter = Bunch(split={"groups": groups})
419
+ routed_params.estimator = Bunch(fit=params)
420
+ routed_params.scorer = Bunch(score={})
421
+
422
+ indices = cv.split(X, y, **routed_params.splitter.split)
423
+ if return_indices:
424
+ # materialize the indices since we need to store them in the returned dict
425
+ indices = list(indices)
426
+
427
+ # We clone the estimator to make sure that all the folds are
428
+ # independent, and that it is pickle-able.
429
+ parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
430
+ results = parallel(
431
+ delayed(_fit_and_score)(
432
+ clone(estimator),
433
+ X,
434
+ y,
435
+ scorer=scorers,
436
+ train=train,
437
+ test=test,
438
+ verbose=verbose,
439
+ parameters=None,
440
+ fit_params=routed_params.estimator.fit,
441
+ score_params=routed_params.scorer.score,
442
+ return_train_score=return_train_score,
443
+ return_times=True,
444
+ return_estimator=return_estimator,
445
+ error_score=error_score,
446
+ )
447
+ for train, test in indices
448
+ )
449
+
450
+ _warn_or_raise_about_fit_failures(results, error_score)
451
+
452
+ # For callable scoring, the return type is only known after calling. If the
453
+ # return type is a dictionary, the error scores can now be inserted with
454
+ # the correct key.
455
+ if callable(scoring):
456
+ _insert_error_scores(results, error_score)
457
+
458
+ results = _aggregate_score_dicts(results)
459
+
460
+ ret = {}
461
+ ret["fit_time"] = results["fit_time"]
462
+ ret["score_time"] = results["score_time"]
463
+
464
+ if return_estimator:
465
+ ret["estimator"] = results["estimator"]
466
+
467
+ if return_indices:
468
+ ret["indices"] = {}
469
+ ret["indices"]["train"], ret["indices"]["test"] = zip(*indices)
470
+
471
+ test_scores_dict = _normalize_score_results(results["test_scores"])
472
+ if return_train_score:
473
+ train_scores_dict = _normalize_score_results(results["train_scores"])
474
+
475
+ for name in test_scores_dict:
476
+ ret["test_%s" % name] = test_scores_dict[name]
477
+ if return_train_score:
478
+ key = "train_%s" % name
479
+ ret[key] = train_scores_dict[name]
480
+
481
+ return ret
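+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name below is
+ # hypothetical): passing a dict of callables as `scoring`, as described in the
+ # docstring above, yields one `test_<name>` array per metric in the result.
+ def _example_cross_validate_multimetric():
+     from sklearn.datasets import load_diabetes
+     from sklearn.linear_model import Lasso
+     from sklearn.metrics import make_scorer, mean_absolute_error, r2_score
+ 
+     X, y = load_diabetes(return_X_y=True)
+     scoring = {
+         "r2": make_scorer(r2_score),
+         "neg_mae": make_scorer(mean_absolute_error, greater_is_better=False),
+     }
+     cv_results = cross_validate(Lasso(), X[:150], y[:150], cv=3, scoring=scoring)
+     # Keys: ['fit_time', 'score_time', 'test_neg_mae', 'test_r2']
+     return sorted(cv_results)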
482
+
483
+
484
+ def _insert_error_scores(results, error_score):
485
+ """Insert error in `results` by replacing them inplace with `error_score`.
486
+
487
+ This only applies to multimetric scores because `_fit_and_score` will
488
+ handle the single metric case.
489
+ """
490
+ successful_score = None
491
+ failed_indices = []
492
+ for i, result in enumerate(results):
493
+ if result["fit_error"] is not None:
494
+ failed_indices.append(i)
495
+ elif successful_score is None:
496
+ successful_score = result["test_scores"]
497
+
498
+ if isinstance(successful_score, dict):
499
+ formatted_error = {name: error_score for name in successful_score}
500
+ for i in failed_indices:
501
+ results[i]["test_scores"] = formatted_error.copy()
502
+ if "train_scores" in results[i]:
503
+ results[i]["train_scores"] = formatted_error.copy()
504
+
505
+
506
+ def _normalize_score_results(scores, scaler_score_key="score"):
507
+ """Creates a scoring dictionary based on the type of `scores`"""
508
+ if isinstance(scores[0], dict):
509
+ # multimetric scoring
510
+ return _aggregate_score_dicts(scores)
511
+ # scalar
512
+ return {scaler_score_key: scores}
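+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): single-metric results are wrapped under a "score" key, while
+ # per-split dicts are aggregated into one array-valued entry per metric.
+ def _example_normalize_score_results():
+     single = _normalize_score_results([0.8, 0.9, 0.7])
+     # -> {"score": [0.8, 0.9, 0.7]}
+     multi = _normalize_score_results(
+         [{"r2": 0.8, "mae": 3.0}, {"r2": 0.9, "mae": 2.5}]
+     )
+     # -> roughly {"r2": array([0.8, 0.9]), "mae": array([3.0, 2.5])}
+     return single, multi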
513
+
514
+
515
+ def _warn_or_raise_about_fit_failures(results, error_score):
516
+ fit_errors = [
517
+ result["fit_error"] for result in results if result["fit_error"] is not None
518
+ ]
519
+ if fit_errors:
520
+ num_failed_fits = len(fit_errors)
521
+ num_fits = len(results)
522
+ fit_errors_counter = Counter(fit_errors)
523
+ delimiter = "-" * 80 + "\n"
524
+ fit_errors_summary = "\n".join(
525
+ f"{delimiter}{n} fits failed with the following error:\n{error}"
526
+ for error, n in fit_errors_counter.items()
527
+ )
528
+
529
+ if num_failed_fits == num_fits:
530
+ all_fits_failed_message = (
531
+ f"\nAll the {num_fits} fits failed.\n"
532
+ "It is very likely that your model is misconfigured.\n"
533
+ "You can try to debug the error by setting error_score='raise'.\n\n"
534
+ f"Below are more details about the failures:\n{fit_errors_summary}"
535
+ )
536
+ raise ValueError(all_fits_failed_message)
537
+
538
+ else:
539
+ some_fits_failed_message = (
540
+ f"\n{num_failed_fits} fits failed out of a total of {num_fits}.\n"
541
+ "The score on these train-test partitions for these parameters"
542
+ f" will be set to {error_score}.\n"
543
+ "If these failures are not expected, you can try to debug them "
544
+ "by setting error_score='raise'.\n\n"
545
+ f"Below are more details about the failures:\n{fit_errors_summary}"
546
+ )
547
+ warnings.warn(some_fits_failed_message, FitFailedWarning)
548
+
549
+
550
+ @validate_params(
551
+ {
552
+ "estimator": [HasMethods("fit")],
553
+ "X": ["array-like", "sparse matrix"],
554
+ "y": ["array-like", None],
555
+ "groups": ["array-like", None],
556
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
557
+ "cv": ["cv_object"],
558
+ "n_jobs": [Integral, None],
559
+ "verbose": ["verbose"],
560
+ "fit_params": [dict, None],
561
+ "params": [dict, None],
562
+ "pre_dispatch": [Integral, str, None],
563
+ "error_score": [StrOptions({"raise"}), Real],
564
+ },
565
+ prefer_skip_nested_validation=False, # estimator is not validated yet
566
+ )
567
+ def cross_val_score(
568
+ estimator,
569
+ X,
570
+ y=None,
571
+ *,
572
+ groups=None,
573
+ scoring=None,
574
+ cv=None,
575
+ n_jobs=None,
576
+ verbose=0,
577
+ fit_params=None,
578
+ params=None,
579
+ pre_dispatch="2*n_jobs",
580
+ error_score=np.nan,
581
+ ):
582
+ """Evaluate a score by cross-validation.
583
+
584
+ Read more in the :ref:`User Guide <cross_validation>`.
585
+
586
+ Parameters
587
+ ----------
588
+ estimator : estimator object implementing 'fit'
589
+ The object to use to fit the data.
590
+
591
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
592
+ The data to fit. Can be, for example, a list or an array.
593
+
594
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
595
+ default=None
596
+ The target variable to try to predict in the case of
597
+ supervised learning.
598
+
599
+ groups : array-like of shape (n_samples,), default=None
600
+ Group labels for the samples used while splitting the dataset into
601
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
602
+ instance (e.g., :class:`GroupKFold`).
603
+
604
+ .. versionchanged:: 1.4
605
+ ``groups`` can only be passed if metadata routing is not enabled
606
+ via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
607
+ is enabled, pass ``groups`` alongside other metadata via the ``params``
608
+ argument instead. E.g.:
609
+ ``cross_val_score(..., params={'groups': groups})``.
610
+
611
+ scoring : str or callable, default=None
612
+ A str (see model evaluation documentation) or
613
+ a scorer callable object / function with signature
614
+ ``scorer(estimator, X, y)`` which should return only
615
+ a single value.
616
+
617
+ Similar to :func:`cross_validate`
618
+ but only a single metric is permitted.
619
+
620
+ If `None`, the estimator's default scorer (if available) is used.
621
+
622
+ cv : int, cross-validation generator or an iterable, default=None
623
+ Determines the cross-validation splitting strategy.
624
+ Possible inputs for cv are:
625
+
626
+ - `None`, to use the default 5-fold cross validation,
627
+ - int, to specify the number of folds in a `(Stratified)KFold`,
628
+ - :term:`CV splitter`,
629
+ - An iterable that generates (train, test) splits as arrays of indices.
630
+
631
+ For `int`/`None` inputs, if the estimator is a classifier and `y` is
632
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
633
+ other cases, :class:`KFold` is used. These splitters are instantiated
634
+ with `shuffle=False` so the splits will be the same across calls.
635
+
636
+ Refer to the :ref:`User Guide <cross_validation>` for the various
637
+ cross-validation strategies that can be used here.
638
+
639
+ .. versionchanged:: 0.22
640
+ `cv` default value if `None` changed from 3-fold to 5-fold.
641
+
642
+ n_jobs : int, default=None
643
+ Number of jobs to run in parallel. Training the estimator and computing
644
+ the score are parallelized over the cross-validation splits.
645
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
646
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
647
+ for more details.
648
+
649
+ verbose : int, default=0
650
+ The verbosity level.
651
+
652
+ fit_params : dict, default=None
653
+ Parameters to pass to the fit method of the estimator.
654
+
655
+ .. deprecated:: 1.4
656
+ This parameter is deprecated and will be removed in version 1.6. Use
657
+ ``params`` instead.
658
+
659
+ params : dict, default=None
660
+ Parameters to pass to the underlying estimator's ``fit``, the scorer,
661
+ and the CV splitter.
662
+
663
+ .. versionadded:: 1.4
664
+
665
+ pre_dispatch : int or str, default='2*n_jobs'
666
+ Controls the number of jobs that get dispatched during parallel
667
+ execution. Reducing this number can be useful to avoid an
668
+ explosion of memory consumption when more jobs get dispatched
669
+ than CPUs can process. This parameter can be:
670
+
671
+ - ``None``, in which case all the jobs are immediately
672
+ created and spawned. Use this for lightweight and
673
+ fast-running jobs, to avoid delays due to on-demand
674
+ spawning of the jobs
675
+
676
+ - An int, giving the exact number of total jobs that are
677
+ spawned
678
+
679
+ - A str, giving an expression as a function of n_jobs,
680
+ as in '2*n_jobs'
681
+
682
+ error_score : 'raise' or numeric, default=np.nan
683
+ Value to assign to the score if an error occurs in estimator fitting.
684
+ If set to 'raise', the error is raised.
685
+ If a numeric value is given, FitFailedWarning is raised.
686
+
687
+ .. versionadded:: 0.20
688
+
689
+ Returns
690
+ -------
691
+ scores : ndarray of float of shape=(len(list(cv)),)
692
+ Array of scores of the estimator for each run of the cross validation.
693
+
694
+ See Also
695
+ --------
696
+ cross_validate : To run cross-validation on multiple metrics and also to
697
+ return train scores, fit times and score times.
698
+
699
+ cross_val_predict : Get predictions from each split of cross-validation for
700
+ diagnostic purposes.
701
+
702
+ sklearn.metrics.make_scorer : Make a scorer from a performance metric or
703
+ loss function.
704
+
705
+ Examples
706
+ --------
707
+ >>> from sklearn import datasets, linear_model
708
+ >>> from sklearn.model_selection import cross_val_score
709
+ >>> diabetes = datasets.load_diabetes()
710
+ >>> X = diabetes.data[:150]
711
+ >>> y = diabetes.target[:150]
712
+ >>> lasso = linear_model.Lasso()
713
+ >>> print(cross_val_score(lasso, X, y, cv=3))
714
+ [0.3315057 0.08022103 0.03531816]
715
+ """
716
+ # Ensure that the multimetric format is not used; only a single metric is allowed
717
+ scorer = check_scoring(estimator, scoring=scoring)
718
+
719
+ cv_results = cross_validate(
720
+ estimator=estimator,
721
+ X=X,
722
+ y=y,
723
+ groups=groups,
724
+ scoring={"score": scorer},
725
+ cv=cv,
726
+ n_jobs=n_jobs,
727
+ verbose=verbose,
728
+ fit_params=fit_params,
729
+ params=params,
730
+ pre_dispatch=pre_dispatch,
731
+ error_score=error_score,
732
+ )
733
+ return cv_results["test_score"]
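+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): as the implementation above shows, `cross_val_score` is a
+ # thin wrapper around `cross_validate` that returns only the test scores.
+ def _example_cross_val_score_equivalence():
+     import numpy as np
+     from sklearn.datasets import load_diabetes
+     from sklearn.linear_model import Lasso
+ 
+     X, y = load_diabetes(return_X_y=True)
+     scores = cross_val_score(Lasso(), X[:150], y[:150], cv=3, scoring="r2")
+     results = cross_validate(Lasso(), X[:150], y[:150], cv=3, scoring="r2")
+     # Both paths should produce the same per-split test scores.
+     return np.allclose(scores, results["test_score"])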
734
+
735
+
736
+ def _fit_and_score(
737
+ estimator,
738
+ X,
739
+ y,
740
+ *,
741
+ scorer,
742
+ train,
743
+ test,
744
+ verbose,
745
+ parameters,
746
+ fit_params,
747
+ score_params,
748
+ return_train_score=False,
749
+ return_parameters=False,
750
+ return_n_test_samples=False,
751
+ return_times=False,
752
+ return_estimator=False,
753
+ split_progress=None,
754
+ candidate_progress=None,
755
+ error_score=np.nan,
756
+ ):
757
+ """Fit estimator and compute scores for a given dataset split.
758
+
759
+ Parameters
760
+ ----------
761
+ estimator : estimator object implementing 'fit'
762
+ The object to use to fit the data.
763
+
764
+ X : array-like of shape (n_samples, n_features)
765
+ The data to fit.
766
+
767
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
768
+ The target variable to try to predict in the case of
769
+ supervised learning.
770
+
771
+ scorer : A single callable or dict mapping scorer name to the callable
772
+ If it is a single callable, the return value for ``train_scores`` and
773
+ ``test_scores`` is a single float.
774
+
775
+ For a dict, it should be one mapping the scorer name to the scorer
776
+ callable object / function.
777
+
778
+ The callable object / function should have signature
779
+ ``scorer(estimator, X, y)``.
780
+
781
+ train : array-like of shape (n_train_samples,)
782
+ Indices of training samples.
783
+
784
+ test : array-like of shape (n_test_samples,)
785
+ Indices of test samples.
786
+
787
+ verbose : int
788
+ The verbosity level.
789
+
790
+ error_score : 'raise' or numeric, default=np.nan
791
+ Value to assign to the score if an error occurs in estimator fitting.
792
+ If set to 'raise', the error is raised.
793
+ If a numeric value is given, FitFailedWarning is raised.
794
+
795
+ parameters : dict or None
796
+ Parameters to be set on the estimator.
797
+
798
+ fit_params : dict or None
799
+ Parameters that will be passed to ``estimator.fit``.
800
+
801
+ score_params : dict or None
802
+ Parameters that will be passed to the scorer.
803
+
804
+ return_train_score : bool, default=False
805
+ Compute and return score on training set.
806
+
807
+ return_parameters : bool, default=False
808
+ Return the parameters that have been used for the estimator.
809
+
810
+ split_progress : {list, tuple} of int, default=None
811
+ A list or tuple of format (<current_split_id>, <total_num_of_splits>).
812
+
813
+ candidate_progress : {list, tuple} of int, default=None
814
+ A list or tuple of format
815
+ (<current_candidate_id>, <total_number_of_candidates>).
816
+
817
+ return_n_test_samples : bool, default=False
818
+ Whether to return the ``n_test_samples``.
819
+
820
+ return_times : bool, default=False
821
+ Whether to return the fit/score times.
822
+
823
+ return_estimator : bool, default=False
824
+ Whether to return the fitted estimator.
825
+
826
+ Returns
827
+ -------
828
+ result : dict with the following attributes
829
+ train_scores : dict of scorer name -> float
830
+ Score on training set (for all the scorers),
831
+ returned only if `return_train_score` is `True`.
832
+ test_scores : dict of scorer name -> float
833
+ Score on testing set (for all the scorers).
834
+ n_test_samples : int
835
+ Number of test samples.
836
+ fit_time : float
837
+ Time spent for fitting in seconds.
838
+ score_time : float
839
+ Time spent for scoring in seconds.
840
+ parameters : dict or None
841
+ The parameters that have been evaluated.
842
+ estimator : estimator object
843
+ The fitted estimator.
844
+ fit_error : str or None
845
+ Traceback str if the fit failed, None if the fit succeeded.
846
+ """
847
+ if not isinstance(error_score, numbers.Number) and error_score != "raise":
848
+ raise ValueError(
849
+ "error_score must be the string 'raise' or a numeric value. "
850
+ "(Hint: if using 'raise', please make sure that it has been "
851
+ "spelled correctly.)"
852
+ )
853
+
854
+ progress_msg = ""
855
+ if verbose > 2:
856
+ if split_progress is not None:
857
+ progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
858
+ if candidate_progress and verbose > 9:
859
+ progress_msg += f"; {candidate_progress[0]+1}/{candidate_progress[1]}"
860
+
861
+ if verbose > 1:
862
+ if parameters is None:
863
+ params_msg = ""
864
+ else:
865
+ sorted_keys = sorted(parameters) # Ensure deterministic output
866
+ params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys)
867
+ if verbose > 9:
868
+ start_msg = f"[CV{progress_msg}] START {params_msg}"
869
+ print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
870
+
871
+ # Adjust length of sample weights
872
+ fit_params = fit_params if fit_params is not None else {}
873
+ fit_params = _check_method_params(X, params=fit_params, indices=train)
874
+ score_params = score_params if score_params is not None else {}
875
+ score_params_train = _check_method_params(X, params=score_params, indices=train)
876
+ score_params_test = _check_method_params(X, params=score_params, indices=test)
877
+
878
+ if parameters is not None:
879
+ # here we clone the parameters, since sometimes the parameters
880
+ # themselves might be estimators, e.g. when we search over different
881
+ # estimators in a pipeline.
882
+ # ref: https://github.com/scikit-learn/scikit-learn/pull/26786
883
+ estimator = estimator.set_params(**clone(parameters, safe=False))
884
+
885
+ start_time = time.time()
886
+
887
+ X_train, y_train = _safe_split(estimator, X, y, train)
888
+ X_test, y_test = _safe_split(estimator, X, y, test, train)
889
+
890
+ result = {}
891
+ try:
892
+ if y_train is None:
893
+ estimator.fit(X_train, **fit_params)
894
+ else:
895
+ estimator.fit(X_train, y_train, **fit_params)
896
+
897
+ except Exception:
898
+ # Note fit time as time until error
899
+ fit_time = time.time() - start_time
900
+ score_time = 0.0
901
+ if error_score == "raise":
902
+ raise
903
+ elif isinstance(error_score, numbers.Number):
904
+ if isinstance(scorer, dict):
905
+ test_scores = {name: error_score for name in scorer}
906
+ if return_train_score:
907
+ train_scores = test_scores.copy()
908
+ else:
909
+ test_scores = error_score
910
+ if return_train_score:
911
+ train_scores = error_score
912
+ result["fit_error"] = format_exc()
913
+ else:
914
+ result["fit_error"] = None
915
+
916
+ fit_time = time.time() - start_time
917
+ test_scores = _score(
918
+ estimator, X_test, y_test, scorer, score_params_test, error_score
919
+ )
920
+ score_time = time.time() - start_time - fit_time
921
+ if return_train_score:
922
+ train_scores = _score(
923
+ estimator, X_train, y_train, scorer, score_params_train, error_score
924
+ )
925
+
926
+ if verbose > 1:
927
+ total_time = score_time + fit_time
928
+ end_msg = f"[CV{progress_msg}] END "
929
+ result_msg = params_msg + (";" if params_msg else "")
930
+ if verbose > 2:
931
+ if isinstance(test_scores, dict):
932
+ for scorer_name in sorted(test_scores):
933
+ result_msg += f" {scorer_name}: ("
934
+ if return_train_score:
935
+ scorer_scores = train_scores[scorer_name]
936
+ result_msg += f"train={scorer_scores:.3f}, "
937
+ result_msg += f"test={test_scores[scorer_name]:.3f})"
938
+ else:
939
+ result_msg += ", score="
940
+ if return_train_score:
941
+ result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})"
942
+ else:
943
+ result_msg += f"{test_scores:.3f}"
944
+ result_msg += f" total time={logger.short_format_time(total_time)}"
945
+
946
+ # Right align the result_msg
947
+ end_msg += "." * (80 - len(end_msg) - len(result_msg))
948
+ end_msg += result_msg
949
+ print(end_msg)
950
+
951
+ result["test_scores"] = test_scores
952
+ if return_train_score:
953
+ result["train_scores"] = train_scores
954
+ if return_n_test_samples:
955
+ result["n_test_samples"] = _num_samples(X_test)
956
+ if return_times:
957
+ result["fit_time"] = fit_time
958
+ result["score_time"] = score_time
959
+ if return_parameters:
960
+ result["parameters"] = parameters
961
+ if return_estimator:
962
+ result["estimator"] = estimator
963
+ return result
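+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): a single call to the private `_fit_and_score` on a manual
+ # train/test split, showing the keys of the returned result dict.
+ def _example_fit_and_score():
+     import numpy as np
+     from sklearn.datasets import load_diabetes
+     from sklearn.linear_model import Lasso
+     from sklearn.metrics import check_scoring
+ 
+     X, y = load_diabetes(return_X_y=True)
+     estimator = Lasso()
+     result = _fit_and_score(
+         estimator,
+         X[:150],
+         y[:150],
+         scorer=check_scoring(estimator),
+         train=np.arange(100),
+         test=np.arange(100, 150),
+         verbose=0,
+         parameters=None,
+         fit_params=None,
+         score_params=None,
+         return_times=True,
+     )
+     # Keys: ['fit_error', 'fit_time', 'score_time', 'test_scores']
+     return sorted(result)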
964
+
965
+
966
+ def _score(estimator, X_test, y_test, scorer, score_params, error_score="raise"):
967
+ """Compute the score(s) of an estimator on a given test set.
968
+
969
+ Will return a dict of floats if `scorer` is a dict, otherwise a single
970
+ float is returned.
971
+ """
972
+ if isinstance(scorer, dict):
973
+ # will cache method calls if needed. scorer() returns a dict
974
+ scorer = _MultimetricScorer(scorers=scorer, raise_exc=(error_score == "raise"))
975
+
976
+ score_params = {} if score_params is None else score_params
977
+
978
+ try:
979
+ if y_test is None:
980
+ scores = scorer(estimator, X_test, **score_params)
981
+ else:
982
+ scores = scorer(estimator, X_test, y_test, **score_params)
983
+ except Exception:
984
+ if isinstance(scorer, _MultimetricScorer):
985
+ # If `_MultimetricScorer` raises exception, the `error_score`
986
+ # parameter is equal to "raise".
987
+ raise
988
+ else:
989
+ if error_score == "raise":
990
+ raise
991
+ else:
992
+ scores = error_score
993
+ warnings.warn(
994
+ (
995
+ "Scoring failed. The score on this train-test partition for "
996
+ f"these parameters will be set to {error_score}. Details: \n"
997
+ f"{format_exc()}"
998
+ ),
999
+ UserWarning,
1000
+ )
1001
+
1002
+ # Check non-raised error messages in `_MultimetricScorer`
1003
+ if isinstance(scorer, _MultimetricScorer):
1004
+ exception_messages = [
1005
+ (name, str_e) for name, str_e in scores.items() if isinstance(str_e, str)
1006
+ ]
1007
+ if exception_messages:
1008
+ # error_score != "raise"
1009
+ for name, str_e in exception_messages:
1010
+ scores[name] = error_score
1011
+ warnings.warn(
1012
+ (
1013
+ "Scoring failed. The score on this train-test partition for "
1014
+ f"these parameters will be set to {error_score}. Details: \n"
1015
+ f"{str_e}"
1016
+ ),
1017
+ UserWarning,
1018
+ )
1019
+
1020
+ error_msg = "scoring must return a number, got %s (%s) instead. (scorer=%s)"
1021
+ if isinstance(scores, dict):
1022
+ for name, score in scores.items():
1023
+ if hasattr(score, "item"):
1024
+ with suppress(ValueError):
1025
+ # e.g. unwrap memmapped scalars
1026
+ score = score.item()
1027
+ if not isinstance(score, numbers.Number):
1028
+ raise ValueError(error_msg % (score, type(score), name))
1029
+ scores[name] = score
1030
+ else: # scalar
1031
+ if hasattr(scores, "item"):
1032
+ with suppress(ValueError):
1033
+ # e.g. unwrap memmapped scalars
1034
+ scores = scores.item()
1035
+ if not isinstance(scores, numbers.Number):
1036
+ raise ValueError(error_msg % (scores, type(scores), scorer))
1037
+ return scores
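+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): with a dict of scorers, `_score` wraps them in a
+ # `_MultimetricScorer` and returns a dict mapping metric names to floats.
+ def _example_score_multimetric():
+     from sklearn.datasets import load_diabetes
+     from sklearn.linear_model import Lasso
+     from sklearn.metrics import get_scorer
+ 
+     X, y = load_diabetes(return_X_y=True)
+     estimator = Lasso().fit(X[:100], y[:100])
+     scorers = {
+         "r2": get_scorer("r2"),
+         "neg_mae": get_scorer("neg_mean_absolute_error"),
+     }
+     scores = _score(estimator, X[100:150], y[100:150], scorers, score_params=None)
+     # -> {"r2": <float>, "neg_mae": <float>}
+     return scores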
1038
+
1039
+
1040
+ @validate_params(
1041
+ {
1042
+ "estimator": [HasMethods(["fit", "predict"])],
1043
+ "X": ["array-like", "sparse matrix"],
1044
+ "y": ["array-like", None],
1045
+ "groups": ["array-like", None],
1046
+ "cv": ["cv_object"],
1047
+ "n_jobs": [Integral, None],
1048
+ "verbose": ["verbose"],
1049
+ "fit_params": [dict, None],
1050
+ "params": [dict, None],
1051
+ "pre_dispatch": [Integral, str, None],
1052
+ "method": [
1053
+ StrOptions(
1054
+ {
1055
+ "predict",
1056
+ "predict_proba",
1057
+ "predict_log_proba",
1058
+ "decision_function",
1059
+ }
1060
+ )
1061
+ ],
1062
+ },
1063
+ prefer_skip_nested_validation=False, # estimator is not validated yet
1064
+ )
1065
+ def cross_val_predict(
1066
+ estimator,
1067
+ X,
1068
+ y=None,
1069
+ *,
1070
+ groups=None,
1071
+ cv=None,
1072
+ n_jobs=None,
1073
+ verbose=0,
1074
+ fit_params=None,
1075
+ params=None,
1076
+ pre_dispatch="2*n_jobs",
1077
+ method="predict",
1078
+ ):
1079
+ """Generate cross-validated estimates for each input data point.
1080
+
1081
+ The data is split according to the cv parameter. Each sample belongs
1082
+ to exactly one test set, and its prediction is computed with an
1083
+ estimator fitted on the corresponding training set.
1084
+
1085
+ Passing these predictions into an evaluation metric may not be a valid
1086
+ way to measure generalization performance. Results can differ from
1087
+ :func:`cross_validate` and :func:`cross_val_score` unless all test sets
1088
+ have equal size and the metric decomposes over samples.
1089
+
1090
+ Read more in the :ref:`User Guide <cross_validation>`.
1091
+
1092
+ Parameters
1093
+ ----------
1094
+ estimator : estimator
1095
+ The estimator instance to use to fit the data. It must implement a `fit`
1096
+ method and the method given by the `method` parameter.
1097
+
1098
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1099
+ The data to fit. Can be, for example, a list or an array of at least 2D.
1100
+
1101
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
1102
+ default=None
1103
+ The target variable to try to predict in the case of
1104
+ supervised learning.
1105
+
1106
+ groups : array-like of shape (n_samples,), default=None
1107
+ Group labels for the samples used while splitting the dataset into
1108
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
1109
+ instance (e.g., :class:`GroupKFold`).
1110
+
1111
+ .. versionchanged:: 1.4
1112
+ ``groups`` can only be passed if metadata routing is not enabled
1113
+ via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
1114
+ is enabled, pass ``groups`` alongside other metadata via the ``params``
1115
+ argument instead. E.g.:
1116
+ ``cross_val_predict(..., params={'groups': groups})``.
1117
+
1118
+ cv : int, cross-validation generator or an iterable, default=None
1119
+ Determines the cross-validation splitting strategy.
1120
+ Possible inputs for cv are:
1121
+
1122
+ - None, to use the default 5-fold cross validation,
1123
+ - int, to specify the number of folds in a `(Stratified)KFold`,
1124
+ - :term:`CV splitter`,
1125
+ - An iterable that generates (train, test) splits as arrays of indices.
1126
+
1127
+ For int/None inputs, if the estimator is a classifier and ``y`` is
1128
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1129
+ other cases, :class:`KFold` is used. These splitters are instantiated
1130
+ with `shuffle=False` so the splits will be the same across calls.
1131
+
1132
+ Refer to the :ref:`User Guide <cross_validation>` for the various
1133
+ cross-validation strategies that can be used here.
1134
+
1135
+ .. versionchanged:: 0.22
1136
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1137
+
1138
+ n_jobs : int, default=None
1139
+ Number of jobs to run in parallel. Training the estimator and
1140
+ predicting are parallelized over the cross-validation splits.
1141
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1142
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1143
+ for more details.
1144
+
1145
+ verbose : int, default=0
1146
+ The verbosity level.
1147
+
1148
+ fit_params : dict, default=None
1149
+ Parameters to pass to the fit method of the estimator.
1150
+
1151
+ .. deprecated:: 1.4
1152
+ This parameter is deprecated and will be removed in version 1.6. Use
1153
+ ``params`` instead.
1154
+
1155
+ params : dict, default=None
1156
+ Parameters to pass to the underlying estimator's ``fit`` and the CV
1157
+ splitter.
1158
+
1159
+ .. versionadded:: 1.4
1160
+
1161
+ pre_dispatch : int or str, default='2*n_jobs'
1162
+ Controls the number of jobs that get dispatched during parallel
1163
+ execution. Reducing this number can be useful to avoid an
1164
+ explosion of memory consumption when more jobs get dispatched
1165
+ than CPUs can process. This parameter can be:
1166
+
1167
+ - None, in which case all the jobs are immediately
1168
+ created and spawned. Use this for lightweight and
1169
+ fast-running jobs, to avoid delays due to on-demand
1170
+ spawning of the jobs
1171
+
1172
+ - An int, giving the exact number of total jobs that are
1173
+ spawned
1174
+
1175
+ - A str, giving an expression as a function of n_jobs,
1176
+ as in '2*n_jobs'
1177
+
1178
+ method : {'predict', 'predict_proba', 'predict_log_proba', \
1179
+ 'decision_function'}, default='predict'
1180
+ The method to be invoked by `estimator`.
1181
+
1182
+ Returns
1183
+ -------
1184
+ predictions : ndarray
1185
+ This is the result of calling `method`. Shape:
1186
+
1187
+ - When `method` is 'predict' and in special case where `method` is
1188
+ 'decision_function' and the target is binary: (n_samples,)
1189
+ - When `method` is one of {'predict_proba', 'predict_log_proba',
1190
+ 'decision_function'} (unless special case above):
1191
+ (n_samples, n_classes)
1192
+ - If `estimator` is :term:`multioutput`, an extra dimension
1193
+ 'n_outputs' is added to the end of each shape above.
1194
+
1195
+ See Also
1196
+ --------
1197
+ cross_val_score : Calculate score for each CV split.
1198
+ cross_validate : Calculate one or more scores and timings for each CV
1199
+ split.
1200
+
1201
+ Notes
1202
+ -----
1203
+ In the case that one or more classes are absent in a training portion, a
1204
+ default score needs to be assigned to all instances for that class if
1205
+ ``method`` produces columns per class, as in {'decision_function',
1206
+ 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
1207
+ 0. In order to ensure finite output, we approximate negative infinity by
1208
+ the minimum finite float value for the dtype in other cases.
1209
+
1210
+ Examples
1211
+ --------
1212
+ >>> from sklearn import datasets, linear_model
1213
+ >>> from sklearn.model_selection import cross_val_predict
1214
+ >>> diabetes = datasets.load_diabetes()
1215
+ >>> X = diabetes.data[:150]
1216
+ >>> y = diabetes.target[:150]
1217
+ >>> lasso = linear_model.Lasso()
1218
+ >>> y_pred = cross_val_predict(lasso, X, y, cv=3)
1219
+ """
1220
+ params = _check_params_groups_deprecation(fit_params, params, groups)
1221
+ X, y = indexable(X, y)
1222
+
1223
+ if _routing_enabled():
1224
+ # For estimators, a MetadataRouter is created in get_metadata_routing
1225
+ # methods. For these router methods, we create the router to use
1226
+ # `process_routing` on it.
1227
+ router = (
1228
+ MetadataRouter(owner="cross_validate")
1229
+ .add(
1230
+ splitter=cv,
1231
+ method_mapping=MethodMapping().add(caller="fit", callee="split"),
1232
+ )
1233
+ .add(
1234
+ estimator=estimator,
1235
+ # TODO(SLEP6): also pass metadata for the predict method.
1236
+ method_mapping=MethodMapping().add(caller="fit", callee="fit"),
1237
+ )
1238
+ )
1239
+ try:
1240
+ routed_params = process_routing(router, "fit", **params)
1241
+ except UnsetMetadataPassedError as e:
1242
+ # The default exception would mention `fit` since in the above
1243
+ # `process_routing` code, we pass `fit` as the caller. However,
1244
+ # the user is not calling `fit` directly, so we change the message
1245
+ # to make it more suitable for this case.
1246
+ unrequested_params = sorted(e.unrequested_params)
1247
+ raise UnsetMetadataPassedError(
1248
+ message=(
1249
+ f"{unrequested_params} are passed to `cross_val_predict` but are"
1250
+ " not explicitly set as requested or not requested for"
1251
+ f" cross_validate's estimator: {estimator.__class__.__name__} Call"
1252
+ " `.set_fit_request({{metadata}}=True)` on the estimator for"
1253
+ f" each metadata in {unrequested_params} that you want to use and"
1254
+ " `metadata=False` for not using it. See the Metadata Routing User"
1255
+ " guide <https://scikit-learn.org/stable/metadata_routing.html>"
1256
+ " for more information."
1257
+ ),
1258
+ unrequested_params=e.unrequested_params,
1259
+ routed_params=e.routed_params,
1260
+ )
1261
+ else:
1262
+ routed_params = Bunch()
1263
+ routed_params.splitter = Bunch(split={"groups": groups})
1264
+ routed_params.estimator = Bunch(fit=params)
1265
+
1266
+ cv = check_cv(cv, y, classifier=is_classifier(estimator))
1267
+ splits = list(cv.split(X, y, **routed_params.splitter.split))
1268
+
1269
+ test_indices = np.concatenate([test for _, test in splits])
1270
+ if not _check_is_permutation(test_indices, _num_samples(X)):
1271
+ raise ValueError("cross_val_predict only works for partitions")
1272
+
1273
+ # If classification methods produce multiple columns of output,
1274
+ # we need to manually encode classes to ensure consistent column ordering.
1275
+ encode = (
1276
+ method in ["decision_function", "predict_proba", "predict_log_proba"]
1277
+ and y is not None
1278
+ )
1279
+ if encode:
1280
+ y = np.asarray(y)
1281
+ if y.ndim == 1:
1282
+ le = LabelEncoder()
1283
+ y = le.fit_transform(y)
1284
+ elif y.ndim == 2:
1285
+ y_enc = np.zeros_like(y, dtype=int)
1286
+ for i_label in range(y.shape[1]):
1287
+ y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label])
1288
+ y = y_enc
1289
+
1290
+ # We clone the estimator to make sure that all the folds are
1291
+ # independent, and that it is pickle-able.
1292
+ parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
1293
+ predictions = parallel(
1294
+ delayed(_fit_and_predict)(
1295
+ clone(estimator),
1296
+ X,
1297
+ y,
1298
+ train,
1299
+ test,
1300
+ routed_params.estimator.fit,
1301
+ method,
1302
+ )
1303
+ for train, test in splits
1304
+ )
1305
+
1306
+ inv_test_indices = np.empty(len(test_indices), dtype=int)
1307
+ inv_test_indices[test_indices] = np.arange(len(test_indices))
1308
+
1309
+ if sp.issparse(predictions[0]):
1310
+ predictions = sp.vstack(predictions, format=predictions[0].format)
1311
+ elif encode and isinstance(predictions[0], list):
1312
+ # `predictions` is a list of method outputs from each fold.
1313
+ # If each of those is also a list, then treat this as a
1314
+ # multioutput-multiclass task. We need to separately concatenate
1315
+ # the method outputs for each label into an `n_labels` long list.
1316
+ n_labels = y.shape[1]
1317
+ concat_pred = []
1318
+ for i_label in range(n_labels):
1319
+ label_preds = np.concatenate([p[i_label] for p in predictions])
1320
+ concat_pred.append(label_preds)
1321
+ predictions = concat_pred
1322
+ else:
1323
+ predictions = np.concatenate(predictions)
1324
+
1325
+ if isinstance(predictions, list):
1326
+ return [p[inv_test_indices] for p in predictions]
1327
+ else:
1328
+ return predictions[inv_test_indices]
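+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): with ``method="predict_proba"``, `cross_val_predict` stitches
+ # the out-of-fold probabilities into one (n_samples, n_classes) array.
+ def _example_cross_val_predict_proba():
+     from sklearn.datasets import load_iris
+     from sklearn.linear_model import LogisticRegression
+ 
+     X, y = load_iris(return_X_y=True)
+     proba = cross_val_predict(
+         LogisticRegression(max_iter=1000), X, y, cv=5, method="predict_proba"
+     )
+     # Shape: (150, 3), i.e. (n_samples, n_classes).
+     return proba.shape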
1329
+
1330
+
1331
+ def _fit_and_predict(estimator, X, y, train, test, fit_params, method):
1332
+ """Fit estimator and predict values for a given dataset split.
1333
+
1334
+ Read more in the :ref:`User Guide <cross_validation>`.
1335
+
1336
+ Parameters
1337
+ ----------
1338
+ estimator : estimator object implementing 'fit' and 'predict'
1339
+ The object to use to fit the data.
1340
+
1341
+ X : array-like of shape (n_samples, n_features)
1342
+ The data to fit.
1343
+
1344
+ .. versionchanged:: 0.20
1345
+ X is now only required to be an object with finite length or shape.
1346
+
1347
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
1348
+ The target variable to try to predict in the case of
1349
+ supervised learning.
1350
+
1351
+ train : array-like of shape (n_train_samples,)
1352
+ Indices of training samples.
1353
+
1354
+ test : array-like of shape (n_test_samples,)
1355
+ Indices of test samples.
1356
+
1357
+ fit_params : dict or None
1358
+ Parameters that will be passed to ``estimator.fit``.
1359
+
1360
+ method : str
1361
+ Invokes the passed method name of the passed estimator.
1362
+
1363
+ Returns
1364
+ -------
1365
+ predictions : sequence
1366
+ Result of calling 'estimator.method'
1367
+ """
1368
+ # Adjust length of sample weights
1369
+ fit_params = fit_params if fit_params is not None else {}
1370
+ fit_params = _check_method_params(X, params=fit_params, indices=train)
1371
+
1372
+ X_train, y_train = _safe_split(estimator, X, y, train)
1373
+ X_test, _ = _safe_split(estimator, X, y, test, train)
1374
+
1375
+ if y_train is None:
1376
+ estimator.fit(X_train, **fit_params)
1377
+ else:
1378
+ estimator.fit(X_train, y_train, **fit_params)
1379
+ func = getattr(estimator, method)
1380
+ predictions = func(X_test)
1381
+
1382
+ encode = (
1383
+ method in ["decision_function", "predict_proba", "predict_log_proba"]
1384
+ and y is not None
1385
+ )
1386
+
1387
+ if encode:
1388
+ if isinstance(predictions, list):
1389
+ predictions = [
1390
+ _enforce_prediction_order(
1391
+ estimator.classes_[i_label],
1392
+ predictions[i_label],
1393
+ n_classes=len(set(y[:, i_label])),
1394
+ method=method,
1395
+ )
1396
+ for i_label in range(len(predictions))
1397
+ ]
1398
+ else:
1399
+ # A 2D y array should be a binary label indicator matrix
1400
+ n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]
1401
+ predictions = _enforce_prediction_order(
1402
+ estimator.classes_, predictions, n_classes, method
1403
+ )
1404
+ return predictions
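+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name and the
+ # manual split are hypothetical): `_fit_and_predict` fits on the train indices
+ # and returns the requested method's output for the test indices.
+ def _example_fit_and_predict():
+     import numpy as np
+     from sklearn.datasets import load_iris
+     from sklearn.linear_model import LogisticRegression
+ 
+     X, y = load_iris(return_X_y=True)
+     # Keep every class present in the training indices.
+     train = np.concatenate([np.arange(0, 40), np.arange(50, 90), np.arange(100, 140)])
+     test = np.concatenate([np.arange(40, 50), np.arange(90, 100), np.arange(140, 150)])
+     proba = _fit_and_predict(
+         LogisticRegression(max_iter=1000), X, y, train, test,
+         fit_params=None, method="predict_proba",
+     )
+     # Shape: (30, 3) -- one row per test sample, one column per class.
+     return proba.shape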
1405
+
1406
+
1407
+ def _enforce_prediction_order(classes, predictions, n_classes, method):
1408
+ """Ensure that prediction arrays have correct column order
1409
+
1410
+ When doing cross-validation, if one or more classes are
1411
+ not present in the subset of data used for training,
1412
+ then the output prediction array might not have the same
1413
+ columns as other folds. Use the list of class names
1414
+ (assumed to be ints) to enforce the correct column order.
1415
+
1416
+ Note that `classes` is the list of classes in this fold
1417
+ (a subset of the classes in the full training set)
1418
+ and `n_classes` is the number of classes in the full training set.
1419
+ """
1420
+ if n_classes != len(classes):
1421
+ recommendation = (
1422
+ "To fix this, use a cross-validation "
1423
+ "technique resulting in properly "
1424
+ "stratified folds"
1425
+ )
1426
+ warnings.warn(
1427
+ "Number of classes in training fold ({}) does "
1428
+ "not match total number of classes ({}). "
1429
+ "Results may not be appropriate for your use case. "
1430
+ "{}".format(len(classes), n_classes, recommendation),
1431
+ RuntimeWarning,
1432
+ )
1433
+ if method == "decision_function":
1434
+ if predictions.ndim == 2 and predictions.shape[1] != len(classes):
1435
+ # This handles the case when the shape of predictions
1436
+ # does not match the number of classes used to train
1437
+ # it with. This case is found when sklearn.svm.SVC is
1438
+ # set to `decision_function_shape='ovo'`.
1439
+ raise ValueError(
1440
+ "Output shape {} of {} does not match "
1441
+ "number of classes ({}) in fold. "
1442
+ "Irregular decision_function outputs "
1443
+ "are not currently supported by "
1444
+ "cross_val_predict".format(predictions.shape, method, len(classes))
1445
+ )
1446
+ if len(classes) <= 2:
1447
+ # In this special case, `predictions` contains a 1D array.
1448
+ raise ValueError(
1449
+ "Only {} class/es in training fold, but {} "
1450
+ "in overall dataset. This "
1451
+ "is not supported for decision_function "
1452
+ "with imbalanced folds. {}".format(
1453
+ len(classes), n_classes, recommendation
1454
+ )
1455
+ )
1456
+
1457
+ float_min = np.finfo(predictions.dtype).min
1458
+ default_values = {
1459
+ "decision_function": float_min,
1460
+ "predict_log_proba": float_min,
1461
+ "predict_proba": 0,
1462
+ }
1463
+ predictions_for_all_classes = np.full(
1464
+ (_num_samples(predictions), n_classes),
1465
+ default_values[method],
1466
+ dtype=predictions.dtype,
1467
+ )
1468
+ predictions_for_all_classes[:, classes] = predictions
1469
+ predictions = predictions_for_all_classes
1470
+ return predictions
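+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): when a training fold only saw a subset of the classes,
+ # missing ``predict_proba`` columns are filled with the default value (0).
+ def _example_enforce_prediction_order():
+     import numpy as np
+ 
+     # A fold trained on classes {0, 2} out of 3 classes overall; a
+     # RuntimeWarning about the class mismatch is expected here.
+     fold_classes = np.array([0, 2])
+     fold_proba = np.array([[0.7, 0.3], [0.1, 0.9]])
+     padded = _enforce_prediction_order(
+         fold_classes, fold_proba, n_classes=3, method="predict_proba"
+     )
+     # padded has shape (2, 3) with zeros in the column for class 1.
+     return padded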
1471
+
1472
+
1473
+ def _check_is_permutation(indices, n_samples):
1474
+ """Check whether indices is a reordering of the array np.arange(n_samples)
1475
+
1476
+ Parameters
1477
+ ----------
1478
+ indices : ndarray
1479
+ int array to test
1480
+ n_samples : int
1481
+ number of expected elements
1482
+
1483
+ Returns
1484
+ -------
1485
+ is_partition : bool
1486
+ True iff sorted(indices) is np.arange(n)
1487
+ """
1488
+ if len(indices) != n_samples:
1489
+ return False
1490
+ hit = np.zeros(n_samples, dtype=bool)
1491
+ hit[indices] = True
1492
+ if not np.all(hit):
1493
+ return False
1494
+ return True
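+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): any reordering of ``np.arange(n_samples)`` is accepted,
+ # while index sets with gaps or duplicates are rejected.
+ def _example_check_is_permutation():
+     import numpy as np
+ 
+     reordered = _check_is_permutation(np.array([2, 0, 1]), 3)   # True
+     too_short = _check_is_permutation(np.array([0, 1]), 3)      # False
+     duplicated = _check_is_permutation(np.array([0, 0, 2]), 3)  # False
+     return reordered, too_short, duplicated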
1495
+
1496
+
1497
+ @validate_params(
1498
+ {
1499
+ "estimator": [HasMethods("fit")],
1500
+ "X": ["array-like", "sparse matrix"],
1501
+ "y": ["array-like", None],
1502
+ "groups": ["array-like", None],
1503
+ "cv": ["cv_object"],
1504
+ "n_permutations": [Interval(Integral, 1, None, closed="left")],
1505
+ "n_jobs": [Integral, None],
1506
+ "random_state": ["random_state"],
1507
+ "verbose": ["verbose"],
1508
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
1509
+ "fit_params": [dict, None],
1510
+ },
1511
+ prefer_skip_nested_validation=False, # estimator is not validated yet
1512
+ )
1513
+ def permutation_test_score(
1514
+ estimator,
1515
+ X,
1516
+ y,
1517
+ *,
1518
+ groups=None,
1519
+ cv=None,
1520
+ n_permutations=100,
1521
+ n_jobs=None,
1522
+ random_state=0,
1523
+ verbose=0,
1524
+ scoring=None,
1525
+ fit_params=None,
1526
+ ):
1527
+ """Evaluate the significance of a cross-validated score with permutations.
1528
+
1529
+ Permutes targets to generate 'randomized data' and compute the empirical
1530
+ p-value against the null hypothesis that features and targets are
1531
+ independent.
1532
+
1533
+ The p-value represents the fraction of randomized data sets where the
1534
+ estimator performed as well or better than in the original data. A small
1535
+ p-value suggests that there is a real dependency between features and
1536
+ targets which has been used by the estimator to give good predictions.
1537
+ A large p-value may be due to lack of real dependency between features
1538
+ and targets or the estimator was not able to use the dependency to
1539
+ give good predictions.
1540
+
1541
+ Read more in the :ref:`User Guide <permutation_test_score>`.
1542
+
1543
+ Parameters
1544
+ ----------
1545
+ estimator : estimator object implementing 'fit'
1546
+ The object to use to fit the data.
1547
+
1548
+ X : array-like of shape at least 2D
1549
+ The data to fit.
1550
+
1551
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
1552
+ The target variable to try to predict in the case of
1553
+ supervised learning.
1554
+
1555
+ groups : array-like of shape (n_samples,), default=None
1556
+ Labels to constrain permutation within groups, i.e. ``y`` values
1557
+ are permuted among samples with the same group identifier.
1558
+ When not specified, ``y`` values are permuted among all samples.
1559
+
1560
+ When a grouped cross-validator is used, the group labels are
1561
+ also passed on to the ``split`` method of the cross-validator. The
1562
+ cross-validator uses them for grouping the samples while splitting
1563
+ the dataset into train/test set.
1564
+
1565
+ cv : int, cross-validation generator or an iterable, default=None
1566
+ Determines the cross-validation splitting strategy.
1567
+ Possible inputs for cv are:
1568
+
1569
+ - `None`, to use the default 5-fold cross validation,
1570
+ - int, to specify the number of folds in a `(Stratified)KFold`,
1571
+ - :term:`CV splitter`,
1572
+ - An iterable yielding (train, test) splits as arrays of indices.
1573
+
1574
+ For `int`/`None` inputs, if the estimator is a classifier and `y` is
1575
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1576
+ other cases, :class:`KFold` is used. These splitters are instantiated
1577
+ with `shuffle=False` so the splits will be the same across calls.
1578
+
1579
+ Refer to the :ref:`User Guide <cross_validation>` for the various
1580
+ cross-validation strategies that can be used here.
1581
+
1582
+ .. versionchanged:: 0.22
1583
+ `cv` default value if `None` changed from 3-fold to 5-fold.
1584
+
1585
+ n_permutations : int, default=100
1586
+ Number of times to permute ``y``.
1587
+
1588
+ n_jobs : int, default=None
1589
+ Number of jobs to run in parallel. Training the estimator and computing
1590
+ the cross-validated score are parallelized over the permutations.
1591
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1592
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1593
+ for more details.
1594
+
1595
+ random_state : int, RandomState instance or None, default=0
1596
+ Pass an int for reproducible output for permutation of
1597
+ ``y`` values among samples. See :term:`Glossary <random_state>`.
1598
+
1599
+ verbose : int, default=0
1600
+ The verbosity level.
1601
+
1602
+ scoring : str or callable, default=None
1603
+ A single str (see :ref:`scoring_parameter`) or a callable
1604
+ (see :ref:`scoring`) to evaluate the predictions on the test set.
1605
+
1606
+ If `None` the estimator's score method is used.
1607
+
1608
+ fit_params : dict, default=None
1609
+ Parameters to pass to the fit method of the estimator.
1610
+
1611
+ .. versionadded:: 0.24
1612
+
1613
+ Returns
1614
+ -------
1615
+ score : float
1616
+ The true score without permuting targets.
1617
+
1618
+ permutation_scores : array of shape (n_permutations,)
1619
+ The scores obtained for each permutation.
1620
+
1621
+ pvalue : float
1622
+ The p-value, which approximates the probability that the score would
1623
+ be obtained by chance. This is calculated as:
1624
+
1625
+ `(C + 1) / (n_permutations + 1)`
1626
+
1627
+ Where C is the number of permutations whose score >= the true score.
1628
+
1629
+ The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
1630
+
1631
+ Notes
1632
+ -----
1633
+ This function implements Test 1 in:
1634
+
1635
+ Ojala and Garriga. `Permutation Tests for Studying Classifier
1636
+ Performance
1637
+ <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The
1638
+ Journal of Machine Learning Research (2010) vol. 11
1639
+
1640
+ Examples
1641
+ --------
1642
+ >>> from sklearn.datasets import make_classification
1643
+ >>> from sklearn.linear_model import LogisticRegression
1644
+ >>> from sklearn.model_selection import permutation_test_score
1645
+ >>> X, y = make_classification(random_state=0)
1646
+ >>> estimator = LogisticRegression()
1647
+ >>> score, permutation_scores, pvalue = permutation_test_score(
1648
+ ... estimator, X, y, random_state=0
1649
+ ... )
1650
+ >>> print(f"Original Score: {score:.3f}")
1651
+ Original Score: 0.810
1652
+ >>> print(
1653
+ ... f"Permutation Scores: {permutation_scores.mean():.3f} +/- "
1654
+ ... f"{permutation_scores.std():.3f}"
1655
+ ... )
1656
+ Permutation Scores: 0.505 +/- 0.057
1657
+ >>> print(f"P-value: {pvalue:.3f}")
1658
+ P-value: 0.010
1659
+ """
1660
+ X, y, groups = indexable(X, y, groups)
1661
+
1662
+ cv = check_cv(cv, y, classifier=is_classifier(estimator))
1663
+ scorer = check_scoring(estimator, scoring=scoring)
1664
+ random_state = check_random_state(random_state)
1665
+
1666
+ # We clone the estimator to make sure that all the folds are
1667
+ # independent, and that it is pickle-able.
1668
+ score = _permutation_test_score(
1669
+ clone(estimator), X, y, groups, cv, scorer, fit_params=fit_params
1670
+ )
1671
+ permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
1672
+ delayed(_permutation_test_score)(
1673
+ clone(estimator),
1674
+ X,
1675
+ _shuffle(y, groups, random_state),
1676
+ groups,
1677
+ cv,
1678
+ scorer,
1679
+ fit_params=fit_params,
1680
+ )
1681
+ for _ in range(n_permutations)
1682
+ )
1683
+ permutation_scores = np.array(permutation_scores)
1684
+ pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
1685
+ return score, permutation_scores, pvalue
1686
+
1687
+
1688
+ def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params):
1689
+ """Auxiliary function for permutation_test_score"""
1690
+ # Adjust length of sample weights
1691
+ fit_params = fit_params if fit_params is not None else {}
1692
+ avg_score = []
1693
+ for train, test in cv.split(X, y, groups):
1694
+ X_train, y_train = _safe_split(estimator, X, y, train)
1695
+ X_test, y_test = _safe_split(estimator, X, y, test, train)
1696
+ fit_params = _check_method_params(X, params=fit_params, indices=train)
1697
+ estimator.fit(X_train, y_train, **fit_params)
1698
+ avg_score.append(scorer(estimator, X_test, y_test))
1699
+ return np.mean(avg_score)
1700
+
1701
+
1702
+ def _shuffle(y, groups, random_state):
1703
+ """Return a shuffled copy of y eventually shuffle among same groups."""
1704
+ if groups is None:
1705
+ indices = random_state.permutation(len(y))
1706
+ else:
1707
+ indices = np.arange(len(groups))
1708
+ for group in np.unique(groups):
1709
+ this_mask = groups == group
1710
+ indices[this_mask] = random_state.permutation(indices[this_mask])
1711
+ return _safe_indexing(y, indices)
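+ 
+ 
+ # Illustrative sketch (not part of scikit-learn; the helper name is
+ # hypothetical): with ``groups`` given, `_shuffle` permutes ``y`` only within
+ # each group, which is how `permutation_test_score` preserves group structure.
+ def _example_shuffle_within_groups():
+     import numpy as np
+ 
+     y = np.array([0, 1, 2, 3])
+     groups = np.array(["a", "a", "b", "b"])
+     rng = np.random.RandomState(0)
+     shuffled = _shuffle(y, groups, rng)
+     # The first two entries stay within {0, 1}, the last two within {2, 3}.
+     return shuffled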
1712
+
1713
+
1714
+ @validate_params(
1715
+ {
1716
+ "estimator": [HasMethods(["fit"])],
1717
+ "X": ["array-like", "sparse matrix"],
1718
+ "y": ["array-like", None],
1719
+ "groups": ["array-like", None],
1720
+ "train_sizes": ["array-like"],
1721
+ "cv": ["cv_object"],
1722
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
1723
+ "exploit_incremental_learning": ["boolean"],
1724
+ "n_jobs": [Integral, None],
1725
+ "pre_dispatch": [Integral, str],
1726
+ "verbose": ["verbose"],
1727
+ "shuffle": ["boolean"],
1728
+ "random_state": ["random_state"],
1729
+ "error_score": [StrOptions({"raise"}), Real],
1730
+ "return_times": ["boolean"],
1731
+ "fit_params": [dict, None],
1732
+ },
1733
+ prefer_skip_nested_validation=False, # estimator is not validated yet
1734
+ )
1735
+ def learning_curve(
1736
+ estimator,
1737
+ X,
1738
+ y,
1739
+ *,
1740
+ groups=None,
1741
+ train_sizes=np.linspace(0.1, 1.0, 5),
1742
+ cv=None,
1743
+ scoring=None,
1744
+ exploit_incremental_learning=False,
1745
+ n_jobs=None,
1746
+ pre_dispatch="all",
1747
+ verbose=0,
1748
+ shuffle=False,
1749
+ random_state=None,
1750
+ error_score=np.nan,
1751
+ return_times=False,
1752
+ fit_params=None,
1753
+ ):
1754
+ """Learning curve.
1755
+
1756
+ Determines cross-validated training and test scores for different training
1757
+ set sizes.
1758
+
1759
+ A cross-validation generator splits the whole dataset k times in training
1760
+ and test data. Subsets of the training set with varying sizes will be used
1761
+ to train the estimator and a score for each training subset size and the
1762
+ test set will be computed. Afterwards, the scores will be averaged over
1763
+ all k runs for each training subset size.
1764
+
1765
+ Read more in the :ref:`User Guide <learning_curve>`.
1766
+
1767
+ Parameters
1768
+ ----------
1769
+ estimator : object type that implements the "fit" method
1770
+ An object of that type which is cloned for each validation. It must
1771
+ also implement "predict" unless `scoring` is a callable that doesn't
1772
+ rely on "predict" to compute a score.
1773
+
1774
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1775
+ Training vector, where `n_samples` is the number of samples and
1776
+ `n_features` is the number of features.
1777
+
1778
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
1779
+ Target relative to X for classification or regression;
1780
+ None for unsupervised learning.
1781
+
1782
+ groups : array-like of shape (n_samples,), default=None
1783
+ Group labels for the samples used while splitting the dataset into
1784
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
1785
+ instance (e.g., :class:`GroupKFold`).
1786
+
1787
+ train_sizes : array-like of shape (n_ticks,), \
1788
+ default=np.linspace(0.1, 1.0, 5)
1789
+ Relative or absolute numbers of training examples that will be used to
1790
+ generate the learning curve. If the dtype is float, it is regarded as a
1791
+ fraction of the maximum size of the training set (that is determined
1792
+ by the selected validation method), i.e. it has to be within (0, 1].
1793
+ Otherwise it is interpreted as absolute sizes of the training sets.
1794
+ Note that for classification the number of samples usually has to
1795
+ be big enough to contain at least one sample from each class.
1796
+
1797
+ cv : int, cross-validation generator or an iterable, default=None
1798
+ Determines the cross-validation splitting strategy.
1799
+ Possible inputs for cv are:
1800
+
1801
+ - None, to use the default 5-fold cross validation,
1802
+ - int, to specify the number of folds in a `(Stratified)KFold`,
1803
+ - :term:`CV splitter`,
1804
+ - An iterable yielding (train, test) splits as arrays of indices.
1805
+
1806
+ For int/None inputs, if the estimator is a classifier and ``y`` is
1807
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1808
+ other cases, :class:`KFold` is used. These splitters are instantiated
1809
+ with `shuffle=False` so the splits will be the same across calls.
1810
+
1811
+ Refer to the :ref:`User Guide <cross_validation>` for the various
1812
+ cross-validation strategies that can be used here.
1813
+
1814
+ .. versionchanged:: 0.22
1815
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1816
+
1817
+ scoring : str or callable, default=None
1818
+ A str (see model evaluation documentation) or
1819
+ a scorer callable object / function with signature
1820
+ ``scorer(estimator, X, y)``.
1821
+
1822
+ exploit_incremental_learning : bool, default=False
1823
+ If the estimator supports incremental learning, this will be
1824
+ used to speed up fitting for different training set sizes.
1825
+
1826
+ n_jobs : int, default=None
1827
+ Number of jobs to run in parallel. Training the estimator and computing
1828
+ the score are parallelized over the different training and test sets.
1829
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1830
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1831
+ for more details.
1832
+
1833
+ pre_dispatch : int or str, default='all'
1834
+ Number of predispatched jobs for parallel execution (default is
1835
+ all). The option can reduce the allocated memory. The str can
1836
+ be an expression like '2*n_jobs'.
1837
+
1838
+ verbose : int, default=0
1839
+ Controls the verbosity: the higher, the more messages.
1840
+
1841
+ shuffle : bool, default=False
1842
+ Whether to shuffle training data before taking prefixes of it
1843
+ based on ``train_sizes``.
1844
+
1845
+ random_state : int, RandomState instance or None, default=None
1846
+ Used when ``shuffle`` is True. Pass an int for reproducible
1847
+ output across multiple function calls.
1848
+ See :term:`Glossary <random_state>`.
1849
+
1850
+ error_score : 'raise' or numeric, default=np.nan
1851
+ Value to assign to the score if an error occurs in estimator fitting.
1852
+ If set to 'raise', the error is raised.
1853
+ If a numeric value is given, FitFailedWarning is raised.
1854
+
1855
+ .. versionadded:: 0.20
1856
+
1857
+ return_times : bool, default=False
1858
+ Whether to return the fit and score times.
1859
+
1860
+ fit_params : dict, default=None
1861
+ Parameters to pass to the fit method of the estimator.
1862
+
1863
+ .. versionadded:: 0.24
1864
+
1865
+ Returns
1866
+ -------
1867
+ train_sizes_abs : array of shape (n_unique_ticks,)
1868
+ Numbers of training examples that have been used to generate the
1869
+ learning curve. Note that the number of ticks might be less
1870
+ than n_ticks because duplicate entries will be removed.
1871
+
1872
+ train_scores : array of shape (n_ticks, n_cv_folds)
1873
+ Scores on training sets.
1874
+
1875
+ test_scores : array of shape (n_ticks, n_cv_folds)
1876
+ Scores on test set.
1877
+
1878
+ fit_times : array of shape (n_ticks, n_cv_folds)
1879
+ Times spent for fitting in seconds. Only present if ``return_times``
1880
+ is True.
1881
+
1882
+ score_times : array of shape (n_ticks, n_cv_folds)
1883
+ Times spent for scoring in seconds. Only present if ``return_times``
1884
+ is True.
1885
+
1886
+ Examples
1887
+ --------
1888
+ >>> from sklearn.datasets import make_classification
1889
+ >>> from sklearn.tree import DecisionTreeClassifier
1890
+ >>> from sklearn.model_selection import learning_curve
1891
+ >>> X, y = make_classification(n_samples=100, n_features=10, random_state=42)
1892
+ >>> tree = DecisionTreeClassifier(max_depth=4, random_state=42)
1893
+ >>> train_size_abs, train_scores, test_scores = learning_curve(
1894
+ ... tree, X, y, train_sizes=[0.3, 0.6, 0.9]
1895
+ ... )
1896
+ >>> for train_size, cv_train_scores, cv_test_scores in zip(
1897
+ ... train_size_abs, train_scores, test_scores
1898
+ ... ):
1899
+ ... print(f"{train_size} samples were used to train the model")
1900
+ ... print(f"The average train accuracy is {cv_train_scores.mean():.2f}")
1901
+ ... print(f"The average test accuracy is {cv_test_scores.mean():.2f}")
1902
+ 24 samples were used to train the model
1903
+ The average train accuracy is 1.00
1904
+ The average test accuracy is 0.85
1905
+ 48 samples were used to train the model
1906
+ The average train accuracy is 1.00
1907
+ The average test accuracy is 0.90
1908
+ 72 samples were used to train the model
1909
+ The average train accuracy is 1.00
1910
+ The average test accuracy is 0.93
1911
+ """
1912
+ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
1913
+ raise ValueError(
1914
+ "An estimator must support the partial_fit interface "
1915
+ "to exploit incremental learning"
1916
+ )
1917
+ X, y, groups = indexable(X, y, groups)
1918
+
1919
+ cv = check_cv(cv, y, classifier=is_classifier(estimator))
1920
+ # Store it as list as we will be iterating over the list multiple times
1921
+ cv_iter = list(cv.split(X, y, groups))
1922
+
1923
+ scorer = check_scoring(estimator, scoring=scoring)
1924
+
1925
+ n_max_training_samples = len(cv_iter[0][0])
1926
+ # Because the lengths of folds can be significantly different, it is
1927
+ # not guaranteed that we use all of the available training data when we
1928
+ # use the first 'n_max_training_samples' samples.
1929
+ train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples)
1930
+ n_unique_ticks = train_sizes_abs.shape[0]
1931
+ if verbose > 0:
1932
+ print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
1933
+
1934
+ parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
1935
+
1936
+ if shuffle:
1937
+ rng = check_random_state(random_state)
1938
+ cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
1939
+
1940
+ if exploit_incremental_learning:
1941
+ classes = np.unique(y) if is_classifier(estimator) else None
1942
+ out = parallel(
1943
+ delayed(_incremental_fit_estimator)(
1944
+ clone(estimator),
1945
+ X,
1946
+ y,
1947
+ classes,
1948
+ train,
1949
+ test,
1950
+ train_sizes_abs,
1951
+ scorer,
1952
+ return_times,
1953
+ error_score=error_score,
1954
+ fit_params=fit_params,
1955
+ )
1956
+ for train, test in cv_iter
1957
+ )
1958
+ out = np.asarray(out).transpose((2, 1, 0))
1959
+ else:
1960
+ train_test_proportions = []
1961
+ for train, test in cv_iter:
1962
+ for n_train_samples in train_sizes_abs:
1963
+ train_test_proportions.append((train[:n_train_samples], test))
1964
+
1965
+ results = parallel(
1966
+ delayed(_fit_and_score)(
1967
+ clone(estimator),
1968
+ X,
1969
+ y,
1970
+ scorer=scorer,
1971
+ train=train,
1972
+ test=test,
1973
+ verbose=verbose,
1974
+ parameters=None,
1975
+ fit_params=fit_params,
1976
+ # TODO(SLEP6): support score params here
1977
+ score_params=None,
1978
+ return_train_score=True,
1979
+ error_score=error_score,
1980
+ return_times=return_times,
1981
+ )
1982
+ for train, test in train_test_proportions
1983
+ )
1984
+ _warn_or_raise_about_fit_failures(results, error_score)
1985
+ results = _aggregate_score_dicts(results)
1986
+ train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
1987
+ test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
1988
+ out = [train_scores, test_scores]
1989
+
1990
+ if return_times:
1991
+ fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T
1992
+ score_times = results["score_time"].reshape(-1, n_unique_ticks).T
1993
+ out.extend([fit_times, score_times])
1994
+
1995
+ ret = train_sizes_abs, out[0], out[1]
1996
+
1997
+ if return_times:
1998
+ ret = ret + (out[2], out[3])
1999
+
2000
+ return ret
2001
+
2002
+
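
As a usage sketch (not part of the diff itself), `learning_curve` can also return per-tick timing arrays when `return_times=True`, and `shuffle=True` permutes each training fold before prefixes are taken. The dataset and estimator below are illustrative choices only.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import learning_curve

X, y = make_classification(n_samples=200, random_state=0)

sizes, train_scores, test_scores, fit_times, score_times = learning_curve(
    LogisticRegression(max_iter=1000),
    X,
    y,
    train_sizes=[0.25, 0.5, 1.0],
    shuffle=True,
    random_state=0,
    return_times=True,
)
# With the default 5-fold CV, each fold has 160 training samples, so the
# fractional ticks translate to [40, 80, 160]; all arrays have shape (3, 5).
print(sizes, fit_times.shape)
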
2003
+ def _translate_train_sizes(train_sizes, n_max_training_samples):
2004
+ """Determine absolute sizes of training subsets and validate 'train_sizes'.
2005
+
2006
+ Examples:
2007
+ _translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
2008
+ _translate_train_sizes([5, 10], 10) -> [5, 10]
2009
+
2010
+ Parameters
2011
+ ----------
2012
+ train_sizes : array-like of shape (n_ticks,)
2013
+ Numbers of training examples that will be used to generate the
2014
+ learning curve. If the dtype is float, it is regarded as a
2015
+ fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
2016
+
2017
+ n_max_training_samples : int
2018
+ Maximum number of training samples (upper bound of 'train_sizes').
2019
+
2020
+ Returns
2021
+ -------
2022
+ train_sizes_abs : array of shape (n_unique_ticks,)
2023
+ Numbers of training examples that will be used to generate the
2024
+ learning curve. Note that the number of ticks might be less
2025
+ than n_ticks because duplicate entries will be removed.
2026
+ """
2027
+ train_sizes_abs = np.asarray(train_sizes)
2028
+ n_ticks = train_sizes_abs.shape[0]
2029
+ n_min_required_samples = np.min(train_sizes_abs)
2030
+ n_max_required_samples = np.max(train_sizes_abs)
2031
+ if np.issubdtype(train_sizes_abs.dtype, np.floating):
2032
+ if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
2033
+ raise ValueError(
2034
+ "train_sizes has been interpreted as fractions "
2035
+ "of the maximum number of training samples and "
2036
+ "must be within (0, 1], but is within [%f, %f]."
2037
+ % (n_min_required_samples, n_max_required_samples)
2038
+ )
2039
+ train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
2040
+ dtype=int, copy=False
2041
+ )
2042
+ train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples)
2043
+ else:
2044
+ if (
2045
+ n_min_required_samples <= 0
2046
+ or n_max_required_samples > n_max_training_samples
2047
+ ):
2048
+ raise ValueError(
2049
+ "train_sizes has been interpreted as absolute "
2050
+ "numbers of training samples and must be within "
2051
+ "(0, %d], but is within [%d, %d]."
2052
+ % (
2053
+ n_max_training_samples,
2054
+ n_min_required_samples,
2055
+ n_max_required_samples,
2056
+ )
2057
+ )
2058
+
2059
+ train_sizes_abs = np.unique(train_sizes_abs)
2060
+ if n_ticks > train_sizes_abs.shape[0]:
2061
+ warnings.warn(
2062
+ "Removed duplicate entries from 'train_sizes'. Number "
2063
+ "of ticks will be less than the size of "
2064
+ "'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks),
2065
+ RuntimeWarning,
2066
+ )
2067
+
2068
+ return train_sizes_abs
2069
+
2070
+
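
A small standalone illustration (assumed behaviour, not the function above) of how fractional `train_sizes` are translated into absolute, de-duplicated tick sizes:

import numpy as np

train_sizes = np.array([0.1, 0.55, 1.0])
n_max_training_samples = 80

# Fractions are scaled by the largest training fold, truncated to int,
# clipped into [1, n_max_training_samples] and de-duplicated.
abs_sizes = (train_sizes * n_max_training_samples).astype(int)
abs_sizes = np.clip(abs_sizes, 1, n_max_training_samples)
abs_sizes = np.unique(abs_sizes)
print(abs_sizes)  # [ 8 44 80]
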
2071
+ def _incremental_fit_estimator(
2072
+ estimator,
2073
+ X,
2074
+ y,
2075
+ classes,
2076
+ train,
2077
+ test,
2078
+ train_sizes,
2079
+ scorer,
2080
+ return_times,
2081
+ error_score,
2082
+ fit_params,
2083
+ ):
2084
+ """Train estimator on training subsets incrementally and compute scores."""
2085
+ train_scores, test_scores, fit_times, score_times = [], [], [], []
2086
+ partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
2087
+ if fit_params is None:
2088
+ fit_params = {}
2089
+ if classes is None:
2090
+ partial_fit_func = partial(estimator.partial_fit, **fit_params)
2091
+ else:
2092
+ partial_fit_func = partial(estimator.partial_fit, classes=classes, **fit_params)
2093
+
2094
+ for n_train_samples, partial_train in partitions:
2095
+ train_subset = train[:n_train_samples]
2096
+ X_train, y_train = _safe_split(estimator, X, y, train_subset)
2097
+ X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train)
2098
+ X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
2099
+ start_fit = time.time()
2100
+ if y_partial_train is None:
2101
+ partial_fit_func(X_partial_train)
2102
+ else:
2103
+ partial_fit_func(X_partial_train, y_partial_train)
2104
+ fit_time = time.time() - start_fit
2105
+ fit_times.append(fit_time)
2106
+
2107
+ start_score = time.time()
2108
+
2109
+ # TODO(SLEP6): support score params in the following two calls
2110
+ test_scores.append(
2111
+ _score(
2112
+ estimator,
2113
+ X_test,
2114
+ y_test,
2115
+ scorer,
2116
+ score_params=None,
2117
+ error_score=error_score,
2118
+ )
2119
+ )
2120
+ train_scores.append(
2121
+ _score(
2122
+ estimator,
2123
+ X_train,
2124
+ y_train,
2125
+ scorer,
2126
+ score_params=None,
2127
+ error_score=error_score,
2128
+ )
2129
+ )
2130
+ score_time = time.time() - start_score
2131
+ score_times.append(score_time)
2132
+
2133
+ ret = (
2134
+ (train_scores, test_scores, fit_times, score_times)
2135
+ if return_times
2136
+ else (train_scores, test_scores)
2137
+ )
2138
+
2139
+ return np.array(ret).T
2140
+
2141
+
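
The `zip(train_sizes, np.split(train, train_sizes)[:-1])` pattern above hands each `partial_fit` call only the samples added since the previous tick. A toy sketch of that indexing with made-up sizes:

import numpy as np

train = np.arange(10)             # indices of one CV training fold
train_sizes = np.array([3, 6, 9])

# np.split(train, [3, 6, 9]) -> [train[:3], train[3:6], train[6:9], train[9:]];
# dropping the last piece leaves exactly the increment between consecutive ticks.
for n_train_samples, partial_train in zip(train_sizes, np.split(train, train_sizes)[:-1]):
    print(n_train_samples, partial_train)
# 3 [0 1 2]
# 6 [3 4 5]
# 9 [6 7 8]
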
2142
+ @validate_params(
2143
+ {
2144
+ "estimator": [HasMethods(["fit"])],
2145
+ "X": ["array-like", "sparse matrix"],
2146
+ "y": ["array-like", None],
2147
+ "param_name": [str],
2148
+ "param_range": ["array-like"],
2149
+ "groups": ["array-like", None],
2150
+ "cv": ["cv_object"],
2151
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
2152
+ "n_jobs": [Integral, None],
2153
+ "pre_dispatch": [Integral, str],
2154
+ "verbose": ["verbose"],
2155
+ "error_score": [StrOptions({"raise"}), Real],
2156
+ "fit_params": [dict, None],
2157
+ },
2158
+ prefer_skip_nested_validation=False, # estimator is not validated yet
2159
+ )
2160
+ def validation_curve(
2161
+ estimator,
2162
+ X,
2163
+ y,
2164
+ *,
2165
+ param_name,
2166
+ param_range,
2167
+ groups=None,
2168
+ cv=None,
2169
+ scoring=None,
2170
+ n_jobs=None,
2171
+ pre_dispatch="all",
2172
+ verbose=0,
2173
+ error_score=np.nan,
2174
+ fit_params=None,
2175
+ ):
2176
+ """Validation curve.
2177
+
2178
+ Determine training and test scores for varying parameter values.
2179
+
2180
+ Compute scores for an estimator with different values of a specified
2181
+ parameter. This is similar to grid search with one parameter. However, this
2182
+ will also compute training scores and is merely a utility for plotting the
2183
+ results.
2184
+
2185
+ Read more in the :ref:`User Guide <validation_curve>`.
2186
+
2187
+ Parameters
2188
+ ----------
2189
+ estimator : object type that implements the "fit" method
2190
+ An object of that type which is cloned for each validation. It must
2191
+ also implement "predict" unless `scoring` is a callable that doesn't
2192
+ rely on "predict" to compute a score.
2193
+
2194
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2195
+ Training vector, where `n_samples` is the number of samples and
2196
+ `n_features` is the number of features.
2197
+
2198
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
2199
+ Target relative to X for classification or regression;
2200
+ None for unsupervised learning.
2201
+
2202
+ param_name : str
2203
+ Name of the parameter that will be varied.
2204
+
2205
+ param_range : array-like of shape (n_values,)
2206
+ The values of the parameter that will be evaluated.
2207
+
2208
+ groups : array-like of shape (n_samples,), default=None
2209
+ Group labels for the samples used while splitting the dataset into
2210
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
2211
+ instance (e.g., :class:`GroupKFold`).
2212
+
2213
+ cv : int, cross-validation generator or an iterable, default=None
2214
+ Determines the cross-validation splitting strategy.
2215
+ Possible inputs for cv are:
2216
+
2217
+ - None, to use the default 5-fold cross validation,
2218
+ - int, to specify the number of folds in a `(Stratified)KFold`,
2219
+ - :term:`CV splitter`,
2220
+ - An iterable yielding (train, test) splits as arrays of indices.
2221
+
2222
+ For int/None inputs, if the estimator is a classifier and ``y`` is
2223
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
2224
+ other cases, :class:`KFold` is used. These splitters are instantiated
2225
+ with `shuffle=False` so the splits will be the same across calls.
2226
+
2227
+ Refer to the :ref:`User Guide <cross_validation>` for the various
2228
+ cross-validation strategies that can be used here.
2229
+
2230
+ .. versionchanged:: 0.22
2231
+ ``cv`` default value if None changed from 3-fold to 5-fold.
2232
+
2233
+ scoring : str or callable, default=None
2234
+ A str (see model evaluation documentation) or
2235
+ a scorer callable object / function with signature
2236
+ ``scorer(estimator, X, y)``.
2237
+
2238
+ n_jobs : int, default=None
2239
+ Number of jobs to run in parallel. Training the estimator and computing
2240
+ the score are parallelized over the combinations of each parameter
2241
+ value and each cross-validation split.
2242
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
2243
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
2244
+ for more details.
2245
+
2246
+ pre_dispatch : int or str, default='all'
2247
+ Number of predispatched jobs for parallel execution (default is
2248
+ all). The option can reduce the allocated memory. The str can
2249
+ be an expression like '2*n_jobs'.
2250
+
2251
+ verbose : int, default=0
2252
+ Controls the verbosity: the higher, the more messages.
2253
+
2254
+ error_score : 'raise' or numeric, default=np.nan
2255
+ Value to assign to the score if an error occurs in estimator fitting.
2256
+ If set to 'raise', the error is raised.
2257
+ If a numeric value is given, FitFailedWarning is raised.
2258
+
2259
+ .. versionadded:: 0.20
2260
+
2261
+ fit_params : dict, default=None
2262
+ Parameters to pass to the fit method of the estimator.
2263
+
2264
+ .. versionadded:: 0.24
2265
+
2266
+ Returns
2267
+ -------
2268
+ train_scores : array of shape (n_ticks, n_cv_folds)
2269
+ Scores on training sets.
2270
+
2271
+ test_scores : array of shape (n_ticks, n_cv_folds)
2272
+ Scores on test set.
2273
+
2274
+ Notes
2275
+ -----
2276
+ See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
2277
+
2278
+ Examples
2279
+ --------
2280
+ >>> import numpy as np
2281
+ >>> from sklearn.datasets import make_classification
2282
+ >>> from sklearn.model_selection import validation_curve
2283
+ >>> from sklearn.linear_model import LogisticRegression
2284
+ >>> X, y = make_classification(n_samples=1_000, random_state=0)
2285
+ >>> logistic_regression = LogisticRegression()
2286
+ >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
2287
+ >>> train_scores, test_scores = validation_curve(
2288
+ ... logistic_regression, X, y, param_name=param_name, param_range=param_range
2289
+ ... )
2290
+ >>> print(f"The average train accuracy is {train_scores.mean():.2f}")
2291
+ The average train accuracy is 0.81
2292
+ >>> print(f"The average test accuracy is {test_scores.mean():.2f}")
2293
+ The average test accuracy is 0.81
2294
+ """
2295
+ X, y, groups = indexable(X, y, groups)
2296
+
2297
+ cv = check_cv(cv, y, classifier=is_classifier(estimator))
2298
+ scorer = check_scoring(estimator, scoring=scoring)
2299
+
2300
+ parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
2301
+ results = parallel(
2302
+ delayed(_fit_and_score)(
2303
+ clone(estimator),
2304
+ X,
2305
+ y,
2306
+ scorer=scorer,
2307
+ train=train,
2308
+ test=test,
2309
+ verbose=verbose,
2310
+ parameters={param_name: v},
2311
+ fit_params=fit_params,
2312
+ # TODO(SLEP6): support score params here
2313
+ score_params=None,
2314
+ return_train_score=True,
2315
+ error_score=error_score,
2316
+ )
2317
+ # NOTE do not change order of iteration to allow one time cv splitters
2318
+ for train, test in cv.split(X, y, groups)
2319
+ for v in param_range
2320
+ )
2321
+ n_params = len(param_range)
2322
+
2323
+ results = _aggregate_score_dicts(results)
2324
+ train_scores = results["train_scores"].reshape(-1, n_params).T
2325
+ test_scores = results["test_scores"].reshape(-1, n_params).T
2326
+
2327
+ return train_scores, test_scores
2328
+
2329
+
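
Because the parameter loop is the inner loop above, the flat list of results is ordered split-major, and `reshape(-1, n_params).T` yields arrays of shape `(n_params, n_splits)`. A toy reshape with made-up scores:

import numpy as np

# 2 CV splits x 3 parameter values, flattened split-major as in the loop above.
flat_scores = np.array([0.70, 0.72, 0.74,   # split 0, params 0..2
                        0.68, 0.71, 0.75])  # split 1, params 0..2
n_params = 3

scores = flat_scores.reshape(-1, n_params).T
print(scores.shape)  # (3, 2): one row per parameter value, one column per split
print(scores[0])     # [0.7  0.68] -- first parameter value across both splits
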
2330
+ def _aggregate_score_dicts(scores):
2331
+ """Aggregate the list of dict to dict of np ndarray
2332
+
2333
+ The input is a flat list of dicts of the form
2334
+ [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.1, 'acc': 1.0}, ...];
2335
+ it is converted to a dict of arrays {'prec': np.array([0.1, ...]), ...}.
2336
+
2337
+ Parameters
2338
+ ----------
2339
+
2340
+ scores : list of dict
2341
+ List of dicts of the scores for all scorers. This is a flat list,
2342
+ assumed originally to be of row major order.
2343
+
2344
+ Example
2345
+ -------
2346
+
2347
+ >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
2348
+ ... {'a': 10, 'b': 10}] # doctest: +SKIP
2349
+ >>> _aggregate_score_dicts(scores) # doctest: +SKIP
2350
+ {'a': array([1, 2, 3, 10]),
2351
+ 'b': array([10, 2, 3, 10])}
2352
+ """
2353
+ return {
2354
+ key: (
2355
+ np.asarray([score[key] for score in scores])
2356
+ if isinstance(scores[0][key], numbers.Number)
2357
+ else [score[key] for score in scores]
2358
+ )
2359
+ for key in scores[0]
2360
+ }
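
The `isinstance(scores[0][key], numbers.Number)` check above keeps numeric entries as numpy arrays while leaving non-numeric payloads (for example fitted estimators) as plain lists. A hedged standalone illustration of that pattern with made-up values:

import numbers
import numpy as np

scores = [
    {"acc": 0.9, "estimator": "clf-0"},
    {"acc": 0.8, "estimator": "clf-1"},
]

aggregated = {
    key: (
        np.asarray([s[key] for s in scores])
        if isinstance(scores[0][key], numbers.Number)
        else [s[key] for s in scores]
    )
    for key in scores[0]
}
print(aggregated["acc"])        # array([0.9, 0.8])
print(aggregated["estimator"])  # ['clf-0', 'clf-1']
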
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc ADDED
Binary file (68.6 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc ADDED
Binary file (50.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc ADDED
Binary file (19 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc ADDED
Binary file (62.5 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py ADDED
@@ -0,0 +1,24 @@
1
+ """
2
+ Common utilities for testing model selection.
3
+ """
4
+
5
+ import numpy as np
6
+
7
+ from sklearn.model_selection import KFold
8
+
9
+
10
+ class OneTimeSplitter:
11
+ """A wrapper to make KFold single entry cv iterator"""
12
+
13
+ def __init__(self, n_splits=4, n_samples=99):
14
+ self.n_splits = n_splits
15
+ self.n_samples = n_samples
16
+ self.indices = iter(KFold(n_splits=n_splits).split(np.ones(n_samples)))
17
+
18
+ def split(self, X=None, y=None, groups=None):
19
+ """Split can be called only once"""
20
+ for index in self.indices:
21
+ yield index
22
+
23
+ def get_n_splits(self, X=None, y=None, groups=None):
24
+ return self.n_splits
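
A brief sketch of why the helper above is "one time": the KFold splits are materialised as an iterator at construction, so a second call to `split` yields nothing. This assumes `OneTimeSplitter` as defined above is available in the current namespace.

cv = OneTimeSplitter(n_splits=4, n_samples=12)
first_pass = list(cv.split())
second_pass = list(cv.split())
print(len(first_pass), len(second_pass))  # 4 0 -- the underlying iterator is exhausted
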