applied-ai-018 committed
Commit 9997805 · verified · 1 Parent(s): 7f44192

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/sklearn/__check_build/__init__.py +47 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/__check_build/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so +0 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py +115 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py +123 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py +73 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py +57 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/version.py +14 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/__init__.py +3 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py +1083 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__init__.py +0 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/test_pls.py +646 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py +47 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__init__.py +100 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_base.py +814 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py +857 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_huber.py +352 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py +2190 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_omp.py +1097 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py +575 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sag.py +372 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pxd +26 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py +456 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py +0 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/sklearn/__check_build/__init__.py ADDED
@@ -0,0 +1,47 @@
""" Module to give helpful messages to the user that did not
compile scikit-learn properly.
"""
import os

INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""

STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""


def raise_build_error(e):
    # Raise a comprehensible error and list the contents of the
    # directory to help debugging on the mailing list.
    local_dir = os.path.split(__file__)[0]
    msg = STANDARD_MSG
    if local_dir == "sklearn/__check_build":
        # Picking up the local install: this will work only if the
        # install is an 'inplace build'
        msg = INPLACE_MSG
    dir_content = list()
    for i, filename in enumerate(os.listdir(local_dir)):
        if (i + 1) % 3:
            dir_content.append(filename.ljust(26))
        else:
            dir_content.append(filename + "\n")
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.

If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, "".join(dir_content).strip(), msg))


try:
    from ._check_build import check_build  # noqa
except ImportError as e:
    raise_build_error(e)
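The guard above only runs at import time, but the error message can be previewed by calling raise_build_error directly. A minimal sketch, not part of this commit, assuming an installed scikit-learn:

# Illustrative only: preview the message produced by raise_build_error
# without actually breaking the install.
from sklearn.__check_build import raise_build_error

try:
    raise_build_error(ImportError("simulated missing _check_build extension"))
except ImportError as exc:
    # The message lists the package directory contents and build instructions.
    print(exc)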
env-llmeval/lib/python3.10/site-packages/sklearn/__check_build/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.67 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (51.3 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py ADDED
@@ -0,0 +1,115 @@
"""
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD


import contextlib
import os

import sklearn

from .._min_dependencies import CYTHON_MIN_VERSION
from ..externals._packaging.version import parse
from .openmp_helpers import check_openmp_support
from .pre_build_helpers import basic_check_build

DEFAULT_ROOT = "sklearn"


def _check_cython_version():
    message = (
        "Please install Cython with a version >= {0} in order "
        "to build a scikit-learn from source."
    ).format(CYTHON_MIN_VERSION)
    try:
        import Cython
    except ModuleNotFoundError as e:
        # Re-raise with more informative error message instead:
        raise ModuleNotFoundError(message) from e

    if parse(Cython.__version__) < parse(CYTHON_MIN_VERSION):
        message += " The current version of Cython is {} installed in {}.".format(
            Cython.__version__, Cython.__path__
        )
        raise ValueError(message)


def cythonize_extensions(extension):
    """Check that a recent Cython is available and cythonize extensions"""
    _check_cython_version()
    from Cython.Build import cythonize

    # Fast fail before cythonization if compiler fails compiling basic test
    # code even without OpenMP
    basic_check_build()

    # check simple compilation with OpenMP. If it fails scikit-learn will be
    # built without OpenMP and the test test_openmp_supported in the test suite
    # will fail.
    # `check_openmp_support` compiles a small test program to see if the
    # compilers are properly configured to build with OpenMP. This is expensive
    # and we only want to call this function once.
    # The result of this check is cached as a private attribute on the sklearn
    # module (only at build-time) to be used in the build_ext subclass defined
    # in the top-level setup.py file to actually build the compiled extensions
    # with OpenMP flags if needed.
    sklearn._OPENMP_SUPPORTED = check_openmp_support()

    n_jobs = 1
    with contextlib.suppress(ImportError):
        import joblib

        n_jobs = joblib.cpu_count()

    # Additional checks for Cython
    cython_enable_debug_directives = (
        os.environ.get("SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES", "0") != "0"
    )

    compiler_directives = {
        "language_level": 3,
        "boundscheck": cython_enable_debug_directives,
        "wraparound": False,
        "initializedcheck": False,
        "nonecheck": False,
        "cdivision": True,
        "profile": False,
    }

    return cythonize(
        extension,
        nthreads=n_jobs,
        compiler_directives=compiler_directives,
        annotate=False,
    )


def gen_from_templates(templates):
    """Generate cython files from a list of templates"""
    # Lazy import because cython is not a runtime dependency.
    from Cython import Tempita

    for template in templates:
        outfile = template.replace(".tp", "")

        # if the template is not updated, no need to output the cython file
        if not (
            os.path.exists(outfile)
            and os.stat(template).st_mtime < os.stat(outfile).st_mtime
        ):
            with open(template, "r") as f:
                tmpl = f.read()

            tmpl_ = Tempita.sub(tmpl)

            warn_msg = (
                "# WARNING: Do not edit this file directly.\n"
                f"# It is automatically generated from {template!r}.\n"
                "# Changes must be made there.\n\n"
            )

            with open(outfile, "w") as f:
                f.write(warn_msg)
                f.write(tmpl_)
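gen_from_templates only regenerates a file when the .tp template is newer than its output; the substitution itself is a plain Tempita.sub call. A rough standalone sketch of that step, not part of this commit and assuming Cython (and hence Cython.Tempita) is installed; the template text is made up for illustration:

# Illustrative only: mimic the Tempita substitution used by gen_from_templates.
from Cython import Tempita

template = """
{{for dtype in ['float32', 'float64']}}
def sum_{{dtype}}(values):
    # a real .pyx.tp template would emit typed {{dtype}} code here
    return sum(values)
{{endfor}}
"""

# Expands the {{for}} loop into two concrete function definitions.
print(Tempita.sub(template))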
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.5 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc ADDED
Binary file (2.8 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc ADDED
Binary file (1.77 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc ADDED
Binary file (1.62 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc ADDED
Binary file (667 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/openmp_helpers.py ADDED
@@ -0,0 +1,123 @@
"""Helpers for OpenMP support during the build."""

# This code is adapted for a large part from the astropy openmp helpers, which
# can be found at: https://github.com/astropy/extension-helpers/blob/master/extension_helpers/_openmp_helpers.py  # noqa


import os
import sys
import textwrap
import warnings

from .pre_build_helpers import compile_test_program


def get_openmp_flag():
    if sys.platform == "win32":
        return ["/openmp"]
    elif sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""):
        # -fopenmp can't be passed as compile flag when using Apple-clang.
        # OpenMP support has to be enabled during preprocessing.
        #
        # For example, our macOS wheel build jobs use the following environment
        # variables to build with Apple-clang and the brew installed "libomp":
        #
        # export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp"
        # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
        # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
        # export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib
        #                          -L/usr/local/opt/libomp/lib -lomp"
        return []
    # Default flag for GCC and clang:
    return ["-fopenmp"]


def check_openmp_support():
    """Check whether OpenMP test code can be compiled and run"""
    if "PYODIDE_PACKAGE_ABI" in os.environ:
        # Pyodide doesn't support OpenMP
        return False

    code = textwrap.dedent("""\
        #include <omp.h>
        #include <stdio.h>
        int main(void) {
        #pragma omp parallel
        printf("nthreads=%d\\n", omp_get_num_threads());
        return 0;
        }
        """)

    extra_preargs = os.getenv("LDFLAGS", None)
    if extra_preargs is not None:
        extra_preargs = extra_preargs.strip().split(" ")
        # FIXME: temporary fix to link against system libraries on linux
        # "-Wl,--sysroot=/" should be removed
        extra_preargs = [
            flag
            for flag in extra_preargs
            if flag.startswith(("-L", "-Wl,-rpath", "-l", "-Wl,--sysroot=/"))
        ]

    extra_postargs = get_openmp_flag()

    openmp_exception = None
    try:
        output = compile_test_program(
            code, extra_preargs=extra_preargs, extra_postargs=extra_postargs
        )

        if output and "nthreads=" in output[0]:
            nthreads = int(output[0].strip().split("=")[1])
            openmp_supported = len(output) == nthreads
        elif "PYTHON_CROSSENV" in os.environ:
            # Since we can't run the test program when cross-compiling
            # assume that openmp is supported if the program can be
            # compiled.
            openmp_supported = True
        else:
            openmp_supported = False

    except Exception as exception:
        # We could be more specific and only catch: CompileError, LinkError,
        # and subprocess.CalledProcessError.
        # setuptools introduced CompileError and LinkError, but that requires
        # version 61.1. Even the latest version of Ubuntu (22.04LTS) only
        # ships with 59.6. So for now we catch all exceptions and reraise a
        # generic exception with the original error message instead:
        openmp_supported = False
        openmp_exception = exception

    if not openmp_supported:
        if os.getenv("SKLEARN_FAIL_NO_OPENMP"):
            raise Exception(
                "Failed to build scikit-learn with OpenMP support"
            ) from openmp_exception
        else:
            message = textwrap.dedent("""

                ***********
                * WARNING *
                ***********

                It seems that scikit-learn cannot be built with OpenMP.

                - Make sure you have followed the installation instructions:

                    https://scikit-learn.org/dev/developers/advanced_installation.html

                - If your compiler supports OpenMP but you still see this
                  message, please submit a bug report at:

                    https://github.com/scikit-learn/scikit-learn/issues

                - The build will continue with OpenMP-based parallelism
                  disabled. Note however that some estimators will run in
                  sequential mode instead of leveraging thread-based
                  parallelism.

                ***
                """)
            warnings.warn(message)

    return openmp_supported
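The decision rule above is simple: the test program prints one "nthreads=N" line per OpenMP thread, and support is declared only when the number of captured lines equals N. A small sketch of that rule on hard-coded sample output (not part of this commit):

# Illustrative only: the check applied to the captured stdout of the
# OpenMP test program compiled by check_openmp_support.
sample_output = ["nthreads=4", "nthreads=4", "nthreads=4", "nthreads=4"]

nthreads = int(sample_output[0].strip().split("=")[1])
openmp_supported = len(sample_output) == nthreads  # True: 4 lines, 4 threads
print(openmp_supported)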
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/pre_build_helpers.py ADDED
@@ -0,0 +1,73 @@
"""Helpers to check build environment before actual build of scikit-learn"""

import glob
import os
import subprocess
import sys
import tempfile
import textwrap

from setuptools.command.build_ext import customize_compiler, new_compiler


def compile_test_program(code, extra_preargs=None, extra_postargs=None):
    """Check that some C code can be compiled and run"""
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    start_dir = os.path.abspath(".")

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            os.chdir(tmp_dir)

            # Write test program
            with open("test_program.c", "w") as f:
                f.write(code)

            os.mkdir("objects")

            # Compile, test program
            ccompiler.compile(
                ["test_program.c"], output_dir="objects", extra_postargs=extra_postargs
            )

            # Link test program
            objects = glob.glob(os.path.join("objects", "*" + ccompiler.obj_extension))
            ccompiler.link_executable(
                objects,
                "test_program",
                extra_preargs=extra_preargs,
                extra_postargs=extra_postargs,
            )

            if "PYTHON_CROSSENV" not in os.environ:
                # Run test program if not cross compiling
                # will raise a CalledProcessError if return code was non-zero
                output = subprocess.check_output("./test_program")
                output = output.decode(sys.stdout.encoding or "utf-8").splitlines()
            else:
                # Return an empty output if we are cross compiling
                # as we cannot run the test_program
                output = []
        except Exception:
            raise
        finally:
            os.chdir(start_dir)

    return output


def basic_check_build():
    """Check basic compilation and linking of C code"""
    if "PYODIDE_PACKAGE_ABI" in os.environ:
        # The following check won't work in pyodide
        return
    code = textwrap.dedent("""\
        #include <stdio.h>
        int main(void) {
            return 0;
        }
        """)
    compile_test_program(code)
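basic_check_build is the simplest client of compile_test_program; a slightly extended sketch that also inspects the program's captured stdout could look like the following (illustrative only, not part of this commit, assuming the module is importable as sklearn._build_utils.pre_build_helpers and a working C compiler is available):

# Illustrative only: compile and run a tiny C program via the helper above.
import textwrap

from sklearn._build_utils.pre_build_helpers import compile_test_program

code = textwrap.dedent("""\
    #include <stdio.h>
    int main(void) {
        printf("hello from the build check\\n");
        return 0;
    }
    """)

# compile_test_program returns the stdout lines of the test executable,
# e.g. ['hello from the build check'].
print(compile_test_program(code))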
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py ADDED
@@ -0,0 +1,57 @@
import argparse
import os

from Cython import Tempita as tempita

# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.


def process_tempita(fromfile, outfile=None):
    """Process tempita templated file and write out the result.

    The template file is expected to end in `.c.tp` or `.pyx.tp`:
    E.g. processing `template.c.tp` generates `template.c`.

    """
    with open(fromfile, "r", encoding="utf-8") as f:
        template_content = f.read()

    template = tempita.Template(template_content)
    content = template.substitute()

    with open(outfile, "w", encoding="utf-8") as f:
        f.write(content)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", type=str, help="Path to the input file")
    parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
    parser.add_argument(
        "-i",
        "--ignore",
        type=str,
        help=(
            "An ignored input - may be useful to add a "
            "dependency between custom targets"
        ),
    )
    args = parser.parse_args()

    if not args.infile.endswith(".tp"):
        raise ValueError(f"Unexpected extension: {args.infile}")

    if not args.outdir:
        raise ValueError("Missing `--outdir` argument to tempita.py")

    outdir_abs = os.path.join(os.getcwd(), args.outdir)
    outfile = os.path.join(
        outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0]
    )

    process_tempita(args.infile, outfile)


if __name__ == "__main__":
    main()
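The script is meant to be driven from a build system (infile plus --outdir), but process_tempita can also be exercised directly. A hedged sketch using a throwaway template, not part of this commit and assuming Cython.Tempita is installed; file names are made up:

# Illustrative only: run process_tempita on a temporary .pyx.tp template.
import os
import tempfile

from sklearn._build_utils.tempita import process_tempita

with tempfile.TemporaryDirectory() as tmp:
    infile = os.path.join(tmp, "example.pyx.tp")
    with open(infile, "w", encoding="utf-8") as f:
        f.write("{{for name in ['a', 'b']}}\nNAME_{{name}} = '{{name}}'\n{{endfor}}\n")

    outfile = os.path.join(tmp, "example.pyx")
    process_tempita(infile, outfile)

    # The generated file contains one NAME_* assignment per loop iteration.
    with open(outfile, encoding="utf-8") as f:
        print(f.read())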
env-llmeval/lib/python3.10/site-packages/sklearn/_build_utils/version.py ADDED
@@ -0,0 +1,14 @@
#!/usr/bin/env python
""" Extract version number from __init__.py
"""

import os

sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py")

data = open(sklearn_init).readlines()
version_line = next(line for line in data if line.startswith("__version__"))

version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")

print(version)
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/__init__.py ADDED
@@ -0,0 +1,3 @@
from ._pls import CCA, PLSSVD, PLSCanonical, PLSRegression

__all__ = ["PLSCanonical", "PLSRegression", "PLSSVD", "CCA"]
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (329 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc ADDED
Binary file (29.4 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py ADDED
@@ -0,0 +1,1083 @@
1
+ """
2
+ The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
3
+ """
4
+
5
+ # Author: Edouard Duchesnay <[email protected]>
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from abc import ABCMeta, abstractmethod
10
+ from numbers import Integral, Real
11
+
12
+ import numpy as np
13
+ from scipy.linalg import svd
14
+
15
+ from ..base import (
16
+ BaseEstimator,
17
+ ClassNamePrefixFeaturesOutMixin,
18
+ MultiOutputMixin,
19
+ RegressorMixin,
20
+ TransformerMixin,
21
+ _fit_context,
22
+ )
23
+ from ..exceptions import ConvergenceWarning
24
+ from ..utils import check_array, check_consistent_length
25
+ from ..utils._param_validation import Interval, StrOptions
26
+ from ..utils.extmath import svd_flip
27
+ from ..utils.fixes import parse_version, sp_version
28
+ from ..utils.validation import FLOAT_DTYPES, check_is_fitted
29
+
30
+ __all__ = ["PLSCanonical", "PLSRegression", "PLSSVD"]
31
+
32
+
33
+ if sp_version >= parse_version("1.7"):
34
+ # Starting in scipy 1.7 pinv2 was deprecated in favor of pinv.
35
+ # pinv now uses the svd to compute the pseudo-inverse.
36
+ from scipy.linalg import pinv as pinv2
37
+ else:
38
+ from scipy.linalg import pinv2
39
+
40
+
41
+ def _pinv2_old(a):
42
+ # Used previous scipy pinv2 that was updated in:
43
+ # https://github.com/scipy/scipy/pull/10067
44
+ # We can not set `cond` or `rcond` for pinv2 in scipy >= 1.3 to keep the
45
+ # same behavior of pinv2 for scipy < 1.3, because the condition used to
46
+ # determine the rank is dependent on the output of svd.
47
+ u, s, vh = svd(a, full_matrices=False, check_finite=False)
48
+
49
+ t = u.dtype.char.lower()
50
+ factor = {"f": 1e3, "d": 1e6}
51
+ cond = np.max(s) * factor[t] * np.finfo(t).eps
52
+ rank = np.sum(s > cond)
53
+
54
+ u = u[:, :rank]
55
+ u /= s[:rank]
56
+ return np.transpose(np.conjugate(np.dot(u, vh[:rank])))
57
+
58
+
59
+ def _get_first_singular_vectors_power_method(
60
+ X, Y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False
61
+ ):
62
+ """Return the first left and right singular vectors of X'Y.
63
+
64
+ Provides an alternative to the svd(X'Y) and uses the power method instead.
65
+ With norm_y_weights to True and in mode A, this corresponds to the
66
+ algorithm section 11.3 of the Wegelin's review, except this starts at the
67
+ "update saliences" part.
68
+ """
69
+
70
+ eps = np.finfo(X.dtype).eps
71
+ try:
72
+ y_score = next(col for col in Y.T if np.any(np.abs(col) > eps))
73
+ except StopIteration as e:
74
+ raise StopIteration("Y residual is constant") from e
75
+
76
+ x_weights_old = 100 # init to big value for first convergence check
77
+
78
+ if mode == "B":
79
+ # Precompute pseudo inverse matrices
80
+ # Basically: X_pinv = (X.T X)^-1 X.T
81
+ # Which requires inverting a (n_features, n_features) matrix.
82
+ # As a result, and as detailed in the Wegelin's review, CCA (i.e. mode
83
+ # B) will be unstable if n_features > n_samples or n_targets >
84
+ # n_samples
85
+ X_pinv, Y_pinv = _pinv2_old(X), _pinv2_old(Y)
86
+
87
+ for i in range(max_iter):
88
+ if mode == "B":
89
+ x_weights = np.dot(X_pinv, y_score)
90
+ else:
91
+ x_weights = np.dot(X.T, y_score) / np.dot(y_score, y_score)
92
+
93
+ x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps
94
+ x_score = np.dot(X, x_weights)
95
+
96
+ if mode == "B":
97
+ y_weights = np.dot(Y_pinv, x_score)
98
+ else:
99
+ y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
100
+
101
+ if norm_y_weights:
102
+ y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps
103
+
104
+ y_score = np.dot(Y, y_weights) / (np.dot(y_weights, y_weights) + eps)
105
+
106
+ x_weights_diff = x_weights - x_weights_old
107
+ if np.dot(x_weights_diff, x_weights_diff) < tol or Y.shape[1] == 1:
108
+ break
109
+ x_weights_old = x_weights
110
+
111
+ n_iter = i + 1
112
+ if n_iter == max_iter:
113
+ warnings.warn("Maximum number of iterations reached", ConvergenceWarning)
114
+
115
+ return x_weights, y_weights, n_iter
116
+
117
+
118
+ def _get_first_singular_vectors_svd(X, Y):
119
+ """Return the first left and right singular vectors of X'Y.
120
+
121
+ Here the whole SVD is computed.
122
+ """
123
+ C = np.dot(X.T, Y)
124
+ U, _, Vt = svd(C, full_matrices=False)
125
+ return U[:, 0], Vt[0, :]
126
+
127
+
128
+ def _center_scale_xy(X, Y, scale=True):
129
+ """Center X, Y and scale if the scale parameter==True
130
+
131
+ Returns
132
+ -------
133
+ X, Y, x_mean, y_mean, x_std, y_std
134
+ """
135
+ # center
136
+ x_mean = X.mean(axis=0)
137
+ X -= x_mean
138
+ y_mean = Y.mean(axis=0)
139
+ Y -= y_mean
140
+ # scale
141
+ if scale:
142
+ x_std = X.std(axis=0, ddof=1)
143
+ x_std[x_std == 0.0] = 1.0
144
+ X /= x_std
145
+ y_std = Y.std(axis=0, ddof=1)
146
+ y_std[y_std == 0.0] = 1.0
147
+ Y /= y_std
148
+ else:
149
+ x_std = np.ones(X.shape[1])
150
+ y_std = np.ones(Y.shape[1])
151
+ return X, Y, x_mean, y_mean, x_std, y_std
152
+
153
+
154
+ def _svd_flip_1d(u, v):
155
+ """Same as svd_flip but works on 1d arrays, and is inplace"""
156
+ # svd_flip would force us to convert to 2d array and would also return 2d
157
+ # arrays. We don't want that.
158
+ biggest_abs_val_idx = np.argmax(np.abs(u))
159
+ sign = np.sign(u[biggest_abs_val_idx])
160
+ u *= sign
161
+ v *= sign
162
+
163
+
164
+ class _PLS(
165
+ ClassNamePrefixFeaturesOutMixin,
166
+ TransformerMixin,
167
+ RegressorMixin,
168
+ MultiOutputMixin,
169
+ BaseEstimator,
170
+ metaclass=ABCMeta,
171
+ ):
172
+ """Partial Least Squares (PLS)
173
+
174
+ This class implements the generic PLS algorithm.
175
+
176
+ Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods,
177
+ with emphasis on the two-block case
178
+ https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf
179
+ """
180
+
181
+ _parameter_constraints: dict = {
182
+ "n_components": [Interval(Integral, 1, None, closed="left")],
183
+ "scale": ["boolean"],
184
+ "deflation_mode": [StrOptions({"regression", "canonical"})],
185
+ "mode": [StrOptions({"A", "B"})],
186
+ "algorithm": [StrOptions({"svd", "nipals"})],
187
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
188
+ "tol": [Interval(Real, 0, None, closed="left")],
189
+ "copy": ["boolean"],
190
+ }
191
+
192
+ @abstractmethod
193
+ def __init__(
194
+ self,
195
+ n_components=2,
196
+ *,
197
+ scale=True,
198
+ deflation_mode="regression",
199
+ mode="A",
200
+ algorithm="nipals",
201
+ max_iter=500,
202
+ tol=1e-06,
203
+ copy=True,
204
+ ):
205
+ self.n_components = n_components
206
+ self.deflation_mode = deflation_mode
207
+ self.mode = mode
208
+ self.scale = scale
209
+ self.algorithm = algorithm
210
+ self.max_iter = max_iter
211
+ self.tol = tol
212
+ self.copy = copy
213
+
214
+ @_fit_context(prefer_skip_nested_validation=True)
215
+ def fit(self, X, Y):
216
+ """Fit model to data.
217
+
218
+ Parameters
219
+ ----------
220
+ X : array-like of shape (n_samples, n_features)
221
+ Training vectors, where `n_samples` is the number of samples and
222
+ `n_features` is the number of predictors.
223
+
224
+ Y : array-like of shape (n_samples,) or (n_samples, n_targets)
225
+ Target vectors, where `n_samples` is the number of samples and
226
+ `n_targets` is the number of response variables.
227
+
228
+ Returns
229
+ -------
230
+ self : object
231
+ Fitted model.
232
+ """
233
+ check_consistent_length(X, Y)
234
+ X = self._validate_data(
235
+ X, dtype=np.float64, copy=self.copy, ensure_min_samples=2
236
+ )
237
+ Y = check_array(
238
+ Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False
239
+ )
240
+ if Y.ndim == 1:
241
+ self._predict_1d = True
242
+ Y = Y.reshape(-1, 1)
243
+ else:
244
+ self._predict_1d = False
245
+
246
+ n = X.shape[0]
247
+ p = X.shape[1]
248
+ q = Y.shape[1]
249
+
250
+ n_components = self.n_components
251
+ # With PLSRegression n_components is bounded by the rank of (X.T X) see
252
+ # Wegelin page 25. With CCA and PLSCanonical, n_components is bounded
253
+ # by the rank of X and the rank of Y: see Wegelin page 12
254
+ rank_upper_bound = p if self.deflation_mode == "regression" else min(n, p, q)
255
+ if n_components > rank_upper_bound:
256
+ raise ValueError(
257
+ f"`n_components` upper bound is {rank_upper_bound}. "
258
+ f"Got {n_components} instead. Reduce `n_components`."
259
+ )
260
+
261
+ self._norm_y_weights = self.deflation_mode == "canonical" # 1.1
262
+ norm_y_weights = self._norm_y_weights
263
+
264
+ # Scale (in place)
265
+ Xk, Yk, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
266
+ X, Y, self.scale
267
+ )
268
+
269
+ self.x_weights_ = np.zeros((p, n_components)) # U
270
+ self.y_weights_ = np.zeros((q, n_components)) # V
271
+ self._x_scores = np.zeros((n, n_components)) # Xi
272
+ self._y_scores = np.zeros((n, n_components)) # Omega
273
+ self.x_loadings_ = np.zeros((p, n_components)) # Gamma
274
+ self.y_loadings_ = np.zeros((q, n_components)) # Delta
275
+ self.n_iter_ = []
276
+
277
+ # This whole thing corresponds to the algorithm in section 4.1 of the
278
+ # review from Wegelin. See above for a notation mapping from code to
279
+ # paper.
280
+ Y_eps = np.finfo(Yk.dtype).eps
281
+ for k in range(n_components):
282
+ # Find first left and right singular vectors of the X.T.dot(Y)
283
+ # cross-covariance matrix.
284
+ if self.algorithm == "nipals":
285
+ # Replace columns that are all close to zero with zeros
286
+ Yk_mask = np.all(np.abs(Yk) < 10 * Y_eps, axis=0)
287
+ Yk[:, Yk_mask] = 0.0
288
+
289
+ try:
290
+ (
291
+ x_weights,
292
+ y_weights,
293
+ n_iter_,
294
+ ) = _get_first_singular_vectors_power_method(
295
+ Xk,
296
+ Yk,
297
+ mode=self.mode,
298
+ max_iter=self.max_iter,
299
+ tol=self.tol,
300
+ norm_y_weights=norm_y_weights,
301
+ )
302
+ except StopIteration as e:
303
+ if str(e) != "Y residual is constant":
304
+ raise
305
+ warnings.warn(f"Y residual is constant at iteration {k}")
306
+ break
307
+
308
+ self.n_iter_.append(n_iter_)
309
+
310
+ elif self.algorithm == "svd":
311
+ x_weights, y_weights = _get_first_singular_vectors_svd(Xk, Yk)
312
+
313
+ # inplace sign flip for consistency across solvers and archs
314
+ _svd_flip_1d(x_weights, y_weights)
315
+
316
+ # compute scores, i.e. the projections of X and Y
317
+ x_scores = np.dot(Xk, x_weights)
318
+ if norm_y_weights:
319
+ y_ss = 1
320
+ else:
321
+ y_ss = np.dot(y_weights, y_weights)
322
+ y_scores = np.dot(Yk, y_weights) / y_ss
323
+
324
+ # Deflation: subtract rank-one approx to obtain Xk+1 and Yk+1
325
+ x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores)
326
+ Xk -= np.outer(x_scores, x_loadings)
327
+
328
+ if self.deflation_mode == "canonical":
329
+ # regress Yk on y_score
330
+ y_loadings = np.dot(y_scores, Yk) / np.dot(y_scores, y_scores)
331
+ Yk -= np.outer(y_scores, y_loadings)
332
+ if self.deflation_mode == "regression":
333
+ # regress Yk on x_score
334
+ y_loadings = np.dot(x_scores, Yk) / np.dot(x_scores, x_scores)
335
+ Yk -= np.outer(x_scores, y_loadings)
336
+
337
+ self.x_weights_[:, k] = x_weights
338
+ self.y_weights_[:, k] = y_weights
339
+ self._x_scores[:, k] = x_scores
340
+ self._y_scores[:, k] = y_scores
341
+ self.x_loadings_[:, k] = x_loadings
342
+ self.y_loadings_[:, k] = y_loadings
343
+
344
+ # X was approximated as Xi . Gamma.T + X_(R+1)
345
+ # Xi . Gamma.T is a sum of n_components rank-1 matrices. X_(R+1) is
346
+ # whatever is left to fully reconstruct X, and can be 0 if X is of rank
347
+ # n_components.
348
+ # Similarly, Y was approximated as Omega . Delta.T + Y_(R+1)
349
+
350
+ # Compute transformation matrices (rotations_). See User Guide.
351
+ self.x_rotations_ = np.dot(
352
+ self.x_weights_,
353
+ pinv2(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False),
354
+ )
355
+ self.y_rotations_ = np.dot(
356
+ self.y_weights_,
357
+ pinv2(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False),
358
+ )
359
+ self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
360
+ self.coef_ = (self.coef_ * self._y_std).T
361
+ self.intercept_ = self._y_mean
362
+ self._n_features_out = self.x_rotations_.shape[1]
363
+ return self
364
+
365
+ def transform(self, X, Y=None, copy=True):
366
+ """Apply the dimension reduction.
367
+
368
+ Parameters
369
+ ----------
370
+ X : array-like of shape (n_samples, n_features)
371
+ Samples to transform.
372
+
373
+ Y : array-like of shape (n_samples, n_targets), default=None
374
+ Target vectors.
375
+
376
+ copy : bool, default=True
377
+ Whether to copy `X` and `Y`, or perform in-place normalization.
378
+
379
+ Returns
380
+ -------
381
+ x_scores, y_scores : array-like or tuple of array-like
382
+ Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
383
+ """
384
+ check_is_fitted(self)
385
+ X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
386
+ # Normalize
387
+ X -= self._x_mean
388
+ X /= self._x_std
389
+ # Apply rotation
390
+ x_scores = np.dot(X, self.x_rotations_)
391
+ if Y is not None:
392
+ Y = check_array(
393
+ Y, input_name="Y", ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES
394
+ )
395
+ if Y.ndim == 1:
396
+ Y = Y.reshape(-1, 1)
397
+ Y -= self._y_mean
398
+ Y /= self._y_std
399
+ y_scores = np.dot(Y, self.y_rotations_)
400
+ return x_scores, y_scores
401
+
402
+ return x_scores
403
+
404
+ def inverse_transform(self, X, Y=None):
405
+ """Transform data back to its original space.
406
+
407
+ Parameters
408
+ ----------
409
+ X : array-like of shape (n_samples, n_components)
410
+ New data, where `n_samples` is the number of samples
411
+ and `n_components` is the number of pls components.
412
+
413
+ Y : array-like of shape (n_samples, n_components)
414
+ New target, where `n_samples` is the number of samples
415
+ and `n_components` is the number of pls components.
416
+
417
+ Returns
418
+ -------
419
+ X_reconstructed : ndarray of shape (n_samples, n_features)
420
+ Return the reconstructed `X` data.
421
+
422
+ Y_reconstructed : ndarray of shape (n_samples, n_targets)
423
+ Return the reconstructed `X` target. Only returned when `Y` is given.
424
+
425
+ Notes
426
+ -----
427
+ This transformation will only be exact if `n_components=n_features`.
428
+ """
429
+ check_is_fitted(self)
430
+ X = check_array(X, input_name="X", dtype=FLOAT_DTYPES)
431
+ # From pls space to original space
432
+ X_reconstructed = np.matmul(X, self.x_loadings_.T)
433
+ # Denormalize
434
+ X_reconstructed *= self._x_std
435
+ X_reconstructed += self._x_mean
436
+
437
+ if Y is not None:
438
+ Y = check_array(Y, input_name="Y", dtype=FLOAT_DTYPES)
439
+ # From pls space to original space
440
+ Y_reconstructed = np.matmul(Y, self.y_loadings_.T)
441
+ # Denormalize
442
+ Y_reconstructed *= self._y_std
443
+ Y_reconstructed += self._y_mean
444
+ return X_reconstructed, Y_reconstructed
445
+
446
+ return X_reconstructed
447
+
448
+ def predict(self, X, copy=True):
449
+ """Predict targets of given samples.
450
+
451
+ Parameters
452
+ ----------
453
+ X : array-like of shape (n_samples, n_features)
454
+ Samples.
455
+
456
+ copy : bool, default=True
457
+ Whether to copy `X` and `Y`, or perform in-place normalization.
458
+
459
+ Returns
460
+ -------
461
+ y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
462
+ Returns predicted values.
463
+
464
+ Notes
465
+ -----
466
+ This call requires the estimation of a matrix of shape
467
+ `(n_features, n_targets)`, which may be an issue in high dimensional
468
+ space.
469
+ """
470
+ check_is_fitted(self)
471
+ X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
472
+ # Normalize
473
+ X -= self._x_mean
474
+ X /= self._x_std
475
+ Ypred = X @ self.coef_.T + self.intercept_
476
+ return Ypred.ravel() if self._predict_1d else Ypred
477
+
478
+ def fit_transform(self, X, y=None):
479
+ """Learn and apply the dimension reduction on the train data.
480
+
481
+ Parameters
482
+ ----------
483
+ X : array-like of shape (n_samples, n_features)
484
+ Training vectors, where `n_samples` is the number of samples and
485
+ `n_features` is the number of predictors.
486
+
487
+ y : array-like of shape (n_samples, n_targets), default=None
488
+ Target vectors, where `n_samples` is the number of samples and
489
+ `n_targets` is the number of response variables.
490
+
491
+ Returns
492
+ -------
493
+ self : ndarray of shape (n_samples, n_components)
494
+ Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
495
+ """
496
+ return self.fit(X, y).transform(X, y)
497
+
498
+ def _more_tags(self):
499
+ return {"poor_score": True, "requires_y": False}
500
+
501
+
502
+ class PLSRegression(_PLS):
503
+ """PLS regression.
504
+
505
+ PLSRegression is also known as PLS2 or PLS1, depending on the number of
506
+ targets.
507
+
508
+ For a comparison between other cross decomposition algorithms, see
509
+ :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.
510
+
511
+ Read more in the :ref:`User Guide <cross_decomposition>`.
512
+
513
+ .. versionadded:: 0.8
514
+
515
+ Parameters
516
+ ----------
517
+ n_components : int, default=2
518
+ Number of components to keep. Should be in `[1, min(n_samples,
519
+ n_features, n_targets)]`.
520
+
521
+ scale : bool, default=True
522
+ Whether to scale `X` and `Y`.
523
+
524
+ max_iter : int, default=500
525
+ The maximum number of iterations of the power method when
526
+ `algorithm='nipals'`. Ignored otherwise.
527
+
528
+ tol : float, default=1e-06
529
+ The tolerance used as convergence criteria in the power method: the
530
+ algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
531
+ than `tol`, where `u` corresponds to the left singular vector.
532
+
533
+ copy : bool, default=True
534
+ Whether to copy `X` and `Y` in :term:`fit` before applying centering,
535
+ and potentially scaling. If `False`, these operations will be done
536
+ inplace, modifying both arrays.
537
+
538
+ Attributes
539
+ ----------
540
+ x_weights_ : ndarray of shape (n_features, n_components)
541
+ The left singular vectors of the cross-covariance matrices of each
542
+ iteration.
543
+
544
+ y_weights_ : ndarray of shape (n_targets, n_components)
545
+ The right singular vectors of the cross-covariance matrices of each
546
+ iteration.
547
+
548
+ x_loadings_ : ndarray of shape (n_features, n_components)
549
+ The loadings of `X`.
550
+
551
+ y_loadings_ : ndarray of shape (n_targets, n_components)
552
+ The loadings of `Y`.
553
+
554
+ x_scores_ : ndarray of shape (n_samples, n_components)
555
+ The transformed training samples.
556
+
557
+ y_scores_ : ndarray of shape (n_samples, n_components)
558
+ The transformed training targets.
559
+
560
+ x_rotations_ : ndarray of shape (n_features, n_components)
561
+ The projection matrix used to transform `X`.
562
+
563
+ y_rotations_ : ndarray of shape (n_targets, n_components)
564
+ The projection matrix used to transform `Y`.
565
+
566
+ coef_ : ndarray of shape (n_target, n_features)
567
+ The coefficients of the linear model such that `Y` is approximated as
568
+ `Y = X @ coef_.T + intercept_`.
569
+
570
+ intercept_ : ndarray of shape (n_targets,)
571
+ The intercepts of the linear model such that `Y` is approximated as
572
+ `Y = X @ coef_.T + intercept_`.
573
+
574
+ .. versionadded:: 1.1
575
+
576
+ n_iter_ : list of shape (n_components,)
577
+ Number of iterations of the power method, for each
578
+ component.
579
+
580
+ n_features_in_ : int
581
+ Number of features seen during :term:`fit`.
582
+
583
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
584
+ Names of features seen during :term:`fit`. Defined only when `X`
585
+ has feature names that are all strings.
586
+
587
+ .. versionadded:: 1.0
588
+
589
+ See Also
590
+ --------
591
+ PLSCanonical : Partial Least Squares transformer and regressor.
592
+
593
+ Examples
594
+ --------
595
+ >>> from sklearn.cross_decomposition import PLSRegression
596
+ >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
597
+ >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
598
+ >>> pls2 = PLSRegression(n_components=2)
599
+ >>> pls2.fit(X, Y)
600
+ PLSRegression()
601
+ >>> Y_pred = pls2.predict(X)
602
+
603
+ For a comparison between PLS Regression and :class:`~sklearn.decomposition.PCA`, see
604
+ :ref:`sphx_glr_auto_examples_cross_decomposition_plot_pcr_vs_pls.py`.
605
+ """
606
+
607
+ _parameter_constraints: dict = {**_PLS._parameter_constraints}
608
+ for param in ("deflation_mode", "mode", "algorithm"):
609
+ _parameter_constraints.pop(param)
610
+
611
+ # This implementation provides the same results that 3 PLS packages
612
+ # provided in the R language (R-project):
613
+ # - "mixOmics" with function pls(X, Y, mode = "regression")
614
+ # - "plspm " with function plsreg2(X, Y)
615
+ # - "pls" with function oscorespls.fit(X, Y)
616
+
617
+ def __init__(
618
+ self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
619
+ ):
620
+ super().__init__(
621
+ n_components=n_components,
622
+ scale=scale,
623
+ deflation_mode="regression",
624
+ mode="A",
625
+ algorithm="nipals",
626
+ max_iter=max_iter,
627
+ tol=tol,
628
+ copy=copy,
629
+ )
630
+
631
+ def fit(self, X, Y):
632
+ """Fit model to data.
633
+
634
+ Parameters
635
+ ----------
636
+ X : array-like of shape (n_samples, n_features)
637
+ Training vectors, where `n_samples` is the number of samples and
638
+ `n_features` is the number of predictors.
639
+
640
+ Y : array-like of shape (n_samples,) or (n_samples, n_targets)
641
+ Target vectors, where `n_samples` is the number of samples and
642
+ `n_targets` is the number of response variables.
643
+
644
+ Returns
645
+ -------
646
+ self : object
647
+ Fitted model.
648
+ """
649
+ super().fit(X, Y)
650
+ # expose the fitted attributes `x_scores_` and `y_scores_`
651
+ self.x_scores_ = self._x_scores
652
+ self.y_scores_ = self._y_scores
653
+ return self
654
+
655
+
656
+ class PLSCanonical(_PLS):
657
+ """Partial Least Squares transformer and regressor.
658
+
659
+ For a comparison between other cross decomposition algorithms, see
660
+ :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.
661
+
662
+ Read more in the :ref:`User Guide <cross_decomposition>`.
663
+
664
+ .. versionadded:: 0.8
665
+
666
+ Parameters
667
+ ----------
668
+ n_components : int, default=2
669
+ Number of components to keep. Should be in `[1, min(n_samples,
670
+ n_features, n_targets)]`.
671
+
672
+ scale : bool, default=True
673
+ Whether to scale `X` and `Y`.
674
+
675
+ algorithm : {'nipals', 'svd'}, default='nipals'
676
+ The algorithm used to estimate the first singular vectors of the
677
+ cross-covariance matrix. 'nipals' uses the power method while 'svd'
678
+ will compute the whole SVD.
679
+
680
+ max_iter : int, default=500
681
+ The maximum number of iterations of the power method when
682
+ `algorithm='nipals'`. Ignored otherwise.
683
+
684
+ tol : float, default=1e-06
685
+ The tolerance used as convergence criteria in the power method: the
686
+ algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
687
+ than `tol`, where `u` corresponds to the left singular vector.
688
+
689
+ copy : bool, default=True
690
+ Whether to copy `X` and `Y` in fit before applying centering, and
691
+ potentially scaling. If False, these operations will be done inplace,
692
+ modifying both arrays.
693
+
694
+ Attributes
695
+ ----------
696
+ x_weights_ : ndarray of shape (n_features, n_components)
697
+ The left singular vectors of the cross-covariance matrices of each
698
+ iteration.
699
+
700
+ y_weights_ : ndarray of shape (n_targets, n_components)
701
+ The right singular vectors of the cross-covariance matrices of each
702
+ iteration.
703
+
704
+ x_loadings_ : ndarray of shape (n_features, n_components)
705
+ The loadings of `X`.
706
+
707
+ y_loadings_ : ndarray of shape (n_targets, n_components)
708
+ The loadings of `Y`.
709
+
710
+ x_rotations_ : ndarray of shape (n_features, n_components)
711
+ The projection matrix used to transform `X`.
712
+
713
+ y_rotations_ : ndarray of shape (n_targets, n_components)
714
+ The projection matrix used to transform `Y`.
715
+
716
+ coef_ : ndarray of shape (n_targets, n_features)
717
+ The coefficients of the linear model such that `Y` is approximated as
718
+ `Y = X @ coef_.T + intercept_`.
719
+
720
+ intercept_ : ndarray of shape (n_targets,)
721
+ The intercepts of the linear model such that `Y` is approximated as
722
+ `Y = X @ coef_.T + intercept_`.
723
+
724
+ .. versionadded:: 1.1
725
+
726
+ n_iter_ : list of shape (n_components,)
727
+ Number of iterations of the power method, for each
728
+ component. Empty if `algorithm='svd'`.
729
+
730
+ n_features_in_ : int
731
+ Number of features seen during :term:`fit`.
732
+
733
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
734
+ Names of features seen during :term:`fit`. Defined only when `X`
735
+ has feature names that are all strings.
736
+
737
+ .. versionadded:: 1.0
738
+
739
+ See Also
740
+ --------
741
+ CCA : Canonical Correlation Analysis.
742
+ PLSSVD : Partial Least Square SVD.
743
+
744
+ Examples
745
+ --------
746
+ >>> from sklearn.cross_decomposition import PLSCanonical
747
+ >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
748
+ >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
749
+ >>> plsca = PLSCanonical(n_components=2)
750
+ >>> plsca.fit(X, Y)
751
+ PLSCanonical()
752
+ >>> X_c, Y_c = plsca.transform(X, Y)
753
+ """
754
+
755
+ _parameter_constraints: dict = {**_PLS._parameter_constraints}
756
+ for param in ("deflation_mode", "mode"):
757
+ _parameter_constraints.pop(param)
758
+
759
+ # This implementation provides the same results that the "plspm" package
760
+ # provided in the R language (R-project), using the function plsca(X, Y).
761
+ # Results are equal or collinear with the function
762
+ # ``pls(..., mode = "canonical")`` of the "mixOmics" package. The
763
+ # difference relies in the fact that mixOmics implementation does not
764
+ # exactly implement the Wold algorithm since it does not normalize
765
+ # y_weights to one.
766
+
767
+ def __init__(
768
+ self,
769
+ n_components=2,
770
+ *,
771
+ scale=True,
772
+ algorithm="nipals",
773
+ max_iter=500,
774
+ tol=1e-06,
775
+ copy=True,
776
+ ):
777
+ super().__init__(
778
+ n_components=n_components,
779
+ scale=scale,
780
+ deflation_mode="canonical",
781
+ mode="A",
782
+ algorithm=algorithm,
783
+ max_iter=max_iter,
784
+ tol=tol,
785
+ copy=copy,
786
+ )
787
+
788
+
789
+ class CCA(_PLS):
790
+ """Canonical Correlation Analysis, also known as "Mode B" PLS.
791
+
792
+ For a comparison between other cross decomposition algorithms, see
793
+ :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.
794
+
795
+ Read more in the :ref:`User Guide <cross_decomposition>`.
796
+
797
+ Parameters
798
+ ----------
799
+ n_components : int, default=2
800
+ Number of components to keep. Should be in `[1, min(n_samples,
801
+ n_features, n_targets)]`.
802
+
803
+ scale : bool, default=True
804
+ Whether to scale `X` and `Y`.
805
+
806
+ max_iter : int, default=500
807
+ The maximum number of iterations of the power method.
808
+
809
+ tol : float, default=1e-06
810
+ The tolerance used as convergence criteria in the power method: the
811
+ algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
812
+ than `tol`, where `u` corresponds to the left singular vector.
813
+
814
+ copy : bool, default=True
815
+ Whether to copy `X` and `Y` in fit before applying centering, and
816
+ potentially scaling. If False, these operations will be done inplace,
817
+ modifying both arrays.
818
+
819
+ Attributes
820
+ ----------
821
+ x_weights_ : ndarray of shape (n_features, n_components)
822
+ The left singular vectors of the cross-covariance matrices of each
823
+ iteration.
824
+
825
+ y_weights_ : ndarray of shape (n_targets, n_components)
826
+ The right singular vectors of the cross-covariance matrices of each
827
+ iteration.
828
+
829
+ x_loadings_ : ndarray of shape (n_features, n_components)
830
+ The loadings of `X`.
831
+
832
+ y_loadings_ : ndarray of shape (n_targets, n_components)
833
+ The loadings of `Y`.
834
+
835
+ x_rotations_ : ndarray of shape (n_features, n_components)
836
+ The projection matrix used to transform `X`.
837
+
838
+ y_rotations_ : ndarray of shape (n_targets, n_components)
839
+ The projection matrix used to transform `Y`.
840
+
841
+ coef_ : ndarray of shape (n_targets, n_features)
842
+ The coefficients of the linear model such that `Y` is approximated as
843
+ `Y = X @ coef_.T + intercept_`.
844
+
845
+ intercept_ : ndarray of shape (n_targets,)
846
+ The intercepts of the linear model such that `Y` is approximated as
847
+ `Y = X @ coef_.T + intercept_`.
848
+
849
+ .. versionadded:: 1.1
850
+
851
+ n_iter_ : list of shape (n_components,)
852
+ Number of iterations of the power method, for each
853
+ component.
854
+
855
+ n_features_in_ : int
856
+ Number of features seen during :term:`fit`.
857
+
858
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
859
+ Names of features seen during :term:`fit`. Defined only when `X`
860
+ has feature names that are all strings.
861
+
862
+ .. versionadded:: 1.0
863
+
864
+ See Also
865
+ --------
866
+ PLSCanonical : Partial Least Squares transformer and regressor.
867
+ PLSSVD : Partial Least Square SVD.
868
+
869
+ Examples
870
+ --------
871
+ >>> from sklearn.cross_decomposition import CCA
872
+ >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
873
+ >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
874
+ >>> cca = CCA(n_components=1)
875
+ >>> cca.fit(X, Y)
876
+ CCA(n_components=1)
877
+ >>> X_c, Y_c = cca.transform(X, Y)
878
+ """
879
+
880
+ _parameter_constraints: dict = {**_PLS._parameter_constraints}
881
+ for param in ("deflation_mode", "mode", "algorithm"):
882
+ _parameter_constraints.pop(param)
883
+
884
+ def __init__(
885
+ self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
886
+ ):
887
+ super().__init__(
888
+ n_components=n_components,
889
+ scale=scale,
890
+ deflation_mode="canonical",
891
+ mode="B",
892
+ algorithm="nipals",
893
+ max_iter=max_iter,
894
+ tol=tol,
895
+ copy=copy,
896
+ )
897
+
898
+
899
+ class PLSSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
900
+ """Partial Least Square SVD.
901
+
902
+ This transformer simply performs a SVD on the cross-covariance matrix
903
+ `X'Y`. It is able to project both the training data `X` and the targets
904
+ `Y`. The training data `X` is projected on the left singular vectors, while
905
+ the targets are projected on the right singular vectors.
906
+
907
+ Read more in the :ref:`User Guide <cross_decomposition>`.
908
+
909
+ .. versionadded:: 0.8
910
+
911
+ Parameters
912
+ ----------
913
+ n_components : int, default=2
914
+ The number of components to keep. Should be in `[1,
915
+ min(n_samples, n_features, n_targets)]`.
916
+
917
+ scale : bool, default=True
918
+ Whether to scale `X` and `Y`.
919
+
920
+ copy : bool, default=True
921
+ Whether to copy `X` and `Y` in fit before applying centering, and
922
+ potentially scaling. If `False`, these operations will be done inplace,
923
+ modifying both arrays.
924
+
925
+ Attributes
926
+ ----------
927
+ x_weights_ : ndarray of shape (n_features, n_components)
928
+ The left singular vectors of the SVD of the cross-covariance matrix.
929
+ Used to project `X` in :meth:`transform`.
930
+
931
+ y_weights_ : ndarray of (n_targets, n_components)
932
+ The right singular vectors of the SVD of the cross-covariance matrix.
933
+ Used to project `X` in :meth:`transform`.
934
+
935
+ n_features_in_ : int
936
+ Number of features seen during :term:`fit`.
937
+
938
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
939
+ Names of features seen during :term:`fit`. Defined only when `X`
940
+ has feature names that are all strings.
941
+
942
+ .. versionadded:: 1.0
943
+
944
+ See Also
945
+ --------
946
+ PLSCanonical : Partial Least Squares transformer and regressor.
947
+ CCA : Canonical Correlation Analysis.
948
+
949
+ Examples
950
+ --------
951
+ >>> import numpy as np
952
+ >>> from sklearn.cross_decomposition import PLSSVD
953
+ >>> X = np.array([[0., 0., 1.],
954
+ ... [1., 0., 0.],
955
+ ... [2., 2., 2.],
956
+ ... [2., 5., 4.]])
957
+ >>> Y = np.array([[0.1, -0.2],
958
+ ... [0.9, 1.1],
959
+ ... [6.2, 5.9],
960
+ ... [11.9, 12.3]])
961
+ >>> pls = PLSSVD(n_components=2).fit(X, Y)
962
+ >>> X_c, Y_c = pls.transform(X, Y)
963
+ >>> X_c.shape, Y_c.shape
964
+ ((4, 2), (4, 2))
965
+ """
966
+
967
+ _parameter_constraints: dict = {
968
+ "n_components": [Interval(Integral, 1, None, closed="left")],
969
+ "scale": ["boolean"],
970
+ "copy": ["boolean"],
971
+ }
972
+
973
+ def __init__(self, n_components=2, *, scale=True, copy=True):
974
+ self.n_components = n_components
975
+ self.scale = scale
976
+ self.copy = copy
977
+
978
+ @_fit_context(prefer_skip_nested_validation=True)
979
+ def fit(self, X, Y):
980
+ """Fit model to data.
981
+
982
+ Parameters
983
+ ----------
984
+ X : array-like of shape (n_samples, n_features)
985
+ Training samples.
986
+
987
+ Y : array-like of shape (n_samples,) or (n_samples, n_targets)
988
+ Targets.
989
+
990
+ Returns
991
+ -------
992
+ self : object
993
+ Fitted estimator.
994
+ """
995
+ check_consistent_length(X, Y)
996
+ X = self._validate_data(
997
+ X, dtype=np.float64, copy=self.copy, ensure_min_samples=2
998
+ )
999
+ Y = check_array(
1000
+ Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False
1001
+ )
1002
+ if Y.ndim == 1:
1003
+ Y = Y.reshape(-1, 1)
1004
+
1005
+ # we'll compute the SVD of the cross-covariance matrix = X.T.dot(Y)
1006
+ # The rank of this matrix is at most min(n_samples, n_features, n_targets), so
1007
+ # n_components cannot be bigger than that.
1008
+ n_components = self.n_components
1009
+ rank_upper_bound = min(X.shape[0], X.shape[1], Y.shape[1])
1010
+ if n_components > rank_upper_bound:
1011
+ raise ValueError(
1012
+ f"`n_components` upper bound is {rank_upper_bound}. "
1013
+ f"Got {n_components} instead. Reduce `n_components`."
1014
+ )
1015
+
1016
+ X, Y, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
1017
+ X, Y, self.scale
1018
+ )
1019
+
1020
+ # Compute SVD of cross-covariance matrix
1021
+ C = np.dot(X.T, Y)
1022
+ U, s, Vt = svd(C, full_matrices=False)
1023
+ U = U[:, :n_components]
1024
+ Vt = Vt[:n_components]
1025
+ U, Vt = svd_flip(U, Vt)
1026
+ V = Vt.T
1027
+
1028
+ self.x_weights_ = U
1029
+ self.y_weights_ = V
1030
+ self._n_features_out = self.x_weights_.shape[1]
1031
+ return self
1032
+
1033
+ def transform(self, X, Y=None):
1034
+ """
1035
+ Apply the dimensionality reduction.
1036
+
1037
+ Parameters
1038
+ ----------
1039
+ X : array-like of shape (n_samples, n_features)
1040
+ Samples to be transformed.
1041
+
1042
+ Y : array-like of shape (n_samples,) or (n_samples, n_targets), \
1043
+ default=None
1044
+ Targets.
1045
+
1046
+ Returns
1047
+ -------
1048
+ x_scores : array-like or tuple of array-like
1049
+ The transformed data `X_transformed` if `Y is not None`,
1050
+ `(X_transformed, Y_transformed)` otherwise.
1051
+ """
1052
+ check_is_fitted(self)
1053
+ X = self._validate_data(X, dtype=np.float64, reset=False)
1054
+ Xr = (X - self._x_mean) / self._x_std
1055
+ x_scores = np.dot(Xr, self.x_weights_)
1056
+ if Y is not None:
1057
+ Y = check_array(Y, input_name="Y", ensure_2d=False, dtype=np.float64)
1058
+ if Y.ndim == 1:
1059
+ Y = Y.reshape(-1, 1)
1060
+ Yr = (Y - self._y_mean) / self._y_std
1061
+ y_scores = np.dot(Yr, self.y_weights_)
1062
+ return x_scores, y_scores
1063
+ return x_scores
1064
+
1065
+ def fit_transform(self, X, y=None):
1066
+ """Learn and apply the dimensionality reduction.
1067
+
1068
+ Parameters
1069
+ ----------
1070
+ X : array-like of shape (n_samples, n_features)
1071
+ Training samples.
1072
+
1073
+ y : array-like of shape (n_samples,) or (n_samples, n_targets), \
1074
+ default=None
1075
+ Targets.
1076
+
1077
+ Returns
1078
+ -------
1079
+ out : array-like or tuple of array-like
1080
+ The transformed data `X_transformed` if `Y is not None`,
1081
+ `(X_transformed, Y_transformed)` otherwise.
1082
+ """
1083
+ return self.fit(X, y).transform(X, y)
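To make the algorithm described above concrete, here is a minimal NumPy/SciPy sketch of the same computation: center and scale both blocks, take the SVD of the cross-covariance matrix `X.T @ Y`, keep the leading singular vectors as weights, and project. The toy arrays reuse the docstring example; the results match the estimator only up to the deterministic sign convention that `svd_flip` enforces in the code above.

import numpy as np
from scipy.linalg import svd

X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
n_components = 2

# Center and scale both blocks (what _center_scale_xy does when scale=True).
Xc = (X - X.mean(axis=0)) / X.std(axis=0, ddof=1)
Yc = (Y - Y.mean(axis=0)) / Y.std(axis=0, ddof=1)

# SVD of the cross-covariance matrix; keep the leading singular vectors.
C = Xc.T @ Yc
U, s, Vt = svd(C, full_matrices=False)
x_weights = U[:, :n_components]    # analogue of x_weights_
y_weights = Vt[:n_components].T    # analogue of y_weights_

# Projections, analogous to PLSSVD(n_components=2).fit(X, Y).transform(X, Y).
x_scores = Xc @ x_weights
y_scores = Yc @ y_weights
print(x_scores.shape, y_scores.shape)  # (4, 2) (4, 2)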
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cross_decomposition/tests/test_pls.py ADDED
@@ -0,0 +1,646 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+ import pytest
5
+ from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
6
+
7
+ from sklearn.cross_decomposition import CCA, PLSSVD, PLSCanonical, PLSRegression
8
+ from sklearn.cross_decomposition._pls import (
9
+ _center_scale_xy,
10
+ _get_first_singular_vectors_power_method,
11
+ _get_first_singular_vectors_svd,
12
+ _svd_flip_1d,
13
+ )
14
+ from sklearn.datasets import load_linnerud, make_regression
15
+ from sklearn.ensemble import VotingRegressor
16
+ from sklearn.exceptions import ConvergenceWarning
17
+ from sklearn.linear_model import LinearRegression
18
+ from sklearn.utils import check_random_state
19
+ from sklearn.utils.extmath import svd_flip
20
+
21
+
22
+ def assert_matrix_orthogonal(M):
23
+ K = np.dot(M.T, M)
24
+ assert_array_almost_equal(K, np.diag(np.diag(K)))
25
+
26
+
27
+ def test_pls_canonical_basics():
28
+ # Basic checks for PLSCanonical
29
+ d = load_linnerud()
30
+ X = d.data
31
+ Y = d.target
32
+
33
+ pls = PLSCanonical(n_components=X.shape[1])
34
+ pls.fit(X, Y)
35
+
36
+ assert_matrix_orthogonal(pls.x_weights_)
37
+ assert_matrix_orthogonal(pls.y_weights_)
38
+ assert_matrix_orthogonal(pls._x_scores)
39
+ assert_matrix_orthogonal(pls._y_scores)
40
+
41
+ # Check X = TP' and Y = UQ'
42
+ T = pls._x_scores
43
+ P = pls.x_loadings_
44
+ U = pls._y_scores
45
+ Q = pls.y_loadings_
46
+ # Need to scale first
47
+ Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(
48
+ X.copy(), Y.copy(), scale=True
49
+ )
50
+ assert_array_almost_equal(Xc, np.dot(T, P.T))
51
+ assert_array_almost_equal(Yc, np.dot(U, Q.T))
52
+
53
+ # Check that rotations on training data lead to scores
54
+ Xt = pls.transform(X)
55
+ assert_array_almost_equal(Xt, pls._x_scores)
56
+ Xt, Yt = pls.transform(X, Y)
57
+ assert_array_almost_equal(Xt, pls._x_scores)
58
+ assert_array_almost_equal(Yt, pls._y_scores)
59
+
60
+ # Check that inverse_transform works
61
+ X_back = pls.inverse_transform(Xt)
62
+ assert_array_almost_equal(X_back, X)
63
+ _, Y_back = pls.inverse_transform(Xt, Yt)
64
+ assert_array_almost_equal(Y_back, Y)
65
+
66
+
67
+ def test_sanity_check_pls_regression():
68
+ # Sanity check for PLSRegression
69
+ # The results were checked against the R packages plspm, mixOmics and pls
70
+
71
+ d = load_linnerud()
72
+ X = d.data
73
+ Y = d.target
74
+
75
+ pls = PLSRegression(n_components=X.shape[1])
76
+ X_trans, _ = pls.fit_transform(X, Y)
77
+
78
+ # FIXME: one would expect y_trans == pls.y_scores_ but this is not
79
+ # the case.
80
+ # xref: https://github.com/scikit-learn/scikit-learn/issues/22420
81
+ assert_allclose(X_trans, pls.x_scores_)
82
+
83
+ expected_x_weights = np.array(
84
+ [
85
+ [-0.61330704, -0.00443647, 0.78983213],
86
+ [-0.74697144, -0.32172099, -0.58183269],
87
+ [-0.25668686, 0.94682413, -0.19399983],
88
+ ]
89
+ )
90
+
91
+ expected_x_loadings = np.array(
92
+ [
93
+ [-0.61470416, -0.24574278, 0.78983213],
94
+ [-0.65625755, -0.14396183, -0.58183269],
95
+ [-0.51733059, 1.00609417, -0.19399983],
96
+ ]
97
+ )
98
+
99
+ expected_y_weights = np.array(
100
+ [
101
+ [+0.32456184, 0.29892183, 0.20316322],
102
+ [+0.42439636, 0.61970543, 0.19320542],
103
+ [-0.13143144, -0.26348971, -0.17092916],
104
+ ]
105
+ )
106
+
107
+ expected_y_loadings = np.array(
108
+ [
109
+ [+0.32456184, 0.29892183, 0.20316322],
110
+ [+0.42439636, 0.61970543, 0.19320542],
111
+ [-0.13143144, -0.26348971, -0.17092916],
112
+ ]
113
+ )
114
+
115
+ assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))
116
+ assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
117
+ assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
118
+ assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
119
+
120
+ # The R / Python difference in the signs should be consistent across
121
+ # loadings, weights, etc.
122
+ x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
123
+ x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
124
+ y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
125
+ y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
126
+ assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
127
+ assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
128
+
129
+
130
+ def test_sanity_check_pls_regression_constant_column_Y():
131
+ # Check behavior when the first column of Y is constant
132
+ # The results are checked against a modified version of plsreg2
133
+ # from the R-package plsdepot
134
+ d = load_linnerud()
135
+ X = d.data
136
+ Y = d.target
137
+ Y[:, 0] = 1
138
+ pls = PLSRegression(n_components=X.shape[1])
139
+ pls.fit(X, Y)
140
+
141
+ expected_x_weights = np.array(
142
+ [
143
+ [-0.6273573, 0.007081799, 0.7786994],
144
+ [-0.7493417, -0.277612681, -0.6011807],
145
+ [-0.2119194, 0.960666981, -0.1794690],
146
+ ]
147
+ )
148
+
149
+ expected_x_loadings = np.array(
150
+ [
151
+ [-0.6273512, -0.22464538, 0.7786994],
152
+ [-0.6643156, -0.09871193, -0.6011807],
153
+ [-0.5125877, 1.01407380, -0.1794690],
154
+ ]
155
+ )
156
+
157
+ expected_y_loadings = np.array(
158
+ [
159
+ [0.0000000, 0.0000000, 0.0000000],
160
+ [0.4357300, 0.5828479, 0.2174802],
161
+ [-0.1353739, -0.2486423, -0.1810386],
162
+ ]
163
+ )
164
+
165
+ assert_array_almost_equal(np.abs(expected_x_weights), np.abs(pls.x_weights_))
166
+ assert_array_almost_equal(np.abs(expected_x_loadings), np.abs(pls.x_loadings_))
167
+ # For the PLSRegression with default parameters, y_loadings == y_weights
168
+ assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
169
+ assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_loadings))
170
+
171
+ x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)
172
+ x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)
173
+ # we ignore the first full-zeros row for y
174
+ y_loadings_sign_flip = np.sign(expected_y_loadings[1:] / pls.y_loadings_[1:])
175
+
176
+ assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)
177
+ assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip)
178
+
179
+
180
+ def test_sanity_check_pls_canonical():
181
+ # Sanity check for PLSCanonical
182
+ # The results were checked against the R-package plspm
183
+
184
+ d = load_linnerud()
185
+ X = d.data
186
+ Y = d.target
187
+
188
+ pls = PLSCanonical(n_components=X.shape[1])
189
+ pls.fit(X, Y)
190
+
191
+ expected_x_weights = np.array(
192
+ [
193
+ [-0.61330704, 0.25616119, -0.74715187],
194
+ [-0.74697144, 0.11930791, 0.65406368],
195
+ [-0.25668686, -0.95924297, -0.11817271],
196
+ ]
197
+ )
198
+
199
+ expected_x_rotations = np.array(
200
+ [
201
+ [-0.61330704, 0.41591889, -0.62297525],
202
+ [-0.74697144, 0.31388326, 0.77368233],
203
+ [-0.25668686, -0.89237972, -0.24121788],
204
+ ]
205
+ )
206
+
207
+ expected_y_weights = np.array(
208
+ [
209
+ [+0.58989127, 0.7890047, 0.1717553],
210
+ [+0.77134053, -0.61351791, 0.16920272],
211
+ [-0.23887670, -0.03267062, 0.97050016],
212
+ ]
213
+ )
214
+
215
+ expected_y_rotations = np.array(
216
+ [
217
+ [+0.58989127, 0.7168115, 0.30665872],
218
+ [+0.77134053, -0.70791757, 0.19786539],
219
+ [-0.23887670, -0.00343595, 0.94162826],
220
+ ]
221
+ )
222
+
223
+ assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))
224
+ assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
225
+ assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))
226
+ assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
227
+
228
+ x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
229
+ x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
230
+ y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
231
+ y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
232
+ assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
233
+ assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
234
+
235
+ assert_matrix_orthogonal(pls.x_weights_)
236
+ assert_matrix_orthogonal(pls.y_weights_)
237
+
238
+ assert_matrix_orthogonal(pls._x_scores)
239
+ assert_matrix_orthogonal(pls._y_scores)
240
+
241
+
242
+ def test_sanity_check_pls_canonical_random():
243
+ # Sanity check for PLSCanonical on random data
244
+ # The results were checked against the R-package plspm
245
+ n = 500
246
+ p_noise = 10
247
+ q_noise = 5
248
+ # 2 latent variables:
249
+ rng = check_random_state(11)
250
+ l1 = rng.normal(size=n)
251
+ l2 = rng.normal(size=n)
252
+ latents = np.array([l1, l1, l2, l2]).T
253
+ X = latents + rng.normal(size=4 * n).reshape((n, 4))
254
+ Y = latents + rng.normal(size=4 * n).reshape((n, 4))
255
+ X = np.concatenate((X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
256
+ Y = np.concatenate((Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
257
+
258
+ pls = PLSCanonical(n_components=3)
259
+ pls.fit(X, Y)
260
+
261
+ expected_x_weights = np.array(
262
+ [
263
+ [0.65803719, 0.19197924, 0.21769083],
264
+ [0.7009113, 0.13303969, -0.15376699],
265
+ [0.13528197, -0.68636408, 0.13856546],
266
+ [0.16854574, -0.66788088, -0.12485304],
267
+ [-0.03232333, -0.04189855, 0.40690153],
268
+ [0.1148816, -0.09643158, 0.1613305],
269
+ [0.04792138, -0.02384992, 0.17175319],
270
+ [-0.06781, -0.01666137, -0.18556747],
271
+ [-0.00266945, -0.00160224, 0.11893098],
272
+ [-0.00849528, -0.07706095, 0.1570547],
273
+ [-0.00949471, -0.02964127, 0.34657036],
274
+ [-0.03572177, 0.0945091, 0.3414855],
275
+ [0.05584937, -0.02028961, -0.57682568],
276
+ [0.05744254, -0.01482333, -0.17431274],
277
+ ]
278
+ )
279
+
280
+ expected_x_loadings = np.array(
281
+ [
282
+ [0.65649254, 0.1847647, 0.15270699],
283
+ [0.67554234, 0.15237508, -0.09182247],
284
+ [0.19219925, -0.67750975, 0.08673128],
285
+ [0.2133631, -0.67034809, -0.08835483],
286
+ [-0.03178912, -0.06668336, 0.43395268],
287
+ [0.15684588, -0.13350241, 0.20578984],
288
+ [0.03337736, -0.03807306, 0.09871553],
289
+ [-0.06199844, 0.01559854, -0.1881785],
290
+ [0.00406146, -0.00587025, 0.16413253],
291
+ [-0.00374239, -0.05848466, 0.19140336],
292
+ [0.00139214, -0.01033161, 0.32239136],
293
+ [-0.05292828, 0.0953533, 0.31916881],
294
+ [0.04031924, -0.01961045, -0.65174036],
295
+ [0.06172484, -0.06597366, -0.1244497],
296
+ ]
297
+ )
298
+
299
+ expected_y_weights = np.array(
300
+ [
301
+ [0.66101097, 0.18672553, 0.22826092],
302
+ [0.69347861, 0.18463471, -0.23995597],
303
+ [0.14462724, -0.66504085, 0.17082434],
304
+ [0.22247955, -0.6932605, -0.09832993],
305
+ [0.07035859, 0.00714283, 0.67810124],
306
+ [0.07765351, -0.0105204, -0.44108074],
307
+ [-0.00917056, 0.04322147, 0.10062478],
308
+ [-0.01909512, 0.06182718, 0.28830475],
309
+ [0.01756709, 0.04797666, 0.32225745],
310
+ ]
311
+ )
312
+
313
+ expected_y_loadings = np.array(
314
+ [
315
+ [0.68568625, 0.1674376, 0.0969508],
316
+ [0.68782064, 0.20375837, -0.1164448],
317
+ [0.11712173, -0.68046903, 0.12001505],
318
+ [0.17860457, -0.6798319, -0.05089681],
319
+ [0.06265739, -0.0277703, 0.74729584],
320
+ [0.0914178, 0.00403751, -0.5135078],
321
+ [-0.02196918, -0.01377169, 0.09564505],
322
+ [-0.03288952, 0.09039729, 0.31858973],
323
+ [0.04287624, 0.05254676, 0.27836841],
324
+ ]
325
+ )
326
+
327
+ assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))
328
+ assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
329
+ assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
330
+ assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
331
+
332
+ x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
333
+ x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
334
+ y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
335
+ y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
336
+ assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
337
+ assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
338
+
339
+ assert_matrix_orthogonal(pls.x_weights_)
340
+ assert_matrix_orthogonal(pls.y_weights_)
341
+
342
+ assert_matrix_orthogonal(pls._x_scores)
343
+ assert_matrix_orthogonal(pls._y_scores)
344
+
345
+
346
+ def test_convergence_fail():
347
+ # Make sure ConvergenceWarning is raised if max_iter is too small
348
+ d = load_linnerud()
349
+ X = d.data
350
+ Y = d.target
351
+ pls_nipals = PLSCanonical(n_components=X.shape[1], max_iter=2)
352
+ with pytest.warns(ConvergenceWarning):
353
+ pls_nipals.fit(X, Y)
354
+
355
+
356
+ @pytest.mark.parametrize("Est", (PLSSVD, PLSRegression, PLSCanonical))
357
+ def test_attributes_shapes(Est):
358
+ # Make sure attributes are of the correct shape depending on n_components
359
+ d = load_linnerud()
360
+ X = d.data
361
+ Y = d.target
362
+ n_components = 2
363
+ pls = Est(n_components=n_components)
364
+ pls.fit(X, Y)
365
+ assert all(
366
+ attr.shape[1] == n_components for attr in (pls.x_weights_, pls.y_weights_)
367
+ )
368
+
369
+
370
+ @pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA))
371
+ def test_univariate_equivalence(Est):
372
+ # Ensure 2D Y with 1 column is equivalent to 1D Y
373
+ d = load_linnerud()
374
+ X = d.data
375
+ Y = d.target
376
+
377
+ est = Est(n_components=1)
378
+ one_d_coeff = est.fit(X, Y[:, 0]).coef_
379
+ two_d_coeff = est.fit(X, Y[:, :1]).coef_
380
+
381
+ assert one_d_coeff.shape == two_d_coeff.shape
382
+ assert_array_almost_equal(one_d_coeff, two_d_coeff)
383
+
384
+
385
+ @pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA, PLSSVD))
386
+ def test_copy(Est):
387
+ # check that the "copy" keyword works
388
+ d = load_linnerud()
389
+ X = d.data
390
+ Y = d.target
391
+ X_orig = X.copy()
392
+
393
+ # copy=True won't modify inplace
394
+ pls = Est(copy=True).fit(X, Y)
395
+ assert_array_equal(X, X_orig)
396
+
397
+ # copy=False will modify inplace
398
+ with pytest.raises(AssertionError):
399
+ Est(copy=False).fit(X, Y)
400
+ assert_array_almost_equal(X, X_orig)
401
+
402
+ if Est is PLSSVD:
403
+ return # PLSSVD does not support copy param in predict or transform
404
+
405
+ X_orig = X.copy()
406
+ with pytest.raises(AssertionError):
407
+ pls.transform(X, Y, copy=False),
408
+ assert_array_almost_equal(X, X_orig)
409
+
410
+ X_orig = X.copy()
411
+ with pytest.raises(AssertionError):
412
+ pls.predict(X, copy=False),
413
+ assert_array_almost_equal(X, X_orig)
414
+
415
+ # Make sure copy=True gives the same transform and predictions as copy=False
416
+ assert_array_almost_equal(
417
+ pls.transform(X, Y, copy=True), pls.transform(X.copy(), Y.copy(), copy=False)
418
+ )
419
+ assert_array_almost_equal(
420
+ pls.predict(X, copy=True), pls.predict(X.copy(), copy=False)
421
+ )
422
+
423
+
424
+ def _generate_test_scale_and_stability_datasets():
425
+ """Generate dataset for test_scale_and_stability"""
426
+ # dataset for non-regression test of issue #7818
427
+ rng = np.random.RandomState(0)
428
+ n_samples = 1000
429
+ n_targets = 5
430
+ n_features = 10
431
+ Q = rng.randn(n_targets, n_features)
432
+ Y = rng.randn(n_samples, n_targets)
433
+ X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1
434
+ X *= 1000
435
+ yield X, Y
436
+
437
+ # Dataset where one of the features is constant
438
+ X, Y = load_linnerud(return_X_y=True)
439
+ # causes X[:, -1].std() to be zero
440
+ X[:, -1] = 1.0
441
+ yield X, Y
442
+
443
+ X = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [3.0, 5.0, 4.0]])
444
+ Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
445
+ yield X, Y
446
+
447
+ # Seeds that provide a non-regression test for #18746, where CCA fails
448
+ seeds = [530, 741]
449
+ for seed in seeds:
450
+ rng = np.random.RandomState(seed)
451
+ X = rng.randn(4, 3)
452
+ Y = rng.randn(4, 2)
453
+ yield X, Y
454
+
455
+
456
+ @pytest.mark.parametrize("Est", (CCA, PLSCanonical, PLSRegression, PLSSVD))
457
+ @pytest.mark.parametrize("X, Y", _generate_test_scale_and_stability_datasets())
458
+ def test_scale_and_stability(Est, X, Y):
459
+ """scale=True is equivalent to scale=False on centered/scaled data
460
+ This allows to check numerical stability over platforms as well"""
461
+
462
+ X_s, Y_s, *_ = _center_scale_xy(X, Y)
463
+
464
+ X_score, Y_score = Est(scale=True).fit_transform(X, Y)
465
+ X_s_score, Y_s_score = Est(scale=False).fit_transform(X_s, Y_s)
466
+
467
+ assert_allclose(X_s_score, X_score, atol=1e-4)
468
+ assert_allclose(Y_s_score, Y_score, atol=1e-4)
469
+
470
+
471
+ @pytest.mark.parametrize("Estimator", (PLSSVD, PLSRegression, PLSCanonical, CCA))
472
+ def test_n_components_upper_bounds(Estimator):
473
+ """Check the validation of `n_components` upper bounds for `PLS` regressors."""
474
+ rng = np.random.RandomState(0)
475
+ X = rng.randn(10, 5)
476
+ Y = rng.randn(10, 3)
477
+ est = Estimator(n_components=10)
478
+ err_msg = "`n_components` upper bound is .*. Got 10 instead. Reduce `n_components`."
479
+ with pytest.raises(ValueError, match=err_msg):
480
+ est.fit(X, Y)
481
+
482
+
483
+ @pytest.mark.parametrize("n_samples, n_features", [(100, 10), (100, 200)])
484
+ def test_singular_value_helpers(n_samples, n_features, global_random_seed):
485
+ # Make sure SVD and power method give approximately the same results
486
+ X, Y = make_regression(
487
+ n_samples, n_features, n_targets=5, random_state=global_random_seed
488
+ )
489
+ u1, v1, _ = _get_first_singular_vectors_power_method(X, Y, norm_y_weights=True)
490
+ u2, v2 = _get_first_singular_vectors_svd(X, Y)
491
+
492
+ _svd_flip_1d(u1, v1)
493
+ _svd_flip_1d(u2, v2)
494
+
495
+ rtol = 1e-3
496
+ # Setting atol because some coordinates are very close to zero
497
+ assert_allclose(u1, u2, atol=u2.max() * rtol)
498
+ assert_allclose(v1, v2, atol=v2.max() * rtol)
499
+
500
+
501
+ def test_one_component_equivalence(global_random_seed):
502
+ # PLSSVD, PLSRegression and PLSCanonical should all be equivalent when
503
+ # n_components is 1
504
+ X, Y = make_regression(100, 10, n_targets=5, random_state=global_random_seed)
505
+ svd = PLSSVD(n_components=1).fit(X, Y).transform(X)
506
+ reg = PLSRegression(n_components=1).fit(X, Y).transform(X)
507
+ canonical = PLSCanonical(n_components=1).fit(X, Y).transform(X)
508
+
509
+ rtol = 1e-3
510
+ # Setting atol because some entries are very close to zero
511
+ assert_allclose(svd, reg, atol=reg.max() * rtol)
512
+ assert_allclose(svd, canonical, atol=canonical.max() * rtol)
513
+
514
+
515
+ def test_svd_flip_1d():
516
+ # Make sure svd_flip_1d is equivalent to svd_flip
517
+ u = np.array([1, -4, 2])
518
+ v = np.array([1, 2, 3])
519
+
520
+ u_expected, v_expected = svd_flip(u.reshape(-1, 1), v.reshape(1, -1))
521
+ _svd_flip_1d(u, v) # inplace
522
+
523
+ assert_allclose(u, u_expected.ravel())
524
+ assert_allclose(u, [-1, 4, -2])
525
+
526
+ assert_allclose(v, v_expected.ravel())
527
+ assert_allclose(v, [-1, -2, -3])
528
+
529
+
530
+ def test_loadings_converges(global_random_seed):
531
+ """Test that CCA converges. Non-regression test for #19549."""
532
+ X, y = make_regression(
533
+ n_samples=200, n_features=20, n_targets=20, random_state=global_random_seed
534
+ )
535
+
536
+ cca = CCA(n_components=10, max_iter=500)
537
+
538
+ with warnings.catch_warnings():
539
+ warnings.simplefilter("error", ConvergenceWarning)
540
+
541
+ cca.fit(X, y)
542
+
543
+ # Loadings converges to reasonable values
544
+ assert np.all(np.abs(cca.x_loadings_) < 1)
545
+
546
+
547
+ def test_pls_constant_y():
548
+ """Checks warning when y is constant. Non-regression test for #19831"""
549
+ rng = np.random.RandomState(42)
550
+ x = rng.rand(100, 3)
551
+ y = np.zeros(100)
552
+
553
+ pls = PLSRegression()
554
+
555
+ msg = "Y residual is constant at iteration"
556
+ with pytest.warns(UserWarning, match=msg):
557
+ pls.fit(x, y)
558
+
559
+ assert_allclose(pls.x_rotations_, 0)
560
+
561
+
562
+ @pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA])
563
+ def test_pls_coef_shape(PLSEstimator):
564
+ """Check the shape of `coef_` attribute.
565
+
566
+ Non-regression test for:
567
+ https://github.com/scikit-learn/scikit-learn/issues/12410
568
+ """
569
+ d = load_linnerud()
570
+ X = d.data
571
+ Y = d.target
572
+
573
+ pls = PLSEstimator(copy=True).fit(X, Y)
574
+
575
+ n_targets, n_features = Y.shape[1], X.shape[1]
576
+ assert pls.coef_.shape == (n_targets, n_features)
577
+
578
+
579
+ @pytest.mark.parametrize("scale", [True, False])
580
+ @pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA])
581
+ def test_pls_prediction(PLSEstimator, scale):
582
+ """Check the behaviour of the prediction function."""
583
+ d = load_linnerud()
584
+ X = d.data
585
+ Y = d.target
586
+
587
+ pls = PLSEstimator(copy=True, scale=scale).fit(X, Y)
588
+ Y_pred = pls.predict(X, copy=True)
589
+
590
+ y_mean = Y.mean(axis=0)
591
+ X_trans = X - X.mean(axis=0)
592
+ if scale:
593
+ X_trans /= X.std(axis=0, ddof=1)
594
+
595
+ assert_allclose(pls.intercept_, y_mean)
596
+ assert_allclose(Y_pred, X_trans @ pls.coef_.T + pls.intercept_)
597
+
598
+
599
+ @pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical])
600
+ def test_pls_feature_names_out(Klass):
601
+ """Check `get_feature_names_out` cross_decomposition module."""
602
+ X, Y = load_linnerud(return_X_y=True)
603
+
604
+ est = Klass().fit(X, Y)
605
+ names_out = est.get_feature_names_out()
606
+
607
+ class_name_lower = Klass.__name__.lower()
608
+ expected_names_out = np.array(
609
+ [f"{class_name_lower}{i}" for i in range(est.x_weights_.shape[1])],
610
+ dtype=object,
611
+ )
612
+ assert_array_equal(names_out, expected_names_out)
613
+
614
+
615
+ @pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical])
616
+ def test_pls_set_output(Klass):
617
+ """Check `set_output` in cross_decomposition module."""
618
+ pd = pytest.importorskip("pandas")
619
+ X, Y = load_linnerud(return_X_y=True, as_frame=True)
620
+
621
+ est = Klass().set_output(transform="pandas").fit(X, Y)
622
+ X_trans, y_trans = est.transform(X, Y)
623
+ assert isinstance(y_trans, np.ndarray)
624
+ assert isinstance(X_trans, pd.DataFrame)
625
+ assert_array_equal(X_trans.columns, est.get_feature_names_out())
626
+
627
+
628
+ def test_pls_regression_fit_1d_y():
629
+ """Check that when fitting with 1d `y`, prediction should also be 1d.
630
+
631
+ Non-regression test for Issue #26549.
632
+ """
633
+ X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])
634
+ y = np.array([2, 6, 12, 20, 30, 42])
635
+ expected = y.copy()
636
+
637
+ plsr = PLSRegression().fit(X, y)
638
+ y_pred = plsr.predict(X)
639
+ assert y_pred.shape == expected.shape
640
+
641
+ # Check that it works in VotingRegressor
642
+ lr = LinearRegression().fit(X, y)
643
+ vr = VotingRegressor([("lr", lr), ("plsr", plsr)])
644
+ y_pred = vr.fit(X, y).predict(X)
645
+ assert y_pred.shape == expected.shape
646
+ assert_allclose(y_pred, expected)
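To run just this test module locally, something along the following lines should work from the root of a scikit-learn development checkout with pytest installed; the path and the `-q`/`-k` flags are illustrative, not the only way to invoke it.

import pytest

# Run only the PLS tests; add e.g. "-k pls_regression" to narrow further.
pytest.main(["-q", "sklearn/cross_decomposition/tests/test_pls.py"])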
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__init__.py ADDED
@@ -0,0 +1,47 @@
1
+ """
2
+ The :mod:`sklearn.feature_selection` module implements feature selection
3
+ algorithms. It currently includes univariate filter selection methods and the
4
+ recursive feature elimination algorithm.
5
+ """
6
+
7
+ from ._base import SelectorMixin
8
+ from ._from_model import SelectFromModel
9
+ from ._mutual_info import mutual_info_classif, mutual_info_regression
10
+ from ._rfe import RFE, RFECV
11
+ from ._sequential import SequentialFeatureSelector
12
+ from ._univariate_selection import (
13
+ GenericUnivariateSelect,
14
+ SelectFdr,
15
+ SelectFpr,
16
+ SelectFwe,
17
+ SelectKBest,
18
+ SelectPercentile,
19
+ chi2,
20
+ f_classif,
21
+ f_oneway,
22
+ f_regression,
23
+ r_regression,
24
+ )
25
+ from ._variance_threshold import VarianceThreshold
26
+
27
+ __all__ = [
28
+ "GenericUnivariateSelect",
29
+ "SequentialFeatureSelector",
30
+ "RFE",
31
+ "RFECV",
32
+ "SelectFdr",
33
+ "SelectFpr",
34
+ "SelectFwe",
35
+ "SelectKBest",
36
+ "SelectFromModel",
37
+ "SelectPercentile",
38
+ "VarianceThreshold",
39
+ "chi2",
40
+ "f_classif",
41
+ "f_oneway",
42
+ "f_regression",
43
+ "r_regression",
44
+ "mutual_info_classif",
45
+ "mutual_info_regression",
46
+ "SelectorMixin",
47
+ ]
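For orientation, here is a minimal sketch of how two of the selectors exported above are typically used; the synthetic data and parameter values are illustrative assumptions rather than recommendations.

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE, SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=20, random_state=0)

# Univariate filter: keep the 5 features with the highest ANOVA F-statistic.
X_kbest = SelectKBest(score_func=f_classif, k=5).fit_transform(X, y)

# Recursive feature elimination around a linear classifier.
rfe = RFE(estimator=LogisticRegression(max_iter=1000), n_features_to_select=5)
X_rfe = rfe.fit_transform(X, y)

print(X_kbest.shape, X_rfe.shape)  # (200, 5) (200, 5)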
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc ADDED
Binary file (8.54 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc ADDED
Binary file (24.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc ADDED
Binary file (9.81 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc ADDED
Binary file (38.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc ADDED
Binary file (4.65 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (4.79 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc ADDED
Binary file (21.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc ADDED
Binary file (6.49 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc ADDED
Binary file (16.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc ADDED
Binary file (8.53 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/__init__.py ADDED
@@ -0,0 +1,100 @@
1
+ """
2
+ The :mod:`sklearn.linear_model` module implements a variety of linear models.
3
+ """
4
+
5
+ # See http://scikit-learn.sourceforge.net/modules/sgd.html and
6
+ # http://scikit-learn.sourceforge.net/modules/linear_model.html for
7
+ # complete documentation.
8
+
9
+ from ._base import LinearRegression
10
+ from ._bayes import ARDRegression, BayesianRidge
11
+ from ._coordinate_descent import (
12
+ ElasticNet,
13
+ ElasticNetCV,
14
+ Lasso,
15
+ LassoCV,
16
+ MultiTaskElasticNet,
17
+ MultiTaskElasticNetCV,
18
+ MultiTaskLasso,
19
+ MultiTaskLassoCV,
20
+ enet_path,
21
+ lasso_path,
22
+ )
23
+ from ._glm import GammaRegressor, PoissonRegressor, TweedieRegressor
24
+ from ._huber import HuberRegressor
25
+ from ._least_angle import (
26
+ Lars,
27
+ LarsCV,
28
+ LassoLars,
29
+ LassoLarsCV,
30
+ LassoLarsIC,
31
+ lars_path,
32
+ lars_path_gram,
33
+ )
34
+ from ._logistic import LogisticRegression, LogisticRegressionCV
35
+ from ._omp import (
36
+ OrthogonalMatchingPursuit,
37
+ OrthogonalMatchingPursuitCV,
38
+ orthogonal_mp,
39
+ orthogonal_mp_gram,
40
+ )
41
+ from ._passive_aggressive import PassiveAggressiveClassifier, PassiveAggressiveRegressor
42
+ from ._perceptron import Perceptron
43
+ from ._quantile import QuantileRegressor
44
+ from ._ransac import RANSACRegressor
45
+ from ._ridge import Ridge, RidgeClassifier, RidgeClassifierCV, RidgeCV, ridge_regression
46
+ from ._sgd_fast import Hinge, Huber, Log, ModifiedHuber, SquaredLoss
47
+ from ._stochastic_gradient import SGDClassifier, SGDOneClassSVM, SGDRegressor
48
+ from ._theil_sen import TheilSenRegressor
49
+
50
+ __all__ = [
51
+ "ARDRegression",
52
+ "BayesianRidge",
53
+ "ElasticNet",
54
+ "ElasticNetCV",
55
+ "Hinge",
56
+ "Huber",
57
+ "HuberRegressor",
58
+ "Lars",
59
+ "LarsCV",
60
+ "Lasso",
61
+ "LassoCV",
62
+ "LassoLars",
63
+ "LassoLarsCV",
64
+ "LassoLarsIC",
65
+ "LinearRegression",
66
+ "Log",
67
+ "LogisticRegression",
68
+ "LogisticRegressionCV",
69
+ "ModifiedHuber",
70
+ "MultiTaskElasticNet",
71
+ "MultiTaskElasticNetCV",
72
+ "MultiTaskLasso",
73
+ "MultiTaskLassoCV",
74
+ "OrthogonalMatchingPursuit",
75
+ "OrthogonalMatchingPursuitCV",
76
+ "PassiveAggressiveClassifier",
77
+ "PassiveAggressiveRegressor",
78
+ "Perceptron",
79
+ "QuantileRegressor",
80
+ "Ridge",
81
+ "RidgeCV",
82
+ "RidgeClassifier",
83
+ "RidgeClassifierCV",
84
+ "SGDClassifier",
85
+ "SGDRegressor",
86
+ "SGDOneClassSVM",
87
+ "SquaredLoss",
88
+ "TheilSenRegressor",
89
+ "enet_path",
90
+ "lars_path",
91
+ "lars_path_gram",
92
+ "lasso_path",
93
+ "orthogonal_mp",
94
+ "orthogonal_mp_gram",
95
+ "ridge_regression",
96
+ "RANSACRegressor",
97
+ "PoissonRegressor",
98
+ "GammaRegressor",
99
+ "TweedieRegressor",
100
+ ]
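As a quick illustration of a few of the estimators exported above, the following sketch fits three of them on the same synthetic regression problem; the data and the regularization strengths are illustrative assumptions.

import numpy as np
from sklearn.linear_model import Lasso, LinearRegression, Ridge

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.5, 0.0, -2.0]) + 0.1 * rng.randn(50)

# Unpenalized, l2-penalized and l1-penalized least squares on the same data.
for model in (LinearRegression(), Ridge(alpha=1.0), Lasso(alpha=0.1)):
    model.fit(X, y)
    print(type(model).__name__, np.round(model.coef_, 2))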
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_base.py ADDED
@@ -0,0 +1,814 @@
1
+ """
2
+ Generalized Linear Models.
3
+ """
4
+
5
+ # Author: Alexandre Gramfort <[email protected]>
6
+ # Fabian Pedregosa <[email protected]>
7
+ # Olivier Grisel <[email protected]>
8
+ # Vincent Michel <[email protected]>
9
+ # Peter Prettenhofer <[email protected]>
10
+ # Mathieu Blondel <[email protected]>
11
+ # Lars Buitinck
12
+ # Maryan Morel <[email protected]>
13
+ # Giorgio Patrini <[email protected]>
14
+ # Maria Telenczuk <https://github.com/maikia>
15
+ # License: BSD 3 clause
16
+
17
+ import numbers
18
+ import warnings
19
+ from abc import ABCMeta, abstractmethod
20
+ from numbers import Integral
21
+
22
+ import numpy as np
23
+ import scipy.sparse as sp
24
+ from scipy import linalg, optimize, sparse
25
+ from scipy.sparse.linalg import lsqr
26
+ from scipy.special import expit
27
+
28
+ from ..base import (
29
+ BaseEstimator,
30
+ ClassifierMixin,
31
+ MultiOutputMixin,
32
+ RegressorMixin,
33
+ _fit_context,
34
+ )
35
+ from ..utils import check_array, check_random_state
36
+ from ..utils._array_api import get_namespace
37
+ from ..utils._seq_dataset import (
38
+ ArrayDataset32,
39
+ ArrayDataset64,
40
+ CSRDataset32,
41
+ CSRDataset64,
42
+ )
43
+ from ..utils.extmath import safe_sparse_dot
44
+ from ..utils.parallel import Parallel, delayed
45
+ from ..utils.sparsefuncs import mean_variance_axis
46
+ from ..utils.validation import FLOAT_DTYPES, _check_sample_weight, check_is_fitted
47
+
48
+ # TODO: bayesian_ridge_regression and bayesian_regression_ard
49
+ # should be squashed into its respective objects.
50
+
51
+ SPARSE_INTERCEPT_DECAY = 0.01
52
+ # For sparse data intercept updates are scaled by this decay factor to avoid
53
+ # intercept oscillation.
54
+
55
+
56
+ def make_dataset(X, y, sample_weight, random_state=None):
57
+ """Create ``Dataset`` abstraction for sparse and dense inputs.
58
+
59
+ This also returns the ``intercept_decay`` which is different
60
+ for sparse datasets.
61
+
62
+ Parameters
63
+ ----------
64
+ X : array-like, shape (n_samples, n_features)
65
+ Training data
66
+
67
+ y : array-like, shape (n_samples, )
68
+ Target values.
69
+
70
+ sample_weight : numpy array of shape (n_samples,)
71
+ The weight of each sample
72
+
73
+ random_state : int, RandomState instance or None (default)
74
+ Determines random number generation for dataset random sampling. It is not
75
+ used for dataset shuffling.
76
+ Pass an int for reproducible output across multiple function calls.
77
+ See :term:`Glossary <random_state>`.
78
+
79
+ Returns
80
+ -------
81
+ dataset
82
+ The ``Dataset`` abstraction
83
+ intercept_decay
84
+ The intercept decay
85
+ """
86
+
87
+ rng = check_random_state(random_state)
88
+ # seed should never be 0 in SequentialDataset64
89
+ seed = rng.randint(1, np.iinfo(np.int32).max)
90
+
91
+ if X.dtype == np.float32:
92
+ CSRData = CSRDataset32
93
+ ArrayData = ArrayDataset32
94
+ else:
95
+ CSRData = CSRDataset64
96
+ ArrayData = ArrayDataset64
97
+
98
+ if sp.issparse(X):
99
+ dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)
100
+ intercept_decay = SPARSE_INTERCEPT_DECAY
101
+ else:
102
+ X = np.ascontiguousarray(X)
103
+ dataset = ArrayData(X, y, sample_weight, seed=seed)
104
+ intercept_decay = 1.0
105
+
106
+ return dataset, intercept_decay
107
+
108
+
109
+ def _preprocess_data(
110
+ X,
111
+ y,
112
+ *,
113
+ fit_intercept,
114
+ copy=True,
115
+ copy_y=True,
116
+ sample_weight=None,
117
+ check_input=True,
118
+ ):
119
+ """Common data preprocessing for fitting linear models.
120
+
121
+ This helper is in charge of the following steps:
122
+
123
+ - Ensure that `sample_weight` is an array or `None`.
124
+ - If `check_input=True`, perform standard input validation of `X`, `y`.
125
+ - Perform copies if requested to avoid side-effects in case of inplace
126
+ modifications of the input.
127
+
128
+ Then, if `fit_intercept=True` this preprocessing centers both `X` and `y` as
129
+ follows:
130
+ - if `X` is dense, center the data and
131
+ store the mean vector in `X_offset`.
132
+ - if `X` is sparse, store the mean in `X_offset`
133
+ without centering `X`. The centering is expected to be handled by the
134
+ linear solver where appropriate.
135
+ - in either case, always center `y` and store the mean in `y_offset`.
136
+ - both `X_offset` and `y_offset` are always weighted by `sample_weight`
137
+ if not set to `None`.
138
+
139
+ If `fit_intercept=False`, no centering is performed and `X_offset`, `y_offset`
140
+ are set to zero.
141
+
142
+ Returns
143
+ -------
144
+ X_out : {ndarray, sparse matrix} of shape (n_samples, n_features)
145
+ If copy=True a copy of the input X is triggered, otherwise operations are
146
+ inplace.
147
+ If input X is dense, then X_out is centered.
148
+ y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
149
+ Centered version of y. Possibly performed inplace on input y depending
150
+ on the copy_y parameter.
151
+ X_offset : ndarray of shape (n_features,)
152
+ The mean per column of input X.
153
+ y_offset : float or ndarray of shape (n_features,)
154
+ X_scale : ndarray of shape (n_features,)
155
+ Always an array of ones. TODO: refactor the code base to make it
156
+ possible to remove this unused variable.
157
+ """
158
+ if isinstance(sample_weight, numbers.Number):
159
+ sample_weight = None
160
+ if sample_weight is not None:
161
+ sample_weight = np.asarray(sample_weight)
162
+
163
+ if check_input:
164
+ X = check_array(X, copy=copy, accept_sparse=["csr", "csc"], dtype=FLOAT_DTYPES)
165
+ y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False)
166
+ else:
167
+ y = y.astype(X.dtype, copy=copy_y)
168
+ if copy:
169
+ if sp.issparse(X):
170
+ X = X.copy()
171
+ else:
172
+ X = X.copy(order="K")
173
+
174
+ if fit_intercept:
175
+ if sp.issparse(X):
176
+ X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight)
177
+ else:
178
+ X_offset = np.average(X, axis=0, weights=sample_weight)
179
+
180
+ X_offset = X_offset.astype(X.dtype, copy=False)
181
+ X -= X_offset
182
+
183
+ y_offset = np.average(y, axis=0, weights=sample_weight)
184
+ y -= y_offset
185
+ else:
186
+ X_offset = np.zeros(X.shape[1], dtype=X.dtype)
187
+ if y.ndim == 1:
188
+ y_offset = X.dtype.type(0)
189
+ else:
190
+ y_offset = np.zeros(y.shape[1], dtype=X.dtype)
191
+
192
+ # XXX: X_scale is no longer needed. It is an historic artifact from the
193
+ # time when linear models exposed the normalize parameter.
194
+ X_scale = np.ones(X.shape[1], dtype=X.dtype)
195
+ return X, y, X_offset, y_offset, X_scale
196
+
197
+
198
+ # TODO: _rescale_data should be factored into _preprocess_data.
199
+ # Currently, the fact that sag implements its own way to deal with
200
+ # sample_weight makes the refactoring tricky.
201
+
202
+
203
+ def _rescale_data(X, y, sample_weight, inplace=False):
204
+ """Rescale data sample-wise by square root of sample_weight.
205
+
206
+ For many linear models, this enables easy support for sample_weight because
207
+
208
+ (y - X w)' S (y - X w)
209
+
210
+ with S = diag(sample_weight) becomes
211
+
212
+ ||y_rescaled - X_rescaled w||_2^2
213
+
214
+ when setting
215
+
216
+ y_rescaled = sqrt(S) y
217
+ X_rescaled = sqrt(S) X
218
+
219
+ Returns
220
+ -------
221
+ X_rescaled : {array-like, sparse matrix}
222
+
223
+ y_rescaled : {array-like, sparse matrix}
224
+ """
225
+ # Assume that _validate_data and _check_sample_weight have been called by
226
+ # the caller.
227
+ n_samples = X.shape[0]
228
+ sample_weight_sqrt = np.sqrt(sample_weight)
229
+
230
+ if sp.issparse(X) or sp.issparse(y):
231
+ sw_matrix = sparse.dia_matrix(
232
+ (sample_weight_sqrt, 0), shape=(n_samples, n_samples)
233
+ )
234
+
235
+ if sp.issparse(X):
236
+ X = safe_sparse_dot(sw_matrix, X)
237
+ else:
238
+ if inplace:
239
+ X *= sample_weight_sqrt[:, np.newaxis]
240
+ else:
241
+ X = X * sample_weight_sqrt[:, np.newaxis]
242
+
243
+ if sp.issparse(y):
244
+ y = safe_sparse_dot(sw_matrix, y)
245
+ else:
246
+ if inplace:
247
+ if y.ndim == 1:
248
+ y *= sample_weight_sqrt
249
+ else:
250
+ y *= sample_weight_sqrt[:, np.newaxis]
251
+ else:
252
+ if y.ndim == 1:
253
+ y = y * sample_weight_sqrt
254
+ else:
255
+ y = y * sample_weight_sqrt[:, np.newaxis]
256
+ return X, y, sample_weight_sqrt
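The identity stated in the docstring above, that minimizing (y - Xw)' S (y - Xw) with S = diag(sample_weight) is equivalent to ordinary least squares on sqrt(S) X and sqrt(S) y, can be checked numerically with a short sketch like this (the random data is illustrative):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(30, 2)
y = rng.randn(30)
w = rng.uniform(0.5, 2.0, size=30)

# Weighted least squares solved from the normal equations (X' S X) c = X' S y ...
coef_weighted = np.linalg.solve(X.T @ (w[:, None] * X), X.T @ (w * y))

# ... equals ordinary least squares on the rescaled data sqrt(S) X, sqrt(S) y.
sw = np.sqrt(w)
coef_rescaled, *_ = np.linalg.lstsq(sw[:, None] * X, sw * y, rcond=None)

assert np.allclose(coef_weighted, coef_rescaled)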
257
+
258
+
259
+ class LinearModel(BaseEstimator, metaclass=ABCMeta):
260
+ """Base class for Linear Models"""
261
+
262
+ @abstractmethod
263
+ def fit(self, X, y):
264
+ """Fit model."""
265
+
266
+ def _decision_function(self, X):
267
+ check_is_fitted(self)
268
+
269
+ X = self._validate_data(X, accept_sparse=["csr", "csc", "coo"], reset=False)
270
+ return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
271
+
272
+ def predict(self, X):
273
+ """
274
+ Predict using the linear model.
275
+
276
+ Parameters
277
+ ----------
278
+ X : array-like or sparse matrix, shape (n_samples, n_features)
279
+ Samples.
280
+
281
+ Returns
282
+ -------
283
+ C : array, shape (n_samples,)
284
+ Returns predicted values.
285
+ """
286
+ return self._decision_function(X)
287
+
288
+ def _set_intercept(self, X_offset, y_offset, X_scale):
289
+ """Set the intercept_"""
290
+ if self.fit_intercept:
291
+ # We always want coef_.dtype=X.dtype. For instance, X.dtype can differ from
292
+ # coef_.dtype if warm_start=True.
293
+ self.coef_ = np.divide(self.coef_, X_scale, dtype=X_scale.dtype)
294
+ self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
295
+ else:
296
+ self.intercept_ = 0.0
297
+
298
+ def _more_tags(self):
299
+ return {"requires_y": True}
300
+
301
+
302
+ # XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
303
+ # Maybe the n_features checking can be moved to LinearModel.
304
+ class LinearClassifierMixin(ClassifierMixin):
305
+ """Mixin for linear classifiers.
306
+
307
+ Handles prediction for sparse and dense X.
308
+ """
309
+
310
+ def decision_function(self, X):
311
+ """
312
+ Predict confidence scores for samples.
313
+
314
+ The confidence score for a sample is proportional to the signed
315
+ distance of that sample to the hyperplane.
316
+
317
+ Parameters
318
+ ----------
319
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
320
+ The data matrix for which we want to get the confidence scores.
321
+
322
+ Returns
323
+ -------
324
+ scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
325
+ Confidence scores per `(n_samples, n_classes)` combination. In the
326
+ binary case, confidence score for `self.classes_[1]` where >0 means
327
+ this class would be predicted.
328
+ """
329
+ check_is_fitted(self)
330
+ xp, _ = get_namespace(X)
331
+
332
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
333
+ scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
334
+ return xp.reshape(scores, (-1,)) if scores.shape[1] == 1 else scores
335
+
336
+ def predict(self, X):
337
+ """
338
+ Predict class labels for samples in X.
339
+
340
+ Parameters
341
+ ----------
342
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
343
+ The data matrix for which we want to get the predictions.
344
+
345
+ Returns
346
+ -------
347
+ y_pred : ndarray of shape (n_samples,)
348
+ Vector containing the class labels for each sample.
349
+ """
350
+ xp, _ = get_namespace(X)
351
+ scores = self.decision_function(X)
352
+ if len(scores.shape) == 1:
353
+ indices = xp.astype(scores > 0, int)
354
+ else:
355
+ indices = xp.argmax(scores, axis=1)
356
+
357
+ return xp.take(self.classes_, indices, axis=0)
358
+
359
+ def _predict_proba_lr(self, X):
360
+ """Probability estimation for OvR logistic regression.
361
+
362
+ Positive class probabilities are computed as
363
+ 1. / (1. + np.exp(-self.decision_function(X)));
364
+ multiclass is handled by normalizing that over all classes.
365
+ """
366
+ prob = self.decision_function(X)
367
+ expit(prob, out=prob)
368
+ if prob.ndim == 1:
369
+ return np.vstack([1 - prob, prob]).T
370
+ else:
371
+ # OvR normalization, like LibLinear's predict_probability
372
+ prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
373
+ return prob
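A short numerical sketch of the one-vs-rest probability computation described in the docstring above; the score values are made up for illustration.

import numpy as np
from scipy.special import expit

# Decision scores for 3 samples over 4 classes (illustrative values).
scores = np.array([[ 2.0, -1.0, 0.5,  0.0],
                   [-0.3,  1.2, 0.1, -2.0],
                   [ 0.4,  0.0, 0.0,  1.0]])

prob = expit(scores)                     # 1 / (1 + exp(-scores))
prob /= prob.sum(axis=1, keepdims=True)  # OvR normalization over classes
print(prob.sum(axis=1))                  # each row now sums to 1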
374
+
375
+
376
+ class SparseCoefMixin:
377
+ """Mixin for converting coef_ to and from CSR format.
378
+
379
+ L1-regularizing estimators should inherit this.
380
+ """
381
+
382
+ def densify(self):
383
+ """
384
+ Convert coefficient matrix to dense array format.
385
+
386
+ Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
387
+ default format of ``coef_`` and is required for fitting, so calling
388
+ this method is only required on models that have previously been
389
+ sparsified; otherwise, it is a no-op.
390
+
391
+ Returns
392
+ -------
393
+ self
394
+ Fitted estimator.
395
+ """
396
+ msg = "Estimator, %(name)s, must be fitted before densifying."
397
+ check_is_fitted(self, msg=msg)
398
+ if sp.issparse(self.coef_):
399
+ self.coef_ = self.coef_.toarray()
400
+ return self
401
+
402
+ def sparsify(self):
403
+ """
404
+ Convert coefficient matrix to sparse format.
405
+
406
+ Converts the ``coef_`` member to a scipy.sparse matrix, which for
407
+ L1-regularized models can be much more memory- and storage-efficient
408
+ than the usual numpy.ndarray representation.
409
+
410
+ The ``intercept_`` member is not converted.
411
+
412
+ Returns
413
+ -------
414
+ self
415
+ Fitted estimator.
416
+
417
+ Notes
418
+ -----
419
+ For non-sparse models, i.e. when there are not many zeros in ``coef_``,
420
+ this may actually *increase* memory usage, so use this method with
421
+ care. A rule of thumb is that the number of zero elements, which can
422
+ be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
423
+ to provide significant benefits.
424
+
425
+ After calling this method, further fitting with the partial_fit
426
+ method (if any) will not work until you call densify.
427
+ """
428
+ msg = "Estimator, %(name)s, must be fitted before sparsifying."
429
+ check_is_fitted(self, msg=msg)
430
+ self.coef_ = sp.csr_matrix(self.coef_)
431
+ return self
432
+
433
+
434
+ class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
435
+ """
436
+ Ordinary least squares Linear Regression.
437
+
438
+ LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
439
+ to minimize the residual sum of squares between the observed targets in
440
+ the dataset, and the targets predicted by the linear approximation.
441
+
442
+ Parameters
443
+ ----------
444
+ fit_intercept : bool, default=True
445
+ Whether to calculate the intercept for this model. If set
446
+ to False, no intercept will be used in calculations
447
+ (i.e. data is expected to be centered).
448
+
449
+ copy_X : bool, default=True
450
+ If True, X will be copied; else, it may be overwritten.
451
+
452
+ n_jobs : int, default=None
453
+ The number of jobs to use for the computation. This will only provide
454
+ speedup in case of sufficiently large problems, that is if firstly
455
+ `n_targets > 1` and secondly `X` is sparse or if `positive` is set
456
+ to `True`. ``None`` means 1 unless in a
457
+ :obj:`joblib.parallel_backend` context. ``-1`` means using all
458
+ processors. See :term:`Glossary <n_jobs>` for more details.
459
+
460
+ positive : bool, default=False
461
+ When set to ``True``, forces the coefficients to be positive. This
462
+ option is only supported for dense arrays.
463
+
464
+ .. versionadded:: 0.24
465
+
466
+ Attributes
467
+ ----------
468
+ coef_ : array of shape (n_features, ) or (n_targets, n_features)
469
+ Estimated coefficients for the linear regression problem.
470
+ If multiple targets are passed during the fit (y 2D), this
471
+ is a 2D array of shape (n_targets, n_features), while if only
472
+ one target is passed, this is a 1D array of length n_features.
473
+
474
+ rank_ : int
475
+ Rank of matrix `X`. Only available when `X` is dense.
476
+
477
+ singular_ : array of shape (min(n_samples, n_features),)
478
+ Singular values of `X`. Only available when `X` is dense.
479
+
480
+ intercept_ : float or array of shape (n_targets,)
481
+ Independent term in the linear model. Set to 0.0 if
482
+ `fit_intercept = False`.
483
+
484
+ n_features_in_ : int
485
+ Number of features seen during :term:`fit`.
486
+
487
+ .. versionadded:: 0.24
488
+
489
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
490
+ Names of features seen during :term:`fit`. Defined only when `X`
491
+ has feature names that are all strings.
492
+
493
+ .. versionadded:: 1.0
494
+
495
+ See Also
496
+ --------
497
+ Ridge : Ridge regression addresses some of the
498
+ problems of Ordinary Least Squares by imposing a penalty on the
499
+ size of the coefficients with l2 regularization.
500
+ Lasso : The Lasso is a linear model that estimates
501
+ sparse coefficients with l1 regularization.
502
+ ElasticNet : Elastic-Net is a linear regression
503
+ model trained with both l1 and l2 -norm regularization of the
504
+ coefficients.
505
+
506
+ Notes
507
+ -----
508
+ From the implementation point of view, this is just plain Ordinary
509
+ Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares
510
+ (scipy.optimize.nnls) wrapped as a predictor object.
511
+
512
+ Examples
513
+ --------
514
+ >>> import numpy as np
515
+ >>> from sklearn.linear_model import LinearRegression
516
+ >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
517
+ >>> # y = 1 * x_0 + 2 * x_1 + 3
518
+ >>> y = np.dot(X, np.array([1, 2])) + 3
519
+ >>> reg = LinearRegression().fit(X, y)
520
+ >>> reg.score(X, y)
521
+ 1.0
522
+ >>> reg.coef_
523
+ array([1., 2.])
524
+ >>> reg.intercept_
525
+ 3.0...
526
+ >>> reg.predict(np.array([[3, 5]]))
527
+ array([16.])
528
+ """
529
+
530
+ _parameter_constraints: dict = {
531
+ "fit_intercept": ["boolean"],
532
+ "copy_X": ["boolean"],
533
+ "n_jobs": [None, Integral],
534
+ "positive": ["boolean"],
535
+ }
536
+
537
+ def __init__(
538
+ self,
539
+ *,
540
+ fit_intercept=True,
541
+ copy_X=True,
542
+ n_jobs=None,
543
+ positive=False,
544
+ ):
545
+ self.fit_intercept = fit_intercept
546
+ self.copy_X = copy_X
547
+ self.n_jobs = n_jobs
548
+ self.positive = positive
549
+
550
+ @_fit_context(prefer_skip_nested_validation=True)
551
+ def fit(self, X, y, sample_weight=None):
552
+ """
553
+ Fit linear model.
554
+
555
+ Parameters
556
+ ----------
557
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
558
+ Training data.
559
+
560
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
561
+ Target values. Will be cast to X's dtype if necessary.
562
+
563
+ sample_weight : array-like of shape (n_samples,), default=None
564
+ Individual weights for each sample.
565
+
566
+ .. versionadded:: 0.17
567
+ parameter *sample_weight* support to LinearRegression.
568
+
569
+ Returns
570
+ -------
571
+ self : object
572
+ Fitted Estimator.
573
+ """
574
+ n_jobs_ = self.n_jobs
575
+
576
+ accept_sparse = False if self.positive else ["csr", "csc", "coo"]
577
+
578
+ X, y = self._validate_data(
579
+ X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True
580
+ )
581
+
582
+ has_sw = sample_weight is not None
583
+ if has_sw:
584
+ sample_weight = _check_sample_weight(
585
+ sample_weight, X, dtype=X.dtype, only_non_negative=True
586
+ )
587
+
588
+ # Note that neither _rescale_data nor the rest of the fit method of
589
+ # LinearRegression can benefit from in-place operations when X is a
590
+ # sparse matrix. Therefore, let's not copy X when it is sparse.
591
+ copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X)
592
+
593
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
594
+ X,
595
+ y,
596
+ fit_intercept=self.fit_intercept,
597
+ copy=copy_X_in_preprocess_data,
598
+ sample_weight=sample_weight,
599
+ )
600
+
601
+ if has_sw:
602
+ # Sample weight can be implemented via a simple rescaling. Note
603
+ # that we safely do inplace rescaling when _preprocess_data has
604
+ # already made a copy if requested.
605
+ X, y, sample_weight_sqrt = _rescale_data(
606
+ X, y, sample_weight, inplace=copy_X_in_preprocess_data
607
+ )
608
+
609
+ if self.positive:
610
+ if y.ndim < 2:
611
+ self.coef_ = optimize.nnls(X, y)[0]
612
+ else:
613
+ # scipy.optimize.nnls cannot handle y with shape (M, K)
614
+ outs = Parallel(n_jobs=n_jobs_)(
615
+ delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])
616
+ )
617
+ self.coef_ = np.vstack([out[0] for out in outs])
618
+ elif sp.issparse(X):
619
+ X_offset_scale = X_offset / X_scale
620
+
621
+ if has_sw:
622
+
623
+ def matvec(b):
624
+ return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)
625
+
626
+ def rmatvec(b):
627
+ return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)
628
+
629
+ else:
630
+
631
+ def matvec(b):
632
+ return X.dot(b) - b.dot(X_offset_scale)
633
+
634
+ def rmatvec(b):
635
+ return X.T.dot(b) - X_offset_scale * b.sum()
636
+
637
+ X_centered = sparse.linalg.LinearOperator(
638
+ shape=X.shape, matvec=matvec, rmatvec=rmatvec
639
+ )
640
+
641
+ if y.ndim < 2:
642
+ self.coef_ = lsqr(X_centered, y)[0]
643
+ else:
644
+ # sparse_lstsq cannot handle y with shape (M, K)
645
+ outs = Parallel(n_jobs=n_jobs_)(
646
+ delayed(lsqr)(X_centered, y[:, j].ravel())
647
+ for j in range(y.shape[1])
648
+ )
649
+ self.coef_ = np.vstack([out[0] for out in outs])
650
+ else:
651
+ self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y)
652
+ self.coef_ = self.coef_.T
653
+
654
+ if y.ndim == 1:
655
+ self.coef_ = np.ravel(self.coef_)
656
+ self._set_intercept(X_offset, y_offset, X_scale)
657
+ return self
658
+
659
+
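
The sample-weight handling in `fit` above relies on a standard identity: weighted least squares on (X, y, w) is the same problem as ordinary least squares on sqrt(w)-rescaled data, which is exactly what `_rescale_data` produces. A minimal numpy sketch of that equivalence, on synthetic data with illustrative names:

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(20)
    w = rng.uniform(0.5, 2.0, size=20)          # sample weights

    # Weighted normal equations: (X^T W X) beta = X^T W y
    beta_wls = np.linalg.solve(X.T @ (w[:, None] * X), X.T @ (w * y))

    # Plain least squares on sqrt(w)-rescaled rows gives the same solution
    sw = np.sqrt(w)
    beta_rescaled, *_ = np.linalg.lstsq(sw[:, None] * X, sw * y, rcond=None)

    assert np.allclose(beta_wls, beta_rescaled)
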
660
+ def _check_precomputed_gram_matrix(
661
+ X, precompute, X_offset, X_scale, rtol=None, atol=1e-5
662
+ ):
663
+ """Computes a single element of the gram matrix and compares it to
664
+ the corresponding element of the user supplied gram matrix.
665
+
666
+ If the values do not match a ValueError will be thrown.
667
+
668
+ Parameters
669
+ ----------
670
+ X : ndarray of shape (n_samples, n_features)
671
+ Data array.
672
+
673
+ precompute : array-like of shape (n_features, n_features)
674
+ User-supplied gram matrix.
675
+
676
+ X_offset : ndarray of shape (n_features,)
677
+ Array of feature means used to center design matrix.
678
+
679
+ X_scale : ndarray of shape (n_features,)
680
+ Array of feature scale factors used to normalize design matrix.
681
+
682
+ rtol : float, default=None
683
+ Relative tolerance; see :func:`numpy.allclose`.
684
+ If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7
685
+ otherwise.
686
+
687
+ atol : float, default=1e-5
688
+ Absolute tolerance; see :func:`numpy.allclose`. Note that the default
689
+ here is more tolerant than the default for
690
+ :func:`numpy.testing.assert_allclose`, where `atol=0`.
691
+
692
+ Raises
693
+ ------
694
+ ValueError
695
+ Raised when the provided Gram matrix is not consistent.
696
+ """
697
+
698
+ n_features = X.shape[1]
699
+ f1 = n_features // 2
700
+ f2 = min(f1 + 1, n_features - 1)
701
+
702
+ v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
703
+ v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
704
+
705
+ expected = np.dot(v1, v2)
706
+ actual = precompute[f1, f2]
707
+
708
+ dtypes = [precompute.dtype, expected.dtype]
709
+ if rtol is None:
710
+ rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
711
+ rtol = max(rtols)
712
+
713
+ if not np.isclose(expected, actual, rtol=rtol, atol=atol):
714
+ raise ValueError(
715
+ "Gram matrix passed in via 'precompute' parameter "
716
+ "did not pass validation when a single element was "
717
+ "checked - please check that it was computed "
718
+ f"properly. For element ({f1},{f2}) we computed "
719
+ f"{expected} but the user-supplied value was "
720
+ f"{actual}."
721
+ )
722
+
723
+
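
The helper below (`_pre_fit`) may hand a user-supplied Gram matrix to this spot check. For centered data and unit `X_scale`, a Gram matrix consistent with the preprocessing is simply `Xc.T @ Xc`; the check compares one off-diagonal entry of it with the dot product of the corresponding re-centered columns. A small sketch of a matrix that passes, with illustrative names and the unit-scale assumption made explicit:

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)

    X_offset = X.mean(axis=0)
    X_scale = np.ones(X.shape[1])     # assume no feature scaling
    Xc = X - X_offset

    precompute = Xc.T @ Xc            # Gram matrix consistent with centering

    f1 = X.shape[1] // 2
    f2 = min(f1 + 1, X.shape[1] - 1)
    v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
    v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
    assert np.isclose(precompute[f1, f2], v1 @ v2)
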
724
+ def _pre_fit(
725
+ X,
726
+ y,
727
+ Xy,
728
+ precompute,
729
+ fit_intercept,
730
+ copy,
731
+ check_input=True,
732
+ sample_weight=None,
733
+ ):
734
+ """Function used at beginning of fit in linear models with L1 or L0 penalty.
735
+
736
+ This function applies _preprocess_data and additionally computes the gram matrix
737
+ `precompute` as needed as well as `Xy`.
738
+ """
739
+ n_samples, n_features = X.shape
740
+
741
+ if sparse.issparse(X):
742
+ # copy is not needed here as X is not modified inplace when X is sparse
743
+ precompute = False
744
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
745
+ X,
746
+ y,
747
+ fit_intercept=fit_intercept,
748
+ copy=False,
749
+ check_input=check_input,
750
+ sample_weight=sample_weight,
751
+ )
752
+ else:
753
+ # copy was done in fit if necessary
754
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
755
+ X,
756
+ y,
757
+ fit_intercept=fit_intercept,
758
+ copy=copy,
759
+ check_input=check_input,
760
+ sample_weight=sample_weight,
761
+ )
762
+ # Rescale only in dense case. Sparse cd solver directly deals with
763
+ # sample_weight.
764
+ if sample_weight is not None:
765
+ # This triggers copies anyway.
766
+ X, y, _ = _rescale_data(X, y, sample_weight=sample_weight)
767
+
768
+ if hasattr(precompute, "__array__"):
769
+ if fit_intercept and not np.allclose(X_offset, np.zeros(n_features)):
770
+ warnings.warn(
771
+ (
772
+ "Gram matrix was provided but X was centered to fit "
773
+ "intercept: recomputing Gram matrix."
774
+ ),
775
+ UserWarning,
776
+ )
777
+ # TODO: instead of warning and recomputing, we could just center
778
+ # the user provided Gram matrix a-posteriori (after making a copy
779
+ # when `copy=True`).
780
+ # recompute Gram
781
+ precompute = "auto"
782
+ Xy = None
783
+ elif check_input:
784
+ # If we're going to use the user's precomputed gram matrix, we
785
+ # do a quick check to make sure it's not totally bogus.
786
+ _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale)
787
+
788
+ # precompute if n_samples > n_features
789
+ if isinstance(precompute, str) and precompute == "auto":
790
+ precompute = n_samples > n_features
791
+
792
+ if precompute is True:
793
+ # make sure that the 'precompute' array is contiguous.
794
+ precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order="C")
795
+ np.dot(X.T, X, out=precompute)
796
+
797
+ if not hasattr(precompute, "__array__"):
798
+ Xy = None # cannot use Xy if precompute is not Gram
799
+
800
+ if hasattr(precompute, "__array__") and Xy is None:
801
+ common_dtype = np.result_type(X.dtype, y.dtype)
802
+ if y.ndim == 1:
803
+ # Xy is 1d, make sure it is contiguous.
804
+ Xy = np.empty(shape=n_features, dtype=common_dtype, order="C")
805
+ np.dot(X.T, y, out=Xy)
806
+ else:
807
+ # Make sure that Xy is always F contiguous even if X or y are not
808
+ # contiguous: the goal is to make it fast to extract the data for a
809
+ # specific target.
810
+ n_targets = y.shape[1]
811
+ Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order="F")
812
+ np.dot(y.T, X, out=Xy.T)
813
+
814
+ return X, y, X_offset, y_offset, X_scale, precompute, Xy
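
When `_pre_fit` computes `precompute` and `Xy` itself, they are just the Gram matrix and the X-y inner products of the preprocessed (centered) data, the quantities a coordinate-descent solver can then reuse across many `alpha` values. A dense-only numpy sketch of those two arrays (synthetic data, names illustrative):

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = rng.randn(50)

    # Centering as done by _preprocess_data with fit_intercept=True
    Xc = X - X.mean(axis=0)
    yc = y - y.mean()

    # 'auto' precompute kicks in only when n_samples > n_features
    precompute = Xc.T @ Xc if Xc.shape[0] > Xc.shape[1] else None
    Xy = Xc.T @ yc

    print(precompute.shape, Xy.shape)   # (5, 5) (5,)
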
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py ADDED
@@ -0,0 +1,857 @@
1
+ """
2
+ Various Bayesian regression models
3
+ """
4
+
5
+ # Authors: V. Michel, F. Pedregosa, A. Gramfort
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from math import log
10
+ from numbers import Integral, Real
11
+
12
+ import numpy as np
13
+ from scipy import linalg
14
+ from scipy.linalg import pinvh
15
+
16
+ from ..base import RegressorMixin, _fit_context
17
+ from ..utils import _safe_indexing
18
+ from ..utils._param_validation import Hidden, Interval, StrOptions
19
+ from ..utils.extmath import fast_logdet
20
+ from ..utils.validation import _check_sample_weight
21
+ from ._base import LinearModel, _preprocess_data, _rescale_data
22
+
23
+
24
+ # TODO(1.5) Remove
25
+ def _deprecate_n_iter(n_iter, max_iter):
26
+ """Deprecates n_iter in favour of max_iter. Checks if the n_iter has been
27
+ used instead of max_iter and generates a deprecation warning if True.
28
+
29
+ Parameters
30
+ ----------
31
+ n_iter : int,
32
+ Value of n_iter attribute passed by the estimator.
33
+
34
+ max_iter : int, default=None
35
+ Value of max_iter attribute passed by the estimator.
36
+ If `None`, it corresponds to `max_iter=300`.
37
+
38
+ Returns
39
+ -------
40
+ max_iter : int,
41
+ Value of max_iter which shall further be used by the estimator.
42
+
43
+ Notes
44
+ -----
45
+ This function should be completely removed in 1.5.
46
+ """
47
+ if n_iter != "deprecated":
48
+ if max_iter is not None:
49
+ raise ValueError(
50
+ "Both `n_iter` and `max_iter` attributes were set. Attribute"
51
+ " `n_iter` was deprecated in version 1.3 and will be removed in"
52
+ " 1.5. To avoid this error, only set the `max_iter` attribute."
53
+ )
54
+ warnings.warn(
55
+ (
56
+ "'n_iter' was renamed to 'max_iter' in version 1.3 and "
57
+ "will be removed in 1.5"
58
+ ),
59
+ FutureWarning,
60
+ )
61
+ max_iter = n_iter
62
+ elif max_iter is None:
63
+ max_iter = 300
64
+ return max_iter
65
+
66
+
67
+ ###############################################################################
68
+ # BayesianRidge regression
69
+
70
+
71
+ class BayesianRidge(RegressorMixin, LinearModel):
72
+ """Bayesian ridge regression.
73
+
74
+ Fit a Bayesian ridge model. See the Notes section for details on this
75
+ implementation and the optimization of the regularization parameters
76
+ lambda (precision of the weights) and alpha (precision of the noise).
77
+
78
+ Read more in the :ref:`User Guide <bayesian_regression>`.
79
+
80
+ Parameters
81
+ ----------
82
+ max_iter : int, default=None
83
+ Maximum number of iterations over the complete dataset before
84
+ stopping independently of any early stopping criterion. If `None`, it
85
+ corresponds to `max_iter=300`.
86
+
87
+ .. versionchanged:: 1.3
88
+
89
+ tol : float, default=1e-3
90
+ Stop the algorithm if w has converged.
91
+
92
+ alpha_1 : float, default=1e-6
93
+ Hyper-parameter : shape parameter for the Gamma distribution prior
94
+ over the alpha parameter.
95
+
96
+ alpha_2 : float, default=1e-6
97
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
98
+ Gamma distribution prior over the alpha parameter.
99
+
100
+ lambda_1 : float, default=1e-6
101
+ Hyper-parameter : shape parameter for the Gamma distribution prior
102
+ over the lambda parameter.
103
+
104
+ lambda_2 : float, default=1e-6
105
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
106
+ Gamma distribution prior over the lambda parameter.
107
+
108
+ alpha_init : float, default=None
109
+ Initial value for alpha (precision of the noise).
110
+ If not set, alpha_init is 1/Var(y).
111
+
112
+ .. versionadded:: 0.22
113
+
114
+ lambda_init : float, default=None
115
+ Initial value for lambda (precision of the weights).
116
+ If not set, lambda_init is 1.
117
+
118
+ .. versionadded:: 0.22
119
+
120
+ compute_score : bool, default=False
121
+ If True, compute the log marginal likelihood at each iteration of the
122
+ optimization.
123
+
124
+ fit_intercept : bool, default=True
125
+ Whether to calculate the intercept for this model.
126
+ The intercept is not treated as a probabilistic parameter
127
+ and thus has no associated variance. If set
128
+ to False, no intercept will be used in calculations
129
+ (i.e. data is expected to be centered).
130
+
131
+ copy_X : bool, default=True
132
+ If True, X will be copied; else, it may be overwritten.
133
+
134
+ verbose : bool, default=False
135
+ Verbose mode when fitting the model.
136
+
137
+ n_iter : int
138
+ Maximum number of iterations. Should be greater than or equal to 1.
139
+
140
+ .. deprecated:: 1.3
141
+ `n_iter` is deprecated in 1.3 and will be removed in 1.5. Use
142
+ `max_iter` instead.
143
+
144
+ Attributes
145
+ ----------
146
+ coef_ : array-like of shape (n_features,)
147
+ Coefficients of the regression model (mean of distribution)
148
+
149
+ intercept_ : float
150
+ Independent term in decision function. Set to 0.0 if
151
+ `fit_intercept = False`.
152
+
153
+ alpha_ : float
154
+ Estimated precision of the noise.
155
+
156
+ lambda_ : float
157
+ Estimated precision of the weights.
158
+
159
+ sigma_ : array-like of shape (n_features, n_features)
160
+ Estimated variance-covariance matrix of the weights
161
+
162
+ scores_ : array-like of shape (n_iter_+1,)
163
+ If computed_score is True, value of the log marginal likelihood (to be
164
+ maximized) at each iteration of the optimization. The array starts
165
+ with the value of the log marginal likelihood obtained for the initial
166
+ values of alpha and lambda and ends with the value obtained for the
167
+ estimated alpha and lambda.
168
+
169
+ n_iter_ : int
170
+ The actual number of iterations to reach the stopping criterion.
171
+
172
+ X_offset_ : ndarray of shape (n_features,)
173
+ If `fit_intercept=True`, offset subtracted for centering data to a
174
+ zero mean. Set to np.zeros(n_features) otherwise.
175
+
176
+ X_scale_ : ndarray of shape (n_features,)
177
+ Set to np.ones(n_features).
178
+
179
+ n_features_in_ : int
180
+ Number of features seen during :term:`fit`.
181
+
182
+ .. versionadded:: 0.24
183
+
184
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
185
+ Names of features seen during :term:`fit`. Defined only when `X`
186
+ has feature names that are all strings.
187
+
188
+ .. versionadded:: 1.0
189
+
190
+ See Also
191
+ --------
192
+ ARDRegression : Bayesian ARD regression.
193
+
194
+ Notes
195
+ -----
196
+ There exist several strategies to perform Bayesian ridge regression. This
197
+ implementation is based on the algorithm described in Appendix A of
198
+ (Tipping, 2001) where updates of the regularization parameters are done as
199
+ suggested in (MacKay, 1992). Note that according to A New
200
+ View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
201
+ update rules do not guarantee that the marginal likelihood is increasing
202
+ between two consecutive iterations of the optimization.
203
+
204
+ References
205
+ ----------
206
+ D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
207
+ Vol. 4, No. 3, 1992.
208
+
209
+ M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
210
+ Journal of Machine Learning Research, Vol. 1, 2001.
211
+
212
+ Examples
213
+ --------
214
+ >>> from sklearn import linear_model
215
+ >>> clf = linear_model.BayesianRidge()
216
+ >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
217
+ BayesianRidge()
218
+ >>> clf.predict([[1, 1]])
219
+ array([1.])
220
+ """
221
+
222
+ _parameter_constraints: dict = {
223
+ "max_iter": [Interval(Integral, 1, None, closed="left"), None],
224
+ "tol": [Interval(Real, 0, None, closed="neither")],
225
+ "alpha_1": [Interval(Real, 0, None, closed="left")],
226
+ "alpha_2": [Interval(Real, 0, None, closed="left")],
227
+ "lambda_1": [Interval(Real, 0, None, closed="left")],
228
+ "lambda_2": [Interval(Real, 0, None, closed="left")],
229
+ "alpha_init": [None, Interval(Real, 0, None, closed="left")],
230
+ "lambda_init": [None, Interval(Real, 0, None, closed="left")],
231
+ "compute_score": ["boolean"],
232
+ "fit_intercept": ["boolean"],
233
+ "copy_X": ["boolean"],
234
+ "verbose": ["verbose"],
235
+ "n_iter": [
236
+ Interval(Integral, 1, None, closed="left"),
237
+ Hidden(StrOptions({"deprecated"})),
238
+ ],
239
+ }
240
+
241
+ def __init__(
242
+ self,
243
+ *,
244
+ max_iter=None, # TODO(1.5): Set to 300
245
+ tol=1.0e-3,
246
+ alpha_1=1.0e-6,
247
+ alpha_2=1.0e-6,
248
+ lambda_1=1.0e-6,
249
+ lambda_2=1.0e-6,
250
+ alpha_init=None,
251
+ lambda_init=None,
252
+ compute_score=False,
253
+ fit_intercept=True,
254
+ copy_X=True,
255
+ verbose=False,
256
+ n_iter="deprecated", # TODO(1.5): Remove
257
+ ):
258
+ self.max_iter = max_iter
259
+ self.tol = tol
260
+ self.alpha_1 = alpha_1
261
+ self.alpha_2 = alpha_2
262
+ self.lambda_1 = lambda_1
263
+ self.lambda_2 = lambda_2
264
+ self.alpha_init = alpha_init
265
+ self.lambda_init = lambda_init
266
+ self.compute_score = compute_score
267
+ self.fit_intercept = fit_intercept
268
+ self.copy_X = copy_X
269
+ self.verbose = verbose
270
+ self.n_iter = n_iter
271
+
272
+ @_fit_context(prefer_skip_nested_validation=True)
273
+ def fit(self, X, y, sample_weight=None):
274
+ """Fit the model.
275
+
276
+ Parameters
277
+ ----------
278
+ X : ndarray of shape (n_samples, n_features)
279
+ Training data.
280
+ y : ndarray of shape (n_samples,)
281
+ Target values. Will be cast to X's dtype if necessary.
282
+
283
+ sample_weight : ndarray of shape (n_samples,), default=None
284
+ Individual weights for each sample.
285
+
286
+ .. versionadded:: 0.20
287
+ parameter *sample_weight* support to BayesianRidge.
288
+
289
+ Returns
290
+ -------
291
+ self : object
292
+ Returns the instance itself.
293
+ """
294
+ max_iter = _deprecate_n_iter(self.n_iter, self.max_iter)
295
+
296
+ X, y = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True)
297
+ dtype = X.dtype
298
+
299
+ if sample_weight is not None:
300
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype)
301
+
302
+ X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data(
303
+ X,
304
+ y,
305
+ fit_intercept=self.fit_intercept,
306
+ copy=self.copy_X,
307
+ sample_weight=sample_weight,
308
+ )
309
+
310
+ if sample_weight is not None:
311
+ # Sample weight can be implemented via a simple rescaling.
312
+ X, y, _ = _rescale_data(X, y, sample_weight)
313
+
314
+ self.X_offset_ = X_offset_
315
+ self.X_scale_ = X_scale_
316
+ n_samples, n_features = X.shape
317
+
318
+ # Initialization of the values of the parameters
319
+ eps = np.finfo(np.float64).eps
320
+ # Add `eps` in the denominator to avoid division by zero if `np.var(y)`
321
+ # is zero
322
+ alpha_ = self.alpha_init
323
+ lambda_ = self.lambda_init
324
+ if alpha_ is None:
325
+ alpha_ = 1.0 / (np.var(y) + eps)
326
+ if lambda_ is None:
327
+ lambda_ = 1.0
328
+
329
+ # Avoid unintended type promotion to float64 with numpy 2
330
+ alpha_ = np.asarray(alpha_, dtype=dtype)
331
+ lambda_ = np.asarray(lambda_, dtype=dtype)
332
+
333
+ verbose = self.verbose
334
+ lambda_1 = self.lambda_1
335
+ lambda_2 = self.lambda_2
336
+ alpha_1 = self.alpha_1
337
+ alpha_2 = self.alpha_2
338
+
339
+ self.scores_ = list()
340
+ coef_old_ = None
341
+
342
+ XT_y = np.dot(X.T, y)
343
+ U, S, Vh = linalg.svd(X, full_matrices=False)
344
+ eigen_vals_ = S**2
345
+
346
+ # Convergence loop of the bayesian ridge regression
347
+ for iter_ in range(max_iter):
348
+ # update posterior mean coef_ based on alpha_ and lambda_ and
349
+ # compute corresponding rmse
350
+ coef_, rmse_ = self._update_coef_(
351
+ X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
352
+ )
353
+ if self.compute_score:
354
+ # compute the log marginal likelihood
355
+ s = self._log_marginal_likelihood(
356
+ n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
357
+ )
358
+ self.scores_.append(s)
359
+
360
+ # Update alpha and lambda according to (MacKay, 1992)
361
+ gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))
362
+ lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2)
363
+ alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)
364
+
365
+ # Check for convergence
366
+ if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
367
+ if verbose:
368
+ print("Convergence after ", str(iter_), " iterations")
369
+ break
370
+ coef_old_ = np.copy(coef_)
371
+
372
+ self.n_iter_ = iter_ + 1
373
+
374
+ # return regularization parameters and corresponding posterior mean,
375
+ # log marginal likelihood and posterior covariance
376
+ self.alpha_ = alpha_
377
+ self.lambda_ = lambda_
378
+ self.coef_, rmse_ = self._update_coef_(
379
+ X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
380
+ )
381
+ if self.compute_score:
382
+ # compute the log marginal likelihood
383
+ s = self._log_marginal_likelihood(
384
+ n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
385
+ )
386
+ self.scores_.append(s)
387
+ self.scores_ = np.array(self.scores_)
388
+
389
+ # posterior covariance is given by 1/alpha_ * scaled_sigma_
390
+ scaled_sigma_ = np.dot(
391
+ Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis]
392
+ )
393
+ self.sigma_ = (1.0 / alpha_) * scaled_sigma_
394
+
395
+ self._set_intercept(X_offset_, y_offset_, X_scale_)
396
+
397
+ return self
398
+
399
+ def predict(self, X, return_std=False):
400
+ """Predict using the linear model.
401
+
402
+ In addition to the mean of the predictive distribution, also its
403
+ standard deviation can be returned.
404
+
405
+ Parameters
406
+ ----------
407
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
408
+ Samples.
409
+
410
+ return_std : bool, default=False
411
+ Whether to return the standard deviation of posterior prediction.
412
+
413
+ Returns
414
+ -------
415
+ y_mean : array-like of shape (n_samples,)
416
+ Mean of predictive distribution of query points.
417
+
418
+ y_std : array-like of shape (n_samples,)
419
+ Standard deviation of predictive distribution of query points.
420
+ """
421
+ y_mean = self._decision_function(X)
422
+ if not return_std:
423
+ return y_mean
424
+ else:
425
+ sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
426
+ y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
427
+ return y_mean, y_std
428
+
429
+ def _update_coef_(
430
+ self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
431
+ ):
432
+ """Update posterior mean and compute corresponding rmse.
433
+
434
+ Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
435
+ scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
436
+ + np.dot(X.T, X))^-1
437
+ """
438
+
439
+ if n_samples > n_features:
440
+ coef_ = np.linalg.multi_dot(
441
+ [Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y]
442
+ )
443
+ else:
444
+ coef_ = np.linalg.multi_dot(
445
+ [X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y]
446
+ )
447
+
448
+ rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
449
+
450
+ return coef_, rmse_
451
+
452
+ def _log_marginal_likelihood(
453
+ self, n_samples, n_features, eigen_vals, alpha_, lambda_, coef, rmse
454
+ ):
455
+ """Log marginal likelihood."""
456
+ alpha_1 = self.alpha_1
457
+ alpha_2 = self.alpha_2
458
+ lambda_1 = self.lambda_1
459
+ lambda_2 = self.lambda_2
460
+
461
+ # compute the log of the determinant of the posterior covariance.
462
+ # posterior covariance is given by
463
+ # sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
464
+ if n_samples > n_features:
465
+ logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals))
466
+ else:
467
+ logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
468
+ logdet_sigma[:n_samples] += alpha_ * eigen_vals
469
+ logdet_sigma = -np.sum(np.log(logdet_sigma))
470
+
471
+ score = lambda_1 * log(lambda_) - lambda_2 * lambda_
472
+ score += alpha_1 * log(alpha_) - alpha_2 * alpha_
473
+ score += 0.5 * (
474
+ n_features * log(lambda_)
475
+ + n_samples * log(alpha_)
476
+ - alpha_ * rmse
477
+ - lambda_ * np.sum(coef**2)
478
+ + logdet_sigma
479
+ - n_samples * log(2 * np.pi)
480
+ )
481
+
482
+ return score
483
+
484
+
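
The precision updates inside the loop above are the (MacKay, 1992) rules referenced in the Notes: `gamma_` estimates the effective number of well-determined weights, and `lambda_` and `alpha_` are then refit from it, the coefficient norm, and the residual sum of squares. A standalone numpy sketch of a single such update on toy data (hyperparameters fixed at the 1e-6 defaults; names illustrative):

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(40, 3)
    y = X @ np.array([0.5, -1.0, 2.0]) + 0.1 * rng.randn(40)
    n_samples = X.shape[0]

    alpha_ = 1.0 / np.var(y)           # initial noise precision
    lambda_ = 1.0                      # initial weight precision
    alpha_1 = alpha_2 = lambda_1 = lambda_2 = 1e-6

    _, S, Vh = np.linalg.svd(X, full_matrices=False)
    eigen_vals = S**2

    # Posterior mean and residual for the current (alpha_, lambda_)
    coef = Vh.T @ (Vh @ (X.T @ y) / (eigen_vals + lambda_ / alpha_))
    rmse = np.sum((y - X @ coef) ** 2)

    # MacKay (1992) update of both precisions
    gamma = np.sum(alpha_ * eigen_vals / (lambda_ + alpha_ * eigen_vals))
    lambda_ = (gamma + 2 * lambda_1) / (np.sum(coef**2) + 2 * lambda_2)
    alpha_ = (n_samples - gamma + 2 * alpha_1) / (rmse + 2 * alpha_2)
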
485
+ ###############################################################################
486
+ # ARD (Automatic Relevance Determination) regression
487
+
488
+
489
+ class ARDRegression(RegressorMixin, LinearModel):
490
+ """Bayesian ARD regression.
491
+
492
+ Fit the weights of a regression model, using an ARD prior. The weights of
493
+ the regression model are assumed to follow Gaussian distributions.
494
+ Also estimate the parameters lambda (precisions of the distributions of the
495
+ weights) and alpha (precision of the distribution of the noise).
496
+ The estimation is done by an iterative procedure (Evidence Maximization).
497
+
498
+ Read more in the :ref:`User Guide <bayesian_regression>`.
499
+
500
+ Parameters
501
+ ----------
502
+ max_iter : int, default=None
503
+ Maximum number of iterations. If `None`, it corresponds to `max_iter=300`.
504
+
505
+ .. versionchanged:: 1.3
506
+
507
+ tol : float, default=1e-3
508
+ Stop the algorithm if w has converged.
509
+
510
+ alpha_1 : float, default=1e-6
511
+ Hyper-parameter : shape parameter for the Gamma distribution prior
512
+ over the alpha parameter.
513
+
514
+ alpha_2 : float, default=1e-6
515
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
516
+ Gamma distribution prior over the alpha parameter.
517
+
518
+ lambda_1 : float, default=1e-6
519
+ Hyper-parameter : shape parameter for the Gamma distribution prior
520
+ over the lambda parameter.
521
+
522
+ lambda_2 : float, default=1e-6
523
+ Hyper-parameter : inverse scale parameter (rate parameter) for the
524
+ Gamma distribution prior over the lambda parameter.
525
+
526
+ compute_score : bool, default=False
527
+ If True, compute the objective function at each step of the model.
528
+
529
+ threshold_lambda : float, default=10_000.0
530
+ Threshold for removing (pruning) weights with high precision from
531
+ the computation.
532
+
533
+ fit_intercept : bool, default=True
534
+ Whether to calculate the intercept for this model. If set
535
+ to false, no intercept will be used in calculations
536
+ (i.e. data is expected to be centered).
537
+
538
+ copy_X : bool, default=True
539
+ If True, X will be copied; else, it may be overwritten.
540
+
541
+ verbose : bool, default=False
542
+ Verbose mode when fitting the model.
543
+
544
+ n_iter : int
545
+ Maximum number of iterations.
546
+
547
+ .. deprecated:: 1.3
548
+ `n_iter` is deprecated in 1.3 and will be removed in 1.5. Use
549
+ `max_iter` instead.
550
+
551
+ Attributes
552
+ ----------
553
+ coef_ : array-like of shape (n_features,)
554
+ Coefficients of the regression model (mean of distribution)
555
+
556
+ alpha_ : float
557
+ estimated precision of the noise.
558
+
559
+ lambda_ : array-like of shape (n_features,)
560
+ estimated precisions of the weights.
561
+
562
+ sigma_ : array-like of shape (n_features, n_features)
563
+ estimated variance-covariance matrix of the weights
564
+
565
+ scores_ : float
566
+ if computed, value of the objective function (to be maximized)
567
+
568
+ n_iter_ : int
569
+ The actual number of iterations to reach the stopping criterion.
570
+
571
+ .. versionadded:: 1.3
572
+
573
+ intercept_ : float
574
+ Independent term in decision function. Set to 0.0 if
575
+ ``fit_intercept = False``.
576
+
577
+ X_offset_ : float
578
+ If `fit_intercept=True`, offset subtracted for centering data to a
579
+ zero mean. Set to np.zeros(n_features) otherwise.
580
+
581
+ X_scale_ : float
582
+ Set to np.ones(n_features).
583
+
584
+ n_features_in_ : int
585
+ Number of features seen during :term:`fit`.
586
+
587
+ .. versionadded:: 0.24
588
+
589
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
590
+ Names of features seen during :term:`fit`. Defined only when `X`
591
+ has feature names that are all strings.
592
+
593
+ .. versionadded:: 1.0
594
+
595
+ See Also
596
+ --------
597
+ BayesianRidge : Bayesian ridge regression.
598
+
599
+ Notes
600
+ -----
601
+ For an example, see :ref:`examples/linear_model/plot_ard.py
602
+ <sphx_glr_auto_examples_linear_model_plot_ard.py>`.
603
+
604
+ References
605
+ ----------
606
+ D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
607
+ competition, ASHRAE Transactions, 1994.
608
+
609
+ R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
610
+ http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
611
+ Their beta is our ``self.alpha_``
612
+ Their alpha is our ``self.lambda_``
613
+ ARD is a little different from the slides: only dimensions/features for
614
+ which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
615
+ discarded.
616
+
617
+ Examples
618
+ --------
619
+ >>> from sklearn import linear_model
620
+ >>> clf = linear_model.ARDRegression()
621
+ >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
622
+ ARDRegression()
623
+ >>> clf.predict([[1, 1]])
624
+ array([1.])
625
+ """
626
+
627
+ _parameter_constraints: dict = {
628
+ "max_iter": [Interval(Integral, 1, None, closed="left"), None],
629
+ "tol": [Interval(Real, 0, None, closed="left")],
630
+ "alpha_1": [Interval(Real, 0, None, closed="left")],
631
+ "alpha_2": [Interval(Real, 0, None, closed="left")],
632
+ "lambda_1": [Interval(Real, 0, None, closed="left")],
633
+ "lambda_2": [Interval(Real, 0, None, closed="left")],
634
+ "compute_score": ["boolean"],
635
+ "threshold_lambda": [Interval(Real, 0, None, closed="left")],
636
+ "fit_intercept": ["boolean"],
637
+ "copy_X": ["boolean"],
638
+ "verbose": ["verbose"],
639
+ "n_iter": [
640
+ Interval(Integral, 1, None, closed="left"),
641
+ Hidden(StrOptions({"deprecated"})),
642
+ ],
643
+ }
644
+
645
+ def __init__(
646
+ self,
647
+ *,
648
+ max_iter=None, # TODO(1.5): Set to 300
649
+ tol=1.0e-3,
650
+ alpha_1=1.0e-6,
651
+ alpha_2=1.0e-6,
652
+ lambda_1=1.0e-6,
653
+ lambda_2=1.0e-6,
654
+ compute_score=False,
655
+ threshold_lambda=1.0e4,
656
+ fit_intercept=True,
657
+ copy_X=True,
658
+ verbose=False,
659
+ n_iter="deprecated", # TODO(1.5): Remove
660
+ ):
661
+ self.max_iter = max_iter
662
+ self.tol = tol
663
+ self.fit_intercept = fit_intercept
664
+ self.alpha_1 = alpha_1
665
+ self.alpha_2 = alpha_2
666
+ self.lambda_1 = lambda_1
667
+ self.lambda_2 = lambda_2
668
+ self.compute_score = compute_score
669
+ self.threshold_lambda = threshold_lambda
670
+ self.copy_X = copy_X
671
+ self.verbose = verbose
672
+ self.n_iter = n_iter
673
+
674
+ @_fit_context(prefer_skip_nested_validation=True)
675
+ def fit(self, X, y):
676
+ """Fit the model according to the given training data and parameters.
677
+
678
+ Iterative procedure to maximize the evidence
679
+
680
+ Parameters
681
+ ----------
682
+ X : array-like of shape (n_samples, n_features)
683
+ Training vector, where `n_samples` is the number of samples and
684
+ `n_features` is the number of features.
685
+ y : array-like of shape (n_samples,)
686
+ Target values (integers). Will be cast to X's dtype if necessary.
687
+
688
+ Returns
689
+ -------
690
+ self : object
691
+ Fitted estimator.
692
+ """
693
+ max_iter = _deprecate_n_iter(self.n_iter, self.max_iter)
694
+
695
+ X, y = self._validate_data(
696
+ X, y, dtype=[np.float64, np.float32], y_numeric=True, ensure_min_samples=2
697
+ )
698
+ dtype = X.dtype
699
+
700
+ n_samples, n_features = X.shape
701
+ coef_ = np.zeros(n_features, dtype=dtype)
702
+
703
+ X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data(
704
+ X, y, fit_intercept=self.fit_intercept, copy=self.copy_X
705
+ )
706
+
707
+ self.X_offset_ = X_offset_
708
+ self.X_scale_ = X_scale_
709
+
710
+ # Launch the convergence loop
711
+ keep_lambda = np.ones(n_features, dtype=bool)
712
+
713
+ lambda_1 = self.lambda_1
714
+ lambda_2 = self.lambda_2
715
+ alpha_1 = self.alpha_1
716
+ alpha_2 = self.alpha_2
717
+ verbose = self.verbose
718
+
719
+ # Initialization of the values of the parameters
720
+ eps = np.finfo(np.float64).eps
721
+ # Add `eps` in the denominator to avoid division by zero if `np.var(y)`
722
+ # is zero.
723
+ # Explicitly set dtype to avoid unintended type promotion with numpy 2.
724
+ alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype)
725
+ lambda_ = np.ones(n_features, dtype=dtype)
726
+
727
+ self.scores_ = list()
728
+ coef_old_ = None
729
+
730
+ def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
731
+ coef_[keep_lambda] = alpha_ * np.linalg.multi_dot(
732
+ [sigma_, X[:, keep_lambda].T, y]
733
+ )
734
+ return coef_
735
+
736
+ update_sigma = (
737
+ self._update_sigma
738
+ if n_samples >= n_features
739
+ else self._update_sigma_woodbury
740
+ )
741
+ # Iterative procedure of ARDRegression
742
+ for iter_ in range(max_iter):
743
+ sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
744
+ coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
745
+
746
+ # Update alpha and lambda
747
+ rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
748
+ gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)
749
+ lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (
750
+ (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2
751
+ )
752
+ alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (
753
+ rmse_ + 2.0 * alpha_2
754
+ )
755
+
756
+ # Prune the weights with a precision over a threshold
757
+ keep_lambda = lambda_ < self.threshold_lambda
758
+ coef_[~keep_lambda] = 0
759
+
760
+ # Compute the objective function
761
+ if self.compute_score:
762
+ s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
763
+ s += alpha_1 * log(alpha_) - alpha_2 * alpha_
764
+ s += 0.5 * (
765
+ fast_logdet(sigma_)
766
+ + n_samples * log(alpha_)
767
+ + np.sum(np.log(lambda_))
768
+ )
769
+ s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum())
770
+ self.scores_.append(s)
771
+
772
+ # Check for convergence
773
+ if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
774
+ if verbose:
775
+ print("Converged after %s iterations" % iter_)
776
+ break
777
+ coef_old_ = np.copy(coef_)
778
+
779
+ if not keep_lambda.any():
780
+ break
781
+
782
+ self.n_iter_ = iter_ + 1
783
+
784
+ if keep_lambda.any():
785
+ # update sigma and mu using updated params from the last iteration
786
+ sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
787
+ coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
788
+ else:
789
+ sigma_ = np.array([]).reshape(0, 0)
790
+
791
+ self.coef_ = coef_
792
+ self.alpha_ = alpha_
793
+ self.sigma_ = sigma_
794
+ self.lambda_ = lambda_
795
+ self._set_intercept(X_offset_, y_offset_, X_scale_)
796
+ return self
797
+
798
+ def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):
799
+ # See slides as referenced in the docstring note
800
+ # this function is used when n_samples < n_features and will invert
801
+ # a matrix of shape (n_samples, n_samples) making use of the
802
+ # woodbury formula:
803
+ # https://en.wikipedia.org/wiki/Woodbury_matrix_identity
804
+ n_samples = X.shape[0]
805
+ X_keep = X[:, keep_lambda]
806
+ inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1)
807
+ sigma_ = pinvh(
808
+ np.eye(n_samples, dtype=X.dtype) / alpha_
809
+ + np.dot(X_keep * inv_lambda, X_keep.T)
810
+ )
811
+ sigma_ = np.dot(sigma_, X_keep * inv_lambda)
812
+ sigma_ = -np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_)
813
+ sigma_[np.diag_indices(sigma_.shape[1])] += 1.0 / lambda_[keep_lambda]
814
+ return sigma_
815
+
816
+ def _update_sigma(self, X, alpha_, lambda_, keep_lambda):
817
+ # See slides as referenced in the docstring note
818
+ # this function is used when n_samples >= n_features and will
819
+ # invert a matrix of shape (n_features, n_features)
820
+ X_keep = X[:, keep_lambda]
821
+ gram = np.dot(X_keep.T, X_keep)
822
+ eye = np.eye(gram.shape[0], dtype=X.dtype)
823
+ sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram
824
+ sigma_ = pinvh(sigma_inv)
825
+ return sigma_
826
+
827
+ def predict(self, X, return_std=False):
828
+ """Predict using the linear model.
829
+
830
+ In addition to the mean of the predictive distribution, also its
831
+ standard deviation can be returned.
832
+
833
+ Parameters
834
+ ----------
835
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
836
+ Samples.
837
+
838
+ return_std : bool, default=False
839
+ Whether to return the standard deviation of posterior prediction.
840
+
841
+ Returns
842
+ -------
843
+ y_mean : array-like of shape (n_samples,)
844
+ Mean of predictive distribution of query points.
845
+
846
+ y_std : array-like of shape (n_samples,)
847
+ Standard deviation of predictive distribution of query points.
848
+ """
849
+ y_mean = self._decision_function(X)
850
+ if return_std is False:
851
+ return y_mean
852
+ else:
853
+ col_index = self.lambda_ < self.threshold_lambda
854
+ X = _safe_indexing(X, indices=col_index, axis=1)
855
+ sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
856
+ y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
857
+ return y_mean, y_std
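
The two `sigma` updates above compute the same posterior covariance `(diag(lambda_) + alpha_ * X.T @ X)^-1`; the Woodbury variant only ever inverts an (n_samples, n_samples) matrix, which is cheaper when there are more kept features than samples. A quick numpy check of that equivalence on a wide toy problem (illustrative sizes and values):

    import numpy as np
    from scipy.linalg import pinvh

    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 12
    X = rng.randn(n_samples, n_features)
    alpha_ = 2.0
    lambda_ = rng.uniform(0.5, 3.0, size=n_features)

    # Direct (n_features x n_features) inverse, as in _update_sigma
    sigma_direct = pinvh(np.diag(lambda_) + alpha_ * X.T @ X)

    # Woodbury form, as in _update_sigma_woodbury
    inv_lambda = 1.0 / lambda_
    M = pinvh(np.eye(n_samples) / alpha_ + (X * inv_lambda) @ X.T)
    sigma_woodbury = (
        np.diag(inv_lambda)
        - (inv_lambda[:, None] * X.T) @ M @ (X * inv_lambda)
    )

    assert np.allclose(sigma_direct, sigma_woodbury)
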
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_huber.py ADDED
@@ -0,0 +1,352 @@
1
+ # Authors: Manoj Kumar [email protected]
2
+ # License: BSD 3 clause
3
+
4
+ from numbers import Integral, Real
5
+
6
+ import numpy as np
7
+ from scipy import optimize
8
+
9
+ from ..base import BaseEstimator, RegressorMixin, _fit_context
10
+ from ..utils import axis0_safe_slice
11
+ from ..utils._param_validation import Interval
12
+ from ..utils.extmath import safe_sparse_dot
13
+ from ..utils.optimize import _check_optimize_result
14
+ from ..utils.validation import _check_sample_weight
15
+ from ._base import LinearModel
16
+
17
+
18
+ def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
19
+ """Returns the Huber loss and the gradient.
20
+
21
+ Parameters
22
+ ----------
23
+ w : ndarray, shape (n_features + 1,) or (n_features + 2,)
24
+ Parameter vector.
25
+ w[:n_features] gives the coefficients
26
+ w[-1] gives the scale factor and if the intercept is fit w[-2]
27
+ gives the intercept factor.
28
+
29
+ X : ndarray of shape (n_samples, n_features)
30
+ Input data.
31
+
32
+ y : ndarray of shape (n_samples,)
33
+ Target vector.
34
+
35
+ epsilon : float
36
+ Robustness of the Huber estimator.
37
+
38
+ alpha : float
39
+ Regularization parameter.
40
+
41
+ sample_weight : ndarray of shape (n_samples,), default=None
42
+ Weight assigned to each sample.
43
+
44
+ Returns
45
+ -------
46
+ loss : float
47
+ Huber loss.
48
+
49
+ gradient : ndarray, shape (len(w))
50
+ Returns the derivative of the Huber loss with respect to each
51
+ coefficient, intercept and the scale as a vector.
52
+ """
53
+ _, n_features = X.shape
54
+ fit_intercept = n_features + 2 == w.shape[0]
55
+ if fit_intercept:
56
+ intercept = w[-2]
57
+ sigma = w[-1]
58
+ w = w[:n_features]
59
+ n_samples = np.sum(sample_weight)
60
+
61
+ # Calculate the values where |(y - X'w - c) / sigma| > epsilon
62
+ # The values above this threshold are outliers.
63
+ linear_loss = y - safe_sparse_dot(X, w)
64
+ if fit_intercept:
65
+ linear_loss -= intercept
66
+ abs_linear_loss = np.abs(linear_loss)
67
+ outliers_mask = abs_linear_loss > epsilon * sigma
68
+
69
+ # Calculate the linear loss due to the outliers.
70
+ # This is equal to (2 * M * |(y - X'w - c) / sigma| - M**2) * sigma
71
+ outliers = abs_linear_loss[outliers_mask]
72
+ num_outliers = np.count_nonzero(outliers_mask)
73
+ n_non_outliers = X.shape[0] - num_outliers
74
+
75
+ # n_sw_outliers includes the weight given to the outliers while
76
+ # num_outliers is just the number of outliers.
77
+ outliers_sw = sample_weight[outliers_mask]
78
+ n_sw_outliers = np.sum(outliers_sw)
79
+ outlier_loss = (
80
+ 2.0 * epsilon * np.sum(outliers_sw * outliers)
81
+ - sigma * n_sw_outliers * epsilon**2
82
+ )
83
+
84
+ # Calculate the quadratic loss due to the non-outliers.
85
+ # This is equal to |(y - X'w - c)**2 / sigma**2| * sigma
86
+ non_outliers = linear_loss[~outliers_mask]
87
+ weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
88
+ weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
89
+ squared_loss = weighted_loss / sigma
90
+
91
+ if fit_intercept:
92
+ grad = np.zeros(n_features + 2)
93
+ else:
94
+ grad = np.zeros(n_features + 1)
95
+
96
+ # Gradient due to the squared loss.
97
+ X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
98
+ grad[:n_features] = (
99
+ 2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers)
100
+ )
101
+
102
+ # Gradient due to the linear loss.
103
+ signed_outliers = np.ones_like(outliers)
104
+ signed_outliers_mask = linear_loss[outliers_mask] < 0
105
+ signed_outliers[signed_outliers_mask] = -1.0
106
+ X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
107
+ sw_outliers = sample_weight[outliers_mask] * signed_outliers
108
+ grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers))
109
+
110
+ # Gradient due to the penalty.
111
+ grad[:n_features] += alpha * 2.0 * w
112
+
113
+ # Gradient due to sigma.
114
+ grad[-1] = n_samples
115
+ grad[-1] -= n_sw_outliers * epsilon**2
116
+ grad[-1] -= squared_loss / sigma
117
+
118
+ # Gradient due to the intercept.
119
+ if fit_intercept:
120
+ grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma
121
+ grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers)
122
+
123
+ loss = n_samples * sigma + squared_loss + outlier_loss
124
+ loss += alpha * np.dot(w, w)
125
+ return loss, grad
126
+
127
+
128
+ class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
129
+ """L2-regularized linear regression model that is robust to outliers.
130
+
131
+ The Huber Regressor optimizes the squared loss for the samples where
132
+ ``|(y - Xw - c) / sigma| < epsilon`` and the absolute loss for the samples
133
+ where ``|(y - Xw - c) / sigma| > epsilon``, where the model coefficients
134
+ ``w``, the intercept ``c`` and the scale ``sigma`` are parameters
135
+ to be optimized. The parameter sigma makes sure that if y is scaled up
136
+ or down by a certain factor, one does not need to rescale epsilon to
137
+ achieve the same robustness. Note that this does not take into account
138
+ the fact that the different features of X may be of different scales.
139
+
140
+ The Huber loss function has the advantage of not being heavily influenced
141
+ by the outliers while not completely ignoring their effect.
142
+
143
+ Read more in the :ref:`User Guide <huber_regression>`
144
+
145
+ .. versionadded:: 0.18
146
+
147
+ Parameters
148
+ ----------
149
+ epsilon : float, default=1.35
150
+ The parameter epsilon controls the number of samples that should be
151
+ classified as outliers. The smaller the epsilon, the more robust it is
152
+ to outliers. Epsilon must be in the range `[1, inf)`.
153
+
154
+ max_iter : int, default=100
155
+ Maximum number of iterations that
156
+ ``scipy.optimize.minimize(method="L-BFGS-B")`` should run for.
157
+
158
+ alpha : float, default=0.0001
159
+ Strength of the squared L2 regularization. Note that the penalty is
160
+ equal to ``alpha * ||w||^2``.
161
+ Must be in the range `[0, inf)`.
162
+
163
+ warm_start : bool, default=False
164
+ This is useful if the stored attributes of a previously used model
165
+ have to be reused. If set to False, then the coefficients will
166
+ be rewritten for every call to fit.
167
+ See :term:`the Glossary <warm_start>`.
168
+
169
+ fit_intercept : bool, default=True
170
+ Whether or not to fit the intercept. This can be set to False
171
+ if the data is already centered around the origin.
172
+
173
+ tol : float, default=1e-05
174
+ The iteration will stop when
175
+ ``max{|proj g_i | i = 1, ..., n}`` <= ``tol``
176
+ where pg_i is the i-th component of the projected gradient.
177
+
178
+ Attributes
179
+ ----------
180
+ coef_ : array, shape (n_features,)
181
+ Coefficients obtained by optimizing the L2-regularized Huber loss.
182
+
183
+ intercept_ : float
184
+ Bias.
185
+
186
+ scale_ : float
187
+ The value by which ``|y - Xw - c|`` is scaled down.
188
+
189
+ n_features_in_ : int
190
+ Number of features seen during :term:`fit`.
191
+
192
+ .. versionadded:: 0.24
193
+
194
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
195
+ Names of features seen during :term:`fit`. Defined only when `X`
196
+ has feature names that are all strings.
197
+
198
+ .. versionadded:: 1.0
199
+
200
+ n_iter_ : int
201
+ Number of iterations that
202
+ ``scipy.optimize.minimize(method="L-BFGS-B")`` has run for.
203
+
204
+ .. versionchanged:: 0.20
205
+
206
+ In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
207
+ ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
208
+
209
+ outliers_ : array, shape (n_samples,)
210
+ A boolean mask which is set to True where the samples are identified
211
+ as outliers.
212
+
213
+ See Also
214
+ --------
215
+ RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
216
+ TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
217
+ SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
218
+
219
+ References
220
+ ----------
221
+ .. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics
222
+ Concomitant scale estimates, pg 172
223
+ .. [2] Art B. Owen (2006), A robust hybrid of lasso and ridge regression.
224
+ https://statweb.stanford.edu/~owen/reports/hhu.pdf
225
+
226
+ Examples
227
+ --------
228
+ >>> import numpy as np
229
+ >>> from sklearn.linear_model import HuberRegressor, LinearRegression
230
+ >>> from sklearn.datasets import make_regression
231
+ >>> rng = np.random.RandomState(0)
232
+ >>> X, y, coef = make_regression(
233
+ ... n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0)
234
+ >>> X[:4] = rng.uniform(10, 20, (4, 2))
235
+ >>> y[:4] = rng.uniform(10, 20, 4)
236
+ >>> huber = HuberRegressor().fit(X, y)
237
+ >>> huber.score(X, y)
238
+ -7.284...
239
+ >>> huber.predict(X[:1,])
240
+ array([806.7200...])
241
+ >>> linear = LinearRegression().fit(X, y)
242
+ >>> print("True coefficients:", coef)
243
+ True coefficients: [20.4923... 34.1698...]
244
+ >>> print("Huber coefficients:", huber.coef_)
245
+ Huber coefficients: [17.7906... 31.0106...]
246
+ >>> print("Linear Regression coefficients:", linear.coef_)
247
+ Linear Regression coefficients: [-1.9221... 7.0226...]
248
+ """
249
+
250
+ _parameter_constraints: dict = {
251
+ "epsilon": [Interval(Real, 1.0, None, closed="left")],
252
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
253
+ "alpha": [Interval(Real, 0, None, closed="left")],
254
+ "warm_start": ["boolean"],
255
+ "fit_intercept": ["boolean"],
256
+ "tol": [Interval(Real, 0.0, None, closed="left")],
257
+ }
258
+
259
+ def __init__(
260
+ self,
261
+ *,
262
+ epsilon=1.35,
263
+ max_iter=100,
264
+ alpha=0.0001,
265
+ warm_start=False,
266
+ fit_intercept=True,
267
+ tol=1e-05,
268
+ ):
269
+ self.epsilon = epsilon
270
+ self.max_iter = max_iter
271
+ self.alpha = alpha
272
+ self.warm_start = warm_start
273
+ self.fit_intercept = fit_intercept
274
+ self.tol = tol
275
+
276
+ @_fit_context(prefer_skip_nested_validation=True)
277
+ def fit(self, X, y, sample_weight=None):
278
+ """Fit the model according to the given training data.
279
+
280
+ Parameters
281
+ ----------
282
+ X : array-like, shape (n_samples, n_features)
283
+ Training vector, where `n_samples` is the number of samples and
284
+ `n_features` is the number of features.
285
+
286
+ y : array-like, shape (n_samples,)
287
+ Target vector relative to X.
288
+
289
+ sample_weight : array-like, shape (n_samples,)
290
+ Weight given to each sample.
291
+
292
+ Returns
293
+ -------
294
+ self : object
295
+ Fitted `HuberRegressor` estimator.
296
+ """
297
+ X, y = self._validate_data(
298
+ X,
299
+ y,
300
+ copy=False,
301
+ accept_sparse=["csr"],
302
+ y_numeric=True,
303
+ dtype=[np.float64, np.float32],
304
+ )
305
+
306
+ sample_weight = _check_sample_weight(sample_weight, X)
307
+
308
+ if self.warm_start and hasattr(self, "coef_"):
309
+ parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_]))
310
+ else:
311
+ if self.fit_intercept:
312
+ parameters = np.zeros(X.shape[1] + 2)
313
+ else:
314
+ parameters = np.zeros(X.shape[1] + 1)
315
+ # Make sure to initialize the scale parameter to a strictly
316
+ # positive value:
317
+ parameters[-1] = 1
318
+
319
+ # Sigma or the scale factor should be non-negative.
320
+ # Setting it to be zero might cause undefined bounds hence we set it
321
+ # to a value close to zero.
322
+ bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
323
+ bounds[-1][0] = np.finfo(np.float64).eps * 10
324
+
325
+ opt_res = optimize.minimize(
326
+ _huber_loss_and_gradient,
327
+ parameters,
328
+ method="L-BFGS-B",
329
+ jac=True,
330
+ args=(X, y, self.epsilon, self.alpha, sample_weight),
331
+ options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1},
332
+ bounds=bounds,
333
+ )
334
+
335
+ parameters = opt_res.x
336
+
337
+ if opt_res.status == 2:
338
+ raise ValueError(
339
+ "HuberRegressor convergence failed: l-BFGS-b solver terminated with %s"
340
+ % opt_res.message
341
+ )
342
+ self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
343
+ self.scale_ = parameters[-1]
344
+ if self.fit_intercept:
345
+ self.intercept_ = parameters[-2]
346
+ else:
347
+ self.intercept_ = 0.0
348
+ self.coef_ = parameters[: X.shape[1]]
349
+
350
+ residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_)
351
+ self.outliers_ = residual > self.scale_ * self.epsilon
352
+ return self
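
The objective minimized in `fit` above follows Owen (2006): a squared loss (divided by the scale `sigma`) for samples whose absolute residual is below `epsilon * sigma`, a linear loss for the rest, plus `n * sigma` and the squared L2 penalty. A standalone numpy version of just the loss value, mirroring the loss part of `_huber_loss_and_gradient` (function name and data are illustrative):

    import numpy as np

    def huber_objective(coef, intercept, sigma, X, y,
                        epsilon=1.35, alpha=1e-4, sample_weight=None):
        if sample_weight is None:
            sample_weight = np.ones_like(y)
        residual = y - X @ coef - intercept
        outliers = np.abs(residual) > epsilon * sigma
        # Quadratic branch for inliers, linear branch for outliers
        squared_loss = np.sum(
            sample_weight[~outliers] * residual[~outliers] ** 2
        ) / sigma
        outlier_loss = (
            2.0 * epsilon * np.sum(sample_weight[outliers] * np.abs(residual[outliers]))
            - sigma * np.sum(sample_weight[outliers]) * epsilon**2
        )
        return (np.sum(sample_weight) * sigma + squared_loss + outlier_loss
                + alpha * np.dot(coef, coef))

    rng = np.random.RandomState(0)
    X = rng.randn(30, 2)
    y = X @ np.array([1.0, -3.0]) + rng.randn(30)
    y[:3] += 20.0                                   # a few gross outliers
    print(huber_objective(np.array([1.0, -3.0]), 0.0, 1.0, X, y))
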
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py ADDED
@@ -0,0 +1,2190 @@
1
+ """
2
+ Logistic Regression
3
+ """
4
+
5
+ # Author: Gael Varoquaux <[email protected]>
6
+ # Fabian Pedregosa <[email protected]>
7
+ # Alexandre Gramfort <[email protected]>
8
+ # Manoj Kumar <[email protected]>
9
+ # Lars Buitinck
10
+ # Simon Wu <[email protected]>
11
+ # Arthur Mensch <[email protected]>
12
+
13
+ import numbers
14
+ import warnings
15
+ from numbers import Integral, Real
16
+
17
+ import numpy as np
18
+ from joblib import effective_n_jobs
19
+ from scipy import optimize
20
+
21
+ from sklearn.metrics import get_scorer_names
22
+
23
+ from .._loss.loss import HalfBinomialLoss, HalfMultinomialLoss
24
+ from ..base import _fit_context
25
+ from ..metrics import get_scorer
26
+ from ..model_selection import check_cv
27
+ from ..preprocessing import LabelBinarizer, LabelEncoder
28
+ from ..svm._base import _fit_liblinear
29
+ from ..utils import (
30
+ Bunch,
31
+ check_array,
32
+ check_consistent_length,
33
+ check_random_state,
34
+ compute_class_weight,
35
+ )
36
+ from ..utils._param_validation import Interval, StrOptions
37
+ from ..utils.extmath import row_norms, softmax
38
+ from ..utils.metadata_routing import (
39
+ MetadataRouter,
40
+ MethodMapping,
41
+ _raise_for_params,
42
+ _routing_enabled,
43
+ process_routing,
44
+ )
45
+ from ..utils.multiclass import check_classification_targets
46
+ from ..utils.optimize import _check_optimize_result, _newton_cg
47
+ from ..utils.parallel import Parallel, delayed
48
+ from ..utils.validation import (
49
+ _check_method_params,
50
+ _check_sample_weight,
51
+ check_is_fitted,
52
+ )
53
+ from ._base import BaseEstimator, LinearClassifierMixin, SparseCoefMixin
54
+ from ._glm.glm import NewtonCholeskySolver
55
+ from ._linear_loss import LinearModelLoss
56
+ from ._sag import sag_solver
57
+
58
+ _LOGISTIC_SOLVER_CONVERGENCE_MSG = (
59
+ "Please also refer to the documentation for alternative solver options:\n"
60
+ " https://scikit-learn.org/stable/modules/linear_model.html"
61
+ "#logistic-regression"
62
+ )
63
+
64
+
65
+ def _check_solver(solver, penalty, dual):
66
+ if solver not in ["liblinear", "saga"] and penalty not in ("l2", None):
67
+ raise ValueError(
68
+ f"Solver {solver} supports only 'l2' or None penalties, got {penalty} "
69
+ "penalty."
70
+ )
71
+ if solver != "liblinear" and dual:
72
+ raise ValueError(f"Solver {solver} supports only dual=False, got dual={dual}")
73
+
74
+ if penalty == "elasticnet" and solver != "saga":
75
+ raise ValueError(
76
+ f"Only 'saga' solver supports elasticnet penalty, got solver={solver}."
77
+ )
78
+
79
+ if solver == "liblinear" and penalty is None:
80
+ raise ValueError("penalty=None is not supported for the liblinear solver")
81
+
82
+ return solver
83
+
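A minimal, hedged illustration (editorial, not part of the committed file) of which solver/penalty combinations the check above accepts; it assumes this file is importable under scikit-learn's usual private path sklearn.linear_model._logistic.

from sklearn.linear_model._logistic import _check_solver  # private module; assumption

# Accepted combinations simply return the solver name.
assert _check_solver("lbfgs", "l2", dual=False) == "lbfgs"
assert _check_solver("saga", "elasticnet", dual=False) == "saga"

# Unsupported combinations raise ValueError, e.g. an l1 penalty with lbfgs.
try:
    _check_solver("lbfgs", "l1", dual=False)
except ValueError as exc:
    print(exc)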
84
+
85
+ def _check_multi_class(multi_class, solver, n_classes):
86
+ """Computes the multi class type, either "multinomial" or "ovr".
87
+
88
+ For `n_classes` > 2 and a solver that supports it, returns "multinomial".
89
+ For all other cases, in particular binary classification, returns "ovr".
90
+ """
91
+ if multi_class == "auto":
92
+ if solver in ("liblinear", "newton-cholesky"):
93
+ multi_class = "ovr"
94
+ elif n_classes > 2:
95
+ multi_class = "multinomial"
96
+ else:
97
+ multi_class = "ovr"
98
+ if multi_class == "multinomial" and solver in ("liblinear", "newton-cholesky"):
99
+ raise ValueError("Solver %s does not support a multinomial backend." % solver)
100
+ return multi_class
101
+
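A small, hedged sketch (editorial) of how multi_class="auto" resolves in the helper above, under the same private-module assumption.

from sklearn.linear_model._logistic import _check_multi_class  # private module; assumption

assert _check_multi_class("auto", "lbfgs", n_classes=3) == "multinomial"
assert _check_multi_class("auto", "lbfgs", n_classes=2) == "ovr"      # binary -> OvR
assert _check_multi_class("auto", "liblinear", n_classes=3) == "ovr"  # solver has no multinomial backend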
102
+
103
+ def _logistic_regression_path(
104
+ X,
105
+ y,
106
+ pos_class=None,
107
+ Cs=10,
108
+ fit_intercept=True,
109
+ max_iter=100,
110
+ tol=1e-4,
111
+ verbose=0,
112
+ solver="lbfgs",
113
+ coef=None,
114
+ class_weight=None,
115
+ dual=False,
116
+ penalty="l2",
117
+ intercept_scaling=1.0,
118
+ multi_class="auto",
119
+ random_state=None,
120
+ check_input=True,
121
+ max_squared_sum=None,
122
+ sample_weight=None,
123
+ l1_ratio=None,
124
+ n_threads=1,
125
+ ):
126
+ """Compute a Logistic Regression model for a list of regularization
127
+ parameters.
128
+
129
+ This is an implementation that uses the result of the previous model
130
+ to speed up computations along the set of solutions, making it faster
131
+ than sequentially calling LogisticRegression for the different parameters.
132
+ Note that there will be no speedup with liblinear solver, since it does
133
+ not handle warm-starting.
134
+
135
+ Read more in the :ref:`User Guide <logistic_regression>`.
136
+
137
+ Parameters
138
+ ----------
139
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
140
+ Input data.
141
+
142
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
143
+ Input data, target values.
144
+
145
+ pos_class : int, default=None
146
+ The class with respect to which we perform a one-vs-all fit.
147
+ If None, then it is assumed that the given problem is binary.
148
+
149
+ Cs : int or array-like of shape (n_cs,), default=10
150
+ List of values for the regularization parameter or integer specifying
151
+ the number of regularization parameters that should be used. In this
152
+ case, the parameters will be chosen in a logarithmic scale between
153
+ 1e-4 and 1e4.
154
+
155
+ fit_intercept : bool, default=True
156
+ Whether to fit an intercept for the model. In this case the shape of
157
+ the returned array is (n_cs, n_features + 1).
158
+
159
+ max_iter : int, default=100
160
+ Maximum number of iterations for the solver.
161
+
162
+ tol : float, default=1e-4
163
+ Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
164
+ will stop when ``max{|g_i | i = 1, ..., n} <= tol``
165
+ where ``g_i`` is the i-th component of the gradient.
166
+
167
+ verbose : int, default=0
168
+ For the liblinear and lbfgs solvers set verbose to any positive
169
+ number for verbosity.
170
+
171
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
172
+ default='lbfgs'
173
+ Numerical solver to use.
174
+
175
+ coef : array-like of shape (n_features,), default=None
176
+ Initialization value for coefficients of logistic regression.
177
+ Useless for liblinear solver.
178
+
179
+ class_weight : dict or 'balanced', default=None
180
+ Weights associated with classes in the form ``{class_label: weight}``.
181
+ If not given, all classes are supposed to have weight one.
182
+
183
+ The "balanced" mode uses the values of y to automatically adjust
184
+ weights inversely proportional to class frequencies in the input data
185
+ as ``n_samples / (n_classes * np.bincount(y))``.
186
+
187
+ Note that these weights will be multiplied with sample_weight (passed
188
+ through the fit method) if sample_weight is specified.
189
+
190
+ dual : bool, default=False
191
+ Dual or primal formulation. Dual formulation is only implemented for
192
+ l2 penalty with liblinear solver. Prefer dual=False when
193
+ n_samples > n_features.
194
+
195
+ penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
196
+ Used to specify the norm used in the penalization. The 'newton-cg',
197
+ 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
198
+ only supported by the 'saga' solver.
199
+
200
+ intercept_scaling : float, default=1.
201
+ Useful only when the solver 'liblinear' is used
202
+ and self.fit_intercept is set to True. In this case, x becomes
203
+ [x, self.intercept_scaling],
204
+ i.e. a "synthetic" feature with constant value equal to
205
+ intercept_scaling is appended to the instance vector.
206
+ The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
207
+
208
+ Note! the synthetic feature weight is subject to l1/l2 regularization
209
+ as all other features.
210
+ To lessen the effect of regularization on synthetic feature weight
211
+ (and therefore on the intercept) intercept_scaling has to be increased.
212
+
213
+ multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
214
+ If the option chosen is 'ovr', then a binary problem is fit for each
215
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
216
+ across the entire probability distribution, *even when the data is
217
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
218
+ 'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
219
+ and otherwise selects 'multinomial'.
220
+
221
+ .. versionadded:: 0.18
222
+ Stochastic Average Gradient descent solver for 'multinomial' case.
223
+ .. versionchanged:: 0.22
224
+ Default changed from 'ovr' to 'auto' in 0.22.
225
+
226
+ random_state : int, RandomState instance, default=None
227
+ Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
228
+ data. See :term:`Glossary <random_state>` for details.
229
+
230
+ check_input : bool, default=True
231
+ If False, the input arrays X and y will not be checked.
232
+
233
+ max_squared_sum : float, default=None
234
+ Maximum squared sum of X over samples. Used only in SAG solver.
235
+ If None, it will be computed, going through all the samples.
236
+ The value should be precomputed to speed up cross validation.
237
+
238
+ sample_weight : array-like of shape(n_samples,), default=None
239
+ Array of weights that are assigned to individual samples.
240
+ If not provided, then each sample is given unit weight.
241
+
242
+ l1_ratio : float, default=None
243
+ The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
244
+ used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
245
+ to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
246
+ to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
247
+ combination of L1 and L2.
248
+
249
+ n_threads : int, default=1
250
+ Number of OpenMP threads to use.
251
+
252
+ Returns
253
+ -------
254
+ coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
255
+ List of coefficients for the Logistic Regression model. If
256
+ fit_intercept is set to True then the second dimension will be
257
+ n_features + 1, where the last item represents the intercept. For
258
+ ``multi_class='multinomial'``, the shape is (n_classes, n_cs,
259
+ n_features) or (n_classes, n_cs, n_features + 1).
260
+
261
+ Cs : ndarray
262
+ Grid of Cs used for cross-validation.
263
+
264
+ n_iter : array of shape (n_cs,)
265
+ Actual number of iteration for each Cs.
266
+
267
+ Notes
268
+ -----
269
+ You might get slightly different results with the solver liblinear than
270
+ with the others since this uses LIBLINEAR which penalizes the intercept.
271
+
272
+ .. versionchanged:: 0.19
273
+ The "copy" parameter was removed.
274
+ """
275
+ if isinstance(Cs, numbers.Integral):
276
+ Cs = np.logspace(-4, 4, Cs)
277
+
278
+ solver = _check_solver(solver, penalty, dual)
279
+
280
+ # Preprocessing.
281
+ if check_input:
282
+ X = check_array(
283
+ X,
284
+ accept_sparse="csr",
285
+ dtype=np.float64,
286
+ accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
287
+ )
288
+ y = check_array(y, ensure_2d=False, dtype=None)
289
+ check_consistent_length(X, y)
290
+ n_samples, n_features = X.shape
291
+
292
+ classes = np.unique(y)
293
+ random_state = check_random_state(random_state)
294
+
295
+ multi_class = _check_multi_class(multi_class, solver, len(classes))
296
+ if pos_class is None and multi_class != "multinomial":
297
+ if classes.size > 2:
298
+ raise ValueError("To fit OvR, use the pos_class argument")
299
+ # np.unique(y) gives labels in sorted order.
300
+ pos_class = classes[1]
301
+
302
+ if sample_weight is not None or class_weight is not None:
303
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True)
304
+
305
+ # If class_weights is a dict (provided by the user), the weights
306
+ # are assigned to the original labels. If it is "balanced", then
307
+ # the class_weights are assigned after masking the labels with an OvR.
308
+ le = LabelEncoder()
309
+ if isinstance(class_weight, dict) or (
310
+ multi_class == "multinomial" and class_weight is not None
311
+ ):
312
+ class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
313
+ sample_weight *= class_weight_[le.fit_transform(y)]
314
+
315
+ # For doing an OvR, we need to mask the labels first. For the
316
+ # multinomial case this is not necessary.
317
+ if multi_class == "ovr":
318
+ w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
319
+ mask = y == pos_class
320
+ y_bin = np.ones(y.shape, dtype=X.dtype)
321
+ if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
322
+ # HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead
323
+ # of in [-1, 1].
324
+ mask_classes = np.array([0, 1])
325
+ y_bin[~mask] = 0.0
326
+ else:
327
+ mask_classes = np.array([-1, 1])
328
+ y_bin[~mask] = -1.0
329
+
330
+ # for compute_class_weight
331
+ if class_weight == "balanced":
332
+ class_weight_ = compute_class_weight(
333
+ class_weight, classes=mask_classes, y=y_bin
334
+ )
335
+ sample_weight *= class_weight_[le.fit_transform(y_bin)]
336
+
337
+ else:
338
+ if solver in ["sag", "saga", "lbfgs", "newton-cg"]:
339
+ # SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder,
340
+ # not LabelBinarizer, i.e. y as a 1d-array of integers.
341
+ # LabelEncoder also saves memory compared to LabelBinarizer, especially
342
+ # when n_classes is large.
343
+ le = LabelEncoder()
344
+ Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
345
+ else:
346
+ # For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded.
347
+ lbin = LabelBinarizer()
348
+ Y_multi = lbin.fit_transform(y)
349
+ if Y_multi.shape[1] == 1:
350
+ Y_multi = np.hstack([1 - Y_multi, Y_multi])
351
+
352
+ w0 = np.zeros(
353
+ (classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype
354
+ )
355
+
356
+ # IMPORTANT NOTE:
357
+ # All solvers relying on LinearModelLoss need to scale the penalty with n_samples
358
+ # or the sum of sample weights because the implemented logistic regression
359
+ # objective here is (unfortunately)
360
+ # C * sum(pointwise_loss) + penalty
361
+ # instead of (as LinearModelLoss does)
362
+ # mean(pointwise_loss) + 1/C * penalty
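# Editor's note (hedged, not part of the committed file): dividing the first
# objective by C * sum(sample_weight) recovers the second one with
#     l2_reg_strength = 1.0 / (C * sw_sum),
# which is exactly the value handed to LinearModelLoss in the solver loop below.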
363
+ if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
364
+ # This needs to be calculated after sample_weight is multiplied by
365
+ # class_weight. It is even tested that passing class_weight is equivalent to
366
+ # passing sample_weights according to class_weight.
367
+ sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
368
+
369
+ if coef is not None:
370
+ # it must work both with and without the bias term
371
+ if multi_class == "ovr":
372
+ if coef.size not in (n_features, w0.size):
373
+ raise ValueError(
374
+ "Initialization coef is of shape %d, expected shape %d or %d"
375
+ % (coef.size, n_features, w0.size)
376
+ )
377
+ w0[: coef.size] = coef
378
+ else:
379
+ # For binary problems coef.shape[0] should be 1, otherwise it
380
+ # should be classes.size.
381
+ n_classes = classes.size
382
+ if n_classes == 2:
383
+ n_classes = 1
384
+
385
+ if coef.shape[0] != n_classes or coef.shape[1] not in (
386
+ n_features,
387
+ n_features + 1,
388
+ ):
389
+ raise ValueError(
390
+ "Initialization coef is of shape (%d, %d), expected "
391
+ "shape (%d, %d) or (%d, %d)"
392
+ % (
393
+ coef.shape[0],
394
+ coef.shape[1],
395
+ classes.size,
396
+ n_features,
397
+ classes.size,
398
+ n_features + 1,
399
+ )
400
+ )
401
+
402
+ if n_classes == 1:
403
+ w0[0, : coef.shape[1]] = -coef
404
+ w0[1, : coef.shape[1]] = coef
405
+ else:
406
+ w0[:, : coef.shape[1]] = coef
407
+
408
+ if multi_class == "multinomial":
409
+ if solver in ["lbfgs", "newton-cg"]:
410
+ # scipy.optimize.minimize and newton-cg accept only ravelled parameters,
411
+ # i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and
412
+ # reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F").
413
+ # As w0 is F-contiguous, ravel(order="F") also avoids a copy.
414
+ w0 = w0.ravel(order="F")
415
+ loss = LinearModelLoss(
416
+ base_loss=HalfMultinomialLoss(n_classes=classes.size),
417
+ fit_intercept=fit_intercept,
418
+ )
419
+ target = Y_multi
420
+ if solver == "lbfgs":
421
+ func = loss.loss_gradient
422
+ elif solver == "newton-cg":
423
+ func = loss.loss
424
+ grad = loss.gradient
425
+ hess = loss.gradient_hessian_product # hess = [gradient, hessp]
426
+ warm_start_sag = {"coef": w0.T}
427
+ else:
428
+ target = y_bin
429
+ if solver == "lbfgs":
430
+ loss = LinearModelLoss(
431
+ base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
432
+ )
433
+ func = loss.loss_gradient
434
+ elif solver == "newton-cg":
435
+ loss = LinearModelLoss(
436
+ base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
437
+ )
438
+ func = loss.loss
439
+ grad = loss.gradient
440
+ hess = loss.gradient_hessian_product # hess = [gradient, hessp]
441
+ elif solver == "newton-cholesky":
442
+ loss = LinearModelLoss(
443
+ base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
444
+ )
445
+ warm_start_sag = {"coef": np.expand_dims(w0, axis=1)}
446
+
447
+ coefs = list()
448
+ n_iter = np.zeros(len(Cs), dtype=np.int32)
449
+ for i, C in enumerate(Cs):
450
+ if solver == "lbfgs":
451
+ l2_reg_strength = 1.0 / (C * sw_sum)
452
+ iprint = [-1, 50, 1, 100, 101][
453
+ np.searchsorted(np.array([0, 1, 2, 3]), verbose)
454
+ ]
455
+ opt_res = optimize.minimize(
456
+ func,
457
+ w0,
458
+ method="L-BFGS-B",
459
+ jac=True,
460
+ args=(X, target, sample_weight, l2_reg_strength, n_threads),
461
+ options={
462
+ "maxiter": max_iter,
463
+ "maxls": 50, # default is 20
464
+ "iprint": iprint,
465
+ "gtol": tol,
466
+ "ftol": 64 * np.finfo(float).eps,
467
+ },
468
+ )
469
+ n_iter_i = _check_optimize_result(
470
+ solver,
471
+ opt_res,
472
+ max_iter,
473
+ extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
474
+ )
475
+ w0, loss = opt_res.x, opt_res.fun
476
+ elif solver == "newton-cg":
477
+ l2_reg_strength = 1.0 / (C * sw_sum)
478
+ args = (X, target, sample_weight, l2_reg_strength, n_threads)
479
+ w0, n_iter_i = _newton_cg(
480
+ hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol
481
+ )
482
+ elif solver == "newton-cholesky":
483
+ l2_reg_strength = 1.0 / (C * sw_sum)
484
+ sol = NewtonCholeskySolver(
485
+ coef=w0,
486
+ linear_loss=loss,
487
+ l2_reg_strength=l2_reg_strength,
488
+ tol=tol,
489
+ max_iter=max_iter,
490
+ n_threads=n_threads,
491
+ verbose=verbose,
492
+ )
493
+ w0 = sol.solve(X=X, y=target, sample_weight=sample_weight)
494
+ n_iter_i = sol.iteration
495
+ elif solver == "liblinear":
496
+ (
497
+ coef_,
498
+ intercept_,
499
+ n_iter_i,
500
+ ) = _fit_liblinear(
501
+ X,
502
+ target,
503
+ C,
504
+ fit_intercept,
505
+ intercept_scaling,
506
+ None,
507
+ penalty,
508
+ dual,
509
+ verbose,
510
+ max_iter,
511
+ tol,
512
+ random_state,
513
+ sample_weight=sample_weight,
514
+ )
515
+ if fit_intercept:
516
+ w0 = np.concatenate([coef_.ravel(), intercept_])
517
+ else:
518
+ w0 = coef_.ravel()
519
+ # n_iter_i is an array for each class. However, `target` is always encoded
520
+ # in {-1, 1}, so we only take the first element of n_iter_i.
521
+ n_iter_i = n_iter_i.item()
522
+
523
+ elif solver in ["sag", "saga"]:
524
+ if multi_class == "multinomial":
525
+ target = target.astype(X.dtype, copy=False)
526
+ loss = "multinomial"
527
+ else:
528
+ loss = "log"
529
+ # alpha is for L2-norm, beta is for L1-norm
530
+ if penalty == "l1":
531
+ alpha = 0.0
532
+ beta = 1.0 / C
533
+ elif penalty == "l2":
534
+ alpha = 1.0 / C
535
+ beta = 0.0
536
+ else: # Elastic-Net penalty
537
+ alpha = (1.0 / C) * (1 - l1_ratio)
538
+ beta = (1.0 / C) * l1_ratio
539
+
540
+ w0, n_iter_i, warm_start_sag = sag_solver(
541
+ X,
542
+ target,
543
+ sample_weight,
544
+ loss,
545
+ alpha,
546
+ beta,
547
+ max_iter,
548
+ tol,
549
+ verbose,
550
+ random_state,
551
+ False,
552
+ max_squared_sum,
553
+ warm_start_sag,
554
+ is_saga=(solver == "saga"),
555
+ )
556
+
557
+ else:
558
+ raise ValueError(
559
+ "solver must be one of {'liblinear', 'lbfgs', "
560
+ "'newton-cg', 'sag'}, got '%s' instead" % solver
561
+ )
562
+
563
+ if multi_class == "multinomial":
564
+ n_classes = max(2, classes.size)
565
+ if solver in ["lbfgs", "newton-cg"]:
566
+ multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
567
+ else:
568
+ multi_w0 = w0
569
+ if n_classes == 2:
570
+ multi_w0 = multi_w0[1][np.newaxis, :]
571
+ coefs.append(multi_w0.copy())
572
+ else:
573
+ coefs.append(w0.copy())
574
+
575
+ n_iter[i] = n_iter_i
576
+
577
+ return np.array(coefs), np.array(Cs), n_iter
578
+
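A hedged usage sketch (editorial) of the private path helper defined above; the dataset and variable names are illustrative, and the import again assumes the usual sklearn.linear_model._logistic location.

from sklearn.datasets import make_classification
from sklearn.linear_model._logistic import _logistic_regression_path  # private; assumption

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
# Warm-started path over 5 values of C taken from np.logspace(-4, 4, 5).
coefs, Cs, n_iter = _logistic_regression_path(X, y, Cs=5, solver="lbfgs")
print(coefs.shape)  # (5, 6): one row per C, n_features + 1 columns (intercept last)
print(Cs)           # the evaluated regularization grid
print(n_iter)       # iterations actually used for each C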
579
+
580
+ # helper function for LogisticCV
581
+ def _log_reg_scoring_path(
582
+ X,
583
+ y,
584
+ train,
585
+ test,
586
+ *,
587
+ pos_class,
588
+ Cs,
589
+ scoring,
590
+ fit_intercept,
591
+ max_iter,
592
+ tol,
593
+ class_weight,
594
+ verbose,
595
+ solver,
596
+ penalty,
597
+ dual,
598
+ intercept_scaling,
599
+ multi_class,
600
+ random_state,
601
+ max_squared_sum,
602
+ sample_weight,
603
+ l1_ratio,
604
+ score_params,
605
+ ):
606
+ """Computes scores across logistic_regression_path
607
+
608
+ Parameters
609
+ ----------
610
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
611
+ Training data.
612
+
613
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
614
+ Target labels.
615
+
616
+ train : list of indices
617
+ The indices of the train set.
618
+
619
+ test : list of indices
620
+ The indices of the test set.
621
+
622
+ pos_class : int
623
+ The class with respect to which we perform a one-vs-all fit.
624
+ If None, then it is assumed that the given problem is binary.
625
+
626
+ Cs : int or list of floats
627
+ Each of the values in Cs describes the inverse of
628
+ regularization strength. If Cs is an int, then a grid of Cs
629
+ values is chosen in a logarithmic scale between 1e-4 and 1e4.
630
+
631
+ scoring : callable
632
+ A string (see model evaluation documentation) or
633
+ a scorer callable object / function with signature
634
+ ``scorer(estimator, X, y)``. For a list of scoring functions
635
+ that can be used, look at :mod:`sklearn.metrics`.
636
+
637
+ fit_intercept : bool
638
+ If False, then the bias term is set to zero. Else the last
639
+ term of each coef_ gives us the intercept.
640
+
641
+ max_iter : int
642
+ Maximum number of iterations for the solver.
643
+
644
+ tol : float
645
+ Tolerance for stopping criteria.
646
+
647
+ class_weight : dict or 'balanced'
648
+ Weights associated with classes in the form ``{class_label: weight}``.
649
+ If not given, all classes are supposed to have weight one.
650
+
651
+ The "balanced" mode uses the values of y to automatically adjust
652
+ weights inversely proportional to class frequencies in the input data
653
+ as ``n_samples / (n_classes * np.bincount(y))``
654
+
655
+ Note that these weights will be multiplied with sample_weight (passed
656
+ through the fit method) if sample_weight is specified.
657
+
658
+ verbose : int
659
+ For the liblinear and lbfgs solvers set verbose to any positive
660
+ number for verbosity.
661
+
662
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}
663
+ Decides which solver to use.
664
+
665
+ penalty : {'l1', 'l2', 'elasticnet'}
666
+ Used to specify the norm used in the penalization. The 'newton-cg',
667
+ 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
668
+ only supported by the 'saga' solver.
669
+
670
+ dual : bool
671
+ Dual or primal formulation. Dual formulation is only implemented for
672
+ l2 penalty with liblinear solver. Prefer dual=False when
673
+ n_samples > n_features.
674
+
675
+ intercept_scaling : float
676
+ Useful only when the solver 'liblinear' is used
677
+ and self.fit_intercept is set to True. In this case, x becomes
678
+ [x, self.intercept_scaling],
679
+ i.e. a "synthetic" feature with constant value equal to
680
+ intercept_scaling is appended to the instance vector.
681
+ The intercept becomes intercept_scaling * synthetic feature weight
682
+ Note! the synthetic feature weight is subject to l1/l2 regularization
683
+ as all other features.
684
+ To lessen the effect of regularization on synthetic feature weight
685
+ (and therefore on the intercept) intercept_scaling has to be increased.
686
+
687
+ multi_class : {'auto', 'ovr', 'multinomial'}
688
+ If the option chosen is 'ovr', then a binary problem is fit for each
689
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
690
+ across the entire probability distribution, *even when the data is
691
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
692
+
693
+ random_state : int, RandomState instance
694
+ Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
695
+ data. See :term:`Glossary <random_state>` for details.
696
+
697
+ max_squared_sum : float
698
+ Maximum squared sum of X over samples. Used only in SAG solver.
699
+ If None, it will be computed, going through all the samples.
700
+ The value should be precomputed to speed up cross validation.
701
+
702
+ sample_weight : array-like of shape(n_samples,)
703
+ Array of weights that are assigned to individual samples.
704
+ If not provided, then each sample is given unit weight.
705
+
706
+ l1_ratio : float
707
+ The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
708
+ used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
709
+ to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
710
+ to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
711
+ combination of L1 and L2.
712
+
713
+ score_params : dict
714
+ Parameters to pass to the `score` method of the underlying scorer.
715
+
716
+ Returns
717
+ -------
718
+ coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
719
+ List of coefficients for the Logistic Regression model. If
720
+ fit_intercept is set to True then the second dimension will be
721
+ n_features + 1, where the last item represents the intercept.
722
+
723
+ Cs : ndarray
724
+ Grid of Cs used for cross-validation.
725
+
726
+ scores : ndarray of shape (n_cs,)
727
+ Scores obtained for each Cs.
728
+
729
+ n_iter : ndarray of shape(n_cs,)
730
+ Actual number of iteration for each Cs.
731
+ """
732
+ X_train = X[train]
733
+ X_test = X[test]
734
+ y_train = y[train]
735
+ y_test = y[test]
736
+
737
+ if sample_weight is not None:
738
+ sample_weight = _check_sample_weight(sample_weight, X)
739
+ sample_weight = sample_weight[train]
740
+
741
+ coefs, Cs, n_iter = _logistic_regression_path(
742
+ X_train,
743
+ y_train,
744
+ Cs=Cs,
745
+ l1_ratio=l1_ratio,
746
+ fit_intercept=fit_intercept,
747
+ solver=solver,
748
+ max_iter=max_iter,
749
+ class_weight=class_weight,
750
+ pos_class=pos_class,
751
+ multi_class=multi_class,
752
+ tol=tol,
753
+ verbose=verbose,
754
+ dual=dual,
755
+ penalty=penalty,
756
+ intercept_scaling=intercept_scaling,
757
+ random_state=random_state,
758
+ check_input=False,
759
+ max_squared_sum=max_squared_sum,
760
+ sample_weight=sample_weight,
761
+ )
762
+
763
+ log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
764
+
765
+ # The score method of Logistic Regression has a classes_ attribute.
766
+ if multi_class == "ovr":
767
+ log_reg.classes_ = np.array([-1, 1])
768
+ elif multi_class == "multinomial":
769
+ log_reg.classes_ = np.unique(y_train)
770
+ else:
771
+ raise ValueError(
772
+ "multi_class should be either multinomial or ovr, got %s" % multi_class
773
+ )
774
+
775
+ if pos_class is not None:
776
+ mask = y_test == pos_class
777
+ y_test = np.ones(y_test.shape, dtype=np.float64)
778
+ y_test[~mask] = -1.0
779
+
780
+ scores = list()
781
+
782
+ scoring = get_scorer(scoring)
783
+ for w in coefs:
784
+ if multi_class == "ovr":
785
+ w = w[np.newaxis, :]
786
+ if fit_intercept:
787
+ log_reg.coef_ = w[:, :-1]
788
+ log_reg.intercept_ = w[:, -1]
789
+ else:
790
+ log_reg.coef_ = w
791
+ log_reg.intercept_ = 0.0
792
+
793
+ if scoring is None:
794
+ scores.append(log_reg.score(X_test, y_test))
795
+ else:
796
+ score_params = score_params or {}
797
+ score_params = _check_method_params(X=X, params=score_params, indices=test)
798
+ scores.append(scoring(log_reg, X_test, y_test, **score_params))
799
+
800
+ return coefs, Cs, np.array(scores), n_iter
801
+
802
+
803
+ class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
804
+ """
805
+ Logistic Regression (aka logit, MaxEnt) classifier.
806
+
807
+ In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
808
+ scheme if the 'multi_class' option is set to 'ovr', and uses the
809
+ cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
810
+ (Currently the 'multinomial' option is supported only by the 'lbfgs',
811
+ 'sag', 'saga' and 'newton-cg' solvers.)
812
+
813
+ This class implements regularized logistic regression using the
814
+ 'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
815
+ that regularization is applied by default**. It can handle both dense
816
+ and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
817
+ floats for optimal performance; any other input format will be converted
818
+ (and copied).
819
+
820
+ The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
821
+ with primal formulation, or no regularization. The 'liblinear' solver
822
+ supports both L1 and L2 regularization, with a dual formulation only for
823
+ the L2 penalty. The Elastic-Net regularization is only supported by the
824
+ 'saga' solver.
825
+
826
+ Read more in the :ref:`User Guide <logistic_regression>`.
827
+
828
+ Parameters
829
+ ----------
830
+ penalty : {'l1', 'l2', 'elasticnet', None}, default='l2'
831
+ Specify the norm of the penalty:
832
+
833
+ - `None`: no penalty is added;
834
+ - `'l2'`: add an L2 penalty term and it is the default choice;
835
+ - `'l1'`: add an L1 penalty term;
836
+ - `'elasticnet'`: both L1 and L2 penalty terms are added.
837
+
838
+ .. warning::
839
+ Some penalties may not work with some solvers. See the parameter
840
+ `solver` below, to know the compatibility between the penalty and
841
+ solver.
842
+
843
+ .. versionadded:: 0.19
844
+ l1 penalty with SAGA solver (allowing 'multinomial' + L1)
845
+
846
+ dual : bool, default=False
847
+ Dual (constrained) or primal (regularized, see also
848
+ :ref:`this equation <regularized-logistic-loss>`) formulation. Dual formulation
849
+ is only implemented for l2 penalty with liblinear solver. Prefer dual=False when
850
+ n_samples > n_features.
851
+
852
+ tol : float, default=1e-4
853
+ Tolerance for stopping criteria.
854
+
855
+ C : float, default=1.0
856
+ Inverse of regularization strength; must be a positive float.
857
+ Like in support vector machines, smaller values specify stronger
858
+ regularization.
859
+
860
+ fit_intercept : bool, default=True
861
+ Specifies if a constant (a.k.a. bias or intercept) should be
862
+ added to the decision function.
863
+
864
+ intercept_scaling : float, default=1
865
+ Useful only when the solver 'liblinear' is used
866
+ and self.fit_intercept is set to True. In this case, x becomes
867
+ [x, self.intercept_scaling],
868
+ i.e. a "synthetic" feature with constant value equal to
869
+ intercept_scaling is appended to the instance vector.
870
+ The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
871
+
872
+ Note! the synthetic feature weight is subject to l1/l2 regularization
873
+ as all other features.
874
+ To lessen the effect of regularization on synthetic feature weight
875
+ (and therefore on the intercept) intercept_scaling has to be increased.
876
+
877
+ class_weight : dict or 'balanced', default=None
878
+ Weights associated with classes in the form ``{class_label: weight}``.
879
+ If not given, all classes are supposed to have weight one.
880
+
881
+ The "balanced" mode uses the values of y to automatically adjust
882
+ weights inversely proportional to class frequencies in the input data
883
+ as ``n_samples / (n_classes * np.bincount(y))``.
884
+
885
+ Note that these weights will be multiplied with sample_weight (passed
886
+ through the fit method) if sample_weight is specified.
887
+
888
+ .. versionadded:: 0.17
889
+ *class_weight='balanced'*
890
+
891
+ random_state : int, RandomState instance, default=None
892
+ Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
893
+ data. See :term:`Glossary <random_state>` for details.
894
+
895
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
896
+ default='lbfgs'
897
+
898
+ Algorithm to use in the optimization problem. Default is 'lbfgs'.
899
+ To choose a solver, you might want to consider the following aspects:
900
+
901
+ - For small datasets, 'liblinear' is a good choice, whereas 'sag'
902
+ and 'saga' are faster for large ones;
903
+ - For multiclass problems, only 'newton-cg', 'sag', 'saga' and
904
+ 'lbfgs' handle multinomial loss;
905
+ - 'liblinear' is limited to one-versus-rest schemes.
906
+ - 'newton-cholesky' is a good choice for `n_samples` >> `n_features`,
907
+ especially with one-hot encoded categorical features with rare
908
+ categories. Note that it is limited to binary classification and the
909
+ one-versus-rest reduction for multiclass classification. Be aware that
910
+ the memory usage of this solver has a quadratic dependency on
911
+ `n_features` because it explicitly computes the Hessian matrix.
912
+
913
+ .. warning::
914
+ The choice of the algorithm depends on the penalty chosen.
915
+ Supported penalties by solver:
916
+
917
+ - 'lbfgs' - ['l2', None]
918
+ - 'liblinear' - ['l1', 'l2']
919
+ - 'newton-cg' - ['l2', None]
920
+ - 'newton-cholesky' - ['l2', None]
921
+ - 'sag' - ['l2', None]
922
+ - 'saga' - ['elasticnet', 'l1', 'l2', None]
923
+
924
+ .. note::
925
+ 'sag' and 'saga' fast convergence is only guaranteed on features
926
+ with approximately the same scale. You can preprocess the data with
927
+ a scaler from :mod:`sklearn.preprocessing`.
928
+
929
+ .. seealso::
930
+ Refer to the User Guide for more information regarding
931
+ :class:`LogisticRegression` and more specifically the
932
+ :ref:`Table <Logistic_regression>`
933
+ summarizing solver/penalty supports.
934
+
935
+ .. versionadded:: 0.17
936
+ Stochastic Average Gradient descent solver.
937
+ .. versionadded:: 0.19
938
+ SAGA solver.
939
+ .. versionchanged:: 0.22
940
+ The default solver changed from 'liblinear' to 'lbfgs' in 0.22.
941
+ .. versionadded:: 1.2
942
+ newton-cholesky solver.
943
+
944
+ max_iter : int, default=100
945
+ Maximum number of iterations taken for the solvers to converge.
946
+
947
+ multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
948
+ If the option chosen is 'ovr', then a binary problem is fit for each
949
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
950
+ across the entire probability distribution, *even when the data is
951
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
952
+ 'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
953
+ and otherwise selects 'multinomial'.
954
+
955
+ .. versionadded:: 0.18
956
+ Stochastic Average Gradient descent solver for 'multinomial' case.
957
+ .. versionchanged:: 0.22
958
+ Default changed from 'ovr' to 'auto' in 0.22.
959
+
960
+ verbose : int, default=0
961
+ For the liblinear and lbfgs solvers set verbose to any positive
962
+ number for verbosity.
963
+
964
+ warm_start : bool, default=False
965
+ When set to True, reuse the solution of the previous call to fit as
966
+ initialization, otherwise, just erase the previous solution.
967
+ Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
968
+
969
+ .. versionadded:: 0.17
970
+ *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
971
+
972
+ n_jobs : int, default=None
973
+ Number of CPU cores used when parallelizing over classes if
974
+ multi_class='ovr'. This parameter is ignored when the ``solver`` is
975
+ set to 'liblinear' regardless of whether 'multi_class' is specified or
976
+ not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
977
+ context. ``-1`` means using all processors.
978
+ See :term:`Glossary <n_jobs>` for more details.
979
+
980
+ l1_ratio : float, default=None
981
+ The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
982
+ used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
983
+ to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
984
+ to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
985
+ combination of L1 and L2.
986
+
987
+ Attributes
988
+ ----------
989
+
990
+ classes_ : ndarray of shape (n_classes, )
991
+ A list of class labels known to the classifier.
992
+
993
+ coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
994
+ Coefficient of the features in the decision function.
995
+
996
+ `coef_` is of shape (1, n_features) when the given problem is binary.
997
+ In particular, when `multi_class='multinomial'`, `coef_` corresponds
998
+ to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
999
+
1000
+ intercept_ : ndarray of shape (1,) or (n_classes,)
1001
+ Intercept (a.k.a. bias) added to the decision function.
1002
+
1003
+ If `fit_intercept` is set to False, the intercept is set to zero.
1004
+ `intercept_` is of shape (1,) when the given problem is binary.
1005
+ In particular, when `multi_class='multinomial'`, `intercept_`
1006
+ corresponds to outcome 1 (True) and `-intercept_` corresponds to
1007
+ outcome 0 (False).
1008
+
1009
+ n_features_in_ : int
1010
+ Number of features seen during :term:`fit`.
1011
+
1012
+ .. versionadded:: 0.24
1013
+
1014
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1015
+ Names of features seen during :term:`fit`. Defined only when `X`
1016
+ has feature names that are all strings.
1017
+
1018
+ .. versionadded:: 1.0
1019
+
1020
+ n_iter_ : ndarray of shape (n_classes,) or (1, )
1021
+ Actual number of iterations for all classes. If binary or multinomial,
1022
+ it returns only 1 element. For liblinear solver, only the maximum
1023
+ number of iteration across all classes is given.
1024
+
1025
+ .. versionchanged:: 0.20
1026
+
1027
+ In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
1028
+ ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
1029
+
1030
+ See Also
1031
+ --------
1032
+ SGDClassifier : Incrementally trained logistic regression (when given
1033
+ the parameter ``loss="log_loss"``).
1034
+ LogisticRegressionCV : Logistic regression with built-in cross validation.
1035
+
1036
+ Notes
1037
+ -----
1038
+ The underlying C implementation uses a random number generator to
1039
+ select features when fitting the model. It is thus not uncommon
1040
+ to have slightly different results for the same input data. If
1041
+ that happens, try with a smaller tol parameter.
1042
+
1043
+ Predict output may not match that of standalone liblinear in certain
1044
+ cases. See :ref:`differences from liblinear <liblinear_differences>`
1045
+ in the narrative documentation.
1046
+
1047
+ References
1048
+ ----------
1049
+
1050
+ L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
1051
+ Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
1052
+ http://users.iems.northwestern.edu/~nocedal/lbfgsb.html
1053
+
1054
+ LIBLINEAR -- A Library for Large Linear Classification
1055
+ https://www.csie.ntu.edu.tw/~cjlin/liblinear/
1056
+
1057
+ SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
1058
+ Minimizing Finite Sums with the Stochastic Average Gradient
1059
+ https://hal.inria.fr/hal-00860051/document
1060
+
1061
+ SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
1062
+ :arxiv:`"SAGA: A Fast Incremental Gradient Method With Support
1063
+ for Non-Strongly Convex Composite Objectives" <1407.0202>`
1064
+
1065
+ Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
1066
+ methods for logistic regression and maximum entropy models.
1067
+ Machine Learning 85(1-2):41-75.
1068
+ https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
1069
+
1070
+ Examples
1071
+ --------
1072
+ >>> from sklearn.datasets import load_iris
1073
+ >>> from sklearn.linear_model import LogisticRegression
1074
+ >>> X, y = load_iris(return_X_y=True)
1075
+ >>> clf = LogisticRegression(random_state=0).fit(X, y)
1076
+ >>> clf.predict(X[:2, :])
1077
+ array([0, 0])
1078
+ >>> clf.predict_proba(X[:2, :])
1079
+ array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
1080
+ [9.7...e-01, 2.8...e-02, ...e-08]])
1081
+ >>> clf.score(X, y)
1082
+ 0.97...
1083
+ """
1084
+
1085
+ _parameter_constraints: dict = {
1086
+ "penalty": [StrOptions({"l1", "l2", "elasticnet"}), None],
1087
+ "dual": ["boolean"],
1088
+ "tol": [Interval(Real, 0, None, closed="left")],
1089
+ "C": [Interval(Real, 0, None, closed="right")],
1090
+ "fit_intercept": ["boolean"],
1091
+ "intercept_scaling": [Interval(Real, 0, None, closed="neither")],
1092
+ "class_weight": [dict, StrOptions({"balanced"}), None],
1093
+ "random_state": ["random_state"],
1094
+ "solver": [
1095
+ StrOptions(
1096
+ {"lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga"}
1097
+ )
1098
+ ],
1099
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
1100
+ "multi_class": [StrOptions({"auto", "ovr", "multinomial"})],
1101
+ "verbose": ["verbose"],
1102
+ "warm_start": ["boolean"],
1103
+ "n_jobs": [None, Integral],
1104
+ "l1_ratio": [Interval(Real, 0, 1, closed="both"), None],
1105
+ }
1106
+
1107
+ def __init__(
1108
+ self,
1109
+ penalty="l2",
1110
+ *,
1111
+ dual=False,
1112
+ tol=1e-4,
1113
+ C=1.0,
1114
+ fit_intercept=True,
1115
+ intercept_scaling=1,
1116
+ class_weight=None,
1117
+ random_state=None,
1118
+ solver="lbfgs",
1119
+ max_iter=100,
1120
+ multi_class="auto",
1121
+ verbose=0,
1122
+ warm_start=False,
1123
+ n_jobs=None,
1124
+ l1_ratio=None,
1125
+ ):
1126
+ self.penalty = penalty
1127
+ self.dual = dual
1128
+ self.tol = tol
1129
+ self.C = C
1130
+ self.fit_intercept = fit_intercept
1131
+ self.intercept_scaling = intercept_scaling
1132
+ self.class_weight = class_weight
1133
+ self.random_state = random_state
1134
+ self.solver = solver
1135
+ self.max_iter = max_iter
1136
+ self.multi_class = multi_class
1137
+ self.verbose = verbose
1138
+ self.warm_start = warm_start
1139
+ self.n_jobs = n_jobs
1140
+ self.l1_ratio = l1_ratio
1141
+
1142
+ @_fit_context(prefer_skip_nested_validation=True)
1143
+ def fit(self, X, y, sample_weight=None):
1144
+ """
1145
+ Fit the model according to the given training data.
1146
+
1147
+ Parameters
1148
+ ----------
1149
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1150
+ Training vector, where `n_samples` is the number of samples and
1151
+ `n_features` is the number of features.
1152
+
1153
+ y : array-like of shape (n_samples,)
1154
+ Target vector relative to X.
1155
+
1156
+ sample_weight : array-like of shape (n_samples,) default=None
1157
+ Array of weights that are assigned to individual samples.
1158
+ If not provided, then each sample is given unit weight.
1159
+
1160
+ .. versionadded:: 0.17
1161
+ *sample_weight* support to LogisticRegression.
1162
+
1163
+ Returns
1164
+ -------
1165
+ self
1166
+ Fitted estimator.
1167
+
1168
+ Notes
1169
+ -----
1170
+ The SAGA solver supports both float64 and float32 arrays.
1171
+ """
1172
+ solver = _check_solver(self.solver, self.penalty, self.dual)
1173
+
1174
+ if self.penalty != "elasticnet" and self.l1_ratio is not None:
1175
+ warnings.warn(
1176
+ "l1_ratio parameter is only used when penalty is "
1177
+ "'elasticnet'. Got "
1178
+ "(penalty={})".format(self.penalty)
1179
+ )
1180
+
1181
+ if self.penalty == "elasticnet" and self.l1_ratio is None:
1182
+ raise ValueError("l1_ratio must be specified when penalty is elasticnet.")
1183
+
1184
+ if self.penalty is None:
1185
+ if self.C != 1.0: # default values
1186
+ warnings.warn(
1187
+ "Setting penalty=None will ignore the C and l1_ratio parameters"
1188
+ )
1189
+ # Note that check for l1_ratio is done right above
1190
+ C_ = np.inf
1191
+ penalty = "l2"
1192
+ else:
1193
+ C_ = self.C
1194
+ penalty = self.penalty
1195
+
1196
+ if solver == "lbfgs":
1197
+ _dtype = np.float64
1198
+ else:
1199
+ _dtype = [np.float64, np.float32]
1200
+
1201
+ X, y = self._validate_data(
1202
+ X,
1203
+ y,
1204
+ accept_sparse="csr",
1205
+ dtype=_dtype,
1206
+ order="C",
1207
+ accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
1208
+ )
1209
+ check_classification_targets(y)
1210
+ self.classes_ = np.unique(y)
1211
+
1212
+ multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_))
1213
+
1214
+ if solver == "liblinear":
1215
+ if effective_n_jobs(self.n_jobs) != 1:
1216
+ warnings.warn(
1217
+ "'n_jobs' > 1 does not have any effect when"
1218
+ " 'solver' is set to 'liblinear'. Got 'n_jobs'"
1219
+ " = {}.".format(effective_n_jobs(self.n_jobs))
1220
+ )
1221
+ self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
1222
+ X,
1223
+ y,
1224
+ self.C,
1225
+ self.fit_intercept,
1226
+ self.intercept_scaling,
1227
+ self.class_weight,
1228
+ self.penalty,
1229
+ self.dual,
1230
+ self.verbose,
1231
+ self.max_iter,
1232
+ self.tol,
1233
+ self.random_state,
1234
+ sample_weight=sample_weight,
1235
+ )
1236
+ return self
1237
+
1238
+ if solver in ["sag", "saga"]:
1239
+ max_squared_sum = row_norms(X, squared=True).max()
1240
+ else:
1241
+ max_squared_sum = None
1242
+
1243
+ n_classes = len(self.classes_)
1244
+ classes_ = self.classes_
1245
+ if n_classes < 2:
1246
+ raise ValueError(
1247
+ "This solver needs samples of at least 2 classes"
1248
+ " in the data, but the data contains only one"
1249
+ " class: %r"
1250
+ % classes_[0]
1251
+ )
1252
+
1253
+ if len(self.classes_) == 2:
1254
+ n_classes = 1
1255
+ classes_ = classes_[1:]
1256
+
1257
+ if self.warm_start:
1258
+ warm_start_coef = getattr(self, "coef_", None)
1259
+ else:
1260
+ warm_start_coef = None
1261
+ if warm_start_coef is not None and self.fit_intercept:
1262
+ warm_start_coef = np.append(
1263
+ warm_start_coef, self.intercept_[:, np.newaxis], axis=1
1264
+ )
1265
+
1266
+ # Hack so that we iterate only once for the multinomial case.
1267
+ if multi_class == "multinomial":
1268
+ classes_ = [None]
1269
+ warm_start_coef = [warm_start_coef]
1270
+ if warm_start_coef is None:
1271
+ warm_start_coef = [None] * n_classes
1272
+
1273
+ path_func = delayed(_logistic_regression_path)
1274
+
1275
+ # The SAG solver releases the GIL so it's more efficient to use
1276
+ # threads for this solver.
1277
+ if solver in ["sag", "saga"]:
1278
+ prefer = "threads"
1279
+ else:
1280
+ prefer = "processes"
1281
+
1282
+ # TODO: Refactor this to avoid joblib parallelism entirely when doing binary
1283
+ # and multinomial multiclass classification and use joblib only for the
1284
+ # one-vs-rest multiclass case.
1285
+ if (
1286
+ solver in ["lbfgs", "newton-cg", "newton-cholesky"]
1287
+ and len(classes_) == 1
1288
+ and effective_n_jobs(self.n_jobs) == 1
1289
+ ):
1290
+ # In the future, we would like n_threads = _openmp_effective_n_threads()
1291
+ # For the time being, we just do
1292
+ n_threads = 1
1293
+ else:
1294
+ n_threads = 1
1295
+
1296
+ fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
1297
+ path_func(
1298
+ X,
1299
+ y,
1300
+ pos_class=class_,
1301
+ Cs=[C_],
1302
+ l1_ratio=self.l1_ratio,
1303
+ fit_intercept=self.fit_intercept,
1304
+ tol=self.tol,
1305
+ verbose=self.verbose,
1306
+ solver=solver,
1307
+ multi_class=multi_class,
1308
+ max_iter=self.max_iter,
1309
+ class_weight=self.class_weight,
1310
+ check_input=False,
1311
+ random_state=self.random_state,
1312
+ coef=warm_start_coef_,
1313
+ penalty=penalty,
1314
+ max_squared_sum=max_squared_sum,
1315
+ sample_weight=sample_weight,
1316
+ n_threads=n_threads,
1317
+ )
1318
+ for class_, warm_start_coef_ in zip(classes_, warm_start_coef)
1319
+ )
1320
+
1321
+ fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
1322
+ self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
1323
+
1324
+ n_features = X.shape[1]
1325
+ if multi_class == "multinomial":
1326
+ self.coef_ = fold_coefs_[0][0]
1327
+ else:
1328
+ self.coef_ = np.asarray(fold_coefs_)
1329
+ self.coef_ = self.coef_.reshape(
1330
+ n_classes, n_features + int(self.fit_intercept)
1331
+ )
1332
+
1333
+ if self.fit_intercept:
1334
+ self.intercept_ = self.coef_[:, -1]
1335
+ self.coef_ = self.coef_[:, :-1]
1336
+ else:
1337
+ self.intercept_ = np.zeros(n_classes)
1338
+
1339
+ return self
1340
+
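A brief, hedged illustration (editorial) of the warm_start branch handled in fit above; the data and names are illustrative only.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=300, random_state=0)
clf = LogisticRegression(warm_start=True, max_iter=20)
clf.fit(X, y)  # first call starts from zeros (may warn if max_iter is too small)
clf.fit(X, y)  # second call reuses the previous coef_/intercept_ as initialization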
1341
+ def predict_proba(self, X):
1342
+ """
1343
+ Probability estimates.
1344
+
1345
+ The returned estimates for all classes are ordered by the
1346
+ label of classes.
1347
+
1348
+ For a multi_class problem, if multi_class is set to be "multinomial"
1349
+ the softmax function is used to find the predicted probability of
1350
+ each class.
1351
+ Else use a one-vs-rest approach, i.e. calculate the probability
1352
+ of each class assuming it to be positive using the logistic function,
1353
+ and normalize these values across all the classes.
1354
+
1355
+ Parameters
1356
+ ----------
1357
+ X : array-like of shape (n_samples, n_features)
1358
+ Vector to be scored, where `n_samples` is the number of samples and
1359
+ `n_features` is the number of features.
1360
+
1361
+ Returns
1362
+ -------
1363
+ T : array-like of shape (n_samples, n_classes)
1364
+ Returns the probability of the sample for each class in the model,
1365
+ where classes are ordered as they are in ``self.classes_``.
1366
+ """
1367
+ check_is_fitted(self)
1368
+
1369
+ ovr = self.multi_class in ["ovr", "warn"] or (
1370
+ self.multi_class == "auto"
1371
+ and (
1372
+ self.classes_.size <= 2
1373
+ or self.solver in ("liblinear", "newton-cholesky")
1374
+ )
1375
+ )
1376
+ if ovr:
1377
+ return super()._predict_proba_lr(X)
1378
+ else:
1379
+ decision = self.decision_function(X)
1380
+ if decision.ndim == 1:
1381
+ # Workaround for multi_class="multinomial" and binary outcomes
1382
+ # which requires softmax prediction with only a 1D decision.
1383
+ decision_2d = np.c_[-decision, decision]
1384
+ else:
1385
+ decision_2d = decision
1386
+ return softmax(decision_2d, copy=False)
1387
+
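A small, hedged numeric sketch (editorial) of the two probability paths above: the multinomial branch applies a softmax to the decision values, while the OvR branch (_predict_proba_lr) applies a per-class sigmoid and renormalizes. The array is a made-up decision output.

import numpy as np

z = np.array([[1.0, -0.5, 0.2]])                 # fake decision_function output
p_softmax = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
s = 1.0 / (1.0 + np.exp(-z))                     # per-class sigmoid (OvR view)
p_ovr = s / s.sum(axis=1, keepdims=True)
print(p_softmax.sum(axis=1), p_ovr.sum(axis=1))  # both rows sum to 1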
1388
+ def predict_log_proba(self, X):
1389
+ """
1390
+ Predict logarithm of probability estimates.
1391
+
1392
+ The returned estimates for all classes are ordered by the
1393
+ label of classes.
1394
+
1395
+ Parameters
1396
+ ----------
1397
+ X : array-like of shape (n_samples, n_features)
1398
+ Vector to be scored, where `n_samples` is the number of samples and
1399
+ `n_features` is the number of features.
1400
+
1401
+ Returns
1402
+ -------
1403
+ T : array-like of shape (n_samples, n_classes)
1404
+ Returns the log-probability of the sample for each class in the
1405
+ model, where classes are ordered as they are in ``self.classes_``.
1406
+ """
1407
+ return np.log(self.predict_proba(X))
1408
+
1409
+
1410
+ class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstimator):
1411
+ """Logistic Regression CV (aka logit, MaxEnt) classifier.
1412
+
1413
+ See glossary entry for :term:`cross-validation estimator`.
1414
+
1415
+ This class implements logistic regression using liblinear, newton-cg, sag
1416
+ or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
1417
+ regularization with primal formulation. The liblinear solver supports both
1418
+ L1 and L2 regularization, with a dual formulation only for the L2 penalty.
1419
+ Elastic-Net penalty is only supported by the saga solver.
1420
+
1421
+ For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter
1422
+ is selected by the cross-validator
1423
+ :class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed
1424
+ using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs'
1425
+ solvers can warm-start the coefficients (see :term:`Glossary<warm_start>`).
1426
+
1427
+ Read more in the :ref:`User Guide <logistic_regression>`.
1428
+
1429
+ Parameters
1430
+ ----------
1431
+ Cs : int or list of floats, default=10
1432
+ Each of the values in Cs describes the inverse of regularization
1433
+ strength. If Cs is an int, then a grid of Cs values is chosen
1434
+ in a logarithmic scale between 1e-4 and 1e4.
1435
+ Like in support vector machines, smaller values specify stronger
1436
+ regularization.
1437
+
1438
+ fit_intercept : bool, default=True
1439
+ Specifies if a constant (a.k.a. bias or intercept) should be
1440
+ added to the decision function.
1441
+
1442
+ cv : int or cross-validation generator, default=None
1443
+ The default cross-validation generator used is Stratified K-Folds.
1444
+ If an integer is provided, then it is the number of folds used.
1445
+ See the module :mod:`sklearn.model_selection` module for the
1446
+ list of possible cross-validation objects.
1447
+
1448
+ .. versionchanged:: 0.22
1449
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1450
+
1451
+ dual : bool, default=False
1452
+ Dual (constrained) or primal (regularized, see also
1453
+ :ref:`this equation <regularized-logistic-loss>`) formulation. Dual formulation
1454
+ is only implemented for l2 penalty with liblinear solver. Prefer dual=False when
1455
+ n_samples > n_features.
1456
+
1457
+ penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
1458
+ Specify the norm of the penalty:
1459
+
1460
+ - `'l2'`: add an L2 penalty term (used by default);
1461
+ - `'l1'`: add an L1 penalty term;
1462
+ - `'elasticnet'`: both L1 and L2 penalty terms are added.
1463
+
1464
+ .. warning::
1465
+ Some penalties may not work with some solvers. See the parameter
1466
+ `solver` below, to know the compatibility between the penalty and
1467
+ solver.
1468
+
1469
+ scoring : str or callable, default=None
1470
+ A string (see model evaluation documentation) or
1471
+ a scorer callable object / function with signature
1472
+ ``scorer(estimator, X, y)``. For a list of scoring functions
1473
+ that can be used, look at :mod:`sklearn.metrics`. The
1474
+ default scoring option used is 'accuracy'.
1475
+
1476
+ solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
1477
+ default='lbfgs'
1478
+
1479
+ Algorithm to use in the optimization problem. Default is 'lbfgs'.
1480
+ To choose a solver, you might want to consider the following aspects:
1481
+
1482
+ - For small datasets, 'liblinear' is a good choice, whereas 'sag'
1483
+ and 'saga' are faster for large ones;
1484
+ - For multiclass problems, only 'newton-cg', 'sag', 'saga' and
1485
+ 'lbfgs' handle multinomial loss;
1486
+ - 'liblinear' might be slower in :class:`LogisticRegressionCV`
1487
+ because it does not handle warm-starting. 'liblinear' is
1488
+ limited to one-versus-rest schemes.
1489
+ - 'newton-cholesky' is a good choice for `n_samples` >> `n_features`,
1490
+ especially with one-hot encoded categorical features with rare
1491
+ categories. Note that it is limited to binary classification and the
1492
+ one-versus-rest reduction for multiclass classification. Be aware that
1493
+ the memory usage of this solver has a quadratic dependency on
1494
+ `n_features` because it explicitly computes the Hessian matrix.
1495
+
1496
+ .. warning::
1497
+ The choice of the algorithm depends on the penalty chosen.
1498
+ Supported penalties by solver:
1499
+
1500
+ - 'lbfgs' - ['l2']
1501
+ - 'liblinear' - ['l1', 'l2']
1502
+ - 'newton-cg' - ['l2']
1503
+ - 'newton-cholesky' - ['l2']
1504
+ - 'sag' - ['l2']
1505
+ - 'saga' - ['elasticnet', 'l1', 'l2']
1506
+
1507
+ .. note::
1508
+ 'sag' and 'saga' fast convergence is only guaranteed on features
1509
+ with approximately the same scale. You can preprocess the data with
1510
+ a scaler from :mod:`sklearn.preprocessing`.
1511
+
1512
+ .. versionadded:: 0.17
1513
+ Stochastic Average Gradient descent solver.
1514
+ .. versionadded:: 0.19
1515
+ SAGA solver.
1516
+ .. versionadded:: 1.2
1517
+ newton-cholesky solver.
1518
+
1519
+ tol : float, default=1e-4
1520
+ Tolerance for stopping criteria.
1521
+
1522
+ max_iter : int, default=100
1523
+ Maximum number of iterations of the optimization algorithm.
1524
+
1525
+ class_weight : dict or 'balanced', default=None
1526
+ Weights associated with classes in the form ``{class_label: weight}``.
1527
+ If not given, all classes are supposed to have weight one.
1528
+
1529
+ The "balanced" mode uses the values of y to automatically adjust
1530
+ weights inversely proportional to class frequencies in the input data
1531
+ as ``n_samples / (n_classes * np.bincount(y))``.
1532
+
1533
+ Note that these weights will be multiplied with sample_weight (passed
1534
+ through the fit method) if sample_weight is specified.
1535
+
1536
+ .. versionadded:: 0.17
1537
+ class_weight == 'balanced'
1538
+
1539
+ n_jobs : int, default=None
1540
+ Number of CPU cores used during the cross-validation loop.
1541
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1542
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1543
+ for more details.
1544
+
1545
+ verbose : int, default=0
1546
+ For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
1547
+ positive number for verbosity.
1548
+
1549
+ refit : bool, default=True
1550
+ If set to True, the scores are averaged across all folds, and the
1551
+ coefs and the C that correspond to the best score are taken, and a
1552
+ final refit is done using these parameters.
1553
+ Otherwise the coefs, intercepts and C that correspond to the
1554
+ best scores across folds are averaged.
1555
+
1556
+ intercept_scaling : float, default=1
1557
+ Useful only when the solver 'liblinear' is used
1558
+ and self.fit_intercept is set to True. In this case, x becomes
1559
+ [x, self.intercept_scaling],
1560
+ i.e. a "synthetic" feature with constant value equal to
1561
+ intercept_scaling is appended to the instance vector.
1562
+ The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
1563
+
1564
+ Note! The synthetic feature weight is subject to l1/l2 regularization
1565
+ like all other features.
1566
+ To lessen the effect of regularization on synthetic feature weight
1567
+ (and therefore on the intercept) intercept_scaling has to be increased.
1568
+
1569
+ multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
1570
+ If the option chosen is 'ovr', then a binary problem is fit for each
1571
+ label. For 'multinomial' the loss minimised is the multinomial loss fit
1572
+ across the entire probability distribution, *even when the data is
1573
+ binary*. 'multinomial' is unavailable when solver='liblinear'.
1574
+ 'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
1575
+ and otherwise selects 'multinomial'.
1576
+
1577
+ .. versionadded:: 0.18
1578
+ Stochastic Average Gradient descent solver for 'multinomial' case.
1579
+ .. versionchanged:: 0.22
1580
+ Default changed from 'ovr' to 'auto' in 0.22.
1581
+
1582
+ random_state : int, RandomState instance, default=None
1583
+ Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data.
1584
+ Note that this only applies to the solver and not the cross-validation
1585
+ generator. See :term:`Glossary <random_state>` for details.
1586
+
1587
+ l1_ratios : list of float, default=None
1588
+ The list of Elastic-Net mixing parameters, with ``0 <= l1_ratio <= 1``.
1589
+ Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
1590
+ using ``penalty='l2'``, while 1 is equivalent to using
1591
+ ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a combination
1592
+ of L1 and L2.
1593
+
1594
+ Attributes
1595
+ ----------
1596
+ classes_ : ndarray of shape (n_classes, )
1597
+ A list of class labels known to the classifier.
1598
+
1599
+ coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
1600
+ Coefficient of the features in the decision function.
1601
+
1602
+ `coef_` is of shape (1, n_features) when the given problem
1603
+ is binary.
1604
+
1605
+ intercept_ : ndarray of shape (1,) or (n_classes,)
1606
+ Intercept (a.k.a. bias) added to the decision function.
1607
+
1608
+ If `fit_intercept` is set to False, the intercept is set to zero.
1609
+ `intercept_` is of shape (1,) when the problem is binary.
1610
+
1611
+ Cs_ : ndarray of shape (n_cs)
1612
+ Array of C i.e. inverse of regularization parameter values used
1613
+ for cross-validation.
1614
+
1615
+ l1_ratios_ : ndarray of shape (n_l1_ratios)
1616
+ Array of l1_ratios used for cross-validation. If no l1_ratio is used
1617
+ (i.e. penalty is not 'elasticnet'), this is set to ``[None]``.
1618
+
1619
+ coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \
1620
+ (n_folds, n_cs, n_features + 1)
1621
+ dict with classes as the keys, and the path of coefficients obtained
1622
+ during cross-validating across each fold and then across each Cs
1623
+ after doing an OvR for the corresponding class as values.
1624
+ If the 'multi_class' option is set to 'multinomial', then
1625
+ the coefs_paths are the coefficients corresponding to each class.
1626
+ Each dict value has shape ``(n_folds, n_cs, n_features)`` or
1627
+ ``(n_folds, n_cs, n_features + 1)`` depending on whether the
1628
+ intercept is fit or not. If ``penalty='elasticnet'``, the shape is
1629
+ ``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
1630
+ ``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
1631
+
1632
+ scores_ : dict
1633
+ dict with classes as the keys, and the values as the
1634
+ grid of scores obtained during cross-validating each fold, after doing
1635
+ an OvR for the corresponding class. If the 'multi_class' option
1636
+ given is 'multinomial' then the same scores are repeated across
1637
+ all classes, since this is the multinomial class. Each dict value
1638
+ has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
1639
+ ``penalty='elasticnet'``.
1640
+
1641
+ C_ : ndarray of shape (n_classes,) or (n_classes - 1,)
1642
+ Array of C that maps to the best scores across every class. If refit is
1643
+ set to False, then for each class, the best C is the average of the
1644
+ C's that correspond to the best scores for each fold.
1645
+ `C_` is of shape (n_classes,) when the problem is binary.
1646
+
1647
+ l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,)
1648
+ Array of l1_ratio that maps to the best scores across every class. If
1649
+ refit is set to False, then for each class, the best l1_ratio is the
1650
+ average of the l1_ratio's that correspond to the best scores for each
1651
+ fold. `l1_ratio_` is of shape (n_classes,) when the problem is binary.
1652
+
1653
+ n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
1654
+ Actual number of iterations for all classes, folds and Cs.
1655
+ In the binary or multinomial cases, the first dimension is equal to 1.
1656
+ If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
1657
+ n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
1658
+
1659
+ n_features_in_ : int
1660
+ Number of features seen during :term:`fit`.
1661
+
1662
+ .. versionadded:: 0.24
1663
+
1664
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1665
+ Names of features seen during :term:`fit`. Defined only when `X`
1666
+ has feature names that are all strings.
1667
+
1668
+ .. versionadded:: 1.0
1669
+
1670
+ See Also
1671
+ --------
1672
+ LogisticRegression : Logistic regression without tuning the
1673
+ hyperparameter `C`.
1674
+
1675
+ Examples
1676
+ --------
1677
+ >>> from sklearn.datasets import load_iris
1678
+ >>> from sklearn.linear_model import LogisticRegressionCV
1679
+ >>> X, y = load_iris(return_X_y=True)
1680
+ >>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y)
1681
+ >>> clf.predict(X[:2, :])
1682
+ array([0, 0])
1683
+ >>> clf.predict_proba(X[:2, :]).shape
1684
+ (2, 3)
1685
+ >>> clf.score(X, y)
1686
+ 0.98...
1687
+ """
1688
+
1689
+ _parameter_constraints: dict = {**LogisticRegression._parameter_constraints}
1690
+
1691
+ for param in ["C", "warm_start", "l1_ratio"]:
1692
+ _parameter_constraints.pop(param)
1693
+
1694
+ _parameter_constraints.update(
1695
+ {
1696
+ "Cs": [Interval(Integral, 1, None, closed="left"), "array-like"],
1697
+ "cv": ["cv_object"],
1698
+ "scoring": [StrOptions(set(get_scorer_names())), callable, None],
1699
+ "l1_ratios": ["array-like", None],
1700
+ "refit": ["boolean"],
1701
+ "penalty": [StrOptions({"l1", "l2", "elasticnet"})],
1702
+ }
1703
+ )
1704
+
1705
+ def __init__(
1706
+ self,
1707
+ *,
1708
+ Cs=10,
1709
+ fit_intercept=True,
1710
+ cv=None,
1711
+ dual=False,
1712
+ penalty="l2",
1713
+ scoring=None,
1714
+ solver="lbfgs",
1715
+ tol=1e-4,
1716
+ max_iter=100,
1717
+ class_weight=None,
1718
+ n_jobs=None,
1719
+ verbose=0,
1720
+ refit=True,
1721
+ intercept_scaling=1.0,
1722
+ multi_class="auto",
1723
+ random_state=None,
1724
+ l1_ratios=None,
1725
+ ):
1726
+ self.Cs = Cs
1727
+ self.fit_intercept = fit_intercept
1728
+ self.cv = cv
1729
+ self.dual = dual
1730
+ self.penalty = penalty
1731
+ self.scoring = scoring
1732
+ self.tol = tol
1733
+ self.max_iter = max_iter
1734
+ self.class_weight = class_weight
1735
+ self.n_jobs = n_jobs
1736
+ self.verbose = verbose
1737
+ self.solver = solver
1738
+ self.refit = refit
1739
+ self.intercept_scaling = intercept_scaling
1740
+ self.multi_class = multi_class
1741
+ self.random_state = random_state
1742
+ self.l1_ratios = l1_ratios
1743
+
1744
+ @_fit_context(prefer_skip_nested_validation=True)
1745
+ def fit(self, X, y, sample_weight=None, **params):
1746
+ """Fit the model according to the given training data.
1747
+
1748
+ Parameters
1749
+ ----------
1750
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1751
+ Training vector, where `n_samples` is the number of samples and
1752
+ `n_features` is the number of features.
1753
+
1754
+ y : array-like of shape (n_samples,)
1755
+ Target vector relative to X.
1756
+
1757
+ sample_weight : array-like of shape (n_samples,), default=None
1758
+ Array of weights that are assigned to individual samples.
1759
+ If not provided, then each sample is given unit weight.
1760
+
1761
+ **params : dict
1762
+ Parameters to pass to the underlying splitter and scorer.
1763
+
1764
+ .. versionadded:: 1.4
1765
+
1766
+ Returns
1767
+ -------
1768
+ self : object
1769
+ Fitted LogisticRegressionCV estimator.
1770
+ """
1771
+ _raise_for_params(params, self, "fit")
1772
+
1773
+ solver = _check_solver(self.solver, self.penalty, self.dual)
1774
+
1775
+ if self.penalty == "elasticnet":
1776
+ if (
1777
+ self.l1_ratios is None
1778
+ or len(self.l1_ratios) == 0
1779
+ or any(
1780
+ (
1781
+ not isinstance(l1_ratio, numbers.Number)
1782
+ or l1_ratio < 0
1783
+ or l1_ratio > 1
1784
+ )
1785
+ for l1_ratio in self.l1_ratios
1786
+ )
1787
+ ):
1788
+ raise ValueError(
1789
+ "l1_ratios must be a list of numbers between "
1790
+ "0 and 1; got (l1_ratios=%r)"
1791
+ % self.l1_ratios
1792
+ )
1793
+ l1_ratios_ = self.l1_ratios
1794
+ else:
1795
+ if self.l1_ratios is not None:
1796
+ warnings.warn(
1797
+ "l1_ratios parameter is only used when penalty "
1798
+ "is 'elasticnet'. Got (penalty={})".format(self.penalty)
1799
+ )
1800
+
1801
+ l1_ratios_ = [None]
1802
+
1803
+ X, y = self._validate_data(
1804
+ X,
1805
+ y,
1806
+ accept_sparse="csr",
1807
+ dtype=np.float64,
1808
+ order="C",
1809
+ accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
1810
+ )
1811
+ check_classification_targets(y)
1812
+
1813
+ class_weight = self.class_weight
1814
+
1815
+ # Encode for string labels
1816
+ label_encoder = LabelEncoder().fit(y)
1817
+ y = label_encoder.transform(y)
1818
+ if isinstance(class_weight, dict):
1819
+ class_weight = {
1820
+ label_encoder.transform([cls])[0]: v for cls, v in class_weight.items()
1821
+ }
1822
+
1823
+ # The original class labels
1824
+ classes = self.classes_ = label_encoder.classes_
1825
+ encoded_labels = label_encoder.transform(label_encoder.classes_)
1826
+
1827
+ multi_class = _check_multi_class(self.multi_class, solver, len(classes))
1828
+
1829
+ if solver in ["sag", "saga"]:
1830
+ max_squared_sum = row_norms(X, squared=True).max()
1831
+ else:
1832
+ max_squared_sum = None
1833
+
1834
+ if _routing_enabled():
1835
+ routed_params = process_routing(
1836
+ self,
1837
+ "fit",
1838
+ sample_weight=sample_weight,
1839
+ **params,
1840
+ )
1841
+ else:
1842
+ routed_params = Bunch()
1843
+ routed_params.splitter = Bunch(split={})
1844
+ routed_params.scorer = Bunch(score=params)
1845
+ if sample_weight is not None:
1846
+ routed_params.scorer.score["sample_weight"] = sample_weight
1847
+
1848
+ # init cross-validation generator
1849
+ cv = check_cv(self.cv, y, classifier=True)
1850
+ folds = list(cv.split(X, y, **routed_params.splitter.split))
1851
+
1852
+ # Use the label encoded classes
1853
+ n_classes = len(encoded_labels)
1854
+
1855
+ if n_classes < 2:
1856
+ raise ValueError(
1857
+ "This solver needs samples of at least 2 classes"
1858
+ " in the data, but the data contains only one"
1859
+ " class: %r"
1860
+ % classes[0]
1861
+ )
1862
+
1863
+ if n_classes == 2:
1864
+ # OvR in case of binary problems is as good as fitting
1865
+ # the higher label
1866
+ n_classes = 1
1867
+ encoded_labels = encoded_labels[1:]
1868
+ classes = classes[1:]
1869
+
1870
+ # We need this hack to iterate only once over labels, in the case of
1871
+ # multi_class = multinomial, without changing the value of the labels.
1872
+ if multi_class == "multinomial":
1873
+ iter_encoded_labels = iter_classes = [None]
1874
+ else:
1875
+ iter_encoded_labels = encoded_labels
1876
+ iter_classes = classes
1877
+
1878
+ # compute the class weights for the entire dataset y
1879
+ if class_weight == "balanced":
1880
+ class_weight = compute_class_weight(
1881
+ class_weight, classes=np.arange(len(self.classes_)), y=y
1882
+ )
1883
+ class_weight = dict(enumerate(class_weight))
1884
+
1885
+ path_func = delayed(_log_reg_scoring_path)
1886
+
1887
+ # The SAG solver releases the GIL so it's more efficient to use
1888
+ # threads for this solver.
1889
+ if self.solver in ["sag", "saga"]:
1890
+ prefer = "threads"
1891
+ else:
1892
+ prefer = "processes"
1893
+
1894
+ fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
1895
+ path_func(
1896
+ X,
1897
+ y,
1898
+ train,
1899
+ test,
1900
+ pos_class=label,
1901
+ Cs=self.Cs,
1902
+ fit_intercept=self.fit_intercept,
1903
+ penalty=self.penalty,
1904
+ dual=self.dual,
1905
+ solver=solver,
1906
+ tol=self.tol,
1907
+ max_iter=self.max_iter,
1908
+ verbose=self.verbose,
1909
+ class_weight=class_weight,
1910
+ scoring=self.scoring,
1911
+ multi_class=multi_class,
1912
+ intercept_scaling=self.intercept_scaling,
1913
+ random_state=self.random_state,
1914
+ max_squared_sum=max_squared_sum,
1915
+ sample_weight=sample_weight,
1916
+ l1_ratio=l1_ratio,
1917
+ score_params=routed_params.scorer.score,
1918
+ )
1919
+ for label in iter_encoded_labels
1920
+ for train, test in folds
1921
+ for l1_ratio in l1_ratios_
1922
+ )
1923
+
1924
+ # _log_reg_scoring_path will output different shapes depending on the
1925
+ # multi_class param, so we need to reshape the outputs accordingly.
1926
+ # Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
1927
+ # rows are equal, so we just take the first one.
1928
+ # After reshaping,
1929
+ # - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
1930
+ # - coefs_paths is of shape
1931
+ # (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
1932
+ # - n_iter is of shape
1933
+ # (n_classes, n_folds, n_Cs . n_l1_ratios) or
1934
+ # (1, n_folds, n_Cs . n_l1_ratios)
1935
+ coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
1936
+ self.Cs_ = Cs[0]
1937
+ if multi_class == "multinomial":
1938
+ coefs_paths = np.reshape(
1939
+ coefs_paths,
1940
+ (len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1),
1941
+ )
1942
+ # equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
1943
+ # (1, 2, 0, 3))
1944
+ coefs_paths = np.swapaxes(coefs_paths, 0, 1)
1945
+ coefs_paths = np.swapaxes(coefs_paths, 0, 2)
1946
+ self.n_iter_ = np.reshape(
1947
+ n_iter_, (1, len(folds), len(self.Cs_) * len(l1_ratios_))
1948
+ )
1949
+ # repeat same scores across all classes
1950
+ scores = np.tile(scores, (n_classes, 1, 1))
1951
+ else:
1952
+ coefs_paths = np.reshape(
1953
+ coefs_paths,
1954
+ (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_), -1),
1955
+ )
1956
+ self.n_iter_ = np.reshape(
1957
+ n_iter_, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
1958
+ )
1959
+ scores = np.reshape(scores, (n_classes, len(folds), -1))
1960
+ self.scores_ = dict(zip(classes, scores))
1961
+ self.coefs_paths_ = dict(zip(classes, coefs_paths))
1962
+
1963
+ self.C_ = list()
1964
+ self.l1_ratio_ = list()
1965
+ self.coef_ = np.empty((n_classes, X.shape[1]))
1966
+ self.intercept_ = np.zeros(n_classes)
1967
+ for index, (cls, encoded_label) in enumerate(
1968
+ zip(iter_classes, iter_encoded_labels)
1969
+ ):
1970
+ if multi_class == "ovr":
1971
+ scores = self.scores_[cls]
1972
+ coefs_paths = self.coefs_paths_[cls]
1973
+ else:
1974
+ # For multinomial, all scores are the same across classes
1975
+ scores = scores[0]
1976
+ # coefs_paths will keep its original shape because
1977
+ # logistic_regression_path expects it this way
1978
+
1979
+ if self.refit:
1980
+ # best_index is between 0 and (n_Cs . n_l1_ratios - 1)
1981
+ # for example, with n_cs=2 and n_l1_ratios=3
1982
+ # the layout of scores is
1983
+ # [c1, c2, c1, c2, c1, c2]
1984
+ # l1_1 , l1_2 , l1_3
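+ # e.g. best_index == 3 with n_cs=2 picks C == c2 (3 % 2 == 1)
+ # and l1_ratio == l1_2 (3 // 2 == 1)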
1985
+ best_index = scores.sum(axis=0).argmax()
1986
+
1987
+ best_index_C = best_index % len(self.Cs_)
1988
+ C_ = self.Cs_[best_index_C]
1989
+ self.C_.append(C_)
1990
+
1991
+ best_index_l1 = best_index // len(self.Cs_)
1992
+ l1_ratio_ = l1_ratios_[best_index_l1]
1993
+ self.l1_ratio_.append(l1_ratio_)
1994
+
1995
+ if multi_class == "multinomial":
1996
+ coef_init = np.mean(coefs_paths[:, :, best_index, :], axis=1)
1997
+ else:
1998
+ coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
1999
+
2000
+ # Note that y is label encoded and hence pos_class must be
2001
+ # the encoded label / None (for 'multinomial')
2002
+ w, _, _ = _logistic_regression_path(
2003
+ X,
2004
+ y,
2005
+ pos_class=encoded_label,
2006
+ Cs=[C_],
2007
+ solver=solver,
2008
+ fit_intercept=self.fit_intercept,
2009
+ coef=coef_init,
2010
+ max_iter=self.max_iter,
2011
+ tol=self.tol,
2012
+ penalty=self.penalty,
2013
+ class_weight=class_weight,
2014
+ multi_class=multi_class,
2015
+ verbose=max(0, self.verbose - 1),
2016
+ random_state=self.random_state,
2017
+ check_input=False,
2018
+ max_squared_sum=max_squared_sum,
2019
+ sample_weight=sample_weight,
2020
+ l1_ratio=l1_ratio_,
2021
+ )
2022
+ w = w[0]
2023
+
2024
+ else:
2025
+ # Take the best scores across every fold and the average of
2026
+ # all coefficients corresponding to the best scores.
2027
+ best_indices = np.argmax(scores, axis=1)
2028
+ if multi_class == "ovr":
2029
+ w = np.mean(
2030
+ [coefs_paths[i, best_indices[i], :] for i in range(len(folds))],
2031
+ axis=0,
2032
+ )
2033
+ else:
2034
+ w = np.mean(
2035
+ [
2036
+ coefs_paths[:, i, best_indices[i], :]
2037
+ for i in range(len(folds))
2038
+ ],
2039
+ axis=0,
2040
+ )
2041
+
2042
+ best_indices_C = best_indices % len(self.Cs_)
2043
+ self.C_.append(np.mean(self.Cs_[best_indices_C]))
2044
+
2045
+ if self.penalty == "elasticnet":
2046
+ best_indices_l1 = best_indices // len(self.Cs_)
2047
+ self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
2048
+ else:
2049
+ self.l1_ratio_.append(None)
2050
+
2051
+ if multi_class == "multinomial":
2052
+ self.C_ = np.tile(self.C_, n_classes)
2053
+ self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
2054
+ self.coef_ = w[:, : X.shape[1]]
2055
+ if self.fit_intercept:
2056
+ self.intercept_ = w[:, -1]
2057
+ else:
2058
+ self.coef_[index] = w[: X.shape[1]]
2059
+ if self.fit_intercept:
2060
+ self.intercept_[index] = w[-1]
2061
+
2062
+ self.C_ = np.asarray(self.C_)
2063
+ self.l1_ratio_ = np.asarray(self.l1_ratio_)
2064
+ self.l1_ratios_ = np.asarray(l1_ratios_)
2065
+ # if elasticnet was used, add the l1_ratios dimension to some
2066
+ # attributes
2067
+ if self.l1_ratios is not None:
2068
+ # with n_cs=2 and n_l1_ratios=3
2069
+ # the layout of scores is
2070
+ # [c1, c2, c1, c2, c1, c2]
2071
+ # l1_1 , l1_2 , l1_3
2072
+ # To get a 2d array with the following layout
2073
+ # l1_1, l1_2, l1_3
2074
+ # c1 [[ . , . , . ],
2075
+ # c2 [ . , . , . ]]
2076
+ # We need to first reshape and then transpose.
2077
+ # The same goes for the other arrays
2078
+ for cls, coefs_path in self.coefs_paths_.items():
2079
+ self.coefs_paths_[cls] = coefs_path.reshape(
2080
+ (len(folds), self.l1_ratios_.size, self.Cs_.size, -1)
2081
+ )
2082
+ self.coefs_paths_[cls] = np.transpose(
2083
+ self.coefs_paths_[cls], (0, 2, 1, 3)
2084
+ )
2085
+ for cls, score in self.scores_.items():
2086
+ self.scores_[cls] = score.reshape(
2087
+ (len(folds), self.l1_ratios_.size, self.Cs_.size)
2088
+ )
2089
+ self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1))
2090
+
2091
+ self.n_iter_ = self.n_iter_.reshape(
2092
+ (-1, len(folds), self.l1_ratios_.size, self.Cs_.size)
2093
+ )
2094
+ self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2))
2095
+
2096
+ return self
2097
+
2098
+ def score(self, X, y, sample_weight=None, **score_params):
2099
+ """Score using the `scoring` option on the given test data and labels.
2100
+
2101
+ Parameters
2102
+ ----------
2103
+ X : array-like of shape (n_samples, n_features)
2104
+ Test samples.
2105
+
2106
+ y : array-like of shape (n_samples,)
2107
+ True labels for X.
2108
+
2109
+ sample_weight : array-like of shape (n_samples,), default=None
2110
+ Sample weights.
2111
+
2112
+ **score_params : dict
2113
+ Parameters to pass to the `score` method of the underlying scorer.
2114
+
2115
+ .. versionadded:: 1.4
2116
+
2117
+ Returns
2118
+ -------
2119
+ score : float
2120
+ Score of self.predict(X) w.r.t. y.
2121
+ """
2122
+ _raise_for_params(score_params, self, "score")
2123
+
2124
+ scoring = self._get_scorer()
2125
+ if _routing_enabled():
2126
+ routed_params = process_routing(
2127
+ self,
2128
+ "score",
2129
+ sample_weight=sample_weight,
2130
+ **score_params,
2131
+ )
2132
+ else:
2133
+ routed_params = Bunch()
2134
+ routed_params.scorer = Bunch(score={})
2135
+ if sample_weight is not None:
2136
+ routed_params.scorer.score["sample_weight"] = sample_weight
2137
+
2138
+ return scoring(
2139
+ self,
2140
+ X,
2141
+ y,
2142
+ **routed_params.scorer.score,
2143
+ )
2144
+
2145
+ def get_metadata_routing(self):
2146
+ """Get metadata routing of this object.
2147
+
2148
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
2149
+ mechanism works.
2150
+
2151
+ .. versionadded:: 1.4
2152
+
2153
+ Returns
2154
+ -------
2155
+ routing : MetadataRouter
2156
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
2157
+ routing information.
2158
+ """
2159
+
2160
+ router = (
2161
+ MetadataRouter(owner=self.__class__.__name__)
2162
+ .add_self_request(self)
2163
+ .add(
2164
+ splitter=self.cv,
2165
+ method_mapping=MethodMapping().add(callee="split", caller="fit"),
2166
+ )
2167
+ .add(
2168
+ scorer=self._get_scorer(),
2169
+ method_mapping=MethodMapping()
2170
+ .add(callee="score", caller="score")
2171
+ .add(callee="score", caller="fit"),
2172
+ )
2173
+ )
2174
+ return router
2175
+
2176
+ def _more_tags(self):
2177
+ return {
2178
+ "_xfail_checks": {
2179
+ "check_sample_weights_invariance": (
2180
+ "zero sample_weight is not equivalent to removing samples"
2181
+ ),
2182
+ }
2183
+ }
2184
+
2185
+ def _get_scorer(self):
2186
+ """Get the scorer based on the scoring method specified.
2187
+ The default scoring method is `accuracy`.
2188
+ """
2189
+ scoring = self.scoring or "accuracy"
2190
+ return get_scorer(scoring)
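+
+ # --- Illustrative sketch (editor's addition, not part of scikit-learn) ---
+ # A minimal, hedged example of the solver/penalty pairing documented in the
+ # LogisticRegressionCV docstring above: 'saga' is the only solver that
+ # supports penalty='elasticnet', so `l1_ratios` must be supplied. The helper
+ # name is hypothetical and only meant to illustrate the public API.
+ def _example_logistic_regression_cv_elasticnet():
+     from sklearn.datasets import make_classification
+     from sklearn.linear_model import LogisticRegressionCV
+     from sklearn.preprocessing import StandardScaler
+
+     X, y = make_classification(n_samples=200, n_features=20, random_state=0)
+     # 'sag'/'saga' converge reliably only on features with comparable scales.
+     X = StandardScaler().fit_transform(X)
+     clf = LogisticRegressionCV(
+         Cs=5,
+         cv=3,
+         penalty="elasticnet",
+         solver="saga",
+         l1_ratios=[0.1, 0.5, 0.9],
+         max_iter=2000,
+         random_state=0,
+     ).fit(X, y)
+     return clf.C_, clf.l1_ratio_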
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_omp.py ADDED
@@ -0,0 +1,1097 @@
1
+ """Orthogonal matching pursuit algorithms
2
+ """
3
+
4
+ # Author: Vlad Niculae
5
+ #
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from math import sqrt
10
+ from numbers import Integral, Real
11
+
12
+ import numpy as np
13
+ from scipy import linalg
14
+ from scipy.linalg.lapack import get_lapack_funcs
15
+
16
+ from ..base import MultiOutputMixin, RegressorMixin, _fit_context
17
+ from ..model_selection import check_cv
18
+ from ..utils import Bunch, as_float_array, check_array
19
+ from ..utils._param_validation import Interval, StrOptions, validate_params
20
+ from ..utils.metadata_routing import (
21
+ MetadataRouter,
22
+ MethodMapping,
23
+ _raise_for_params,
24
+ _routing_enabled,
25
+ process_routing,
26
+ )
27
+ from ..utils.parallel import Parallel, delayed
28
+ from ._base import LinearModel, _pre_fit
29
+
30
+ premature = (
31
+ "Orthogonal matching pursuit ended prematurely due to linear"
32
+ " dependence in the dictionary. The requested precision might"
33
+ " not have been met."
34
+ )
35
+
36
+
37
+ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False):
38
+ """Orthogonal Matching Pursuit step using the Cholesky decomposition.
39
+
40
+ Parameters
41
+ ----------
42
+ X : ndarray of shape (n_samples, n_features)
43
+ Input dictionary. Columns are assumed to have unit norm.
44
+
45
+ y : ndarray of shape (n_samples,)
46
+ Input targets.
47
+
48
+ n_nonzero_coefs : int
49
+ Targeted number of non-zero elements.
50
+
51
+ tol : float, default=None
52
+ Targeted squared error, if not None overrides n_nonzero_coefs.
53
+
54
+ copy_X : bool, default=True
55
+ Whether the design matrix X must be copied by the algorithm. A false
56
+ value is only helpful if X is already Fortran-ordered, otherwise a
57
+ copy is made anyway.
58
+
59
+ return_path : bool, default=False
60
+ Whether to return every value of the nonzero coefficients along the
61
+ forward path. Useful for cross-validation.
62
+
63
+ Returns
64
+ -------
65
+ gamma : ndarray of shape (n_nonzero_coefs,)
66
+ Non-zero elements of the solution.
67
+
68
+ idx : ndarray of shape (n_nonzero_coefs,)
69
+ Indices of the positions of the elements in gamma within the solution
70
+ vector.
71
+
72
+ coef : ndarray of shape (n_features, n_nonzero_coefs)
73
+ The first k values of column k correspond to the coefficient value
74
+ for the active features at that step. The lower left triangle contains
75
+ garbage. Only returned if ``return_path=True``.
76
+
77
+ n_active : int
78
+ Number of active features at convergence.
79
+ """
80
+ if copy_X:
81
+ X = X.copy("F")
82
+ else: # even if we are allowed to overwrite, still copy it if bad order
83
+ X = np.asfortranarray(X)
84
+
85
+ min_float = np.finfo(X.dtype).eps
86
+ nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,))
87
+ (potrs,) = get_lapack_funcs(("potrs",), (X,))
88
+
89
+ alpha = np.dot(X.T, y)
90
+ residual = y
91
+ gamma = np.empty(0)
92
+ n_active = 0
93
+ indices = np.arange(X.shape[1]) # keeping track of swapping
94
+
95
+ max_features = X.shape[1] if tol is not None else n_nonzero_coefs
96
+
97
+ L = np.empty((max_features, max_features), dtype=X.dtype)
98
+
99
+ if return_path:
100
+ coefs = np.empty_like(L)
101
+
102
+ while True:
103
+ lam = np.argmax(np.abs(np.dot(X.T, residual)))
104
+ if lam < n_active or alpha[lam] ** 2 < min_float:
105
+ # atom already selected or inner product too small
106
+ warnings.warn(premature, RuntimeWarning, stacklevel=2)
107
+ break
108
+
109
+ if n_active > 0:
110
+ # Updates the Cholesky decomposition of X' X
111
+ L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
112
+ linalg.solve_triangular(
113
+ L[:n_active, :n_active],
114
+ L[n_active, :n_active],
115
+ trans=0,
116
+ lower=1,
117
+ overwrite_b=True,
118
+ check_finite=False,
119
+ )
120
+ v = nrm2(L[n_active, :n_active]) ** 2
121
+ Lkk = linalg.norm(X[:, lam]) ** 2 - v
122
+ if Lkk <= min_float: # selected atoms are dependent
123
+ warnings.warn(premature, RuntimeWarning, stacklevel=2)
124
+ break
125
+ L[n_active, n_active] = sqrt(Lkk)
126
+ else:
127
+ L[0, 0] = linalg.norm(X[:, lam])
128
+
129
+ X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
130
+ alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
131
+ indices[n_active], indices[lam] = indices[lam], indices[n_active]
132
+ n_active += 1
133
+
134
+ # solves LL'x = X'y as a composition of two triangular systems
135
+ gamma, _ = potrs(
136
+ L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False
137
+ )
138
+
139
+ if return_path:
140
+ coefs[:n_active, n_active - 1] = gamma
141
+ residual = y - np.dot(X[:, :n_active], gamma)
142
+ if tol is not None and nrm2(residual) ** 2 <= tol:
143
+ break
144
+ elif n_active == max_features:
145
+ break
146
+
147
+ if return_path:
148
+ return gamma, indices[:n_active], coefs[:, :n_active], n_active
149
+ else:
150
+ return gamma, indices[:n_active], n_active
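+
+ # --- Illustrative sketch (editor's addition, not part of scikit-learn) ---
+ # A naive reference version of the greedy loop that _cholesky_omp above
+ # implements with incremental Cholesky updates: at each step select the atom
+ # most correlated with the current residual, then refit the coefficients on
+ # the active set by least squares. The helper name is hypothetical and the
+ # code is meant only to clarify the algorithm, not for production use.
+ def _naive_omp_reference(X, y, n_nonzero_coefs):
+     active = []
+     residual = y.copy()
+     gamma = np.empty(0)
+     for _ in range(n_nonzero_coefs):
+         correlations = np.abs(X.T @ residual)
+         correlations[active] = 0.0  # never reselect an active atom
+         active.append(int(np.argmax(correlations)))
+         # least-squares refit on the active atoms (the role played by the
+         # triangular solves in _cholesky_omp)
+         gamma, *_ = np.linalg.lstsq(X[:, active], y, rcond=None)
+         residual = y - X[:, active] @ gamma
+     return gamma, np.asarray(active)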
151
+
152
+
153
+ def _gram_omp(
154
+ Gram,
155
+ Xy,
156
+ n_nonzero_coefs,
157
+ tol_0=None,
158
+ tol=None,
159
+ copy_Gram=True,
160
+ copy_Xy=True,
161
+ return_path=False,
162
+ ):
163
+ """Orthogonal Matching Pursuit step on a precomputed Gram matrix.
164
+
165
+ This function uses the Cholesky decomposition method.
166
+
167
+ Parameters
168
+ ----------
169
+ Gram : ndarray of shape (n_features, n_features)
170
+ Gram matrix of the input data matrix.
171
+
172
+ Xy : ndarray of shape (n_features,)
173
+ Input targets.
174
+
175
+ n_nonzero_coefs : int
176
+ Targeted number of non-zero elements.
177
+
178
+ tol_0 : float, default=None
179
+ Squared norm of y, required if tol is not None.
180
+
181
+ tol : float, default=None
182
+ Targeted squared error, if not None overrides n_nonzero_coefs.
183
+
184
+ copy_Gram : bool, default=True
185
+ Whether the gram matrix must be copied by the algorithm. A false
186
+ value is only helpful if it is already Fortran-ordered, otherwise a
187
+ copy is made anyway.
188
+
189
+ copy_Xy : bool, default=True
190
+ Whether the covariance vector Xy must be copied by the algorithm.
191
+ If False, it may be overwritten.
192
+
193
+ return_path : bool, default=False
194
+ Whether to return every value of the nonzero coefficients along the
195
+ forward path. Useful for cross-validation.
196
+
197
+ Returns
198
+ -------
199
+ gamma : ndarray of shape (n_nonzero_coefs,)
200
+ Non-zero elements of the solution.
201
+
202
+ idx : ndarray of shape (n_nonzero_coefs,)
203
+ Indices of the positions of the elements in gamma within the solution
204
+ vector.
205
+
206
+ coefs : ndarray of shape (n_features, n_nonzero_coefs)
207
+ The first k values of column k correspond to the coefficient value
208
+ for the active features at that step. The lower left triangle contains
209
+ garbage. Only returned if ``return_path=True``.
210
+
211
+ n_active : int
212
+ Number of active features at convergence.
213
+ """
214
+ Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram)
215
+
216
+ if copy_Xy or not Xy.flags.writeable:
217
+ Xy = Xy.copy()
218
+
219
+ min_float = np.finfo(Gram.dtype).eps
220
+ nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,))
221
+ (potrs,) = get_lapack_funcs(("potrs",), (Gram,))
222
+
223
+ indices = np.arange(len(Gram)) # keeping track of swapping
224
+ alpha = Xy
225
+ tol_curr = tol_0
226
+ delta = 0
227
+ gamma = np.empty(0)
228
+ n_active = 0
229
+
230
+ max_features = len(Gram) if tol is not None else n_nonzero_coefs
231
+
232
+ L = np.empty((max_features, max_features), dtype=Gram.dtype)
233
+
234
+ L[0, 0] = 1.0
235
+ if return_path:
236
+ coefs = np.empty_like(L)
237
+
238
+ while True:
239
+ lam = np.argmax(np.abs(alpha))
240
+ if lam < n_active or alpha[lam] ** 2 < min_float:
241
+ # selected same atom twice, or inner product too small
242
+ warnings.warn(premature, RuntimeWarning, stacklevel=3)
243
+ break
244
+ if n_active > 0:
245
+ L[n_active, :n_active] = Gram[lam, :n_active]
246
+ linalg.solve_triangular(
247
+ L[:n_active, :n_active],
248
+ L[n_active, :n_active],
249
+ trans=0,
250
+ lower=1,
251
+ overwrite_b=True,
252
+ check_finite=False,
253
+ )
254
+ v = nrm2(L[n_active, :n_active]) ** 2
255
+ Lkk = Gram[lam, lam] - v
256
+ if Lkk <= min_float: # selected atoms are dependent
257
+ warnings.warn(premature, RuntimeWarning, stacklevel=3)
258
+ break
259
+ L[n_active, n_active] = sqrt(Lkk)
260
+ else:
261
+ L[0, 0] = sqrt(Gram[lam, lam])
262
+
263
+ Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
264
+ Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
265
+ indices[n_active], indices[lam] = indices[lam], indices[n_active]
266
+ Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
267
+ n_active += 1
268
+ # solves LL'x = X'y as a composition of two triangular systems
269
+ gamma, _ = potrs(
270
+ L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False
271
+ )
272
+ if return_path:
273
+ coefs[:n_active, n_active - 1] = gamma
274
+ beta = np.dot(Gram[:, :n_active], gamma)
275
+ alpha = Xy - beta
276
+ if tol is not None:
277
+ tol_curr += delta
278
+ delta = np.inner(gamma, beta[:n_active])
279
+ tol_curr -= delta
280
+ if abs(tol_curr) <= tol:
281
+ break
282
+ elif n_active == max_features:
283
+ break
284
+
285
+ if return_path:
286
+ return gamma, indices[:n_active], coefs[:, :n_active], n_active
287
+ else:
288
+ return gamma, indices[:n_active], n_active
289
+
290
+
291
+ @validate_params(
292
+ {
293
+ "X": ["array-like"],
294
+ "y": [np.ndarray],
295
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
296
+ "tol": [Interval(Real, 0, None, closed="left"), None],
297
+ "precompute": ["boolean", StrOptions({"auto"})],
298
+ "copy_X": ["boolean"],
299
+ "return_path": ["boolean"],
300
+ "return_n_iter": ["boolean"],
301
+ },
302
+ prefer_skip_nested_validation=True,
303
+ )
304
+ def orthogonal_mp(
305
+ X,
306
+ y,
307
+ *,
308
+ n_nonzero_coefs=None,
309
+ tol=None,
310
+ precompute=False,
311
+ copy_X=True,
312
+ return_path=False,
313
+ return_n_iter=False,
314
+ ):
315
+ r"""Orthogonal Matching Pursuit (OMP).
316
+
317
+ Solves n_targets Orthogonal Matching Pursuit problems.
318
+ An instance of the problem has the form:
319
+
320
+ When parametrized by the number of non-zero coefficients using
321
+ `n_nonzero_coefs`:
322
+ argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
323
+
324
+ When parametrized by error using the parameter `tol`:
325
+ argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
326
+
327
+ Read more in the :ref:`User Guide <omp>`.
328
+
329
+ Parameters
330
+ ----------
331
+ X : array-like of shape (n_samples, n_features)
332
+ Input data. Columns are assumed to have unit norm.
333
+
334
+ y : ndarray of shape (n_samples,) or (n_samples, n_targets)
335
+ Input targets.
336
+
337
+ n_nonzero_coefs : int, default=None
338
+ Desired number of non-zero entries in the solution. If None (by
339
+ default) this value is set to 10% of n_features.
340
+
341
+ tol : float, default=None
342
+ Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.
343
+
344
+ precompute : 'auto' or bool, default=False
345
+ Whether to perform precomputations. Improves performance when n_targets
346
+ or n_samples is very large.
347
+
348
+ copy_X : bool, default=True
349
+ Whether the design matrix X must be copied by the algorithm. A false
350
+ value is only helpful if X is already Fortran-ordered, otherwise a
351
+ copy is made anyway.
352
+
353
+ return_path : bool, default=False
354
+ Whether to return every value of the nonzero coefficients along the
355
+ forward path. Useful for cross-validation.
356
+
357
+ return_n_iter : bool, default=False
358
+ Whether or not to return the number of iterations.
359
+
360
+ Returns
361
+ -------
362
+ coef : ndarray of shape (n_features,) or (n_features, n_targets)
363
+ Coefficients of the OMP solution. If `return_path=True`, this contains
364
+ the whole coefficient path. In this case its shape is
365
+ (n_features, n_features) or (n_features, n_targets, n_features) and
366
+ iterating over the last axis generates coefficients in increasing order
367
+ of active features.
368
+
369
+ n_iters : array-like or int
370
+ Number of active features across every target. Returned only if
371
+ `return_n_iter` is set to True.
372
+
373
+ See Also
374
+ --------
375
+ OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model.
376
+ orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y.
377
+ lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
378
+ sklearn.decomposition.sparse_encode : Sparse coding.
379
+
380
+ Notes
381
+ -----
382
+ Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
383
+ Matching pursuits with time-frequency dictionaries, IEEE Transactions on
384
+ Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
385
+ (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
386
+
387
+ This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
388
+ M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
389
+ Matching Pursuit Technical Report - CS Technion, April 2008.
390
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
391
+ """
392
+ X = check_array(X, order="F", copy=copy_X)
393
+ copy_X = False
394
+ if y.ndim == 1:
395
+ y = y.reshape(-1, 1)
396
+ y = check_array(y)
397
+ if y.shape[1] > 1: # subsequent targets will be affected
398
+ copy_X = True
399
+ if n_nonzero_coefs is None and tol is None:
400
+ # default for n_nonzero_coefs is 0.1 * n_features
401
+ # but at least one.
402
+ n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
403
+ if tol is None and n_nonzero_coefs > X.shape[1]:
404
+ raise ValueError(
405
+ "The number of atoms cannot be more than the number of features"
406
+ )
407
+ if precompute == "auto":
408
+ precompute = X.shape[0] > X.shape[1]
409
+ if precompute:
410
+ G = np.dot(X.T, X)
411
+ G = np.asfortranarray(G)
412
+ Xy = np.dot(X.T, y)
413
+ if tol is not None:
414
+ norms_squared = np.sum((y**2), axis=0)
415
+ else:
416
+ norms_squared = None
417
+ return orthogonal_mp_gram(
418
+ G,
419
+ Xy,
420
+ n_nonzero_coefs=n_nonzero_coefs,
421
+ tol=tol,
422
+ norms_squared=norms_squared,
423
+ copy_Gram=copy_X,
424
+ copy_Xy=False,
425
+ return_path=return_path,
426
+ )
427
+
428
+ if return_path:
429
+ coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
430
+ else:
431
+ coef = np.zeros((X.shape[1], y.shape[1]))
432
+ n_iters = []
433
+
434
+ for k in range(y.shape[1]):
435
+ out = _cholesky_omp(
436
+ X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path
437
+ )
438
+ if return_path:
439
+ _, idx, coefs, n_iter = out
440
+ coef = coef[:, :, : len(idx)]
441
+ for n_active, x in enumerate(coefs.T):
442
+ coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]
443
+ else:
444
+ x, idx, n_iter = out
445
+ coef[idx, k] = x
446
+ n_iters.append(n_iter)
447
+
448
+ if y.shape[1] == 1:
449
+ n_iters = n_iters[0]
450
+
451
+ if return_n_iter:
452
+ return np.squeeze(coef), n_iters
453
+ else:
454
+ return np.squeeze(coef)
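+
+ # --- Illustrative sketch (editor's addition, not part of scikit-learn) ---
+ # Minimal usage example for orthogonal_mp: recover the support of a sparse
+ # coefficient vector from noiseless measurements. The helper name and the
+ # chosen sizes are hypothetical.
+ def _example_orthogonal_mp():
+     rng = np.random.RandomState(0)
+     X = rng.randn(50, 100)
+     X /= np.sqrt(np.sum(X**2, axis=0))  # columns are assumed to have unit norm
+     true_coef = np.zeros(100)
+     true_coef[[5, 17, 42]] = [1.0, -2.0, 3.0]
+     y = X @ true_coef
+     coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
+     return np.flatnonzero(coef)  # typically recovers {5, 17, 42}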
455
+
456
+
457
+ @validate_params(
458
+ {
459
+ "Gram": ["array-like"],
460
+ "Xy": ["array-like"],
461
+ "n_nonzero_coefs": [Interval(Integral, 0, None, closed="neither"), None],
462
+ "tol": [Interval(Real, 0, None, closed="left"), None],
463
+ "norms_squared": ["array-like", None],
464
+ "copy_Gram": ["boolean"],
465
+ "copy_Xy": ["boolean"],
466
+ "return_path": ["boolean"],
467
+ "return_n_iter": ["boolean"],
468
+ },
469
+ prefer_skip_nested_validation=True,
470
+ )
471
+ def orthogonal_mp_gram(
472
+ Gram,
473
+ Xy,
474
+ *,
475
+ n_nonzero_coefs=None,
476
+ tol=None,
477
+ norms_squared=None,
478
+ copy_Gram=True,
479
+ copy_Xy=True,
480
+ return_path=False,
481
+ return_n_iter=False,
482
+ ):
483
+ """Gram Orthogonal Matching Pursuit (OMP).
484
+
485
+ Solves n_targets Orthogonal Matching Pursuit problems using only
486
+ the Gram matrix X.T * X and the product X.T * y.
487
+
488
+ Read more in the :ref:`User Guide <omp>`.
489
+
490
+ Parameters
491
+ ----------
492
+ Gram : array-like of shape (n_features, n_features)
493
+ Gram matrix of the input data: `X.T * X`.
494
+
495
+ Xy : array-like of shape (n_features,) or (n_features, n_targets)
496
+ Input targets multiplied by `X`: `X.T * y`.
497
+
498
+ n_nonzero_coefs : int, default=None
499
+ Desired number of non-zero entries in the solution. If `None` (by
500
+ default) this value is set to 10% of n_features.
501
+
502
+ tol : float, default=None
503
+ Maximum squared norm of the residual. If not `None`,
504
+ overrides `n_nonzero_coefs`.
505
+
506
+ norms_squared : array-like of shape (n_targets,), default=None
507
+ Squared L2 norms of the lines of `y`. Required if `tol` is not None.
508
+
509
+ copy_Gram : bool, default=True
510
+ Whether the gram matrix must be copied by the algorithm. A `False`
511
+ value is only helpful if it is already Fortran-ordered, otherwise a
512
+ copy is made anyway.
513
+
514
+ copy_Xy : bool, default=True
515
+ Whether the covariance vector `Xy` must be copied by the algorithm.
516
+ If `False`, it may be overwritten.
517
+
518
+ return_path : bool, default=False
519
+ Whether to return every value of the nonzero coefficients along the
520
+ forward path. Useful for cross-validation.
521
+
522
+ return_n_iter : bool, default=False
523
+ Whether or not to return the number of iterations.
524
+
525
+ Returns
526
+ -------
527
+ coef : ndarray of shape (n_features,) or (n_features, n_targets)
528
+ Coefficients of the OMP solution. If `return_path=True`, this contains
529
+ the whole coefficient path. In this case its shape is
530
+ `(n_features, n_features)` or `(n_features, n_targets, n_features)` and
531
+ iterating over the last axis yields coefficients in increasing order
532
+ of active features.
533
+
534
+ n_iters : list or int
535
+ Number of active features across every target. Returned only if
536
+ `return_n_iter` is set to True.
537
+
538
+ See Also
539
+ --------
540
+ OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
541
+ orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
542
+ lars_path : Compute Least Angle Regression or Lasso path using
543
+ LARS algorithm.
544
+ sklearn.decomposition.sparse_encode : Generic sparse coding.
545
+ Each column of the result is the solution to a Lasso problem.
546
+
547
+ Notes
548
+ -----
549
+ Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
550
+ Matching pursuits with time-frequency dictionaries, IEEE Transactions on
551
+ Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
552
+ (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
553
+
554
+ This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
555
+ M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
556
+ Matching Pursuit Technical Report - CS Technion, April 2008.
557
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
558
+ """
559
+ Gram = check_array(Gram, order="F", copy=copy_Gram)
560
+ Xy = np.asarray(Xy)
561
+ if Xy.ndim > 1 and Xy.shape[1] > 1:
562
+ # or subsequent target will be affected
563
+ copy_Gram = True
564
+ if Xy.ndim == 1:
565
+ Xy = Xy[:, np.newaxis]
566
+ if tol is not None:
567
+ norms_squared = [norms_squared]
568
+ if copy_Xy or not Xy.flags.writeable:
569
+ # Make the copy once instead of many times in _gram_omp itself.
570
+ Xy = Xy.copy()
571
+
572
+ if n_nonzero_coefs is None and tol is None:
573
+ n_nonzero_coefs = int(0.1 * len(Gram))
574
+ if tol is not None and norms_squared is None:
575
+ raise ValueError(
576
+ "Gram OMP needs the precomputed norms in order "
577
+ "to evaluate the error sum of squares."
578
+ )
579
+ if tol is not None and tol < 0:
580
+ raise ValueError("Epsilon cannot be negative")
581
+ if tol is None and n_nonzero_coefs <= 0:
582
+ raise ValueError("The number of atoms must be positive")
583
+ if tol is None and n_nonzero_coefs > len(Gram):
584
+ raise ValueError(
585
+ "The number of atoms cannot be more than the number of features"
586
+ )
587
+
588
+ if return_path:
589
+ coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
590
+ else:
591
+ coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
592
+
593
+ n_iters = []
594
+ for k in range(Xy.shape[1]):
595
+ out = _gram_omp(
596
+ Gram,
597
+ Xy[:, k],
598
+ n_nonzero_coefs,
599
+ norms_squared[k] if tol is not None else None,
600
+ tol,
601
+ copy_Gram=copy_Gram,
602
+ copy_Xy=False,
603
+ return_path=return_path,
604
+ )
605
+ if return_path:
606
+ _, idx, coefs, n_iter = out
607
+ coef = coef[:, :, : len(idx)]
608
+ for n_active, x in enumerate(coefs.T):
609
+ coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]
610
+ else:
611
+ x, idx, n_iter = out
612
+ coef[idx, k] = x
613
+ n_iters.append(n_iter)
614
+
615
+ if Xy.shape[1] == 1:
616
+ n_iters = n_iters[0]
617
+
618
+ if return_n_iter:
619
+ return np.squeeze(coef), n_iters
620
+ else:
621
+ return np.squeeze(coef)
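+
+ # --- Illustrative sketch (editor's addition, not part of scikit-learn) ---
+ # The Gram variant only needs X.T @ X and X.T @ y, which is convenient when
+ # X itself is too large to keep around. With unit-norm columns the result
+ # should match orthogonal_mp on the original data. Helper name hypothetical.
+ def _example_orthogonal_mp_gram():
+     rng = np.random.RandomState(0)
+     X = rng.randn(40, 60)
+     X /= np.sqrt(np.sum(X**2, axis=0))
+     y = rng.randn(40)
+     coef_direct = orthogonal_mp(X, y, n_nonzero_coefs=5)
+     coef_gram = orthogonal_mp_gram(X.T @ X, X.T @ y, n_nonzero_coefs=5)
+     return np.allclose(coef_direct, coef_gram)  # typically True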
622
+
623
+
624
+ class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel):
625
+ """Orthogonal Matching Pursuit model (OMP).
626
+
627
+ Read more in the :ref:`User Guide <omp>`.
628
+
629
+ Parameters
630
+ ----------
631
+ n_nonzero_coefs : int, default=None
632
+ Desired number of non-zero entries in the solution. If None (by
633
+ default) this value is set to 10% of n_features.
634
+
635
+ tol : float, default=None
636
+ Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.
637
+
638
+ fit_intercept : bool, default=True
639
+ Whether to calculate the intercept for this model. If set
640
+ to false, no intercept will be used in calculations
641
+ (i.e. data is expected to be centered).
642
+
643
+ precompute : 'auto' or bool, default='auto'
644
+ Whether to use a precomputed Gram and Xy matrix to speed up
645
+ calculations. Improves performance when :term:`n_targets` or
646
+ :term:`n_samples` is very large. Note that if you already have such
647
+ matrices, you can pass them directly to the fit method.
648
+
649
+ Attributes
650
+ ----------
651
+ coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
652
+ Parameter vector (w in the formula).
653
+
654
+ intercept_ : float or ndarray of shape (n_targets,)
655
+ Independent term in decision function.
656
+
657
+ n_iter_ : int or array-like
658
+ Number of active features across every target.
659
+
660
+ n_nonzero_coefs_ : int
661
+ The number of non-zero coefficients in the solution. If
662
+ `n_nonzero_coefs` is None and `tol` is None this value is either set
663
+ to 10% of `n_features` or 1, whichever is greater.
664
+
665
+ n_features_in_ : int
666
+ Number of features seen during :term:`fit`.
667
+
668
+ .. versionadded:: 0.24
669
+
670
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
671
+ Names of features seen during :term:`fit`. Defined only when `X`
672
+ has feature names that are all strings.
673
+
674
+ .. versionadded:: 1.0
675
+
676
+ See Also
677
+ --------
678
+ orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
679
+ orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
680
+ problems using only the Gram matrix X.T * X and the product X.T * y.
681
+ lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
682
+ Lars : Least Angle Regression model a.k.a. LAR.
683
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
684
+ sklearn.decomposition.sparse_encode : Generic sparse coding.
685
+ Each column of the result is the solution to a Lasso problem.
686
+ OrthogonalMatchingPursuitCV : Cross-validated
687
+ Orthogonal Matching Pursuit model (OMP).
688
+
689
+ Notes
690
+ -----
691
+ Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
692
+ Matching pursuits with time-frequency dictionaries, IEEE Transactions on
693
+ Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
694
+ (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
695
+
696
+ This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
697
+ M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
698
+ Matching Pursuit Technical Report - CS Technion, April 2008.
699
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
700
+
701
+ Examples
702
+ --------
703
+ >>> from sklearn.linear_model import OrthogonalMatchingPursuit
704
+ >>> from sklearn.datasets import make_regression
705
+ >>> X, y = make_regression(noise=4, random_state=0)
706
+ >>> reg = OrthogonalMatchingPursuit().fit(X, y)
707
+ >>> reg.score(X, y)
708
+ 0.9991...
709
+ >>> reg.predict(X[:1,])
710
+ array([-78.3854...])
711
+ """
712
+
713
+ _parameter_constraints: dict = {
714
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
715
+ "tol": [Interval(Real, 0, None, closed="left"), None],
716
+ "fit_intercept": ["boolean"],
717
+ "precompute": [StrOptions({"auto"}), "boolean"],
718
+ }
719
+
720
+ def __init__(
721
+ self,
722
+ *,
723
+ n_nonzero_coefs=None,
724
+ tol=None,
725
+ fit_intercept=True,
726
+ precompute="auto",
727
+ ):
728
+ self.n_nonzero_coefs = n_nonzero_coefs
729
+ self.tol = tol
730
+ self.fit_intercept = fit_intercept
731
+ self.precompute = precompute
732
+
733
+ @_fit_context(prefer_skip_nested_validation=True)
734
+ def fit(self, X, y):
735
+ """Fit the model using X, y as training data.
736
+
737
+ Parameters
738
+ ----------
739
+ X : array-like of shape (n_samples, n_features)
740
+ Training data.
741
+
742
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
743
+ Target values. Will be cast to X's dtype if necessary.
744
+
745
+ Returns
746
+ -------
747
+ self : object
748
+ Returns an instance of self.
749
+ """
750
+ X, y = self._validate_data(X, y, multi_output=True, y_numeric=True)
751
+ n_features = X.shape[1]
752
+
753
+ X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(
754
+ X, y, None, self.precompute, self.fit_intercept, copy=True
755
+ )
756
+
757
+ if y.ndim == 1:
758
+ y = y[:, np.newaxis]
759
+
760
+ if self.n_nonzero_coefs is None and self.tol is None:
761
+ # default for n_nonzero_coefs is 0.1 * n_features
762
+ # but at least one.
763
+ self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
764
+ else:
765
+ self.n_nonzero_coefs_ = self.n_nonzero_coefs
766
+
767
+ if Gram is False:
768
+ coef_, self.n_iter_ = orthogonal_mp(
769
+ X,
770
+ y,
771
+ n_nonzero_coefs=self.n_nonzero_coefs_,
772
+ tol=self.tol,
773
+ precompute=False,
774
+ copy_X=True,
775
+ return_n_iter=True,
776
+ )
777
+ else:
778
+ norms_sq = np.sum(y**2, axis=0) if self.tol is not None else None
779
+
780
+ coef_, self.n_iter_ = orthogonal_mp_gram(
781
+ Gram,
782
+ Xy=Xy,
783
+ n_nonzero_coefs=self.n_nonzero_coefs_,
784
+ tol=self.tol,
785
+ norms_squared=norms_sq,
786
+ copy_Gram=True,
787
+ copy_Xy=True,
788
+ return_n_iter=True,
789
+ )
790
+ self.coef_ = coef_.T
791
+ self._set_intercept(X_offset, y_offset, X_scale)
792
+ return self
793
+
794
+
795
+ def _omp_path_residues(
796
+ X_train,
797
+ y_train,
798
+ X_test,
799
+ y_test,
800
+ copy=True,
801
+ fit_intercept=True,
802
+ max_iter=100,
803
+ ):
804
+ """Compute the residues on left-out data for a full LARS path.
805
+
806
+ Parameters
807
+ ----------
808
+ X_train : ndarray of shape (n_samples, n_features)
809
+ The data to fit the LARS on.
810
+
811
+ y_train : ndarray of shape (n_samples)
812
+ The target variable to fit LARS on.
813
+
814
+ X_test : ndarray of shape (n_samples, n_features)
815
+ The data to compute the residues on.
816
+
817
+ y_test : ndarray of shape (n_samples)
818
+ The target variable to compute the residues on.
819
+
820
+ copy : bool, default=True
821
+ Whether X_train, X_test, y_train and y_test should be copied. If
822
+ False, they may be overwritten.
823
+
824
+ fit_intercept : bool, default=True
825
+ Whether to calculate the intercept for this model. If set
826
+ to false, no intercept will be used in calculations
827
+ (i.e. data is expected to be centered).
828
+
829
+ max_iter : int, default=100
830
+ Maximum number of iterations to perform, therefore maximum features
831
+ to include. 100 by default.
832
+
833
+ Returns
834
+ -------
835
+ residues : ndarray of shape (n_samples, max_features)
836
+ Residues of the prediction on the test data.
837
+ """
838
+
839
+ if copy:
840
+ X_train = X_train.copy()
841
+ y_train = y_train.copy()
842
+ X_test = X_test.copy()
843
+ y_test = y_test.copy()
844
+
845
+ if fit_intercept:
846
+ X_mean = X_train.mean(axis=0)
847
+ X_train -= X_mean
848
+ X_test -= X_mean
849
+ y_mean = y_train.mean(axis=0)
850
+ y_train = as_float_array(y_train, copy=False)
851
+ y_train -= y_mean
852
+ y_test = as_float_array(y_test, copy=False)
853
+ y_test -= y_mean
854
+
855
+ coefs = orthogonal_mp(
856
+ X_train,
857
+ y_train,
858
+ n_nonzero_coefs=max_iter,
859
+ tol=None,
860
+ precompute=False,
861
+ copy_X=False,
862
+ return_path=True,
863
+ )
864
+ if coefs.ndim == 1:
865
+ coefs = coefs[:, np.newaxis]
866
+
867
+ return np.dot(coefs.T, X_test.T) - y_test
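+
+ # --- Illustrative sketch (editor's addition, not part of scikit-learn) ---
+ # How the residues returned above are typically consumed: stack one residue
+ # matrix per CV fold, average the squared residues over test samples and
+ # folds, and pick the number of non-zero coefficients that minimises this
+ # error. This mirrors the selection step in OrthogonalMatchingPursuitCV.fit
+ # below; the helper name is hypothetical.
+ def _example_select_n_nonzero_coefs(cv_paths):
+     # cv_paths: list of arrays of shape (max_features, n_test_samples)
+     min_early_stop = min(fold.shape[0] for fold in cv_paths)
+     mse_folds = np.array(
+         [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]
+     )
+     return int(np.argmin(mse_folds.mean(axis=0))) + 1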
868
+
869
+
870
+ class OrthogonalMatchingPursuitCV(RegressorMixin, LinearModel):
871
+ """Cross-validated Orthogonal Matching Pursuit model (OMP).
872
+
873
+ See glossary entry for :term:`cross-validation estimator`.
874
+
875
+ Read more in the :ref:`User Guide <omp>`.
876
+
877
+ Parameters
878
+ ----------
879
+ copy : bool, default=True
880
+ Whether the design matrix X must be copied by the algorithm. A false
881
+ value is only helpful if X is already Fortran-ordered, otherwise a
882
+ copy is made anyway.
883
+
884
+ fit_intercept : bool, default=True
885
+ Whether to calculate the intercept for this model. If set
886
+ to false, no intercept will be used in calculations
887
+ (i.e. data is expected to be centered).
888
+
889
+ max_iter : int, default=None
890
+ Maximum number of iterations to perform, therefore maximum features
891
+ to include. Defaults to 10% of ``n_features``, but at least 5 when possible.
892
+
893
+ cv : int, cross-validation generator or iterable, default=None
894
+ Determines the cross-validation splitting strategy.
895
+ Possible inputs for cv are:
896
+
897
+ - None, to use the default 5-fold cross-validation,
898
+ - integer, to specify the number of folds.
899
+ - :term:`CV splitter`,
900
+ - An iterable yielding (train, test) splits as arrays of indices.
901
+
902
+ For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
903
+
904
+ Refer :ref:`User Guide <cross_validation>` for the various
905
+ cross-validation strategies that can be used here.
906
+
907
+ .. versionchanged:: 0.22
908
+ ``cv`` default value if None changed from 3-fold to 5-fold.
909
+
910
+ n_jobs : int, default=None
911
+ Number of CPUs to use during the cross validation.
912
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
913
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
914
+ for more details.
915
+
916
+ verbose : bool or int, default=False
917
+ Sets the verbosity amount.
918
+
919
+ Attributes
920
+ ----------
921
+ intercept_ : float or ndarray of shape (n_targets,)
922
+ Independent term in decision function.
923
+
924
+ coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
925
+ Parameter vector (w in the problem formulation).
926
+
927
+ n_nonzero_coefs_ : int
928
+ Estimated number of non-zero coefficients giving the best mean squared
929
+ error over the cross-validation folds.
930
+
931
+ n_iter_ : int or array-like
932
+ Number of active features across every target for the model refit with
933
+ the best hyperparameters got by cross-validating across all folds.
934
+
935
+ n_features_in_ : int
936
+ Number of features seen during :term:`fit`.
937
+
938
+ .. versionadded:: 0.24
939
+
940
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
941
+ Names of features seen during :term:`fit`. Defined only when `X`
942
+ has feature names that are all strings.
943
+
944
+ .. versionadded:: 1.0
945
+
946
+ See Also
947
+ --------
948
+ orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
949
+ orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
950
+ problems using only the Gram matrix X.T * X and the product X.T * y.
951
+ lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
952
+ Lars : Least Angle Regression model a.k.a. LAR.
953
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
954
+ OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
955
+ LarsCV : Cross-validated Least Angle Regression model.
956
+ LassoLarsCV : Cross-validated Lasso model fit with Least Angle Regression.
957
+ sklearn.decomposition.sparse_encode : Generic sparse coding.
958
+ Each column of the result is the solution to a Lasso problem.
959
+
960
+ Notes
961
+ -----
962
+ In `fit`, once the optimal number of non-zero coefficients is found through
963
+ cross-validation, the model is fit again using the entire training set.
964
+
965
+ Examples
966
+ --------
967
+ >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
968
+ >>> from sklearn.datasets import make_regression
969
+ >>> X, y = make_regression(n_features=100, n_informative=10,
970
+ ... noise=4, random_state=0)
971
+ >>> reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
972
+ >>> reg.score(X, y)
973
+ 0.9991...
974
+ >>> reg.n_nonzero_coefs_
975
+ 10
976
+ >>> reg.predict(X[:1,])
977
+ array([-78.3854...])
978
+ """
979
+
980
+ _parameter_constraints: dict = {
981
+ "copy": ["boolean"],
982
+ "fit_intercept": ["boolean"],
983
+ "max_iter": [Interval(Integral, 0, None, closed="left"), None],
984
+ "cv": ["cv_object"],
985
+ "n_jobs": [Integral, None],
986
+ "verbose": ["verbose"],
987
+ }
988
+
989
+ def __init__(
990
+ self,
991
+ *,
992
+ copy=True,
993
+ fit_intercept=True,
994
+ max_iter=None,
995
+ cv=None,
996
+ n_jobs=None,
997
+ verbose=False,
998
+ ):
999
+ self.copy = copy
1000
+ self.fit_intercept = fit_intercept
1001
+ self.max_iter = max_iter
1002
+ self.cv = cv
1003
+ self.n_jobs = n_jobs
1004
+ self.verbose = verbose
1005
+
1006
+ @_fit_context(prefer_skip_nested_validation=True)
1007
+ def fit(self, X, y, **fit_params):
1008
+ """Fit the model using X, y as training data.
1009
+
1010
+ Parameters
1011
+ ----------
1012
+ X : array-like of shape (n_samples, n_features)
1013
+ Training data.
1014
+
1015
+ y : array-like of shape (n_samples,)
1016
+ Target values. Will be cast to X's dtype if necessary.
1017
+
1018
+ **fit_params : dict
1019
+ Parameters to pass to the underlying splitter.
1020
+
1021
+ .. versionadded:: 1.4
1022
+ Only available if `enable_metadata_routing=True`,
1023
+ which can be set by using
1024
+ ``sklearn.set_config(enable_metadata_routing=True)``.
1025
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
1026
+ more details.
1027
+
1028
+ Returns
1029
+ -------
1030
+ self : object
1031
+ Returns an instance of self.
1032
+ """
1033
+ _raise_for_params(fit_params, self, "fit")
1034
+
1035
+ X, y = self._validate_data(X, y, y_numeric=True, ensure_min_features=2)
1036
+ X = as_float_array(X, copy=False, force_all_finite=False)
1037
+ cv = check_cv(self.cv, classifier=False)
1038
+ if _routing_enabled():
1039
+ routed_params = process_routing(self, "fit", **fit_params)
1040
+ else:
1041
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
1042
+ routed_params = Bunch()
1043
+ routed_params.splitter = Bunch(split={})
1044
+ max_iter = (
1045
+ min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
1046
+ if not self.max_iter
1047
+ else self.max_iter
1048
+ )
1049
+ cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
1050
+ delayed(_omp_path_residues)(
1051
+ X[train],
1052
+ y[train],
1053
+ X[test],
1054
+ y[test],
1055
+ self.copy,
1056
+ self.fit_intercept,
1057
+ max_iter,
1058
+ )
1059
+ for train, test in cv.split(X, **routed_params.splitter.split)
1060
+ )
1061
+
1062
+ min_early_stop = min(fold.shape[0] for fold in cv_paths)
1063
+ mse_folds = np.array(
1064
+ [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]
1065
+ )
1066
+ best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
1067
+ self.n_nonzero_coefs_ = best_n_nonzero_coefs
1068
+ omp = OrthogonalMatchingPursuit(
1069
+ n_nonzero_coefs=best_n_nonzero_coefs,
1070
+ fit_intercept=self.fit_intercept,
1071
+ ).fit(X, y)
1072
+
1073
+ self.coef_ = omp.coef_
1074
+ self.intercept_ = omp.intercept_
1075
+ self.n_iter_ = omp.n_iter_
1076
+ return self
1077
+
1078
+ def get_metadata_routing(self):
1079
+ """Get metadata routing of this object.
1080
+
1081
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1082
+ mechanism works.
1083
+
1084
+ .. versionadded:: 1.4
1085
+
1086
+ Returns
1087
+ -------
1088
+ routing : MetadataRouter
1089
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
1090
+ routing information.
1091
+ """
1092
+
1093
+ router = MetadataRouter(owner=self.__class__.__name__).add(
1094
+ splitter=self.cv,
1095
+ method_mapping=MethodMapping().add(callee="split", caller="fit"),
1096
+ )
1097
+ return router
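The `fit` method above evaluates one residue path per CV fold and keeps the sparsity level with the lowest mean squared error across folds. A minimal sketch of that selection logic, using only public scikit-learn APIs on synthetic data (the candidate count mirrors the default `max_iter` heuristic; all variable names here are illustrative, not part of the library):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.model_selection import KFold
from sklearn.linear_model import OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV

X, y = make_regression(n_features=50, n_informative=8, noise=2.0, random_state=0)
n_candidates = max(int(0.1 * X.shape[1]), 5)  # same heuristic as max_iter=None above

fold_mse = []
for train, test in KFold(n_splits=5).split(X):
    # one test-set MSE per candidate sparsity level on this fold
    fold_mse.append([
        np.mean((OrthogonalMatchingPursuit(n_nonzero_coefs=k)
                 .fit(X[train], y[train])
                 .predict(X[test]) - y[test]) ** 2)
        for k in range(1, n_candidates + 1)
    ])

best_k = int(np.argmin(np.mean(fold_mse, axis=0))) + 1  # mirrors best_n_nonzero_coefs
print(best_k, OrthogonalMatchingPursuitCV(cv=5).fit(X, y).n_nonzero_coefs_)

The estimator itself computes the whole residue path in one pass per fold, so the refit-per-candidate loop above is only a readable approximation of the same selection rule.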
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py ADDED
@@ -0,0 +1,575 @@
1
+ # Authors: Rob Zinkov, Mathieu Blondel
2
+ # License: BSD 3 clause
3
+ from numbers import Real
4
+
5
+ from ..base import _fit_context
6
+ from ..utils._param_validation import Interval, StrOptions
7
+ from ._stochastic_gradient import DEFAULT_EPSILON, BaseSGDClassifier, BaseSGDRegressor
8
+
9
+
10
+ class PassiveAggressiveClassifier(BaseSGDClassifier):
11
+ """Passive Aggressive Classifier.
12
+
13
+ Read more in the :ref:`User Guide <passive_aggressive>`.
14
+
15
+ Parameters
16
+ ----------
17
+ C : float, default=1.0
18
+ Maximum step size (regularization). Defaults to 1.0.
19
+
20
+ fit_intercept : bool, default=True
21
+ Whether the intercept should be estimated or not. If False, the
22
+ data is assumed to be already centered.
23
+
24
+ max_iter : int, default=1000
25
+ The maximum number of passes over the training data (aka epochs).
26
+ It only impacts the behavior in the ``fit`` method, and not the
27
+ :meth:`~sklearn.linear_model.PassiveAggressiveClassifier.partial_fit` method.
28
+
29
+ .. versionadded:: 0.19
30
+
31
+ tol : float or None, default=1e-3
32
+ The stopping criterion. If it is not None, the iterations will stop
33
+ when (loss > previous_loss - tol).
34
+
35
+ .. versionadded:: 0.19
36
+
37
+ early_stopping : bool, default=False
38
+ Whether to use early stopping to terminate training when validation
39
+ score is not improving. If set to True, it will automatically set aside
40
+ a stratified fraction of training data as validation and terminate
41
+ training when validation score is not improving by at least `tol` for
42
+ `n_iter_no_change` consecutive epochs.
43
+
44
+ .. versionadded:: 0.20
45
+
46
+ validation_fraction : float, default=0.1
47
+ The proportion of training data to set aside as validation set for
48
+ early stopping. Must be between 0 and 1.
49
+ Only used if early_stopping is True.
50
+
51
+ .. versionadded:: 0.20
52
+
53
+ n_iter_no_change : int, default=5
54
+ Number of iterations with no improvement to wait before early stopping.
55
+
56
+ .. versionadded:: 0.20
57
+
58
+ shuffle : bool, default=True
59
+ Whether or not the training data should be shuffled after each epoch.
60
+
61
+ verbose : int, default=0
62
+ The verbosity level.
63
+
64
+ loss : str, default="hinge"
65
+ The loss function to be used:
66
+ hinge: equivalent to PA-I in the reference paper.
67
+ squared_hinge: equivalent to PA-II in the reference paper.
68
+
69
+ n_jobs : int or None, default=None
70
+ The number of CPUs to use to do the OVA (One Versus All, for
71
+ multi-class problems) computation.
72
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
73
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
74
+ for more details.
75
+
76
+ random_state : int, RandomState instance, default=None
77
+ Used to shuffle the training data, when ``shuffle`` is set to
78
+ ``True``. Pass an int for reproducible output across multiple
79
+ function calls.
80
+ See :term:`Glossary <random_state>`.
81
+
82
+ warm_start : bool, default=False
83
+ When set to True, reuse the solution of the previous call to fit as
84
+ initialization, otherwise, just erase the previous solution.
85
+ See :term:`the Glossary <warm_start>`.
86
+
87
+ Repeatedly calling fit or partial_fit when warm_start is True can
88
+ result in a different solution than when calling fit a single time
89
+ because of the way the data is shuffled.
90
+
91
+ class_weight : dict, {class_label: weight} or "balanced" or None, \
92
+ default=None
93
+ Preset for the class_weight fit parameter.
94
+
95
+ Weights associated with classes. If not given, all classes
96
+ are supposed to have weight one.
97
+
98
+ The "balanced" mode uses the values of y to automatically adjust
99
+ weights inversely proportional to class frequencies in the input data
100
+ as ``n_samples / (n_classes * np.bincount(y))``.
101
+
102
+ .. versionadded:: 0.17
103
+ parameter *class_weight* to automatically weight samples.
104
+
105
+ average : bool or int, default=False
106
+ When set to True, computes the averaged SGD weights and stores the
107
+ result in the ``coef_`` attribute. If set to an int greater than 1,
108
+ averaging will begin once the total number of samples seen reaches
109
+ average. So average=10 will begin averaging after seeing 10 samples.
110
+
111
+ .. versionadded:: 0.19
112
+ parameter *average* to use weights averaging in SGD.
113
+
114
+ Attributes
115
+ ----------
116
+ coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
117
+ (n_classes, n_features)
118
+ Weights assigned to the features.
119
+
120
+ intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
121
+ Constants in decision function.
122
+
123
+ n_features_in_ : int
124
+ Number of features seen during :term:`fit`.
125
+
126
+ .. versionadded:: 0.24
127
+
128
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
129
+ Names of features seen during :term:`fit`. Defined only when `X`
130
+ has feature names that are all strings.
131
+
132
+ .. versionadded:: 1.0
133
+
134
+ n_iter_ : int
135
+ The actual number of iterations to reach the stopping criterion.
136
+ For multiclass fits, it is the maximum over every binary fit.
137
+
138
+ classes_ : ndarray of shape (n_classes,)
139
+ The unique classes labels.
140
+
141
+ t_ : int
142
+ Number of weight updates performed during training.
143
+ Same as ``(n_iter_ * n_samples + 1)``.
144
+
145
+ loss_function_ : callable
146
+ Loss function used by the algorithm.
147
+
148
+ See Also
149
+ --------
150
+ SGDClassifier : Incrementally trained logistic regression.
151
+ Perceptron : Linear perceptron classifier.
152
+
153
+ References
154
+ ----------
155
+ Online Passive-Aggressive Algorithms
156
+ <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
157
+ K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
158
+
159
+ Examples
160
+ --------
161
+ >>> from sklearn.linear_model import PassiveAggressiveClassifier
162
+ >>> from sklearn.datasets import make_classification
163
+ >>> X, y = make_classification(n_features=4, random_state=0)
164
+ >>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
165
+ ... tol=1e-3)
166
+ >>> clf.fit(X, y)
167
+ PassiveAggressiveClassifier(random_state=0)
168
+ >>> print(clf.coef_)
169
+ [[0.26642044 0.45070924 0.67251877 0.64185414]]
170
+ >>> print(clf.intercept_)
171
+ [1.84127814]
172
+ >>> print(clf.predict([[0, 0, 0, 0]]))
173
+ [1]
174
+ """
175
+
176
+ _parameter_constraints: dict = {
177
+ **BaseSGDClassifier._parameter_constraints,
178
+ "loss": [StrOptions({"hinge", "squared_hinge"})],
179
+ "C": [Interval(Real, 0, None, closed="right")],
180
+ }
181
+
182
+ def __init__(
183
+ self,
184
+ *,
185
+ C=1.0,
186
+ fit_intercept=True,
187
+ max_iter=1000,
188
+ tol=1e-3,
189
+ early_stopping=False,
190
+ validation_fraction=0.1,
191
+ n_iter_no_change=5,
192
+ shuffle=True,
193
+ verbose=0,
194
+ loss="hinge",
195
+ n_jobs=None,
196
+ random_state=None,
197
+ warm_start=False,
198
+ class_weight=None,
199
+ average=False,
200
+ ):
201
+ super().__init__(
202
+ penalty=None,
203
+ fit_intercept=fit_intercept,
204
+ max_iter=max_iter,
205
+ tol=tol,
206
+ early_stopping=early_stopping,
207
+ validation_fraction=validation_fraction,
208
+ n_iter_no_change=n_iter_no_change,
209
+ shuffle=shuffle,
210
+ verbose=verbose,
211
+ random_state=random_state,
212
+ eta0=1.0,
213
+ warm_start=warm_start,
214
+ class_weight=class_weight,
215
+ average=average,
216
+ n_jobs=n_jobs,
217
+ )
218
+
219
+ self.C = C
220
+ self.loss = loss
221
+
222
+ @_fit_context(prefer_skip_nested_validation=True)
223
+ def partial_fit(self, X, y, classes=None):
224
+ """Fit linear model with Passive Aggressive algorithm.
225
+
226
+ Parameters
227
+ ----------
228
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
229
+ Subset of the training data.
230
+
231
+ y : array-like of shape (n_samples,)
232
+ Subset of the target values.
233
+
234
+ classes : ndarray of shape (n_classes,)
235
+ Classes across all calls to partial_fit.
236
+ Can be obtained by via `np.unique(y_all)`, where y_all is the
237
+ target vector of the entire dataset.
238
+ This argument is required for the first call to partial_fit
239
+ and can be omitted in the subsequent calls.
240
+ Note that y doesn't need to contain all labels in `classes`.
241
+
242
+ Returns
243
+ -------
244
+ self : object
245
+ Fitted estimator.
246
+ """
247
+ if not hasattr(self, "classes_"):
248
+ self._more_validate_params(for_partial_fit=True)
249
+
250
+ if self.class_weight == "balanced":
251
+ raise ValueError(
252
+ "class_weight 'balanced' is not supported for "
253
+ "partial_fit. For 'balanced' weights, use "
254
+ "`sklearn.utils.compute_class_weight` with "
255
+ "`class_weight='balanced'`. In place of y you "
256
+ "can use a large enough subset of the full "
257
+ "training set target to properly estimate the "
258
+ "class frequency distributions. Pass the "
259
+ "resulting weights as the class_weight "
260
+ "parameter."
261
+ )
262
+
263
+ lr = "pa1" if self.loss == "hinge" else "pa2"
264
+ return self._partial_fit(
265
+ X,
266
+ y,
267
+ alpha=1.0,
268
+ C=self.C,
269
+ loss="hinge",
270
+ learning_rate=lr,
271
+ max_iter=1,
272
+ classes=classes,
273
+ sample_weight=None,
274
+ coef_init=None,
275
+ intercept_init=None,
276
+ )
277
+
278
+ @_fit_context(prefer_skip_nested_validation=True)
279
+ def fit(self, X, y, coef_init=None, intercept_init=None):
280
+ """Fit linear model with Passive Aggressive algorithm.
281
+
282
+ Parameters
283
+ ----------
284
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
285
+ Training data.
286
+
287
+ y : array-like of shape (n_samples,)
288
+ Target values.
289
+
290
+ coef_init : ndarray of shape (n_classes, n_features)
291
+ The initial coefficients to warm-start the optimization.
292
+
293
+ intercept_init : ndarray of shape (n_classes,)
294
+ The initial intercept to warm-start the optimization.
295
+
296
+ Returns
297
+ -------
298
+ self : object
299
+ Fitted estimator.
300
+ """
301
+ self._more_validate_params()
302
+
303
+ lr = "pa1" if self.loss == "hinge" else "pa2"
304
+ return self._fit(
305
+ X,
306
+ y,
307
+ alpha=1.0,
308
+ C=self.C,
309
+ loss="hinge",
310
+ learning_rate=lr,
311
+ coef_init=coef_init,
312
+ intercept_init=intercept_init,
313
+ )
314
+
315
+
316
+ class PassiveAggressiveRegressor(BaseSGDRegressor):
317
+ """Passive Aggressive Regressor.
318
+
319
+ Read more in the :ref:`User Guide <passive_aggressive>`.
320
+
321
+ Parameters
322
+ ----------
323
+
324
+ C : float, default=1.0
325
+ Maximum step size (regularization). Defaults to 1.0.
326
+
327
+ fit_intercept : bool, default=True
328
+ Whether the intercept should be estimated or not. If False, the
329
+ data is assumed to be already centered. Defaults to True.
330
+
331
+ max_iter : int, default=1000
332
+ The maximum number of passes over the training data (aka epochs).
333
+ It only impacts the behavior in the ``fit`` method, and not the
334
+ :meth:`~sklearn.linear_model.PassiveAggressiveRegressor.partial_fit` method.
335
+
336
+ .. versionadded:: 0.19
337
+
338
+ tol : float or None, default=1e-3
339
+ The stopping criterion. If it is not None, the iterations will stop
340
+ when (loss > previous_loss - tol).
341
+
342
+ .. versionadded:: 0.19
343
+
344
+ early_stopping : bool, default=False
345
+ Whether to use early stopping to terminate training when validation
346
+ score is not improving. If set to True, it will automatically set aside
347
+ a fraction of training data as validation and terminate
348
+ training when validation score is not improving by at least tol for
349
+ n_iter_no_change consecutive epochs.
350
+
351
+ .. versionadded:: 0.20
352
+
353
+ validation_fraction : float, default=0.1
354
+ The proportion of training data to set aside as validation set for
355
+ early stopping. Must be between 0 and 1.
356
+ Only used if early_stopping is True.
357
+
358
+ .. versionadded:: 0.20
359
+
360
+ n_iter_no_change : int, default=5
361
+ Number of iterations with no improvement to wait before early stopping.
362
+
363
+ .. versionadded:: 0.20
364
+
365
+ shuffle : bool, default=True
366
+ Whether or not the training data should be shuffled after each epoch.
367
+
368
+ verbose : int, default=0
369
+ The verbosity level.
370
+
371
+ loss : str, default="epsilon_insensitive"
372
+ The loss function to be used:
373
+ epsilon_insensitive: equivalent to PA-I in the reference paper.
374
+ squared_epsilon_insensitive: equivalent to PA-II in the reference
375
+ paper.
376
+
377
+ epsilon : float, default=0.1
378
+ If the difference between the current prediction and the correct label
379
+ is below this threshold, the model is not updated.
380
+
381
+ random_state : int, RandomState instance, default=None
382
+ Used to shuffle the training data, when ``shuffle`` is set to
383
+ ``True``. Pass an int for reproducible output across multiple
384
+ function calls.
385
+ See :term:`Glossary <random_state>`.
386
+
387
+ warm_start : bool, default=False
388
+ When set to True, reuse the solution of the previous call to fit as
389
+ initialization, otherwise, just erase the previous solution.
390
+ See :term:`the Glossary <warm_start>`.
391
+
392
+ Repeatedly calling fit or partial_fit when warm_start is True can
393
+ result in a different solution than when calling fit a single time
394
+ because of the way the data is shuffled.
395
+
396
+ average : bool or int, default=False
397
+ When set to True, computes the averaged SGD weights and stores the
398
+ result in the ``coef_`` attribute. If set to an int greater than 1,
399
+ averaging will begin once the total number of samples seen reaches
400
+ average. So average=10 will begin averaging after seeing 10 samples.
401
+
402
+ .. versionadded:: 0.19
403
+ parameter *average* to use weights averaging in SGD.
404
+
405
+ Attributes
406
+ ----------
407
+ coef_ : ndarray of shape (n_features,)
408
+ Weights assigned to the features.
410
+
411
+ intercept_ : ndarray of shape (1,)
412
+ Constants in decision function.
413
+
414
+ n_features_in_ : int
415
+ Number of features seen during :term:`fit`.
416
+
417
+ .. versionadded:: 0.24
418
+
419
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
420
+ Names of features seen during :term:`fit`. Defined only when `X`
421
+ has feature names that are all strings.
422
+
423
+ .. versionadded:: 1.0
424
+
425
+ n_iter_ : int
426
+ The actual number of iterations to reach the stopping criterion.
427
+
428
+ t_ : int
429
+ Number of weight updates performed during training.
430
+ Same as ``(n_iter_ * n_samples + 1)``.
431
+
432
+ See Also
433
+ --------
434
+ SGDRegressor : Linear model fitted by minimizing a regularized
435
+ empirical loss with SGD.
436
+
437
+ References
438
+ ----------
439
+ Online Passive-Aggressive Algorithms
440
+ <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
441
+ K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006).
442
+
443
+ Examples
444
+ --------
445
+ >>> from sklearn.linear_model import PassiveAggressiveRegressor
446
+ >>> from sklearn.datasets import make_regression
447
+
448
+ >>> X, y = make_regression(n_features=4, random_state=0)
449
+ >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0,
450
+ ... tol=1e-3)
451
+ >>> regr.fit(X, y)
452
+ PassiveAggressiveRegressor(max_iter=100, random_state=0)
453
+ >>> print(regr.coef_)
454
+ [20.48736655 34.18818427 67.59122734 87.94731329]
455
+ >>> print(regr.intercept_)
456
+ [-0.02306214]
457
+ >>> print(regr.predict([[0, 0, 0, 0]]))
458
+ [-0.02306214]
459
+ """
460
+
461
+ _parameter_constraints: dict = {
462
+ **BaseSGDRegressor._parameter_constraints,
463
+ "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})],
464
+ "C": [Interval(Real, 0, None, closed="right")],
465
+ "epsilon": [Interval(Real, 0, None, closed="left")],
466
+ }
467
+
468
+ def __init__(
469
+ self,
470
+ *,
471
+ C=1.0,
472
+ fit_intercept=True,
473
+ max_iter=1000,
474
+ tol=1e-3,
475
+ early_stopping=False,
476
+ validation_fraction=0.1,
477
+ n_iter_no_change=5,
478
+ shuffle=True,
479
+ verbose=0,
480
+ loss="epsilon_insensitive",
481
+ epsilon=DEFAULT_EPSILON,
482
+ random_state=None,
483
+ warm_start=False,
484
+ average=False,
485
+ ):
486
+ super().__init__(
487
+ penalty=None,
488
+ l1_ratio=0,
489
+ epsilon=epsilon,
490
+ eta0=1.0,
491
+ fit_intercept=fit_intercept,
492
+ max_iter=max_iter,
493
+ tol=tol,
494
+ early_stopping=early_stopping,
495
+ validation_fraction=validation_fraction,
496
+ n_iter_no_change=n_iter_no_change,
497
+ shuffle=shuffle,
498
+ verbose=verbose,
499
+ random_state=random_state,
500
+ warm_start=warm_start,
501
+ average=average,
502
+ )
503
+ self.C = C
504
+ self.loss = loss
505
+
506
+ @_fit_context(prefer_skip_nested_validation=True)
507
+ def partial_fit(self, X, y):
508
+ """Fit linear model with Passive Aggressive algorithm.
509
+
510
+ Parameters
511
+ ----------
512
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
513
+ Subset of training data.
514
+
515
+ y : numpy array of shape [n_samples]
516
+ Subset of target values.
517
+
518
+ Returns
519
+ -------
520
+ self : object
521
+ Fitted estimator.
522
+ """
523
+ if not hasattr(self, "coef_"):
524
+ self._more_validate_params(for_partial_fit=True)
525
+
526
+ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
527
+ return self._partial_fit(
528
+ X,
529
+ y,
530
+ alpha=1.0,
531
+ C=self.C,
532
+ loss="epsilon_insensitive",
533
+ learning_rate=lr,
534
+ max_iter=1,
535
+ sample_weight=None,
536
+ coef_init=None,
537
+ intercept_init=None,
538
+ )
539
+
540
+ @_fit_context(prefer_skip_nested_validation=True)
541
+ def fit(self, X, y, coef_init=None, intercept_init=None):
542
+ """Fit linear model with Passive Aggressive algorithm.
543
+
544
+ Parameters
545
+ ----------
546
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
547
+ Training data.
548
+
549
+ y : numpy array of shape [n_samples]
550
+ Target values.
551
+
552
+ coef_init : array, shape = [n_features]
553
+ The initial coefficients to warm-start the optimization.
554
+
555
+ intercept_init : array, shape = [1]
556
+ The initial intercept to warm-start the optimization.
557
+
558
+ Returns
559
+ -------
560
+ self : object
561
+ Fitted estimator.
562
+ """
563
+ self._more_validate_params()
564
+
565
+ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
566
+ return self._fit(
567
+ X,
568
+ y,
569
+ alpha=1.0,
570
+ C=self.C,
571
+ loss="epsilon_insensitive",
572
+ learning_rate=lr,
573
+ coef_init=coef_init,
574
+ intercept_init=intercept_init,
575
+ )
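Both classes above support incremental learning through `partial_fit`. A hedged sketch of streaming mini-batches into `PassiveAggressiveClassifier` on synthetic data (the batch size of 20 is arbitrary), passing `classes` on the first call as the docstring requires:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import PassiveAggressiveClassifier

X, y = make_classification(n_samples=200, n_features=4, random_state=0)
classes = np.unique(y)  # must cover every label that any batch may contain

clf = PassiveAggressiveClassifier(random_state=0)
for start in range(0, X.shape[0], 20):  # arbitrary mini-batch size
    batch = slice(start, start + 20)
    # `classes` is required on the first call; repeating the same value later is allowed
    clf.partial_fit(X[batch], y[batch], classes=classes)

print(clf.score(X, y))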
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sag.py ADDED
@@ -0,0 +1,372 @@
1
+ """Solvers for Ridge and LogisticRegression using SAG algorithm"""
2
+
3
+ # Authors: Tom Dupre la Tour <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ import warnings
8
+
9
+ import numpy as np
10
+
11
+ from ..exceptions import ConvergenceWarning
12
+ from ..utils import check_array
13
+ from ..utils.extmath import row_norms
14
+ from ..utils.validation import _check_sample_weight
15
+ from ._base import make_dataset
16
+ from ._sag_fast import sag32, sag64
17
+
18
+
19
+ def get_auto_step_size(
20
+ max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
21
+ ):
22
+ """Compute automatic step size for SAG solver.
23
+
24
+ The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
25
+ the maximum squared sum of X over all samples.
26
+
27
+ Parameters
28
+ ----------
29
+ max_squared_sum : float
30
+ Maximum squared sum of X over samples.
31
+
32
+ alpha_scaled : float
33
+ Constant that multiplies the regularization term, scaled by
34
+ 1. / n_samples, the number of samples.
35
+
36
+ loss : {'log', 'squared', 'multinomial'}
37
+ The loss function used in SAG solver.
38
+
39
+ fit_intercept : bool
40
+ Specifies if a constant (a.k.a. bias or intercept) will be
41
+ added to the decision function.
42
+
43
+ n_samples : int, default=None
44
+ Number of rows in X. Useful if is_saga=True.
45
+
46
+ is_saga : bool, default=False
47
+ Whether to return step size for the SAGA algorithm or the SAG
48
+ algorithm.
49
+
50
+ Returns
51
+ -------
52
+ step_size : float
53
+ Step size used in SAG solver.
54
+
55
+ References
56
+ ----------
57
+ Schmidt, M., Roux, N. L., & Bach, F. (2013).
58
+ Minimizing finite sums with the stochastic average gradient
59
+ https://hal.inria.fr/hal-00860051/document
60
+
61
+ :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
62
+ "SAGA: A Fast Incremental Gradient Method With Support
63
+ for Non-Strongly Convex Composite Objectives" <1407.0202>`
64
+ """
65
+ if loss in ("log", "multinomial"):
66
+ L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
67
+ elif loss == "squared":
68
+ # Lipschitz constant for the squared loss; the step size is its inverse
69
+ L = max_squared_sum + int(fit_intercept) + alpha_scaled
70
+ else:
71
+ raise ValueError(
72
+ "Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'"
73
+ % loss
74
+ )
75
+ if is_saga:
76
+ # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
77
+ # See Defazio et al. 2014
78
+ mun = min(2 * n_samples * alpha_scaled, L)
79
+ step = 1.0 / (2 * L + mun)
80
+ else:
81
+ # SAG theoretical step size is 1/16L but it is recommended to use 1 / L
82
+ # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
83
+ # slide 65
84
+ step = 1.0 / L
85
+ return step
86
+
87
+
88
+ def sag_solver(
89
+ X,
90
+ y,
91
+ sample_weight=None,
92
+ loss="log",
93
+ alpha=1.0,
94
+ beta=0.0,
95
+ max_iter=1000,
96
+ tol=0.001,
97
+ verbose=0,
98
+ random_state=None,
99
+ check_input=True,
100
+ max_squared_sum=None,
101
+ warm_start_mem=None,
102
+ is_saga=False,
103
+ ):
104
+ """SAG solver for Ridge and LogisticRegression.
105
+
106
+ SAG stands for Stochastic Average Gradient: the gradient of the loss is
107
+ estimated each sample at a time and the model is updated along the way with
108
+ a constant learning rate.
109
+
110
+ IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
111
+ same scale. You can normalize the data by using
112
+ sklearn.preprocessing.StandardScaler on your data before passing it to the
113
+ fit method.
114
+
115
+ This implementation works with data represented as dense numpy arrays or
116
+ sparse scipy arrays of floating point values for the features. It will
117
+ fit the data according to squared loss or log loss.
118
+
119
+ The regularizer is a penalty added to the loss function that shrinks model
120
+ parameters towards the zero vector using the squared euclidean norm L2.
121
+
122
+ .. versionadded:: 0.17
123
+
124
+ Parameters
125
+ ----------
126
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
127
+ Training data.
128
+
129
+ y : ndarray of shape (n_samples,)
130
+ Target values. With loss='multinomial', y must be label encoded
131
+ (see preprocessing.LabelEncoder).
132
+
133
+ sample_weight : array-like of shape (n_samples,), default=None
134
+ Weights applied to individual samples (1. for unweighted).
135
+
136
+ loss : {'log', 'squared', 'multinomial'}, default='log'
137
+ Loss function that will be optimized:
138
+ -'log' is the binary logistic loss, as used in LogisticRegression.
139
+ -'squared' is the squared loss, as used in Ridge.
140
+ -'multinomial' is the multinomial logistic loss, as used in
141
+ LogisticRegression.
142
+
143
+ .. versionadded:: 0.18
144
+ *loss='multinomial'*
145
+
146
+ alpha : float, default=1.
147
+ L2 regularization term in the objective function
148
+ ``(0.5 * alpha * || W ||_F^2)``.
149
+
150
+ beta : float, default=0.
151
+ L1 regularization term in the objective function
152
+ ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
153
+
154
+ max_iter : int, default=1000
155
+ The max number of passes over the training data if the stopping
156
+ criteria is not reached.
157
+
158
+ tol : float, default=0.001
159
+ The stopping criteria for the weights. The iterations will stop when
160
+ max(change in weights) / max(weights) < tol.
161
+
162
+ verbose : int, default=0
163
+ The verbosity level.
164
+
165
+ random_state : int, RandomState instance or None, default=None
166
+ Used when shuffling the data. Pass an int for reproducible output
167
+ across multiple function calls.
168
+ See :term:`Glossary <random_state>`.
169
+
170
+ check_input : bool, default=True
171
+ If False, the input arrays X and y will not be checked.
172
+
173
+ max_squared_sum : float, default=None
174
+ Maximum squared sum of X over samples. If None, it will be computed,
175
+ going through all the samples. The value should be precomputed
176
+ to speed up cross validation.
177
+
178
+ warm_start_mem : dict, default=None
179
+ The initialization parameters used for warm starting. Warm starting is
180
+ currently used in LogisticRegression but not in Ridge.
181
+ It contains:
182
+ - 'coef': the weight vector, with the intercept in last line
183
+ if the intercept is fitted.
184
+ - 'gradient_memory': the scalar gradient for all seen samples.
185
+ - 'sum_gradient': the sum of gradient over all seen samples,
186
+ for each feature.
187
+ - 'intercept_sum_gradient': the sum of gradient over all seen
188
+ samples, for the intercept.
189
+ - 'seen': array of boolean describing the seen samples.
190
+ - 'num_seen': the number of seen samples.
191
+
192
+ is_saga : bool, default=False
193
+ Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
194
+ better in the first epochs, and allows for L1 regularisation.
195
+
196
+ Returns
197
+ -------
198
+ coef_ : ndarray of shape (n_features,)
199
+ Weight vector.
200
+
201
+ n_iter_ : int
202
+ The number of full pass on all samples.
203
+
204
+ warm_start_mem : dict
205
+ Contains a 'coef' key with the fitted result, and possibly the
206
+ fitted intercept at the end of the array. Contains also other keys
207
+ used for warm starting.
208
+
209
+ Examples
210
+ --------
211
+ >>> import numpy as np
212
+ >>> from sklearn import linear_model
213
+ >>> n_samples, n_features = 10, 5
214
+ >>> rng = np.random.RandomState(0)
215
+ >>> X = rng.randn(n_samples, n_features)
216
+ >>> y = rng.randn(n_samples)
217
+ >>> clf = linear_model.Ridge(solver='sag')
218
+ >>> clf.fit(X, y)
219
+ Ridge(solver='sag')
220
+
221
+ >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
222
+ >>> y = np.array([1, 1, 2, 2])
223
+ >>> clf = linear_model.LogisticRegression(
224
+ ... solver='sag', multi_class='multinomial')
225
+ >>> clf.fit(X, y)
226
+ LogisticRegression(multi_class='multinomial', solver='sag')
227
+
228
+ References
229
+ ----------
230
+ Schmidt, M., Roux, N. L., & Bach, F. (2013).
231
+ Minimizing finite sums with the stochastic average gradient
232
+ https://hal.inria.fr/hal-00860051/document
233
+
234
+ :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
235
+ "SAGA: A Fast Incremental Gradient Method With Support
236
+ for Non-Strongly Convex Composite Objectives" <1407.0202>`
237
+
238
+ See Also
239
+ --------
240
+ Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
241
+ LogisticRegression, SGDClassifier, LinearSVC, Perceptron
242
+ """
243
+ if warm_start_mem is None:
244
+ warm_start_mem = {}
245
+ # Ridge default max_iter is None
246
+ if max_iter is None:
247
+ max_iter = 1000
248
+
249
+ if check_input:
250
+ _dtype = [np.float64, np.float32]
251
+ X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C")
252
+ y = check_array(y, dtype=_dtype, ensure_2d=False, order="C")
253
+
254
+ n_samples, n_features = X.shape[0], X.shape[1]
255
+ # As in SGD, the alpha is scaled by n_samples.
256
+ alpha_scaled = float(alpha) / n_samples
257
+ beta_scaled = float(beta) / n_samples
258
+
259
+ # if loss == 'multinomial', y should be label encoded.
260
+ n_classes = int(y.max()) + 1 if loss == "multinomial" else 1
261
+
262
+ # initialization
263
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
264
+
265
+ if "coef" in warm_start_mem.keys():
266
+ coef_init = warm_start_mem["coef"]
267
+ else:
268
+ # assume fit_intercept is False
269
+ coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")
270
+
271
+ # coef_init contains possibly the intercept_init at the end.
272
+ # Note that Ridge centers the data before fitting, so fit_intercept=False.
273
+ fit_intercept = coef_init.shape[0] == (n_features + 1)
274
+ if fit_intercept:
275
+ intercept_init = coef_init[-1, :]
276
+ coef_init = coef_init[:-1, :]
277
+ else:
278
+ intercept_init = np.zeros(n_classes, dtype=X.dtype)
279
+
280
+ if "intercept_sum_gradient" in warm_start_mem.keys():
281
+ intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"]
282
+ else:
283
+ intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)
284
+
285
+ if "gradient_memory" in warm_start_mem.keys():
286
+ gradient_memory_init = warm_start_mem["gradient_memory"]
287
+ else:
288
+ gradient_memory_init = np.zeros(
289
+ (n_samples, n_classes), dtype=X.dtype, order="C"
290
+ )
291
+ if "sum_gradient" in warm_start_mem.keys():
292
+ sum_gradient_init = warm_start_mem["sum_gradient"]
293
+ else:
294
+ sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")
295
+
296
+ if "seen" in warm_start_mem.keys():
297
+ seen_init = warm_start_mem["seen"]
298
+ else:
299
+ seen_init = np.zeros(n_samples, dtype=np.int32, order="C")
300
+
301
+ if "num_seen" in warm_start_mem.keys():
302
+ num_seen_init = warm_start_mem["num_seen"]
303
+ else:
304
+ num_seen_init = 0
305
+
306
+ dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
307
+
308
+ if max_squared_sum is None:
309
+ max_squared_sum = row_norms(X, squared=True).max()
310
+ step_size = get_auto_step_size(
311
+ max_squared_sum,
312
+ alpha_scaled,
313
+ loss,
314
+ fit_intercept,
315
+ n_samples=n_samples,
316
+ is_saga=is_saga,
317
+ )
318
+ if step_size * alpha_scaled == 1:
319
+ raise ZeroDivisionError(
320
+ "Current sag implementation does not handle "
321
+ "the case step_size * alpha_scaled == 1"
322
+ )
323
+
324
+ sag = sag64 if X.dtype == np.float64 else sag32
325
+ num_seen, n_iter_ = sag(
326
+ dataset,
327
+ coef_init,
328
+ intercept_init,
329
+ n_samples,
330
+ n_features,
331
+ n_classes,
332
+ tol,
333
+ max_iter,
334
+ loss,
335
+ step_size,
336
+ alpha_scaled,
337
+ beta_scaled,
338
+ sum_gradient_init,
339
+ gradient_memory_init,
340
+ seen_init,
341
+ num_seen_init,
342
+ fit_intercept,
343
+ intercept_sum_gradient,
344
+ intercept_decay,
345
+ is_saga,
346
+ verbose,
347
+ )
348
+
349
+ if n_iter_ == max_iter:
350
+ warnings.warn(
351
+ "The max_iter was reached which means the coef_ did not converge",
352
+ ConvergenceWarning,
353
+ )
354
+
355
+ if fit_intercept:
356
+ coef_init = np.vstack((coef_init, intercept_init))
357
+
358
+ warm_start_mem = {
359
+ "coef": coef_init,
360
+ "sum_gradient": sum_gradient_init,
361
+ "intercept_sum_gradient": intercept_sum_gradient,
362
+ "gradient_memory": gradient_memory_init,
363
+ "seen": seen_init,
364
+ "num_seen": num_seen,
365
+ }
366
+
367
+ if loss == "multinomial":
368
+ coef_ = coef_init.T
369
+ else:
370
+ coef_ = coef_init[:, 0]
371
+
372
+ return coef_, n_iter_, warm_start_mem
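As a quick numeric check of `get_auto_step_size` above (the values below are illustrative, not taken from any dataset), the SAG and SAGA step sizes for the log loss work out as follows:

# L = 0.25 * (max_squared_sum + fit_intercept) + alpha_scaled for the log loss;
# SAG uses step = 1 / L, SAGA uses step = 1 / (2 * L + min(2 * n_samples * alpha_scaled, L)).
max_squared_sum = 4.0   # illustrative; normally row_norms(X, squared=True).max()
alpha_scaled = 1e-3     # alpha / n_samples
n_samples = 100

L = 0.25 * (max_squared_sum + 1) + alpha_scaled        # fit_intercept=True
sag_step = 1.0 / L
saga_step = 1.0 / (2 * L + min(2 * n_samples * alpha_scaled, L))
print(sag_step, saga_step)  # SAGA takes the smaller, more conservative step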
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (307 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pxd ADDED
@@ -0,0 +1,26 @@
1
+ # License: BSD 3 clause
2
+ """Helper to load LossFunction from sgd_fast.pyx to sag_fast.pyx"""
3
+
4
+ cdef class LossFunction:
5
+ cdef double loss(self, double p, double y) noexcept nogil
6
+ cdef double dloss(self, double p, double y) noexcept nogil
7
+
8
+
9
+ cdef class Regression(LossFunction):
10
+ cdef double loss(self, double p, double y) noexcept nogil
11
+ cdef double dloss(self, double p, double y) noexcept nogil
12
+
13
+
14
+ cdef class Classification(LossFunction):
15
+ cdef double loss(self, double p, double y) noexcept nogil
16
+ cdef double dloss(self, double p, double y) noexcept nogil
17
+
18
+
19
+ cdef class Log(Classification):
20
+ cdef double loss(self, double p, double y) noexcept nogil
21
+ cdef double dloss(self, double p, double y) noexcept nogil
22
+
23
+
24
+ cdef class SquaredLoss(Regression):
25
+ cdef double loss(self, double p, double y) noexcept nogil
26
+ cdef double dloss(self, double p, double y) noexcept nogil
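The declarations above only expose the loss/dloss interface shared by the SGD and SAG solvers. As a rough pure-Python reference (standard textbook forms, assumed here; the actual Cython implementations also clip arguments for numerical stability), the two concrete losses behave like:

import math

def squared_loss(p, y):          # SquaredLoss.loss
    return 0.5 * (p - y) ** 2

def squared_dloss(p, y):         # SquaredLoss.dloss: derivative w.r.t. the prediction p
    return p - y

def log_loss(p, y):              # Log.loss, with y in {-1, +1}
    return math.log(1.0 + math.exp(-y * p))

def log_dloss(p, y):             # Log.dloss
    return -y / (1.0 + math.exp(y * p))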
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py ADDED
@@ -0,0 +1,456 @@
1
+ """
2
+ A Theil-Sen Estimator for Multiple Linear Regression Model
3
+ """
4
+
5
+ # Author: Florian Wilhelm <[email protected]>
6
+ #
7
+ # License: BSD 3 clause
8
+
9
+
10
+ import warnings
11
+ from itertools import combinations
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from joblib import effective_n_jobs
16
+ from scipy import linalg
17
+ from scipy.linalg.lapack import get_lapack_funcs
18
+ from scipy.special import binom
19
+
20
+ from ..base import RegressorMixin, _fit_context
21
+ from ..exceptions import ConvergenceWarning
22
+ from ..utils import check_random_state
23
+ from ..utils._param_validation import Interval
24
+ from ..utils.parallel import Parallel, delayed
25
+ from ._base import LinearModel
26
+
27
+ _EPSILON = np.finfo(np.double).eps
28
+
29
+
30
+ def _modified_weiszfeld_step(X, x_old):
31
+ """Modified Weiszfeld step.
32
+
33
+ This function defines one iteration step in order to approximate the
34
+ spatial median (L1 median). It is a form of an iteratively re-weighted
35
+ least squares method.
36
+
37
+ Parameters
38
+ ----------
39
+ X : array-like of shape (n_samples, n_features)
40
+ Training vector, where `n_samples` is the number of samples and
41
+ `n_features` is the number of features.
42
+
43
+ x_old : ndarray of shape = (n_features,)
44
+ Current start vector.
45
+
46
+ Returns
47
+ -------
48
+ x_new : ndarray of shape (n_features,)
49
+ New iteration step.
50
+
51
+ References
52
+ ----------
53
+ - On Computation of Spatial Median for Robust Data Mining, 2005
54
+ T. Kärkkäinen and S. Äyrämö
55
+ http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
56
+ """
57
+ diff = X - x_old
58
+ diff_norm = np.sqrt(np.sum(diff**2, axis=1))
59
+ mask = diff_norm >= _EPSILON
60
+ # x_old equals one of our samples
61
+ is_x_old_in_X = int(mask.sum() < X.shape[0])
62
+
63
+ diff = diff[mask]
64
+ diff_norm = diff_norm[mask][:, np.newaxis]
65
+ quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0))
66
+
67
+ if quotient_norm > _EPSILON: # to avoid division by zero
68
+ new_direction = np.sum(X[mask, :] / diff_norm, axis=0) / np.sum(
69
+ 1 / diff_norm, axis=0
70
+ )
71
+ else:
72
+ new_direction = 1.0
73
+ quotient_norm = 1.0
74
+
75
+ return (
76
+ max(0.0, 1.0 - is_x_old_in_X / quotient_norm) * new_direction
77
+ + min(1.0, is_x_old_in_X / quotient_norm) * x_old
78
+ )
79
+
80
+
81
+ def _spatial_median(X, max_iter=300, tol=1.0e-3):
82
+ """Spatial median (L1 median).
83
+
84
+ The spatial median is a member of a class of so-called M-estimators which
85
+ are defined by an optimization problem. Given p points in an
86
+ n-dimensional space, the point x minimizing the sum of all distances to the
87
+ p other points is called the spatial median.
88
+
89
+ Parameters
90
+ ----------
91
+ X : array-like of shape (n_samples, n_features)
92
+ Training vector, where `n_samples` is the number of samples and
93
+ `n_features` is the number of features.
94
+
95
+ max_iter : int, default=300
96
+ Maximum number of iterations.
97
+
98
+ tol : float, default=1.e-3
99
+ Stop the algorithm if spatial_median has converged.
100
+
101
+ Returns
102
+ -------
103
+ spatial_median : ndarray of shape = (n_features,)
104
+ Spatial median.
105
+
106
+ n_iter : int
107
+ Number of iterations needed.
108
+
109
+ References
110
+ ----------
111
+ - On Computation of Spatial Median for Robust Data Mining, 2005
112
+ T. Kärkkäinen and S. Äyrämö
113
+ http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
114
+ """
115
+ if X.shape[1] == 1:
116
+ return 1, np.median(X.ravel(), keepdims=True)
117
+
118
+ tol **= 2 # We are computing the tol on the squared norm
119
+ spatial_median_old = np.mean(X, axis=0)
120
+
121
+ for n_iter in range(max_iter):
122
+ spatial_median = _modified_weiszfeld_step(X, spatial_median_old)
123
+ if np.sum((spatial_median_old - spatial_median) ** 2) < tol:
124
+ break
125
+ else:
126
+ spatial_median_old = spatial_median
127
+ else:
128
+ warnings.warn(
129
+ "Maximum number of iterations {max_iter} reached in "
130
+ "spatial median for TheilSen regressor."
131
+ "".format(max_iter=max_iter),
132
+ ConvergenceWarning,
133
+ )
134
+ return n_iter, spatial_median
135
+
136
+
137
+ def _breakdown_point(n_samples, n_subsamples):
138
+ """Approximation of the breakdown point.
139
+
140
+ Parameters
141
+ ----------
142
+ n_samples : int
143
+ Number of samples.
144
+
145
+ n_subsamples : int
146
+ Number of subsamples to consider.
147
+
148
+ Returns
149
+ -------
150
+ breakdown_point : float
151
+ Approximation of breakdown point.
152
+ """
153
+ return (
154
+ 1
155
+ - (
156
+ 0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1)
157
+ + n_subsamples
158
+ - 1
159
+ )
160
+ / n_samples
161
+ )
162
+
163
+
164
+ def _lstsq(X, y, indices, fit_intercept):
165
+ """Least Squares Estimator for TheilSenRegressor class.
166
+
167
+ This function calculates the least squares method on a subset of rows of X
168
+ and y defined by the indices array. Optionally, an intercept column is
169
+ added if intercept is set to true.
170
+
171
+ Parameters
172
+ ----------
173
+ X : array-like of shape (n_samples, n_features)
174
+ Design matrix, where `n_samples` is the number of samples and
175
+ `n_features` is the number of features.
176
+
177
+ y : ndarray of shape (n_samples,)
178
+ Target vector, where `n_samples` is the number of samples.
179
+
180
+ indices : ndarray of shape (n_subpopulation, n_subsamples)
181
+ Indices of all subsamples with respect to the chosen subpopulation.
182
+
183
+ fit_intercept : bool
184
+ Fit intercept or not.
185
+
186
+ Returns
187
+ -------
188
+ weights : ndarray of shape (n_subpopulation, n_features + intercept)
189
+ Solution matrix of n_subpopulation solved least square problems.
190
+ """
191
+ fit_intercept = int(fit_intercept)
192
+ n_features = X.shape[1] + fit_intercept
193
+ n_subsamples = indices.shape[1]
194
+ weights = np.empty((indices.shape[0], n_features))
195
+ X_subpopulation = np.ones((n_subsamples, n_features))
196
+ # gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
197
+ y_subpopulation = np.zeros((max(n_subsamples, n_features)))
198
+ (lstsq,) = get_lapack_funcs(("gelss",), (X_subpopulation, y_subpopulation))
199
+
200
+ for index, subset in enumerate(indices):
201
+ X_subpopulation[:, fit_intercept:] = X[subset, :]
202
+ y_subpopulation[:n_subsamples] = y[subset]
203
+ weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features]
204
+
205
+ return weights
206
+
207
+
208
+ class TheilSenRegressor(RegressorMixin, LinearModel):
209
+ """Theil-Sen Estimator: robust multivariate regression model.
210
+
211
+ The algorithm calculates least square solutions on subsets with size
212
+ n_subsamples of the samples in X. Any value of n_subsamples between the
213
+ number of features and samples leads to an estimator with a compromise
214
+ between robustness and efficiency. Since the number of least square
215
+ solutions is "n_samples choose n_subsamples", it can be extremely large
216
+ and can therefore be limited with max_subpopulation. If this limit is
217
+ reached, the subsets are chosen randomly. In a final step, the spatial
218
+ median (or L1 median) is calculated of all least square solutions.
219
+
220
+ Read more in the :ref:`User Guide <theil_sen_regression>`.
221
+
222
+ Parameters
223
+ ----------
224
+ fit_intercept : bool, default=True
225
+ Whether to calculate the intercept for this model. If set
226
+ to false, no intercept will be used in calculations.
227
+
228
+ copy_X : bool, default=True
229
+ If True, X will be copied; else, it may be overwritten.
230
+
231
+ max_subpopulation : int, default=1e4
232
+ Instead of computing with a set of cardinality 'n choose k', where n is
233
+ the number of samples and k is the number of subsamples (at least
234
+ number of features), consider only a stochastic subpopulation of a
235
+ given maximal size if 'n choose k' is larger than max_subpopulation.
236
+ For all but small problem sizes, this parameter will determine
237
+ memory usage and runtime if n_subsamples is not changed. Note that the
238
+ data type should be int but floats such as 1e4 can be accepted too.
239
+
240
+ n_subsamples : int, default=None
241
+ Number of samples to calculate the parameters. This is at least the
242
+ number of features (plus 1 if fit_intercept=True) and the number of
243
+ samples as a maximum. A lower number leads to a higher breakdown
244
+ point and a low efficiency while a high number leads to a low
245
+ breakdown point and a high efficiency. If None, take the
246
+ minimum number of subsamples leading to maximal robustness.
247
+ If n_subsamples is set to n_samples, Theil-Sen is identical to least
248
+ squares.
249
+
250
+ max_iter : int, default=300
251
+ Maximum number of iterations for the calculation of spatial median.
252
+
253
+ tol : float, default=1e-3
254
+ Tolerance when calculating spatial median.
255
+
256
+ random_state : int, RandomState instance or None, default=None
257
+ A random number generator instance to define the state of the random
258
+ permutations generator. Pass an int for reproducible output across
259
+ multiple function calls.
260
+ See :term:`Glossary <random_state>`.
261
+
262
+ n_jobs : int, default=None
263
+ Number of CPUs to use during the cross validation.
264
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
265
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
266
+ for more details.
267
+
268
+ verbose : bool, default=False
269
+ Verbose mode when fitting the model.
270
+
271
+ Attributes
272
+ ----------
273
+ coef_ : ndarray of shape (n_features,)
274
+ Coefficients of the regression model (median of distribution).
275
+
276
+ intercept_ : float
277
+ Estimated intercept of regression model.
278
+
279
+ breakdown_ : float
280
+ Approximated breakdown point.
281
+
282
+ n_iter_ : int
283
+ Number of iterations needed for the spatial median.
284
+
285
+ n_subpopulation_ : int
286
+ Number of combinations taken into account from 'n choose k', where n is
287
+ the number of samples and k is the number of subsamples.
288
+
289
+ n_features_in_ : int
290
+ Number of features seen during :term:`fit`.
291
+
292
+ .. versionadded:: 0.24
293
+
294
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
295
+ Names of features seen during :term:`fit`. Defined only when `X`
296
+ has feature names that are all strings.
297
+
298
+ .. versionadded:: 1.0
299
+
300
+ See Also
301
+ --------
302
+ HuberRegressor : Linear regression model that is robust to outliers.
303
+ RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
304
+ SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
305
+
306
+ References
307
+ ----------
308
+ - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
309
+ Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
310
+ http://home.olemiss.edu/~xdang/papers/MTSE.pdf
311
+
312
+ Examples
313
+ --------
314
+ >>> from sklearn.linear_model import TheilSenRegressor
315
+ >>> from sklearn.datasets import make_regression
316
+ >>> X, y = make_regression(
317
+ ... n_samples=200, n_features=2, noise=4.0, random_state=0)
318
+ >>> reg = TheilSenRegressor(random_state=0).fit(X, y)
319
+ >>> reg.score(X, y)
320
+ 0.9884...
321
+ >>> reg.predict(X[:1,])
322
+ array([-31.5871...])
323
+ """
324
+
325
+ _parameter_constraints: dict = {
326
+ "fit_intercept": ["boolean"],
327
+ "copy_X": ["boolean"],
328
+ # target_type should be Integral but can accept Real for backward compatibility
329
+ "max_subpopulation": [Interval(Real, 1, None, closed="left")],
330
+ "n_subsamples": [None, Integral],
331
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
332
+ "tol": [Interval(Real, 0.0, None, closed="left")],
333
+ "random_state": ["random_state"],
334
+ "n_jobs": [None, Integral],
335
+ "verbose": ["verbose"],
336
+ }
337
+
338
+ def __init__(
339
+ self,
340
+ *,
341
+ fit_intercept=True,
342
+ copy_X=True,
343
+ max_subpopulation=1e4,
344
+ n_subsamples=None,
345
+ max_iter=300,
346
+ tol=1.0e-3,
347
+ random_state=None,
348
+ n_jobs=None,
349
+ verbose=False,
350
+ ):
351
+ self.fit_intercept = fit_intercept
352
+ self.copy_X = copy_X
353
+ self.max_subpopulation = max_subpopulation
354
+ self.n_subsamples = n_subsamples
355
+ self.max_iter = max_iter
356
+ self.tol = tol
357
+ self.random_state = random_state
358
+ self.n_jobs = n_jobs
359
+ self.verbose = verbose
360
+
361
+ def _check_subparams(self, n_samples, n_features):
362
+ n_subsamples = self.n_subsamples
363
+
364
+ if self.fit_intercept:
365
+ n_dim = n_features + 1
366
+ else:
367
+ n_dim = n_features
368
+
369
+ if n_subsamples is not None:
370
+ if n_subsamples > n_samples:
371
+ raise ValueError(
372
+ "Invalid parameter since n_subsamples > "
373
+ "n_samples ({0} > {1}).".format(n_subsamples, n_samples)
374
+ )
375
+ if n_samples >= n_features:
376
+ if n_dim > n_subsamples:
377
+ plus_1 = "+1" if self.fit_intercept else ""
378
+ raise ValueError(
379
+ "Invalid parameter since n_features{0} "
380
+ "> n_subsamples ({1} > {2})."
381
+ "".format(plus_1, n_dim, n_subsamples)
382
+ )
383
+ else: # if n_samples < n_features
384
+ if n_subsamples != n_samples:
385
+ raise ValueError(
386
+ "Invalid parameter since n_subsamples != "
387
+ "n_samples ({0} != {1}) while n_samples "
388
+ "< n_features.".format(n_subsamples, n_samples)
389
+ )
390
+ else:
391
+ n_subsamples = min(n_dim, n_samples)
392
+
393
+ all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
394
+ n_subpopulation = int(min(self.max_subpopulation, all_combinations))
395
+
396
+ return n_subsamples, n_subpopulation
397
+
398
+ @_fit_context(prefer_skip_nested_validation=True)
399
+ def fit(self, X, y):
400
+ """Fit linear model.
401
+
402
+ Parameters
403
+ ----------
404
+ X : ndarray of shape (n_samples, n_features)
405
+ Training data.
406
+ y : ndarray of shape (n_samples,)
407
+ Target values.
408
+
409
+ Returns
410
+ -------
411
+ self : returns an instance of self.
412
+ Fitted `TheilSenRegressor` estimator.
413
+ """
414
+ random_state = check_random_state(self.random_state)
415
+ X, y = self._validate_data(X, y, y_numeric=True)
416
+ n_samples, n_features = X.shape
417
+ n_subsamples, self.n_subpopulation_ = self._check_subparams(
418
+ n_samples, n_features
419
+ )
420
+ self.breakdown_ = _breakdown_point(n_samples, n_subsamples)
421
+
422
+ if self.verbose:
423
+ print("Breakdown point: {0}".format(self.breakdown_))
424
+ print("Number of samples: {0}".format(n_samples))
425
+ tol_outliers = int(self.breakdown_ * n_samples)
426
+ print("Tolerable outliers: {0}".format(tol_outliers))
427
+ print("Number of subpopulations: {0}".format(self.n_subpopulation_))
428
+
429
+ # Determine indices of subpopulation
430
+ if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
431
+ indices = list(combinations(range(n_samples), n_subsamples))
432
+ else:
433
+ indices = [
434
+ random_state.choice(n_samples, size=n_subsamples, replace=False)
435
+ for _ in range(self.n_subpopulation_)
436
+ ]
437
+
438
+ n_jobs = effective_n_jobs(self.n_jobs)
439
+ index_list = np.array_split(indices, n_jobs)
440
+ weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
441
+ delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
442
+ for job in range(n_jobs)
443
+ )
444
+ weights = np.vstack(weights)
445
+ self.n_iter_, coefs = _spatial_median(
446
+ weights, max_iter=self.max_iter, tol=self.tol
447
+ )
448
+
449
+ if self.fit_intercept:
450
+ self.intercept_ = coefs[0]
451
+ self.coef_ = coefs[1:]
452
+ else:
453
+ self.intercept_ = 0.0
454
+ self.coef_ = coefs
455
+
456
+ return self
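For a quick look at the robustness machinery used above, the spatial-median helper defined at the top of this file can be called directly (it is a private function, so the import below is for illustration only):

import numpy as np
from sklearn.linear_model._theil_sen import _spatial_median

rng = np.random.RandomState(0)
X = rng.randn(50, 2)
X[:5] += 10.0                       # a handful of gross outliers

n_iter, median = _spatial_median(X, max_iter=300, tol=1.0e-3)
print(n_iter, median)               # the outliers barely shift the spatial median
print(X.mean(axis=0))               # ...while the ordinary mean is pulled toward them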
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes).
 
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc ADDED
Binary file (12.4 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc ADDED
Binary file (48.3 kB).