applied-ai-018 committed
Commit 2cd4d2d · verified · 1 Parent(s): c0a0b9e

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/_distutils_hack/__init__.py +132 -0
  3. llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so +3 -0
  6. llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER +1 -0
  7. llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt +1568 -0
  8. llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA +38 -0
  9. llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD +22 -0
  10. llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL +5 -0
  11. llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt +1 -0
  12. llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt +1568 -0
  13. llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA +36 -0
  14. llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD +17 -0
  15. llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER +1 -0
  16. llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt +19 -0
  17. llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA +649 -0
  18. llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD +622 -0
  19. llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL +6 -0
  20. llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt +1 -0
  21. llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe +1 -0
  22. llmeval-env/lib/python3.10/site-packages/regex/__init__.py +3 -0
  23. llmeval-env/lib/python3.10/site-packages/regex/regex.py +746 -0
  24. llmeval-env/lib/python3.10/site-packages/regex/test_regex.py +0 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/__init__.py +157 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/_config.py +373 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/_distributor_init.py +10 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/_min_dependencies.py +65 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/base.py +1478 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/calibration.py +1410 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/conftest.py +309 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/discriminant_analysis.py +1047 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/dummy.py +682 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/exceptions.py +191 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/isotonic.py +498 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/kernel_approximation.py +1137 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/kernel_ridge.py +237 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__init__.py +100 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_base.py +814 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_huber.py +352 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_omp.py +1097 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py +575 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py +229 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py +308 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag.py +372 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pxd +26 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py +2605 -0
.gitattributes CHANGED
@@ -82,3 +82,4 @@ llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=
  llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
+ llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
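The added rule is a gitignore-style pattern followed by attributes: filter=lfs routes the blob through the Git LFS clean/smudge filter, diff=lfs and merge=lfs delegate those operations to LFS, and -text disables text normalization. For illustration only (not part of the commit; git itself uses wildmatch, which fnmatch merely approximates for the wildcard-free paths here), a minimal Python sketch of checking whether a path falls under such a rule:

from fnmatch import fnmatch

# Pattern copied from the .gitattributes hunk above; literal paths match exactly.
LFS_RULES = [
    "llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so",
]

def is_lfs_tracked(path: str) -> bool:
    # Approximates .gitattributes matching for wildcard-free patterns.
    return any(fnmatch(path, rule) for rule in LFS_RULES)

print(is_lfs_tracked(LFS_RULES[0]))  # True: this blob is stored as an LFS pointer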
llmeval-env/lib/python3.10/site-packages/_distutils_hack/__init__.py ADDED
@@ -0,0 +1,132 @@
+ import sys
+ import os
+ import re
+ import importlib
+ import warnings
+
+
+ is_pypy = '__pypy__' in sys.builtin_module_names
+
+
+ warnings.filterwarnings('ignore',
+                         r'.+ distutils\b.+ deprecated',
+                         DeprecationWarning)
+
+
+ def warn_distutils_present():
+     if 'distutils' not in sys.modules:
+         return
+     if is_pypy and sys.version_info < (3, 7):
+         # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
+         # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
+         return
+     warnings.warn(
+         "Distutils was imported before Setuptools, but importing Setuptools "
+         "also replaces the `distutils` module in `sys.modules`. This may lead "
+         "to undesirable behaviors or errors. To avoid these issues, avoid "
+         "using distutils directly, ensure that setuptools is installed in the "
+         "traditional way (e.g. not an editable install), and/or make sure "
+         "that setuptools is always imported before distutils.")
+
+
+ def clear_distutils():
+     if 'distutils' not in sys.modules:
+         return
+     warnings.warn("Setuptools is replacing distutils.")
+     mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
+     for name in mods:
+         del sys.modules[name]
+
+
+ def enabled():
+     """
+     Allow selection of distutils by environment variable.
+     """
+     which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
+     return which == 'local'
+
+
+ def ensure_local_distutils():
+     clear_distutils()
+
+     # With the DistutilsMetaFinder in place,
+     # perform an import to cause distutils to be
+     # loaded from setuptools._distutils. Ref #2906.
+     add_shim()
+     importlib.import_module('distutils')
+     remove_shim()
+
+     # check that submodules load as expected
+     core = importlib.import_module('distutils.core')
+     assert '_distutils' in core.__file__, core.__file__
+
+
+ def do_override():
+     """
+     Ensure that the local copy of distutils is preferred over stdlib.
+
+     See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
+     for more motivation.
+     """
+     if enabled():
+         warn_distutils_present()
+         ensure_local_distutils()
+
+
+ class DistutilsMetaFinder:
+     def find_spec(self, fullname, path, target=None):
+         if path is not None:
+             return
+
+         method_name = 'spec_for_{fullname}'.format(**locals())
+         method = getattr(self, method_name, lambda: None)
+         return method()
+
+     def spec_for_distutils(self):
+         import importlib.abc
+         import importlib.util
+
+         class DistutilsLoader(importlib.abc.Loader):
+
+             def create_module(self, spec):
+                 return importlib.import_module('setuptools._distutils')
+
+             def exec_module(self, module):
+                 pass
+
+         return importlib.util.spec_from_loader('distutils', DistutilsLoader())
+
+     def spec_for_pip(self):
+         """
+         Ensure stdlib distutils when running under pip.
+         See pypa/pip#8761 for rationale.
+         """
+         if self.pip_imported_during_build():
+             return
+         clear_distutils()
+         self.spec_for_distutils = lambda: None
+
+     @staticmethod
+     def pip_imported_during_build():
+         """
+         Detect if pip is being imported in a build script. Ref #2355.
+         """
+         import traceback
+         return any(
+             frame.f_globals['__file__'].endswith('setup.py')
+             for frame, line in traceback.walk_stack(None)
+         )
+
+
+ DISTUTILS_FINDER = DistutilsMetaFinder()
+
+
+ def add_shim():
+     sys.meta_path.insert(0, DISTUTILS_FINDER)
+
+
+ def remove_shim():
+     try:
+         sys.meta_path.remove(DISTUTILS_FINDER)
+     except ValueError:
+         pass
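The file added above is setuptools' import shim: DistutilsMetaFinder sits at the front of sys.meta_path and answers `import distutils` with the copy bundled as setuptools._distutils, while spec_for_pip falls back to the stdlib when running under pip. A minimal usage sketch, assuming a setuptools installation that ships this _distutils_hack (the environment variable mirrors the check in enabled(), which in this version defaults to 'stdlib'):

import os

# Must be set before do_override(); enabled() reads it at call time.
os.environ['SETUPTOOLS_USE_DISTUTILS'] = 'local'

import _distutils_hack
_distutils_hack.do_override()  # installs the finder and re-imports distutils

import distutils.core
print(distutils.core.__file__)  # resolves inside setuptools/_distutils/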
llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.11 kB).
 
llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc ADDED
Binary file (239 Bytes).
 
llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c85d194503e7816b08e4fca94004af158e3e7b0737ba0647a301cacbd6e92886
+ size 3057096
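Because of the .gitattributes rule added earlier, git stores a three-line Git LFS pointer (spec v1) for this shared object rather than the 3 MB binary itself: the spec URL, the SHA-256 of the real blob, and its size in bytes. A small illustrative parser for that format (the function name is ours, not part of the commit):

def parse_lfs_pointer(text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS v1 pointer file."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    return {
        "oid": fields["oid"].removeprefix("sha256:"),  # hash of the real blob
        "size": int(fields["size"]),                   # blob size in bytes
    }

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:c85d194503e7816b08e4fca94004af158e3e7b0737ba0647a301cacbd6e92886\n"
    "size 3057096\n"
)
print(parse_lfs_pointer(pointer))  # size 3057096 matches the file listed above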
llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
+ End User License Agreement
+ --------------------------
+
+
+ Preface
+ -------
+
+ The Software License Agreement in Chapter 1 and the Supplement
+ in Chapter 2 contain license terms and conditions that govern
+ the use of NVIDIA software. By accepting this agreement, you
+ agree to comply with all the terms and conditions applicable
+ to the product(s) included herein.
+
+
+ NVIDIA Driver
+
+
+ Description
+
+ This package contains the operating system driver and
+ fundamental system software components for NVIDIA GPUs.
+
+
+ NVIDIA CUDA Toolkit
+
+
+ Description
+
+ The NVIDIA CUDA Toolkit provides command-line and graphical
+ tools for building, debugging and optimizing the performance
+ of applications accelerated by NVIDIA GPUs, runtime and math
+ libraries, and documentation including programming guides,
+ user manuals, and API references.
+
+
+ Default Install Location of CUDA Toolkit
+
+ Windows platform:
+
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+ Linux platform:
+
+ /usr/local/cuda-#.#
+
+ Mac platform:
+
+ /Developer/NVIDIA/CUDA-#.#
+
+
+ NVIDIA CUDA Samples
+
+
+ Description
+
+ This package includes over 100+ CUDA examples that demonstrate
+ various CUDA programming principles, and efficient CUDA
+ implementation of algorithms in specific application domains.
+
+
+ Default Install Location of CUDA Samples
+
+ Windows platform:
+
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+ Linux platform:
+
+ /usr/local/cuda-#.#/samples
+
+ and
+
+ $HOME/NVIDIA_CUDA-#.#_Samples
+
+ Mac platform:
+
+ /Developer/NVIDIA/CUDA-#.#/samples
+
+
+ NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+ Description
+
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
+ development environment integrated into Microsoft Visual
+ Studio that provides tools for debugging, profiling, analyzing
+ and optimizing your GPU computing and graphics applications.
+
+
+ Default Install Location of Nsight Visual Studio Edition
+
+ Windows platform:
+
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+ 1. License Agreement for NVIDIA Software Development Kits
+ ---------------------------------------------------------
+
+
+ Release Date: July 26, 2018
+ ---------------------------
+
+
+ Important Notice: Read before downloading, installing,
+ copying or using the licensed software:
+ -------------------------------------------------------
+
+ This license agreement, including exhibits attached
+ (“Agreement”) is a legal agreement between you and NVIDIA
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
+ software development kit (“SDK”).
+
+ Each SDK has its own set of software and materials, but here
+ is a description of the types of items that may be included in
+ a SDK: source code, header files, APIs, data sets and assets
+ (examples include images, textures, models, scenes, videos,
+ native API input/output files), binary software, sample code,
+ libraries, utility programs, programming code and
+ documentation.
+
+ This Agreement can be accepted only by an adult of legal age
+ of majority in the country in which the SDK is used.
+
+ If you are entering into this Agreement on behalf of a company
+ or other legal entity, you represent that you have the legal
+ authority to bind the entity to this Agreement, in which case
+ “you” will mean the entity you represent.
+
+ If you don’t have the required age or authority to accept
+ this Agreement, or if you don’t accept all the terms and
+ conditions of this Agreement, do not download, install or use
+ the SDK.
+
+ You agree to use the SDK only for purposes that are permitted
+ by (a) this Agreement, and (b) any applicable law, regulation
+ or generally accepted practices or guidelines in the relevant
+ jurisdictions.
+
+
+ 1.1. License
+
+
+ 1.1.1. License Grant
+
+ Subject to the terms of this Agreement, NVIDIA hereby grants
+ you a non-exclusive, non-transferable license, without the
+ right to sublicense (except as expressly provided in this
+ Agreement) to:
+
+ 1. Install and use the SDK,
+
+ 2. Modify and create derivative works of sample source code
+ delivered in the SDK, and
+
+ 3. Distribute those portions of the SDK that are identified
+ in this Agreement as distributable, as incorporated in
+ object code format into a software application that meets
+ the distribution requirements indicated in this Agreement.
+
+
+ 1.1.2. Distribution Requirements
+
+ These are the distribution requirements for you to exercise
+ the distribution grant:
+
+ 1. Your application must have material additional
+ functionality, beyond the included portions of the SDK.
+
+ 2. The distributable portions of the SDK shall only be
+ accessed by your application.
+
+ 3. The following notice shall be included in modifications
+ and derivative works of sample source code distributed:
+ “This software contains source code provided by NVIDIA
+ Corporation.”
+
+ 4. Unless a developer tool is identified in this Agreement
+ as distributable, it is delivered for your internal use
+ only.
+
+ 5. The terms under which you distribute your application
+ must be consistent with the terms of this Agreement,
+ including (without limitation) terms relating to the
+ license grant and license restrictions and protection of
+ NVIDIA’s intellectual property rights. Additionally, you
+ agree that you will protect the privacy, security and
+ legal rights of your application users.
+
+ 6. You agree to notify NVIDIA in writing of any known or
+ suspected distribution or use of the SDK not in compliance
+ with the requirements of this Agreement, and to enforce
+ the terms of your agreements with respect to distributed
+ SDK.
+
+
+ 1.1.3. Authorized Users
+
+ You may allow employees and contractors of your entity or of
+ your subsidiary(ies) to access and use the SDK from your
+ secure network to perform work on your behalf.
+
+ If you are an academic institution you may allow users
+ enrolled or employed by the academic institution to access and
+ use the SDK from your secure network.
+
+ You are responsible for the compliance with the terms of this
+ Agreement by your authorized users. If you become aware that
+ your authorized users didn’t follow the terms of this
+ Agreement, you agree to take reasonable steps to resolve the
+ non-compliance and prevent new occurrences.
+
+
+ 1.1.4. Pre-Release SDK
+
+ The SDK versions identified as alpha, beta, preview or
+ otherwise as pre-release, may not be fully functional, may
+ contain errors or design flaws, and may have reduced or
+ different security, privacy, accessibility, availability, and
+ reliability standards relative to commercial versions of
+ NVIDIA software and materials. Use of a pre-release SDK may
+ result in unexpected results, loss of data, project delays or
+ other unpredictable damage or loss.
+
+ You may use a pre-release SDK at your own risk, understanding
+ that pre-release SDKs are not intended for use in production
+ or business-critical systems.
+
+ NVIDIA may choose not to make available a commercial version
+ of any pre-release SDK. NVIDIA may also choose to abandon
+ development and terminate the availability of a pre-release
+ SDK at any time without liability.
+
+
+ 1.1.5. Updates
+
+ NVIDIA may, at its option, make available patches, workarounds
+ or other updates to this SDK. Unless the updates are provided
+ with their separate governing terms, they are deemed part of
+ the SDK licensed to you as provided in this Agreement. You
+ agree that the form and content of the SDK that NVIDIA
+ provides may change without prior notice to you. While NVIDIA
+ generally maintains compatibility between versions, NVIDIA may
+ in some cases make changes that introduce incompatibilities in
+ future versions of the SDK.
+
+
+ 1.1.6. Third Party Licenses
+
+ The SDK may come bundled with, or otherwise include or be
+ distributed with, third party software licensed by a NVIDIA
+ supplier and/or open source software provided under an open
+ source license. Use of third party software is subject to the
+ third-party license terms, or in the absence of third party
+ terms, the terms of this Agreement. Copyright to third party
+ software is held by the copyright holders indicated in the
+ third-party software or license.
+
+
+ 1.1.7. Reservation of Rights
+
+ NVIDIA reserves all rights, title, and interest in and to the
+ SDK, not expressly granted to you under this Agreement.
+
+
+ 1.2. Limitations
+
+ The following license limitations apply to your use of the
+ SDK:
+
+ 1. You may not reverse engineer, decompile or disassemble,
+ or remove copyright or other proprietary notices from any
+ portion of the SDK or copies of the SDK.
+
+ 2. Except as expressly provided in this Agreement, you may
+ not copy, sell, rent, sublicense, transfer, distribute,
+ modify, or create derivative works of any portion of the
+ SDK. For clarity, you may not distribute or sublicense the
+ SDK as a stand-alone product.
+
+ 3. Unless you have an agreement with NVIDIA for this
+ purpose, you may not indicate that an application created
+ with the SDK is sponsored or endorsed by NVIDIA.
+
+ 4. You may not bypass, disable, or circumvent any
+ encryption, security, digital rights management or
+ authentication mechanism in the SDK.
+
+ 5. You may not use the SDK in any manner that would cause it
+ to become subject to an open source software license. As
+ examples, licenses that require as a condition of use,
+ modification, and/or distribution that the SDK be:
+
+ a. Disclosed or distributed in source code form;
+
+ b. Licensed for the purpose of making derivative works;
+ or
+
+ c. Redistributable at no charge.
+
+ 6. Unless you have an agreement with NVIDIA for this
+ purpose, you may not use the SDK with any system or
+ application where the use or failure of the system or
+ application can reasonably be expected to threaten or
+ result in personal injury, death, or catastrophic loss.
+ Examples include use in avionics, navigation, military,
+ medical, life support or other life critical applications.
+ NVIDIA does not design, test or manufacture the SDK for
+ these critical uses and NVIDIA shall not be liable to you
+ or any third party, in whole or in part, for any claims or
+ damages arising from such uses.
+
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
+ and its affiliates, and their respective employees,
+ contractors, agents, officers and directors, from and
+ against any and all claims, damages, obligations, losses,
+ liabilities, costs or debt, fines, restitutions and
+ expenses (including but not limited to attorney’s fees
+ and costs incident to establishing the right of
+ indemnification) arising out of or related to your use of
+ the SDK outside of the scope of this Agreement, or not in
+ compliance with its terms.
+
+
+ 1.3. Ownership
+
+ 1. NVIDIA or its licensors hold all rights, title and
+ interest in and to the SDK and its modifications and
+ derivative works, including their respective intellectual
+ property rights, subject to your rights described in this
+ section. This SDK may include software and materials from
+ NVIDIA’s licensors, and these licensors are intended
+ third party beneficiaries that may enforce this Agreement
+ with respect to their intellectual property rights.
+
+ 2. You hold all rights, title and interest in and to your
+ applications and your derivative works of the sample
+ source code delivered in the SDK, including their
+ respective intellectual property rights, subject to
+ NVIDIA’s rights described in this section.
+
+ 3. You may, but don’t have to, provide to NVIDIA
+ suggestions, feature requests or other feedback regarding
+ the SDK, including possible enhancements or modifications
+ to the SDK. For any feedback that you voluntarily provide,
+ you hereby grant NVIDIA and its affiliates a perpetual,
+ non-exclusive, worldwide, irrevocable license to use,
+ reproduce, modify, license, sublicense (through multiple
+ tiers of sublicensees), and distribute (through multiple
+ tiers of distributors) it without the payment of any
+ royalties or fees to you. NVIDIA will use feedback at its
+ choice. NVIDIA is constantly looking for ways to improve
+ its products, so you may send feedback to NVIDIA through
+ the developer portal at https://developer.nvidia.com.
+
+
+ 1.4. No Warranties
+
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
+ DEALING OR COURSE OF TRADE.
+
+
+ 1.5. Limitation of Liability
+
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
+ LIMIT.
+
+ These exclusions and limitations of liability shall apply
+ regardless if NVIDIA or its affiliates have been advised of
+ the possibility of such damages, and regardless of whether a
+ remedy fails its essential purpose. These exclusions and
+ limitations of liability form an essential basis of the
+ bargain between the parties, and, absent any of these
+ exclusions or limitations of liability, the provisions of this
+ Agreement, including, without limitation, the economic terms,
+ would be substantially different.
+
+
+ 1.6. Termination
+
+ 1. This Agreement will continue to apply until terminated by
+ either you or NVIDIA as described below.
+
+ 2. If you want to terminate this Agreement, you may do so by
+ stopping to use the SDK.
+
+ 3. NVIDIA may, at any time, terminate this Agreement if:
+
+ a. (i) you fail to comply with any term of this
+ Agreement and the non-compliance is not fixed within
+ thirty (30) days following notice from NVIDIA (or
+ immediately if you violate NVIDIA’s intellectual
+ property rights);
+
+ b. (ii) you commence or participate in any legal
+ proceeding against NVIDIA with respect to the SDK; or
+
+ c. (iii) NVIDIA decides to no longer provide the SDK in
+ a country or, in NVIDIA’s sole discretion, the
+ continued use of it is no longer commercially viable.
+
+ 4. Upon any termination of this Agreement, you agree to
+ promptly discontinue use of the SDK and destroy all copies
+ in your possession or control. Your prior distributions in
+ accordance with this Agreement are not affected by the
+ termination of this Agreement. Upon written request, you
+ will certify in writing that you have complied with your
+ commitments under this section. Upon any termination of
+ this Agreement all provisions survive except for the
+ license grant provisions.
+
+
+ 1.7. General
+
+ If you wish to assign this Agreement or your rights and
+ obligations, including by merger, consolidation, dissolution
+ or operation of law, contact NVIDIA to ask for permission. Any
+ attempted assignment not approved by NVIDIA in writing shall
+ be void and of no effect. NVIDIA may assign, delegate or
+ transfer this Agreement and its rights and obligations, and if
+ to a non-affiliate you will be notified.
+
+ You agree to cooperate with NVIDIA and provide reasonably
+ requested information to verify your compliance with this
+ Agreement.
+
+ This Agreement will be governed in all respects by the laws of
+ the United States and of the State of Delaware as those laws
+ are applied to contracts entered into and performed entirely
+ within Delaware by Delaware residents, without regard to the
+ conflicts of laws principles. The United Nations Convention on
+ Contracts for the International Sale of Goods is specifically
+ disclaimed. You agree to all terms of this Agreement in the
+ English language.
+
+ The state or federal courts residing in Santa Clara County,
+ California shall have exclusive jurisdiction over any dispute
+ or claim arising out of this Agreement. Notwithstanding this,
+ you agree that NVIDIA shall still be allowed to apply for
+ injunctive remedies or an equivalent type of urgent legal
+ relief in any jurisdiction.
+
+ If any court of competent jurisdiction determines that any
+ provision of this Agreement is illegal, invalid or
+ unenforceable, such provision will be construed as limited to
+ the extent necessary to be consistent with and fully
+ enforceable under the law and the remaining provisions will
+ remain in full force and effect. Unless otherwise specified,
+ remedies are cumulative.
+
+ Each party acknowledges and agrees that the other is an
+ independent contractor in the performance of this Agreement.
+
+ The SDK has been developed entirely at private expense and is
+ “commercial items” consisting of “commercial computer
+ software” and “commercial computer software
+ documentation” provided with RESTRICTED RIGHTS. Use,
+ duplication or disclosure by the U.S. Government or a U.S.
+ Government subcontractor is subject to the restrictions in
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
+ Software - Restricted Rights clause at FAR 52.227-19, as
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
+ Expressway, Santa Clara, CA 95051.
+
+ The SDK is subject to United States export laws and
+ regulations. You agree that you will not ship, transfer or
+ export the SDK into any country, or use the SDK in any manner,
+ prohibited by the United States Bureau of Industry and
+ Security or economic sanctions regulations administered by the
+ U.S. Department of Treasury’s Office of Foreign Assets
+ Control (OFAC), or any applicable export laws, restrictions or
+ regulations. These laws include restrictions on destinations,
+ end users and end use. By accepting this Agreement, you
+ confirm that you are not a resident or citizen of any country
+ currently embargoed by the U.S. and that you are not otherwise
+ prohibited from receiving the SDK.
+
+ Any notice delivered by NVIDIA to you under this Agreement
+ will be delivered via mail, email or fax. You agree that any
+ notices that NVIDIA sends you electronically will satisfy any
+ legal communication requirements. Please direct your legal
+ notices or other correspondence to NVIDIA Corporation, 2788
+ San Tomas Expressway, Santa Clara, California 95051, United
+ States of America, Attention: Legal Department.
+
+ This Agreement and any exhibits incorporated into this
+ Agreement constitute the entire agreement of the parties with
+ respect to the subject matter of this Agreement and supersede
+ all prior negotiations or documentation exchanged between the
+ parties relating to this SDK license. Any additional and/or
+ conflicting terms on documents issued by you are null, void,
+ and invalid. Any amendment or waiver under this Agreement
+ shall be in writing and signed by representatives of both
+ parties.
+
+
+ 2. CUDA Toolkit Supplement to Software License Agreement for
+ NVIDIA Software Development Kits
+ ------------------------------------------------------------
+
+
+ Release date: August 16, 2018
+ -----------------------------
+
+ The terms in this supplement govern your use of the NVIDIA
+ CUDA Toolkit SDK under the terms of your license agreement
+ (“Agreement”) as modified by this supplement. Capitalized
+ terms used but not defined below have the meaning assigned to
+ them in the Agreement.
+
+ This supplement is an exhibit to the Agreement and is
+ incorporated as an integral part of the Agreement. In the
+ event of conflict between the terms in this supplement and the
+ terms in the Agreement, the terms in this supplement govern.
+
+
+ 2.1. License Scope
+
+ The SDK is licensed for you to develop applications only for
+ use in systems with NVIDIA GPUs.
+
+
+ 2.2. Distribution
+
+ The portions of the SDK that are distributable under the
+ Agreement are listed in Attachment A.
+
+
+ 2.3. Operating Systems
+
+ Those portions of the SDK designed exclusively for use on the
+ Linux or FreeBSD operating systems, or other operating systems
+ derived from the source code to these operating systems, may
+ be copied and redistributed for use in accordance with this
+ Agreement, provided that the object code files are not
+ modified in any way (except for unzipping of compressed
+ files).
+
+
+ 2.4. Audio and Video Encoders and Decoders
+
+ You acknowledge and agree that it is your sole responsibility
+ to obtain any additional third-party licenses required to
+ make, have made, use, have used, sell, import, and offer for
+ sale your products or services that include or incorporate any
+ third-party software and content relating to audio and/or
+ video encoders and decoders from, including but not limited
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
+ under this Agreement any necessary patent or other rights with
+ respect to any audio and/or video encoders and decoders.
+
+
+ 2.5. Licensing
+
+ If the distribution terms in this Agreement are not suitable
+ for your organization, or for any questions regarding this
+ Agreement, please contact NVIDIA at
+
+
+ 2.6. Attachment A
+
+ The following portions of the SDK are distributable under the
+ Agreement:
+
+ Component
+
+ CUDA Runtime
+
+ Windows
+
+ cudart.dll, cudart_static.lib, cudadevrt.lib
+
+ Mac OSX
+
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
+
+ Linux
+
+ libcudart.so, libcudart_static.a, libcudadevrt.a
+
+ Android
+
+ libcudart.so, libcudart_static.a, libcudadevrt.a
+
+ Component
+
+ CUDA FFT Library
+
+ Windows
+
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+
+ Mac OSX
+
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+ libcufftw_static.a
+
+ Linux
+
+ libcufft.so, libcufft_static.a, libcufftw.so,
+ libcufftw_static.a
+
+ Android
+
+ libcufft.so, libcufft_static.a, libcufftw.so,
+ libcufftw_static.a
+
+ Component
+
+ CUDA BLAS Library
+
+ Windows
+
+ cublas.dll, cublasLt.dll
+
+ Mac OSX
+
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+ libcublasLt_static.a
+
+ Linux
+
+ libcublas.so, libcublasLt.so, libcublas_static.a,
+ libcublasLt_static.a
+
+ Android
+
+ libcublas.so, libcublasLt.so, libcublas_static.a,
+ libcublasLt_static.a
+
+ Component
+
+ NVIDIA "Drop-in" BLAS Library
+
+ Windows
+
+ nvblas.dll
+
+ Mac OSX
+
+ libnvblas.dylib
+
+ Linux
+
+ libnvblas.so
+
+ Component
+
+ CUDA Sparse Matrix Library
+
+ Windows
+
+ cusparse.dll, cusparse.lib
+
+ Mac OSX
+
+ libcusparse.dylib, libcusparse_static.a
+
+ Linux
+
+ libcusparse.so, libcusparse_static.a
+
+ Android
+
+ libcusparse.so, libcusparse_static.a
+
+ Component
+
+ CUDA Linear Solver Library
+
+ Windows
+
+ cusolver.dll, cusolver.lib
+
+ Mac OSX
+
+ libcusolver.dylib, libcusolver_static.a
+
+ Linux
+
+ libcusolver.so, libcusolver_static.a
+
+ Android
+
+ libcusolver.so, libcusolver_static.a
+
+ Component
+
+ CUDA Random Number Generation Library
+
+ Windows
+
+ curand.dll, curand.lib
+
+ Mac OSX
+
+ libcurand.dylib, libcurand_static.a
+
+ Linux
+
+ libcurand.so, libcurand_static.a
+
+ Android
+
+ libcurand.so, libcurand_static.a
+
+ Component
+
+ CUDA Accelerated Graph Library
+
+ Component
+
+ NVIDIA Performance Primitives Library
+
+ Windows
+
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+
+ Mac OSX
+
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
+ libnpps.dylib, libnpps_static.a
+
+ Linux
+
+ libnppc.so, libnppc_static.a, libnppial.so,
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
+ libnppidei_static.a, libnppif.so, libnppif_static.a
+ libnppig.so, libnppig_static.a, libnppim.so,
+ libnppim_static.a, libnppist.so, libnppist_static.a,
+ libnppisu.so, libnppisu_static.a, libnppitc.so
+ libnppitc_static.a, libnpps.so, libnpps_static.a
+
+ Android
+
+ libnppc.so, libnppc_static.a, libnppial.so,
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
+ libnppidei_static.a, libnppif.so, libnppif_static.a
+ libnppig.so, libnppig_static.a, libnppim.so,
+ libnppim_static.a, libnppist.so, libnppist_static.a,
+ libnppisu.so, libnppisu_static.a, libnppitc.so
+ libnppitc_static.a, libnpps.so, libnpps_static.a
+
+ Component
+
+ NVIDIA JPEG Library
+
+ Linux
+
+ libnvjpeg.so, libnvjpeg_static.a
+
+ Component
+
+ Internal common library required for statically linking to
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+
+ Mac OSX
+
+ libculibos.a
+
+ Linux
+
+ libculibos.a
+
+ Component
+
+ NVIDIA Runtime Compilation Library and Header
+
+ All
+
+ nvrtc.h
+
+ Windows
+
+ nvrtc.dll, nvrtc-builtins.dll
+
+ Mac OSX
+
+ libnvrtc.dylib, libnvrtc-builtins.dylib
+
+ Linux
+
+ libnvrtc.so, libnvrtc-builtins.so
+
+ Component
+
+ NVIDIA Optimizing Compiler Library
+
+ Windows
+
+ nvvm.dll
+
+ Mac OSX
+
+ libnvvm.dylib
+
+ Linux
+
+ libnvvm.so
+
+ Component
+
+ NVIDIA Common Device Math Functions Library
+
+ Windows
+
+ libdevice.10.bc
+
+ Mac OSX
+
+ libdevice.10.bc
+
+ Linux
+
+ libdevice.10.bc
+
+ Component
+
+ CUDA Occupancy Calculation Header Library
+
+ All
+
+ cuda_occupancy.h
+
+ Component
+
+ CUDA Half Precision Headers
+
+ All
+
+ cuda_fp16.h, cuda_fp16.hpp
+
+ Component
+
+ CUDA Profiling Tools Interface (CUPTI) Library
+
+ Windows
+
+ cupti.dll
+
+ Mac OSX
+
+ libcupti.dylib
+
+ Linux
+
+ libcupti.so
+
+ Component
+
+ NVIDIA Tools Extension Library
+
+ Windows
+
+ nvToolsExt.dll, nvToolsExt.lib
+
+ Mac OSX
+
+ libnvToolsExt.dylib
+
+ Linux
+
+ libnvToolsExt.so
+
+ Component
+
+ NVIDIA CUDA Driver Libraries
+
+ Linux
+
+ libcuda.so, libnvidia-fatbinaryloader.so,
+ libnvidia-ptxjitcompiler.so
+
+ The NVIDIA CUDA Driver Libraries are only distributable in
+ applications that meet this criteria:
+
+ 1. The application was developed starting from a NVIDIA CUDA
+ container obtained from Docker Hub or the NVIDIA GPU
+ Cloud, and
+
+ 2. The resulting application is packaged as a Docker
+ container and distributed to users on Docker Hub or the
+ NVIDIA GPU Cloud only.
+
+
+ 2.7. Attachment B
+
+
+ Additional Licensing Obligations
+
+ The following third party components included in the SOFTWARE
+ are licensed to Licensee pursuant to the following terms and
+ conditions:
+
+ 1. Licensee's use of the GDB third party component is
+ subject to the terms and conditions of GNU GPL v3:
+
+ This product includes copyrighted third-party software licensed
+ under the terms of the GNU General Public License v3 ("GPL v3").
+ All third-party software packages are copyright by their respective
+ authors. GPL v3 terms and conditions are hereby incorporated into
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+ Consistent with these licensing requirements, the software
+ listed below is provided under the terms of the specified
+ open source software licenses. To obtain source code for
+ software provided under licenses that require
+ redistribution of source code, including the GNU General
+ Public License (GPL) and GNU Lesser General Public License
+ (LGPL), contact [email protected]. This offer is
+ valid for a period of three (3) years from the date of the
+ distribution of this product by NVIDIA CORPORATION.
+
+ Component License
+ CUDA-GDB GPL v3
+
+ 2. Licensee represents and warrants that any and all third
+ party licensing and/or royalty payment obligations in
+ connection with Licensee's use of the H.264 video codecs
+ are solely the responsibility of Licensee.
+
+ 3. Licensee's use of the Thrust library is subject to the
+ terms and conditions of the Apache License Version 2.0.
+ All third-party software packages are copyright by their
+ respective authors. Apache License Version 2.0 terms and
+ conditions are hereby incorporated into the Agreement by
+ this reference.
+ http://www.apache.org/licenses/LICENSE-2.0.html
+
+ In addition, Licensee acknowledges the following notice:
+ Thrust includes source code from the Boost Iterator,
+ Tuple, System, and Random Number libraries.
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 4. Licensee's use of the LLVM third party component is
+ subject to the following terms and conditions:
+
+ ======================================================
+ LLVM Release License
+ ======================================================
+ University of Illinois/NCSA
+ Open Source License
+
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal with the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
+ Champaign, nor the names of its contributors may be used to endorse or
+ promote products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS WITH THE SOFTWARE.
+
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
+ component is subject to the following terms and
+ conditions:
+
+ ------------
+ PCRE LICENCE
+ ------------
+ PCRE is a library of functions to support regular expressions whose syntax
+ and semantics are as close as possible to those of the Perl 5 language.
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+ specified below. The documentation for PCRE, supplied in the "doc"
+ directory, is distributed under the same terms as the software itself. The
+ basic library functions are written in C and are freestanding. Also
+ included in the distribution is a set of C++ wrapper functions, and a just-
+ in-time compiler that can be used to optimize pattern matching. These are
+ both optional features that can be omitted when the library is built.
+
+ THE BASIC LIBRARY FUNCTIONS
+ ---------------------------
+ Written by: Philip Hazel
+ Email local part: ph10
+ Email domain: cam.ac.uk
+ University of Cambridge Computing Service,
+ Cambridge, England.
+ Copyright (c) 1997-2012 University of Cambridge
+ All rights reserved.
+
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
+ -------------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Emain domain: freemail.hu
+ Copyright(c) 2010-2012 Zoltan Herczeg
+ All rights reserved.
+
+ STACK-LESS JUST-IN-TIME COMPILER
+ --------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Emain domain: freemail.hu
+ Copyright(c) 2009-2012 Zoltan Herczeg
+ All rights reserved.
+
+ THE C++ WRAPPER FUNCTIONS
+ -------------------------
+ Contributed by: Google Inc.
+ Copyright (c) 2007-2012, Google Inc.
+ All rights reserved.
+
+ THE "BSD" LICENCE
+ -----------------
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Google
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 6. Some of the cuBLAS library routines were written by or
+ derived from code written by Vasily Volkov and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2007-2009, Regents of the University of California
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the University of California, Berkeley nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 7. Some of the cuBLAS library routines were written by or
+ derived from code written by Davide Barbieri and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 8. Some of the cuBLAS library routines were derived from
+ code developed by the University of Tennessee and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2010 The University of Tennessee.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer listed in this license in the documentation and/or
+ other materials provided with the distribution.
+ * Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 9. Some of the cuBLAS library routines were written by or
+ derived from code written by Jonathan Hogg and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah ([email protected])
1265
+ David Keyes ([email protected])
1266
+ Hatem Ltaief ([email protected])
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition is subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines uses code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from lunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license .
1567
+
1568
+ -----------------
llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA ADDED
@@ -0,0 +1,38 @@
+ Metadata-Version: 2.1
+ Name: nvidia-cusolver-cu12
+ Version: 11.4.5.107
+ Summary: CUDA solver native runtime libraries
+ Home-page: https://developer.nvidia.com/cuda-zone
+ Author: Nvidia CUDA Installer Team
+ Author-email: [email protected]
+ License: NVIDIA Proprietary Software
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Education
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: Other/Proprietary License
+ Classifier: Natural Language :: English
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.5
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development
+ Classifier: Topic :: Software Development :: Libraries
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Operating System :: POSIX :: Linux
+ Requires-Python: >=3
+ License-File: License.txt
+ Requires-Dist: nvidia-cublas-cu12
+ Requires-Dist: nvidia-nvjitlink-cu12
+ Requires-Dist: nvidia-cusparse-cu12
+
+ CUDA solver native runtime libraries
llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406
+ nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549
+ nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292
+ nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561
+ nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495
+ nvidia/cusolver/include/cusolver_common.h,sha256=8SMCLEPkMN9Ni_KANkvPSHCieV1jrTARuS-Mhmuq5H8,8826
+ nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cusolver/lib/libcusolver.so.11,sha256=ECh6vHzpxfx-fBY3YVZrWZ6uGzYsR-EACRHRmEQ9bVI,114481816
+ nvidia/cusolver/lib/libcusolverMg.so.11,sha256=0f3uK8NQhMAFtQ5r76UCApP7coB7wWG2pQOMh1RMmwY,79763496
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA,sha256=b8Zxnx3ZVIwttTKBnzgVXjXu8-_pRL6wBkYMTV7i6gA,1626
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD,,
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+ nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.37.1)
+ Root-Is-Purelib: true
+ Tag: py3-none-manylinux1_x86_64
+
llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ nvidia
llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
1
+ End User License Agreement
2
+ --------------------------
3
+
4
+
5
+ Preface
6
+ -------
7
+
8
+ The Software License Agreement in Chapter 1 and the Supplement
9
+ in Chapter 2 contain license terms and conditions that govern
10
+ the use of NVIDIA software. By accepting this agreement, you
11
+ agree to comply with all the terms and conditions applicable
12
+ to the product(s) included herein.
13
+
14
+
15
+ NVIDIA Driver
16
+
17
+
18
+ Description
19
+
20
+ This package contains the operating system driver and
21
+ fundamental system software components for NVIDIA GPUs.
22
+
23
+
24
+ NVIDIA CUDA Toolkit
25
+
26
+
27
+ Description
28
+
29
+ The NVIDIA CUDA Toolkit provides command-line and graphical
30
+ tools for building, debugging and optimizing the performance
31
+ of applications accelerated by NVIDIA GPUs, runtime and math
32
+ libraries, and documentation including programming guides,
33
+ user manuals, and API references.
34
+
35
+
36
+ Default Install Location of CUDA Toolkit
37
+
38
+ Windows platform:
39
+
40
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
41
+
42
+ Linux platform:
43
+
44
+ /usr/local/cuda-#.#
45
+
46
+ Mac platform:
47
+
48
+ /Developer/NVIDIA/CUDA-#.#
49
+
50
+
51
+ NVIDIA CUDA Samples
52
+
53
+
54
+ Description
55
+
56
+ This package includes over 100+ CUDA examples that demonstrate
57
+ various CUDA programming principles, and efficient CUDA
58
+ implementation of algorithms in specific application domains.
59
+
60
+
61
+ Default Install Location of CUDA Samples
62
+
63
+ Windows platform:
64
+
65
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
66
+
67
+ Linux platform:
68
+
69
+ /usr/local/cuda-#.#/samples
70
+
71
+ and
72
+
73
+ $HOME/NVIDIA_CUDA-#.#_Samples
74
+
75
+ Mac platform:
76
+
77
+ /Developer/NVIDIA/CUDA-#.#/samples
78
+
79
+
80
+ NVIDIA Nsight Visual Studio Edition (Windows only)
81
+
82
+
83
+ Description
84
+
85
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
86
+ development environment integrated into Microsoft Visual
87
+ Studio that provides tools for debugging, profiling, analyzing
88
+ and optimizing your GPU computing and graphics applications.
89
+
90
+
91
+ Default Install Location of Nsight Visual Studio Edition
92
+
93
+ Windows platform:
94
+
95
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
96
+
97
+
98
+ 1. License Agreement for NVIDIA Software Development Kits
99
+ ---------------------------------------------------------
100
+
101
+
102
+ Release Date: July 26, 2018
103
+ ---------------------------
104
+
105
+
106
+ Important NoticeRead before downloading, installing,
107
+ copying or using the licensed software:
108
+ -------------------------------------------------------
109
+
110
+ This license agreement, including exhibits attached
111
+ ("Agreement”) is a legal agreement between you and NVIDIA
112
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
113
+ software development kit (“SDK”).
114
+
115
+ Each SDK has its own set of software and materials, but here
116
+ is a description of the types of items that may be included in
117
+ a SDK: source code, header files, APIs, data sets and assets
118
+ (examples include images, textures, models, scenes, videos,
119
+ native API input/output files), binary software, sample code,
120
+ libraries, utility programs, programming code and
121
+ documentation.
122
+
123
+ This Agreement can be accepted only by an adult of legal age
124
+ of majority in the country in which the SDK is used.
125
+
126
+ If you are entering into this Agreement on behalf of a company
127
+ or other legal entity, you represent that you have the legal
128
+ authority to bind the entity to this Agreement, in which case
129
+ “you” will mean the entity you represent.
130
+
131
+ If you don’t have the required age or authority to accept
132
+ this Agreement, or if you don’t accept all the terms and
133
+ conditions of this Agreement, do not download, install or use
134
+ the SDK.
135
+
136
+ You agree to use the SDK only for purposes that are permitted
137
+ by (a) this Agreement, and (b) any applicable law, regulation
138
+ or generally accepted practices or guidelines in the relevant
139
+ jurisdictions.
140
+
141
+
142
+ 1.1. License
143
+
144
+
145
+ 1.1.1. License Grant
146
+
147
+ Subject to the terms of this Agreement, NVIDIA hereby grants
148
+ you a non-exclusive, non-transferable license, without the
149
+ right to sublicense (except as expressly provided in this
150
+ Agreement) to:
151
+
152
+ 1. Install and use the SDK,
153
+
154
+ 2. Modify and create derivative works of sample source code
155
+ delivered in the SDK, and
156
+
157
+ 3. Distribute those portions of the SDK that are identified
158
+ in this Agreement as distributable, as incorporated in
159
+ object code format into a software application that meets
160
+ the distribution requirements indicated in this Agreement.
161
+
162
+
163
+ 1.1.2. Distribution Requirements
164
+
165
+ These are the distribution requirements for you to exercise
166
+ the distribution grant:
167
+
168
+ 1. Your application must have material additional
169
+ functionality, beyond the included portions of the SDK.
170
+
171
+ 2. The distributable portions of the SDK shall only be
172
+ accessed by your application.
173
+
174
+ 3. The following notice shall be included in modifications
175
+ and derivative works of sample source code distributed:
176
+ “This software contains source code provided by NVIDIA
177
+ Corporation.”
178
+
179
+ 4. Unless a developer tool is identified in this Agreement
180
+ as distributable, it is delivered for your internal use
181
+ only.
182
+
183
+ 5. The terms under which you distribute your application
184
+ must be consistent with the terms of this Agreement,
185
+ including (without limitation) terms relating to the
186
+ license grant and license restrictions and protection of
187
+ NVIDIA’s intellectual property rights. Additionally, you
188
+ agree that you will protect the privacy, security and
189
+ legal rights of your application users.
190
+
191
+ 6. You agree to notify NVIDIA in writing of any known or
192
+ suspected distribution or use of the SDK not in compliance
193
+ with the requirements of this Agreement, and to enforce
194
+ the terms of your agreements with respect to distributed
195
+ SDK.
196
+
197
+
198
+ 1.1.3. Authorized Users
199
+
200
+ You may allow employees and contractors of your entity or of
201
+ your subsidiary(ies) to access and use the SDK from your
202
+ secure network to perform work on your behalf.
203
+
204
+ If you are an academic institution you may allow users
205
+ enrolled or employed by the academic institution to access and
206
+ use the SDK from your secure network.
207
+
208
+ You are responsible for the compliance with the terms of this
209
+ Agreement by your authorized users. If you become aware that
210
+ your authorized users didn’t follow the terms of this
211
+ Agreement, you agree to take reasonable steps to resolve the
212
+ non-compliance and prevent new occurrences.
213
+
214
+
215
+ 1.1.4. Pre-Release SDK
216
+
217
+ The SDK versions identified as alpha, beta, preview or
218
+ otherwise as pre-release, may not be fully functional, may
219
+ contain errors or design flaws, and may have reduced or
220
+ different security, privacy, accessibility, availability, and
221
+ reliability standards relative to commercial versions of
222
+ NVIDIA software and materials. Use of a pre-release SDK may
223
+ result in unexpected results, loss of data, project delays or
224
+ other unpredictable damage or loss.
225
+
226
+ You may use a pre-release SDK at your own risk, understanding
227
+ that pre-release SDKs are not intended for use in production
228
+ or business-critical systems.
229
+
230
+ NVIDIA may choose not to make available a commercial version
231
+ of any pre-release SDK. NVIDIA may also choose to abandon
232
+ development and terminate the availability of a pre-release
233
+ SDK at any time without liability.
234
+
235
+
236
+ 1.1.5. Updates
237
+
238
+ NVIDIA may, at its option, make available patches, workarounds
239
+ or other updates to this SDK. Unless the updates are provided
240
+ with their separate governing terms, they are deemed part of
241
+ the SDK licensed to you as provided in this Agreement. You
242
+ agree that the form and content of the SDK that NVIDIA
243
+ provides may change without prior notice to you. While NVIDIA
244
+ generally maintains compatibility between versions, NVIDIA may
245
+ in some cases make changes that introduce incompatibilities in
246
+ future versions of the SDK.
247
+
248
+
249
+ 1.1.6. Third Party Licenses
250
+
251
+ The SDK may come bundled with, or otherwise include or be
252
+ distributed with, third party software licensed by a NVIDIA
253
+ supplier and/or open source software provided under an open
254
+ source license. Use of third party software is subject to the
255
+ third-party license terms, or in the absence of third party
256
+ terms, the terms of this Agreement. Copyright to third party
257
+ software is held by the copyright holders indicated in the
258
+ third-party software or license.
259
+
260
+
261
+ 1.1.7. Reservation of Rights
262
+
263
+ NVIDIA reserves all rights, title, and interest in and to the
264
+ SDK, not expressly granted to you under this Agreement.
265
+
266
+
267
+ 1.2. Limitations
268
+
269
+ The following license limitations apply to your use of the
270
+ SDK:
271
+
272
+ 1. You may not reverse engineer, decompile or disassemble,
273
+ or remove copyright or other proprietary notices from any
274
+ portion of the SDK or copies of the SDK.
275
+
276
+ 2. Except as expressly provided in this Agreement, you may
277
+ not copy, sell, rent, sublicense, transfer, distribute,
278
+ modify, or create derivative works of any portion of the
279
+ SDK. For clarity, you may not distribute or sublicense the
280
+ SDK as a stand-alone product.
281
+
282
+ 3. Unless you have an agreement with NVIDIA for this
283
+ purpose, you may not indicate that an application created
284
+ with the SDK is sponsored or endorsed by NVIDIA.
285
+
286
+ 4. You may not bypass, disable, or circumvent any
287
+ encryption, security, digital rights management or
288
+ authentication mechanism in the SDK.
289
+
290
+ 5. You may not use the SDK in any manner that would cause it
291
+ to become subject to an open source software license. As
292
+ examples, licenses that require as a condition of use,
293
+ modification, and/or distribution that the SDK be:
294
+
295
+ a. Disclosed or distributed in source code form;
296
+
297
+ b. Licensed for the purpose of making derivative works;
298
+ or
299
+
300
+ c. Redistributable at no charge.
301
+
302
+ 6. Unless you have an agreement with NVIDIA for this
303
+ purpose, you may not use the SDK with any system or
304
+ application where the use or failure of the system or
305
+ application can reasonably be expected to threaten or
306
+ result in personal injury, death, or catastrophic loss.
307
+ Examples include use in avionics, navigation, military,
308
+ medical, life support or other life critical applications.
309
+ NVIDIA does not design, test or manufacture the SDK for
310
+ these critical uses and NVIDIA shall not be liable to you
311
+ or any third party, in whole or in part, for any claims or
312
+ damages arising from such uses.
313
+
314
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
315
+ and its affiliates, and their respective employees,
316
+ contractors, agents, officers and directors, from and
317
+ against any and all claims, damages, obligations, losses,
318
+ liabilities, costs or debt, fines, restitutions and
319
+ expenses (including but not limited to attorney’s fees
320
+ and costs incident to establishing the right of
321
+ indemnification) arising out of or related to your use of
322
+ the SDK outside of the scope of this Agreement, or not in
323
+ compliance with its terms.
324
+
325
+
326
+ 1.3. Ownership
327
+
328
+ 1. NVIDIA or its licensors hold all rights, title and
329
+ interest in and to the SDK and its modifications and
330
+ derivative works, including their respective intellectual
331
+ property rights, subject to your rights described in this
332
+ section. This SDK may include software and materials from
333
+ NVIDIA’s licensors, and these licensors are intended
334
+ third party beneficiaries that may enforce this Agreement
335
+ with respect to their intellectual property rights.
336
+
337
+ 2. You hold all rights, title and interest in and to your
338
+ applications and your derivative works of the sample
339
+ source code delivered in the SDK, including their
340
+ respective intellectual property rights, subject to
341
+ NVIDIA’s rights described in this section.
342
+
343
+ 3. You may, but don’t have to, provide to NVIDIA
344
+ suggestions, feature requests or other feedback regarding
345
+ the SDK, including possible enhancements or modifications
346
+ to the SDK. For any feedback that you voluntarily provide,
347
+ you hereby grant NVIDIA and its affiliates a perpetual,
348
+ non-exclusive, worldwide, irrevocable license to use,
349
+ reproduce, modify, license, sublicense (through multiple
350
+ tiers of sublicensees), and distribute (through multiple
351
+ tiers of distributors) it without the payment of any
352
+ royalties or fees to you. NVIDIA will use feedback at its
353
+ choice. NVIDIA is constantly looking for ways to improve
354
+ its products, so you may send feedback to NVIDIA through
355
+ the developer portal at https://developer.nvidia.com.
356
+
357
+
358
+ 1.4. No Warranties
359
+
360
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
361
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
362
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
363
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
364
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
365
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
366
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
367
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
368
+ DEALING OR COURSE OF TRADE.
369
+
370
+
371
+ 1.5. Limitation of Liability
372
+
373
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
374
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
375
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
376
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
377
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
378
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
379
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
380
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
381
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
382
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
383
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
384
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
385
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
386
+ LIMIT.
387
+
388
+ These exclusions and limitations of liability shall apply
389
+ regardless if NVIDIA or its affiliates have been advised of
390
+ the possibility of such damages, and regardless of whether a
391
+ remedy fails its essential purpose. These exclusions and
392
+ limitations of liability form an essential basis of the
393
+ bargain between the parties, and, absent any of these
394
+ exclusions or limitations of liability, the provisions of this
395
+ Agreement, including, without limitation, the economic terms,
396
+ would be substantially different.
397
+
398
+
399
+ 1.6. Termination
400
+
401
+ 1. This Agreement will continue to apply until terminated by
402
+ either you or NVIDIA as described below.
403
+
404
+ 2. If you want to terminate this Agreement, you may do so by
405
+ stopping to use the SDK.
406
+
407
+ 3. NVIDIA may, at any time, terminate this Agreement if:
408
+
409
+ a. (i) you fail to comply with any term of this
410
+ Agreement and the non-compliance is not fixed within
411
+ thirty (30) days following notice from NVIDIA (or
412
+ immediately if you violate NVIDIA’s intellectual
413
+ property rights);
414
+
415
+ b. (ii) you commence or participate in any legal
416
+ proceeding against NVIDIA with respect to the SDK; or
417
+
418
+ c. (iii) NVIDIA decides to no longer provide the SDK in
419
+ a country or, in NVIDIA’s sole discretion, the
420
+ continued use of it is no longer commercially viable.
421
+
422
+ 4. Upon any termination of this Agreement, you agree to
423
+ promptly discontinue use of the SDK and destroy all copies
424
+ in your possession or control. Your prior distributions in
425
+ accordance with this Agreement are not affected by the
426
+ termination of this Agreement. Upon written request, you
427
+ will certify in writing that you have complied with your
428
+ commitments under this section. Upon any termination of
429
+ this Agreement all provisions survive except for the
430
+ license grant provisions.
431
+
432
+
433
+ 1.7. General
434
+
435
+ If you wish to assign this Agreement or your rights and
436
+ obligations, including by merger, consolidation, dissolution
437
+ or operation of law, contact NVIDIA to ask for permission. Any
438
+ attempted assignment not approved by NVIDIA in writing shall
439
+ be void and of no effect. NVIDIA may assign, delegate or
440
+ transfer this Agreement and its rights and obligations, and if
441
+ to a non-affiliate you will be notified.
442
+
443
+ You agree to cooperate with NVIDIA and provide reasonably
444
+ requested information to verify your compliance with this
445
+ Agreement.
446
+
447
+ This Agreement will be governed in all respects by the laws of
448
+ the United States and of the State of Delaware as those laws
449
+ are applied to contracts entered into and performed entirely
450
+ within Delaware by Delaware residents, without regard to the
451
+ conflicts of laws principles. The United Nations Convention on
452
+ Contracts for the International Sale of Goods is specifically
453
+ disclaimed. You agree to all terms of this Agreement in the
454
+ English language.
455
+
456
+ The state or federal courts residing in Santa Clara County,
457
+ California shall have exclusive jurisdiction over any dispute
458
+ or claim arising out of this Agreement. Notwithstanding this,
459
+ you agree that NVIDIA shall still be allowed to apply for
460
+ injunctive remedies or an equivalent type of urgent legal
461
+ relief in any jurisdiction.
462
+
463
+ If any court of competent jurisdiction determines that any
464
+ provision of this Agreement is illegal, invalid or
465
+ unenforceable, such provision will be construed as limited to
466
+ the extent necessary to be consistent with and fully
467
+ enforceable under the law and the remaining provisions will
468
+ remain in full force and effect. Unless otherwise specified,
469
+ remedies are cumulative.
470
+
471
+ Each party acknowledges and agrees that the other is an
472
+ independent contractor in the performance of this Agreement.
473
+
474
+ The SDK has been developed entirely at private expense and is
475
+ “commercial items” consisting of “commercial computer
476
+ software” and “commercial computer software
477
+ documentation” provided with RESTRICTED RIGHTS. Use,
478
+ duplication or disclosure by the U.S. Government or a U.S.
479
+ Government subcontractor is subject to the restrictions in
480
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
481
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
482
+ Software - Restricted Rights clause at FAR 52.227-19, as
483
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
484
+ Expressway, Santa Clara, CA 95051.
485
+
486
+ The SDK is subject to United States export laws and
487
+ regulations. You agree that you will not ship, transfer or
488
+ export the SDK into any country, or use the SDK in any manner,
489
+ prohibited by the United States Bureau of Industry and
490
+ Security or economic sanctions regulations administered by the
491
+ U.S. Department of Treasury’s Office of Foreign Assets
492
+ Control (OFAC), or any applicable export laws, restrictions or
493
+ regulations. These laws include restrictions on destinations,
494
+ end users and end use. By accepting this Agreement, you
495
+ confirm that you are not a resident or citizen of any country
496
+ currently embargoed by the U.S. and that you are not otherwise
497
+ prohibited from receiving the SDK.
498
+
499
+ Any notice delivered by NVIDIA to you under this Agreement
500
+ will be delivered via mail, email or fax. You agree that any
501
+ notices that NVIDIA sends you electronically will satisfy any
502
+ legal communication requirements. Please direct your legal
503
+ notices or other correspondence to NVIDIA Corporation, 2788
504
+ San Tomas Expressway, Santa Clara, California 95051, United
505
+ States of America, Attention: Legal Department.
506
+
507
+ This Agreement and any exhibits incorporated into this
508
+ Agreement constitute the entire agreement of the parties with
509
+ respect to the subject matter of this Agreement and supersede
510
+ all prior negotiations or documentation exchanged between the
511
+ parties relating to this SDK license. Any additional and/or
512
+ conflicting terms on documents issued by you are null, void,
513
+ and invalid. Any amendment or waiver under this Agreement
514
+ shall be in writing and signed by representatives of both
515
+ parties.
516
+
517
+
518
+ 2. CUDA Toolkit Supplement to Software License Agreement for
519
+ NVIDIA Software Development Kits
520
+ ------------------------------------------------------------
521
+
522
+
523
+ Release date: August 16, 2018
524
+ -----------------------------
525
+
526
+ The terms in this supplement govern your use of the NVIDIA
527
+ CUDA Toolkit SDK under the terms of your license agreement
528
+ (“Agreement”) as modified by this supplement. Capitalized
529
+ terms used but not defined below have the meaning assigned to
530
+ them in the Agreement.
531
+
532
+ This supplement is an exhibit to the Agreement and is
533
+ incorporated as an integral part of the Agreement. In the
534
+ event of conflict between the terms in this supplement and the
535
+ terms in the Agreement, the terms in this supplement govern.
536
+
537
+
538
+ 2.1. License Scope
539
+
540
+ The SDK is licensed for you to develop applications only for
541
+ use in systems with NVIDIA GPUs.
542
+
543
+
544
+ 2.2. Distribution
545
+
546
+ The portions of the SDK that are distributable under the
547
+ Agreement are listed in Attachment A.
548
+
549
+
550
+ 2.3. Operating Systems
551
+
552
+ Those portions of the SDK designed exclusively for use on the
553
+ Linux or FreeBSD operating systems, or other operating systems
554
+ derived from the source code to these operating systems, may
555
+ be copied and redistributed for use in accordance with this
556
+ Agreement, provided that the object code files are not
557
+ modified in any way (except for unzipping of compressed
558
+ files).
559
+
560
+
561
+ 2.4. Audio and Video Encoders and Decoders
562
+
563
+ You acknowledge and agree that it is your sole responsibility
564
+ to obtain any additional third-party licenses required to
565
+ make, have made, use, have used, sell, import, and offer for
566
+ sale your products or services that include or incorporate any
567
+ third-party software and content relating to audio and/or
568
+ video encoders and decoders from, including but not limited
569
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
570
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
571
+ under this Agreement any necessary patent or other rights with
572
+ respect to any audio and/or video encoders and decoders.
573
+
574
+
575
+ 2.5. Licensing
576
+
577
+ If the distribution terms in this Agreement are not suitable
578
+ for your organization, or for any questions regarding this
579
+ Agreement, please contact NVIDIA at
580
581
+
582
+
583
+ 2.6. Attachment A
584
+
585
+ The following portions of the SDK are distributable under the
586
+ Agreement:
587
+
588
+ Component: CUDA Runtime
+   Windows:  cudart.dll, cudart_static.lib, cudadevrt.lib
+   Mac OSX:  libcudart.dylib, libcudart_static.a, libcudadevrt.a
+   Linux:    libcudart.so, libcudart_static.a, libcudadevrt.a
+   Android:  libcudart.so, libcudart_static.a, libcudadevrt.a
+
+ Component: CUDA FFT Library
+   Windows:  cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+   Mac OSX:  libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+             libcufftw_static.a
+   Linux:    libcufft.so, libcufft_static.a, libcufftw.so,
+             libcufftw_static.a
+   Android:  libcufft.so, libcufft_static.a, libcufftw.so,
+             libcufftw_static.a
+
+ Component: CUDA BLAS Library
+   Windows:  cublas.dll, cublasLt.dll
+   Mac OSX:  libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+             libcublasLt_static.a
+   Linux:    libcublas.so, libcublasLt.so, libcublas_static.a,
+             libcublasLt_static.a
+   Android:  libcublas.so, libcublasLt.so, libcublas_static.a,
+             libcublasLt_static.a
+
+ Component: NVIDIA "Drop-in" BLAS Library
+   Windows:  nvblas.dll
+   Mac OSX:  libnvblas.dylib
+   Linux:    libnvblas.so
+
+ Component: CUDA Sparse Matrix Library
+   Windows:  cusparse.dll, cusparse.lib
+   Mac OSX:  libcusparse.dylib, libcusparse_static.a
+   Linux:    libcusparse.so, libcusparse_static.a
+   Android:  libcusparse.so, libcusparse_static.a
+
+ Component: CUDA Linear Solver Library
+   Windows:  cusolver.dll, cusolver.lib
+   Mac OSX:  libcusolver.dylib, libcusolver_static.a
+   Linux:    libcusolver.so, libcusolver_static.a
+   Android:  libcusolver.so, libcusolver_static.a
+
+ Component: CUDA Random Number Generation Library
+   Windows:  curand.dll, curand.lib
+   Mac OSX:  libcurand.dylib, libcurand_static.a
+   Linux:    libcurand.so, libcurand_static.a
+   Android:  libcurand.so, libcurand_static.a
+
+ Component: CUDA Accelerated Graph Library
+
+ Component: NVIDIA Performance Primitives Library
+   Windows:  nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+             nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+             nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+             nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+             nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+   Mac OSX:  libnppc.dylib, libnppc_static.a, libnppial.dylib,
+             libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+             libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+             libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+             libnppig.dylib, libnppig_static.a, libnppim.dylib,
+             libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
+             libnpps.dylib, libnpps_static.a
+   Linux:    libnppc.so, libnppc_static.a, libnppial.so,
+             libnppial_static.a, libnppicc.so, libnppicc_static.a,
+             libnppicom.so, libnppicom_static.a, libnppidei.so,
+             libnppidei_static.a, libnppif.so, libnppif_static.a,
+             libnppig.so, libnppig_static.a, libnppim.so,
+             libnppim_static.a, libnppist.so, libnppist_static.a,
+             libnppisu.so, libnppisu_static.a, libnppitc.so,
+             libnppitc_static.a, libnpps.so, libnpps_static.a
+   Android:  libnppc.so, libnppc_static.a, libnppial.so,
+             libnppial_static.a, libnppicc.so, libnppicc_static.a,
+             libnppicom.so, libnppicom_static.a, libnppidei.so,
+             libnppidei_static.a, libnppif.so, libnppif_static.a,
+             libnppig.so, libnppig_static.a, libnppim.so,
+             libnppim_static.a, libnppist.so, libnppist_static.a,
+             libnppisu.so, libnppisu_static.a, libnppitc.so,
+             libnppitc_static.a, libnpps.so, libnpps_static.a
+
+ Component: NVIDIA JPEG Library
+   Linux:    libnvjpeg.so, libnvjpeg_static.a
+
+ Component: Internal common library required for statically linking to
+            cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+   Mac OSX:  libculibos.a
+   Linux:    libculibos.a
+
+ Component: NVIDIA Runtime Compilation Library and Header
+   All:      nvrtc.h
+   Windows:  nvrtc.dll, nvrtc-builtins.dll
+   Mac OSX:  libnvrtc.dylib, libnvrtc-builtins.dylib
+   Linux:    libnvrtc.so, libnvrtc-builtins.so
+
+ Component: NVIDIA Optimizing Compiler Library
+   Windows:  nvvm.dll
+   Mac OSX:  libnvvm.dylib
+   Linux:    libnvvm.so
+
+ Component: NVIDIA Common Device Math Functions Library
+   Windows:  libdevice.10.bc
+   Mac OSX:  libdevice.10.bc
+   Linux:    libdevice.10.bc
+
+ Component: CUDA Occupancy Calculation Header Library
+   All:      cuda_occupancy.h
+
+ Component: CUDA Half Precision Headers
+   All:      cuda_fp16.h, cuda_fp16.hpp
+
+ Component: CUDA Profiling Tools Interface (CUPTI) Library
+   Windows:  cupti.dll
+   Mac OSX:  libcupti.dylib
+   Linux:    libcupti.so
+
+ Component: NVIDIA Tools Extension Library
+   Windows:  nvToolsExt.dll, nvToolsExt.lib
+   Mac OSX:  libnvToolsExt.dylib
+   Linux:    libnvToolsExt.so
+
+ Component: NVIDIA CUDA Driver Libraries
+   Linux:    libcuda.so, libnvidia-fatbinaryloader.so,
+             libnvidia-ptxjitcompiler.so
+
908
+ The NVIDIA CUDA Driver Libraries are only distributable in
909
+ applications that meet these criteria:
910
+
911
+ 1. The application was developed starting from an NVIDIA CUDA
912
+ container obtained from Docker Hub or the NVIDIA GPU
913
+ Cloud, and
914
+
915
+ 2. The resulting application is packaged as a Docker
916
+ container and distributed to users on Docker Hub or the
917
+ NVIDIA GPU Cloud only.
918
+
919
+
920
+ 2.7. Attachment B
921
+
922
+
923
+ Additional Licensing Obligations
924
+
925
+ The following third party components included in the SOFTWARE
926
+ are licensed to Licensee pursuant to the following terms and
927
+ conditions:
928
+
929
+ 1. Licensee's use of the GDB third party component is
930
+ subject to the terms and conditions of GNU GPL v3:
931
+
932
+ This product includes copyrighted third-party software licensed
933
+ under the terms of the GNU General Public License v3 ("GPL v3").
934
+ All third-party software packages are copyright by their respective
935
+ authors. GPL v3 terms and conditions are hereby incorporated into
936
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
937
+
938
+ Consistent with these licensing requirements, the software
939
+ listed below is provided under the terms of the specified
940
+ open source software licenses. To obtain source code for
941
+ software provided under licenses that require
942
+ redistribution of source code, including the GNU General
943
+ Public License (GPL) and GNU Lesser General Public License
944
+ (LGPL), contact [email protected]. This offer is
945
+ valid for a period of three (3) years from the date of the
946
+ distribution of this product by NVIDIA CORPORATION.
947
+
948
+ Component License
949
+ CUDA-GDB GPL v3
950
+
951
+ 2. Licensee represents and warrants that any and all third
952
+ party licensing and/or royalty payment obligations in
953
+ connection with Licensee's use of the H.264 video codecs
954
+ are solely the responsibility of Licensee.
955
+
956
+ 3. Licensee's use of the Thrust library is subject to the
957
+ terms and conditions of the Apache License Version 2.0.
958
+ All third-party software packages are copyright by their
959
+ respective authors. Apache License Version 2.0 terms and
960
+ conditions are hereby incorporated into the Agreement by
961
+ this reference.
962
+ http://www.apache.org/licenses/LICENSE-2.0.html
963
+
964
+ In addition, Licensee acknowledges the following notice:
965
+ Thrust includes source code from the Boost Iterator,
966
+ Tuple, System, and Random Number libraries.
967
+
968
+ Boost Software License - Version 1.0 - August 17th, 2003
969
+ . . . .
970
+
971
+ Permission is hereby granted, free of charge, to any person or
972
+ organization obtaining a copy of the software and accompanying
973
+ documentation covered by this license (the "Software") to use,
974
+ reproduce, display, distribute, execute, and transmit the Software,
975
+ and to prepare derivative works of the Software, and to permit
976
+ third-parties to whom the Software is furnished to do so, all
977
+ subject to the following:
978
+
979
+ The copyright notices in the Software and this entire statement,
980
+ including the above license grant, this restriction and the following
981
+ disclaimer, must be included in all copies of the Software, in whole
982
+ or in part, and all derivative works of the Software, unless such
983
+ copies or derivative works are solely in the form of machine-executable
984
+ object code generated by a source language processor.
985
+
986
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
987
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
988
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
989
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
990
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
991
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
992
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
993
+ OTHER DEALINGS IN THE SOFTWARE.
994
+
995
+ 4. Licensee's use of the LLVM third party component is
996
+ subject to the following terms and conditions:
997
+
998
+ ======================================================
999
+ LLVM Release License
1000
+ ======================================================
1001
+ University of Illinois/NCSA
1002
+ Open Source License
1003
+
1004
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
1005
+ All rights reserved.
1006
+
1007
+ Developed by:
1008
+
1009
+ LLVM Team
1010
+
1011
+ University of Illinois at Urbana-Champaign
1012
+
1013
+ http://llvm.org
1014
+
1015
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1016
+ of this software and associated documentation files (the "Software"), to
1017
+ deal with the Software without restriction, including without limitation the
1018
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1019
+ sell copies of the Software, and to permit persons to whom the Software is
1020
+ furnished to do so, subject to the following conditions:
1021
+
1022
+ * Redistributions of source code must retain the above copyright notice,
1023
+ this list of conditions and the following disclaimers.
1024
+
1025
+ * Redistributions in binary form must reproduce the above copyright
1026
+ notice, this list of conditions and the following disclaimers in the
1027
+ documentation and/or other materials provided with the distribution.
1028
+
1029
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
1030
+ Champaign, nor the names of its contributors may be used to endorse or
1031
+ promote products derived from this Software without specific prior
1032
+ written permission.
1033
+
1034
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1035
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1036
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1037
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1038
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1039
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1040
+ DEALINGS WITH THE SOFTWARE.
1041
+
1042
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
1043
+ component is subject to the following terms and
1044
+ conditions:
1045
+
1046
+ ------------
1047
+ PCRE LICENCE
1048
+ ------------
1049
+ PCRE is a library of functions to support regular expressions whose syntax
1050
+ and semantics are as close as possible to those of the Perl 5 language.
1051
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
1052
+ specified below. The documentation for PCRE, supplied in the "doc"
1053
+ directory, is distributed under the same terms as the software itself. The
1054
+ basic library functions are written in C and are freestanding. Also
1055
+ included in the distribution is a set of C++ wrapper functions, and a just-
1056
+ in-time compiler that can be used to optimize pattern matching. These are
1057
+ both optional features that can be omitted when the library is built.
1058
+
1059
+ THE BASIC LIBRARY FUNCTIONS
1060
+ ---------------------------
1061
+ Written by: Philip Hazel
1062
+ Email local part: ph10
1063
+ Email domain: cam.ac.uk
1064
+ University of Cambridge Computing Service,
1065
+ Cambridge, England.
1066
+ Copyright (c) 1997-2012 University of Cambridge
1067
+ All rights reserved.
1068
+
1069
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
1070
+ -------------------------------------
1071
+ Written by: Zoltan Herczeg
1072
+ Email local part: hzmester
1073
+ Email domain: freemail.hu
1074
+ Copyright(c) 2010-2012 Zoltan Herczeg
1075
+ All rights reserved.
1076
+
1077
+ STACK-LESS JUST-IN-TIME COMPILER
1078
+ --------------------------------
1079
+ Written by: Zoltan Herczeg
1080
+ Email local part: hzmester
1081
+ Email domain: freemail.hu
1082
+ Copyright(c) 2009-2012 Zoltan Herczeg
1083
+ All rights reserved.
1084
+
1085
+ THE C++ WRAPPER FUNCTIONS
1086
+ -------------------------
1087
+ Contributed by: Google Inc.
1088
+ Copyright (c) 2007-2012, Google Inc.
1089
+ All rights reserved.
1090
+
1091
+ THE "BSD" LICENCE
1092
+ -----------------
1093
+ Redistribution and use in source and binary forms, with or without
1094
+ modification, are permitted provided that the following conditions are met:
1095
+
1096
+ * Redistributions of source code must retain the above copyright notice,
1097
+ this list of conditions and the following disclaimer.
1098
+
1099
+ * Redistributions in binary form must reproduce the above copyright
1100
+ notice, this list of conditions and the following disclaimer in the
1101
+ documentation and/or other materials provided with the distribution.
1102
+
1103
+ * Neither the name of the University of Cambridge nor the name of Google
1104
+ Inc. nor the names of their contributors may be used to endorse or
1105
+ promote products derived from this software without specific prior
1106
+ written permission.
1107
+
1108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1109
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1110
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1111
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
1112
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1113
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1114
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1115
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1116
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1117
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1118
+ POSSIBILITY OF SUCH DAMAGE.
1119
+
1120
+ 6. Some of the cuBLAS library routines were written by or
1121
+ derived from code written by Vasily Volkov and are subject
1122
+ to the Modified Berkeley Software Distribution License as
1123
+ follows:
1124
+
1125
+ Copyright (c) 2007-2009, Regents of the University of California
1126
+
1127
+ All rights reserved.
1128
+
1129
+ Redistribution and use in source and binary forms, with or without
1130
+ modification, are permitted provided that the following conditions are
1131
+ met:
1132
+ * Redistributions of source code must retain the above copyright
1133
+ notice, this list of conditions and the following disclaimer.
1134
+ * Redistributions in binary form must reproduce the above
1135
+ copyright notice, this list of conditions and the following
1136
+ disclaimer in the documentation and/or other materials provided
1137
+ with the distribution.
1138
+ * Neither the name of the University of California, Berkeley nor
1139
+ the names of its contributors may be used to endorse or promote
1140
+ products derived from this software without specific prior
1141
+ written permission.
1142
+
1143
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1144
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1145
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1146
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1147
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1148
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1149
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1150
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1151
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1152
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1153
+ POSSIBILITY OF SUCH DAMAGE.
1154
+
1155
+ 7. Some of the cuBLAS library routines were written by or
1156
+ derived from code written by Davide Barbieri and are
1157
+ subject to the Modified Berkeley Software Distribution
1158
+ License as follows:
1159
+
1160
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
1161
+
1162
+ All rights reserved.
1163
+
1164
+ Redistribution and use in source and binary forms, with or without
1165
+ modification, are permitted provided that the following conditions are
1166
+ met:
1167
+ * Redistributions of source code must retain the above copyright
1168
+ notice, this list of conditions and the following disclaimer.
1169
+ * Redistributions in binary form must reproduce the above
1170
+ copyright notice, this list of conditions and the following
1171
+ disclaimer in the documentation and/or other materials provided
1172
+ with the distribution.
1173
+ * The name of the author may not be used to endorse or promote
1174
+ products derived from this software without specific prior
1175
+ written permission.
1176
+
1177
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1178
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1179
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1180
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1181
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1182
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1183
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1184
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1185
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1186
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1187
+ POSSIBILITY OF SUCH DAMAGE.
1188
+
1189
+ 8. Some of the cuBLAS library routines were derived from
1190
+ code developed by the University of Tennessee and are
1191
+ subject to the Modified Berkeley Software Distribution
1192
+ License as follows:
1193
+
1194
+ Copyright (c) 2010 The University of Tennessee.
1195
+
1196
+ All rights reserved.
1197
+
1198
+ Redistribution and use in source and binary forms, with or without
1199
+ modification, are permitted provided that the following conditions are
1200
+ met:
1201
+ * Redistributions of source code must retain the above copyright
1202
+ notice, this list of conditions and the following disclaimer.
1203
+ * Redistributions in binary form must reproduce the above
1204
+ copyright notice, this list of conditions and the following
1205
+ disclaimer listed in this license in the documentation and/or
1206
+ other materials provided with the distribution.
1207
+ * Neither the name of the copyright holders nor the names of its
1208
+ contributors may be used to endorse or promote products derived
1209
+ from this software without specific prior written permission.
1210
+
1211
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1212
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1213
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1214
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1215
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1216
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1217
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1218
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1219
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1220
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1221
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1222
+
1223
+ 9. Some of the cuBLAS library routines were written by or
1224
+ derived from code written by Jonathan Hogg and are subject
1225
+ to the Modified Berkeley Software Distribution License as
1226
+ follows:
1227
+
1228
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
1229
+
1230
+ All rights reserved.
1231
+
1232
+ Redistribution and use in source and binary forms, with or without
1233
+ modification, are permitted provided that the following conditions are
1234
+ met:
1235
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah ([email protected])
1265
+ David Keyes ([email protected])
1266
+ Hatem Ltaief ([email protected])
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition are subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines use code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of the linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from LunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license.
1567
+
1568
+ -----------------
llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA ADDED
@@ -0,0 +1,36 @@
1
+ Metadata-Version: 2.1
2
+ Name: nvidia-cusparse-cu12
3
+ Version: 12.1.0.106
4
+ Summary: CUSPARSE native runtime libraries
5
+ Home-page: https://developer.nvidia.com/cuda-zone
6
+ Author: Nvidia CUDA Installer Team
7
+ Author-email: [email protected]
8
+ License: NVIDIA Proprietary Software
9
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: Other/Proprietary License
15
+ Classifier: Natural Language :: English
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.5
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3 :: Only
25
+ Classifier: Topic :: Scientific/Engineering
26
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
27
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Software Development
29
+ Classifier: Topic :: Software Development :: Libraries
30
+ Classifier: Operating System :: Microsoft :: Windows
31
+ Classifier: Operating System :: POSIX :: Linux
32
+ Requires-Python: >=3
33
+ License-File: License.txt
34
+ Requires-Dist: nvidia-nvjitlink-cu12
35
+
36
+ CUSPARSE native runtime libraries
llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
1
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
3
+ nvidia/cusparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ nvidia/cusparse/__pycache__/__init__.cpython-310.pyc,,
5
+ nvidia/cusparse/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc,,
7
+ nvidia/cusparse/include/cusparse.h,sha256=yhV9iTcEW9XEyhaJmX4iddh_cMb8sfNAy6qva5ae4qw,287290
8
+ nvidia/cusparse/include/cusparse_v2.h,sha256=jkH2A9hYc-TEF0vuQ_SurbhPNEHkYGUIRuxKXhFAqnw,2587
9
+ nvidia/cusparse/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
+ nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc,,
11
+ nvidia/cusparse/lib/libcusparse.so.12,sha256=UARmovVZ3mIqcbuSDT0pI-aRNSRXR6J0LuE-3_C6YIU,264876688
12
+ nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
13
+ nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
14
+ nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA,sha256=XpBtE4L1lFCx7gDu7Klx9dijNWQW26PS3fcOGjNIsXg,1550
15
+ nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD,,
16
+ nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
17
+ nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,19 @@
1
+ Copyright (c) 2003-2019 Stuart Bishop <[email protected]>
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a
4
+ copy of this software and associated documentation files (the "Software"),
5
+ to deal in the Software without restriction, including without limitation
6
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
7
+ and/or sell copies of the Software, and to permit persons to whom the
8
+ Software is furnished to do so, subject to the following conditions:
9
+
10
+ The above copyright notice and this permission notice shall be included in
11
+ all copies or substantial portions of the Software.
12
+
13
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19
+ DEALINGS IN THE SOFTWARE.
llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA ADDED
@@ -0,0 +1,649 @@
1
+ Metadata-Version: 2.1
2
+ Name: pytz
3
+ Version: 2024.1
4
+ Summary: World timezone definitions, modern and historical
5
+ Home-page: http://pythonhosted.org/pytz
6
+ Author: Stuart Bishop
7
+ Author-email: [email protected]
8
+ Maintainer: Stuart Bishop
9
+ Maintainer-email: [email protected]
10
+ License: MIT
11
+ Download-URL: https://pypi.org/project/pytz/
12
+ Keywords: timezone,tzinfo,datetime,olson,time
13
+ Platform: Independent
14
+ Classifier: Development Status :: 6 - Mature
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: License :: OSI Approved :: MIT License
17
+ Classifier: Natural Language :: English
18
+ Classifier: Operating System :: OS Independent
19
+ Classifier: Programming Language :: Python
20
+ Classifier: Programming Language :: Python :: 2
21
+ Classifier: Programming Language :: Python :: 2.4
22
+ Classifier: Programming Language :: Python :: 2.5
23
+ Classifier: Programming Language :: Python :: 2.6
24
+ Classifier: Programming Language :: Python :: 2.7
25
+ Classifier: Programming Language :: Python :: 3
26
+ Classifier: Programming Language :: Python :: 3.1
27
+ Classifier: Programming Language :: Python :: 3.2
28
+ Classifier: Programming Language :: Python :: 3.3
29
+ Classifier: Programming Language :: Python :: 3.4
30
+ Classifier: Programming Language :: Python :: 3.5
31
+ Classifier: Programming Language :: Python :: 3.6
32
+ Classifier: Programming Language :: Python :: 3.7
33
+ Classifier: Programming Language :: Python :: 3.8
34
+ Classifier: Programming Language :: Python :: 3.9
35
+ Classifier: Programming Language :: Python :: 3.10
36
+ Classifier: Programming Language :: Python :: 3.11
37
+ Classifier: Programming Language :: Python :: 3.12
38
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
39
+ License-File: LICENSE.txt
40
+
41
+ pytz - World Timezone Definitions for Python
42
+ ============================================
43
+
44
+ :Author: Stuart Bishop <[email protected]>
45
+
46
+ Introduction
47
+ ~~~~~~~~~~~~
48
+
49
+ pytz brings the Olson tz database into Python. This library allows
50
+ accurate and cross-platform timezone calculations using Python 2.4
51
+ or higher. It also solves the issue of ambiguous times at the end
52
+ of daylight saving time, which you can read more about in the Python
53
+ Library Reference (``datetime.tzinfo``).
54
+
55
+ Almost all of the Olson timezones are supported.
56
+
57
+ .. note::
58
+
59
+ Projects using Python 3.9 or later should use the timezone support
60
+ now included in the standard library, together with third-party
61
+ packages that work with it, such as `tzdata <https://pypi.org/project/tzdata/>`_.
62
+ pytz offers no advantages beyond backwards compatibility with
63
+ code written for earlier versions of Python.
64
+
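+ For instance, the same kind of aware datetime can be built directly with
+ the standard library on Python 3.9+. A minimal sketch, assuming zone data
+ is available from the OS or from the ``tzdata`` package::
+
+     from datetime import datetime
+     from zoneinfo import ZoneInfo
+
+     # zoneinfo attaches the timezone at construction time; there is
+     # no separate localize() step as there is with pytz.
+     dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=ZoneInfo("US/Eastern"))
+     print(dt.isoformat())  # 2002-10-27T06:00:00-05:00
+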
65
+ .. note::
66
+
67
+ This library differs from the documented Python API for
68
+ tzinfo implementations; if you want to create local wallclock
69
+ times you need to use the ``localize()`` method described in this
70
+ document. In addition, if you perform date arithmetic on local
71
+ times that cross DST boundaries, the result may be in an incorrect
72
+ timezone (i.e. subtract 1 minute from 2002-10-27 1:00 EST and you get
73
+ 2002-10-27 0:59 EST instead of the correct 2002-10-27 1:59 EDT). A
74
+ ``normalize()`` method is provided to correct this. Unfortunately these
75
+ issues cannot be resolved without modifying the Python datetime
76
+ implementation (see PEP-431).
77
+
78
+
79
+ Installation
80
+ ~~~~~~~~~~~~
81
+
82
+ This package can either be installed using ``pip`` or from a tarball using the
83
+ standard Python distutils.
84
+
85
+ If you are installing using ``pip``, you don't need to download anything as the
86
+ latest version will be downloaded for you from PyPI::
87
+
88
+ pip install pytz
89
+
90
+ If you are installing from a tarball, run the following command as an
91
+ administrative user::
92
+
93
+ python setup.py install
94
+
95
+
96
+ pytz for Enterprise
97
+ ~~~~~~~~~~~~~~~~~~~
98
+
99
+ Available as part of the Tidelift Subscription.
100
+
101
+ The maintainers of pytz and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. <https://tidelift.com/subscription/pkg/pypi-pytz?utm_source=pypi-pytz&utm_medium=referral&utm_campaign=enterprise&utm_term=repo>`_.
102
+
103
+
104
+ Example & Usage
105
+ ~~~~~~~~~~~~~~~
106
+
107
+ Localized times and date arithmetic
108
+ -----------------------------------
109
+
110
+ >>> from datetime import datetime, timedelta
111
+ >>> from pytz import timezone
112
+ >>> import pytz
113
+ >>> utc = pytz.utc
114
+ >>> utc.zone
115
+ 'UTC'
116
+ >>> eastern = timezone('US/Eastern')
117
+ >>> eastern.zone
118
+ 'US/Eastern'
119
+ >>> amsterdam = timezone('Europe/Amsterdam')
120
+ >>> fmt = '%Y-%m-%d %H:%M:%S %Z%z'
121
+
122
+ This library only supports two ways of building a localized time. The
123
+ first is to use the ``localize()`` method provided by the pytz library.
124
+ This is used to localize a naive datetime (datetime with no timezone
125
+ information):
126
+
127
+ >>> loc_dt = eastern.localize(datetime(2002, 10, 27, 6, 0, 0))
128
+ >>> print(loc_dt.strftime(fmt))
129
+ 2002-10-27 06:00:00 EST-0500
130
+
131
+ The second way of building a localized time is by converting an existing
132
+ localized time using the standard ``astimezone()`` method:
133
+
134
+ >>> ams_dt = loc_dt.astimezone(amsterdam)
135
+ >>> ams_dt.strftime(fmt)
136
+ '2002-10-27 12:00:00 CET+0100'
137
+
138
+ Unfortunately using the tzinfo argument of the standard datetime
139
+ constructors ''does not work'' with pytz for many timezones.
140
+
141
+ >>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=amsterdam).strftime(fmt) # /!\ Does not work this way!
142
+ '2002-10-27 12:00:00 LMT+0018'
143
+
144
+ It is safe for timezones without daylight saving transitions though, such
145
+ as UTC:
146
+
147
+ >>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=pytz.utc).strftime(fmt) # /!\ Not recommended except for UTC
148
+ '2002-10-27 12:00:00 UTC+0000'
149
+
150
+ The preferred way of dealing with times is to always work in UTC,
151
+ converting to localtime only when generating output to be read
152
+ by humans.
153
+
154
+ >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
155
+ >>> loc_dt = utc_dt.astimezone(eastern)
156
+ >>> loc_dt.strftime(fmt)
157
+ '2002-10-27 01:00:00 EST-0500'
158
+
159
+ This library also allows you to do date arithmetic using local
160
+ times, although it is more complicated than working in UTC as you
161
+ need to use the ``normalize()`` method to handle daylight saving time
162
+ and other timezone transitions. In this example, ``loc_dt`` is set
163
+ to the instant when daylight saving time ends in the US/Eastern
164
+ timezone.
165
+
166
+ >>> before = loc_dt - timedelta(minutes=10)
167
+ >>> before.strftime(fmt)
168
+ '2002-10-27 00:50:00 EST-0500'
169
+ >>> eastern.normalize(before).strftime(fmt)
170
+ '2002-10-27 01:50:00 EDT-0400'
171
+ >>> after = eastern.normalize(before + timedelta(minutes=20))
172
+ >>> after.strftime(fmt)
173
+ '2002-10-27 01:10:00 EST-0500'
174
+
175
+ Creating local times is also tricky, and the reason why working with
176
+ local times is not recommended. Unfortunately, you cannot just pass
177
+ a ``tzinfo`` argument when constructing a datetime (see the next
178
+ section for more details).
179
+
180
+ >>> dt = datetime(2002, 10, 27, 1, 30, 0)
181
+ >>> dt1 = eastern.localize(dt, is_dst=True)
182
+ >>> dt1.strftime(fmt)
183
+ '2002-10-27 01:30:00 EDT-0400'
184
+ >>> dt2 = eastern.localize(dt, is_dst=False)
185
+ >>> dt2.strftime(fmt)
186
+ '2002-10-27 01:30:00 EST-0500'
187
+
188
+ Converting between timezones is more easily done using the
189
+ standard astimezone method.
190
+
191
+ >>> utc_dt = datetime.fromtimestamp(1143408899, tz=utc)
192
+ >>> utc_dt.strftime(fmt)
193
+ '2006-03-26 21:34:59 UTC+0000'
194
+ >>> au_tz = timezone('Australia/Sydney')
195
+ >>> au_dt = utc_dt.astimezone(au_tz)
196
+ >>> au_dt.strftime(fmt)
197
+ '2006-03-27 08:34:59 AEDT+1100'
198
+ >>> utc_dt2 = au_dt.astimezone(utc)
199
+ >>> utc_dt2.strftime(fmt)
200
+ '2006-03-26 21:34:59 UTC+0000'
201
+ >>> utc_dt == utc_dt2
202
+ True
203
+
204
+ You can take shortcuts when dealing with the UTC side of timezone
205
+ conversions. ``normalize()`` and ``localize()`` are not really
206
+ necessary when there are no daylight saving time transitions to
207
+ deal with.
208
+
209
+ >>> utc_dt = datetime.fromtimestamp(1143408899, tz=utc)
210
+ >>> utc_dt.strftime(fmt)
211
+ '2006-03-26 21:34:59 UTC+0000'
212
+ >>> au_tz = timezone('Australia/Sydney')
213
+ >>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz))
214
+ >>> au_dt.strftime(fmt)
215
+ '2006-03-27 08:34:59 AEDT+1100'
216
+ >>> utc_dt2 = au_dt.astimezone(utc)
217
+ >>> utc_dt2.strftime(fmt)
218
+ '2006-03-26 21:34:59 UTC+0000'
219
+
220
+
221
+ ``tzinfo`` API
222
+ --------------
223
+
224
+ The ``tzinfo`` instances returned by the ``timezone()`` function have
225
+ been extended to cope with ambiguous times by adding an ``is_dst``
226
+ parameter to the ``utcoffset()``, ``dst()`` and ``tzname()`` methods.
227
+
228
+ >>> tz = timezone('America/St_Johns')
229
+
230
+ >>> normal = datetime(2009, 9, 1)
231
+ >>> ambiguous = datetime(2009, 10, 31, 23, 30)
232
+
233
+ The ``is_dst`` parameter is ignored for most timestamps. It is only used
234
+ during ambiguous periods around DST transitions to resolve the ambiguity.
235
+
236
+ >>> print(tz.utcoffset(normal, is_dst=True))
237
+ -1 day, 21:30:00
238
+ >>> print(tz.dst(normal, is_dst=True))
239
+ 1:00:00
240
+ >>> tz.tzname(normal, is_dst=True)
241
+ 'NDT'
242
+
243
+ >>> print(tz.utcoffset(ambiguous, is_dst=True))
244
+ -1 day, 21:30:00
245
+ >>> print(tz.dst(ambiguous, is_dst=True))
246
+ 1:00:00
247
+ >>> tz.tzname(ambiguous, is_dst=True)
248
+ 'NDT'
249
+
250
+ >>> print(tz.utcoffset(normal, is_dst=False))
251
+ -1 day, 21:30:00
252
+ >>> tz.dst(normal, is_dst=False).seconds
253
+ 3600
254
+ >>> tz.tzname(normal, is_dst=False)
255
+ 'NDT'
256
+
257
+ >>> print(tz.utcoffset(ambiguous, is_dst=False))
258
+ -1 day, 20:30:00
259
+ >>> tz.dst(ambiguous, is_dst=False)
260
+ datetime.timedelta(0)
261
+ >>> tz.tzname(ambiguous, is_dst=False)
262
+ 'NST'
263
+
264
+ If ``is_dst`` is not specified, ambiguous timestamps will raise
265
+ a ``pytz.exceptions.AmbiguousTimeError`` exception.
266
+
267
+ >>> print(tz.utcoffset(normal))
268
+ -1 day, 21:30:00
269
+ >>> print(tz.dst(normal))
270
+ 1:00:00
271
+ >>> tz.tzname(normal)
272
+ 'NDT'
273
+
274
+ >>> import pytz.exceptions
275
+ >>> try:
276
+ ... tz.utcoffset(ambiguous)
277
+ ... except pytz.exceptions.AmbiguousTimeError:
278
+ ... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
279
+ pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00
280
+ >>> try:
281
+ ... tz.dst(ambiguous)
282
+ ... except pytz.exceptions.AmbiguousTimeError:
283
+ ... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
284
+ pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00
285
+ >>> try:
286
+ ... tz.tzname(ambiguous)
287
+ ... except pytz.exceptions.AmbiguousTimeError:
288
+ ... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
289
+ pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00
290
+
291
+
292
+ Problems with Localtime
293
+ ~~~~~~~~~~~~~~~~~~~~~~~
294
+
295
+ The major problem we have to deal with is that certain datetimes
296
+ may occur twice in a year. For example, in the US/Eastern timezone
297
+ on the last Sunday morning in October, the following sequence
298
+ happens:
299
+
300
+ - 01:00 EDT occurs
301
+ - 1 hour later, instead of 2:00am the clock is turned back 1 hour
302
+ and 01:00 happens again (this time 01:00 EST)
303
+
304
+ In fact, every instant between 01:00 and 02:00 occurs twice. This means
305
+ that if you try to create a time in the 'US/Eastern' timezone using
306
+ the standard datetime syntax, there is no way to specify whether you
307
+ meant before or after the end-of-daylight-saving-time transition. Using the
308
+ pytz custom syntax, the best you can do is make an educated guess:
309
+
310
+ >>> loc_dt = eastern.localize(datetime(2002, 10, 27, 1, 30, 00))
311
+ >>> loc_dt.strftime(fmt)
312
+ '2002-10-27 01:30:00 EST-0500'
313
+
314
+ As you can see, the system has chosen one for you and there is a 50%
315
+ chance of it being out by one hour. For some applications, this does
316
+ not matter. However, if you are trying to schedule meetings with people
317
+ in different timezones or analyze log files it is not acceptable.
318
+
319
+ The best and simplest solution is to stick with using UTC. The pytz
320
+ package encourages using UTC for internal timezone representation by
321
+ including a special UTC implementation based on the standard Python
322
+ reference implementation in the Python documentation.
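+
+ For example (a minimal sketch, not one of the original doctests), the
+ recommended pattern is to keep the canonical value in UTC, do any
+ arithmetic there, and convert to a local zone only for display::
+
+     from datetime import datetime, timedelta
+
+     import pytz
+
+     eastern = pytz.timezone('US/Eastern')
+
+     # Arithmetic in UTC is unambiguous: no DST transitions ever occur here.
+     start_utc = datetime(2002, 10, 27, 5, 30, tzinfo=pytz.utc)
+     one_hour_later = start_utc + timedelta(hours=1)
+
+     # Convert to local wall-clock time only at the edges, for display.
+     print(eastern.normalize(one_hour_later.astimezone(eastern)))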
+
+ The UTC timezone unpickles to be the same instance, and pickles to a
+ smaller size than other pytz tzinfo instances. The UTC implementation
+ can be obtained as pytz.utc, pytz.UTC, or pytz.timezone('UTC').
+
+ >>> import pickle, pytz
+ >>> dt = datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
+ >>> naive = dt.replace(tzinfo=None)
+ >>> p = pickle.dumps(dt, 1)
+ >>> naive_p = pickle.dumps(naive, 1)
+ >>> len(p) - len(naive_p)
+ 17
+ >>> new = pickle.loads(p)
+ >>> new == dt
+ True
+ >>> new is dt
+ False
+ >>> new.tzinfo is dt.tzinfo
+ True
+ >>> pytz.utc is pytz.UTC is pytz.timezone('UTC')
+ True
+
+ Note that some other timezones are commonly thought of as the same (GMT,
+ Greenwich, Universal, etc.). The definition of UTC is distinct from these
+ other timezones, and they are not equivalent. For this reason, they will
+ not compare the same in Python.
+
+ >>> utc == pytz.timezone('GMT')
+ False
+
+ See the section `What is UTC`_, below.
+
+ If you insist on working with local times, this library provides a
+ facility for constructing them unambiguously:
+
+ >>> loc_dt = datetime(2002, 10, 27, 1, 30, 00)
+ >>> edt_dt = eastern.localize(loc_dt, is_dst=True)
+ >>> est_dt = eastern.localize(loc_dt, is_dst=False)
+ >>> print(edt_dt.strftime(fmt) + ' / ' + est_dt.strftime(fmt))
+ 2002-10-27 01:30:00 EDT-0400 / 2002-10-27 01:30:00 EST-0500
+
+ If you pass None as the is_dst flag to localize(), pytz will refuse to
+ guess and will raise an exception if you try to build an ambiguous or
+ non-existent time.
+
+ For example, 1:30am on 27th Oct 2002 happened twice in the US/Eastern
+ timezone when the clocks were put back at the end of Daylight Saving
+ Time:
+
+ >>> dt = datetime(2002, 10, 27, 1, 30, 00)
+ >>> try:
+ ...     eastern.localize(dt, is_dst=None)
+ ... except pytz.exceptions.AmbiguousTimeError:
+ ...     print('pytz.exceptions.AmbiguousTimeError: %s' % dt)
+ pytz.exceptions.AmbiguousTimeError: 2002-10-27 01:30:00
+
+ Similarly, 2:30am on 7th April 2002 never happened at all in the
+ US/Eastern timezone, as the clocks were put forward at 2:00am, skipping
+ the entire hour:
+
+ >>> dt = datetime(2002, 4, 7, 2, 30, 00)
+ >>> try:
+ ...     eastern.localize(dt, is_dst=None)
+ ... except pytz.exceptions.NonExistentTimeError:
+ ...     print('pytz.exceptions.NonExistentTimeError: %s' % dt)
+ pytz.exceptions.NonExistentTimeError: 2002-04-07 02:30:00
+
+ Both of these exceptions share a common base class to make error handling
+ easier:
+
+ >>> isinstance(pytz.AmbiguousTimeError(), pytz.InvalidTimeError)
+ True
+ >>> isinstance(pytz.NonExistentTimeError(), pytz.InvalidTimeError)
+ True
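+
+ For example (a minimal sketch, not one of the original doctests), a
+ single ``except`` clause can reject both kinds of invalid input when
+ validating user-supplied local times::
+
+     from datetime import datetime
+
+     import pytz
+
+     eastern = pytz.timezone('US/Eastern')
+
+     def localize_or_none(naive_dt):
+         # InvalidTimeError is the shared base class, so this catches both
+         # AmbiguousTimeError and NonExistentTimeError in one place.
+         try:
+             return eastern.localize(naive_dt, is_dst=None)
+         except pytz.InvalidTimeError:
+             return None
+
+     print(localize_or_none(datetime(2002, 10, 27, 1, 30)))  # ambiguous -> None
+     print(localize_or_none(datetime(2002, 4, 7, 2, 30)))    # non-existent -> None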
+
+
+ A special case is where countries change their timezone definitions
+ with no daylight savings time switch. For example, in 1915 Warsaw
+ switched from Warsaw time to Central European time with no daylight savings
+ transition. So at the stroke of midnight on August 5th 1915 the clocks
+ were wound back 24 minutes, creating an ambiguous time period that cannot
+ be specified without referring to the timezone abbreviation or the
+ actual UTC offset. In this case midnight happened twice, neither time
+ during a daylight saving time period. pytz handles this transition by
+ treating the ambiguous period before the switch as daylight savings
+ time, and the ambiguous period after as standard time.
+
+
+ >>> warsaw = pytz.timezone('Europe/Warsaw')
+ >>> amb_dt1 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=True)
+ >>> amb_dt1.strftime(fmt)
+ '1915-08-04 23:59:59 WMT+0124'
+ >>> amb_dt2 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=False)
+ >>> amb_dt2.strftime(fmt)
+ '1915-08-04 23:59:59 CET+0100'
+ >>> switch_dt = warsaw.localize(datetime(1915, 8, 5, 00, 00, 00), is_dst=False)
+ >>> switch_dt.strftime(fmt)
+ '1915-08-05 00:00:00 CET+0100'
+ >>> str(switch_dt - amb_dt1)
+ '0:24:01'
+ >>> str(switch_dt - amb_dt2)
+ '0:00:01'
+
+ The best way of creating a time during an ambiguous time period is
+ by converting from another timezone such as UTC:
+
+ >>> utc_dt = datetime(1915, 8, 4, 22, 36, tzinfo=pytz.utc)
+ >>> utc_dt.astimezone(warsaw).strftime(fmt)
+ '1915-08-04 23:36:00 CET+0100'
+
+ The standard Python way of handling all these ambiguities is not to
+ handle them, as demonstrated in this example using the US/Eastern
+ timezone definition from the Python documentation (note that this
+ implementation only works for dates between 1987 and 2006 - it is
+ included for tests only!):
+
+ >>> from pytz.reference import Eastern # pytz.reference only for tests
+ >>> dt = datetime(2002, 10, 27, 0, 30, tzinfo=Eastern)
+ >>> str(dt)
+ '2002-10-27 00:30:00-04:00'
+ >>> str(dt + timedelta(hours=1))
+ '2002-10-27 01:30:00-05:00'
+ >>> str(dt + timedelta(hours=2))
+ '2002-10-27 02:30:00-05:00'
+ >>> str(dt + timedelta(hours=3))
+ '2002-10-27 03:30:00-05:00'
+
+ Notice the first two results? At first glance you might think they are
+ correct, but taking the UTC offset into account you find that they are
+ actually two hours apart instead of the one hour we asked for.
+
+ >>> from pytz.reference import UTC # pytz.reference only for tests
+ >>> str(dt.astimezone(UTC))
+ '2002-10-27 04:30:00+00:00'
+ >>> str((dt + timedelta(hours=1)).astimezone(UTC))
+ '2002-10-27 06:30:00+00:00'
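+
+ With pytz the same arithmetic can be corrected by renormalizing after
+ each operation (a minimal sketch along the lines of the examples
+ above)::
+
+     from datetime import datetime, timedelta
+
+     import pytz
+
+     eastern = pytz.timezone('US/Eastern')
+     dt = eastern.localize(datetime(2002, 10, 27, 0, 30))
+
+     # normalize() repairs the tzinfo after crossing the DST boundary, so
+     # this prints 2002-10-27 01:30:00 EDT-0400, a true one-hour step.
+     later = eastern.normalize(dt + timedelta(hours=1))
+     print(later.strftime('%Y-%m-%d %H:%M:%S %Z%z'))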
+
+
+ Country Information
+ ~~~~~~~~~~~~~~~~~~~
+
+ A mechanism is provided to access the timezones commonly in use
+ for a particular country, looked up using the ISO 3166 country code.
+ It returns a list of strings that can be used to retrieve the relevant
+ tzinfo instance using ``pytz.timezone()``:
+
+ >>> print(' '.join(pytz.country_timezones['nz']))
+ Pacific/Auckland Pacific/Chatham
+
+ The Olson database comes with an ISO 3166 country code to English country
+ name mapping that pytz exposes as a dictionary:
+
+ >>> print(pytz.country_names['nz'])
+ New Zealand
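+
+ The two mappings share their country-code keys, so they combine
+ naturally (a minimal sketch, not one of the original doctests)::
+
+     import pytz
+
+     # Print the commonly used timezones for a few countries by name.
+     for code in ('nz', 'ch', 'cu'):
+         zones = pytz.country_timezones[code]
+         print(pytz.country_names[code], '->', ' '.join(zones))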
+
+
+ What is UTC
+ ~~~~~~~~~~~
+
+ 'UTC' is `Coordinated Universal Time`_. It is a successor to, but distinct
+ from, Greenwich Mean Time (GMT) and the various definitions of Universal
+ Time. UTC is now the worldwide standard for regulating clocks and time
+ measurement.
+
+ All other timezones are defined relative to UTC, and include offsets like
+ UTC+0800 - hours to add or subtract from UTC to derive the local time. No
+ daylight saving time occurs in UTC, making it a useful timezone to perform
+ date arithmetic without worrying about the confusion and ambiguities caused
+ by daylight saving time transitions, your country changing its timezone, or
+ mobile computers that roam through multiple timezones.
+
+ .. _Coordinated Universal Time: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
+
+
+ Helpers
+ ~~~~~~~
+
+ There are two lists of timezones provided.
+
+ ``all_timezones`` is the exhaustive list of the timezone names that can
+ be used.
+
+ >>> from pytz import all_timezones
+ >>> len(all_timezones) >= 500
+ True
+ >>> 'Etc/Greenwich' in all_timezones
+ True
+
+ ``common_timezones`` is a list of useful, current timezones. It doesn't
+ contain deprecated zones or historical zones, except for a few I've
+ deemed in common usage, such as US/Eastern (open a bug report if you
+ think other timezones are deserving of being included here). It is also
+ a sequence of strings.
+
+ >>> from pytz import common_timezones
+ >>> len(common_timezones) < len(all_timezones)
+ True
+ >>> 'Etc/Greenwich' in common_timezones
+ False
+ >>> 'Australia/Melbourne' in common_timezones
+ True
+ >>> 'US/Eastern' in common_timezones
+ True
+ >>> 'Canada/Eastern' in common_timezones
+ True
+ >>> 'Australia/Yancowinna' in all_timezones
+ True
+ >>> 'Australia/Yancowinna' in common_timezones
+ False
+
+ Both ``common_timezones`` and ``all_timezones`` are alphabetically
+ sorted:
+
+ >>> common_timezones_dupe = common_timezones[:]
+ >>> common_timezones_dupe.sort()
+ >>> common_timezones == common_timezones_dupe
+ True
+ >>> all_timezones_dupe = all_timezones[:]
+ >>> all_timezones_dupe.sort()
+ >>> all_timezones == all_timezones_dupe
+ True
+
+ ``all_timezones`` and ``common_timezones`` are also available as sets.
+
+ >>> from pytz import all_timezones_set, common_timezones_set
+ >>> 'US/Eastern' in all_timezones_set
+ True
+ >>> 'US/Eastern' in common_timezones_set
+ True
+ >>> 'Australia/Victoria' in common_timezones_set
+ False
+
+ You can also retrieve lists of timezones used by particular countries
+ using the ``country_timezones()`` function. It requires an ISO-3166
+ two letter country code.
+
+ >>> from pytz import country_timezones
+ >>> print(' '.join(country_timezones('ch')))
+ Europe/Zurich
+ >>> print(' '.join(country_timezones('CH')))
+ Europe/Zurich
+
+
+ Internationalization - i18n/l10n
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Pytz is an interface to the IANA database, which uses ASCII names. The
+ `Unicode Consortium's Unicode Locales (CLDR) <http://cldr.unicode.org>`_
+ project provides translations. Python packages such as
+ `Babel <https://babel.pocoo.org/en/latest/api/dates.html#timezone-functionality>`_
+ and Thomas Khyn's `l18n <https://pypi.org/project/l18n/>`_ package can be used
+ to access these translations from Python.
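+
+ For example (a minimal sketch; Babel is a third-party package, and the
+ translated names come from CLDR, not from pytz itself)::
+
+     import pytz
+     from babel.dates import get_timezone_name
+
+     tz = pytz.timezone('Europe/Zurich')
+
+     # Render a localized display name for the zone using CLDR data,
+     # e.g. 'heure d'Europe centrale' for a French locale.
+     print(get_timezone_name(tz, locale='fr_FR'))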
574
+
575
+
576
+ License
577
+ ~~~~~~~
578
+
579
+ MIT license.
580
+
581
+ This code is also available as part of Zope 3 under the Zope Public
582
+ License, Version 2.1 (ZPL).
583
+
584
+ I'm happy to relicense this code if necessary for inclusion in other
585
+ open source projects.
586
+
587
+
588
+ Latest Versions
589
+ ~~~~~~~~~~~~~~~
590
+
591
+ This package will be updated after releases of the Olson timezone
592
+ database. The latest version can be downloaded from the `Python Package
593
+ Index <https://pypi.org/project/pytz/>`_. The code that is used
594
+ to generate this distribution is hosted on Github and available
595
+ using git::
596
+
597
+ git clone https://github.com/stub42/pytz.git
598
+
599
+ Announcements of new releases are made on
600
+ `Launchpad <https://launchpad.net/pytz>`_, and the
601
+ `Atom feed <http://feeds.launchpad.net/pytz/announcements.atom>`_
602
+ hosted there.
603
+
604
+
605
+ Bugs, Feature Requests & Patches
606
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
607
+
608
+ Bugs should be reported on `Github <https://github.com/stub42/pytz/issues>`_.
609
+ Feature requests are unlikely to be considered, and efforts instead directed
610
+ to timezone support now built into Python or packages that work with it.
611
+
612
+
613
+ Security Issues
614
+ ~~~~~~~~~~~~~~~
615
+
616
+ Reports about security issues can be made via `Tidelift <https://tidelift.com/security>`_.
617
+
618
+
619
+ Issues & Limitations
620
+ ~~~~~~~~~~~~~~~~~~~~
621
+
622
+ - This project is in maintenance mode. Projects using Python 3.9 or later
623
+ are best served by using the timezone functionaly now included in core
624
+ Python and packages that work with it such as `tzdata <https://pypi.org/project/tzdata/>`_.
625
+
626
+ - Offsets from UTC are rounded to the nearest whole minute, so timezones
627
+ such as Europe/Amsterdam pre 1937 will be up to 30 seconds out. This
628
+ was a limitation of the Python datetime library.
629
+
630
+ - If you think a timezone definition is incorrect, I probably can't fix
631
+ it. pytz is a direct translation of the Olson timezone database, and
632
+ changes to the timezone definitions need to be made to this source.
633
+ If you find errors they should be reported to the time zone mailing
634
+ list, linked from http://www.iana.org/time-zones.
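+
+ For projects on modern Python, the rough equivalent of the
+ ``localize()``/``is_dst`` handling above uses the standard library's
+ ``zoneinfo`` module and the ``fold`` attribute (a minimal sketch, not
+ part of pytz)::
+
+     from datetime import datetime
+     from zoneinfo import ZoneInfo  # Python 3.9+; tzdata supplies zone data where the OS does not
+
+     eastern = ZoneInfo('America/New_York')
+
+     # fold=0 selects the first 01:30 (EDT, UTC-4); fold=1 selects the
+     # repeated 01:30 after the clocks go back (EST, UTC-5).
+     first = datetime(2002, 10, 27, 1, 30, tzinfo=eastern, fold=0)
+     second = datetime(2002, 10, 27, 1, 30, tzinfo=eastern, fold=1)
+     print(first.utcoffset(), second.utcoffset())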
635
+
636
+
637
+ Further Reading
638
+ ~~~~~~~~~~~~~~~
639
+
640
+ More info than you want to know about timezones:
641
+ https://data.iana.org/time-zones/tz-link.html
642
+
643
+
644
+ Contact
645
+ ~~~~~~~
646
+
647
+ Stuart Bishop <[email protected]>
648
+
649
+
llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD ADDED
@@ -0,0 +1,622 @@
1
+ pytz-2024.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ pytz-2024.1.dist-info/LICENSE.txt,sha256=vosaN-vibFkqkPbA6zMQOn84POL010mMCvmlJpkKB7g,1088
3
+ pytz-2024.1.dist-info/METADATA,sha256=2mOz3YzpRCJtu0iklrKsUm8a8BmJglIL_qqGhhduPJk,22325
4
+ pytz-2024.1.dist-info/RECORD,,
5
+ pytz-2024.1.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110
6
+ pytz-2024.1.dist-info/top_level.txt,sha256=6xRYlt934v1yHb1JIrXgHyGxn3cqACvd-yE8ski_kcc,5
7
+ pytz-2024.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
8
+ pytz/__init__.py,sha256=RZJJJ1W2RyP9fllsMNO4w-yjJRpIazWJ9fvj5telYig,35101
9
+ pytz/__pycache__/__init__.cpython-310.pyc,,
10
+ pytz/__pycache__/exceptions.cpython-310.pyc,,
11
+ pytz/__pycache__/lazy.cpython-310.pyc,,
12
+ pytz/__pycache__/reference.cpython-310.pyc,,
13
+ pytz/__pycache__/tzfile.cpython-310.pyc,,
14
+ pytz/__pycache__/tzinfo.cpython-310.pyc,,
15
+ pytz/exceptions.py,sha256=434ZcuLlpLQY9mWoGq7zJMV1TyiYvVgpKBU1qZkbDjM,1571
16
+ pytz/lazy.py,sha256=toeR5uDWKBj6ezsUZ4elNP6CEMtK7CO2jS9A30nsFbo,5404
17
+ pytz/reference.py,sha256=zUtCki7JFEmrzrjNsfMD7YL0lWDxynKc1Ubo4iXSs74,3778
18
+ pytz/tzfile.py,sha256=K2y7pZs4vydpZVftrfAA_-hgw17y1Szc7z_QCse6udU,4723
19
+ pytz/tzinfo.py,sha256=XfaVOoO3KsCvtUYaCd0fvgBXWZ8tgevGYUoBh_uiE60,19340
20
+ pytz/zoneinfo/Africa/Abidjan,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
21
+ pytz/zoneinfo/Africa/Accra,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
22
+ pytz/zoneinfo/Africa/Addis_Ababa,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
23
+ pytz/zoneinfo/Africa/Algiers,sha256=vaFpjNVCwObnbfu82rOQzdJvN6nVgmpXpQ1aqzfzsqY,735
24
+ pytz/zoneinfo/Africa/Asmara,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
25
+ pytz/zoneinfo/Africa/Asmera,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
26
+ pytz/zoneinfo/Africa/Bamako,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
27
+ pytz/zoneinfo/Africa/Bangui,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
28
+ pytz/zoneinfo/Africa/Banjul,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
29
+ pytz/zoneinfo/Africa/Bissau,sha256=IjuxDP6EZiDHFvl_bHS6NN7sdRxLKXllooBC829poak,194
30
+ pytz/zoneinfo/Africa/Blantyre,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
31
+ pytz/zoneinfo/Africa/Brazzaville,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
32
+ pytz/zoneinfo/Africa/Bujumbura,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
33
+ pytz/zoneinfo/Africa/Cairo,sha256=Lft-GCLQhaSJm9VqUmsEFoHIS1Vhfa7pFJn9GZCpifs,2399
34
+ pytz/zoneinfo/Africa/Casablanca,sha256=4RqVbw_F3ZucopIC2ivAJ8WDwj5wRODAB67tBpdXcgA,2429
35
+ pytz/zoneinfo/Africa/Ceuta,sha256=Cw-2_nFDGbN8WqIsVpcauyZooWX8j3Kmx2PnC0fHut8,2052
36
+ pytz/zoneinfo/Africa/Conakry,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
37
+ pytz/zoneinfo/Africa/Dakar,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
38
+ pytz/zoneinfo/Africa/Dar_es_Salaam,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
39
+ pytz/zoneinfo/Africa/Djibouti,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
40
+ pytz/zoneinfo/Africa/Douala,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
41
+ pytz/zoneinfo/Africa/El_Aaiun,sha256=UWCCqQLJxd8qsTYw82kz9W1suwW5TRgnZw31sDWDz20,2295
42
+ pytz/zoneinfo/Africa/Freetown,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
43
+ pytz/zoneinfo/Africa/Gaborone,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
44
+ pytz/zoneinfo/Africa/Harare,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
45
+ pytz/zoneinfo/Africa/Johannesburg,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246
46
+ pytz/zoneinfo/Africa/Juba,sha256=UVnIqEPJwHLTMC-r5qZQHNv9opoYVsKdq-ta_5XUw_Q,679
47
+ pytz/zoneinfo/Africa/Kampala,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
48
+ pytz/zoneinfo/Africa/Khartoum,sha256=MYWDoJ3AcCItZdApoeOgtWWDDxquwTon5v5TOGP70-o,679
49
+ pytz/zoneinfo/Africa/Kigali,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
50
+ pytz/zoneinfo/Africa/Kinshasa,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
51
+ pytz/zoneinfo/Africa/Lagos,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
52
+ pytz/zoneinfo/Africa/Libreville,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
53
+ pytz/zoneinfo/Africa/Lome,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
54
+ pytz/zoneinfo/Africa/Luanda,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
55
+ pytz/zoneinfo/Africa/Lubumbashi,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
56
+ pytz/zoneinfo/Africa/Lusaka,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
57
+ pytz/zoneinfo/Africa/Malabo,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
58
+ pytz/zoneinfo/Africa/Maputo,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149
59
+ pytz/zoneinfo/Africa/Maseru,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246
60
+ pytz/zoneinfo/Africa/Mbabane,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246
61
+ pytz/zoneinfo/Africa/Mogadishu,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
62
+ pytz/zoneinfo/Africa/Monrovia,sha256=-VsJW5cU4KdvfgYaQVv4lcuzmaKIVFMd42nO6RXOBdU,208
63
+ pytz/zoneinfo/Africa/Nairobi,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
64
+ pytz/zoneinfo/Africa/Ndjamena,sha256=8T3A0Zm9Gj0Bvm6rd88t3GAXKiKdGUfHlIqYlkYI0KM,199
65
+ pytz/zoneinfo/Africa/Niamey,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
66
+ pytz/zoneinfo/Africa/Nouakchott,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
67
+ pytz/zoneinfo/Africa/Ouagadougou,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
68
+ pytz/zoneinfo/Africa/Porto-Novo,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235
69
+ pytz/zoneinfo/Africa/Sao_Tome,sha256=MdjxpQ268uzJ7Zx1ZroFUtRUwqsJ6F_yY3AYV9FXw1I,254
70
+ pytz/zoneinfo/Africa/Timbuktu,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
71
+ pytz/zoneinfo/Africa/Tripoli,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625
72
+ pytz/zoneinfo/Africa/Tunis,sha256=OFVMEM4eYT2Ez0beuhEUCTSIpcFldWxsV2uEoTZIUNI,689
73
+ pytz/zoneinfo/Africa/Windhoek,sha256=xuhvudrMH4alnVmouSTQI8YL8F_HbgsF2EQ7AZKzuHs,955
74
+ pytz/zoneinfo/America/Adak,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356
75
+ pytz/zoneinfo/America/Anchorage,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371
76
+ pytz/zoneinfo/America/Anguilla,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
77
+ pytz/zoneinfo/America/Antigua,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
78
+ pytz/zoneinfo/America/Araguaina,sha256=G6v9wYFZ8EB4WQfIsqRbbiiKd2b27j7Zt5dFjBbzx2o,870
79
+ pytz/zoneinfo/America/Argentina/Buenos_Aires,sha256=JmU8lBwmy29gR6OmeytvFdMRx6ObJKnYNHmLyMmXX2M,1062
80
+ pytz/zoneinfo/America/Argentina/Catamarca,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062
81
+ pytz/zoneinfo/America/Argentina/ComodRivadavia,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062
82
+ pytz/zoneinfo/America/Argentina/Cordoba,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062
83
+ pytz/zoneinfo/America/Argentina/Jujuy,sha256=PGmAehypCxj0XCenCSWqylDIPbKLK0DlrwJK_24D590,1034
84
+ pytz/zoneinfo/America/Argentina/La_Rioja,sha256=Um6XoVXhsr62ad1mWuebe6NY0ZHauBdR9tMGDgqCOHg,1076
85
+ pytz/zoneinfo/America/Argentina/Mendoza,sha256=xcOVtvRyVYFAU90y2QYwpyQhpMLyAp7-Fxvku4kgl0c,1062
86
+ pytz/zoneinfo/America/Argentina/Rio_Gallegos,sha256=F9ZKR4o8gLHX7QBuIjMapGIdmzJxpqwbouPgZ5MqDpY,1062
87
+ pytz/zoneinfo/America/Argentina/Salta,sha256=h1KYrDNIapvDkYhi1PaB8WD1qWOe4vhhgDJWDCGV4jc,1034
88
+ pytz/zoneinfo/America/Argentina/San_Juan,sha256=AI2GltA80mPNzhHxYycuEwIbO1ANXyIqBQZMpjqKqdQ,1076
89
+ pytz/zoneinfo/America/Argentina/San_Luis,sha256=2ItGRcLVK2wx8MyJsHbIBBeAkU4B-MN5x1ZxNyZ7UJE,1088
90
+ pytz/zoneinfo/America/Argentina/Tucuman,sha256=twO-FqtNJV8XOzWTvFQ-xnEcWCoDUHY3gpVIG0Mzbf8,1090
91
+ pytz/zoneinfo/America/Argentina/Ushuaia,sha256=A6IbpVlY9IIPoSKMFRR9DMROdwXUSDc2HsASueOSnqo,1062
92
+ pytz/zoneinfo/America/Aruba,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
93
+ pytz/zoneinfo/America/Asuncion,sha256=V8wwkUoNqyj0C-fUSADpU7HU8H3Qkr3jNPJ4SLsGUIc,2030
94
+ pytz/zoneinfo/America/Atikokan,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182
95
+ pytz/zoneinfo/America/Atka,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356
96
+ pytz/zoneinfo/America/Bahia,sha256=qi7dA6FofDhLxVMmd2L8bK3HeaQnc9X-jiijwyfhs3g,1010
97
+ pytz/zoneinfo/America/Bahia_Banderas,sha256=L6iHYbA1Us1pljllFLEIAHW4ZaZhFKoG2Zr8TT5aY38,1152
98
+ pytz/zoneinfo/America/Barbados,sha256=ima-Qrrhazu4Qfvu2Z0-e6E-GTiYknuJBu6c2yVG9LE,436
99
+ pytz/zoneinfo/America/Belem,sha256=aZMUgtFDdHNISpqyQRYbmS2IBD-BAS3CaJnhu6onLCY,562
100
+ pytz/zoneinfo/America/Belize,sha256=pkfLY2KfPchbeJa1pWcXmWAwp4ZlRvxWLVezXnrbkws,1614
101
+ pytz/zoneinfo/America/Blanc-Sablon,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
102
+ pytz/zoneinfo/America/Boa_Vista,sha256=dMtaG11kGlJrgJJgGWEDZZAmnO_HfT3L4X8pI72LLFY,618
103
+ pytz/zoneinfo/America/Bogota,sha256=Z1ernZZGQxulE8KFWHYWcM3SV1jn2_QEc1Q0OJzHRak,232
104
+ pytz/zoneinfo/America/Boise,sha256=7HQsNPJiUheQgFz5kVLvTnf5xhXAYaeANqDskxKz2Vs,2410
105
+ pytz/zoneinfo/America/Buenos_Aires,sha256=JmU8lBwmy29gR6OmeytvFdMRx6ObJKnYNHmLyMmXX2M,1062
106
+ pytz/zoneinfo/America/Cambridge_Bay,sha256=_4xRlX3WdVpEcqoT6myD7NeTCXnn9OYk_iH006bwULo,2254
107
+ pytz/zoneinfo/America/Campo_Grande,sha256=gINiXg5i2e6Rh2Nbo2bFqhPAJL4F4cAqGnBankXTDXw,1430
108
+ pytz/zoneinfo/America/Cancun,sha256=lI4ZtiBtxKqNHvU47vRSwc5-GDl8JOdC2A6oc9s8iIo,834
109
+ pytz/zoneinfo/America/Caracas,sha256=mUNMFdDzZLav_ePA1ocBdmqVBierkeEszTIFpNCm5J0,250
110
+ pytz/zoneinfo/America/Catamarca,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062
111
+ pytz/zoneinfo/America/Cayenne,sha256=4k7Iv1woX4atqePKrcvMQD2Vk9Tmma7rW_AW_R62pCc,184
112
+ pytz/zoneinfo/America/Cayman,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182
113
+ pytz/zoneinfo/America/Chicago,sha256=_roybr6I6sIAF6cYdIxGxoRpoef153Fty48dQ6bm9oY,3592
114
+ pytz/zoneinfo/America/Chihuahua,sha256=ZAlPSsUfT3VGp1VdibnHIf-QsdEIqHuzX15wu2P2YQk,1102
115
+ pytz/zoneinfo/America/Ciudad_Juarez,sha256=OQstyPrMxx3nNEbzgDhq_W0mK49-ApNMK7_6p-6dJ64,1538
116
+ pytz/zoneinfo/America/Coral_Harbour,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182
117
+ pytz/zoneinfo/America/Cordoba,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062
118
+ pytz/zoneinfo/America/Costa_Rica,sha256=74rYa6lrgIkyls9PkHo8SCYl9oOqiuG5S7MWdnJelP4,316
119
+ pytz/zoneinfo/America/Creston,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360
120
+ pytz/zoneinfo/America/Cuiaba,sha256=GRJqkhRXNsOUcgjZddQxRIJdRYaw9pM_YLWbun88dkg,1402
121
+ pytz/zoneinfo/America/Curacao,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
122
+ pytz/zoneinfo/America/Danmarkshavn,sha256=YRZAfUCoVtaL1L-MYMYMH1wyOaVQnfUo_gFnvMXSuzw,698
123
+ pytz/zoneinfo/America/Dawson,sha256=rAHhyuMuyjf_eyA2SBG76MRBf_fj_xi5FAuiWVQgJhw,1614
124
+ pytz/zoneinfo/America/Dawson_Creek,sha256=aJXCyP4j3ggE4wGCN-LrS9hpD_5zWHzQTeSAKTWEPUM,1050
125
+ pytz/zoneinfo/America/Denver,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460
126
+ pytz/zoneinfo/America/Detroit,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230
127
+ pytz/zoneinfo/America/Dominica,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
128
+ pytz/zoneinfo/America/Edmonton,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332
129
+ pytz/zoneinfo/America/Eirunepe,sha256=j5eExkjFaqtC-D8XK0rGzoF9yEgbSlTbPqVG9WKhEa8,642
130
+ pytz/zoneinfo/America/El_Salvador,sha256=gvGN8Lkj-sGm2_rs8OUjAMf1oMtKp2Xes6UfWT0WqgU,224
131
+ pytz/zoneinfo/America/Ensenada,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374
132
+ pytz/zoneinfo/America/Fort_Nelson,sha256=erfODr3DrSpz65kAdO7Ts2dGbZxvddEP6gx4BX3y2J0,2240
133
+ pytz/zoneinfo/America/Fort_Wayne,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682
134
+ pytz/zoneinfo/America/Fortaleza,sha256=rjiSB0q1cBuMDOM9orW_uwe5UOLBwTlfjFotwOYe1mU,702
135
+ pytz/zoneinfo/America/Glace_Bay,sha256=G8DGLGCapH_aYCF_OhaL5Qonf7FOAgAPwelO5htCWBc,2192
136
+ pytz/zoneinfo/America/Godthab,sha256=KGXrMN-YkYpVCgLdpcfwMFQ77EsRAGsjUCG3yAUvVfw,1889
137
+ pytz/zoneinfo/America/Goose_Bay,sha256=JgaLueghSvX2g725FOfIgpgvsqxZGykWOhAZWGpQZRY,3210
138
+ pytz/zoneinfo/America/Grand_Turk,sha256=4YOFEPK60Bel2_fCsY6vSZxUcMJKjiKtyOf_Q0khEwU,1834
139
+ pytz/zoneinfo/America/Grenada,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
140
+ pytz/zoneinfo/America/Guadeloupe,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
141
+ pytz/zoneinfo/America/Guatemala,sha256=dugUgCd6QY52yHkHuUP4jRWzo5x439IQigaYCvEF46Q,280
142
+ pytz/zoneinfo/America/Guayaquil,sha256=j2UuIo-4RgSOlTNfu77mhZ92waNTeKFSvmoVemJooT0,232
143
+ pytz/zoneinfo/America/Guyana,sha256=R0bOvCRDC8SRIexmhsduPdHbbRPwI2GviD9otExiUrk,248
144
+ pytz/zoneinfo/America/Halifax,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424
145
+ pytz/zoneinfo/America/Havana,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416
146
+ pytz/zoneinfo/America/Hermosillo,sha256=WnlVBpVBG8ONnz0wpxteXmuvSzOGwSlAisvDd1GtKYA,456
147
+ pytz/zoneinfo/America/Indiana/Indianapolis,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682
148
+ pytz/zoneinfo/America/Indiana/Knox,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444
149
+ pytz/zoneinfo/America/Indiana/Marengo,sha256=f3tQ-lgMSUA7nvn64pXhKtJL7mWzGajoCega5MEJSbI,1738
150
+ pytz/zoneinfo/America/Indiana/Petersburg,sha256=A88OHuM0Rg3iMLHjKgXq_d2jZCdVSytUQs-9W0KcFyQ,1920
151
+ pytz/zoneinfo/America/Indiana/Tell_City,sha256=4dWqAr9Y2BXfL4pAQk-81c3gGl2cNdHXOD7_wJhhhn8,1700
152
+ pytz/zoneinfo/America/Indiana/Vevay,sha256=H7VR2G-_sD_C5Rm4P3g1iRC1FWCPg4m0MGD3P1PLzsk,1430
153
+ pytz/zoneinfo/America/Indiana/Vincennes,sha256=62mAxT7APFCaoygflnEzdOpe-fuW1yObI6m6EUUcS7A,1710
154
+ pytz/zoneinfo/America/Indiana/Winamac,sha256=aZGM2jR8CH9BHSUq7XygiweDd6dorXLPXg246XsbR6s,1794
155
+ pytz/zoneinfo/America/Indianapolis,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682
156
+ pytz/zoneinfo/America/Inuvik,sha256=6J-mapDnrk9A1LtswoE34tqSy_ufedcEBNxixkrEjIo,2074
157
+ pytz/zoneinfo/America/Iqaluit,sha256=feOnxAN0N0r-M1qlkrA4JMyawoc0tqae0iiBCPDAs4k,2202
158
+ pytz/zoneinfo/America/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482
159
+ pytz/zoneinfo/America/Jujuy,sha256=PGmAehypCxj0XCenCSWqylDIPbKLK0DlrwJK_24D590,1034
160
+ pytz/zoneinfo/America/Juneau,sha256=k7hxb0aGRnfnE-DBi3LkcjAzRPyAf0_Hw0vVFfjGeb0,2353
161
+ pytz/zoneinfo/America/Kentucky/Louisville,sha256=tP072xV_n_vIQjxxcJ77AGeGj6yL1KPpn3fwids9g1U,2788
162
+ pytz/zoneinfo/America/Kentucky/Monticello,sha256=LtdyCo85BrXQs6rlH61Ym-8KqWHH6PwAOjD0QxhIdzM,2368
163
+ pytz/zoneinfo/America/Knox_IN,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444
164
+ pytz/zoneinfo/America/Kralendijk,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
165
+ pytz/zoneinfo/America/La_Paz,sha256=hqfD8LQHupdZhji2e93_9pOQAT-R7muzzjP0nyfbFXY,218
166
+ pytz/zoneinfo/America/Lima,sha256=HHgTnDUnCZzibvL0MrG8qyOuvjmYYw3e3R5VbnxMZs8,392
167
+ pytz/zoneinfo/America/Los_Angeles,sha256=aJd7ua1tGG_vxser02AQpm4wAI3LLTdgh6QcSYYecmg,2852
168
+ pytz/zoneinfo/America/Louisville,sha256=tP072xV_n_vIQjxxcJ77AGeGj6yL1KPpn3fwids9g1U,2788
169
+ pytz/zoneinfo/America/Lower_Princes,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
170
+ pytz/zoneinfo/America/Maceio,sha256=3R5DlSe32kQDmoSVIWpcyk2o7qohr-rliwqDSGFIMyQ,730
171
+ pytz/zoneinfo/America/Managua,sha256=xBzF01AHn2E2fD8Qdy-DHFe36UqoeNpKPfChduBKWdk,430
172
+ pytz/zoneinfo/America/Manaus,sha256=F6RLOOeOi9lymZiQmQ9pR8tFpPZ6EguNdPfOc6BhXDE,590
173
+ pytz/zoneinfo/America/Marigot,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
174
+ pytz/zoneinfo/America/Martinique,sha256=fMs80kOU2YFvC0f9y2eje97JeAtTYBamXrnlTunNLzQ,232
175
+ pytz/zoneinfo/America/Matamoros,sha256=fq-PqdmZrQ98UsFmHA9ivjBZv5GEBRTOuLQ5Cu5ajW8,1418
176
+ pytz/zoneinfo/America/Mazatlan,sha256=RQQVwlEVHRp2X-c_0hJ46y54abTlqUuLkyrUUicyc5g,1128
177
+ pytz/zoneinfo/America/Mendoza,sha256=xcOVtvRyVYFAU90y2QYwpyQhpMLyAp7-Fxvku4kgl0c,1062
178
+ pytz/zoneinfo/America/Menominee,sha256=Arv9WLbfhNcpRsUjHDU757BEdwlp08Gt30AixG3gZ04,2274
179
+ pytz/zoneinfo/America/Merida,sha256=ORJCGiO2mXG-kk5ZZGro1MNuKqRnJx6HJlvoezTMM90,1004
180
+ pytz/zoneinfo/America/Metlakatla,sha256=twmieGTVY2V-U8nFxqvx7asYv8GVjeWdLtrOI7UApVI,1423
181
+ pytz/zoneinfo/America/Mexico_City,sha256=A5MlfDUZ4O1-jMTRt0WPem7qqcW0Nrslls1hlc8C4-Q,1222
182
+ pytz/zoneinfo/America/Miquelon,sha256=l5txBJYe9HTRZlILcbSL_HNDYrjUb0ouecNy7QEkg9c,1652
183
+ pytz/zoneinfo/America/Moncton,sha256=Wmv-bk9aKKcWWzOpc1UFu67HOfwaIk2Wmh3LgqGctys,3154
184
+ pytz/zoneinfo/America/Monterrey,sha256=vKBLVjG0bNVDI07M4WwOVv2KbrYJVNTLmc19iM2CvTU,980
185
+ pytz/zoneinfo/America/Montevideo,sha256=dQEBE4mjZPtyRjKXK6Z-bMHJdFqpwhIzxDH4x04rKYk,1496
186
+ pytz/zoneinfo/America/Montreal,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494
187
+ pytz/zoneinfo/America/Montserrat,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
188
+ pytz/zoneinfo/America/Nassau,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494
189
+ pytz/zoneinfo/America/New_York,sha256=6e0H177gx2qdRC0JHvHwFmj-58TyYBTAqGixn-bBipU,3552
190
+ pytz/zoneinfo/America/Nipigon,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494
191
+ pytz/zoneinfo/America/Nome,sha256=2izM3-P-PqJ9za6MdhzFfMvPFNq7Gim69tAvEwPeY2s,2367
192
+ pytz/zoneinfo/America/Noronha,sha256=feeRAijQqKylZgqe84nKhsFLycT5zIBm7mLIvdyGw4w,702
193
+ pytz/zoneinfo/America/North_Dakota/Beulah,sha256=qtgbqNu8M3AkHF2n-_oSps1pYT4SxgclbkkPKbXaBHs,2396
194
+ pytz/zoneinfo/America/North_Dakota/Center,sha256=9ZWbK9YKkquULyBUFS3Lr_idxbt7V7y4W4EO0Kn20sw,2396
195
+ pytz/zoneinfo/America/North_Dakota/New_Salem,sha256=DH_bsQfuUnK2obdb06KgisO4XLqht12BXdrgUsZZveg,2396
196
+ pytz/zoneinfo/America/Nuuk,sha256=KGXrMN-YkYpVCgLdpcfwMFQ77EsRAGsjUCG3yAUvVfw,1889
197
+ pytz/zoneinfo/America/Ojinaga,sha256=9catgEQ2SD7qfuvTMxs15Cdd9SKaUy-svEzPBFw2Q3Q,1524
198
+ pytz/zoneinfo/America/Panama,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182
199
+ pytz/zoneinfo/America/Pangnirtung,sha256=feOnxAN0N0r-M1qlkrA4JMyawoc0tqae0iiBCPDAs4k,2202
200
+ pytz/zoneinfo/America/Paramaribo,sha256=Z7UZvNlgd-qEUHjEPYXIkLNTgjMcCzk9EfUUEmUyd7M,248
201
+ pytz/zoneinfo/America/Phoenix,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360
202
+ pytz/zoneinfo/America/Port-au-Prince,sha256=09ZAJd4IOiMpfdpUuF1U44R_hRt6BvpAkFXOnYO9yOM,1434
203
+ pytz/zoneinfo/America/Port_of_Spain,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
204
+ pytz/zoneinfo/America/Porto_Acre,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614
205
+ pytz/zoneinfo/America/Porto_Velho,sha256=uSMV2hZWj-VyBhFBwC950wcThfN3jq6KlycESmQTLOA,562
206
+ pytz/zoneinfo/America/Puerto_Rico,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
207
+ pytz/zoneinfo/America/Punta_Arenas,sha256=tR5uIf1351AWFqrqNtmXnhQWnKREmJaZqKBzaWRVMTQ,1902
208
+ pytz/zoneinfo/America/Rainy_River,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868
209
+ pytz/zoneinfo/America/Rankin_Inlet,sha256=nXgqjL3O2BV0em-Xk8qVRRZb_X0yQmHE6vmSSvI9Kzc,2066
210
+ pytz/zoneinfo/America/Recife,sha256=bJ_HE0-JFio4-owpZ0pLO8U3ai0fiGu8QHL0DexLiLc,702
211
+ pytz/zoneinfo/America/Regina,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980
212
+ pytz/zoneinfo/America/Resolute,sha256=CnMU2dBI-63vt8-J0Q1Ropx-8b9pRCLjhvrycMIedGg,2066
213
+ pytz/zoneinfo/America/Rio_Branco,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614
214
+ pytz/zoneinfo/America/Rosario,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062
215
+ pytz/zoneinfo/America/Santa_Isabel,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374
216
+ pytz/zoneinfo/America/Santarem,sha256=VmZP9S5pPucFxyqAOV908EmWXQZvgCgWLmlJJTUl0LE,588
217
+ pytz/zoneinfo/America/Santiago,sha256=0CDw13dCMUsoquMupoJgupkzAUNhDK6E0lVxURA7osA,2515
218
+ pytz/zoneinfo/America/Santo_Domingo,sha256=DKtaEj8fQ92ybITTWU4Bm160S9pzJmUVbjaWRnenxU4,458
219
+ pytz/zoneinfo/America/Sao_Paulo,sha256=BMBnRO4_4HjvO4t3njjrMGZr-ZPmegkvyvL8KPY6ZM4,1430
220
+ pytz/zoneinfo/America/Scoresbysund,sha256=K-qkiMCCFgOe8ccPMABA-lDjc9vb6wpluBOCVfiBdLI,1935
221
+ pytz/zoneinfo/America/Shiprock,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460
222
+ pytz/zoneinfo/America/Sitka,sha256=aiS7Fk37hZpzZ9VkeJQeF-BqTLRC1QOTCgMAJwT8UxA,2329
223
+ pytz/zoneinfo/America/St_Barthelemy,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
224
+ pytz/zoneinfo/America/St_Johns,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655
225
+ pytz/zoneinfo/America/St_Kitts,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
226
+ pytz/zoneinfo/America/St_Lucia,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
227
+ pytz/zoneinfo/America/St_Thomas,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
228
+ pytz/zoneinfo/America/St_Vincent,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
229
+ pytz/zoneinfo/America/Swift_Current,sha256=RRKOF7vZC8VvYxD8PP4J1_hUPayKBP7Lu80avRkfPDY,560
230
+ pytz/zoneinfo/America/Tegucigalpa,sha256=EzOz7ntTlreMq69JZ2CcAb8Ps98V9bUMN480tpPIyw4,252
231
+ pytz/zoneinfo/America/Thule,sha256=8xuPRaZU8RgO5ECqFYHYmnHioc81sBOailkVu8Y02i8,1502
232
+ pytz/zoneinfo/America/Thunder_Bay,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494
233
+ pytz/zoneinfo/America/Tijuana,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374
234
+ pytz/zoneinfo/America/Toronto,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494
235
+ pytz/zoneinfo/America/Tortola,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
236
+ pytz/zoneinfo/America/Vancouver,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892
237
+ pytz/zoneinfo/America/Virgin,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246
238
+ pytz/zoneinfo/America/Whitehorse,sha256=TrR6PCnYG-mSClBMohqlP8qnYhXMUsydI-L-quXFxyM,1614
239
+ pytz/zoneinfo/America/Winnipeg,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868
240
+ pytz/zoneinfo/America/Yakutat,sha256=tFwnKbvwhyyn4LNTAn5ye_JWDdxjCerNDt7oOwUwO2M,2305
241
+ pytz/zoneinfo/America/Yellowknife,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332
242
+ pytz/zoneinfo/Antarctica/Casey,sha256=VeaLOxTfDyjfGXq5Ul95JEIMXNWHSW-0N3yOoS7VK-c,423
243
+ pytz/zoneinfo/Antarctica/Davis,sha256=XB12dEq0Q-3XkzBNTNC7G1fzH-WxxctIuZqI3zp8ypI,283
244
+ pytz/zoneinfo/Antarctica/DumontDUrville,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172
245
+ pytz/zoneinfo/Antarctica/Macquarie,sha256=ie7RlaU8RHTorVVj-MX8StKMqx_oXf4UH2PUqpzcwe0,2260
246
+ pytz/zoneinfo/Antarctica/Mawson,sha256=EjIFbqRdr2ZJBaI1XvoWRptnnW1LFrlhydxDDuIQjSI,185
247
+ pytz/zoneinfo/Antarctica/McMurdo,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437
248
+ pytz/zoneinfo/Antarctica/Palmer,sha256=HTZY0M8td7oUx5REPgRCHuqKg5V3fjJEi4lYBNL4Etg,1404
249
+ pytz/zoneinfo/Antarctica/Rothera,sha256=_9NY-f8vkozQYrjbUHP5YjcICg0-LuyA9PnIeK123RU,150
250
+ pytz/zoneinfo/Antarctica/South_Pole,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437
251
+ pytz/zoneinfo/Antarctica/Syowa,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151
252
+ pytz/zoneinfo/Antarctica/Troll,sha256=fjcYppwr1FnjEssee-RLgGOANzoUyfjse-RGK46PR2E,1148
253
+ pytz/zoneinfo/Antarctica/Vostok,sha256=KfftwdzK6PkMDz0d-D3z4HKIBgY9KqsqHnTnqsPMrUg,213
254
+ pytz/zoneinfo/Arctic/Longyearbyen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298
255
+ pytz/zoneinfo/Asia/Aden,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151
256
+ pytz/zoneinfo/Asia/Almaty,sha256=lPLWXk2f1mWYRQZFkIrq_5HkhocsUBis0M-yhdDHcBQ,983
257
+ pytz/zoneinfo/Asia/Amman,sha256=Qv4cXXw7KBQWE882cgj0kjQ3wh1vpV1orJ2v2Jjxr2U,1433
258
+ pytz/zoneinfo/Asia/Anadyr,sha256=WqKnHo5IHSWZ08d2sS5ytHtv0MQMoczP3W9zbDDrbYU,1174
259
+ pytz/zoneinfo/Asia/Aqtau,sha256=4n654FZtDssXSfhQszjZG5OmtbE2zo1KbiWcYrFJg00,969
260
+ pytz/zoneinfo/Asia/Aqtobe,sha256=1oFHTb-ybcTqLXm0r1ZOVgdYMTHlGoNs-Pgvux50d3E,997
261
+ pytz/zoneinfo/Asia/Ashgabat,sha256=-sfGnRumio7_Bs8w9YH4xRDWgjB3wBeW7c0C56Qqk64,605
262
+ pytz/zoneinfo/Asia/Ashkhabad,sha256=-sfGnRumio7_Bs8w9YH4xRDWgjB3wBeW7c0C56Qqk64,605
263
+ pytz/zoneinfo/Asia/Atyrau,sha256=_U8COUIE9nG_HKddZE1Q0sPuz3rMwfjwmfnVDY_vSmg,977
264
+ pytz/zoneinfo/Asia/Baghdad,sha256=S-plKI4zCLqI0idGABEk3oRTazNyrIj2T98-EtWtZD8,969
265
+ pytz/zoneinfo/Asia/Bahrain,sha256=wklGY3WPGp-z1OUwb_KOHzRTwBndt1RfDg9Uttt36G4,185
266
+ pytz/zoneinfo/Asia/Baku,sha256=6_hq98SGG0j0JA8qYx96WcIMZSLW4w460QXh_OM_ccg,1213
267
+ pytz/zoneinfo/Asia/Bangkok,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185
268
+ pytz/zoneinfo/Asia/Barnaul,sha256=3zeUimLTMrIZE0vX6XHFvB3MoqExoVbE5CSm6GV0zf0,1207
269
+ pytz/zoneinfo/Asia/Beirut,sha256=_Z_2ZAg_iL9vU51JDB8CB04uXBDrf1kLIis-JnXaS2o,2154
270
+ pytz/zoneinfo/Asia/Bishkek,sha256=IOoUyjABILCkXu1rjCIqSwAufRYFklc5YAC4jdhVw6Q,969
271
+ pytz/zoneinfo/Asia/Brunei,sha256=D5qtyWJ_SM8bTQeJJIYhqqojxlVKbrFC1EYMDU9GzXQ,469
272
+ pytz/zoneinfo/Asia/Calcutta,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285
273
+ pytz/zoneinfo/Asia/Chita,sha256=LbSlS23swFkANUScg8zkNR0imANWNfOIaYd39HbLdIQ,1207
274
+ pytz/zoneinfo/Asia/Choibalsan,sha256=atm7FmPwZGsftLM7vS1LltjcdaDC-DSg1cIdP2MF17I,935
275
+ pytz/zoneinfo/Asia/Chongqing,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561
276
+ pytz/zoneinfo/Asia/Chungking,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561
277
+ pytz/zoneinfo/Asia/Colombo,sha256=w52L7bgT4m5hcgRuevIPY83xytfkBmkLhnKMwp16KsY,358
278
+ pytz/zoneinfo/Asia/Dacca,sha256=-xulJ2KVhvKp6rlZLMydpw7oXVirk-riEH-181xPE54,323
279
+ pytz/zoneinfo/Asia/Damascus,sha256=EthGheaHWmy5IrLCc9NmM3jvASQFHt8TsBF07I1tgbg,1873
280
+ pytz/zoneinfo/Asia/Dhaka,sha256=-xulJ2KVhvKp6rlZLMydpw7oXVirk-riEH-181xPE54,323
281
+ pytz/zoneinfo/Asia/Dili,sha256=0mUs0Utk-uW9deZV3cBUTpfWMgFvl0DyN29JuKvKMyw,213
282
+ pytz/zoneinfo/Asia/Dubai,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151
283
+ pytz/zoneinfo/Asia/Dushanbe,sha256=koYnnYWuFsBXd1vJfZsGdpwnbFHEwvkGBmSrrx3KIss,577
284
+ pytz/zoneinfo/Asia/Famagusta,sha256=CFrcygd8ude5x6OEtfM_Dw0KYHoxpPPzq46KoHVxjjc,2028
285
+ pytz/zoneinfo/Asia/Gaza,sha256=t0YxcUQL53VNKnKbKijn0OE_MaryEynonabse-iTtzs,3844
286
+ pytz/zoneinfo/Asia/Harbin,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561
287
+ pytz/zoneinfo/Asia/Hebron,sha256=6Y0USHKx-xoCxCr_WpCuM3olP1vUGnzrcnGiyQFcqdQ,3872
288
+ pytz/zoneinfo/Asia/Ho_Chi_Minh,sha256=Lnv1vpUNAXBo8v0b9d9AQpy-AEyO5Qa2Ig0PvDkjrmU,337
289
+ pytz/zoneinfo/Asia/Hong_Kong,sha256=al_O4kPlq5JpgkLYjEaZzrcgiiLul9NC0R5B69JVWhc,1233
290
+ pytz/zoneinfo/Asia/Hovd,sha256=Zn4PLGlD-URJDsbChor5bqWTzuAil2tbrGJW0j5TLbs,877
291
+ pytz/zoneinfo/Asia/Irkutsk,sha256=IVuoXCwdeI-KIUfFkEt6yBjqYP3V9GTrF-_WLnffFzk,1229
292
+ pytz/zoneinfo/Asia/Istanbul,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933
293
+ pytz/zoneinfo/Asia/Jakarta,sha256=TvEzBvSzfzFCdOsMAZ0QgR95JA5xf3kAZONhy5gEXRE,383
294
+ pytz/zoneinfo/Asia/Jayapura,sha256=ihzUd-L8HUVqG-Na10MyPE-YYwjVFj-xerqjTN4EJZs,221
295
+ pytz/zoneinfo/Asia/Jerusalem,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388
296
+ pytz/zoneinfo/Asia/Kabul,sha256=JZEbo8bSj_L7HnXUm2gAUlNlCvJlRJhFkSHCg5o3ggk,194
297
+ pytz/zoneinfo/Asia/Kamchatka,sha256=KY1PlJvRSNkY_5hyJBxj5DDweeYVQaBK05ZgL3kdcCY,1152
298
+ pytz/zoneinfo/Asia/Karachi,sha256=iB-mWMTXUyfBwAkZdz8_UmEw0xsgxIub-KNI7akzhkk,379
299
+ pytz/zoneinfo/Asia/Kashgar,sha256=F1ZOdZZDsVHwDJinksR-hjcqPzqOljvdreZIWFulJxY,151
300
+ pytz/zoneinfo/Asia/Kathmandu,sha256=_RsfeSWbCr8kM4YRJi7Xv6hAEiHW14IFhsXsfhbPjoM,198
301
+ pytz/zoneinfo/Asia/Katmandu,sha256=_RsfeSWbCr8kM4YRJi7Xv6hAEiHW14IFhsXsfhbPjoM,198
302
+ pytz/zoneinfo/Asia/Khandyga,sha256=bKfmw6k5qYDQsEHG3Mv-VYis3YhCeV7qijDxfxQNn_g,1257
303
+ pytz/zoneinfo/Asia/Kolkata,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285
304
+ pytz/zoneinfo/Asia/Krasnoyarsk,sha256=D5KE_1wWSD2YdixDy8n3LBNaAlE1_y3TWXw6NrxFKKA,1193
305
+ pytz/zoneinfo/Asia/Kuala_Lumpur,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401
306
+ pytz/zoneinfo/Asia/Kuching,sha256=D5qtyWJ_SM8bTQeJJIYhqqojxlVKbrFC1EYMDU9GzXQ,469
307
+ pytz/zoneinfo/Asia/Kuwait,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151
308
+ pytz/zoneinfo/Asia/Macao,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227
309
+ pytz/zoneinfo/Asia/Macau,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227
310
+ pytz/zoneinfo/Asia/Magadan,sha256=HccEEXBQvMmLoC_JE-zP_MlLAZ1WmNLQLfM3tJt55M4,1208
311
+ pytz/zoneinfo/Asia/Makassar,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254
312
+ pytz/zoneinfo/Asia/Manila,sha256=ujfq0kl1EhxcYSOrG-FS750aNaYUt1TT4bFuK4EcL_c,328
313
+ pytz/zoneinfo/Asia/Muscat,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151
314
+ pytz/zoneinfo/Asia/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002
315
+ pytz/zoneinfo/Asia/Novokuznetsk,sha256=pyxxtSUtYDeVmFk0Cg-F33laZS0iKtde9_GJnL9f0KM,1151
316
+ pytz/zoneinfo/Asia/Novosibirsk,sha256=5K2-Gx15ThlHfolyW85S5zREtAcMjeHBYWK4E8x2LdY,1207
317
+ pytz/zoneinfo/Asia/Omsk,sha256=HyXIWItJXBKVHUzWcQPi1Mmd6ZLmZk-QhRUo9Kv2XOI,1193
318
+ pytz/zoneinfo/Asia/Oral,sha256=WQT4qRmC9RI_ll8zB9FvkAL8ezGb8qoqWd75GTlC7kQ,991
319
+ pytz/zoneinfo/Asia/Phnom_Penh,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185
320
+ pytz/zoneinfo/Asia/Pontianak,sha256=inOXwuKtjKv1z_eliPZSIqjSt6whtuxhPeG1YpjU_BQ,353
321
+ pytz/zoneinfo/Asia/Pyongyang,sha256=_-g3GnDAtfDX4XAktXH9jFouLUDmOovnjoOfvRpUDsE,237
322
+ pytz/zoneinfo/Asia/Qatar,sha256=wklGY3WPGp-z1OUwb_KOHzRTwBndt1RfDg9Uttt36G4,185
323
+ pytz/zoneinfo/Asia/Qostanay,sha256=HIjln8QIPNRU6MkWzyPi6vDrjlmVZ4XzFxcUHtXMi7s,1025
324
+ pytz/zoneinfo/Asia/Qyzylorda,sha256=JZLNN6NuLkqaWEeVaCZiW_gL6BrIFL9lr65iK7myVPg,1011
325
+ pytz/zoneinfo/Asia/Rangoon,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254
326
+ pytz/zoneinfo/Asia/Riyadh,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151
327
+ pytz/zoneinfo/Asia/Saigon,sha256=Lnv1vpUNAXBo8v0b9d9AQpy-AEyO5Qa2Ig0PvDkjrmU,337
328
+ pytz/zoneinfo/Asia/Sakhalin,sha256=xzAor82ihAe-yXEwC6OWiMzo9b6Z-oQl39NIkU5Hhbs,1188
329
+ pytz/zoneinfo/Asia/Samarkand,sha256=zJKSRt3lEvd6Qvg9b49QAyO4cTJyVnTKyPYcyudpHxk,563
330
+ pytz/zoneinfo/Asia/Seoul,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617
331
+ pytz/zoneinfo/Asia/Shanghai,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561
332
+ pytz/zoneinfo/Asia/Singapore,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401
333
+ pytz/zoneinfo/Asia/Srednekolymsk,sha256=efaaT8iFHrcccp-VZKNMvtTuPLNjG5V9JH5KKHhH3SI,1194
334
+ pytz/zoneinfo/Asia/Taipei,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761
335
+ pytz/zoneinfo/Asia/Tashkent,sha256=apRPy251fSRy_ixsg3BOZNmUbHdO86P5-PdgC1Xws7U,577
336
+ pytz/zoneinfo/Asia/Tbilisi,sha256=zQ-2bVq5_USUSbwN6q0qvWjD-HXkKaH4ifMVq1lEeIM,1021
337
+ pytz/zoneinfo/Asia/Tehran,sha256=LQMch2TMA4wI23SQzoIrlZh0_KceXQegurwxCZ5YDlY,1248
338
+ pytz/zoneinfo/Asia/Tel_Aviv,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388
339
+ pytz/zoneinfo/Asia/Thimbu,sha256=G2nTQVEMmKlWt0B74_fUAL7KQ3YAu__J6HciiYs2IyU,189
340
+ pytz/zoneinfo/Asia/Thimphu,sha256=G2nTQVEMmKlWt0B74_fUAL7KQ3YAu__J6HciiYs2IyU,189
341
+ pytz/zoneinfo/Asia/Tokyo,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309
342
+ pytz/zoneinfo/Asia/Tomsk,sha256=cr0ULZgWBnQfzDiJeYmqpA7Xo5QRzurvrHsrbZsnhOQ,1207
343
+ pytz/zoneinfo/Asia/Ujung_Pandang,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254
344
+ pytz/zoneinfo/Asia/Ulaanbaatar,sha256=qUkXRsTc_u7B90JxULSu7yzKbGtGfKcfEFIasGPC2ec,877
345
+ pytz/zoneinfo/Asia/Ulan_Bator,sha256=qUkXRsTc_u7B90JxULSu7yzKbGtGfKcfEFIasGPC2ec,877
346
+ pytz/zoneinfo/Asia/Urumqi,sha256=F1ZOdZZDsVHwDJinksR-hjcqPzqOljvdreZIWFulJxY,151
347
+ pytz/zoneinfo/Asia/Ust-Nera,sha256=zsG8kgnw0Fcs5N2WwNTVmvWkTlpwf7Oo8y68HcXjYyw,1238
348
+ pytz/zoneinfo/Asia/Vientiane,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185
349
+ pytz/zoneinfo/Asia/Vladivostok,sha256=XMQLMh5SPbI6C4R3UO4KhbnG4hWVkHNedzCQeqxFk6A,1194
350
+ pytz/zoneinfo/Asia/Yakutsk,sha256=PPNrRGgg9jefOUM-6M8XqaIm-ElfmRZSWAtSGKLzNXQ,1193
351
+ pytz/zoneinfo/Asia/Yangon,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254
352
+ pytz/zoneinfo/Asia/Yekaterinburg,sha256=4NyEW6Xjr4UsWPh63HIPI4G6GT_tVG1Xkgc2xbwGjzA,1229
353
+ pytz/zoneinfo/Asia/Yerevan,sha256=FM0pUA4NbTWBb_CsJ5KCLVrLoNmad7njBKqFrJBDoxE,1137
354
+ pytz/zoneinfo/Atlantic/Azores,sha256=NyNrE2YIwL9yVddpECcYWwci5JzrfjxiIXP7RP0MrL8,3498
355
+ pytz/zoneinfo/Atlantic/Bermuda,sha256=LNGKfMsnYvwImjTyzXrLhMOHHDu7qI67RbYNKvvI15I,2396
356
+ pytz/zoneinfo/Atlantic/Canary,sha256=ymK9ufqphvNjDK3hzikN4GfkcR3QeCBiPKyVc6FjlbA,1897
357
+ pytz/zoneinfo/Atlantic/Cape_Verde,sha256=o92pLdLFX_b9vUiq3rNpca4tupIO3dx9rNrnPcA8474,256
358
+ pytz/zoneinfo/Atlantic/Faeroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815
359
+ pytz/zoneinfo/Atlantic/Faroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815
360
+ pytz/zoneinfo/Atlantic/Jan_Mayen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298
361
+ pytz/zoneinfo/Atlantic/Madeira,sha256=21Zcy0xRqDN3oY8jmjjO-LI7aC3G9mcS9ytaYg0g7ik,3503
362
+ pytz/zoneinfo/Atlantic/Reykjavik,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
363
+ pytz/zoneinfo/Atlantic/South_Georgia,sha256=I9SAcPPumy6Xf9P7dg2aE16oxwDIqyKFqinJTC-XsgM,150
364
+ pytz/zoneinfo/Atlantic/St_Helena,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
365
+ pytz/zoneinfo/Atlantic/Stanley,sha256=siEjXTAuTum_4XGtS98MBE34XW_5xgXShEX5OMnSFjo,1200
366
+ pytz/zoneinfo/Australia/ACT,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190
367
+ pytz/zoneinfo/Australia/Adelaide,sha256=ld2EbxU75oVgmPe703z-I6aqLg0Kmv62ZcCGzkT5R20,2208
368
+ pytz/zoneinfo/Australia/Brisbane,sha256=eW6Qzze2t0-speJmmvt1JMzbkSadIKdE84XHc7JUtGc,419
369
+ pytz/zoneinfo/Australia/Broken_Hill,sha256=3k_3ljTvS5GSfo7Xh6w71UgR3aAwYPBsnCJ-mlEYCqQ,2229
370
+ pytz/zoneinfo/Australia/Canberra,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190
371
+ pytz/zoneinfo/Australia/Currie,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358
372
+ pytz/zoneinfo/Australia/Darwin,sha256=fn0IZhIW98FAnzLig-_GBtW5LA54jajdeeUzg4tCGvo,325
373
+ pytz/zoneinfo/Australia/Eucla,sha256=i1-XGG8I6E0dXIdWGF4DlkfDLWhiAxJ_3gMpt-nm_u4,456
374
+ pytz/zoneinfo/Australia/Hobart,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358
375
+ pytz/zoneinfo/Australia/LHI,sha256=oyPFQzmRqWPrSXt9pNHQmEi_PvX11k2clknziOS6ud8,1846
376
+ pytz/zoneinfo/Australia/Lindeman,sha256=xM6Udx22oLNoLR1Y7GQhHOYov8nw3xQNqgc_NVQ2JK4,475
377
+ pytz/zoneinfo/Australia/Lord_Howe,sha256=oyPFQzmRqWPrSXt9pNHQmEi_PvX11k2clknziOS6ud8,1846
378
+ pytz/zoneinfo/Australia/Melbourne,sha256=lvx_MQcunMc6u2smIrl8X427bLsXvjkgpCSdjYCTNBM,2190
379
+ pytz/zoneinfo/Australia/NSW,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190
380
+ pytz/zoneinfo/Australia/North,sha256=fn0IZhIW98FAnzLig-_GBtW5LA54jajdeeUzg4tCGvo,325
381
+ pytz/zoneinfo/Australia/Perth,sha256=Al1DOUh4U_ofMUQSeVlzSyD3x7SUjP9dchSaBUGmeWg,446
382
+ pytz/zoneinfo/Australia/Queensland,sha256=eW6Qzze2t0-speJmmvt1JMzbkSadIKdE84XHc7JUtGc,419
383
+ pytz/zoneinfo/Australia/South,sha256=ld2EbxU75oVgmPe703z-I6aqLg0Kmv62ZcCGzkT5R20,2208
384
+ pytz/zoneinfo/Australia/Sydney,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190
385
+ pytz/zoneinfo/Australia/Tasmania,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358
386
+ pytz/zoneinfo/Australia/Victoria,sha256=lvx_MQcunMc6u2smIrl8X427bLsXvjkgpCSdjYCTNBM,2190
387
+ pytz/zoneinfo/Australia/West,sha256=Al1DOUh4U_ofMUQSeVlzSyD3x7SUjP9dchSaBUGmeWg,446
388
+ pytz/zoneinfo/Australia/Yancowinna,sha256=3k_3ljTvS5GSfo7Xh6w71UgR3aAwYPBsnCJ-mlEYCqQ,2229
389
+ pytz/zoneinfo/Brazil/Acre,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614
390
+ pytz/zoneinfo/Brazil/DeNoronha,sha256=feeRAijQqKylZgqe84nKhsFLycT5zIBm7mLIvdyGw4w,702
391
+ pytz/zoneinfo/Brazil/East,sha256=BMBnRO4_4HjvO4t3njjrMGZr-ZPmegkvyvL8KPY6ZM4,1430
392
+ pytz/zoneinfo/Brazil/West,sha256=F6RLOOeOi9lymZiQmQ9pR8tFpPZ6EguNdPfOc6BhXDE,590
393
+ pytz/zoneinfo/CET,sha256=o4omkrM_IsITxooUo8krM921XfBdvRs9JhwGXGd-Ypg,2094
394
+ pytz/zoneinfo/CST6CDT,sha256=WGbtZ1FwjRX6Jeo_TCXKsfeDs4V9uhXGJfcnLJhk3s0,2310
395
+ pytz/zoneinfo/Canada/Atlantic,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424
396
+ pytz/zoneinfo/Canada/Central,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868
397
+ pytz/zoneinfo/Canada/Eastern,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494
398
+ pytz/zoneinfo/Canada/Mountain,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332
399
+ pytz/zoneinfo/Canada/Newfoundland,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655
400
+ pytz/zoneinfo/Canada/Pacific,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892
401
+ pytz/zoneinfo/Canada/Saskatchewan,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980
402
+ pytz/zoneinfo/Canada/Yukon,sha256=TrR6PCnYG-mSClBMohqlP8qnYhXMUsydI-L-quXFxyM,1614
403
+ pytz/zoneinfo/Chile/Continental,sha256=0CDw13dCMUsoquMupoJgupkzAUNhDK6E0lVxURA7osA,2515
404
+ pytz/zoneinfo/Chile/EasterIsland,sha256=QbubBs_xQlvKweAnurhyHjIK4ji77Gh4G-usXul6XVM,2219
405
+ pytz/zoneinfo/Cuba,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416
406
+ pytz/zoneinfo/EET,sha256=gGVsW5-qnI7ty8vqVK1ADWhunrvAT8kUC79GUf-_7G8,1908
407
+ pytz/zoneinfo/EST,sha256=uKE_VPKfxGyYEsyqV_DdE2MW55vs_qUioOdIn5Goobc,114
408
+ pytz/zoneinfo/EST5EDT,sha256=fwzEMT1jgnY2dDjd0EqDl26_7LC-oF48Bd4ng5311H0,2310
409
+ pytz/zoneinfo/Egypt,sha256=Lft-GCLQhaSJm9VqUmsEFoHIS1Vhfa7pFJn9GZCpifs,2399
410
+ pytz/zoneinfo/Eire,sha256=QOjSocO1cihNo59vQkWxvIFPRSxE9apz0KARVx1czEM,3492
411
+ pytz/zoneinfo/Etc/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
412
+ pytz/zoneinfo/Etc/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
413
+ pytz/zoneinfo/Etc/GMT+1,sha256=1Qzl2X9rQ_RXEf11yH09wQZCr_ph6UdFP7E0yu9s-IQ,116
414
+ pytz/zoneinfo/Etc/GMT+10,sha256=JEQyQyQlkC0o6ZTdeVjZhCIOh6cK5TF7H00Pkls-sUI,117
415
+ pytz/zoneinfo/Etc/GMT+11,sha256=tWvcvYMFCaE60nJVvDrrov7stJvs1KQYOyrhl3dzcUs,117
416
+ pytz/zoneinfo/Etc/GMT+12,sha256=b70HEhErq8IJmq8x7cOZy4eR__3fq5uHHpjvPBEHqMA,117
417
+ pytz/zoneinfo/Etc/GMT+2,sha256=T6Ep5zhslBKbYaECFUB6gUKh3iTZPyMoW1kjhonxrUo,116
418
+ pytz/zoneinfo/Etc/GMT+3,sha256=QGoYrE04bUJ-OzL37dt2MZT5FxWNLpJDPVXgJbstYZA,116
419
+ pytz/zoneinfo/Etc/GMT+4,sha256=RWrkNki-wV7X-coe0VvufBe6LrWVpkPJgia5QQYEnBo,116
420
+ pytz/zoneinfo/Etc/GMT+5,sha256=oRmeC41dgYXT-zzyZIRKXN9IvdL2Da5nTuwmG2_prIA,116
421
+ pytz/zoneinfo/Etc/GMT+6,sha256=d6dAnwiejyFI2n7AzFlFW0aFAT6zYNEjBIEG0uu0sbQ,116
422
+ pytz/zoneinfo/Etc/GMT+7,sha256=TqjYbzd0YHpx1wisFg08J19wTpg6ztJLLongZY_lozs,116
423
+ pytz/zoneinfo/Etc/GMT+8,sha256=th_8bIMmYgRPCesBrbmBhRr0jQO7whd70LiY9HfwJyk,116
424
+ pytz/zoneinfo/Etc/GMT+9,sha256=Qq5E6iUS7JMJIymT7YoqlI8MtqtVy0mr9t6zWFtWc9Y,116
425
+ pytz/zoneinfo/Etc/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
426
+ pytz/zoneinfo/Etc/GMT-1,sha256=73F1eU8uAQGP3mcoB2q99CjfManGFHk3fefljp9pYC4,117
427
+ pytz/zoneinfo/Etc/GMT-10,sha256=fKWWNwLBOp1OkKjtc1w9LIXJR1mTTD-JdvYflRy1IrU,118
428
+ pytz/zoneinfo/Etc/GMT-11,sha256=D2S79n6psa9t9_2vj5wIrFpHH2OJLcCKP6vtwzFZINY,118
429
+ pytz/zoneinfo/Etc/GMT-12,sha256=me4V6lmWI8gSr8H7N41WAD0Eww1anh_EF34Qr9UoSnI,118
430
+ pytz/zoneinfo/Etc/GMT-13,sha256=xbmbG1BQA6Dlpa_iUwEGyJxW4a3t6lmawdPKAE8vbR8,118
431
+ pytz/zoneinfo/Etc/GMT-14,sha256=PpXoREBh02qFpvxVMj2pV9IAzSQvBE7XPvnN9qSZ-Kc,118
432
+ pytz/zoneinfo/Etc/GMT-2,sha256=ve6hWLdeuiLhqagaWLqMD6HNybS1chRwjudfTZ2bYBE,117
433
+ pytz/zoneinfo/Etc/GMT-3,sha256=N77jILanuLDVkLsdujXZSu-dsHiwN5MIpwh7fMUifso,117
434
+ pytz/zoneinfo/Etc/GMT-4,sha256=LSko5fVHqPl5zfwjGqkbMa_OFnvtpT6o_4xYxNz9n5o,117
435
+ pytz/zoneinfo/Etc/GMT-5,sha256=uLaSR5Mb18HRTsAA5SveY9PAJ97dO8QzIWqNXe3wZb4,117
436
+ pytz/zoneinfo/Etc/GMT-6,sha256=JSN-RUAphJ50fpIv7cYC6unrtrz9S1Wma-piDHlGe7c,117
437
+ pytz/zoneinfo/Etc/GMT-7,sha256=vVAOF8xU9T9ESnw68c0SFXpcvkoopaiwTR0zbefHHSU,117
438
+ pytz/zoneinfo/Etc/GMT-8,sha256=S7xFQbFMpiDZy4v5L4D9fCrjRIzzoLC5p8Se23xi7us,117
439
+ pytz/zoneinfo/Etc/GMT-9,sha256=I5vHNmUK-Yyg_S1skFN44VGVzBgktjFgVQiDIKO4aMI,117
440
+ pytz/zoneinfo/Etc/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
441
+ pytz/zoneinfo/Etc/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
442
+ pytz/zoneinfo/Etc/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
443
+ pytz/zoneinfo/Etc/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
444
+ pytz/zoneinfo/Etc/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
445
+ pytz/zoneinfo/Etc/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
446
+ pytz/zoneinfo/Europe/Amsterdam,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933
447
+ pytz/zoneinfo/Europe/Andorra,sha256=gTB5jCQmvIw3JJi1_vAcOYuhtzPBR6RXUx9gVV6p6ug,1742
448
+ pytz/zoneinfo/Europe/Astrakhan,sha256=ZeGDZjwVVRoeR-J642zEnN26BPL58ViTJLbwnk7pLXk,1151
449
+ pytz/zoneinfo/Europe/Athens,sha256=XDY-FBUddRyQHN8GxQLZ4awjuOlWlzlUdjv7OdXFNzA,2262
450
+ pytz/zoneinfo/Europe/Belfast,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664
451
+ pytz/zoneinfo/Europe/Belgrade,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920
452
+ pytz/zoneinfo/Europe/Berlin,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298
453
+ pytz/zoneinfo/Europe/Bratislava,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301
454
+ pytz/zoneinfo/Europe/Brussels,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933
455
+ pytz/zoneinfo/Europe/Bucharest,sha256=nfg6-bU2D6DMEWb9EMIBR5kxnNsbDSx0UKfHH_ZzqFc,2184
456
+ pytz/zoneinfo/Europe/Budapest,sha256=lNwqxWciBvw9ei81VQwIKHbC_ZDJjpgHU6HFg4wCUkY,2368
457
+ pytz/zoneinfo/Europe/Busingen,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909
458
+ pytz/zoneinfo/Europe/Chisinau,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390
459
+ pytz/zoneinfo/Europe/Copenhagen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298
460
+ pytz/zoneinfo/Europe/Dublin,sha256=QOjSocO1cihNo59vQkWxvIFPRSxE9apz0KARVx1czEM,3492
461
+ pytz/zoneinfo/Europe/Gibraltar,sha256=a87WpaBlvxI4gAU9OpQOkN8VUJbirVWYf-VfFLTIoS4,3068
462
+ pytz/zoneinfo/Europe/Guernsey,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664
463
+ pytz/zoneinfo/Europe/Helsinki,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900
464
+ pytz/zoneinfo/Europe/Isle_of_Man,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664
465
+ pytz/zoneinfo/Europe/Istanbul,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933
466
+ pytz/zoneinfo/Europe/Jersey,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664
467
+ pytz/zoneinfo/Europe/Kaliningrad,sha256=s7GXSe1YvMcs7AiUhHNTA6I4nAOQn_Kmz_ZqJYO-LMM,1493
468
+ pytz/zoneinfo/Europe/Kiev,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120
469
+ pytz/zoneinfo/Europe/Kirov,sha256=P7T2Zf5Eo6o4L4Dbg_BfiFjUgTj0dQXlrwY-QZ1eBVk,1185
470
+ pytz/zoneinfo/Europe/Kyiv,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120
471
+ pytz/zoneinfo/Europe/Lisbon,sha256=mpUpxGexMhbOBImDLSQs5-GAk7pm7tg4qYW044Kkle0,3497
472
+ pytz/zoneinfo/Europe/Ljubljana,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920
473
+ pytz/zoneinfo/Europe/London,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664
474
+ pytz/zoneinfo/Europe/Luxembourg,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933
475
+ pytz/zoneinfo/Europe/Madrid,sha256=mkLX03rW3t0tmzKBIPe_noUvaFDErwC6_5ZPZZsWHOo,2614
476
+ pytz/zoneinfo/Europe/Malta,sha256=EhKcbPL47765tWAiQ57cusaK2TaIQqZCgtJoEZs3Ud0,2620
477
+ pytz/zoneinfo/Europe/Mariehamn,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900
478
+ pytz/zoneinfo/Europe/Minsk,sha256=KgPm0fHycntgd3xbTmmDl4O13Xh_9e2zUnd8XFSU29o,1307
479
+ pytz/zoneinfo/Europe/Monaco,sha256=q3ehSIot1GZ6TyMHIjbg0oRf4ghAXuwbSDSYVim6evg,2962
480
+ pytz/zoneinfo/Europe/Moscow,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535
481
+ pytz/zoneinfo/Europe/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002
482
+ pytz/zoneinfo/Europe/Oslo,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298
483
+ pytz/zoneinfo/Europe/Paris,sha256=q3ehSIot1GZ6TyMHIjbg0oRf4ghAXuwbSDSYVim6evg,2962
484
+ pytz/zoneinfo/Europe/Podgorica,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920
485
+ pytz/zoneinfo/Europe/Prague,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301
486
+ pytz/zoneinfo/Europe/Riga,sha256=hJ2_0m1taW9IuA-hMyP5n-WX7YOrR0heKszJhgljRWk,2198
487
+ pytz/zoneinfo/Europe/Rome,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641
488
+ pytz/zoneinfo/Europe/Samara,sha256=nXL0IxbT6qu10CNuaDHxx4W1OaAnaaKTtIJ9N9URMoU,1201
489
+ pytz/zoneinfo/Europe/San_Marino,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641
490
+ pytz/zoneinfo/Europe/Sarajevo,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920
491
+ pytz/zoneinfo/Europe/Saratov,sha256=ygwjvXN13TgaWxjg6ysWEnHWNxwrVtkEbrk8t9bzVVw,1169
492
+ pytz/zoneinfo/Europe/Simferopol,sha256=tzl7xdNVSZprNCul4YE5LSpoR9JoujmOq8VbbB8wHic,1469
493
+ pytz/zoneinfo/Europe/Skopje,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920
494
+ pytz/zoneinfo/Europe/Sofia,sha256=hCQKXfMNrnA5xHNw_uzTjKzVw4-Bvsq5oGO4yUCv5tY,2077
495
+ pytz/zoneinfo/Europe/Stockholm,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298
496
+ pytz/zoneinfo/Europe/Tallinn,sha256=4a6JC0aIpMzqIV7O35zoG0LLJwkQq5AoXZ2ivkic6-w,2148
497
+ pytz/zoneinfo/Europe/Tirane,sha256=ztlZyCS9WCXeVW8nBun3Tyi5HUY0EtFbiBbEc1gucuw,2084
498
+ pytz/zoneinfo/Europe/Tiraspol,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390
499
+ pytz/zoneinfo/Europe/Ulyanovsk,sha256=c8Ad5p7CKj_1cCA7lVRpcPqbQXGYaX83cuu6uIFx-Bg,1253
500
+ pytz/zoneinfo/Europe/Uzhgorod,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120
501
+ pytz/zoneinfo/Europe/Vaduz,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909
502
+ pytz/zoneinfo/Europe/Vatican,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641
503
+ pytz/zoneinfo/Europe/Vienna,sha256=ZmI3kADE6bnrJEccqh73XXBY36L1G4DkpiTQImtNrUk,2200
504
+ pytz/zoneinfo/Europe/Vilnius,sha256=UFzRX3orCTB8d9IzlxJPy5eUA2oBPuCu1UJl-2D7C3U,2162
505
+ pytz/zoneinfo/Europe/Volgograd,sha256=RgFvt7mzZ-TtIKL9BVHmoNZLIeLIuiDdXeY10g2_vks,1193
506
+ pytz/zoneinfo/Europe/Warsaw,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654
507
+ pytz/zoneinfo/Europe/Zagreb,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920
508
+ pytz/zoneinfo/Europe/Zaporozhye,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120
509
+ pytz/zoneinfo/Europe/Zurich,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909
510
+ pytz/zoneinfo/Factory,sha256=aFFlKx93HXoJoF4SSuTlD8cZtJA-ne5oKzAa6eX2V4k,116
511
+ pytz/zoneinfo/GB,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664
512
+ pytz/zoneinfo/GB-Eire,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664
513
+ pytz/zoneinfo/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
514
+ pytz/zoneinfo/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
515
+ pytz/zoneinfo/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
516
+ pytz/zoneinfo/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
517
+ pytz/zoneinfo/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114
518
+ pytz/zoneinfo/HST,sha256=1YkCncvgL9Z5CmUo4Vk8VbQmgA7ZAQ0PtE37j1yOli8,115
519
+ pytz/zoneinfo/Hongkong,sha256=al_O4kPlq5JpgkLYjEaZzrcgiiLul9NC0R5B69JVWhc,1233
520
+ pytz/zoneinfo/Iceland,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148
521
+ pytz/zoneinfo/Indian/Antananarivo,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
522
+ pytz/zoneinfo/Indian/Chagos,sha256=2errXzKdFIcpU0L-XRhSHxhNabIzbI5lXV3Pq6lt40Y,185
523
+ pytz/zoneinfo/Indian/Christmas,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185
524
+ pytz/zoneinfo/Indian/Cocos,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254
525
+ pytz/zoneinfo/Indian/Comoro,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
526
+ pytz/zoneinfo/Indian/Kerguelen,sha256=F73ffVfBoUoHre0-DwsiQrYJcLpPOW-JJGk3n88lM5U,185
527
+ pytz/zoneinfo/Indian/Mahe,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151
528
+ pytz/zoneinfo/Indian/Maldives,sha256=F73ffVfBoUoHre0-DwsiQrYJcLpPOW-JJGk3n88lM5U,185
529
+ pytz/zoneinfo/Indian/Mauritius,sha256=Znqrc1chimlciJsYBOl0NvIHnrNdCxncGxWczq1PBeI,227
530
+ pytz/zoneinfo/Indian/Mayotte,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265
531
+ pytz/zoneinfo/Indian/Reunion,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151
532
+ pytz/zoneinfo/Iran,sha256=LQMch2TMA4wI23SQzoIrlZh0_KceXQegurwxCZ5YDlY,1248
533
+ pytz/zoneinfo/Israel,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388
534
+ pytz/zoneinfo/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482
535
+ pytz/zoneinfo/Japan,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309
536
+ pytz/zoneinfo/Kwajalein,sha256=TmZ_0f-ySQ-saBAlRXV0f49Itwne51VBXn6rWcrWqHQ,302
537
+ pytz/zoneinfo/Libya,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625
538
+ pytz/zoneinfo/MET,sha256=i3CKSuP4N_PAj7o-Cbk8zPEdFs0CWWBCAfg2JXDx5V8,2094
539
+ pytz/zoneinfo/MST,sha256=6IQwvtT12Bz1pTiqFuoVxNY-4ViS7ZrYHo5nPWwzKPw,114
540
+ pytz/zoneinfo/MST7MDT,sha256=910Ek32FKoSyZWY_H19VHaVvqb-JsvnWTOOHvhrKsE0,2310
541
+ pytz/zoneinfo/Mexico/BajaNorte,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374
542
+ pytz/zoneinfo/Mexico/BajaSur,sha256=RQQVwlEVHRp2X-c_0hJ46y54abTlqUuLkyrUUicyc5g,1128
543
+ pytz/zoneinfo/Mexico/General,sha256=A5MlfDUZ4O1-jMTRt0WPem7qqcW0Nrslls1hlc8C4-Q,1222
544
+ pytz/zoneinfo/NZ,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437
545
+ pytz/zoneinfo/NZ-CHAT,sha256=xhexVc5lfJ_qAv2d3HrII6lfRSxKZYBAjY2zpYkCGE8,2054
546
+ pytz/zoneinfo/Navajo,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460
547
+ pytz/zoneinfo/PRC,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561
548
+ pytz/zoneinfo/PST8PDT,sha256=Q7TCLkE69a6g7mPoPAkqhg-0dStyiAC0jVlM72KG_R8,2310
549
+ pytz/zoneinfo/Pacific/Apia,sha256=M3QKsp75Q7H1X3aeE_9ZqQli9aEkNCCQctZQ5sEKu00,598
550
+ pytz/zoneinfo/Pacific/Auckland,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437
551
+ pytz/zoneinfo/Pacific/Bougainville,sha256=hWE86eXnNx-vABbp7-YSIqWyecHPMIWLftVloAoPhL8,254
552
+ pytz/zoneinfo/Pacific/Chatham,sha256=xhexVc5lfJ_qAv2d3HrII6lfRSxKZYBAjY2zpYkCGE8,2054
553
+ pytz/zoneinfo/Pacific/Chuuk,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172
554
+ pytz/zoneinfo/Pacific/Easter,sha256=QbubBs_xQlvKweAnurhyHjIK4ji77Gh4G-usXul6XVM,2219
555
+ pytz/zoneinfo/Pacific/Efate,sha256=oSxNcQYx5-1FU2_yHzHI-hT-dMJcPxzy4XmdI1UxXAo,524
556
+ pytz/zoneinfo/Pacific/Enderbury,sha256=HNTAKrsH_R2W3QRlKcmNld5KcXdP0ygXCjEovc1i-6Q,220
557
+ pytz/zoneinfo/Pacific/Fakaofo,sha256=qOodpTMKjztvZIXVLe_f_kZ6WcHl9fCLE9ZsyvdFKLI,186
558
+ pytz/zoneinfo/Pacific/Fiji,sha256=jB5FbOsCnHVQQ2ohPiWEQUPhG6JybB3Nog3qT6WJQ0I,564
559
+ pytz/zoneinfo/Pacific/Funafuti,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152
560
+ pytz/zoneinfo/Pacific/Galapagos,sha256=_GJUYOjSiIjoNBO2qdq23isLMJ4NCVk3DKIRGeDc8BA,224
561
+ pytz/zoneinfo/Pacific/Gambier,sha256=gAS7gr1HH_re0uYnL6eWo5KGJ-B5QaiM8mV2cY5mQxE,150
562
+ pytz/zoneinfo/Pacific/Guadalcanal,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152
563
+ pytz/zoneinfo/Pacific/Guam,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494
564
+ pytz/zoneinfo/Pacific/Honolulu,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329
565
+ pytz/zoneinfo/Pacific/Johnston,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329
566
+ pytz/zoneinfo/Pacific/Kanton,sha256=HNTAKrsH_R2W3QRlKcmNld5KcXdP0ygXCjEovc1i-6Q,220
567
+ pytz/zoneinfo/Pacific/Kiritimati,sha256=hYk1Ooz-Lj1PuZCbNV2WJIvOLtCwSwq2u63cb1Z-3NQ,224
568
+ pytz/zoneinfo/Pacific/Kosrae,sha256=Q0jrb4zeDrd61bU4V8TqjMc0Iep8rWZyZqJ0uqsunxs,337
569
+ pytz/zoneinfo/Pacific/Kwajalein,sha256=TmZ_0f-ySQ-saBAlRXV0f49Itwne51VBXn6rWcrWqHQ,302
570
+ pytz/zoneinfo/Pacific/Majuro,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152
571
+ pytz/zoneinfo/Pacific/Marquesas,sha256=FTxPJTWtk48LVb3N2U64KLpLsmvu0DQBubTCg-dvyGM,159
572
+ pytz/zoneinfo/Pacific/Midway,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175
573
+ pytz/zoneinfo/Pacific/Nauru,sha256=9ASKgLHB-8nsTEK1ApzfTH0yQtbNAmGX-JI7uHZiqnA,238
574
+ pytz/zoneinfo/Pacific/Niue,sha256=OllXxukncR7a-SMmdFox5az1xpIPMhbahQhtObmpuDM,189
575
+ pytz/zoneinfo/Pacific/Norfolk,sha256=DMdX1Bm18lzNuiCWzwfeHUMRGXPS8v5AWnh-_EX_AZw,866
576
+ pytz/zoneinfo/Pacific/Noumea,sha256=tkHxxnxsXTOqz3YzWi0mkhTCIONzg-W7EpSRMdPjKdQ,290
577
+ pytz/zoneinfo/Pacific/Pago_Pago,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175
578
+ pytz/zoneinfo/Pacific/Palau,sha256=aN2HbT0reqwKrtLKDK9M2zb0d0ikdNlTrrntVxdH66o,166
579
+ pytz/zoneinfo/Pacific/Pitcairn,sha256=U4jAUuvsRNoy8XrPa16YpcXCcqHJY0u6JvCNgPEWO1c,188
580
+ pytz/zoneinfo/Pacific/Pohnpei,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152
581
+ pytz/zoneinfo/Pacific/Ponape,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152
582
+ pytz/zoneinfo/Pacific/Port_Moresby,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172
583
+ pytz/zoneinfo/Pacific/Rarotonga,sha256=wPEsoXbyDnuhfzkgLvUqhSzrMx_FD42uAPluSPMh3Bc,589
584
+ pytz/zoneinfo/Pacific/Saipan,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494
585
+ pytz/zoneinfo/Pacific/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175
586
+ pytz/zoneinfo/Pacific/Tahiti,sha256=BRff9G3E-iWKhOWR1Wu02Z0iMgjrwDXV-XNrqItXdTY,151
587
+ pytz/zoneinfo/Pacific/Tarawa,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152
588
+ pytz/zoneinfo/Pacific/Tongatapu,sha256=OppBZqTAZib9HY7U9AC-JavO7m6NxPGUtUfPQAl9oBY,358
589
+ pytz/zoneinfo/Pacific/Truk,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172
590
+ pytz/zoneinfo/Pacific/Wake,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152
591
+ pytz/zoneinfo/Pacific/Wallis,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152
592
+ pytz/zoneinfo/Pacific/Yap,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172
593
+ pytz/zoneinfo/Poland,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654
594
+ pytz/zoneinfo/Portugal,sha256=mpUpxGexMhbOBImDLSQs5-GAk7pm7tg4qYW044Kkle0,3497
595
+ pytz/zoneinfo/ROC,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761
596
+ pytz/zoneinfo/ROK,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617
597
+ pytz/zoneinfo/Singapore,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401
598
+ pytz/zoneinfo/Turkey,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933
599
+ pytz/zoneinfo/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
600
+ pytz/zoneinfo/US/Alaska,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371
601
+ pytz/zoneinfo/US/Aleutian,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356
602
+ pytz/zoneinfo/US/Arizona,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360
603
+ pytz/zoneinfo/US/Central,sha256=_roybr6I6sIAF6cYdIxGxoRpoef153Fty48dQ6bm9oY,3592
604
+ pytz/zoneinfo/US/East-Indiana,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682
605
+ pytz/zoneinfo/US/Eastern,sha256=6e0H177gx2qdRC0JHvHwFmj-58TyYBTAqGixn-bBipU,3552
606
+ pytz/zoneinfo/US/Hawaii,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329
607
+ pytz/zoneinfo/US/Indiana-Starke,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444
608
+ pytz/zoneinfo/US/Michigan,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230
609
+ pytz/zoneinfo/US/Mountain,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460
610
+ pytz/zoneinfo/US/Pacific,sha256=aJd7ua1tGG_vxser02AQpm4wAI3LLTdgh6QcSYYecmg,2852
611
+ pytz/zoneinfo/US/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175
612
+ pytz/zoneinfo/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
613
+ pytz/zoneinfo/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
614
+ pytz/zoneinfo/W-SU,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535
615
+ pytz/zoneinfo/WET,sha256=Sc0l03EfVs_aIi17I4KyZJFkwiAHat5BgpjuuFDhgQ0,1905
616
+ pytz/zoneinfo/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114
617
+ pytz/zoneinfo/iso3166.tab,sha256=oBpdFY8x1GrY5vjMKgbGQYEGgqk5fUYDIPaNVCG2XnE,4791
618
+ pytz/zoneinfo/leapseconds,sha256=fjC39Eu3wB6I4g7x_VL7HzvDVbiKbLUjfQAEgo7442I,3257
619
+ pytz/zoneinfo/tzdata.zi,sha256=8PWtzwDNZfLJU8Wa6Ktci7tg9V5mpvh26Vb0P8jBU0w,109390
620
+ pytz/zoneinfo/zone.tab,sha256=qSLfeCWE3tsCDIIQbr71DMkmCUXTIUEgNZgfN-60d-Y,18846
621
+ pytz/zoneinfo/zone1970.tab,sha256=FJErvL9wggoFluO2WceYn8ZQ-nA9A073Lub1x2Pzg40,17582
622
+ pytz/zoneinfo/zonenow.tab,sha256=YoPd7huhHsKlJliOO-eMIBE5-bHBKpbfjkSJQFAto6I,8311
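Each RECORD entry above follows the wheel format "path,sha256=<digest>,size", where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 hash. A minimal sketch of verifying one entry, assuming it runs from the site-packages directory that contains the listed files (the helper name check_record_line is ours):

    import base64
    import hashlib

    def check_record_line(line):
        # Split from the right so commas inside the path survive:
        # the last two fields are always the hash and the size.
        path, hash_field, size = line.rsplit(",", 2)
        algo, _, expected = hash_field.partition("=")
        with open(path, "rb") as f:
            data = f.read()
        # RECORD digests are URL-safe base64 with the "=" padding stripped.
        digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
        return digest.rstrip(b"=").decode("ascii") == expected and len(data) == int(size)

    # Entry copied verbatim from the RECORD above.
    print(check_record_line(
        "pytz/zoneinfo/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114"))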
llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
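The WHEEL file is RFC 822-style key/value metadata; the pair of Tag lines marks this as a universal (py2/py3, pure-Python) wheel, consistent with Root-Is-Purelib: true. A minimal stdlib-only sketch of parsing it (the wheel_text literal is copied from the file above):

    from email.parser import Parser

    wheel_text = (
        "Wheel-Version: 1.0\n"
        "Generator: bdist_wheel (0.37.1)\n"
        "Root-Is-Purelib: true\n"
        "Tag: py2-none-any\n"
        "Tag: py3-none-any\n"
    )
    meta = Parser().parsestr(wheel_text)
    print(meta["Root-Is-Purelib"])  # 'true'
    print(meta.get_all("Tag"))      # ['py2-none-any', 'py3-none-any']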
llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ pytz
llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe ADDED
@@ -0,0 +1 @@
1
+
llmeval-env/lib/python3.10/site-packages/regex/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .regex import *
2
+ from . import regex
3
+ __all__ = regex.__all__
llmeval-env/lib/python3.10/site-packages/regex/regex.py ADDED
@@ -0,0 +1,746 @@
1
+ #
2
+ # Secret Labs' Regular Expression Engine
3
+ #
4
+ # Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
5
+ #
6
+ # This version of the SRE library can be redistributed under CNRI's
7
+ # Python 1.6 license. For any other use, please contact Secret Labs
8
+ # AB ([email protected]).
9
+ #
10
+ # Portions of this engine have been developed in cooperation with
11
+ # CNRI. Hewlett-Packard provided funding for 1.6 integration and
12
+ # other compatibility work.
13
+ #
14
+ # 2010-01-16 mrab Python front-end re-written and extended
15
+
16
+ r"""Support for regular expressions (RE).
17
+
18
+ This module provides regular expression matching operations similar to those
19
+ found in Perl. It supports both 8-bit and Unicode strings; both the pattern and
20
+ the strings being processed can contain null bytes and characters outside the
21
+ US ASCII range.
22
+
23
+ Regular expressions can contain both special and ordinary characters. Most
24
+ ordinary characters, like "A", "a", or "0", are the simplest regular
25
+ expressions; they simply match themselves. You can concatenate ordinary
26
+ characters, so last matches the string 'last'.
27
+
28
+ There are a few differences between the old (legacy) behaviour and the new
29
+ (enhanced) behaviour, which are indicated by VERSION0 or VERSION1.
30
+
31
+ The special characters are:
32
+ "." Matches any character except a newline.
33
+ "^" Matches the start of the string.
34
+ "$" Matches the end of the string or just before the
35
+ newline at the end of the string.
36
+ "*" Matches 0 or more (greedy) repetitions of the preceding
37
+ RE. Greedy means that it will match as many repetitions
38
+ as possible.
39
+ "+" Matches 1 or more (greedy) repetitions of the preceding
40
+ RE.
41
+ "?" Matches 0 or 1 (greedy) of the preceding RE.
42
+ *?,+?,?? Non-greedy versions of the previous three special
43
+ characters.
44
+ *+,++,?+ Possessive versions of the previous three special
45
+ characters.
46
+ {m,n} Matches from m to n repetitions of the preceding RE.
47
+ {m,n}? Non-greedy version of the above.
48
+ {m,n}+ Possessive version of the above.
49
+ {...} Fuzzy matching constraints.
50
+ "\\" Either escapes special characters or signals a special
51
+ sequence.
52
+ [...] Indicates a set of characters. A "^" as the first
53
+ character indicates a complementing set.
54
+ "|" A|B, creates an RE that will match either A or B.
55
+ (...) Matches the RE inside the parentheses. The contents are
56
+ captured and can be retrieved or matched later in the
57
+ string.
58
+ (?flags-flags) VERSION1: Sets/clears the flags for the remainder of
59
+ the group or pattern; VERSION0: Sets the flags for the
60
+ entire pattern.
61
+ (?:...) Non-capturing version of regular parentheses.
62
+ (?>...) Atomic non-capturing version of regular parentheses.
63
+ (?flags-flags:...) Non-capturing version of regular parentheses with local
64
+ flags.
65
+ (?P<name>...) The substring matched by the group is accessible by
66
+ name.
67
+ (?<name>...) The substring matched by the group is accessible by
68
+ name.
69
+ (?P=name) Matches the text matched earlier by the group named
70
+ name.
71
+ (?#...) A comment; ignored.
72
+ (?=...) Matches if ... matches next, but doesn't consume the
73
+ string.
74
+ (?!...) Matches if ... doesn't match next.
75
+ (?<=...) Matches if preceded by ....
76
+ (?<!...) Matches if not preceded by ....
77
+ (?(id)yes|no) Matches yes pattern if group id matched, the (optional)
78
+ no pattern otherwise.
79
+ (?(DEFINE)...) If there's no group called "DEFINE", then ... will be
80
+ ignored, but any group definitions will be available.
81
+ (?|...|...) (?|A|B), creates an RE that will match either A or B,
82
+ but reuses capture group numbers across the
83
+ alternatives.
84
+ (*FAIL) Forces matching to fail, which means immediate
85
+ backtracking.
86
+ (*F) Abbreviation for (*FAIL).
87
+ (*PRUNE) Discards the current backtracking information. Its
88
+ effect doesn't extend outside an atomic group or a
89
+ lookaround.
90
+ (*SKIP) Similar to (*PRUNE), except that it also sets where in
91
+ the text the next attempt at matching the entire
92
+ pattern will start. Its effect doesn't extend outside
93
+ an atomic group or a lookaround.
94
+
95
+ The fuzzy matching constraints are: "i" to permit insertions, "d" to permit
96
+ deletions, "s" to permit substitutions, "e" to permit any of these. Limits are
97
+ optional with "<=" and "<". If any type of error is provided then any type not
98
+ provided is not permitted.
99
+
100
+ A cost equation may be provided.
101
+
102
+ Examples:
103
+ (?:fuzzy){i<=2}
104
+ (?:fuzzy){i<=1,s<=2,d<=1,1i+1s+1d<3}
105
+
106
+ VERSION1: Set operators are supported, and a set can include nested sets. The
107
+ set operators, in order of increasing precedence, are:
108
+ || Set union ("x||y" means "x or y").
109
+ ~~ (double tilde) Symmetric set difference ("x~~y" means "x or y, but not
110
+ both").
111
+ && Set intersection ("x&&y" means "x and y").
112
+ -- (double dash) Set difference ("x--y" means "x but not y").
113
+
114
+ Implicit union, i.e., simple juxtaposition like in [ab], has the highest
115
+ precedence.
116
+
117
+ VERSION0 and VERSION1:
118
+ The special sequences consist of "\\" and a character from the list below. If
119
+ the ordinary character is not on the list, then the resulting RE will match the
120
+ second character.
121
+ \number Matches the contents of the group of the same number if
122
+ number is no more than 2 digits, otherwise the character
123
+ with the 3-digit octal code.
124
+ \a Matches the bell character.
125
+ \A Matches only at the start of the string.
126
+ \b Matches the empty string, but only at the start or end of a
127
+ word.
128
+ \B Matches the empty string, but not at the start or end of a
129
+ word.
130
+ \d Matches any decimal digit; equivalent to the set [0-9] when
131
+ matching a bytestring or a Unicode string with the ASCII
132
+ flag, or the whole range of Unicode digits when matching a
133
+ Unicode string.
134
+ \D Matches any non-digit character; equivalent to [^\d].
135
+ \f Matches the formfeed character.
136
+ \g<name> Matches the text matched by the group named name.
137
+ \G Matches the empty string, but only at the position where
138
+ the search started.
139
+ \h Matches horizontal whitespace.
140
+ \K Keeps only what follows for the entire match.
141
+ \L<name> Named list. The list is provided as a keyword argument.
142
+ \m Matches the empty string, but only at the start of a word.
143
+ \M Matches the empty string, but only at the end of a word.
144
+ \n Matches the newline character.
145
+ \N{name} Matches the named character.
146
+ \p{name=value} Matches the character if its property has the specified
147
+ value.
148
+ \P{name=value} Matches the character if its property hasn't the specified
149
+ value.
150
+ \r Matches the carriage-return character.
151
+ \s Matches any whitespace character; equivalent to
152
+ [ \t\n\r\f\v].
153
+ \S Matches any non-whitespace character; equivalent to [^\s].
154
+ \t Matches the tab character.
155
+ \uXXXX Matches the Unicode codepoint with 4-digit hex code XXXX.
156
+ \UXXXXXXXX Matches the Unicode codepoint with 8-digit hex code
157
+ XXXXXXXX.
158
+ \v Matches the vertical tab character.
159
+ \w Matches any alphanumeric character; equivalent to
160
+ [a-zA-Z0-9_] when matching a bytestring or a Unicode string
161
+ with the ASCII flag, or the whole range of Unicode
162
+ alphanumeric characters (letters plus digits plus
163
+ underscore) when matching a Unicode string. With LOCALE, it
164
+ will match the set [0-9_] plus characters defined as
165
+ letters for the current locale.
166
+ \W Matches the complement of \w; equivalent to [^\w].
167
+ \xXX Matches the character with 2-digit hex code XX.
168
+ \X Matches a grapheme.
169
+ \Z Matches only at the end of the string.
170
+ \\ Matches a literal backslash.
171
+
172
+ This module exports the following functions:
173
+ match Match a regular expression pattern at the beginning of a string.
174
+ fullmatch Match a regular expression pattern against all of a string.
175
+ search Search a string for the presence of a pattern.
176
+ sub Substitute occurrences of a pattern found in a string using a
177
+ template string.
178
+ subf Substitute occurrences of a pattern found in a string using a
179
+ format string.
180
+ subn Same as sub, but also return the number of substitutions made.
181
+ subfn Same as subf, but also return the number of substitutions made.
182
+ split Split a string by the occurrences of a pattern. VERSION1: will
183
+ split at zero-width match; VERSION0: won't split at zero-width
184
+ match.
185
+ splititer Return an iterator yielding the parts of a split string.
186
+ findall Find all occurrences of a pattern in a string.
187
+ finditer Return an iterator yielding a match object for each match.
188
+ compile Compile a pattern into a Pattern object.
189
+ purge Clear the regular expression cache.
190
+ escape Backslash all non-alphanumerics or special characters in a
191
+ string.
192
+
193
+ Most of the functions support a concurrent parameter: if True, the GIL will be
194
+ released during matching, allowing other Python threads to run concurrently. If
195
+ the string changes during matching, the behaviour is undefined. This parameter
196
+ is not needed when working on the builtin (immutable) string classes.
197
+
198
+ Some of the functions in this module take flags as optional parameters. Most of
199
+ these flags can also be set within an RE:
200
+ A a ASCII Make \w, \W, \b, \B, \d, and \D match the
201
+ corresponding ASCII character categories. Default
202
+ when matching a bytestring.
203
+ B b BESTMATCH Find the best fuzzy match (default is first).
204
+ D DEBUG Print the parsed pattern.
205
+ E e ENHANCEMATCH Attempt to improve the fit after finding the first
206
+ fuzzy match.
207
+ F f FULLCASE Use full case-folding when performing
208
+ case-insensitive matching in Unicode.
209
+ I i IGNORECASE Perform case-insensitive matching.
210
+ L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the
211
+ current locale. (One byte per character only.)
212
+ M m MULTILINE "^" matches the beginning of lines (after a newline)
213
+ as well as the string. "$" matches the end of lines
214
+ (before a newline) as well as the end of the string.
215
+ P p POSIX Perform POSIX-standard matching (leftmost longest).
216
+ R r REVERSE Searches backwards.
217
+ S s DOTALL "." matches any character at all, including the
218
+ newline.
219
+ U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the
220
+ Unicode locale. Default when matching a Unicode
221
+ string.
222
+ V0 V0 VERSION0 Turn on the old legacy behaviour.
223
+ V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag
224
+ includes the FULLCASE flag.
225
+ W w WORD Make \b and \B work with default Unicode word breaks
226
+ and make ".", "^" and "$" work with Unicode line
227
+ breaks.
228
+ X x VERBOSE Ignore whitespace and comments for nicer looking REs.
229
+
230
+ This module also defines an exception 'error'.
231
+
232
+ """
233
+
234
+ # Public symbols.
235
+ __all__ = ["cache_all", "compile", "DEFAULT_VERSION", "escape", "findall",
236
+ "finditer", "fullmatch", "match", "purge", "search", "split", "splititer",
237
+ "sub", "subf", "subfn", "subn", "template", "Scanner", "A", "ASCII", "B",
238
+ "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH", "S", "DOTALL", "F",
239
+ "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P", "POSIX",
240
+ "R", "REVERSE", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0", "V1",
241
+ "VERSION1", "X", "VERBOSE", "W", "WORD", "error", "Regex", "__version__",
242
+ "__doc__", "RegexFlag"]
243
+
244
+ __version__ = "2.5.142"
245
+
246
+ # --------------------------------------------------------------------
247
+ # Public interface.
248
+
249
+ def match(pattern, string, flags=0, pos=None, endpos=None, partial=False,
250
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
251
+ """Try to apply the pattern at the start of the string, returning a match
252
+ object, or None if no match was found."""
253
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
254
+ return pat.match(string, pos, endpos, concurrent, partial, timeout)
255
+
256
+ def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False,
257
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
258
+ """Try to apply the pattern against all of the string, returning a match
259
+ object, or None if no match was found."""
260
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
261
+ return pat.fullmatch(string, pos, endpos, concurrent, partial, timeout)
262
+
263
+ def search(pattern, string, flags=0, pos=None, endpos=None, partial=False,
264
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
265
+ """Search through string looking for a match to the pattern, returning a
266
+ match object, or None if no match was found."""
267
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
268
+ return pat.search(string, pos, endpos, concurrent, partial, timeout)
269
+
270
+ def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
271
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
272
+ """Return the string obtained by replacing the leftmost (or rightmost with a
273
+ reverse pattern) non-overlapping occurrences of the pattern in string by the
274
+ replacement repl. repl can be either a string or a callable; if a string,
275
+ backslash escapes in it are processed; if a callable, it's passed the match
276
+ object and must return a replacement string to be used."""
277
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
278
+ return pat.sub(repl, string, count, pos, endpos, concurrent, timeout)
279
+
280
+ def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
281
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
282
+ """Return the string obtained by replacing the leftmost (or rightmost with a
283
+ reverse pattern) non-overlapping occurrences of the pattern in string by the
284
+ replacement format. format can be either a string or a callable; if a string,
285
+ it's treated as a format string; if a callable, it's passed the match object
286
+ and must return a replacement string to be used."""
287
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
288
+ return pat.subf(format, string, count, pos, endpos, concurrent, timeout)
289
+
290
+ def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
291
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
292
+ """Return a 2-tuple containing (new_string, number). new_string is the string
293
+ obtained by replacing the leftmost (or rightmost with a reverse pattern)
294
+ non-overlapping occurrences of the pattern in the source string by the
295
+ replacement repl. number is the number of substitutions that were made. repl
296
+ can be either a string or a callable; if a string, backslash escapes in it
297
+ are processed; if a callable, it's passed the match object and must return a
298
+ replacement string to be used."""
299
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
300
+ return pat.subn(repl, string, count, pos, endpos, concurrent, timeout)
301
+
302
+ def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
303
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
304
+ """Return a 2-tuple containing (new_string, number). new_string is the string
305
+ obtained by replacing the leftmost (or rightmost with a reverse pattern)
306
+ non-overlapping occurrences of the pattern in the source string by the
307
+ replacement format. number is the number of substitutions that were made. format
308
+ can be either a string or a callable; if a string, it's treated as a format
309
+ string; if a callable, it's passed the match object and must return a
310
+ replacement string to be used."""
311
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
312
+ return pat.subfn(format, string, count, pos, endpos, concurrent, timeout)
313
+
314
+ def split(pattern, string, maxsplit=0, flags=0, concurrent=None, timeout=None,
315
+ ignore_unused=False, **kwargs):
316
+ """Split the source string by the occurrences of the pattern, returning a
317
+ list containing the resulting substrings. If capturing parentheses are used
318
+ in pattern, then the text of all groups in the pattern are also returned as
319
+ part of the resulting list. If maxsplit is nonzero, at most maxsplit splits
320
+ occur, and the remainder of the string is returned as the final element of
321
+ the list."""
322
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
323
+ return pat.split(string, maxsplit, concurrent, timeout)
324
+
325
+ def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None,
326
+ timeout=None, ignore_unused=False, **kwargs):
327
+ "Return an iterator yielding the parts of a split string."
328
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
329
+ return pat.splititer(string, maxsplit, concurrent, timeout)
330
+
331
+ def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
332
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
333
+ """Return a list of all matches in the string. The matches may be overlapped
334
+ if overlapped is True. If one or more groups are present in the pattern,
335
+ return a list of groups; this will be a list of tuples if the pattern has
336
+ more than one group. Empty matches are included in the result."""
337
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
338
+ return pat.findall(string, pos, endpos, overlapped, concurrent, timeout)
339
+
340
+ def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
341
+ partial=False, concurrent=None, timeout=None, ignore_unused=False, **kwargs):
342
+ """Return an iterator over all matches in the string. The matches may be
343
+ overlapped if overlapped is True. For each match, the iterator returns a
344
+ match object. Empty matches are included in the result."""
345
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
346
+ return pat.finditer(string, pos, endpos, overlapped, concurrent, partial,
347
+ timeout)
348
+
349
+ def compile(pattern, flags=0, ignore_unused=False, cache_pattern=None, **kwargs):
350
+ "Compile a regular expression pattern, returning a pattern object."
351
+ if cache_pattern is None:
352
+ cache_pattern = _cache_all
353
+ return _compile(pattern, flags, ignore_unused, kwargs, cache_pattern)
354
+
355
+ def purge():
356
+ "Clear the regular expression cache"
357
+ _cache.clear()
358
+ _locale_sensitive.clear()
359
+
360
+ # Whether to cache all patterns.
361
+ _cache_all = True
362
+
363
+ def cache_all(value=True):
364
+ """Sets whether to cache all patterns, even those are compiled explicitly.
365
+ Passing None has no effect, but returns the current setting."""
366
+ global _cache_all
367
+
368
+ if value is None:
369
+ return _cache_all
370
+
371
+ _cache_all = value
372
+
373
+ def template(pattern, flags=0):
374
+ "Compile a template pattern, returning a pattern object."
375
+ return _compile(pattern, flags | TEMPLATE, False, {}, False)
376
+
377
+ def escape(pattern, special_only=True, literal_spaces=False):
378
+ """Escape a string for use as a literal in a pattern. If special_only is
379
+ True, escape only special characters, else escape all non-alphanumeric
380
+ characters. If literal_spaces is True, don't escape spaces."""
381
+ # Convert it to Unicode.
382
+ if isinstance(pattern, bytes):
383
+ p = pattern.decode("latin-1")
384
+ else:
385
+ p = pattern
386
+
387
+ s = []
388
+ if special_only:
389
+ for c in p:
390
+ if c == " " and literal_spaces:
391
+ s.append(c)
392
+ elif c in _METACHARS or c.isspace():
393
+ s.append("\\")
394
+ s.append(c)
395
+ else:
396
+ s.append(c)
397
+ else:
398
+ for c in p:
399
+ if c == " " and literal_spaces:
400
+ s.append(c)
401
+ elif c in _ALNUM:
402
+ s.append(c)
403
+ else:
404
+ s.append("\\")
405
+ s.append(c)
406
+
407
+ r = "".join(s)
408
+ # Convert it back to bytes if necessary.
409
+ if isinstance(pattern, bytes):
410
+ r = r.encode("latin-1")
411
+
412
+ return r
413
+
414
+ # --------------------------------------------------------------------
415
+ # Internals.
416
+
417
+ import regex._regex_core as _regex_core
418
+ import regex._regex as _regex
419
+ from threading import RLock as _RLock
420
+ from locale import getpreferredencoding as _getpreferredencoding
421
+ from regex._regex_core import *
422
+ from regex._regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError,
423
+ _UnscopedFlagSet, _check_group_features, _compile_firstset,
424
+ _compile_replacement, _flatten_code, _fold_case, _get_required_string,
425
+ _parse_pattern, _shrink_cache)
426
+ from regex._regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source
427
+ as _Source, Fuzzy as _Fuzzy)
428
+
429
+ # Version 0 is the old behaviour, compatible with the original 're' module.
430
+ # Version 1 is the new behaviour, which differs slightly.
431
+
432
+ DEFAULT_VERSION = VERSION0
433
+
434
+ _METACHARS = frozenset("()[]{}?*+|^$\\.-#&~")
435
+
436
+ _regex_core.DEFAULT_VERSION = DEFAULT_VERSION
437
+
438
+ # Caches for the patterns and replacements.
439
+ _cache = {}
440
+ _cache_lock = _RLock()
441
+ _named_args = {}
442
+ _replacement_cache = {}
443
+ _locale_sensitive = {}
444
+
445
+ # Maximum size of the cache.
446
+ _MAXCACHE = 500
447
+ _MAXREPCACHE = 500
448
+
449
+ def _compile(pattern, flags, ignore_unused, kwargs, cache_it):
450
+ "Compiles a regular expression to a PatternObject."
451
+
452
+ global DEFAULT_VERSION
453
+ try:
454
+ from regex import DEFAULT_VERSION
455
+ except ImportError:
456
+ pass
457
+
458
+ # We won't bother to cache the pattern if we're debugging.
459
+ if (flags & DEBUG) != 0:
460
+ cache_it = False
461
+
462
+ # What locale is this pattern using?
463
+ locale_key = (type(pattern), pattern)
464
+ if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0:
465
+ # This pattern is, or might be, locale-sensitive.
466
+ pattern_locale = _getpreferredencoding()
467
+ else:
468
+ # This pattern is definitely not locale-sensitive.
469
+ pattern_locale = None
470
+
471
+ def complain_unused_args():
472
+ if ignore_unused:
473
+ return
474
+
475
+ # Complain about any unused keyword arguments, possibly resulting from a typo.
476
+ unused_kwargs = set(kwargs) - {k for k, v in args_needed}
477
+ if unused_kwargs:
478
+ any_one = next(iter(unused_kwargs))
479
+ raise ValueError('unused keyword argument {!a}'.format(any_one))
480
+
481
+ if cache_it:
482
+ try:
483
+ # Do we know what keyword arguments are needed?
484
+ args_key = pattern, type(pattern), flags
485
+ args_needed = _named_args[args_key]
486
+
487
+ # Are we being provided with its required keyword arguments?
488
+ args_supplied = set()
489
+ if args_needed:
490
+ for k, v in args_needed:
491
+ try:
492
+ args_supplied.add((k, frozenset(kwargs[k])))
493
+ except KeyError:
494
+ raise error("missing named list: {!r}".format(k))
495
+
496
+ complain_unused_args()
497
+
498
+ args_supplied = frozenset(args_supplied)
499
+
500
+ # Have we already seen this regular expression and named list?
501
+ pattern_key = (pattern, type(pattern), flags, args_supplied,
502
+ DEFAULT_VERSION, pattern_locale)
503
+ return _cache[pattern_key]
504
+ except KeyError:
505
+ # It's a new pattern, or new named list for a known pattern.
506
+ pass
507
+
508
+ # Guess the encoding from the class of the pattern string.
509
+ if isinstance(pattern, str):
510
+ guess_encoding = UNICODE
511
+ elif isinstance(pattern, bytes):
512
+ guess_encoding = ASCII
513
+ elif isinstance(pattern, Pattern):
514
+ if flags:
515
+ raise ValueError("cannot process flags argument with a compiled pattern")
516
+
517
+ return pattern
518
+ else:
519
+ raise TypeError("first argument must be a string or compiled pattern")
520
+
521
+ # Set the default version in the core code in case it has been changed.
522
+ _regex_core.DEFAULT_VERSION = DEFAULT_VERSION
523
+
524
+ global_flags = flags
525
+
526
+ while True:
527
+ caught_exception = None
528
+ try:
529
+ source = _Source(pattern)
530
+ info = _Info(global_flags, source.char_type, kwargs)
531
+ info.guess_encoding = guess_encoding
532
+ source.ignore_space = bool(info.flags & VERBOSE)
533
+ parsed = _parse_pattern(source, info)
534
+ break
535
+ except _UnscopedFlagSet:
536
+ # Remember the global flags for the next attempt.
537
+ global_flags = info.global_flags
538
+ except error as e:
539
+ caught_exception = e
540
+
541
+ if caught_exception:
542
+ raise error(caught_exception.msg, caught_exception.pattern,
543
+ caught_exception.pos)
544
+
545
+ if not source.at_end():
546
+ raise error("unbalanced parenthesis", pattern, source.pos)
547
+
548
+ # Check the global flags for conflicts.
549
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
550
+ if version not in (0, VERSION0, VERSION1):
551
+ raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
552
+
553
+ if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE):
554
+ raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible")
555
+
556
+ if isinstance(pattern, bytes) and (info.flags & UNICODE):
557
+ raise ValueError("cannot use UNICODE flag with a bytes pattern")
558
+
559
+ if not (info.flags & _ALL_ENCODINGS):
560
+ if isinstance(pattern, str):
561
+ info.flags |= UNICODE
562
+ else:
563
+ info.flags |= ASCII
564
+
565
+ reverse = bool(info.flags & REVERSE)
566
+ fuzzy = isinstance(parsed, _Fuzzy)
567
+
568
+ # Remember whether this pattern has an inline locale flag.
569
+ _locale_sensitive[locale_key] = info.inline_locale
570
+
571
+ # Fix the group references.
572
+ caught_exception = None
573
+ try:
574
+ parsed.fix_groups(pattern, reverse, False)
575
+ except error as e:
576
+ caught_exception = e
577
+
578
+ if caught_exception:
579
+ raise error(caught_exception.msg, caught_exception.pattern,
580
+ caught_exception.pos)
581
+
582
+ # Should we print the parsed pattern?
583
+ if flags & DEBUG:
584
+ parsed.dump(indent=0, reverse=reverse)
585
+
586
+ # Optimise the parsed pattern.
587
+ parsed = parsed.optimise(info, reverse)
588
+ parsed = parsed.pack_characters(info)
589
+
590
+ # Get the required string.
591
+ req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags)
592
+
593
+ # Build the named lists.
594
+ named_lists = {}
595
+ named_list_indexes = [None] * len(info.named_lists_used)
596
+ args_needed = set()
597
+ for key, index in info.named_lists_used.items():
598
+ name, case_flags = key
599
+ values = frozenset(kwargs[name])
600
+ if case_flags:
601
+ items = frozenset(_fold_case(info, v) for v in values)
602
+ else:
603
+ items = values
604
+ named_lists[name] = values
605
+ named_list_indexes[index] = items
606
+ args_needed.add((name, values))
607
+
608
+ complain_unused_args()
609
+
610
+ # Check the features of the groups.
611
+ _check_group_features(info, parsed)
612
+
613
+ # Compile the parsed pattern. The result is a list of tuples.
614
+ code = parsed.compile(reverse)
615
+
616
+ # Is there a group call to the pattern as a whole?
617
+ key = (0, reverse, fuzzy)
618
+ ref = info.call_refs.get(key)
619
+ if ref is not None:
620
+ code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )]
621
+
622
+ # Add the final 'success' opcode.
623
+ code += [(_OP.SUCCESS, )]
624
+
625
+ # Compile the additional copies of the groups that we need.
626
+ for group, rev, fuz in info.additional_groups:
627
+ code += group.compile(rev, fuz)
628
+
629
+ # Flatten the code into a list of ints.
630
+ code = _flatten_code(code)
631
+
632
+ if not parsed.has_simple_start():
633
+ # Get the first set, if possible.
634
+ try:
635
+ fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
636
+ fs_code = _flatten_code(fs_code)
637
+ code = fs_code + code
638
+ except _FirstSetError:
639
+ pass
640
+
641
+ # The named capture groups.
642
+ index_group = dict((v, n) for n, v in info.group_index.items())
643
+
644
+ # Create the PatternObject.
645
+ #
646
+ # Local flags like IGNORECASE affect the code generation, but aren't needed
647
+ # by the PatternObject itself. Conversely, global flags like LOCALE _don't_
648
+ # affect the code generation but _are_ needed by the PatternObject.
649
+ compiled_pattern = _regex.compile(pattern, info.flags | version, code,
650
+ info.group_index, index_group, named_lists, named_list_indexes,
651
+ req_offset, req_chars, req_flags, info.group_count)
652
+
653
+ # Do we need to reduce the size of the cache?
654
+ if len(_cache) >= _MAXCACHE:
655
+ with _cache_lock:
656
+ _shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE)
657
+
658
+ if cache_it:
659
+ if (info.flags & LOCALE) == 0:
660
+ pattern_locale = None
661
+
662
+ args_needed = frozenset(args_needed)
663
+
664
+ # Store this regular expression and named list.
665
+ pattern_key = (pattern, type(pattern), flags, args_needed,
666
+ DEFAULT_VERSION, pattern_locale)
667
+ _cache[pattern_key] = compiled_pattern
668
+
669
+ # Store what keyword arguments are needed.
670
+ _named_args[args_key] = args_needed
671
+
672
+ return compiled_pattern
673
+
674
+ def _compile_replacement_helper(pattern, template):
675
+ "Compiles a replacement template."
676
+ # This function is called by the _regex module.
677
+
678
+ # Have we seen this before?
679
+ key = pattern.pattern, pattern.flags, template
680
+ compiled = _replacement_cache.get(key)
681
+ if compiled is not None:
682
+ return compiled
683
+
684
+ if len(_replacement_cache) >= _MAXREPCACHE:
685
+ _replacement_cache.clear()
686
+
687
+ is_unicode = isinstance(template, str)
688
+ source = _Source(template)
689
+ if is_unicode:
690
+ def make_string(char_codes):
691
+ return "".join(chr(c) for c in char_codes)
692
+ else:
693
+ def make_string(char_codes):
694
+ return bytes(char_codes)
695
+
696
+ compiled = []
697
+ literal = []
698
+ while True:
699
+ ch = source.get()
700
+ if not ch:
701
+ break
702
+ if ch == "\\":
703
+ # '_compile_replacement' will return either an int group reference
704
+ # or a string literal. It returns items (plural) in order to handle
705
+ # a 2-character literal (an invalid escape sequence).
706
+ is_group, items = _compile_replacement(source, pattern, is_unicode)
707
+ if is_group:
708
+ # It's a group, so first flush the literal.
709
+ if literal:
710
+ compiled.append(make_string(literal))
711
+ literal = []
712
+ compiled.extend(items)
713
+ else:
714
+ literal.extend(items)
715
+ else:
716
+ literal.append(ord(ch))
717
+
718
+ # Flush the literal.
719
+ if literal:
720
+ compiled.append(make_string(literal))
721
+
722
+ _replacement_cache[key] = compiled
723
+
724
+ return compiled
725
+
726
+ # We define Pattern here after all the support objects have been defined.
727
+ _pat = _compile('', 0, False, {}, False)
728
+ Pattern = type(_pat)
729
+ Match = type(_pat.match(''))
730
+ del _pat
731
+
732
+ # Make Pattern public for typing annotations.
733
+ __all__.append("Pattern")
734
+ __all__.append("Match")
735
+
736
+ # We'll define an alias for the 'compile' function so that the repr of a
737
+ # pattern object is eval-able.
738
+ Regex = compile
739
+
740
+ # Register myself for pickling.
741
+ import copyreg as _copy_reg
742
+
743
+ def _pickle(pattern):
744
+ return _regex.compile, pattern._pickled_data
745
+
746
+ _copy_reg.pickle(Pattern, _pickle)
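The module docstring above documents fuzzy matching, named lists and VERSION1 set operations; a minimal usage sketch exercising those features (the sample patterns and strings are illustrative, and the fuzzy_counts attribute comes from the regex package's match objects rather than from the code shown above):

    import regex

    # Fuzzy matching per the {...} constraints: allow at most one error of
    # any kind (insertion, deletion or substitution).
    m = regex.fullmatch(r"(?:colour){e<=1}", "color")
    print(m.fuzzy_counts)  # (substitutions, insertions, deletions)

    # Named list via \L<name>; the list is supplied as a keyword argument.
    m = regex.search(r"\L<fruit>", "one apple, please", fruit=["apple", "pear"])
    print(m.group())  # 'apple'

    # VERSION1 set difference: lowercase letters that are not vowels.
    print(regex.findall(r"(?V1)[[a-z]--[aeiou]]", "hello"))  # ['h', 'l', 'l']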
llmeval-env/lib/python3.10/site-packages/regex/test_regex.py ADDED
The diff for this file is too large to render.
llmeval-env/lib/python3.10/site-packages/sklearn/__init__.py ADDED
@@ -0,0 +1,157 @@
1
+ """
2
+ The :mod:`sklearn` module includes functions to configure global settings and
3
+ get information about the working environment.
4
+ """
5
+
6
+ # Machine learning module for Python
7
+ # ==================================
8
+ #
9
+ # sklearn is a Python module integrating classical machine
10
+ # learning algorithms in the tightly-knit world of scientific Python
11
+ # packages (numpy, scipy, matplotlib).
12
+ #
13
+ # It aims to provide simple and efficient solutions to learning problems
14
+ # that are accessible to everybody and reusable in various contexts:
15
+ # machine-learning as a versatile tool for science and engineering.
16
+ #
17
+ # See https://scikit-learn.org for complete documentation.
18
+
19
+ import logging
20
+ import os
21
+ import random
22
+ import sys
23
+
24
+ from ._config import config_context, get_config, set_config
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
+ # PEP0440 compatible formatted version, see:
30
+ # https://www.python.org/dev/peps/pep-0440/
31
+ #
32
+ # Generic release markers:
33
+ # X.Y.0 # For first release after an increment in Y
34
+ # X.Y.Z # For bugfix releases
35
+ #
36
+ # Admissible pre-release markers:
37
+ # X.Y.ZaN # Alpha release
38
+ # X.Y.ZbN # Beta release
39
+ # X.Y.ZrcN # Release Candidate
40
+ # X.Y.Z # Final release
41
+ #
42
+ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
43
+ # 'X.Y.dev0' is the canonical version of 'X.Y.dev'
44
+ #
45
+ __version__ = "1.4.2"
46
+
47
+
48
+ # On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
49
+ # simultaneously. This can happen for instance when calling BLAS inside a
50
+ # prange. Setting the following environment variable allows multiple OpenMP
51
+ # libraries to be loaded. It should not degrade performance since we manually
52
+ # take care of potential over-subscription performance issues, in sections of
53
+ # the code where nested OpenMP loops can happen, by dynamically reconfiguring
54
+ # the inner OpenMP runtime to temporarily disable it while under the scope of
55
+ # the outer OpenMP parallel section.
56
+ os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
57
+
58
+ # Workaround issue discovered in intel-openmp 2019.5:
59
+ # https://github.com/ContinuumIO/anaconda-issues/issues/11294
60
+ os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
61
+
62
+ try:
63
+ # This variable is injected in the __builtins__ by the build
64
+ # process. It is used to enable importing subpackages of sklearn when
65
+ # the binaries are not built
66
+ # mypy error: Cannot determine type of '__SKLEARN_SETUP__'
67
+ __SKLEARN_SETUP__ # type: ignore
68
+ except NameError:
69
+ __SKLEARN_SETUP__ = False
70
+
71
+ if __SKLEARN_SETUP__:
72
+ sys.stderr.write("Partial import of sklearn during the build process.\n")
73
+ # We are not importing the rest of scikit-learn during the build
74
+ # process, as it may not be compiled yet
75
+ else:
76
+ # `_distributor_init` allows distributors to run custom init code.
77
+ # For instance, for the Windows wheel, this is used to pre-load the
78
+ # vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
79
+ # sub-folder.
80
+ # It is necessary to do this prior to importing show_versions as the
81
+ # latter is linked to the OpenMP runtime to make it possible to introspect
82
+ # it and importing it first would fail if the OpenMP dll cannot be found.
83
+ from . import (
84
+ __check_build, # noqa: F401
85
+ _distributor_init, # noqa: F401
86
+ )
87
+ from .base import clone
88
+ from .utils._show_versions import show_versions
89
+
90
+ __all__ = [
91
+ "calibration",
92
+ "cluster",
93
+ "covariance",
94
+ "cross_decomposition",
95
+ "datasets",
96
+ "decomposition",
97
+ "dummy",
98
+ "ensemble",
99
+ "exceptions",
100
+ "experimental",
101
+ "externals",
102
+ "feature_extraction",
103
+ "feature_selection",
104
+ "gaussian_process",
105
+ "inspection",
106
+ "isotonic",
107
+ "kernel_approximation",
108
+ "kernel_ridge",
109
+ "linear_model",
110
+ "manifold",
111
+ "metrics",
112
+ "mixture",
113
+ "model_selection",
114
+ "multiclass",
115
+ "multioutput",
116
+ "naive_bayes",
117
+ "neighbors",
118
+ "neural_network",
119
+ "pipeline",
120
+ "preprocessing",
121
+ "random_projection",
122
+ "semi_supervised",
123
+ "svm",
124
+ "tree",
125
+ "discriminant_analysis",
126
+ "impute",
127
+ "compose",
128
+ # Non-modules:
129
+ "clone",
130
+ "get_config",
131
+ "set_config",
132
+ "config_context",
133
+ "show_versions",
134
+ ]
135
+
136
+ _BUILT_WITH_MESON = False
137
+ try:
138
+ import sklearn._built_with_meson # noqa: F401
139
+
140
+ _BUILT_WITH_MESON = True
141
+ except ModuleNotFoundError:
142
+ pass
143
+
144
+
145
+ def setup_module(module):
146
+ """Fixture for the tests to assure globally controllable seeding of RNGs"""
147
+
148
+ import numpy as np
149
+
150
+ # Check if a random seed exists in the environment, if not create one.
151
+ _random_seed = os.environ.get("SKLEARN_SEED", None)
152
+ if _random_seed is None:
153
+ _random_seed = np.random.uniform() * np.iinfo(np.int32).max
154
+ _random_seed = int(_random_seed)
155
+ print("I: Seeding RNGs with %r" % _random_seed)
156
+ np.random.seed(_random_seed)
157
+ random.seed(_random_seed)
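sklearn/__init__.py re-exports config_context, get_config and set_config from ._config (added next); a minimal usage sketch, assuming scikit-learn 1.4.2 is importable:

    import sklearn

    # set_config mutates the thread-local copy of the global configuration.
    sklearn.set_config(assume_finite=True)
    print(sklearn.get_config()["assume_finite"])  # True

    # config_context applies a setting only inside the with-block and
    # restores the previous value on exit.
    with sklearn.config_context(working_memory=512):
        print(sklearn.get_config()["working_memory"])  # 512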
llmeval-env/lib/python3.10/site-packages/sklearn/_config.py ADDED
@@ -0,0 +1,373 @@
1
+ """Global configuration state and functions for management
2
+ """
3
+ import os
4
+ import threading
5
+ from contextlib import contextmanager as contextmanager
6
+
7
+ _global_config = {
8
+ "assume_finite": bool(os.environ.get("SKLEARN_ASSUME_FINITE", False)),
9
+ "working_memory": int(os.environ.get("SKLEARN_WORKING_MEMORY", 1024)),
10
+ "print_changed_only": True,
11
+ "display": "diagram",
12
+ "pairwise_dist_chunk_size": int(
13
+ os.environ.get("SKLEARN_PAIRWISE_DIST_CHUNK_SIZE", 256)
14
+ ),
15
+ "enable_cython_pairwise_dist": True,
16
+ "array_api_dispatch": False,
17
+ "transform_output": "default",
18
+ "enable_metadata_routing": False,
19
+ "skip_parameter_validation": False,
20
+ }
21
+ _threadlocal = threading.local()
22
+
23
+
24
+ def _get_threadlocal_config():
25
+ """Get a threadlocal **mutable** configuration. If the configuration
26
+ does not exist, copy the default global configuration."""
27
+ if not hasattr(_threadlocal, "global_config"):
28
+ _threadlocal.global_config = _global_config.copy()
29
+ return _threadlocal.global_config
30
+
31
+
32
+ def get_config():
33
+ """Retrieve current values for configuration set by :func:`set_config`.
34
+
35
+ Returns
36
+ -------
37
+ config : dict
38
+ Keys are parameter names that can be passed to :func:`set_config`.
39
+
40
+ See Also
41
+ --------
42
+ config_context : Context manager for global scikit-learn configuration.
43
+ set_config : Set global scikit-learn configuration.
44
+
45
+ Examples
46
+ --------
47
+ >>> import sklearn
48
+ >>> config = sklearn.get_config()
49
+ >>> config.keys()
50
+ dict_keys([...])
51
+ """
52
+ # Return a copy of the threadlocal configuration so that users will
53
+ # not be able to modify the configuration with the returned dict.
54
+ return _get_threadlocal_config().copy()
55
+
56
+
57
+ def set_config(
58
+ assume_finite=None,
59
+ working_memory=None,
60
+ print_changed_only=None,
61
+ display=None,
62
+ pairwise_dist_chunk_size=None,
63
+ enable_cython_pairwise_dist=None,
64
+ array_api_dispatch=None,
65
+ transform_output=None,
66
+ enable_metadata_routing=None,
67
+ skip_parameter_validation=None,
68
+ ):
69
+ """Set global scikit-learn configuration.
70
+
71
+ .. versionadded:: 0.19
72
+
73
+ Parameters
74
+ ----------
75
+ assume_finite : bool, default=None
76
+ If True, validation for finiteness will be skipped,
77
+ saving time, but leading to potential crashes. If
78
+ False, validation for finiteness will be performed,
79
+ avoiding error. Global default: False.
80
+
81
+ .. versionadded:: 0.19
82
+
83
+ working_memory : int, default=None
84
+ If set, scikit-learn will attempt to limit the size of temporary arrays
85
+ to this number of MiB (per job when parallelised), often saving both
86
+ computation time and memory on expensive operations that can be
87
+ performed in chunks. Global default: 1024.
88
+
89
+ .. versionadded:: 0.20
90
+
91
+ print_changed_only : bool, default=None
92
+ If True, only the parameters that were set to non-default
93
+ values will be printed when printing an estimator. For example,
94
+ ``print(SVC())`` while True will only print 'SVC()' while the default
95
+ behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with
96
+ all the non-changed parameters.
97
+
98
+ .. versionadded:: 0.21
99
+
100
+ display : {'text', 'diagram'}, default=None
101
+ If 'diagram', estimators will be displayed as a diagram in a Jupyter
102
+ lab or notebook context. If 'text', estimators will be displayed as
103
+ text. Default is 'diagram'.
104
+
105
+ .. versionadded:: 0.23
106
+
107
+ pairwise_dist_chunk_size : int, default=None
108
+ The number of row vectors per chunk for the accelerated pairwise-
109
+ distances reduction backend. Default is 256 (suitable for most of
110
+ modern laptops' caches and architectures).
111
+
112
+ Intended for easier benchmarking and testing of scikit-learn internals.
113
+ End users are not expected to benefit from customizing this configuration
114
+ setting.
115
+
116
+ .. versionadded:: 1.1
117
+
118
+ enable_cython_pairwise_dist : bool, default=None
119
+ Use the accelerated pairwise-distances reduction backend when
120
+ possible. Global default: True.
121
+
122
+ Intended for easier benchmarking and testing of scikit-learn internals.
123
+ End users are not expected to benefit from customizing this configuration
124
+ setting.
125
+
126
+ .. versionadded:: 1.1
127
+
128
+ array_api_dispatch : bool, default=None
129
+ Use Array API dispatching when inputs follow the Array API standard.
130
+ Default is False.
131
+
132
+ See the :ref:`User Guide <array_api>` for more details.
133
+
134
+ .. versionadded:: 1.2
135
+
136
+ transform_output : str, default=None
137
+ Configure output of `transform` and `fit_transform`.
138
+
139
+ See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
140
+ for an example on how to use the API.
141
+
142
+ - `"default"`: Default output format of a transformer
143
+ - `"pandas"`: DataFrame output
144
+ - `"polars"`: Polars output
145
+ - `None`: Transform configuration is unchanged
146
+
147
+ .. versionadded:: 1.2
148
+ .. versionadded:: 1.4
149
+ `"polars"` option was added.
150
+
151
+ enable_metadata_routing : bool, default=None
152
+ Enable metadata routing. By default this feature is disabled.
153
+
154
+ Refer to :ref:`metadata routing user guide <metadata_routing>` for more
155
+ details.
156
+
157
+ - `True`: Metadata routing is enabled
158
+ - `False`: Metadata routing is disabled, use the old syntax.
159
+ - `None`: Configuration is unchanged
160
+
161
+ .. versionadded:: 1.3
162
+
163
+ skip_parameter_validation : bool, default=None
164
+ If `True`, disable the validation of the hyper-parameters' types and values in
165
+ the fit method of estimators and for arguments passed to public helper
166
+ functions. It can save time in some situations but can lead to low level
167
+ crashes and exceptions with confusing error messages.
168
+
169
+ Note that for data parameters, such as `X` and `y`, only type validation is
170
+ skipped but validation with `check_array` will continue to run.
171
+
172
+ .. versionadded:: 1.3
173
+
174
+ See Also
175
+ --------
176
+ config_context : Context manager for global scikit-learn configuration.
177
+ get_config : Retrieve current values of the global configuration.
178
+
179
+ Examples
180
+ --------
181
+ >>> from sklearn import set_config
182
+ >>> set_config(display='diagram') # doctest: +SKIP
183
+ """
184
+ local_config = _get_threadlocal_config()
185
+
186
+ if assume_finite is not None:
187
+ local_config["assume_finite"] = assume_finite
188
+ if working_memory is not None:
189
+ local_config["working_memory"] = working_memory
190
+ if print_changed_only is not None:
191
+ local_config["print_changed_only"] = print_changed_only
192
+ if display is not None:
193
+ local_config["display"] = display
194
+ if pairwise_dist_chunk_size is not None:
195
+ local_config["pairwise_dist_chunk_size"] = pairwise_dist_chunk_size
196
+ if enable_cython_pairwise_dist is not None:
197
+ local_config["enable_cython_pairwise_dist"] = enable_cython_pairwise_dist
198
+ if array_api_dispatch is not None:
199
+ from .utils._array_api import _check_array_api_dispatch
200
+
201
+ _check_array_api_dispatch(array_api_dispatch)
202
+ local_config["array_api_dispatch"] = array_api_dispatch
203
+ if transform_output is not None:
204
+ local_config["transform_output"] = transform_output
205
+ if enable_metadata_routing is not None:
206
+ local_config["enable_metadata_routing"] = enable_metadata_routing
207
+ if skip_parameter_validation is not None:
208
+ local_config["skip_parameter_validation"] = skip_parameter_validation
209
+
210
+
211
+ @contextmanager
212
+ def config_context(
213
+ *,
214
+ assume_finite=None,
215
+ working_memory=None,
216
+ print_changed_only=None,
217
+ display=None,
218
+ pairwise_dist_chunk_size=None,
219
+ enable_cython_pairwise_dist=None,
220
+ array_api_dispatch=None,
221
+ transform_output=None,
222
+ enable_metadata_routing=None,
223
+ skip_parameter_validation=None,
224
+ ):
225
+ """Context manager for global scikit-learn configuration.
226
+
227
+ Parameters
228
+ ----------
229
+ assume_finite : bool, default=None
230
+ If True, validation for finiteness will be skipped,
231
+ saving time, but leading to potential crashes. If
232
+ False, validation for finiteness will be performed,
233
+ avoiding error. If None, the existing value won't change.
234
+ The default value is False.
235
+
236
+ working_memory : int, default=None
237
+ If set, scikit-learn will attempt to limit the size of temporary arrays
238
+ to this number of MiB (per job when parallelised), often saving both
239
+ computation time and memory on expensive operations that can be
240
+ performed in chunks. If None, the existing value won't change.
241
+ The default value is 1024.
242
+
243
+ print_changed_only : bool, default=None
244
+ If True, only the parameters that were set to non-default
245
+ values will be printed when printing an estimator. For example,
246
+ ``print(SVC())`` while True will only print 'SVC()', but would print
247
+ 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters
248
+ when False. If None, the existing value won't change.
249
+ The default value is True.
250
+
251
+ .. versionchanged:: 0.23
252
+ Default changed from False to True.
253
+
254
+ display : {'text', 'diagram'}, default=None
255
+ If 'diagram', estimators will be displayed as a diagram in a Jupyter
256
+ lab or notebook context. If 'text', estimators will be displayed as
257
+ text. If None, the existing value won't change.
258
+ The default value is 'diagram'.
259
+
260
+ .. versionadded:: 0.23
261
+
262
+ pairwise_dist_chunk_size : int, default=None
263
+ The number of row vectors per chunk for the accelerated pairwise-
264
+ distances reduction backend. Default is 256 (suitable for most of
265
+ modern laptops' caches and architectures).
266
+
267
+ Intended for easier benchmarking and testing of scikit-learn internals.
268
+ End users are not expected to benefit from customizing this configuration
269
+ setting.
270
+
271
+ .. versionadded:: 1.1
272
+
273
+ enable_cython_pairwise_dist : bool, default=None
274
+ Use the accelerated pairwise-distances reduction backend when
275
+ possible. Global default: True.
276
+
277
+ Intended for easier benchmarking and testing of scikit-learn internals.
278
+ End users are not expected to benefit from customizing this configuration
279
+ setting.
280
+
281
+ .. versionadded:: 1.1
282
+
283
+ array_api_dispatch : bool, default=None
284
+ Use Array API dispatching when inputs follow the Array API standard.
285
+ Default is False.
286
+
287
+ See the :ref:`User Guide <array_api>` for more details.
288
+
289
+ .. versionadded:: 1.2
290
+
291
+ transform_output : str, default=None
292
+ Configure output of `transform` and `fit_transform`.
293
+
294
+ See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
295
+ for an example on how to use the API.
296
+
297
+ - `"default"`: Default output format of a transformer
298
+ - `"pandas"`: DataFrame output
299
+ - `"polars"`: Polars output
300
+ - `None`: Transform configuration is unchanged
301
+
302
+ .. versionadded:: 1.2
303
+ .. versionadded:: 1.4
304
+ `"polars"` option was added.
305
+
306
+ enable_metadata_routing : bool, default=None
307
+ Enable metadata routing. By default this feature is disabled.
308
+
309
+ Refer to :ref:`metadata routing user guide <metadata_routing>` for more
310
+ details.
311
+
312
+ - `True`: Metadata routing is enabled
313
+ - `False`: Metadata routing is disabled, use the old syntax.
314
+ - `None`: Configuration is unchanged
315
+
316
+ .. versionadded:: 1.3
317
+
318
+ skip_parameter_validation : bool, default=None
319
+ If `True`, disable the validation of the hyper-parameters' types and values in
320
+ the fit method of estimators and for arguments passed to public helper
321
+ functions. It can save time in some situations but can lead to low level
322
+ crashes and exceptions with confusing error messages.
323
+
324
+ Note that for data parameters, such as `X` and `y`, only type validation is
325
+ skipped but validation with `check_array` will continue to run.
326
+
327
+ .. versionadded:: 1.3
328
+
329
+ Yields
330
+ ------
331
+ None.
332
+
333
+ See Also
334
+ --------
335
+ set_config : Set global scikit-learn configuration.
336
+ get_config : Retrieve current values of the global configuration.
337
+
338
+ Notes
339
+ -----
340
+ All settings, not just those presently modified, will be returned to
341
+ their previous values when the context manager is exited.
342
+
343
+ Examples
344
+ --------
345
+ >>> import sklearn
346
+ >>> from sklearn.utils.validation import assert_all_finite
347
+ >>> with sklearn.config_context(assume_finite=True):
348
+ ... assert_all_finite([float('nan')])
349
+ >>> with sklearn.config_context(assume_finite=True):
350
+ ... with sklearn.config_context(assume_finite=False):
351
+ ... assert_all_finite([float('nan')])
352
+ Traceback (most recent call last):
353
+ ...
354
+ ValueError: Input contains NaN...
355
+ """
356
+ old_config = get_config()
357
+ set_config(
358
+ assume_finite=assume_finite,
359
+ working_memory=working_memory,
360
+ print_changed_only=print_changed_only,
361
+ display=display,
362
+ pairwise_dist_chunk_size=pairwise_dist_chunk_size,
363
+ enable_cython_pairwise_dist=enable_cython_pairwise_dist,
364
+ array_api_dispatch=array_api_dispatch,
365
+ transform_output=transform_output,
366
+ enable_metadata_routing=enable_metadata_routing,
367
+ skip_parameter_validation=skip_parameter_validation,
368
+ )
369
+
370
+ try:
371
+ yield
372
+ finally:
373
+ set_config(**old_config)
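The module above keeps one mutable configuration dict per thread (`_threadlocal`), seeded from `_global_config` on first access, so `set_config` in one thread never leaks into another. A minimal sketch of that isolation, assuming an installed scikit-learn (the thread body and names are ours, not part of the diff):

    import threading

    import sklearn

    sklearn.set_config(assume_finite=True)  # mutates this thread's copy only

    def report():
        # A new thread copies the module-level defaults on first access,
        # so it sees False rather than the main thread's True.
        print(sklearn.get_config()["assume_finite"])

    t = threading.Thread(target=report)
    t.start()
    t.join()
    print(sklearn.get_config()["assume_finite"])  # still True here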
llmeval-env/lib/python3.10/site-packages/sklearn/_distributor_init.py ADDED
@@ -0,0 +1,10 @@
+ """ Distributor init file
2
+
3
+ Distributors: you can add custom code here to support particular distributions
4
+ of scikit-learn.
5
+
6
+ For example, this is a good place to put any checks for hardware requirements.
7
+
8
+ The scikit-learn standard source distribution will not put code in this file,
9
+ so you can safely replace this file with your own version.
10
+ """
llmeval-env/lib/python3.10/site-packages/sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (307 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/_min_dependencies.py ADDED
@@ -0,0 +1,65 @@
+ """All minimum dependencies for scikit-learn."""
2
+ import argparse
3
+ from collections import defaultdict
4
+
5
+ # scipy and cython should by in sync with pyproject.toml
6
+ NUMPY_MIN_VERSION = "1.19.5"
7
+ SCIPY_MIN_VERSION = "1.6.0"
8
+ JOBLIB_MIN_VERSION = "1.2.0"
9
+ THREADPOOLCTL_MIN_VERSION = "2.0.0"
10
+ PYTEST_MIN_VERSION = "7.1.2"
11
+ CYTHON_MIN_VERSION = "3.0.8"
12
+
13
+
14
+ # 'build' and 'install' is included to have structured metadata for CI.
15
+ # It will NOT be included in setup's extras_require
16
+ # The values are (version_spec, comma separated tags)
17
+ dependent_packages = {
18
+ "numpy": (NUMPY_MIN_VERSION, "build, install"),
19
+ "scipy": (SCIPY_MIN_VERSION, "build, install"),
20
+ "joblib": (JOBLIB_MIN_VERSION, "install"),
21
+ "threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
22
+ "cython": (CYTHON_MIN_VERSION, "build"),
23
+ "matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
24
+ "scikit-image": ("0.17.2", "docs, examples, tests"),
25
+ "pandas": ("1.1.5", "benchmark, docs, examples, tests"),
26
+ "seaborn": ("0.9.0", "docs, examples"),
27
+ "memory_profiler": ("0.57.0", "benchmark, docs"),
28
+ "pytest": (PYTEST_MIN_VERSION, "tests"),
29
+ "pytest-cov": ("2.9.0", "tests"),
30
+ "ruff": ("0.0.272", "tests"),
31
+ "black": ("23.3.0", "tests"),
32
+ "mypy": ("1.3", "tests"),
33
+ "pyamg": ("4.0.0", "tests"),
34
+ "polars": ("0.19.12", "tests"),
35
+ "pyarrow": ("12.0.0", "tests"),
36
+ "sphinx": ("6.0.0", "docs"),
37
+ "sphinx-copybutton": ("0.5.2", "docs"),
38
+ "sphinx-gallery": ("0.15.0", "docs"),
39
+ "numpydoc": ("1.2.0", "docs, tests"),
40
+ "Pillow": ("7.1.2", "docs"),
41
+ "pooch": ("1.6.0", "docs, examples, tests"),
42
+ "sphinx-prompt": ("1.3.0", "docs"),
43
+ "sphinxext-opengraph": ("0.4.2", "docs"),
44
+ "plotly": ("5.14.0", "docs, examples"),
45
+ # XXX: Pin conda-lock to the latest released version (needs manual update
46
+ # from time to time)
47
+ "conda-lock": ("2.4.2", "maintenance"),
48
+ }
49
+
50
+
51
+ # create inverse mapping for setuptools
52
+ tag_to_packages: dict = defaultdict(list)
53
+ for package, (min_version, extras) in dependent_packages.items():
54
+ for extra in extras.split(", "):
55
+ tag_to_packages[extra].append("{}>={}".format(package, min_version))
56
+
57
+
58
+ # Used by CI to get the min dependencies
59
+ if __name__ == "__main__":
60
+ parser = argparse.ArgumentParser(description="Get min dependencies for a package")
61
+
62
+ parser.add_argument("package", choices=dependent_packages)
63
+ args = parser.parse_args()
64
+ min_version = dependent_packages[args.package][0]
65
+ print(min_version)
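For reference, the `__main__` block above turns the module into a small CLI used by CI, and `tag_to_packages` exposes requirement strings per extra. A usage sketch (outputs follow the pin table above; the exact list depends on the tags):

    # CLI: print the pinned minimum for one package, e.g. for numpy this
    # prints 1.19.5:
    #     python sklearn/_min_dependencies.py numpy

    # Programmatic use of the inverse mapping:
    from sklearn._min_dependencies import tag_to_packages

    print(tag_to_packages["build"])
    # e.g. ['numpy>=1.19.5', 'scipy>=1.6.0', 'cython>=3.0.8']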
llmeval-env/lib/python3.10/site-packages/sklearn/base.py ADDED
@@ -0,0 +1,1478 @@
+ """Base classes for all estimators."""
2
+
3
+ # Author: Gael Varoquaux <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import copy
7
+ import functools
8
+ import inspect
9
+ import platform
10
+ import re
11
+ import warnings
12
+ from collections import defaultdict
13
+
14
+ import numpy as np
15
+
16
+ from . import __version__
17
+ from ._config import config_context, get_config
18
+ from .exceptions import InconsistentVersionWarning
19
+ from .utils import _IS_32BIT
20
+ from .utils._estimator_html_repr import _HTMLDocumentationLinkMixin, estimator_html_repr
21
+ from .utils._metadata_requests import _MetadataRequester, _routing_enabled
22
+ from .utils._param_validation import validate_parameter_constraints
23
+ from .utils._set_output import _SetOutputMixin
24
+ from .utils._tags import (
25
+ _DEFAULT_TAGS,
26
+ )
27
+ from .utils.validation import (
28
+ _check_feature_names_in,
29
+ _check_y,
30
+ _generate_get_feature_names_out,
31
+ _get_feature_names,
32
+ _is_fitted,
33
+ _num_features,
34
+ check_array,
35
+ check_is_fitted,
36
+ check_X_y,
37
+ )
38
+
39
+
40
+ def clone(estimator, *, safe=True):
41
+ """Construct a new unfitted estimator with the same parameters.
42
+
43
+ Clone does a deep copy of the model in an estimator
44
+ without actually copying attached data. It returns a new estimator
45
+ with the same parameters that has not been fitted on any data.
46
+
47
+ .. versionchanged:: 1.3
48
+ Delegates to `estimator.__sklearn_clone__` if the method exists.
49
+
50
+ Parameters
51
+ ----------
52
+ estimator : {list, tuple, set} of estimator instance or a single \
53
+ estimator instance
54
+ The estimator or group of estimators to be cloned.
55
+ safe : bool, default=True
56
+ If safe is False, clone will fall back to a deep copy on objects
57
+ that are not estimators. Ignored if `estimator.__sklearn_clone__`
58
+ exists.
59
+
60
+ Returns
61
+ -------
62
+ estimator : object
63
+ The deep copy of the input, an estimator if input is an estimator.
64
+
65
+ Notes
66
+ -----
67
+ If the estimator's `random_state` parameter is an integer (or if the
68
+ estimator doesn't have a `random_state` parameter), an *exact clone* is
69
+ returned: the clone and the original estimator will give the exact same
70
+ results. Otherwise, *statistical clone* is returned: the clone might
71
+ return different results from the original estimator. More details can be
72
+ found in :ref:`randomness`.
73
+
74
+ Examples
75
+ --------
76
+ >>> from sklearn.base import clone
77
+ >>> from sklearn.linear_model import LogisticRegression
78
+ >>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]]
79
+ >>> y = [0, 0, 1, 1]
80
+ >>> classifier = LogisticRegression().fit(X, y)
81
+ >>> cloned_classifier = clone(classifier)
82
+ >>> hasattr(classifier, "classes_")
83
+ True
84
+ >>> hasattr(cloned_classifier, "classes_")
85
+ False
86
+ >>> classifier is cloned_classifier
87
+ False
88
+ """
89
+ if hasattr(estimator, "__sklearn_clone__") and not inspect.isclass(estimator):
90
+ return estimator.__sklearn_clone__()
91
+ return _clone_parametrized(estimator, safe=safe)
92
+
93
+
94
+ def _clone_parametrized(estimator, *, safe=True):
95
+ """Default implementation of clone. See :func:`sklearn.base.clone` for details."""
96
+
97
+ estimator_type = type(estimator)
98
+ if estimator_type is dict:
99
+ return {k: clone(v, safe=safe) for k, v in estimator.items()}
100
+ elif estimator_type in (list, tuple, set, frozenset):
101
+ return estimator_type([clone(e, safe=safe) for e in estimator])
102
+ elif not hasattr(estimator, "get_params") or isinstance(estimator, type):
103
+ if not safe:
104
+ return copy.deepcopy(estimator)
105
+ else:
106
+ if isinstance(estimator, type):
107
+ raise TypeError(
108
+ "Cannot clone object. "
109
+ + "You should provide an instance of "
110
+ + "scikit-learn estimator instead of a class."
111
+ )
112
+ else:
113
+ raise TypeError(
114
+ "Cannot clone object '%s' (type %s): "
115
+ "it does not seem to be a scikit-learn "
116
+ "estimator as it does not implement a "
117
+ "'get_params' method." % (repr(estimator), type(estimator))
118
+ )
119
+
120
+ klass = estimator.__class__
121
+ new_object_params = estimator.get_params(deep=False)
122
+ for name, param in new_object_params.items():
123
+ new_object_params[name] = clone(param, safe=False)
124
+
125
+ new_object = klass(**new_object_params)
126
+ try:
127
+ new_object._metadata_request = copy.deepcopy(estimator._metadata_request)
128
+ except AttributeError:
129
+ pass
130
+
131
+ params_set = new_object.get_params(deep=False)
132
+
133
+ # quick sanity check of the parameters of the clone
134
+ for name in new_object_params:
135
+ param1 = new_object_params[name]
136
+ param2 = params_set[name]
137
+ if param1 is not param2:
138
+ raise RuntimeError(
139
+ "Cannot clone object %s, as the constructor "
140
+ "either does not set or modifies parameter %s" % (estimator, name)
141
+ )
142
+
143
+ # _sklearn_output_config is used by `set_output` to configure the output
144
+ # container of an estimator.
145
+ if hasattr(estimator, "_sklearn_output_config"):
146
+ new_object._sklearn_output_config = copy.deepcopy(
147
+ estimator._sklearn_output_config
148
+ )
149
+ return new_object
150
+
151
+
152
+ class BaseEstimator(_HTMLDocumentationLinkMixin, _MetadataRequester):
153
+ """Base class for all estimators in scikit-learn.
154
+
155
+ Inheriting from this class provides default implementations of:
156
+
157
+ - setting and getting parameters used by `GridSearchCV` and friends;
158
+ - textual and HTML representation displayed in terminals and IDEs;
159
+ - estimator serialization;
160
+ - parameters validation;
161
+ - data validation;
162
+ - feature names validation.
163
+
164
+ Read more in the :ref:`User Guide <rolling_your_own_estimator>`.
165
+
166
+
167
+ Notes
168
+ -----
169
+ All estimators should specify all the parameters that can be set
170
+ at the class level in their ``__init__`` as explicit keyword
171
+ arguments (no ``*args`` or ``**kwargs``).
172
+
173
+ Examples
174
+ --------
175
+ >>> import numpy as np
176
+ >>> from sklearn.base import BaseEstimator
177
+ >>> class MyEstimator(BaseEstimator):
178
+ ... def __init__(self, *, param=1):
179
+ ... self.param = param
180
+ ... def fit(self, X, y=None):
181
+ ... self.is_fitted_ = True
182
+ ... return self
183
+ ... def predict(self, X):
184
+ ... return np.full(shape=X.shape[0], fill_value=self.param)
185
+ >>> estimator = MyEstimator(param=2)
186
+ >>> estimator.get_params()
187
+ {'param': 2}
188
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
189
+ >>> y = np.array([1, 0, 1])
190
+ >>> estimator.fit(X, y).predict(X)
191
+ array([2, 2, 2])
192
+ >>> estimator.set_params(param=3).fit(X, y).predict(X)
193
+ array([3, 3, 3])
194
+ """
195
+
196
+ @classmethod
197
+ def _get_param_names(cls):
198
+ """Get parameter names for the estimator"""
199
+ # fetch the constructor or the original constructor before
200
+ # deprecation wrapping if any
201
+ init = getattr(cls.__init__, "deprecated_original", cls.__init__)
202
+ if init is object.__init__:
203
+ # No explicit constructor to introspect
204
+ return []
205
+
206
+ # introspect the constructor arguments to find the model parameters
207
+ # to represent
208
+ init_signature = inspect.signature(init)
209
+ # Consider the constructor parameters excluding 'self'
210
+ parameters = [
211
+ p
212
+ for p in init_signature.parameters.values()
213
+ if p.name != "self" and p.kind != p.VAR_KEYWORD
214
+ ]
215
+ for p in parameters:
216
+ if p.kind == p.VAR_POSITIONAL:
217
+ raise RuntimeError(
218
+ "scikit-learn estimators should always "
219
+ "specify their parameters in the signature"
220
+ " of their __init__ (no varargs)."
221
+ " %s with constructor %s doesn't "
222
+ " follow this convention." % (cls, init_signature)
223
+ )
224
+ # Extract and sort argument names excluding 'self'
225
+ return sorted([p.name for p in parameters])
226
+
227
+ def get_params(self, deep=True):
228
+ """
229
+ Get parameters for this estimator.
230
+
231
+ Parameters
232
+ ----------
233
+ deep : bool, default=True
234
+ If True, will return the parameters for this estimator and
235
+ contained subobjects that are estimators.
236
+
237
+ Returns
238
+ -------
239
+ params : dict
240
+ Parameter names mapped to their values.
241
+ """
242
+ out = dict()
243
+ for key in self._get_param_names():
244
+ value = getattr(self, key)
245
+ if deep and hasattr(value, "get_params") and not isinstance(value, type):
246
+ deep_items = value.get_params().items()
247
+ out.update((key + "__" + k, val) for k, val in deep_items)
248
+ out[key] = value
249
+ return out
250
+
251
+ def set_params(self, **params):
252
+ """Set the parameters of this estimator.
253
+
254
+ The method works on simple estimators as well as on nested objects
255
+ (such as :class:`~sklearn.pipeline.Pipeline`). The latter have
256
+ parameters of the form ``<component>__<parameter>`` so that it's
257
+ possible to update each component of a nested object.
258
+
259
+ Parameters
260
+ ----------
261
+ **params : dict
262
+ Estimator parameters.
263
+
264
+ Returns
265
+ -------
266
+ self : estimator instance
267
+ Estimator instance.
268
+ """
269
+ if not params:
270
+ # Simple optimization to gain speed (inspect is slow)
271
+ return self
272
+ valid_params = self.get_params(deep=True)
273
+
274
+ nested_params = defaultdict(dict) # grouped by prefix
275
+ for key, value in params.items():
276
+ key, delim, sub_key = key.partition("__")
277
+ if key not in valid_params:
278
+ local_valid_params = self._get_param_names()
279
+ raise ValueError(
280
+ f"Invalid parameter {key!r} for estimator {self}. "
281
+ f"Valid parameters are: {local_valid_params!r}."
282
+ )
283
+
284
+ if delim:
285
+ nested_params[key][sub_key] = value
286
+ else:
287
+ setattr(self, key, value)
288
+ valid_params[key] = value
289
+
290
+ for key, sub_params in nested_params.items():
291
+ valid_params[key].set_params(**sub_params)
292
+
293
+ return self
294
+
295
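The `key.partition("__")` above is what makes nested re-parametrization work: the prefix selects a sub-estimator returned by `get_params`, and the remainder is forwarded to that object's own `set_params`. A minimal usage sketch (the pipeline and estimator choices are ours, for illustration):

    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import SVC

    pipe = Pipeline([("scaler", StandardScaler()), ("clf", SVC())])
    # "clf__C" partitions into ("clf", "C"): collected under nested_params
    # and applied via the sub-estimator's own set_params(C=10).
    pipe.set_params(clf__C=10)
    print(pipe.get_params()["clf__C"])  # 10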
+     def __sklearn_clone__(self):
+         return _clone_parametrized(self)
+
+     def __repr__(self, N_CHAR_MAX=700):
+         # N_CHAR_MAX is the (approximate) maximum number of non-blank
+         # characters to render. We pass it as an optional parameter to ease
+         # the tests.
+
+         from .utils._pprint import _EstimatorPrettyPrinter
+
+         N_MAX_ELEMENTS_TO_SHOW = 30  # number of elements to show in sequences
+
+         # use ellipsis for sequences with a lot of elements
+         pp = _EstimatorPrettyPrinter(
+             compact=True,
+             indent=1,
+             indent_at_name=True,
+             n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,
+         )
+
+         repr_ = pp.pformat(self)
+
+         # Use bruteforce ellipsis when there are a lot of non-blank characters
+         n_nonblank = len("".join(repr_.split()))
+         if n_nonblank > N_CHAR_MAX:
+             lim = N_CHAR_MAX // 2  # apprx number of chars to keep on both ends
+             regex = r"^(\s*\S){%d}" % lim
+             # The regex '^(\s*\S){%d}' % n
+             # matches from the start of the string until the nth non-blank
+             # character:
+             # - ^ matches the start of string
+             # - (pattern){n} matches n repetitions of pattern
+             # - \s*\S matches a non-blank char following zero or more blanks
+             left_lim = re.match(regex, repr_).end()
+             right_lim = re.match(regex, repr_[::-1]).end()
+
+             if "\n" in repr_[left_lim:-right_lim]:
+                 # The left side and right side aren't on the same line.
+                 # To avoid weird cuts, e.g.:
+                 # categoric...ore',
+                 # we need to start the right side with an appropriate newline
+                 # character so that it renders properly as:
+                 # categoric...
+                 # handle_unknown='ignore',
+                 # so we add [^\n]*\n which matches until the next \n
+                 regex += r"[^\n]*\n"
+                 right_lim = re.match(regex, repr_[::-1]).end()
+
+             ellipsis = "..."
+             if left_lim + len(ellipsis) < len(repr_) - right_lim:
+                 # Only add ellipsis if it results in a shorter repr
+                 repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:]
+
+         return repr_
+
+     def __getstate__(self):
+         if getattr(self, "__slots__", None):
+             raise TypeError(
+                 "You cannot use `__slots__` in objects inheriting from "
+                 "`sklearn.base.BaseEstimator`."
+             )
+
+         try:
+             state = super().__getstate__()
+             if state is None:
+                 # For Python 3.11+, empty instance (no `__slots__`,
+                 # and `__dict__`) will return a state equal to `None`.
+                 state = self.__dict__.copy()
+         except AttributeError:
+             # Python < 3.11
+             state = self.__dict__.copy()
+
+         if type(self).__module__.startswith("sklearn."):
+             return dict(state.items(), _sklearn_version=__version__)
+         else:
+             return state
+
+     def __setstate__(self, state):
+         if type(self).__module__.startswith("sklearn."):
+             pickle_version = state.pop("_sklearn_version", "pre-0.18")
+             if pickle_version != __version__:
+                 warnings.warn(
+                     InconsistentVersionWarning(
+                         estimator_name=self.__class__.__name__,
+                         current_sklearn_version=__version__,
+                         original_sklearn_version=pickle_version,
+                     ),
+                 )
+         try:
+             super().__setstate__(state)
+         except AttributeError:
+             self.__dict__.update(state)
+
+     def _more_tags(self):
+         return _DEFAULT_TAGS
+
+     def _get_tags(self):
+         collected_tags = {}
+         for base_class in reversed(inspect.getmro(self.__class__)):
+             if hasattr(base_class, "_more_tags"):
+                 # need the if because mixins might not have _more_tags
+                 # but might do redundant work in estimators
+                 # (i.e. calling more tags on BaseEstimator multiple times)
+                 more_tags = base_class._more_tags(self)
+                 collected_tags.update(more_tags)
+         return collected_tags
+
+     def _check_n_features(self, X, reset):
+         """Set the `n_features_in_` attribute, or check against it.
+
+         Parameters
+         ----------
+         X : {ndarray, sparse matrix} of shape (n_samples, n_features)
+             The input samples.
+         reset : bool
+             If True, the `n_features_in_` attribute is set to `X.shape[1]`.
+             If False and the attribute exists, then check that it is equal to
+             `X.shape[1]`. If False and the attribute does *not* exist, then
+             the check is skipped.
+             .. note::
+                It is recommended to call reset=True in `fit` and in the first
+                call to `partial_fit`. All other methods that validate `X`
+                should set `reset=False`.
+         """
+         try:
+             n_features = _num_features(X)
+         except TypeError as e:
+             if not reset and hasattr(self, "n_features_in_"):
+                 raise ValueError(
+                     "X does not contain any features, but "
+                     f"{self.__class__.__name__} is expecting "
+                     f"{self.n_features_in_} features"
+                 ) from e
+             # If the number of features is not defined and reset=True,
+             # then we skip this check
+             return
+
+         if reset:
+             self.n_features_in_ = n_features
+             return
+
+         if not hasattr(self, "n_features_in_"):
+             # Skip this check if the expected number of expected input features
+             # was not recorded by calling fit first. This is typically the case
+             # for stateless transformers.
+             return
+
+         if n_features != self.n_features_in_:
+             raise ValueError(
+                 f"X has {n_features} features, but {self.__class__.__name__} "
+                 f"is expecting {self.n_features_in_} features as input."
+             )
+
+     def _check_feature_names(self, X, *, reset):
+         """Set or check the `feature_names_in_` attribute.
+
+         .. versionadded:: 1.0
+
+         Parameters
+         ----------
+         X : {ndarray, dataframe} of shape (n_samples, n_features)
+             The input samples.
+
+         reset : bool
+             Whether to reset the `feature_names_in_` attribute.
+             If False, the input will be checked for consistency with
+             feature names of data provided when reset was last True.
+             .. note::
+                It is recommended to call `reset=True` in `fit` and in the first
+                call to `partial_fit`. All other methods that validate `X`
+                should set `reset=False`.
+         """
+
+         if reset:
+             feature_names_in = _get_feature_names(X)
+             if feature_names_in is not None:
+                 self.feature_names_in_ = feature_names_in
+             elif hasattr(self, "feature_names_in_"):
+                 # Delete the attribute when the estimator is fitted on a new dataset
+                 # that has no feature names.
+                 delattr(self, "feature_names_in_")
+             return
+
+         fitted_feature_names = getattr(self, "feature_names_in_", None)
+         X_feature_names = _get_feature_names(X)
+
+         if fitted_feature_names is None and X_feature_names is None:
+             # no feature names seen in fit and in X
+             return
+
+         if X_feature_names is not None and fitted_feature_names is None:
+             warnings.warn(
+                 f"X has feature names, but {self.__class__.__name__} was fitted without"
+                 " feature names"
+             )
+             return
+
+         if X_feature_names is None and fitted_feature_names is not None:
+             warnings.warn(
+                 "X does not have valid feature names, but"
+                 f" {self.__class__.__name__} was fitted with feature names"
+             )
+             return
+
+         # validate the feature names against the `feature_names_in_` attribute
+         if len(fitted_feature_names) != len(X_feature_names) or np.any(
+             fitted_feature_names != X_feature_names
+         ):
+             message = (
+                 "The feature names should match those that were passed during fit.\n"
+             )
+             fitted_feature_names_set = set(fitted_feature_names)
+             X_feature_names_set = set(X_feature_names)
+
+             unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)
+             missing_names = sorted(fitted_feature_names_set - X_feature_names_set)
+
+             def add_names(names):
+                 output = ""
+                 max_n_names = 5
+                 for i, name in enumerate(names):
+                     if i >= max_n_names:
+                         output += "- ...\n"
+                         break
+                     output += f"- {name}\n"
+                 return output
+
+             if unexpected_names:
+                 message += "Feature names unseen at fit time:\n"
+                 message += add_names(unexpected_names)
+
+             if missing_names:
+                 message += "Feature names seen at fit time, yet now missing:\n"
+                 message += add_names(missing_names)
+
+             if not missing_names and not unexpected_names:
+                 message += (
+                     "Feature names must be in the same order as they were in fit.\n"
+                 )
+
+             raise ValueError(message)
+
+     def _validate_data(
+         self,
+         X="no_validation",
+         y="no_validation",
+         reset=True,
+         validate_separately=False,
+         cast_to_ndarray=True,
+         **check_params,
+     ):
+         """Validate input data and set or check the `n_features_in_` attribute.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix, dataframe} of shape \
+                 (n_samples, n_features), default='no validation'
+             The input samples.
+             If `'no_validation'`, no validation is performed on `X`. This is
+             useful for meta-estimators which can delegate input validation to
+             their underlying estimator(s). In that case `y` must be passed and
+             the only accepted `check_params` are `multi_output` and
+             `y_numeric`.
+
+         y : array-like of shape (n_samples,), default='no_validation'
+             The targets.
+
+             - If `None`, `check_array` is called on `X`. If the estimator's
+               requires_y tag is True, then an error will be raised.
+             - If `'no_validation'`, `check_array` is called on `X` and the
+               estimator's requires_y tag is ignored. This is a default
+               placeholder and is never meant to be explicitly set. In that case
+               `X` must be passed.
+             - Otherwise, only `y` with `_check_y` or both `X` and `y` are
+               checked with either `check_array` or `check_X_y` depending on
+               `validate_separately`.
+
+         reset : bool, default=True
+             Whether to reset the `n_features_in_` attribute.
+             If False, the input will be checked for consistency with data
+             provided when reset was last True.
+             .. note::
+                It is recommended to call reset=True in `fit` and in the first
+                call to `partial_fit`. All other methods that validate `X`
+                should set `reset=False`.
+
+         validate_separately : False or tuple of dicts, default=False
+             Only used if y is not None.
+             If False, call validate_X_y(). Else, it must be a tuple of kwargs
+             to be used for calling check_array() on X and y respectively.
+
+             `estimator=self` is automatically added to these dicts to generate
+             more informative error message in case of invalid input data.
+
+         cast_to_ndarray : bool, default=True
+             Cast `X` and `y` to ndarray with checks in `check_params`. If
+             `False`, `X` and `y` are unchanged and only `feature_names_in_` and
+             `n_features_in_` are checked.
+
+         **check_params : kwargs
+             Parameters passed to :func:`sklearn.utils.check_array` or
+             :func:`sklearn.utils.check_X_y`. Ignored if validate_separately
+             is not False.
+
+             `estimator=self` is automatically added to these params to generate
+             more informative error message in case of invalid input data.
+
+         Returns
+         -------
+         out : {ndarray, sparse matrix} or tuple of these
+             The validated input. A tuple is returned if both `X` and `y` are
+             validated.
+         """
+         self._check_feature_names(X, reset=reset)
+
+         if y is None and self._get_tags()["requires_y"]:
+             raise ValueError(
+                 f"This {self.__class__.__name__} estimator "
+                 "requires y to be passed, but the target y is None."
+             )
+
+         no_val_X = isinstance(X, str) and X == "no_validation"
+         no_val_y = y is None or isinstance(y, str) and y == "no_validation"
+
+         if no_val_X and no_val_y:
+             raise ValueError("Validation should be done on X, y or both.")
+
+         default_check_params = {"estimator": self}
+         check_params = {**default_check_params, **check_params}
+
+         if not cast_to_ndarray:
+             if not no_val_X and no_val_y:
+                 out = X
+             elif no_val_X and not no_val_y:
+                 out = y
+             else:
+                 out = X, y
+         elif not no_val_X and no_val_y:
+             out = check_array(X, input_name="X", **check_params)
+         elif no_val_X and not no_val_y:
+             out = _check_y(y, **check_params)
+         else:
+             if validate_separately:
+                 # We need this because some estimators validate X and y
+                 # separately, and in general, separately calling check_array()
+                 # on X and y isn't equivalent to just calling check_X_y()
+                 # :(
+                 check_X_params, check_y_params = validate_separately
+                 if "estimator" not in check_X_params:
+                     check_X_params = {**default_check_params, **check_X_params}
+                 X = check_array(X, input_name="X", **check_X_params)
+                 if "estimator" not in check_y_params:
+                     check_y_params = {**default_check_params, **check_y_params}
+                 y = check_array(y, input_name="y", **check_y_params)
+             else:
+                 X, y = check_X_y(X, y, **check_params)
+             out = X, y
+
+         if not no_val_X and check_params.get("ensure_2d", True):
+             self._check_n_features(X, reset=reset)
+
+         return out
+
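`_validate_data` above is the single entry point custom estimators typically call from `fit`, and again with `reset=False` from `predict` or `transform`; a minimal sketch of that pattern (the `MeanClassifier` here is ours, purely illustrative):

    import numpy as np
    from sklearn.base import BaseEstimator, ClassifierMixin

    class MeanClassifier(ClassifierMixin, BaseEstimator):
        def fit(self, X, y):
            # reset=True records n_features_in_ (and feature_names_in_
            # for dataframes) and returns validated ndarrays.
            X, y = self._validate_data(X, y)
            self.classes_ = np.unique(y)
            return self

        def predict(self, X):
            # reset=False re-checks the feature count seen at fit time.
            X = self._validate_data(X, reset=False)
            return np.full(X.shape[0], self.classes_[0])

    clf = MeanClassifier().fit([[1.0, 2.0], [3.0, 4.0]], [0, 1])
    print(clf.n_features_in_)  # 2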
+ def _validate_params(self):
659
+ """Validate types and values of constructor parameters
660
+
661
+ The expected type and values must be defined in the `_parameter_constraints`
662
+ class attribute, which is a dictionary `param_name: list of constraints`. See
663
+ the docstring of `validate_parameter_constraints` for a description of the
664
+ accepted constraints.
665
+ """
666
+ validate_parameter_constraints(
667
+ self._parameter_constraints,
668
+ self.get_params(deep=False),
669
+ caller_name=self.__class__.__name__,
670
+ )
671
+
672
+ @property
673
+ def _repr_html_(self):
674
+ """HTML representation of estimator.
675
+
676
+ This is redundant with the logic of `_repr_mimebundle_`. The latter
677
+ should be favorted in the long term, `_repr_html_` is only
678
+ implemented for consumers who do not interpret `_repr_mimbundle_`.
679
+ """
680
+ if get_config()["display"] != "diagram":
681
+ raise AttributeError(
682
+ "_repr_html_ is only defined when the "
683
+ "'display' configuration option is set to "
684
+ "'diagram'"
685
+ )
686
+ return self._repr_html_inner
687
+
688
+ def _repr_html_inner(self):
689
+ """This function is returned by the @property `_repr_html_` to make
690
+ `hasattr(estimator, "_repr_html_") return `True` or `False` depending
691
+ on `get_config()["display"]`.
692
+ """
693
+ return estimator_html_repr(self)
694
+
695
+ def _repr_mimebundle_(self, **kwargs):
696
+ """Mime bundle used by jupyter kernels to display estimator"""
697
+ output = {"text/plain": repr(self)}
698
+ if get_config()["display"] == "diagram":
699
+ output["text/html"] = estimator_html_repr(self)
700
+ return output
701
+
702
+
703
+ class ClassifierMixin:
704
+ """Mixin class for all classifiers in scikit-learn.
705
+
706
+ This mixin defines the following functionality:
707
+
708
+ - `_estimator_type` class attribute defaulting to `"classifier"`;
709
+ - `score` method that default to :func:`~sklearn.metrics.accuracy_score`.
710
+ - enforce that `fit` requires `y` to be passed through the `requires_y` tag.
711
+
712
+ Read more in the :ref:`User Guide <rolling_your_own_estimator>`.
713
+
714
+ Examples
715
+ --------
716
+ >>> import numpy as np
717
+ >>> from sklearn.base import BaseEstimator, ClassifierMixin
718
+ >>> # Mixin classes should always be on the left-hand side for a correct MRO
719
+ >>> class MyEstimator(ClassifierMixin, BaseEstimator):
720
+ ... def __init__(self, *, param=1):
721
+ ... self.param = param
722
+ ... def fit(self, X, y=None):
723
+ ... self.is_fitted_ = True
724
+ ... return self
725
+ ... def predict(self, X):
726
+ ... return np.full(shape=X.shape[0], fill_value=self.param)
727
+ >>> estimator = MyEstimator(param=1)
728
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
729
+ >>> y = np.array([1, 0, 1])
730
+ >>> estimator.fit(X, y).predict(X)
731
+ array([1, 1, 1])
732
+ >>> estimator.score(X, y)
733
+ 0.66...
734
+ """
735
+
736
+ _estimator_type = "classifier"
737
+
738
+ def score(self, X, y, sample_weight=None):
739
+ """
740
+ Return the mean accuracy on the given test data and labels.
741
+
742
+ In multi-label classification, this is the subset accuracy
743
+ which is a harsh metric since you require for each sample that
744
+ each label set be correctly predicted.
745
+
746
+ Parameters
747
+ ----------
748
+ X : array-like of shape (n_samples, n_features)
749
+ Test samples.
750
+
751
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
752
+ True labels for `X`.
753
+
754
+ sample_weight : array-like of shape (n_samples,), default=None
755
+ Sample weights.
756
+
757
+ Returns
758
+ -------
759
+ score : float
760
+ Mean accuracy of ``self.predict(X)`` w.r.t. `y`.
761
+ """
762
+ from .metrics import accuracy_score
763
+
764
+ return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
765
+
766
+ def _more_tags(self):
767
+ return {"requires_y": True}
768
+
769
+
770
+ class RegressorMixin:
771
+ """Mixin class for all regression estimators in scikit-learn.
772
+
773
+ This mixin defines the following functionality:
774
+
775
+ - `_estimator_type` class attribute defaulting to `"regressor"`;
776
+ - `score` method that default to :func:`~sklearn.metrics.r2_score`.
777
+ - enforce that `fit` requires `y` to be passed through the `requires_y` tag.
778
+
779
+ Read more in the :ref:`User Guide <rolling_your_own_estimator>`.
780
+
781
+ Examples
782
+ --------
783
+ >>> import numpy as np
784
+ >>> from sklearn.base import BaseEstimator, RegressorMixin
785
+ >>> # Mixin classes should always be on the left-hand side for a correct MRO
786
+ >>> class MyEstimator(RegressorMixin, BaseEstimator):
787
+ ... def __init__(self, *, param=1):
788
+ ... self.param = param
789
+ ... def fit(self, X, y=None):
790
+ ... self.is_fitted_ = True
791
+ ... return self
792
+ ... def predict(self, X):
793
+ ... return np.full(shape=X.shape[0], fill_value=self.param)
794
+ >>> estimator = MyEstimator(param=0)
795
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
796
+ >>> y = np.array([-1, 0, 1])
797
+ >>> estimator.fit(X, y).predict(X)
798
+ array([0, 0, 0])
799
+ >>> estimator.score(X, y)
800
+ 0.0
801
+ """
802
+
803
+ _estimator_type = "regressor"
804
+
805
+ def score(self, X, y, sample_weight=None):
806
+ """Return the coefficient of determination of the prediction.
807
+
808
+ The coefficient of determination :math:`R^2` is defined as
809
+ :math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
810
+ sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
811
+ is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
812
+ The best possible score is 1.0 and it can be negative (because the
813
+ model can be arbitrarily worse). A constant model that always predicts
814
+ the expected value of `y`, disregarding the input features, would get
815
+ a :math:`R^2` score of 0.0.
816
+
817
+ Parameters
818
+ ----------
819
+ X : array-like of shape (n_samples, n_features)
820
+ Test samples. For some estimators this may be a precomputed
821
+ kernel matrix or a list of generic objects instead with shape
822
+ ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
823
+ is the number of samples used in the fitting for the estimator.
824
+
825
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
826
+ True values for `X`.
827
+
828
+ sample_weight : array-like of shape (n_samples,), default=None
829
+ Sample weights.
830
+
831
+ Returns
832
+ -------
833
+ score : float
834
+ :math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
835
+
836
+ Notes
837
+ -----
838
+ The :math:`R^2` score used when calling ``score`` on a regressor uses
839
+ ``multioutput='uniform_average'`` from version 0.23 to keep consistent
840
+ with default value of :func:`~sklearn.metrics.r2_score`.
841
+ This influences the ``score`` method of all the multioutput
842
+ regressors (except for
843
+ :class:`~sklearn.multioutput.MultiOutputRegressor`).
844
+ """
845
+
846
+ from .metrics import r2_score
847
+
848
+ y_pred = self.predict(X)
849
+ return r2_score(y, y_pred, sample_weight=sample_weight)
850
+
851
+ def _more_tags(self):
852
+ return {"requires_y": True}
853
+
854
+
855
+ class ClusterMixin:
856
+ """Mixin class for all cluster estimators in scikit-learn.
857
+
858
+ - `_estimator_type` class attribute defaulting to `"clusterer"`;
859
+ - `fit_predict` method returning the cluster labels associated to each sample.
860
+
861
+ Examples
862
+ --------
863
+ >>> import numpy as np
864
+ >>> from sklearn.base import BaseEstimator, ClusterMixin
865
+ >>> class MyClusterer(ClusterMixin, BaseEstimator):
866
+ ... def fit(self, X, y=None):
867
+ ... self.labels_ = np.ones(shape=(len(X),), dtype=np.int64)
868
+ ... return self
869
+ >>> X = [[1, 2], [2, 3], [3, 4]]
870
+ >>> MyClusterer().fit_predict(X)
871
+ array([1, 1, 1])
872
+ """
873
+
874
+ _estimator_type = "clusterer"
875
+
876
+ def fit_predict(self, X, y=None, **kwargs):
877
+ """
878
+ Perform clustering on `X` and returns cluster labels.
879
+
880
+ Parameters
881
+ ----------
882
+ X : array-like of shape (n_samples, n_features)
883
+ Input data.
884
+
885
+ y : Ignored
886
+ Not used, present for API consistency by convention.
887
+
888
+ **kwargs : dict
889
+ Arguments to be passed to ``fit``.
890
+
891
+ .. versionadded:: 1.4
892
+
893
+ Returns
894
+ -------
895
+ labels : ndarray of shape (n_samples,), dtype=np.int64
896
+ Cluster labels.
897
+ """
898
+ # non-optimized default implementation; override when a better
899
+ # method is possible for a given clustering algorithm
900
+ self.fit(X, **kwargs)
901
+ return self.labels_
902
+
903
+ def _more_tags(self):
904
+ return {"preserves_dtype": []}
905
+
906
+
907
+ class BiclusterMixin:
908
+ """Mixin class for all bicluster estimators in scikit-learn.
909
+
910
+ This mixin defines the following functionality:
911
+
912
+ - `biclusters_` property that returns the row and column indicators;
913
+ - `get_indices` method that returns the row and column indices of a bicluster;
914
+ - `get_shape` method that returns the shape of a bicluster;
915
+ - `get_submatrix` method that returns the submatrix corresponding to a bicluster.
916
+
917
+ Examples
918
+ --------
919
+ >>> import numpy as np
920
+ >>> from sklearn.base import BaseEstimator, BiclusterMixin
921
+ >>> class DummyBiClustering(BiclusterMixin, BaseEstimator):
922
+ ... def fit(self, X, y=None):
923
+ ... self.rows_ = np.ones(shape=(1, X.shape[0]), dtype=bool)
924
+ ... self.columns_ = np.ones(shape=(1, X.shape[1]), dtype=bool)
925
+ ... return self
926
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
927
+ ... [4, 7], [3, 5], [3, 6]])
928
+ >>> bicluster = DummyBiClustering().fit(X)
929
+ >>> hasattr(bicluster, "biclusters_")
930
+ True
931
+ >>> bicluster.get_indices(0)
932
+ (array([0, 1, 2, 3, 4, 5]), array([0, 1]))
933
+ """
934
+
935
+ @property
936
+ def biclusters_(self):
937
+ """Convenient way to get row and column indicators together.
938
+
939
+ Returns the ``rows_`` and ``columns_`` members.
940
+ """
941
+ return self.rows_, self.columns_
942
+
943
+ def get_indices(self, i):
944
+ """Row and column indices of the `i`'th bicluster.
945
+
946
+ Only works if ``rows_`` and ``columns_`` attributes exist.
947
+
948
+ Parameters
949
+ ----------
950
+ i : int
951
+ The index of the cluster.
952
+
953
+ Returns
954
+ -------
955
+ row_ind : ndarray, dtype=np.intp
956
+ Indices of rows in the dataset that belong to the bicluster.
957
+ col_ind : ndarray, dtype=np.intp
958
+ Indices of columns in the dataset that belong to the bicluster.
959
+ """
960
+ rows = self.rows_[i]
961
+ columns = self.columns_[i]
962
+ return np.nonzero(rows)[0], np.nonzero(columns)[0]
963
+
964
+ def get_shape(self, i):
965
+ """Shape of the `i`'th bicluster.
966
+
967
+ Parameters
968
+ ----------
969
+ i : int
970
+ The index of the cluster.
971
+
972
+ Returns
973
+ -------
974
+ n_rows : int
975
+ Number of rows in the bicluster.
976
+
977
+ n_cols : int
978
+ Number of columns in the bicluster.
979
+ """
980
+ indices = self.get_indices(i)
981
+ return tuple(len(i) for i in indices)
982
+
983
+ def get_submatrix(self, i, data):
984
+ """Return the submatrix corresponding to bicluster `i`.
985
+
986
+ Parameters
987
+ ----------
988
+ i : int
989
+ The index of the cluster.
990
+ data : array-like of shape (n_samples, n_features)
991
+ The data.
992
+
993
+ Returns
994
+ -------
995
+ submatrix : ndarray of shape (n_rows, n_cols)
996
+ The submatrix corresponding to bicluster `i`.
997
+
998
+ Notes
999
+ -----
1000
+ Works with sparse matrices. Only works if ``rows_`` and
1001
+ ``columns_`` attributes exist.
1002
+ """
1003
+ from .utils.validation import check_array
1004
+
1005
+ data = check_array(data, accept_sparse="csr")
1006
+ row_ind, col_ind = self.get_indices(i)
1007
+ return data[row_ind[:, np.newaxis], col_ind]
1008
+
1009
+
1010
+ class TransformerMixin(_SetOutputMixin):
1011
+ """Mixin class for all transformers in scikit-learn.
1012
+
1013
+ This mixin defines the following functionality:
1014
+
1015
+ - a `fit_transform` method that delegates to `fit` and `transform`;
1016
+ - a `set_output` method to output `X` as a specific container type.
1017
+
1018
+ If :term:`get_feature_names_out` is defined, then :class:`BaseEstimator` will
1019
+ automatically wrap `transform` and `fit_transform` to follow the `set_output`
1020
+ API. See the :ref:`developer_api_set_output` for details.
1021
+
1022
+ :class:`OneToOneFeatureMixin` and
1023
+ :class:`ClassNamePrefixFeaturesOutMixin` are helpful mixins for
1024
+ defining :term:`get_feature_names_out`.
1025
+
1026
+ Examples
1027
+ --------
1028
+ >>> import numpy as np
1029
+ >>> from sklearn.base import BaseEstimator, TransformerMixin
1030
+ >>> class MyTransformer(TransformerMixin, BaseEstimator):
1031
+ ... def __init__(self, *, param=1):
1032
+ ... self.param = param
1033
+ ... def fit(self, X, y=None):
1034
+ ... return self
1035
+ ... def transform(self, X):
1036
+ ... return np.full(shape=len(X), fill_value=self.param)
1037
+ >>> transformer = MyTransformer()
1038
+ >>> X = [[1, 2], [2, 3], [3, 4]]
1039
+ >>> transformer.fit_transform(X)
1040
+ array([1, 1, 1])
1041
+ """
1042
+
1043
+ def fit_transform(self, X, y=None, **fit_params):
1044
+ """
1045
+ Fit to data, then transform it.
1046
+
1047
+ Fits transformer to `X` and `y` with optional parameters `fit_params`
1048
+ and returns a transformed version of `X`.
1049
+
1050
+ Parameters
1051
+ ----------
1052
+ X : array-like of shape (n_samples, n_features)
1053
+ Input samples.
1054
+
1055
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
1056
+ default=None
1057
+ Target values (None for unsupervised transformations).
1058
+
1059
+ **fit_params : dict
1060
+ Additional fit parameters.
1061
+
1062
+ Returns
1063
+ -------
1064
+ X_new : ndarray array of shape (n_samples, n_features_new)
1065
+ Transformed array.
1066
+ """
1067
+ # non-optimized default implementation; override when a better
1068
+ # method is possible for a given clustering algorithm
1069
+
1070
+ # we do not route parameters here, since consumers don't route. But
1071
+ # since it's possible for a `transform` method to also consume
1072
+ # metadata, we check if that's the case, and we raise a warning telling
1073
+ # users that they should implement a custom `fit_transform` method
1074
+ # to forward metadata to `transform` as well.
1075
+ #
1076
+ # For that, we calculate routing and check if anything would be routed
1077
+ # to `transform` if we were to route them.
1078
+ if _routing_enabled():
1079
+ transform_params = self.get_metadata_routing().consumes(
1080
+ method="transform", params=fit_params.keys()
1081
+ )
1082
+ if transform_params:
1083
+ warnings.warn(
1084
+ (
1085
+ f"This object ({self.__class__.__name__}) has a `transform`"
1086
+ " method which consumes metadata, but `fit_transform` does not"
1087
+ " forward metadata to `transform`. Please implement a custom"
1088
+ " `fit_transform` method to forward metadata to `transform` as"
1089
+ " well. Alternatively, you can explicitly do"
1090
+ " `set_transform_request`and set all values to `False` to"
1091
+ " disable metadata routed to `transform`, if that's an option."
1092
+ ),
1093
+ UserWarning,
1094
+ )
1095
+
1096
+ if y is None:
1097
+ # fit method of arity 1 (unsupervised transformation)
1098
+ return self.fit(X, **fit_params).transform(X)
1099
+ else:
1100
+ # fit method of arity 2 (supervised transformation)
1101
+ return self.fit(X, y, **fit_params).transform(X)
1102
+
1103
+
1104
+ class OneToOneFeatureMixin:
1105
+ """Provides `get_feature_names_out` for simple transformers.
1106
+
1107
+ This mixin assumes there's a 1-to-1 correspondence between input features
1108
+ and output features, such as :class:`~sklearn.preprocessing.StandardScaler`.
1109
+
1110
+ Examples
1111
+ --------
1112
+ >>> import numpy as np
1113
+ >>> from sklearn.base import OneToOneFeatureMixin
1114
+ >>> class MyEstimator(OneToOneFeatureMixin):
1115
+ ... def fit(self, X, y=None):
1116
+ ... self.n_features_in_ = X.shape[1]
1117
+ ... return self
1118
+ >>> X = np.array([[1, 2], [3, 4]])
1119
+ >>> MyEstimator().fit(X).get_feature_names_out()
1120
+ array(['x0', 'x1'], dtype=object)
1121
+ """
1122
+
1123
+ def get_feature_names_out(self, input_features=None):
1124
+ """Get output feature names for transformation.
1125
+
1126
+ Parameters
1127
+ ----------
1128
+ input_features : array-like of str or None, default=None
1129
+ Input features.
1130
+
1131
+ - If `input_features` is `None`, then `feature_names_in_` is
1132
+ used as the input feature names. If `feature_names_in_` is not defined,
1133
+ then the following input feature names are generated:
1134
+ `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
1135
+ - If `input_features` is an array-like, then `input_features` must
1136
+ match `feature_names_in_` if `feature_names_in_` is defined.
1137
+
1138
+ Returns
1139
+ -------
1140
+ feature_names_out : ndarray of str objects
1141
+ Same as input features.
1142
+ """
1143
+ check_is_fitted(self, "n_features_in_")
1144
+ return _check_feature_names_in(self, input_features)
1145
+
1146
+
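+ # --- Editor's illustrative sketch (not library code; assumes NumPy is
+ # imported as `np` at the top of this module): because the mixin defines
+ # `get_feature_names_out`, combining it with `TransformerMixin` and
+ # `BaseEstimator` gives the transformer the `set_output` API.
+ class _DemoHalver(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
+     def fit(self, X, y=None):
+         self.n_features_in_ = X.shape[1]
+         return self
+
+     def transform(self, X):
+         return np.asarray(X) / 2.0
+
+ # Assuming pandas is installed,
+ # _DemoHalver().set_output(transform="pandas").fit_transform(X) would then
+ # return a DataFrame whose columns are ['x0', 'x1'] for a two-column input.
+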
1147
+ class ClassNamePrefixFeaturesOutMixin:
1148
+ """Mixin class for transformers that generate their own names by prefixing.
1149
+
1150
+ This mixin is useful when the transformer needs to generate its own feature
1151
+ names out, such as :class:`~sklearn.decomposition.PCA`. For example, if
1152
+ :class:`~sklearn.decomposition.PCA` outputs 3 features, then the generated feature
1153
+ names out are: `["pca0", "pca1", "pca2"]`.
1154
+
1155
+ This mixin assumes that a `_n_features_out` attribute is defined when the
1156
+ transformer is fitted. `_n_features_out` is the number of output features
1157
+ that the transformer will return in `transform` or `fit_transform`.
1158
+
1159
+ Examples
1160
+ --------
1161
+ >>> import numpy as np
1162
+ >>> from sklearn.base import ClassNamePrefixFeaturesOutMixin
1163
+ >>> class MyEstimator(ClassNamePrefixFeaturesOutMixin):
1164
+ ... def fit(self, X, y=None):
1165
+ ... self._n_features_out = X.shape[1]
1166
+ ... return self
1167
+ >>> X = np.array([[1, 2], [3, 4]])
1168
+ >>> MyEstimator().fit(X).get_feature_names_out()
1169
+ array(['myestimator0', 'myestimator1'], dtype=object)
1170
+ """
1171
+
1172
+ def get_feature_names_out(self, input_features=None):
1173
+ """Get output feature names for transformation.
1174
+
1175
+ The feature names out will be prefixed by the lowercased class name. For
1176
+ example, if the transformer outputs 3 features, then the feature names
1177
+ out are: `["class_name0", "class_name1", "class_name2"]`.
1178
+
1179
+ Parameters
1180
+ ----------
1181
+ input_features : array-like of str or None, default=None
1182
+ Only used to validate feature names with the names seen in `fit`.
1183
+
1184
+ Returns
1185
+ -------
1186
+ feature_names_out : ndarray of str objects
1187
+ Transformed feature names.
1188
+ """
1189
+ check_is_fitted(self, "_n_features_out")
1190
+ return _generate_get_feature_names_out(
1191
+ self, self._n_features_out, input_features=input_features
1192
+ )
1193
+
1194
+
1195
+ class DensityMixin:
1196
+ """Mixin class for all density estimators in scikit-learn.
1197
+
1198
+ This mixin defines the following functionality:
1199
+
1200
+ - `_estimator_type` class attribute defaulting to `"DensityEstimator"`;
1201
+ - `score` method that is a no-op by default.
1202
+
1203
+ Examples
1204
+ --------
1205
+ >>> from sklearn.base import DensityMixin
1206
+ >>> class MyEstimator(DensityMixin):
1207
+ ... def fit(self, X, y=None):
1208
+ ... self.is_fitted_ = True
1209
+ ... return self
1210
+ >>> estimator = MyEstimator()
1211
+ >>> hasattr(estimator, "score")
1212
+ True
1213
+ """
1214
+
1215
+ _estimator_type = "DensityEstimator"
1216
+
1217
+ def score(self, X, y=None):
1218
+ """Return the score of the model on the data `X`.
1219
+
1220
+ Parameters
1221
+ ----------
1222
+ X : array-like of shape (n_samples, n_features)
1223
+ Test samples.
1224
+
1225
+ y : Ignored
1226
+ Not used, present for API consistency by convention.
1227
+
1228
+ Returns
1229
+ -------
1230
+ score : float
1231
+ """
1232
+ pass
1233
+
1234
+
1235
+ class OutlierMixin:
1236
+ """Mixin class for all outlier detection estimators in scikit-learn.
1237
+
1238
+ This mixin defines the following functionality:
1239
+
1240
+ - `_estimator_type` class attribute defaulting to `outlier_detector`;
1241
+ - `fit_predict` method that by default calls `fit` and then `predict`.
1242
+
1243
+ Examples
1244
+ --------
1245
+ >>> import numpy as np
1246
+ >>> from sklearn.base import BaseEstimator, OutlierMixin
1247
+ >>> class MyEstimator(OutlierMixin):
1248
+ ... def fit(self, X, y=None):
1249
+ ... self.is_fitted_ = True
1250
+ ... return self
1251
+ ... def predict(self, X):
1252
+ ... return np.ones(shape=len(X))
1253
+ >>> estimator = MyEstimator()
1254
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
1255
+ >>> estimator.fit_predict(X)
1256
+ array([1., 1., 1.])
1257
+ """
1258
+
1259
+ _estimator_type = "outlier_detector"
1260
+
1261
+ def fit_predict(self, X, y=None, **kwargs):
1262
+ """Perform fit on X and returns labels for X.
1263
+
1264
+ Returns -1 for outliers and 1 for inliers.
1265
+
1266
+ Parameters
1267
+ ----------
1268
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1269
+ The input samples.
1270
+
1271
+ y : Ignored
1272
+ Not used, present for API consistency by convention.
1273
+
1274
+ **kwargs : dict
1275
+ Arguments to be passed to ``fit``.
1276
+
1277
+ .. versionadded:: 1.4
1278
+
1279
+ Returns
1280
+ -------
1281
+ y : ndarray of shape (n_samples,)
1282
+ 1 for inliers, -1 for outliers.
1283
+ """
1284
+ # we do not route parameters here, since consumers don't route. But
1285
+ # since it's possible for a `predict` method to also consume
1286
+ # metadata, we check if that's the case, and we raise a warning telling
1287
+ # users that they should implement a custom `fit_predict` method
1288
+ # to forward metadata to `predict` as well.
1289
+ #
1290
+ # For that, we calculate routing and check if anything would be routed
1291
+ # to `predict` if we were to route them.
1292
+ if _routing_enabled():
1293
+ transform_params = self.get_metadata_routing().consumes(
1294
+ method="predict", params=kwargs.keys()
1295
+ )
1296
+ if transform_params:
1297
+ warnings.warn(
1298
+ (
1299
+ f"This object ({self.__class__.__name__}) has a `predict` "
1300
+ "method which consumes metadata, but `fit_predict` does not "
1301
+ "forward metadata to `predict`. Please implement a custom "
1302
+ "`fit_predict` method to forward metadata to `predict` as well."
1303
+ "Alternatively, you can explicitly do `set_predict_request`"
1304
+ "and set all values to `False` to disable metadata routed to "
1305
+ "`predict`, if that's an option."
1306
+ ),
1307
+ UserWarning,
1308
+ )
1309
+
1310
+ # override for transductive outlier detectors like LocalOutlierFactor
1311
+ return self.fit(X, **kwargs).predict(X)
1312
+
1313
+
1314
+ class MetaEstimatorMixin:
1315
+ """Mixin class for all meta estimators in scikit-learn.
1316
+
1317
+ This mixin defines the following functionality:
1318
+
1319
+ - define `_required_parameters` that specifies the mandatory `estimator` parameter.
1320
+
1321
+ Examples
1322
+ --------
1323
+ >>> from sklearn.base import MetaEstimatorMixin
1324
+ >>> from sklearn.datasets import load_iris
1325
+ >>> from sklearn.linear_model import LogisticRegression
1326
+ >>> class MyEstimator(MetaEstimatorMixin):
1327
+ ... def __init__(self, *, estimator=None):
1328
+ ... self.estimator = estimator
1329
+ ... def fit(self, X, y=None):
1330
+ ... if self.estimator is None:
1331
+ ... self.estimator_ = LogisticRegression()
1332
+ ... else:
1333
+ ... self.estimator_ = self.estimator
1334
+ ... return self
1335
+ >>> X, y = load_iris(return_X_y=True)
1336
+ >>> estimator = MyEstimator().fit(X, y)
1337
+ >>> estimator.estimator_
1338
+ LogisticRegression()
1339
+ """
1340
+
1341
+ _required_parameters = ["estimator"]
1342
+
1343
+
1344
+ class MultiOutputMixin:
1345
+ """Mixin to mark estimators that support multioutput."""
1346
+
1347
+ def _more_tags(self):
1348
+ return {"multioutput": True}
1349
+
1350
+
1351
+ class _UnstableArchMixin:
1352
+ """Mark estimators that are non-determinstic on 32bit or PowerPC"""
1353
+
1354
+ def _more_tags(self):
1355
+ return {
1356
+ "non_deterministic": _IS_32BIT or platform.machine().startswith(
1357
+ ("ppc", "powerpc")
1358
+ )
1359
+ }
1360
+
1361
+
1362
+ def is_classifier(estimator):
1363
+ """Return True if the given estimator is (probably) a classifier.
1364
+
1365
+ Parameters
1366
+ ----------
1367
+ estimator : object
1368
+ Estimator object to test.
1369
+
1370
+ Returns
1371
+ -------
1372
+ out : bool
1373
+ True if estimator is a classifier and False otherwise.
1374
+
1375
+ Examples
1376
+ --------
1377
+ >>> from sklearn.base import is_classifier
1378
+ >>> from sklearn.svm import SVC, SVR
1379
+ >>> classifier = SVC()
1380
+ >>> regressor = SVR()
1381
+ >>> is_classifier(classifier)
1382
+ True
1383
+ >>> is_classifier(regressor)
1384
+ False
1385
+ """
1386
+ return getattr(estimator, "_estimator_type", None) == "classifier"
1387
+
1388
+
1389
+ def is_regressor(estimator):
1390
+ """Return True if the given estimator is (probably) a regressor.
1391
+
1392
+ Parameters
1393
+ ----------
1394
+ estimator : estimator instance
1395
+ Estimator object to test.
1396
+
1397
+ Returns
1398
+ -------
1399
+ out : bool
1400
+ True if estimator is a regressor and False otherwise.
1401
+
1402
+ Examples
1403
+ --------
1404
+ >>> from sklearn.base import is_regressor
1405
+ >>> from sklearn.svm import SVC, SVR
1406
+ >>> classifier = SVC()
1407
+ >>> regressor = SVR()
1408
+ >>> is_regressor(classifier)
1409
+ False
1410
+ >>> is_regressor(regressor)
1411
+ True
1412
+ """
1413
+ return getattr(estimator, "_estimator_type", None) == "regressor"
1414
+
1415
+
1416
+ def is_outlier_detector(estimator):
1417
+ """Return True if the given estimator is (probably) an outlier detector.
1418
+
1419
+ Parameters
1420
+ ----------
1421
+ estimator : estimator instance
1422
+ Estimator object to test.
1423
+
1424
+ Returns
1425
+ -------
1426
+ out : bool
1427
+ True if estimator is an outlier detector and False otherwise.
1428
+ """
1429
+ return getattr(estimator, "_estimator_type", None) == "outlier_detector"
1430
+
1431
+
1432
+ def _fit_context(*, prefer_skip_nested_validation):
1433
+ """Decorator to run the fit methods of estimators within context managers.
1434
+
1435
+ Parameters
1436
+ ----------
1437
+ prefer_skip_nested_validation : bool
1438
+ If True, the validation of parameters of inner estimators or functions
1439
+ called during fit will be skipped.
1440
+
1441
+ This is useful to avoid validating the parameters passed by the user from
1442
+ the public-facing API multiple times. It's also useful to avoid validating
1443
+ parameters that we pass internally to inner functions that are guaranteed to
1444
+ be valid by the test suite.
1445
+
1446
+ It should be set to True for most estimators, except for those that receive
1447
+ non-validated objects as parameters, such as meta-estimators that are given
1448
+ estimator objects.
1449
+
1450
+ Returns
1451
+ -------
1452
+ decorated_fit : method
1453
+ The decorated fit method.
1454
+ """
1455
+
1456
+ def decorator(fit_method):
1457
+ @functools.wraps(fit_method)
1458
+ def wrapper(estimator, *args, **kwargs):
1459
+ global_skip_validation = get_config()["skip_parameter_validation"]
1460
+
1461
+ # we don't want to validate again for each call to partial_fit
1462
+ partial_fit_and_fitted = (
1463
+ fit_method.__name__ == "partial_fit" and _is_fitted(estimator)
1464
+ )
1465
+
1466
+ if not global_skip_validation and not partial_fit_and_fitted:
1467
+ estimator._validate_params()
1468
+
1469
+ with config_context(
1470
+ skip_parameter_validation=(
1471
+ prefer_skip_nested_validation or global_skip_validation
1472
+ )
1473
+ ):
1474
+ return fit_method(estimator, *args, **kwargs)
1475
+
1476
+ return wrapper
1477
+
1478
+ return decorator
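+
+ # --- Editor's illustrative sketch (hypothetical estimator, not library
+ # code): typical use of the decorator. Parameters are validated once when
+ # the public `fit` is called, and nested validation is skipped inside it.
+ # `_DemoEstimator` and its single `alpha` constraint are invented for the
+ # demo.
+ from numbers import Real
+
+ from .utils._param_validation import Interval
+
+
+ class _DemoEstimator(BaseEstimator):
+     _parameter_constraints: dict = {
+         "alpha": [Interval(Real, 0, None, closed="left")],
+     }
+
+     def __init__(self, alpha=1.0):
+         self.alpha = alpha
+
+     @_fit_context(prefer_skip_nested_validation=True)
+     def fit(self, X, y=None):
+         # by the time we get here, `alpha` has been checked against the
+         # constraints; nested validation inside `fit` is skipped
+         self.is_fitted_ = True
+         return self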
llmeval-env/lib/python3.10/site-packages/sklearn/calibration.py ADDED
@@ -0,0 +1,1410 @@
1
+ """Calibration of predicted probabilities."""
2
+
3
+ # Author: Alexandre Gramfort <[email protected]>
4
+ # Balazs Kegl <[email protected]>
5
+ # Jan Hendrik Metzen <[email protected]>
6
+ # Mathieu Blondel <[email protected]>
7
+ #
8
+ # License: BSD 3 clause
9
+
10
+ import warnings
11
+ from inspect import signature
12
+ from math import log
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ from scipy.optimize import minimize
17
+ from scipy.special import expit
18
+
19
+ from sklearn.utils import Bunch
20
+
21
+ from ._loss import HalfBinomialLoss
22
+ from .base import (
23
+ BaseEstimator,
24
+ ClassifierMixin,
25
+ MetaEstimatorMixin,
26
+ RegressorMixin,
27
+ _fit_context,
28
+ clone,
29
+ )
30
+ from .isotonic import IsotonicRegression
31
+ from .model_selection import check_cv, cross_val_predict
32
+ from .preprocessing import LabelEncoder, label_binarize
33
+ from .svm import LinearSVC
34
+ from .utils import (
35
+ _safe_indexing,
36
+ column_or_1d,
37
+ indexable,
38
+ )
39
+ from .utils._param_validation import (
40
+ HasMethods,
41
+ Interval,
42
+ StrOptions,
43
+ validate_params,
44
+ )
45
+ from .utils._plotting import _BinaryClassifierCurveDisplayMixin
46
+ from .utils._response import _get_response_values, _process_predict_proba
47
+ from .utils.metadata_routing import (
48
+ MetadataRouter,
49
+ MethodMapping,
50
+ _routing_enabled,
51
+ process_routing,
52
+ )
53
+ from .utils.multiclass import check_classification_targets
54
+ from .utils.parallel import Parallel, delayed
55
+ from .utils.validation import (
56
+ _check_method_params,
57
+ _check_pos_label_consistency,
58
+ _check_response_method,
59
+ _check_sample_weight,
60
+ _num_samples,
61
+ check_consistent_length,
62
+ check_is_fitted,
63
+ )
64
+
65
+
66
+ class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator):
67
+ """Probability calibration with isotonic regression or logistic regression.
68
+
69
+ This class uses cross-validation to both estimate the parameters of a
70
+ classifier and subsequently calibrate a classifier. With default
71
+ `ensemble=True`, for each cv split it
72
+ fits a copy of the base estimator to the training subset, and calibrates it
73
+ using the testing subset. For prediction, predicted probabilities are
74
+ averaged across these individual calibrated classifiers. When
75
+ `ensemble=False`, cross-validation is used to obtain unbiased predictions,
76
+ via :func:`~sklearn.model_selection.cross_val_predict`, which are then
77
+ used for calibration. For prediction, the base estimator, trained using all
78
+ the data, is used. This is the prediction method implemented when
79
+ `probability=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC`
80
+ estimators (see :ref:`User Guide <scores_probabilities>` for details).
81
+
82
+ Already fitted classifiers can be calibrated via the parameter
83
+ `cv="prefit"`. In this case, no cross-validation is used and all provided
84
+ data is used for calibration. The user must manually ensure that the data
85
+ used for model fitting and calibration are disjoint.
86
+
87
+ The calibration is based on the :term:`decision_function` method of the
88
+ `estimator` if it exists, else on :term:`predict_proba`.
89
+
90
+ Read more in the :ref:`User Guide <calibration>`.
91
+
92
+ Parameters
93
+ ----------
94
+ estimator : estimator instance, default=None
95
+ The classifier whose output need to be calibrated to provide more
96
+ accurate `predict_proba` outputs. The default classifier is
97
+ a :class:`~sklearn.svm.LinearSVC`.
98
+
99
+ .. versionadded:: 1.2
100
+
101
+ method : {'sigmoid', 'isotonic'}, default='sigmoid'
102
+ The method to use for calibration. Can be 'sigmoid' which
103
+ corresponds to Platt's method (i.e. a logistic regression model) or
104
+ 'isotonic' which is a non-parametric approach. It is not advised to
105
+ use isotonic calibration with too few calibration samples
106
+ ``(<<1000)`` since it tends to overfit.
107
+
108
+ cv : int, cross-validation generator, iterable or "prefit", \
109
+ default=None
110
+ Determines the cross-validation splitting strategy.
111
+ Possible inputs for cv are:
112
+
113
+ - None, to use the default 5-fold cross-validation,
114
+ - integer, to specify the number of folds.
115
+ - :term:`CV splitter`,
116
+ - An iterable yielding (train, test) splits as arrays of indices.
117
+
118
+ For integer/None inputs, if ``y`` is binary or multiclass,
119
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
120
+ neither binary nor multiclass, :class:`~sklearn.model_selection.KFold`
121
+ is used.
122
+
123
+ Refer to the :ref:`User Guide <cross_validation>` for the various
124
+ cross-validation strategies that can be used here.
125
+
126
+ If "prefit" is passed, it is assumed that `estimator` has been
127
+ fitted already and all data is used for calibration.
128
+
129
+ .. versionchanged:: 0.22
130
+ ``cv`` default value if None changed from 3-fold to 5-fold.
131
+
132
+ n_jobs : int, default=None
133
+ Number of jobs to run in parallel.
134
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
135
+ ``-1`` means using all processors.
136
+
137
+ Base estimator clones are fitted in parallel across cross-validation
138
+ iterations. Therefore parallelism happens only when `cv != "prefit"`.
139
+
140
+ See :term:`Glossary <n_jobs>` for more details.
141
+
142
+ .. versionadded:: 0.24
143
+
144
+ ensemble : bool, default=True
145
+ Determines how the calibrator is fitted when `cv` is not `'prefit'`.
146
+ Ignored if `cv='prefit'`.
147
+
148
+ If `True`, the `estimator` is fitted using training data, and
149
+ calibrated using testing data, for each `cv` fold. The final estimator
150
+ is an ensemble of `n_cv` fitted classifier and calibrator pairs, where
151
+ `n_cv` is the number of cross-validation folds. The output is the
152
+ average predicted probabilities of all pairs.
153
+
154
+ If `False`, `cv` is used to compute unbiased predictions, via
155
+ :func:`~sklearn.model_selection.cross_val_predict`, which are then
156
+ used for calibration. At prediction time, the classifier used is the
157
+ `estimator` trained on all the data.
158
+ Note that this method is also internally implemented in
159
+ :mod:`sklearn.svm` estimators with the `probability=True` parameter.
160
+
161
+ .. versionadded:: 0.24
162
+
163
+ Attributes
164
+ ----------
165
+ classes_ : ndarray of shape (n_classes,)
166
+ The class labels.
167
+
168
+ n_features_in_ : int
169
+ Number of features seen during :term:`fit`. Only defined if the
170
+ underlying estimator exposes such an attribute when fit.
171
+
172
+ .. versionadded:: 0.24
173
+
174
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
175
+ Names of features seen during :term:`fit`. Only defined if the
176
+ underlying estimator exposes such an attribute when fit.
177
+
178
+ .. versionadded:: 1.0
179
+
180
+ calibrated_classifiers_ : list (len() equal to cv or 1 if `cv="prefit"` \
181
+ or `ensemble=False`)
182
+ The list of classifier and calibrator pairs.
183
+
184
+ - When `cv="prefit"`, the fitted `estimator` and fitted
185
+ calibrator.
186
+ - When `cv` is not "prefit" and `ensemble=True`, `n_cv` fitted
187
+ `estimator` and calibrator pairs. `n_cv` is the number of
188
+ cross-validation folds.
189
+ - When `cv` is not "prefit" and `ensemble=False`, the `estimator`,
190
+ fitted on all the data, and fitted calibrator.
191
+
192
+ .. versionchanged:: 0.24
193
+ Single calibrated classifier case when `ensemble=False`.
194
+
195
+ See Also
196
+ --------
197
+ calibration_curve : Compute true and predicted probabilities
198
+ for a calibration curve.
199
+
200
+ References
201
+ ----------
202
+ .. [1] Obtaining calibrated probability estimates from decision trees
203
+ and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
204
+
205
+ .. [2] Transforming Classifier Scores into Accurate Multiclass
206
+ Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
207
+
208
+ .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
209
+ Regularized Likelihood Methods, J. Platt, (1999)
210
+
211
+ .. [4] Predicting Good Probabilities with Supervised Learning,
212
+ A. Niculescu-Mizil & R. Caruana, ICML 2005
213
+
214
+ Examples
215
+ --------
216
+ >>> from sklearn.datasets import make_classification
217
+ >>> from sklearn.naive_bayes import GaussianNB
218
+ >>> from sklearn.calibration import CalibratedClassifierCV
219
+ >>> X, y = make_classification(n_samples=100, n_features=2,
220
+ ... n_redundant=0, random_state=42)
221
+ >>> base_clf = GaussianNB()
222
+ >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3)
223
+ >>> calibrated_clf.fit(X, y)
224
+ CalibratedClassifierCV(...)
225
+ >>> len(calibrated_clf.calibrated_classifiers_)
226
+ 3
227
+ >>> calibrated_clf.predict_proba(X)[:5, :]
228
+ array([[0.110..., 0.889...],
229
+ [0.072..., 0.927...],
230
+ [0.928..., 0.071...],
231
+ [0.928..., 0.071...],
232
+ [0.071..., 0.928...]])
233
+ >>> from sklearn.model_selection import train_test_split
234
+ >>> X, y = make_classification(n_samples=100, n_features=2,
235
+ ... n_redundant=0, random_state=42)
236
+ >>> X_train, X_calib, y_train, y_calib = train_test_split(
237
+ ... X, y, random_state=42
238
+ ... )
239
+ >>> base_clf = GaussianNB()
240
+ >>> base_clf.fit(X_train, y_train)
241
+ GaussianNB()
242
+ >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv="prefit")
243
+ >>> calibrated_clf.fit(X_calib, y_calib)
244
+ CalibratedClassifierCV(...)
245
+ >>> len(calibrated_clf.calibrated_classifiers_)
246
+ 1
247
+ >>> calibrated_clf.predict_proba([[-0.5, 0.5]])
248
+ array([[0.936..., 0.063...]])
249
+ """
250
+
251
+ _parameter_constraints: dict = {
252
+ "estimator": [
253
+ HasMethods(["fit", "predict_proba"]),
254
+ HasMethods(["fit", "decision_function"]),
255
+ None,
256
+ ],
257
+ "method": [StrOptions({"isotonic", "sigmoid"})],
258
+ "cv": ["cv_object", StrOptions({"prefit"})],
259
+ "n_jobs": [Integral, None],
260
+ "ensemble": ["boolean"],
261
+ }
262
+
263
+ def __init__(
264
+ self,
265
+ estimator=None,
266
+ *,
267
+ method="sigmoid",
268
+ cv=None,
269
+ n_jobs=None,
270
+ ensemble=True,
271
+ ):
272
+ self.estimator = estimator
273
+ self.method = method
274
+ self.cv = cv
275
+ self.n_jobs = n_jobs
276
+ self.ensemble = ensemble
277
+
278
+ def _get_estimator(self):
279
+ """Resolve which estimator to return (default is LinearSVC)"""
280
+ if self.estimator is None:
281
+ # we want all classifiers that don't expose a random_state
282
+ # to be deterministic (and we don't want to expose this one).
283
+ estimator = LinearSVC(random_state=0, dual="auto")
284
+ if _routing_enabled():
285
+ estimator.set_fit_request(sample_weight=True)
286
+ else:
287
+ estimator = self.estimator
288
+
289
+ return estimator
290
+
291
+ @_fit_context(
292
+ # CalibratedClassifierCV.estimator is not validated yet
293
+ prefer_skip_nested_validation=False
294
+ )
295
+ def fit(self, X, y, sample_weight=None, **fit_params):
296
+ """Fit the calibrated model.
297
+
298
+ Parameters
299
+ ----------
300
+ X : array-like of shape (n_samples, n_features)
301
+ Training data.
302
+
303
+ y : array-like of shape (n_samples,)
304
+ Target values.
305
+
306
+ sample_weight : array-like of shape (n_samples,), default=None
307
+ Sample weights. If None, then samples are equally weighted.
308
+
309
+ **fit_params : dict
310
+ Parameters to pass to the `fit` method of the underlying
311
+ classifier.
312
+
313
+ Returns
314
+ -------
315
+ self : object
316
+ Returns an instance of self.
317
+ """
318
+ check_classification_targets(y)
319
+ X, y = indexable(X, y)
320
+ if sample_weight is not None:
321
+ sample_weight = _check_sample_weight(sample_weight, X)
322
+
323
+ estimator = self._get_estimator()
324
+
325
+ self.calibrated_classifiers_ = []
326
+ if self.cv == "prefit":
327
+ # `classes_` should be consistent with that of estimator
328
+ check_is_fitted(self.estimator, attributes=["classes_"])
329
+ self.classes_ = self.estimator.classes_
330
+
331
+ predictions, _ = _get_response_values(
332
+ estimator,
333
+ X,
334
+ response_method=["decision_function", "predict_proba"],
335
+ )
336
+ if predictions.ndim == 1:
337
+ # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
338
+ predictions = predictions.reshape(-1, 1)
339
+
340
+ calibrated_classifier = _fit_calibrator(
341
+ estimator,
342
+ predictions,
343
+ y,
344
+ self.classes_,
345
+ self.method,
346
+ sample_weight,
347
+ )
348
+ self.calibrated_classifiers_.append(calibrated_classifier)
349
+ else:
350
+ # Set `classes_` using all `y`
351
+ label_encoder_ = LabelEncoder().fit(y)
352
+ self.classes_ = label_encoder_.classes_
353
+
354
+ if _routing_enabled():
355
+ routed_params = process_routing(
356
+ self,
357
+ "fit",
358
+ sample_weight=sample_weight,
359
+ **fit_params,
360
+ )
361
+ else:
362
+ # sample_weight checks
363
+ fit_parameters = signature(estimator.fit).parameters
364
+ supports_sw = "sample_weight" in fit_parameters
365
+ if sample_weight is not None and not supports_sw:
366
+ estimator_name = type(estimator).__name__
367
+ warnings.warn(
368
+ f"Since {estimator_name} does not appear to accept"
369
+ " sample_weight, sample weights will only be used for the"
370
+ " calibration itself. This can be caused by a limitation of"
371
+ " the current scikit-learn API. See the following issue for"
372
+ " more details:"
373
+ " https://github.com/scikit-learn/scikit-learn/issues/21134."
374
+ " Be warned that the result of the calibration is likely to be"
375
+ " incorrect."
376
+ )
377
+ routed_params = Bunch()
378
+ routed_params.splitter = Bunch(split={}) # no routing for splitter
379
+ routed_params.estimator = Bunch(fit=fit_params)
380
+ if sample_weight is not None and supports_sw:
381
+ routed_params.estimator.fit["sample_weight"] = sample_weight
382
+
383
+ # Check that each cross-validation fold can have at least one
384
+ # example per class
385
+ if isinstance(self.cv, int):
386
+ n_folds = self.cv
387
+ elif hasattr(self.cv, "n_splits"):
388
+ n_folds = self.cv.n_splits
389
+ else:
390
+ n_folds = None
391
+ if n_folds and np.any(
392
+ [np.sum(y == class_) < n_folds for class_ in self.classes_]
393
+ ):
394
+ raise ValueError(
395
+ f"Requesting {n_folds}-fold "
396
+ "cross-validation but provided less than "
397
+ f"{n_folds} examples for at least one class."
398
+ )
399
+ cv = check_cv(self.cv, y, classifier=True)
400
+
401
+ if self.ensemble:
402
+ parallel = Parallel(n_jobs=self.n_jobs)
403
+ self.calibrated_classifiers_ = parallel(
404
+ delayed(_fit_classifier_calibrator_pair)(
405
+ clone(estimator),
406
+ X,
407
+ y,
408
+ train=train,
409
+ test=test,
410
+ method=self.method,
411
+ classes=self.classes_,
412
+ sample_weight=sample_weight,
413
+ fit_params=routed_params.estimator.fit,
414
+ )
415
+ for train, test in cv.split(X, y, **routed_params.splitter.split)
416
+ )
417
+ else:
418
+ this_estimator = clone(estimator)
419
+ method_name = _check_response_method(
420
+ this_estimator,
421
+ ["decision_function", "predict_proba"],
422
+ ).__name__
423
+ predictions = cross_val_predict(
424
+ estimator=this_estimator,
425
+ X=X,
426
+ y=y,
427
+ cv=cv,
428
+ method=method_name,
429
+ n_jobs=self.n_jobs,
430
+ params=routed_params.estimator.fit,
431
+ )
432
+ if len(self.classes_) == 2:
433
+ # Ensure shape (n_samples, 1) in the binary case
434
+ if method_name == "predict_proba":
435
+ # Select the probability column of the positive class
436
+ predictions = _process_predict_proba(
437
+ y_pred=predictions,
438
+ target_type="binary",
439
+ classes=self.classes_,
440
+ pos_label=self.classes_[1],
441
+ )
442
+ predictions = predictions.reshape(-1, 1)
443
+
444
+ this_estimator.fit(X, y, **routed_params.estimator.fit)
445
+ # Note: Here we don't pass on fit_params because the supported
446
+ # calibrators don't support fit_params anyway
447
+ calibrated_classifier = _fit_calibrator(
448
+ this_estimator,
449
+ predictions,
450
+ y,
451
+ self.classes_,
452
+ self.method,
453
+ sample_weight,
454
+ )
455
+ self.calibrated_classifiers_.append(calibrated_classifier)
456
+
457
+ first_clf = self.calibrated_classifiers_[0].estimator
458
+ if hasattr(first_clf, "n_features_in_"):
459
+ self.n_features_in_ = first_clf.n_features_in_
460
+ if hasattr(first_clf, "feature_names_in_"):
461
+ self.feature_names_in_ = first_clf.feature_names_in_
462
+ return self
463
+
464
+ def predict_proba(self, X):
465
+ """Calibrated probabilities of classification.
466
+
467
+ This function returns calibrated probabilities of classification
468
+ according to each class on an array of test vectors X.
469
+
470
+ Parameters
471
+ ----------
472
+ X : array-like of shape (n_samples, n_features)
473
+ The samples, as accepted by `estimator.predict_proba`.
474
+
475
+ Returns
476
+ -------
477
+ C : ndarray of shape (n_samples, n_classes)
478
+ The predicted probas.
479
+ """
480
+ check_is_fitted(self)
481
+ # Compute the arithmetic mean of the predictions of the calibrated
482
+ # classifiers
483
+ mean_proba = np.zeros((_num_samples(X), len(self.classes_)))
484
+ for calibrated_classifier in self.calibrated_classifiers_:
485
+ proba = calibrated_classifier.predict_proba(X)
486
+ mean_proba += proba
487
+
488
+ mean_proba /= len(self.calibrated_classifiers_)
489
+
490
+ return mean_proba
491
+
492
+ def predict(self, X):
493
+ """Predict the target of new samples.
494
+
495
+ The predicted class is the class that has the highest probability,
496
+ and can thus be different from the prediction of the uncalibrated classifier.
497
+
498
+ Parameters
499
+ ----------
500
+ X : array-like of shape (n_samples, n_features)
501
+ The samples, as accepted by `estimator.predict`.
502
+
503
+ Returns
504
+ -------
505
+ C : ndarray of shape (n_samples,)
506
+ The predicted class.
507
+ """
508
+ check_is_fitted(self)
509
+ return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
510
+
511
+ def get_metadata_routing(self):
512
+ """Get metadata routing of this object.
513
+
514
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
515
+ mechanism works.
516
+
517
+ Returns
518
+ -------
519
+ routing : MetadataRouter
520
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
521
+ routing information.
522
+ """
523
+ router = (
524
+ MetadataRouter(owner=self.__class__.__name__)
525
+ .add_self_request(self)
526
+ .add(
527
+ estimator=self._get_estimator(),
528
+ method_mapping=MethodMapping().add(callee="fit", caller="fit"),
529
+ )
530
+ .add(
531
+ splitter=self.cv,
532
+ method_mapping=MethodMapping().add(callee="split", caller="fit"),
533
+ )
534
+ )
535
+ return router
536
+
537
+ def _more_tags(self):
538
+ return {
539
+ "_xfail_checks": {
540
+ "check_sample_weights_invariance": (
541
+ "Due to the cross-validation and sample ordering, removing a sample"
542
+ " is not strictly equal to putting is weight to zero. Specific unit"
543
+ " tests are added for CalibratedClassifierCV specifically."
544
+ ),
545
+ }
546
+ }
547
+
548
+
549
+ def _fit_classifier_calibrator_pair(
550
+ estimator,
551
+ X,
552
+ y,
553
+ train,
554
+ test,
555
+ method,
556
+ classes,
557
+ sample_weight=None,
558
+ fit_params=None,
559
+ ):
560
+ """Fit a classifier/calibration pair on a given train/test split.
561
+
562
+ Fit the classifier on the train set, compute its predictions on the test
563
+ set and use the predictions as input to fit the calibrator along with the
564
+ test labels.
565
+
566
+ Parameters
567
+ ----------
568
+ estimator : estimator instance
569
+ Cloned base estimator.
570
+
571
+ X : array-like, shape (n_samples, n_features)
572
+ Sample data.
573
+
574
+ y : array-like, shape (n_samples,)
575
+ Targets.
576
+
577
+ train : ndarray, shape (n_train_indices,)
578
+ Indices of the training subset.
579
+
580
+ test : ndarray, shape (n_test_indices,)
581
+ Indices of the testing subset.
582
+
583
+ method : {'sigmoid', 'isotonic'}
584
+ Method to use for calibration.
585
+
586
+ classes : ndarray, shape (n_classes,)
587
+ The target classes.
588
+
589
+ sample_weight : array-like, default=None
590
+ Sample weights for `X`.
591
+
592
+ fit_params : dict, default=None
593
+ Parameters to pass to the `fit` method of the underlying
594
+ classifier.
595
+
596
+ Returns
597
+ -------
598
+ calibrated_classifier : _CalibratedClassifier instance
599
+ """
600
+ fit_params_train = _check_method_params(X, params=fit_params, indices=train)
601
+ X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train)
602
+ X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test)
603
+
604
+ estimator.fit(X_train, y_train, **fit_params_train)
605
+
606
+ predictions, _ = _get_response_values(
607
+ estimator,
608
+ X_test,
609
+ response_method=["decision_function", "predict_proba"],
610
+ )
611
+ if predictions.ndim == 1:
612
+ # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
613
+ predictions = predictions.reshape(-1, 1)
614
+
615
+ sw_test = None if sample_weight is None else _safe_indexing(sample_weight, test)
616
+ calibrated_classifier = _fit_calibrator(
617
+ estimator, predictions, y_test, classes, method, sample_weight=sw_test
618
+ )
619
+ return calibrated_classifier
620
+
621
+
622
+ def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):
623
+ """Fit calibrator(s) and return a `_CalibratedClassifier`
624
+ instance.
625
+
626
+ `n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
627
+ However, if `n_classes` equals 2, one calibrator is fitted.
628
+
629
+ Parameters
630
+ ----------
631
+ clf : estimator instance
632
+ Fitted classifier.
633
+
634
+ predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \
635
+ when binary.
636
+ Raw predictions returned by the un-calibrated base classifier.
637
+
638
+ y : array-like, shape (n_samples,)
639
+ The targets.
640
+
641
+ classes : ndarray, shape (n_classes,)
642
+ All the prediction classes.
643
+
644
+ method : {'sigmoid', 'isotonic'}
645
+ The method to use for calibration.
646
+
647
+ sample_weight : ndarray, shape (n_samples,), default=None
648
+ Sample weights. If None, then samples are equally weighted.
649
+
650
+ Returns
651
+ -------
652
+ pipeline : _CalibratedClassifier instance
653
+ """
654
+ Y = label_binarize(y, classes=classes)
655
+ label_encoder = LabelEncoder().fit(classes)
656
+ pos_class_indices = label_encoder.transform(clf.classes_)
657
+ calibrators = []
658
+ for class_idx, this_pred in zip(pos_class_indices, predictions.T):
659
+ if method == "isotonic":
660
+ calibrator = IsotonicRegression(out_of_bounds="clip")
661
+ else: # "sigmoid"
662
+ calibrator = _SigmoidCalibration()
663
+ calibrator.fit(this_pred, Y[:, class_idx], sample_weight)
664
+ calibrators.append(calibrator)
665
+
666
+ pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes)
667
+ return pipeline
668
+
669
+
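+ # --- Editor's illustrative sketch (not library code): the one-vs-rest
+ # layout `_fit_calibrator` relies on. `label_binarize` turns the target
+ # into one indicator column per class, and one calibrator is fitted per
+ # column; the `_demo_*` name is invented for the demo.
+ _demo_Y = label_binarize([0, 1, 2, 1], classes=[0, 1, 2])
+ # -> [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0]]; column k is the binary
+ #    target for class k, matching one column of `predictions`.
+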
670
+ class _CalibratedClassifier:
671
+ """Pipeline-like chaining a fitted classifier and its fitted calibrators.
672
+
673
+ Parameters
674
+ ----------
675
+ estimator : estimator instance
676
+ Fitted classifier.
677
+
678
+ calibrators : list of fitted estimator instances
679
+ List of fitted calibrators (either 'IsotonicRegression' or
680
+ '_SigmoidCalibration'). The number of calibrators equals the number of
681
+ classes. However, if there are 2 classes, the list contains only one
682
+ fitted calibrator.
683
+
684
+ classes : array-like of shape (n_classes,)
685
+ All the prediction classes.
686
+
687
+ method : {'sigmoid', 'isotonic'}, default='sigmoid'
688
+ The method to use for calibration. Can be 'sigmoid' which
689
+ corresponds to Platt's method or 'isotonic' which is a
690
+ non-parametric approach based on isotonic regression.
691
+ """
692
+
693
+ def __init__(self, estimator, calibrators, *, classes, method="sigmoid"):
694
+ self.estimator = estimator
695
+ self.calibrators = calibrators
696
+ self.classes = classes
697
+ self.method = method
698
+
699
+ def predict_proba(self, X):
700
+ """Calculate calibrated probabilities.
701
+
702
+ Calculates calibrated classification probabilities
703
+ for each class, in a one-vs-all manner, for `X`.
704
+
705
+ Parameters
706
+ ----------
707
+ X : ndarray of shape (n_samples, n_features)
708
+ The sample data.
709
+
710
+ Returns
711
+ -------
712
+ proba : array, shape (n_samples, n_classes)
713
+ The predicted probabilities. Can be exact zeros.
714
+ """
715
+ predictions, _ = _get_response_values(
716
+ self.estimator,
717
+ X,
718
+ response_method=["decision_function", "predict_proba"],
719
+ )
720
+ if predictions.ndim == 1:
721
+ # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
722
+ predictions = predictions.reshape(-1, 1)
723
+
724
+ n_classes = len(self.classes)
725
+
726
+ label_encoder = LabelEncoder().fit(self.classes)
727
+ pos_class_indices = label_encoder.transform(self.estimator.classes_)
728
+
729
+ proba = np.zeros((_num_samples(X), n_classes))
730
+ for class_idx, this_pred, calibrator in zip(
731
+ pos_class_indices, predictions.T, self.calibrators
732
+ ):
733
+ if n_classes == 2:
734
+ # When binary, `predictions` consists only of predictions for
735
+ # clf.classes_[1] but `pos_class_indices` = 0
736
+ class_idx += 1
737
+ proba[:, class_idx] = calibrator.predict(this_pred)
738
+
739
+ # Normalize the probabilities
740
+ if n_classes == 2:
741
+ proba[:, 0] = 1.0 - proba[:, 1]
742
+ else:
743
+ denominator = np.sum(proba, axis=1)[:, np.newaxis]
744
+ # In the edge case where for each class calibrator returns a null
745
+ # probability for a given sample, use the uniform distribution
746
+ # instead.
747
+ uniform_proba = np.full_like(proba, 1 / n_classes)
748
+ proba = np.divide(
749
+ proba, denominator, out=uniform_proba, where=denominator != 0
750
+ )
751
+
752
+ # Deal with cases where the predicted probability minimally exceeds 1.0
753
+ proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
754
+
755
+ return proba
756
+
757
+
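+ # --- Editor's illustrative sketch (not library code): the zero-row
+ # fallback used above, in isolation. A sample whose calibrated scores sum
+ # to zero falls back to the uniform distribution rather than dividing by
+ # zero; the `_demo_*` names are invented for the demo.
+ _demo_proba = np.array([[0.3, 0.1], [0.0, 0.0]])
+ _demo_denom = _demo_proba.sum(axis=1)[:, np.newaxis]
+ _demo_uniform = np.full_like(_demo_proba, 1 / 2)
+ _demo_normed = np.divide(
+     _demo_proba, _demo_denom, out=_demo_uniform, where=_demo_denom != 0
+ )  # -> [[0.75, 0.25], [0.5, 0.5]]
+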
758
+ # The max_abs_prediction_threshold was approximated using
759
+ # logit(np.finfo(np.float64).eps) which is about -36
760
+ def _sigmoid_calibration(
761
+ predictions, y, sample_weight=None, max_abs_prediction_threshold=30
762
+ ):
763
+ """Probability Calibration with sigmoid method (Platt 2000)
764
+
765
+ Parameters
766
+ ----------
767
+ predictions : ndarray of shape (n_samples,)
768
+ The decision function or predict proba for the samples.
769
+
770
+ y : ndarray of shape (n_samples,)
771
+ The targets.
772
+
773
+ sample_weight : array-like of shape (n_samples,), default=None
774
+ Sample weights. If None, then samples are equally weighted.
775
+
776
+ Returns
777
+ -------
778
+ a : float
779
+ The slope.
780
+
781
+ b : float
782
+ The intercept.
783
+
784
+ References
785
+ ----------
786
+ Platt, "Probabilistic Outputs for Support Vector Machines"
787
+ """
788
+ predictions = column_or_1d(predictions)
789
+ y = column_or_1d(y)
790
+
791
+ F = predictions # F follows Platt's notations
792
+
793
+ scale_constant = 1.0
794
+ max_prediction = np.max(np.abs(F))
795
+
796
+ # If the predictions have large values we scale them in order to bring
797
+ # them within a suitable range. This has no effect on the final
798
+ # (prediction) result because linear models like Logistic Regression
799
+ # without a penalty are invariant to multiplying the features by a
800
+ # constant.
801
+ if max_prediction >= max_abs_prediction_threshold:
802
+ scale_constant = max_prediction
803
+ # We rescale the features in a copy: inplace rescaling could confuse
804
+ # the caller and make the code harder to reason about.
805
+ F = F / scale_constant
806
+
807
+ # Bayesian priors (see Platt end of section 2.2):
808
+ # prior0 and prior1 correspond to the (weighted) counts of negative and
809
+ # positive samples, taking `sample_weight` into account.
810
+ mask_negative_samples = y <= 0
811
+ if sample_weight is not None:
812
+ prior0 = (sample_weight[mask_negative_samples]).sum()
813
+ prior1 = (sample_weight[~mask_negative_samples]).sum()
814
+ else:
815
+ prior0 = float(np.sum(mask_negative_samples))
816
+ prior1 = y.shape[0] - prior0
817
+ T = np.zeros_like(y, dtype=predictions.dtype)
818
+ T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
819
+ T[y <= 0] = 1.0 / (prior0 + 2.0)
820
+
821
+ bin_loss = HalfBinomialLoss()
822
+
823
+ def loss_grad(AB):
824
+ # .astype below is needed to ensure y_true and raw_prediction have the
825
+ # same dtype. With result = np.float64(0) * np.array([1, 2], dtype=np.float32)
826
+ # - in Numpy 2, result.dtype is float64
827
+ # - in Numpy<2, result.dtype is float32
828
+ raw_prediction = -(AB[0] * F + AB[1]).astype(dtype=predictions.dtype)
829
+ l, g = bin_loss.loss_gradient(
830
+ y_true=T,
831
+ raw_prediction=raw_prediction,
832
+ sample_weight=sample_weight,
833
+ )
834
+ loss = l.sum()
835
+ # TODO: Remove casting to np.float64 when minimum supported SciPy is 1.11.2
836
+ # With SciPy >= 1.11.2, the LBFGS implementation will cast to float64
837
+ # https://github.com/scipy/scipy/pull/18825.
838
+ # Here we cast to float64 to support SciPy < 1.11.2
839
+ grad = np.asarray([-g @ F, -g.sum()], dtype=np.float64)
840
+ return loss, grad
841
+
842
+ AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
843
+
844
+ opt_result = minimize(
845
+ loss_grad,
846
+ AB0,
847
+ method="L-BFGS-B",
848
+ jac=True,
849
+ options={
850
+ "gtol": 1e-6,
851
+ "ftol": 64 * np.finfo(float).eps,
852
+ },
853
+ )
854
+ AB_ = opt_result.x
855
+
856
+ # The tuned multiplicative parameter is converted back to the original
857
+ # input feature scale. The offset parameter does not need rescaling since
858
+ # we did not rescale the outcome variable.
859
+ return AB_[0] / scale_constant, AB_[1]
860
+
861
+
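+ # --- Editor's illustrative sketch (not library code): Platt scaling end
+ # to end on toy decision scores. `_SigmoidCalibration.predict` below
+ # applies the same expit(-(a * T + b)) formula; the `_demo_*` names are
+ # invented for the demo.
+ _demo_scores = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
+ _demo_y = np.array([0, 0, 1, 1, 1])
+ _demo_a, _demo_b = _sigmoid_calibration(_demo_scores, _demo_y)
+ _demo_proba = expit(-(_demo_a * _demo_scores + _demo_b))  # monotone in scores
+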
862
+ class _SigmoidCalibration(RegressorMixin, BaseEstimator):
863
+ """Sigmoid regression model.
864
+
865
+ Attributes
866
+ ----------
867
+ a_ : float
868
+ The slope.
869
+
870
+ b_ : float
871
+ The intercept.
872
+ """
873
+
874
+ def fit(self, X, y, sample_weight=None):
875
+ """Fit the model using X, y as training data.
876
+
877
+ Parameters
878
+ ----------
879
+ X : array-like of shape (n_samples,)
880
+ Training data.
881
+
882
+ y : array-like of shape (n_samples,)
883
+ Training target.
884
+
885
+ sample_weight : array-like of shape (n_samples,), default=None
886
+ Sample weights. If None, then samples are equally weighted.
887
+
888
+ Returns
889
+ -------
890
+ self : object
891
+ Returns an instance of self.
892
+ """
893
+ X = column_or_1d(X)
894
+ y = column_or_1d(y)
895
+ X, y = indexable(X, y)
896
+
897
+ self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
898
+ return self
899
+
900
+ def predict(self, T):
901
+ """Predict new data by linear interpolation.
902
+
903
+ Parameters
904
+ ----------
905
+ T : array-like of shape (n_samples,)
906
+ Data to predict from.
907
+
908
+ Returns
909
+ -------
910
+ T_ : ndarray of shape (n_samples,)
911
+ The predicted data.
912
+ """
913
+ T = column_or_1d(T)
914
+ return expit(-(self.a_ * T + self.b_))
915
+
916
+
917
+ @validate_params(
918
+ {
919
+ "y_true": ["array-like"],
920
+ "y_prob": ["array-like"],
921
+ "pos_label": [Real, str, "boolean", None],
922
+ "n_bins": [Interval(Integral, 1, None, closed="left")],
923
+ "strategy": [StrOptions({"uniform", "quantile"})],
924
+ },
925
+ prefer_skip_nested_validation=True,
926
+ )
927
+ def calibration_curve(
928
+ y_true,
929
+ y_prob,
930
+ *,
931
+ pos_label=None,
932
+ n_bins=5,
933
+ strategy="uniform",
934
+ ):
935
+ """Compute true and predicted probabilities for a calibration curve.
936
+
937
+ The method assumes the inputs come from a binary classifier, and
938
+ discretizes the [0, 1] interval into bins.
939
+
940
+ Calibration curves may also be referred to as reliability diagrams.
941
+
942
+ Read more in the :ref:`User Guide <calibration>`.
943
+
944
+ Parameters
945
+ ----------
946
+ y_true : array-like of shape (n_samples,)
947
+ True targets.
948
+
949
+ y_prob : array-like of shape (n_samples,)
950
+ Probabilities of the positive class.
951
+
952
+ pos_label : int, float, bool or str, default=None
953
+ The label of the positive class.
954
+
955
+ .. versionadded:: 1.1
956
+
957
+ n_bins : int, default=5
958
+ Number of bins to discretize the [0, 1] interval. A bigger number
959
+ requires more data. Bins with no samples (i.e. without
960
+ corresponding values in `y_prob`) will not be returned, thus the
961
+ returned arrays may have less than `n_bins` values.
962
+
963
+ strategy : {'uniform', 'quantile'}, default='uniform'
964
+ Strategy used to define the widths of the bins.
965
+
966
+ uniform
967
+ The bins have identical widths.
968
+ quantile
969
+ The bins have the same number of samples and depend on `y_prob`.
970
+
971
+ Returns
972
+ -------
973
+ prob_true : ndarray of shape (n_bins,) or smaller
974
+ The proportion of samples whose class is the positive class, in each
975
+ bin (fraction of positives).
976
+
977
+ prob_pred : ndarray of shape (n_bins,) or smaller
978
+ The mean predicted probability in each bin.
979
+
980
+ References
981
+ ----------
982
+ Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
983
+ Probabilities With Supervised Learning, in Proceedings of the 22nd
984
+ International Conference on Machine Learning (ICML).
985
+ See section 4 (Qualitative Analysis of Predictions).
986
+
987
+ Examples
988
+ --------
989
+ >>> import numpy as np
990
+ >>> from sklearn.calibration import calibration_curve
991
+ >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
992
+ >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
993
+ >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
994
+ >>> prob_true
995
+ array([0. , 0.5, 1. ])
996
+ >>> prob_pred
997
+ array([0.2 , 0.525, 0.85 ])
998
+ """
999
+ y_true = column_or_1d(y_true)
1000
+ y_prob = column_or_1d(y_prob)
1001
+ check_consistent_length(y_true, y_prob)
1002
+ pos_label = _check_pos_label_consistency(pos_label, y_true)
1003
+
1004
+ if y_prob.min() < 0 or y_prob.max() > 1:
1005
+ raise ValueError("y_prob has values outside [0, 1].")
1006
+
1007
+ labels = np.unique(y_true)
1008
+ if len(labels) > 2:
1009
+ raise ValueError(
1010
+ f"Only binary classification is supported. Provided labels {labels}."
1011
+ )
1012
+ y_true = y_true == pos_label
1013
+
1014
+ if strategy == "quantile": # Determine bin edges by distribution of data
1015
+ quantiles = np.linspace(0, 1, n_bins + 1)
1016
+ bins = np.percentile(y_prob, quantiles * 100)
1017
+ elif strategy == "uniform":
1018
+ bins = np.linspace(0.0, 1.0, n_bins + 1)
1019
+ else:
1020
+ raise ValueError(
1021
+ "Invalid entry to 'strategy' input. Strategy "
1022
+ "must be either 'quantile' or 'uniform'."
1023
+ )
1024
+
1025
+ binids = np.searchsorted(bins[1:-1], y_prob)
1026
+
1027
+ bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
1028
+ bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
1029
+ bin_total = np.bincount(binids, minlength=len(bins))
1030
+
1031
+ nonzero = bin_total != 0
1032
+ prob_true = bin_true[nonzero] / bin_total[nonzero]
1033
+ prob_pred = bin_sums[nonzero] / bin_total[nonzero]
1034
+
1035
+ return prob_true, prob_pred
1036
+
1037
+
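+ # --- Editor's illustrative sketch (not library code): the binning trick
+ # used above, in isolation. `searchsorted` on the inner bin edges maps
+ # each probability to a bin id, and `bincount` with weights accumulates
+ # per-bin sums in a single pass; the `_demo_*` names are invented for the
+ # demo.
+ _demo_prob = np.array([0.05, 0.15, 0.95])
+ _demo_bins = np.linspace(0.0, 1.0, 5 + 1)  # 5 uniform bins on [0, 1]
+ _demo_ids = np.searchsorted(_demo_bins[1:-1], _demo_prob)  # -> [0, 0, 4]
+ _demo_sums = np.bincount(_demo_ids, weights=_demo_prob, minlength=len(_demo_bins))
+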
1038
+ class CalibrationDisplay(_BinaryClassifierCurveDisplayMixin):
1039
+ """Calibration curve (also known as reliability diagram) visualization.
1040
+
1041
+ It is recommended to use
1042
+ :func:`~sklearn.calibration.CalibrationDisplay.from_estimator` or
1043
+ :func:`~sklearn.calibration.CalibrationDisplay.from_predictions`
1044
+ to create a `CalibrationDisplay`. All parameters are stored as attributes.
1045
+
1046
+ Read more about calibration in the :ref:`User Guide <calibration>` and
1047
+ more about the scikit-learn visualization API in :ref:`visualizations`.
1048
+
1049
+ .. versionadded:: 1.0
1050
+
1051
+ Parameters
1052
+ ----------
1053
+ prob_true : ndarray of shape (n_bins,)
1054
+ The proportion of samples whose class is the positive class (fraction
1055
+ of positives), in each bin.
1056
+
1057
+ prob_pred : ndarray of shape (n_bins,)
1058
+ The mean predicted probability in each bin.
1059
+
1060
+ y_prob : ndarray of shape (n_samples,)
1061
+ Probability estimates for the positive class, for each sample.
1062
+
1063
+ estimator_name : str, default=None
1064
+ Name of estimator. If None, the estimator name is not shown.
1065
+
1066
+ pos_label : int, float, bool or str, default=None
1067
+ The positive class when computing the calibration curve.
1068
+ By default, `pos_label` is set to `estimators.classes_[1]` when using
1069
+ `from_estimator` and set to 1 when using `from_predictions`.
1070
+
1071
+ .. versionadded:: 1.1
1072
+
1073
+ Attributes
1074
+ ----------
1075
+ line_ : matplotlib Artist
1076
+ Calibration curve.
1077
+
1078
+ ax_ : matplotlib Axes
1079
+ Axes with calibration curve.
1080
+
1081
+ figure_ : matplotlib Figure
1082
+ Figure containing the curve.
1083
+
1084
+ See Also
1085
+ --------
1086
+ calibration_curve : Compute true and predicted probabilities for a
1087
+ calibration curve.
1088
+ CalibrationDisplay.from_predictions : Plot calibration curve using true
1089
+ and predicted labels.
1090
+ CalibrationDisplay.from_estimator : Plot calibration curve using an
1091
+ estimator and data.
1092
+
1093
+ Examples
1094
+ --------
1095
+ >>> from sklearn.datasets import make_classification
1096
+ >>> from sklearn.model_selection import train_test_split
1097
+ >>> from sklearn.linear_model import LogisticRegression
1098
+ >>> from sklearn.calibration import calibration_curve, CalibrationDisplay
1099
+ >>> X, y = make_classification(random_state=0)
1100
+ >>> X_train, X_test, y_train, y_test = train_test_split(
1101
+ ... X, y, random_state=0)
1102
+ >>> clf = LogisticRegression(random_state=0)
1103
+ >>> clf.fit(X_train, y_train)
1104
+ LogisticRegression(random_state=0)
1105
+ >>> y_prob = clf.predict_proba(X_test)[:, 1]
1106
+ >>> prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10)
1107
+ >>> disp = CalibrationDisplay(prob_true, prob_pred, y_prob)
1108
+ >>> disp.plot()
1109
+ <...>
1110
+ """
1111
+
1112
+ def __init__(
1113
+ self, prob_true, prob_pred, y_prob, *, estimator_name=None, pos_label=None
1114
+ ):
1115
+ self.prob_true = prob_true
1116
+ self.prob_pred = prob_pred
1117
+ self.y_prob = y_prob
1118
+ self.estimator_name = estimator_name
1119
+ self.pos_label = pos_label
1120
+
1121
+ def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
1122
+ """Plot visualization.
1123
+
1124
+ Extra keyword arguments will be passed to
1125
+ :func:`matplotlib.pyplot.plot`.
1126
+
1127
+ Parameters
1128
+ ----------
1129
+ ax : Matplotlib Axes, default=None
1130
+ Axes object to plot on. If `None`, a new figure and axes is
1131
+ created.
1132
+
1133
+ name : str, default=None
1134
+ Name for labeling curve. If `None`, use `estimator_name` if
1135
+ not `None`, otherwise no labeling is shown.
1136
+
1137
+ ref_line : bool, default=True
1138
+ If `True`, plots a reference line representing a perfectly
1139
+ calibrated classifier.
1140
+
1141
+ **kwargs : dict
1142
+ Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
1143
+
1144
+ Returns
1145
+ -------
1146
+ display : :class:`~sklearn.calibration.CalibrationDisplay`
1147
+ Object that stores computed values.
1148
+ """
1149
+ self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)
1150
+
1151
+ info_pos_label = (
1152
+ f"(Positive class: {self.pos_label})" if self.pos_label is not None else ""
1153
+ )
1154
+
1155
+ line_kwargs = {"marker": "s", "linestyle": "-"}
1156
+ if name is not None:
1157
+ line_kwargs["label"] = name
1158
+ line_kwargs.update(**kwargs)
1159
+
1160
+ ref_line_label = "Perfectly calibrated"
1161
+ existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1]
1162
+ if ref_line and not existing_ref_line:
1163
+ self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label)
1164
+ self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0]
1165
+
1166
+ # We always have to show the legend for at least the reference line
1167
+ self.ax_.legend(loc="lower right")
1168
+
1169
+ xlabel = f"Mean predicted probability {info_pos_label}"
1170
+ ylabel = f"Fraction of positives {info_pos_label}"
1171
+ self.ax_.set(xlabel=xlabel, ylabel=ylabel)
1172
+
1173
+ return self
1174
+
1175
+ @classmethod
1176
+ def from_estimator(
1177
+ cls,
1178
+ estimator,
1179
+ X,
1180
+ y,
1181
+ *,
1182
+ n_bins=5,
1183
+ strategy="uniform",
1184
+ pos_label=None,
1185
+ name=None,
1186
+ ref_line=True,
1187
+ ax=None,
1188
+ **kwargs,
1189
+ ):
1190
+ """Plot calibration curve using a binary classifier and data.
1191
+
1192
+ A calibration curve, also known as a reliability diagram, uses inputs
1193
+ from a binary classifier and plots the average predicted probability
1194
+ for each bin against the fraction of positive classes, on the
1195
+ y-axis.
1196
+
1197
+ Extra keyword arguments will be passed to
1198
+ :func:`matplotlib.pyplot.plot`.
1199
+
1200
+ Read more about calibration in the :ref:`User Guide <calibration>` and
1201
+ more about the scikit-learn visualization API in :ref:`visualizations`.
1202
+
1203
+ .. versionadded:: 1.0
1204
+
1205
+ Parameters
1206
+ ----------
1207
+ estimator : estimator instance
1208
+ Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
1209
+ in which the last estimator is a classifier. The classifier must
1210
+ have a :term:`predict_proba` method.
1211
+
1212
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1213
+ Input values.
1214
+
1215
+ y : array-like of shape (n_samples,)
1216
+ Binary target values.
1217
+
1218
+ n_bins : int, default=5
1219
+ Number of bins to discretize the [0, 1] interval into when
1220
+ calculating the calibration curve. A bigger number requires more
1221
+ data.
1222
+
1223
+ strategy : {'uniform', 'quantile'}, default='uniform'
1224
+ Strategy used to define the widths of the bins.
1225
+
1226
+ - `'uniform'`: The bins have identical widths.
1227
+ - `'quantile'`: The bins have the same number of samples and depend
1228
+ on predicted probabilities.
1229
+
1230
+ pos_label : int, float, bool or str, default=None
1231
+ The positive class when computing the calibration curve.
1232
+ By default, `estimator.classes_[1]` is considered as the
1233
+ positive class.
1234
+
1235
+ .. versionadded:: 1.1
1236
+
1237
+ name : str, default=None
1238
+ Name for labeling curve. If `None`, the name of the estimator is
1239
+ used.
1240
+
1241
+ ref_line : bool, default=True
1242
+ If `True`, plots a reference line representing a perfectly
1243
+ calibrated classifier.
1244
+
1245
+ ax : matplotlib axes, default=None
1246
+ Axes object to plot on. If `None`, a new figure and axes is
1247
+ created.
1248
+
1249
+ **kwargs : dict
1250
+ Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
1251
+
1252
+ Returns
1253
+ -------
1254
+ display : :class:`~sklearn.calibration.CalibrationDisplay`.
1255
+ Object that stores computed values.
1256
+
1257
+ See Also
1258
+ --------
1259
+ CalibrationDisplay.from_predictions : Plot calibration curve using true
1260
+ and predicted labels.
1261
+
1262
+ Examples
1263
+ --------
1264
+ >>> import matplotlib.pyplot as plt
1265
+ >>> from sklearn.datasets import make_classification
1266
+ >>> from sklearn.model_selection import train_test_split
1267
+ >>> from sklearn.linear_model import LogisticRegression
1268
+ >>> from sklearn.calibration import CalibrationDisplay
1269
+ >>> X, y = make_classification(random_state=0)
1270
+ >>> X_train, X_test, y_train, y_test = train_test_split(
1271
+ ... X, y, random_state=0)
1272
+ >>> clf = LogisticRegression(random_state=0)
1273
+ >>> clf.fit(X_train, y_train)
1274
+ LogisticRegression(random_state=0)
1275
+ >>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
1276
+ >>> plt.show()
1277
+ """
1278
+ y_prob, pos_label, name = cls._validate_and_get_response_values(
1279
+ estimator,
1280
+ X,
1281
+ y,
1282
+ response_method="predict_proba",
1283
+ pos_label=pos_label,
1284
+ name=name,
1285
+ )
1286
+
1287
+ return cls.from_predictions(
1288
+ y,
1289
+ y_prob,
1290
+ n_bins=n_bins,
1291
+ strategy=strategy,
1292
+ pos_label=pos_label,
1293
+ name=name,
1294
+ ref_line=ref_line,
1295
+ ax=ax,
1296
+ **kwargs,
1297
+ )
1298
+
1299
+ @classmethod
1300
+ def from_predictions(
1301
+ cls,
1302
+ y_true,
1303
+ y_prob,
1304
+ *,
1305
+ n_bins=5,
1306
+ strategy="uniform",
1307
+ pos_label=None,
1308
+ name=None,
1309
+ ref_line=True,
1310
+ ax=None,
1311
+ **kwargs,
1312
+ ):
1313
+ """Plot calibration curve using true labels and predicted probabilities.
1314
+
1315
+ A calibration curve, also known as a reliability diagram, uses inputs
1316
+ from a binary classifier and plots the average predicted probability
1317
+ for each bin (on the x-axis) against the fraction of positive classes
1318
+ (on the y-axis).
1319
+
1320
+ Extra keyword arguments will be passed to
1321
+ :func:`matplotlib.pyplot.plot`.
1322
+
1323
+ Read more about calibration in the :ref:`User Guide <calibration>` and
1324
+ more about the scikit-learn visualization API in :ref:`visualizations`.
1325
+
1326
+ .. versionadded:: 1.0
1327
+
1328
+ Parameters
1329
+ ----------
1330
+ y_true : array-like of shape (n_samples,)
1331
+ True labels.
1332
+
1333
+ y_prob : array-like of shape (n_samples,)
1334
+ The predicted probabilities of the positive class.
1335
+
1336
+ n_bins : int, default=5
1337
+ Number of bins to discretize the [0, 1] interval into when
1338
+ calculating the calibration curve. A bigger number requires more
1339
+ data.
1340
+
1341
+ strategy : {'uniform', 'quantile'}, default='uniform'
1342
+ Strategy used to define the widths of the bins.
1343
+
1344
+ - `'uniform'`: The bins have identical widths.
1345
+ - `'quantile'`: The bins have the same number of samples and depend
1346
+ on predicted probabilities.
1347
+
1348
+ pos_label : int, float, bool or str, default=None
1349
+ The positive class when computing the calibration curve.
1350
+ By default `pos_label` is set to 1.
1351
+
1352
+ .. versionadded:: 1.1
1353
+
1354
+ name : str, default=None
1355
+ Name for labeling curve.
1356
+
1357
+ ref_line : bool, default=True
1358
+ If `True`, plots a reference line representing a perfectly
1359
+ calibrated classifier.
1360
+
1361
+ ax : matplotlib axes, default=None
1362
+ Axes object to plot on. If `None`, a new figure and axes is
1363
+ created.
1364
+
1365
+ **kwargs : dict
1366
+ Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
1367
+
1368
+ Returns
1369
+ -------
1370
+ display : :class:`~sklearn.calibration.CalibrationDisplay`.
1371
+ Object that stores computed values.
1372
+
1373
+ See Also
1374
+ --------
1375
+ CalibrationDisplay.from_estimator : Plot calibration curve using an
1376
+ estimator and data.
1377
+
1378
+ Examples
1379
+ --------
1380
+ >>> import matplotlib.pyplot as plt
1381
+ >>> from sklearn.datasets import make_classification
1382
+ >>> from sklearn.model_selection import train_test_split
1383
+ >>> from sklearn.linear_model import LogisticRegression
1384
+ >>> from sklearn.calibration import CalibrationDisplay
1385
+ >>> X, y = make_classification(random_state=0)
1386
+ >>> X_train, X_test, y_train, y_test = train_test_split(
1387
+ ... X, y, random_state=0)
1388
+ >>> clf = LogisticRegression(random_state=0)
1389
+ >>> clf.fit(X_train, y_train)
1390
+ LogisticRegression(random_state=0)
1391
+ >>> y_prob = clf.predict_proba(X_test)[:, 1]
1392
+ >>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
1393
+ >>> plt.show()
1394
+ """
1395
+ pos_label_validated, name = cls._validate_from_predictions_params(
1396
+ y_true, y_prob, sample_weight=None, pos_label=pos_label, name=name
1397
+ )
1398
+
1399
+ prob_true, prob_pred = calibration_curve(
1400
+ y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label
1401
+ )
1402
+
1403
+ disp = cls(
1404
+ prob_true=prob_true,
1405
+ prob_pred=prob_pred,
1406
+ y_prob=y_prob,
1407
+ estimator_name=name,
1408
+ pos_label=pos_label_validated,
1409
+ )
1410
+ return disp.plot(ax=ax, ref_line=ref_line, **kwargs)
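Because `plot`, `from_estimator`, and `from_predictions` all accept an existing `ax`, several models can be overlaid on one reliability diagram, and the legend check in `plot` ensures the reference line is drawn only once. A minimal sketch of that pattern (the `GaussianNB` baseline is an illustrative choice, not part of this module):

    import matplotlib.pyplot as plt
    from sklearn.calibration import CalibrationDisplay
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    from sklearn.naive_bayes import GaussianNB

    X, y = make_classification(n_samples=2000, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    fig, ax = plt.subplots()
    for model in (LogisticRegression(max_iter=1000), GaussianNB()):
        model.fit(X_train, y_train)
        # Reusing `ax` overlays the curves; the "Perfectly calibrated"
        # reference line is added only on the first call.
        CalibrationDisplay.from_estimator(model, X_test, y_test, n_bins=10, ax=ax)
    plt.show()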
llmeval-env/lib/python3.10/site-packages/sklearn/conftest.py ADDED
@@ -0,0 +1,309 @@
1
+ import builtins
2
+ import platform
3
+ import sys
4
+ from contextlib import suppress
5
+ from functools import wraps
6
+ from os import environ
7
+ from unittest import SkipTest
8
+
9
+ import joblib
10
+ import numpy as np
11
+ import pytest
12
+ from _pytest.doctest import DoctestItem
13
+ from threadpoolctl import threadpool_limits
14
+
15
+ from sklearn import config_context, set_config
16
+ from sklearn._min_dependencies import PYTEST_MIN_VERSION
17
+ from sklearn.datasets import (
18
+ fetch_20newsgroups,
19
+ fetch_20newsgroups_vectorized,
20
+ fetch_california_housing,
21
+ fetch_covtype,
22
+ fetch_kddcup99,
23
+ fetch_olivetti_faces,
24
+ fetch_rcv1,
25
+ fetch_species_distributions,
26
+ )
27
+ from sklearn.tests import random_seed
28
+ from sklearn.utils import _IS_32BIT
29
+ from sklearn.utils._testing import get_pytest_filterwarning_lines
30
+ from sklearn.utils.fixes import (
31
+ np_base_version,
32
+ parse_version,
33
+ sp_version,
34
+ )
35
+
36
+ if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION):
37
+ raise ImportError(
38
+ f"Your version of pytest is too old. Got version {pytest.__version__}, you"
39
+ f" should have pytest >= {PYTEST_MIN_VERSION} installed."
40
+ )
41
+
42
+ scipy_datasets_require_network = sp_version >= parse_version("1.10")
43
+
44
+
45
+ @pytest.fixture
46
+ def enable_slep006():
47
+ """Enable SLEP006 for all tests."""
48
+ with config_context(enable_metadata_routing=True):
49
+ yield
50
+
51
+
52
+ def raccoon_face_or_skip():
53
+ # SciPy >= 1.10 requires network access to get the data
54
+ if scipy_datasets_require_network:
55
+ run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
56
+ if not run_network_tests:
57
+ raise SkipTest("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0")
58
+
59
+ try:
60
+ import pooch # noqa
61
+ except ImportError:
62
+ raise SkipTest("test requires pooch to be installed")
63
+
64
+ from scipy.datasets import face
65
+ else:
66
+ from scipy.misc import face
67
+
68
+ return face(gray=True)
69
+
70
+
71
+ dataset_fetchers = {
72
+ "fetch_20newsgroups_fxt": fetch_20newsgroups,
73
+ "fetch_20newsgroups_vectorized_fxt": fetch_20newsgroups_vectorized,
74
+ "fetch_california_housing_fxt": fetch_california_housing,
75
+ "fetch_covtype_fxt": fetch_covtype,
76
+ "fetch_kddcup99_fxt": fetch_kddcup99,
77
+ "fetch_olivetti_faces_fxt": fetch_olivetti_faces,
78
+ "fetch_rcv1_fxt": fetch_rcv1,
79
+ "fetch_species_distributions_fxt": fetch_species_distributions,
80
+ }
81
+
82
+ if scipy_datasets_require_network:
83
+ dataset_fetchers["raccoon_face_fxt"] = raccoon_face_or_skip
84
+
85
+ _SKIP32_MARK = pytest.mark.skipif(
86
+ environ.get("SKLEARN_RUN_FLOAT32_TESTS", "0") != "1",
87
+ reason="Set SKLEARN_RUN_FLOAT32_TESTS=1 to run float32 dtype tests",
88
+ )
89
+
90
+
91
+ # Global fixtures
92
+ @pytest.fixture(params=[pytest.param(np.float32, marks=_SKIP32_MARK), np.float64])
93
+ def global_dtype(request):
94
+ yield request.param
95
+
96
+
97
+ def _fetch_fixture(f):
98
+ """Fetch dataset (download if missing and requested by environment)."""
99
+ download_if_missing = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
100
+
101
+ @wraps(f)
102
+ def wrapped(*args, **kwargs):
103
+ kwargs["download_if_missing"] = download_if_missing
104
+ try:
105
+ return f(*args, **kwargs)
106
+ except OSError as e:
107
+ if str(e) != "Data not found and `download_if_missing` is False":
108
+ raise
109
+ pytest.skip("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0")
110
+
111
+ return pytest.fixture(lambda: wrapped)
112
+
113
+
114
+ # Adds fixtures for fetching data
115
+ fetch_20newsgroups_fxt = _fetch_fixture(fetch_20newsgroups)
116
+ fetch_20newsgroups_vectorized_fxt = _fetch_fixture(fetch_20newsgroups_vectorized)
117
+ fetch_california_housing_fxt = _fetch_fixture(fetch_california_housing)
118
+ fetch_covtype_fxt = _fetch_fixture(fetch_covtype)
119
+ fetch_kddcup99_fxt = _fetch_fixture(fetch_kddcup99)
120
+ fetch_olivetti_faces_fxt = _fetch_fixture(fetch_olivetti_faces)
121
+ fetch_rcv1_fxt = _fetch_fixture(fetch_rcv1)
122
+ fetch_species_distributions_fxt = _fetch_fixture(fetch_species_distributions)
123
+ raccoon_face_fxt = pytest.fixture(raccoon_face_or_skip)
124
+
125
+
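Requesting one of the `*_fxt` fixtures above hands the test the wrapped fetcher with `download_if_missing` already injected from the environment. A hypothetical usage sketch (the test name and assertion are illustrative only):

    def test_20newsgroups_loads(fetch_20newsgroups_fxt):
        # The fixture yields the wrapped fetcher: calling it returns the
        # dataset, or skips the test when network tests are disabled.
        bunch = fetch_20newsgroups_fxt(subset="train")
        assert len(bunch.data) > 0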
126
+ def pytest_collection_modifyitems(config, items):
127
+ """Called after collect is completed.
128
+
129
+ Parameters
130
+ ----------
131
+ config : pytest config
132
+ items : list of collected items
133
+ """
134
+ run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
135
+ skip_network = pytest.mark.skip(
136
+ reason="test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0"
137
+ )
138
+
139
+ # download datasets during collection to avoid thread-unsafe behavior
140
+ # when running pytest in parallel with pytest-xdist
141
+ dataset_features_set = set(dataset_fetchers)
142
+ datasets_to_download = set()
143
+
144
+ for item in items:
145
+ if isinstance(item, DoctestItem) and "fetch_" in item.name:
146
+ fetcher_function_name = item.name.split(".")[-1]
147
+ dataset_fetchers_key = f"{fetcher_function_name}_fxt"
148
+ dataset_to_fetch = set([dataset_fetchers_key]) & dataset_features_set
149
+ elif not hasattr(item, "fixturenames"):
150
+ continue
151
+ else:
152
+ item_fixtures = set(item.fixturenames)
153
+ dataset_to_fetch = item_fixtures & dataset_features_set
154
+
155
+ if not dataset_to_fetch:
156
+ continue
157
+
158
+ if run_network_tests:
159
+ datasets_to_download |= dataset_to_fetch
160
+ else:
161
+ # network tests are skipped
162
+ item.add_marker(skip_network)
163
+
164
+ # Only download datasets on the first worker spawned by pytest-xdist
165
+ # to avoid thread-unsafe behavior. If pytest-xdist is not used, we still
166
+ # download before tests run.
167
+ worker_id = environ.get("PYTEST_XDIST_WORKER", "gw0")
168
+ if worker_id == "gw0" and run_network_tests:
169
+ for name in datasets_to_download:
170
+ with suppress(SkipTest):
171
+ dataset_fetchers[name]()
172
+
173
+ for item in items:
174
+ # Known failure with GradientBoostingClassifier on ARM64
175
+ if (
176
+ item.name.endswith("GradientBoostingClassifier")
177
+ and platform.machine() == "aarch64"
178
+ ):
179
+ marker = pytest.mark.xfail(
180
+ reason=(
181
+ "know failure. See "
182
+ "https://github.com/scikit-learn/scikit-learn/issues/17797" # noqa
183
+ )
184
+ )
185
+ item.add_marker(marker)
186
+
187
+ skip_doctests = False
188
+ try:
189
+ import matplotlib # noqa
190
+ except ImportError:
191
+ skip_doctests = True
192
+ reason = "matplotlib is required to run the doctests"
193
+
194
+ if _IS_32BIT:
195
+ reason = "doctest are only run when the default numpy int is 64 bits."
196
+ skip_doctests = True
197
+ elif sys.platform.startswith("win32"):
198
+ reason = (
199
+ "doctests are not run for Windows because numpy arrays "
200
+ "repr is inconsistent across platforms."
201
+ )
202
+ skip_doctests = True
203
+
204
+ if np_base_version >= parse_version("2"):
205
+ reason = "Due to NEP 51 numpy scalar repr has changed in numpy 2"
206
+ skip_doctests = True
207
+
208
+ # Normally doctest has the entire module's scope. Here we set globs to an empty dict
209
+ # to remove the module's scope:
210
+ # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context
211
+ for item in items:
212
+ if isinstance(item, DoctestItem):
213
+ item.dtest.globs = {}
214
+
215
+ if skip_doctests:
216
+ skip_marker = pytest.mark.skip(reason=reason)
217
+
218
+ for item in items:
219
+ if isinstance(item, DoctestItem):
220
+ # work around an internal error in pytest when adding a skip
221
+ # mark to a doctest in a context manager; see
222
+ # https://github.com/pytest-dev/pytest/issues/8796 for more
223
+ # details.
224
+ if item.name != "sklearn._config.config_context":
225
+ item.add_marker(skip_marker)
226
+ try:
227
+ import PIL # noqa
228
+
229
+ pillow_installed = True
230
+ except ImportError:
231
+ pillow_installed = False
232
+
233
+ if not pillow_installed:
234
+ skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!")
235
+ for item in items:
236
+ if item.name in [
237
+ "sklearn.feature_extraction.image.PatchExtractor",
238
+ "sklearn.feature_extraction.image.extract_patches_2d",
239
+ ]:
240
+ item.add_marker(skip_marker)
241
+
242
+
243
+ @pytest.fixture(scope="function")
244
+ def pyplot():
245
+ """Setup and teardown fixture for matplotlib.
246
+
247
+ This fixture checks if we can import matplotlib. If not, the tests will be
248
+ skipped. Otherwise, we close the figures before and after running the
249
+ functions.
250
+
251
+ Returns
252
+ -------
253
+ pyplot : module
254
+ The ``matplotlib.pyplot`` module.
255
+ """
256
+ pyplot = pytest.importorskip("matplotlib.pyplot")
257
+ pyplot.close("all")
258
+ yield pyplot
259
+ pyplot.close("all")
260
+
261
+
262
+ def pytest_configure(config):
263
+ # Use matplotlib agg backend during the tests including doctests
264
+ try:
265
+ import matplotlib
266
+
267
+ matplotlib.use("agg")
268
+ except ImportError:
269
+ pass
270
+
271
+ allowed_parallelism = joblib.cpu_count(only_physical_cores=True)
272
+ xdist_worker_count = environ.get("PYTEST_XDIST_WORKER_COUNT")
273
+ if xdist_worker_count is not None:
274
+ # Set the number of OpenMP and BLAS threads based on the number of workers
275
+ # xdist is using to prevent oversubscription.
276
+ allowed_parallelism = max(allowed_parallelism // int(xdist_worker_count), 1)
277
+ threadpool_limits(allowed_parallelism)
278
+
279
+ # Register global_random_seed plugin if it is not already registered
280
+ if not config.pluginmanager.hasplugin("sklearn.tests.random_seed"):
281
+ config.pluginmanager.register(random_seed)
282
+
283
+ if environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0":
284
+ # This seems like the only way to programmatically change the config
285
+ # filterwarnings. This was suggested in
286
+ # https://github.com/pytest-dev/pytest/issues/3311#issuecomment-373177592
287
+ for line in get_pytest_filterwarning_lines():
288
+ config.addinivalue_line("filterwarnings", line)
289
+
290
+
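The oversubscription guard in `pytest_configure` is plain integer arithmetic. For example, on a machine with 8 physical cores and `PYTEST_XDIST_WORKER_COUNT=4`:

    allowed_parallelism = max(8 // 4, 1)  # == 2 OpenMP/BLAS threads per worker

so the four xdist workers collectively stay within the 8 physical cores instead of each spawning a full thread pool.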
291
+ @pytest.fixture
292
+ def hide_available_pandas(monkeypatch):
293
+ """Pretend pandas was not installed."""
294
+ import_orig = builtins.__import__
295
+
296
+ def mocked_import(name, *args, **kwargs):
297
+ if name == "pandas":
298
+ raise ImportError()
299
+ return import_orig(name, *args, **kwargs)
300
+
301
+ monkeypatch.setattr(builtins, "__import__", mocked_import)
302
+
303
+
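Any test that requests `hide_available_pandas` sees `import pandas` fail, which is how pandas-optional code paths get exercised. A hypothetical test sketch (name and body are illustrative only):

    import pytest

    def test_fallback_without_pandas(hide_available_pandas):
        # The mocked builtins.__import__ raises as soon as pandas is imported.
        with pytest.raises(ImportError):
            import pandas  # noqa: F401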
304
+ @pytest.fixture
305
+ def print_changed_only_false():
306
+ """Set `print_changed_only` to False for the duration of the test."""
307
+ set_config(print_changed_only=False)
308
+ yield
309
+ set_config(print_changed_only=True) # reset to default
llmeval-env/lib/python3.10/site-packages/sklearn/discriminant_analysis.py ADDED
@@ -0,0 +1,1047 @@
1
+ """
2
+ Linear Discriminant Analysis and Quadratic Discriminant Analysis
3
+ """
4
+
5
+ # Authors: Clemens Brunner
6
+ # Martin Billinger
7
+ # Matthieu Perrot
8
+ # Mathieu Blondel
9
+
10
+ # License: BSD 3-Clause
11
+
12
+ import warnings
13
+ from numbers import Integral, Real
14
+
15
+ import numpy as np
16
+ import scipy.linalg
17
+ from scipy import linalg
18
+
19
+ from .base import (
20
+ BaseEstimator,
21
+ ClassifierMixin,
22
+ ClassNamePrefixFeaturesOutMixin,
23
+ TransformerMixin,
24
+ _fit_context,
25
+ )
26
+ from .covariance import empirical_covariance, ledoit_wolf, shrunk_covariance
27
+ from .linear_model._base import LinearClassifierMixin
28
+ from .preprocessing import StandardScaler
29
+ from .utils._array_api import _expit, device, get_namespace, size
30
+ from .utils._param_validation import HasMethods, Interval, StrOptions
31
+ from .utils.extmath import softmax
32
+ from .utils.multiclass import check_classification_targets, unique_labels
33
+ from .utils.validation import check_is_fitted
34
+
35
+ __all__ = ["LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis"]
36
+
37
+
38
+ def _cov(X, shrinkage=None, covariance_estimator=None):
39
+ """Estimate covariance matrix (using optional covariance_estimator).
40
+ Parameters
41
+ ----------
42
+ X : array-like of shape (n_samples, n_features)
43
+ Input data.
44
+
45
+ shrinkage : {'empirical', 'auto'} or float, default=None
46
+ Shrinkage parameter, possible values:
47
+ - None or 'empirical': no shrinkage (default).
48
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
49
+ - float between 0 and 1: fixed shrinkage parameter.
50
+
51
+ Shrinkage parameter is ignored if `covariance_estimator`
52
+ is not None.
53
+
54
+ covariance_estimator : estimator, default=None
55
+ If not None, `covariance_estimator` is used to estimate
56
+ the covariance matrices instead of relying on the empirical
57
+ covariance estimator (with potential shrinkage).
58
+ The object should have a fit method and a ``covariance_`` attribute
59
+ like the estimators in :mod:`sklearn.covariance`.
60
+ If None, the shrinkage parameter drives the estimate.
61
+
62
+ .. versionadded:: 0.24
63
+
64
+ Returns
65
+ -------
66
+ s : ndarray of shape (n_features, n_features)
67
+ Estimated covariance matrix.
68
+ """
69
+ if covariance_estimator is None:
70
+ shrinkage = "empirical" if shrinkage is None else shrinkage
71
+ if isinstance(shrinkage, str):
72
+ if shrinkage == "auto":
73
+ sc = StandardScaler() # standardize features
74
+ X = sc.fit_transform(X)
75
+ s = ledoit_wolf(X)[0]
76
+ # rescale
77
+ s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
78
+ elif shrinkage == "empirical":
79
+ s = empirical_covariance(X)
80
+ elif isinstance(shrinkage, Real):
81
+ s = shrunk_covariance(empirical_covariance(X), shrinkage)
82
+ else:
83
+ if shrinkage is not None and shrinkage != 0:
84
+ raise ValueError(
85
+ "covariance_estimator and shrinkage parameters "
86
+ "are not None. Only one of the two can be set."
87
+ )
88
+ covariance_estimator.fit(X)
89
+ if not hasattr(covariance_estimator, "covariance_"):
90
+ raise ValueError(
91
+ "%s does not have a covariance_ attribute"
92
+ % covariance_estimator.__class__.__name__
93
+ )
94
+ s = covariance_estimator.covariance_
95
+ return s
96
+
97
+
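For the fixed-shrinkage branch, `shrunk_covariance` blends the empirical covariance with a scaled identity. A minimal numpy sketch of that convex combination, shown only to make the formula concrete:

    import numpy as np

    def shrunk_cov_sketch(emp_cov, shrinkage):
        # Convex combination of the empirical covariance and mu * I,
        # where mu = trace(S) / n_features is the average variance.
        n_features = emp_cov.shape[0]
        mu = np.trace(emp_cov) / n_features
        return (1.0 - shrinkage) * emp_cov + shrinkage * mu * np.eye(n_features)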
98
+ def _class_means(X, y):
99
+ """Compute class means.
100
+
101
+ Parameters
102
+ ----------
103
+ X : array-like of shape (n_samples, n_features)
104
+ Input data.
105
+
106
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
107
+ Target values.
108
+
109
+ Returns
110
+ -------
111
+ means : array-like of shape (n_classes, n_features)
112
+ Class means.
113
+ """
114
+ xp, is_array_api_compliant = get_namespace(X)
115
+ classes, y = xp.unique_inverse(y)
116
+ means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype)
117
+
118
+ if is_array_api_compliant:
119
+ for i in range(classes.shape[0]):
120
+ means[i, :] = xp.mean(X[y == i], axis=0)
121
+ else:
122
+ # TODO: Explore the choice of using bincount + add.at, as it seems
123
+ # suboptimal performance-wise
124
+ cnt = np.bincount(y)
125
+ np.add.at(means, y, X)
126
+ means /= cnt[:, None]
127
+ return means
128
+
129
+
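The numpy branch above avoids a Python loop over classes: `np.add.at` scatter-adds each sample row into its class slot and `np.bincount` supplies the per-class counts. A tiny worked example of the same trick:

    import numpy as np

    X = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
    y = np.array([0, 1, 1])  # labels already encoded as 0..n_classes-1

    means = np.zeros((2, X.shape[1]))
    np.add.at(means, y, X)             # unbuffered scatter-add per class
    means /= np.bincount(y)[:, None]   # divide each row sum by its class count
    # means -> [[0., 1.], [3., 4.]]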
130
+ def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):
131
+ """Compute weighted within-class covariance matrix.
132
+
133
+ The per-class covariance matrices are weighted by the class priors.
134
+
135
+ Parameters
136
+ ----------
137
+ X : array-like of shape (n_samples, n_features)
138
+ Input data.
139
+
140
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
141
+ Target values.
142
+
143
+ priors : array-like of shape (n_classes,)
144
+ Class priors.
145
+
146
+ shrinkage : 'auto' or float, default=None
147
+ Shrinkage parameter, possible values:
148
+ - None: no shrinkage (default).
149
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
150
+ - float between 0 and 1: fixed shrinkage parameter.
151
+
152
+ Shrinkage parameter is ignored if `covariance_estimator` is not None.
153
+
154
+ covariance_estimator : estimator, default=None
155
+ If not None, `covariance_estimator` is used to estimate
156
+ the covariance matrices instead of relying on the empirical
157
+ covariance estimator (with potential shrinkage).
158
+ The object should have a fit method and a ``covariance_`` attribute
159
+ like the estimators in sklearn.covariance.
160
+ If None, the shrinkage parameter drives the estimate.
161
+
162
+ .. versionadded:: 0.24
163
+
164
+ Returns
165
+ -------
166
+ cov : array-like of shape (n_features, n_features)
167
+ Weighted within-class covariance matrix.
168
+ """
169
+ classes = np.unique(y)
170
+ cov = np.zeros(shape=(X.shape[1], X.shape[1]))
171
+ for idx, group in enumerate(classes):
172
+ Xg = X[y == group, :]
173
+ cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator))
174
+ return cov
175
+
176
+
177
+ class LinearDiscriminantAnalysis(
178
+ ClassNamePrefixFeaturesOutMixin,
179
+ LinearClassifierMixin,
180
+ TransformerMixin,
181
+ BaseEstimator,
182
+ ):
183
+ """Linear Discriminant Analysis.
184
+
185
+ A classifier with a linear decision boundary, generated by fitting class
186
+ conditional densities to the data and using Bayes' rule.
187
+
188
+ The model fits a Gaussian density to each class, assuming that all classes
189
+ share the same covariance matrix.
190
+
191
+ The fitted model can also be used to reduce the dimensionality of the input
192
+ by projecting it to the most discriminative directions, using the
193
+ `transform` method.
194
+
195
+ .. versionadded:: 0.17
196
+ *LinearDiscriminantAnalysis*.
197
+
198
+ Read more in the :ref:`User Guide <lda_qda>`.
199
+
200
+ Parameters
201
+ ----------
202
+ solver : {'svd', 'lsqr', 'eigen'}, default='svd'
203
+ Solver to use, possible values:
204
+ - 'svd': Singular value decomposition (default).
205
+ Does not compute the covariance matrix, therefore this solver is
206
+ recommended for data with a large number of features.
207
+ - 'lsqr': Least squares solution.
208
+ Can be combined with shrinkage or custom covariance estimator.
209
+ - 'eigen': Eigenvalue decomposition.
210
+ Can be combined with shrinkage or custom covariance estimator.
211
+
212
+ .. versionchanged:: 1.2
213
+ `solver="svd"` now has experimental Array API support. See the
214
+ :ref:`Array API User Guide <array_api>` for more details.
215
+
216
+ shrinkage : 'auto' or float, default=None
217
+ Shrinkage parameter, possible values:
218
+ - None: no shrinkage (default).
219
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
220
+ - float between 0 and 1: fixed shrinkage parameter.
221
+
222
+ This should be left to None if `covariance_estimator` is used.
223
+ Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
224
+
225
+ priors : array-like of shape (n_classes,), default=None
226
+ The class prior probabilities. By default, the class proportions are
227
+ inferred from the training data.
228
+
229
+ n_components : int, default=None
230
+ Number of components (<= min(n_classes - 1, n_features)) for
231
+ dimensionality reduction. If None, will be set to
232
+ min(n_classes - 1, n_features). This parameter only affects the
233
+ `transform` method.
234
+
235
+ store_covariance : bool, default=False
236
+ If True, explicitly compute the weighted within-class covariance
237
+ matrix when solver is 'svd'. The matrix is always computed
238
+ and stored for the other solvers.
239
+
240
+ .. versionadded:: 0.17
241
+
242
+ tol : float, default=1.0e-4
243
+ Absolute threshold for a singular value of X to be considered
244
+ significant, used to estimate the rank of X. Dimensions whose
245
+ singular values are non-significant are discarded. Only used if
246
+ solver is 'svd'.
247
+
248
+ .. versionadded:: 0.17
249
+
250
+ covariance_estimator : covariance estimator, default=None
251
+ If not None, `covariance_estimator` is used to estimate
252
+ the covariance matrices instead of relying on the empirical
253
+ covariance estimator (with potential shrinkage).
254
+ The object should have a fit method and a ``covariance_`` attribute
255
+ like the estimators in :mod:`sklearn.covariance`.
256
+ If None, the shrinkage parameter drives the estimate.
257
+
258
+ This should be left to None if `shrinkage` is used.
259
+ Note that `covariance_estimator` works only with 'lsqr' and 'eigen'
260
+ solvers.
261
+
262
+ .. versionadded:: 0.24
263
+
264
+ Attributes
265
+ ----------
266
+ coef_ : ndarray of shape (n_features,) or (n_classes, n_features)
267
+ Weight vector(s).
268
+
269
+ intercept_ : ndarray of shape (n_classes,)
270
+ Intercept term.
271
+
272
+ covariance_ : array-like of shape (n_features, n_features)
273
+ Weighted within-class covariance matrix. It corresponds to
274
+ `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the
275
+ samples in class `k`. The `C_k` are estimated using the (potentially
276
+ shrunk) biased estimator of covariance. If solver is 'svd', only
277
+ exists when `store_covariance` is True.
278
+
279
+ explained_variance_ratio_ : ndarray of shape (n_components,)
280
+ Percentage of variance explained by each of the selected components.
281
+ If ``n_components`` is not set then all components are stored and the
282
+ sum of explained variances is equal to 1.0. Only available when eigen
283
+ or svd solver is used.
284
+
285
+ means_ : array-like of shape (n_classes, n_features)
286
+ Class-wise means.
287
+
288
+ priors_ : array-like of shape (n_classes,)
289
+ Class priors (sum to 1).
290
+
291
+ scalings_ : array-like of shape (rank, n_classes - 1)
292
+ Scaling of the features in the space spanned by the class centroids.
293
+ Only available for 'svd' and 'eigen' solvers.
294
+
295
+ xbar_ : array-like of shape (n_features,)
296
+ Overall mean. Only present if solver is 'svd'.
297
+
298
+ classes_ : array-like of shape (n_classes,)
299
+ Unique class labels.
300
+
301
+ n_features_in_ : int
302
+ Number of features seen during :term:`fit`.
303
+
304
+ .. versionadded:: 0.24
305
+
306
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
307
+ Names of features seen during :term:`fit`. Defined only when `X`
308
+ has feature names that are all strings.
309
+
310
+ .. versionadded:: 1.0
311
+
312
+ See Also
313
+ --------
314
+ QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis.
315
+
316
+ Examples
317
+ --------
318
+ >>> import numpy as np
319
+ >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
320
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
321
+ >>> y = np.array([1, 1, 1, 2, 2, 2])
322
+ >>> clf = LinearDiscriminantAnalysis()
323
+ >>> clf.fit(X, y)
324
+ LinearDiscriminantAnalysis()
325
+ >>> print(clf.predict([[-0.8, -1]]))
326
+ [1]
327
+ """
328
+
329
+ _parameter_constraints: dict = {
330
+ "solver": [StrOptions({"svd", "lsqr", "eigen"})],
331
+ "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None],
332
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
333
+ "priors": ["array-like", None],
334
+ "store_covariance": ["boolean"],
335
+ "tol": [Interval(Real, 0, None, closed="left")],
336
+ "covariance_estimator": [HasMethods("fit"), None],
337
+ }
338
+
339
+ def __init__(
340
+ self,
341
+ solver="svd",
342
+ shrinkage=None,
343
+ priors=None,
344
+ n_components=None,
345
+ store_covariance=False,
346
+ tol=1e-4,
347
+ covariance_estimator=None,
348
+ ):
349
+ self.solver = solver
350
+ self.shrinkage = shrinkage
351
+ self.priors = priors
352
+ self.n_components = n_components
353
+ self.store_covariance = store_covariance # used only in svd solver
354
+ self.tol = tol # used only in svd solver
355
+ self.covariance_estimator = covariance_estimator
356
+
357
+ def _solve_lstsq(self, X, y, shrinkage, covariance_estimator):
358
+ """Least squares solver.
359
+
360
+ The least squares solver computes a straightforward solution of the
361
+ optimal decision rule based directly on the discriminant functions. It
362
+ can only be used for classification (with any covariance estimator),
363
+ because estimation of eigenvectors is not performed. Therefore,
364
+ dimensionality
365
+ reduction with the transform is not supported.
366
+
367
+ Parameters
368
+ ----------
369
+ X : array-like of shape (n_samples, n_features)
370
+ Training data.
371
+
372
+ y : array-like of shape (n_samples,) or (n_samples, n_classes)
373
+ Target values.
374
+
375
+ shrinkage : 'auto', float or None
376
+ Shrinkage parameter, possible values:
377
+ - None: no shrinkage.
378
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
379
+ - float between 0 and 1: fixed shrinkage parameter.
380
+
381
+ Shrinkage parameter is ignored if `covariance_estimator` is
383
+ not None.
383
+
384
+ covariance_estimator : estimator, default=None
385
+ If not None, `covariance_estimator` is used to estimate
386
+ the covariance matrices instead of relying on the empirical
388
+ covariance estimator (with potential shrinkage).
389
+ The object should have a fit method and a ``covariance_`` attribute
390
+ like the estimators in sklearn.covariance.
391
+ If None, the shrinkage parameter drives the estimate.
391
+
392
+ .. versionadded:: 0.24
393
+
394
+ Notes
395
+ -----
396
+ This solver is based on [1]_, section 2.6.2, pp. 39-41.
397
+
398
+ References
399
+ ----------
400
+ .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
401
+ (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
402
+ 0-471-05669-3.
403
+ """
404
+ self.means_ = _class_means(X, y)
405
+ self.covariance_ = _class_cov(
406
+ X, y, self.priors_, shrinkage, covariance_estimator
407
+ )
408
+ self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
409
+ self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(
410
+ self.priors_
411
+ )
412
+
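For orientation, the `coef_` and `intercept_` computed above implement the standard linear discriminant score from the Duda et al. reference:

    \delta_k(x) = x^\top \Sigma^{-1} \mu_k
                  - \tfrac{1}{2}\, \mu_k^\top \Sigma^{-1} \mu_k
                  + \log \pi_k

`linalg.lstsq(self.covariance_, self.means_.T)` solves for \Sigma^{-1}\mu_k without forming the inverse, and the `-0.5 * np.diag(...)` term is exactly -\tfrac{1}{2}\mu_k^\top \Sigma^{-1}\mu_k.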
413
+ def _solve_eigen(self, X, y, shrinkage, covariance_estimator):
414
+ """Eigenvalue solver.
415
+
416
+ The eigenvalue solver computes the optimal solution of the Rayleigh
417
+ coefficient (basically the ratio of between-class scatter to
419
+ within-class scatter). This solver supports both classification and
419
+ dimensionality reduction (with any covariance estimator).
420
+
421
+ Parameters
422
+ ----------
423
+ X : array-like of shape (n_samples, n_features)
424
+ Training data.
425
+
426
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
427
+ Target values.
428
+
429
+ shrinkage : 'auto', float or None
430
+ Shrinkage parameter, possible values:
431
+ - None: no shrinkage.
432
+ - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
433
+ - float between 0 and 1: fixed shrinkage constant.
434
+
435
+ Shrinkage parameter is ignored if `covariance_estimator` is
436
+ not None.
437
+
438
+ covariance_estimator : estimator, default=None
439
+ If not None, `covariance_estimator` is used to estimate
440
+ the covariance matrices instead of relying on the empirical
442
+ covariance estimator (with potential shrinkage).
443
+ The object should have a fit method and a ``covariance_`` attribute
444
+ like the estimators in sklearn.covariance.
445
+ If None, the shrinkage parameter drives the estimate.
445
+
446
+ .. versionadded:: 0.24
447
+
448
+ Notes
449
+ -----
450
+ This solver is based on [1]_, section 3.8.3, pp. 121-124.
451
+
452
+ References
453
+ ----------
454
+ .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
455
+ (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
456
+ 0-471-05669-3.
457
+ """
458
+ self.means_ = _class_means(X, y)
459
+ self.covariance_ = _class_cov(
460
+ X, y, self.priors_, shrinkage, covariance_estimator
461
+ )
462
+
463
+ Sw = self.covariance_ # within scatter
464
+ St = _cov(X, shrinkage, covariance_estimator) # total scatter
465
+ Sb = St - Sw # between scatter
466
+
467
+ evals, evecs = linalg.eigh(Sb, Sw)
468
+ self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][
469
+ : self._max_components
470
+ ]
471
+ evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
472
+
473
+ self.scalings_ = evecs
474
+ self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
475
+ self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(
476
+ self.priors_
477
+ )
478
+
479
+ def _solve_svd(self, X, y):
480
+ """SVD solver.
481
+
482
+ Parameters
483
+ ----------
484
+ X : array-like of shape (n_samples, n_features)
485
+ Training data.
486
+
487
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
488
+ Target values.
489
+ """
490
+ xp, is_array_api_compliant = get_namespace(X)
491
+
492
+ if is_array_api_compliant:
493
+ svd = xp.linalg.svd
494
+ else:
495
+ svd = scipy.linalg.svd
496
+
497
+ n_samples, n_features = X.shape
498
+ n_classes = self.classes_.shape[0]
499
+
500
+ self.means_ = _class_means(X, y)
501
+ if self.store_covariance:
502
+ self.covariance_ = _class_cov(X, y, self.priors_)
503
+
504
+ Xc = []
505
+ for idx, group in enumerate(self.classes_):
506
+ Xg = X[y == group]
507
+ Xc.append(Xg - self.means_[idx, :])
508
+
509
+ self.xbar_ = self.priors_ @ self.means_
510
+
511
+ Xc = xp.concat(Xc, axis=0)
512
+
513
+ # 1) within (univariate) scaling by within-class std-dev
514
+ std = xp.std(Xc, axis=0)
515
+ # avoid division by zero in normalization
516
+ std[std == 0] = 1.0
517
+ fac = xp.asarray(1.0 / (n_samples - n_classes))
518
+
519
+ # 2) Within variance scaling
520
+ X = xp.sqrt(fac) * (Xc / std)
521
+ # SVD of the centered, within-class scaled data
522
+ U, S, Vt = svd(X, full_matrices=False)
523
+
524
+ rank = xp.sum(xp.astype(S > self.tol, xp.int32))
525
+ # Scaling of within covariance is: V' 1/S
526
+ scalings = (Vt[:rank, :] / std).T / S[:rank]
527
+ fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1)
528
+
529
+ # 3) Between variance scaling
530
+ # Scale weighted centers
531
+ X = (
532
+ (xp.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T
533
+ ).T @ scalings
534
+ # The centers live in a space of at most n_classes-1 dimensions
535
+ # Use SVD to find projection in the space spanned by the
536
+ # (n_classes) centers
537
+ _, S, Vt = svd(X, full_matrices=False)
538
+
539
+ if self._max_components == 0:
540
+ self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype)
541
+ else:
542
+ self.explained_variance_ratio_ = (S**2 / xp.sum(S**2))[
543
+ : self._max_components
544
+ ]
545
+
546
+ rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32))
547
+ self.scalings_ = scalings @ Vt.T[:, :rank]
548
+ coef = (self.means_ - self.xbar_) @ self.scalings_
549
+ self.intercept_ = -0.5 * xp.sum(coef**2, axis=1) + xp.log(self.priors_)
550
+ self.coef_ = coef @ self.scalings_.T
551
+ self.intercept_ -= self.xbar_ @ self.coef_.T
552
+
553
+ @_fit_context(
554
+ # LinearDiscriminantAnalysis.covariance_estimator is not validated yet
555
+ prefer_skip_nested_validation=False
556
+ )
557
+ def fit(self, X, y):
558
+ """Fit the Linear Discriminant Analysis model.
559
+
560
+ .. versionchanged:: 0.19
561
+ *store_covariance* has been moved to main constructor.
562
+
563
+ .. versionchanged:: 0.19
564
+ *tol* has been moved to main constructor.
565
+
566
+ Parameters
567
+ ----------
568
+ X : array-like of shape (n_samples, n_features)
569
+ Training data.
570
+
571
+ y : array-like of shape (n_samples,)
572
+ Target values.
573
+
574
+ Returns
575
+ -------
576
+ self : object
577
+ Fitted estimator.
578
+ """
579
+ xp, _ = get_namespace(X)
580
+
581
+ X, y = self._validate_data(
582
+ X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32]
583
+ )
584
+ self.classes_ = unique_labels(y)
585
+ n_samples, _ = X.shape
586
+ n_classes = self.classes_.shape[0]
587
+
588
+ if n_samples == n_classes:
589
+ raise ValueError(
590
+ "The number of samples must be more than the number of classes."
591
+ )
592
+
593
+ if self.priors is None: # estimate priors from sample
594
+ _, cnts = xp.unique_counts(y) # non-negative ints
595
+ self.priors_ = xp.astype(cnts, X.dtype) / float(y.shape[0])
596
+ else:
597
+ self.priors_ = xp.asarray(self.priors, dtype=X.dtype)
598
+
599
+ if xp.any(self.priors_ < 0):
600
+ raise ValueError("priors must be non-negative")
601
+
602
+ if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5:
603
+ warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning)
604
+ self.priors_ = self.priors_ / self.priors_.sum()
605
+
606
+ # Maximum number of components no matter what n_components is
607
+ # specified:
608
+ max_components = min(n_classes - 1, X.shape[1])
609
+
610
+ if self.n_components is None:
611
+ self._max_components = max_components
612
+ else:
613
+ if self.n_components > max_components:
614
+ raise ValueError(
615
+ "n_components cannot be larger than min(n_features, n_classes - 1)."
616
+ )
617
+ self._max_components = self.n_components
618
+
619
+ if self.solver == "svd":
620
+ if self.shrinkage is not None:
621
+ raise NotImplementedError("shrinkage not supported with 'svd' solver.")
622
+ if self.covariance_estimator is not None:
623
+ raise ValueError(
624
+ "covariance estimator "
625
+ "is not supported "
626
+ "with svd solver. Try another solver"
627
+ )
628
+ self._solve_svd(X, y)
629
+ elif self.solver == "lsqr":
630
+ self._solve_lstsq(
631
+ X,
632
+ y,
633
+ shrinkage=self.shrinkage,
634
+ covariance_estimator=self.covariance_estimator,
635
+ )
636
+ elif self.solver == "eigen":
637
+ self._solve_eigen(
638
+ X,
639
+ y,
640
+ shrinkage=self.shrinkage,
641
+ covariance_estimator=self.covariance_estimator,
642
+ )
643
+ if size(self.classes_) == 2: # treat binary case as a special case
644
+ coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype)
645
+ self.coef_ = xp.reshape(coef_, (1, -1))
646
+ intercept_ = xp.asarray(
647
+ self.intercept_[1] - self.intercept_[0], dtype=X.dtype
648
+ )
649
+ self.intercept_ = xp.reshape(intercept_, (1,))
650
+ self._n_features_out = self._max_components
651
+ return self
652
+
653
+ def transform(self, X):
654
+ """Project data to maximize class separation.
655
+
656
+ Parameters
657
+ ----------
658
+ X : array-like of shape (n_samples, n_features)
659
+ Input data.
660
+
661
+ Returns
662
+ -------
663
+ X_new : ndarray of shape (n_samples, n_components) or \
664
+ (n_samples, min(rank, n_components))
665
+ Transformed data. In the case of the 'svd' solver, the shape
666
+ is (n_samples, min(rank, n_components)).
667
+ """
668
+ if self.solver == "lsqr":
669
+ raise NotImplementedError(
670
+ "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."
671
+ )
672
+ check_is_fitted(self)
673
+ xp, _ = get_namespace(X)
674
+ X = self._validate_data(X, reset=False)
675
+
676
+ if self.solver == "svd":
677
+ X_new = (X - self.xbar_) @ self.scalings_
678
+ elif self.solver == "eigen":
679
+ X_new = X @ self.scalings_
680
+
681
+ return X_new[:, : self._max_components]
682
+
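A short usage sketch of the projection (the iris dataset is an illustrative choice; with 3 classes and 4 features, at most 2 components are available):

    from sklearn.datasets import load_iris
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    X, y = load_iris(return_X_y=True)
    lda = LinearDiscriminantAnalysis(n_components=2).fit(X, y)
    X_2d = lda.transform(X)  # project onto the 2 most discriminative axes
    print(X_2d.shape)        # (150, 2)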
683
+ def predict_proba(self, X):
684
+ """Estimate probability.
685
+
686
+ Parameters
687
+ ----------
688
+ X : array-like of shape (n_samples, n_features)
689
+ Input data.
690
+
691
+ Returns
692
+ -------
693
+ C : ndarray of shape (n_samples, n_classes)
694
+ Estimated probabilities.
695
+ """
696
+ check_is_fitted(self)
697
+ xp, is_array_api_compliant = get_namespace(X)
698
+ decision = self.decision_function(X)
699
+ if size(self.classes_) == 2:
700
+ proba = _expit(decision)
701
+ return xp.stack([1 - proba, proba], axis=1)
702
+ else:
703
+ return softmax(decision)
704
+
705
+ def predict_log_proba(self, X):
706
+ """Estimate log probability.
707
+
708
+ Parameters
709
+ ----------
710
+ X : array-like of shape (n_samples, n_features)
711
+ Input data.
712
+
713
+ Returns
714
+ -------
715
+ C : ndarray of shape (n_samples, n_classes)
716
+ Estimated log probabilities.
717
+ """
718
+ xp, _ = get_namespace(X)
719
+ prediction = self.predict_proba(X)
720
+
721
+ info = xp.finfo(prediction.dtype)
722
+ if hasattr(info, "smallest_normal"):
723
+ smallest_normal = info.smallest_normal
724
+ else:
725
+ # smallest_normal was introduced in NumPy 1.22
726
+ smallest_normal = info.tiny
727
+
728
+ prediction[prediction == 0.0] += smallest_normal
729
+ return xp.log(prediction)
730
+
731
+ def decision_function(self, X):
732
+ """Apply decision function to an array of samples.
733
+
734
+ The decision function is equal (up to a constant factor) to the
735
+ log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
736
+ classification setting this instead corresponds to the difference
737
+ `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
738
+
739
+ Parameters
740
+ ----------
741
+ X : array-like of shape (n_samples, n_features)
742
+ Array of samples (test vectors).
743
+
744
+ Returns
745
+ -------
746
+ C : ndarray of shape (n_samples,) or (n_samples, n_classes)
747
+ Decision function values related to each class, per sample.
748
+ In the two-class case, the shape is (n_samples,), giving the
749
+ log likelihood ratio of the positive class.
750
+ """
751
+ # Only override for the doc
752
+ return super().decision_function(X)
753
+
754
+ def _more_tags(self):
755
+ return {"array_api_support": True}
756
+
757
+
758
+ class QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):
759
+ """Quadratic Discriminant Analysis.
760
+
761
+ A classifier with a quadratic decision boundary, generated
762
+ by fitting class conditional densities to the data
763
+ and using Bayes' rule.
764
+
765
+ The model fits a Gaussian density to each class.
766
+
767
+ .. versionadded:: 0.17
768
+ *QuadraticDiscriminantAnalysis*
769
+
770
+ Read more in the :ref:`User Guide <lda_qda>`.
771
+
772
+ Parameters
773
+ ----------
774
+ priors : array-like of shape (n_classes,), default=None
775
+ Class priors. By default, the class proportions are inferred from the
776
+ training data.
777
+
778
+ reg_param : float, default=0.0
779
+ Regularizes the per-class covariance estimates by transforming S2 as
780
+ ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``,
781
+ where S2 corresponds to the `scaling_` attribute of a given class.
782
+
783
+ store_covariance : bool, default=False
784
+ If True, the class covariance matrices are explicitly computed and
785
+ stored in the `self.covariance_` attribute.
786
+
787
+ .. versionadded:: 0.17
788
+
789
+ tol : float, default=1.0e-4
790
+ Absolute threshold for a singular value to be considered significant,
791
+ used to estimate the rank of `Xk` where `Xk` is the centered matrix
792
+ of samples in class k. This parameter does not affect the
793
+ predictions. It only controls a warning that is raised when features
794
+ are considered to be collinear.
795
+
796
+ .. versionadded:: 0.17
797
+
798
+ Attributes
799
+ ----------
800
+ covariance_ : list of len n_classes of ndarray \
801
+ of shape (n_features, n_features)
802
+ For each class, gives the covariance matrix estimated using the
803
+ samples of that class. The estimations are unbiased. Only present if
804
+ `store_covariance` is True.
805
+
806
+ means_ : array-like of shape (n_classes, n_features)
807
+ Class-wise means.
808
+
809
+ priors_ : array-like of shape (n_classes,)
810
+ Class priors (sum to 1).
811
+
812
+ rotations_ : list of len n_classes of ndarray of shape (n_features, n_k)
813
+ For each class k an array of shape (n_features, n_k), where
814
+ ``n_k = min(n_features, number of elements in class k)``
815
+ It is the rotation of the Gaussian distribution, i.e. its
816
+ principal axis. It corresponds to `V`, the matrix of eigenvectors
817
+ coming from the SVD of `Xk = U S Vt` where `Xk` is the centered
818
+ matrix of samples from class k.
819
+
820
+ scalings_ : list of len n_classes of ndarray of shape (n_k,)
821
+ For each class, contains the scaling of
822
+ the Gaussian distributions along its principal axes, i.e. the
823
+ variance in the rotated coordinate system. It corresponds to `S^2 /
824
+ (n_samples - 1)`, where `S` is the diagonal matrix of singular values
825
+ from the SVD of `Xk`, where `Xk` is the centered matrix of samples
826
+ from class k.
827
+
828
+ classes_ : ndarray of shape (n_classes,)
829
+ Unique class labels.
830
+
831
+ n_features_in_ : int
832
+ Number of features seen during :term:`fit`.
833
+
834
+ .. versionadded:: 0.24
835
+
836
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
837
+ Names of features seen during :term:`fit`. Defined only when `X`
838
+ has feature names that are all strings.
839
+
840
+ .. versionadded:: 1.0
841
+
842
+ See Also
843
+ --------
844
+ LinearDiscriminantAnalysis : Linear Discriminant Analysis.
845
+
846
+ Examples
847
+ --------
848
+ >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
849
+ >>> import numpy as np
850
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
851
+ >>> y = np.array([1, 1, 1, 2, 2, 2])
852
+ >>> clf = QuadraticDiscriminantAnalysis()
853
+ >>> clf.fit(X, y)
854
+ QuadraticDiscriminantAnalysis()
855
+ >>> print(clf.predict([[-0.8, -1]]))
856
+ [1]
857
+ """
858
+
859
+ _parameter_constraints: dict = {
860
+ "priors": ["array-like", None],
861
+ "reg_param": [Interval(Real, 0, 1, closed="both")],
862
+ "store_covariance": ["boolean"],
863
+ "tol": [Interval(Real, 0, None, closed="left")],
864
+ }
865
+
866
+ def __init__(
867
+ self, *, priors=None, reg_param=0.0, store_covariance=False, tol=1.0e-4
868
+ ):
869
+ self.priors = priors
870
+ self.reg_param = reg_param
871
+ self.store_covariance = store_covariance
872
+ self.tol = tol
873
+
874
+ @_fit_context(prefer_skip_nested_validation=True)
875
+ def fit(self, X, y):
876
+ """Fit the model according to the given training data and parameters.
877
+
878
+ .. versionchanged:: 0.19
879
+ ``store_covariances`` has been moved to main constructor as
880
+ ``store_covariance``
881
+
882
+ .. versionchanged:: 0.19
883
+ ``tol`` has been moved to main constructor.
884
+
885
+ Parameters
886
+ ----------
887
+ X : array-like of shape (n_samples, n_features)
888
+ Training vector, where `n_samples` is the number of samples and
889
+ `n_features` is the number of features.
890
+
891
+ y : array-like of shape (n_samples,)
892
+ Target values (integers).
893
+
894
+ Returns
895
+ -------
896
+ self : object
897
+ Fitted estimator.
898
+ """
899
+ X, y = self._validate_data(X, y)
900
+ check_classification_targets(y)
901
+ self.classes_, y = np.unique(y, return_inverse=True)
902
+ n_samples, n_features = X.shape
903
+ n_classes = len(self.classes_)
904
+ if n_classes < 2:
905
+ raise ValueError(
906
+ "The number of classes has to be greater than one; got %d class"
907
+ % (n_classes)
908
+ )
909
+ if self.priors is None:
910
+ self.priors_ = np.bincount(y) / float(n_samples)
911
+ else:
912
+ self.priors_ = np.array(self.priors)
913
+
914
+ cov = None
915
+ store_covariance = self.store_covariance
916
+ if store_covariance:
917
+ cov = []
918
+ means = []
919
+ scalings = []
920
+ rotations = []
921
+ for ind in range(n_classes):
922
+ Xg = X[y == ind, :]
923
+ meang = Xg.mean(0)
924
+ means.append(meang)
925
+ if len(Xg) == 1:
926
+ raise ValueError(
927
+ "y has only 1 sample in class %s, covariance is ill defined."
928
+ % str(self.classes_[ind])
929
+ )
930
+ Xgc = Xg - meang
931
+ # Xgc = U * S * V.T
932
+ _, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
933
+ rank = np.sum(S > self.tol)
934
+ if rank < n_features:
935
+ warnings.warn("Variables are collinear")
936
+ S2 = (S**2) / (len(Xg) - 1)
937
+ S2 = ((1 - self.reg_param) * S2) + self.reg_param
938
+ if self.store_covariance or store_covariance:
939
+ # cov = V * (S^2 / (n-1)) * V.T
940
+ cov.append(np.dot(S2 * Vt.T, Vt))
941
+ scalings.append(S2)
942
+ rotations.append(Vt.T)
943
+ if self.store_covariance or store_covariance:
944
+ self.covariance_ = cov
945
+ self.means_ = np.asarray(means)
946
+ self.scalings_ = scalings
947
+ self.rotations_ = rotations
948
+ return self
949
+
950
+ def _decision_function(self, X):
951
+ # return log posterior, see eq (4.12) p. 110 of the ESL.
952
+ check_is_fitted(self)
953
+
954
+ X = self._validate_data(X, reset=False)
955
+ norm2 = []
956
+ for i in range(len(self.classes_)):
957
+ R = self.rotations_[i]
958
+ S = self.scalings_[i]
959
+ Xm = X - self.means_[i]
960
+ X2 = np.dot(Xm, R * (S ** (-0.5)))
961
+ norm2.append(np.sum(X2**2, axis=1))
962
+ norm2 = np.array(norm2).T # shape = [len(X), n_classes]
963
+ u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
964
+ return -0.5 * (norm2 + u) + np.log(self.priors_)
965
+
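For orientation, the ESL eq. (4.12) log-posterior evaluated above (up to a shared constant) is:

    \delta_k(x) = -\tfrac{1}{2} \log \lvert \Sigma_k \rvert
                  - \tfrac{1}{2} (x - \mu_k)^\top \Sigma_k^{-1} (x - \mu_k)
                  + \log \pi_k

`norm2` holds the Mahalanobis term computed through the stored rotations and scalings, and `u = sum(log(S2))` equals \log\lvert\Sigma_k\rvert because the regularized `S2` are the eigenvalues of the class covariance.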
966
+ def decision_function(self, X):
967
+ """Apply decision function to an array of samples.
968
+
969
+ The decision function is equal (up to a constant factor) to the
970
+ log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
971
+ classification setting this instead corresponds to the difference
972
+ `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
973
+
974
+ Parameters
975
+ ----------
976
+ X : array-like of shape (n_samples, n_features)
977
+ Array of samples (test vectors).
978
+
979
+ Returns
980
+ -------
981
+ C : ndarray of shape (n_samples,) or (n_samples, n_classes)
982
+ Decision function values related to each class, per sample.
983
+ In the two-class case, the shape is (n_samples,), giving the
984
+ log likelihood ratio of the positive class.
985
+ """
986
+ dec_func = self._decision_function(X)
987
+ # handle special case of two classes
988
+ if len(self.classes_) == 2:
989
+ return dec_func[:, 1] - dec_func[:, 0]
990
+ return dec_func
991
+
992
+ def predict(self, X):
993
+ """Perform classification on an array of test vectors X.
994
+
995
+ The predicted class C for each sample in X is returned.
996
+
997
+ Parameters
998
+ ----------
999
+ X : array-like of shape (n_samples, n_features)
1000
+ Vector to be scored, where `n_samples` is the number of samples and
1001
+ `n_features` is the number of features.
1002
+
1003
+ Returns
1004
+ -------
1005
+ C : ndarray of shape (n_samples,)
1006
+ Predicted class labels.
1007
+ """
1008
+ d = self._decision_function(X)
1009
+ y_pred = self.classes_.take(d.argmax(1))
1010
+ return y_pred
1011
+
1012
+ def predict_proba(self, X):
1013
+ """Return posterior probabilities of classification.
1014
+
1015
+ Parameters
1016
+ ----------
1017
+ X : array-like of shape (n_samples, n_features)
1018
+ Array of samples/test vectors.
1019
+
1020
+ Returns
1021
+ -------
1022
+ C : ndarray of shape (n_samples, n_classes)
1023
+ Posterior probabilities of classification per class.
1024
+ """
1025
+ values = self._decision_function(X)
1026
+ # compute the likelihood of the underlying Gaussian models
1027
+ # up to a multiplicative constant.
1028
+ likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
1029
+ # compute posterior probabilities
1030
+ return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
1031
+
1032
+ def predict_log_proba(self, X):
1033
+ """Return log of posterior probabilities of classification.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ X : array-like of shape (n_samples, n_features)
1038
+ Array of samples/test vectors.
1039
+
1040
+ Returns
1041
+ -------
1042
+ C : ndarray of shape (n_samples, n_classes)
1043
+ Posterior log-probabilities of classification per class.
1044
+ """
1045
+ # XXX : can do better to avoid precision overflows
1046
+ probas_ = self.predict_proba(X)
1047
+ return np.log(probas_)
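A small sketch contrasting the two estimators on data whose classes have unequal covariances (the blob construction is illustrative, not from this module):

    import numpy as np
    from sklearn.discriminant_analysis import (
        LinearDiscriminantAnalysis,
        QuadraticDiscriminantAnalysis,
    )

    rng = np.random.RandomState(0)
    # Two Gaussian blobs with different covariance scales: QDA can model the
    # per-class covariances, while LDA assumes a single shared one.
    X = np.vstack([rng.randn(100, 2) * 0.5, rng.randn(100, 2) * 2.0 + 3.0])
    y = np.array([0] * 100 + [1] * 100)

    for clf in (LinearDiscriminantAnalysis(), QuadraticDiscriminantAnalysis()):
        clf.fit(X, y)
        print(type(clf).__name__, round(clf.score(X, y), 3))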
llmeval-env/lib/python3.10/site-packages/sklearn/dummy.py ADDED
@@ -0,0 +1,682 @@
1
+ # Author: Mathieu Blondel <[email protected]>
2
+ # Arnaud Joly <[email protected]>
3
+ # Maheshakya Wijewardena <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+ from numbers import Integral, Real
8
+
9
+ import numpy as np
10
+ import scipy.sparse as sp
11
+
12
+ from .base import (
13
+ BaseEstimator,
14
+ ClassifierMixin,
15
+ MultiOutputMixin,
16
+ RegressorMixin,
17
+ _fit_context,
18
+ )
19
+ from .utils import check_random_state
20
+ from .utils._param_validation import Interval, StrOptions
21
+ from .utils.multiclass import class_distribution
22
+ from .utils.random import _random_choice_csc
23
+ from .utils.stats import _weighted_percentile
24
+ from .utils.validation import (
25
+ _check_sample_weight,
26
+ _num_samples,
27
+ check_array,
28
+ check_consistent_length,
29
+ check_is_fitted,
30
+ )
31
+
32
+
33
+ class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):
34
+ """DummyClassifier makes predictions that ignore the input features.
35
+
36
+ This classifier serves as a simple baseline to compare against other more
37
+ complex classifiers.
38
+
39
+ The specific behavior of the baseline is selected with the `strategy`
40
+ parameter.
41
+
42
+ All strategies make predictions that ignore the input feature values passed
43
+ as the `X` argument to `fit` and `predict`. The predictions, however,
44
+ typically depend on values observed in the `y` parameter passed to `fit`.
45
+
46
+ Note that the "stratified" and "uniform" strategies lead to
47
+ non-deterministic predictions that can be rendered deterministic by setting
48
+ the `random_state` parameter if needed. The other strategies are naturally
49
+ deterministic and, once fit, always return the same constant prediction
50
+ for any value of `X`.
51
+
52
+ Read more in the :ref:`User Guide <dummy_estimators>`.
53
+
54
+ .. versionadded:: 0.13
55
+
56
+ Parameters
57
+ ----------
58
+ strategy : {"most_frequent", "prior", "stratified", "uniform", \
59
+ "constant"}, default="prior"
60
+ Strategy to use to generate predictions.
61
+
62
+ * "most_frequent": the `predict` method always returns the most
63
+ frequent class label in the observed `y` argument passed to `fit`.
64
+ The `predict_proba` method returns the matching one-hot encoded
65
+ vector.
66
+ * "prior": the `predict` method always returns the most frequent
67
+ class label in the observed `y` argument passed to `fit` (like
68
+ "most_frequent"). ``predict_proba`` always returns the empirical
69
+ class distribution of `y` also known as the empirical class prior
70
+ distribution.
71
+ * "stratified": the `predict_proba` method randomly samples one-hot
72
+ vectors from a multinomial distribution parametrized by the empirical
73
+ class prior probabilities.
74
+ The `predict` method returns the class label which got probability
75
+ one in the one-hot vector of `predict_proba`.
76
+ Each sampled row of both methods is therefore independent and
77
+ identically distributed.
78
+ * "uniform": generates predictions uniformly at random from the list
79
+ of unique classes observed in `y`, i.e. each class has equal
80
+ probability.
81
+ * "constant": always predicts a constant label that is provided by
82
+ the user. This is useful for metrics that evaluate a non-majority
83
+ class.
84
+
85
+ .. versionchanged:: 0.24
86
+ The default value of `strategy` has changed to "prior" in version
87
+ 0.24.
88
+
89
+ random_state : int, RandomState instance or None, default=None
90
+ Controls the randomness to generate the predictions when
91
+ ``strategy='stratified'`` or ``strategy='uniform'``.
92
+ Pass an int for reproducible output across multiple function calls.
93
+ See :term:`Glossary <random_state>`.
94
+
95
+ constant : int or str or array-like of shape (n_outputs,), default=None
96
+ The explicit constant as predicted by the "constant" strategy. This
97
+ parameter is useful only for the "constant" strategy.
98
+
99
+ Attributes
100
+ ----------
101
+ classes_ : ndarray of shape (n_classes,) or list of such arrays
102
+ Unique class labels observed in `y`. For multi-output classification
103
+ problems, this attribute is a list of arrays as each output has an
104
+ independent set of possible classes.
105
+
106
+ n_classes_ : int or list of int
107
+ Number of labels for each output.
108
+
109
+ class_prior_ : ndarray of shape (n_classes,) or list of such arrays
110
+ Frequency of each class observed in `y`. For multioutput classification
111
+ problems, this is computed independently for each output.
112
+
113
+ n_outputs_ : int
114
+ Number of outputs.
115
+
116
+ sparse_output_ : bool
117
+ True if the array returned from predict is to be in sparse CSC format.
118
+ Is automatically set to True if the input `y` is passed in sparse
119
+ format.
120
+
121
+ See Also
122
+ --------
123
+ DummyRegressor : Regressor that makes predictions using simple rules.
124
+
125
+ Examples
126
+ --------
127
+ >>> import numpy as np
128
+ >>> from sklearn.dummy import DummyClassifier
129
+ >>> X = np.array([-1, 1, 1, 1])
130
+ >>> y = np.array([0, 1, 1, 1])
131
+ >>> dummy_clf = DummyClassifier(strategy="most_frequent")
132
+ >>> dummy_clf.fit(X, y)
133
+ DummyClassifier(strategy='most_frequent')
134
+ >>> dummy_clf.predict(X)
135
+ array([1, 1, 1, 1])
136
+ >>> dummy_clf.score(X, y)
137
+ 0.75
138
+ """
139
+
140
+ _parameter_constraints: dict = {
141
+ "strategy": [
142
+ StrOptions({"most_frequent", "prior", "stratified", "uniform", "constant"})
143
+ ],
144
+ "random_state": ["random_state"],
145
+ "constant": [Integral, str, "array-like", None],
146
+ }
147
+
148
+ def __init__(self, *, strategy="prior", random_state=None, constant=None):
149
+ self.strategy = strategy
150
+ self.random_state = random_state
151
+ self.constant = constant
152
+
153
+ @_fit_context(prefer_skip_nested_validation=True)
154
+ def fit(self, X, y, sample_weight=None):
155
+ """Fit the baseline classifier.
156
+
157
+ Parameters
158
+ ----------
159
+ X : array-like of shape (n_samples, n_features)
160
+ Training data.
161
+
162
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
163
+ Target values.
164
+
165
+ sample_weight : array-like of shape (n_samples,), default=None
166
+ Sample weights.
167
+
168
+ Returns
169
+ -------
170
+ self : object
171
+ Returns the instance itself.
172
+ """
173
+ self._strategy = self.strategy
174
+
175
+ if self._strategy == "uniform" and sp.issparse(y):
176
+ y = y.toarray()
177
+ warnings.warn(
178
+ (
179
+ "A local copy of the target data has been converted "
180
+ "to a numpy array. Predicting on sparse target data "
181
+ "with the uniform strategy would not save memory "
182
+ "and would be slower."
183
+ ),
184
+ UserWarning,
185
+ )
186
+
187
+ self.sparse_output_ = sp.issparse(y)
188
+
189
+ if not self.sparse_output_:
190
+ y = np.asarray(y)
191
+ y = np.atleast_1d(y)
192
+
193
+ if y.ndim == 1:
194
+ y = np.reshape(y, (-1, 1))
195
+
196
+ self.n_outputs_ = y.shape[1]
197
+
198
+ check_consistent_length(X, y)
199
+
200
+ if sample_weight is not None:
201
+ sample_weight = _check_sample_weight(sample_weight, X)
202
+
203
+ if self._strategy == "constant":
204
+ if self.constant is None:
205
+ raise ValueError(
206
+ "Constant target value has to be specified "
207
+ "when the constant strategy is used."
208
+ )
209
+ else:
210
+ constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
211
+ if constant.shape[0] != self.n_outputs_:
212
+ raise ValueError(
213
+ "Constant target value should have shape (%d, 1)."
214
+ % self.n_outputs_
215
+ )
216
+
217
+ (self.classes_, self.n_classes_, self.class_prior_) = class_distribution(
218
+ y, sample_weight
219
+ )
220
+
221
+ if self._strategy == "constant":
222
+ for k in range(self.n_outputs_):
223
+ if not any(constant[k][0] == c for c in self.classes_[k]):
224
+ # Checking in case of constant strategy if the constant
225
+ # provided by the user is in y.
226
+ err_msg = (
227
+ "The constant target value must be present in "
228
+ "the training data. You provided constant={}. "
229
+ "Possible values are: {}.".format(
230
+ self.constant, self.classes_[k].tolist()
231
+ )
232
+ )
233
+ raise ValueError(err_msg)
234
+
235
+ if self.n_outputs_ == 1:
236
+ self.n_classes_ = self.n_classes_[0]
237
+ self.classes_ = self.classes_[0]
238
+ self.class_prior_ = self.class_prior_[0]
239
+
240
+ return self
241
+
242
+ def predict(self, X):
243
+ """Perform classification on test vectors X.
244
+
245
+ Parameters
246
+ ----------
247
+ X : array-like of shape (n_samples, n_features)
248
+ Test data.
249
+
250
+ Returns
251
+ -------
252
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
253
+ Predicted target values for X.
254
+ """
255
+ check_is_fitted(self)
256
+
257
+ # numpy random_state expects Python int and not long as size argument
258
+ # under Windows
259
+ n_samples = _num_samples(X)
260
+ rs = check_random_state(self.random_state)
261
+
262
+ n_classes_ = self.n_classes_
263
+ classes_ = self.classes_
264
+ class_prior_ = self.class_prior_
265
+ constant = self.constant
266
+ if self.n_outputs_ == 1:
267
+ # Get same type even for self.n_outputs_ == 1
268
+ n_classes_ = [n_classes_]
269
+ classes_ = [classes_]
270
+ class_prior_ = [class_prior_]
271
+ constant = [constant]
272
+ # Compute probability only once
273
+ if self._strategy == "stratified":
274
+ proba = self.predict_proba(X)
275
+ if self.n_outputs_ == 1:
276
+ proba = [proba]
277
+
278
+ if self.sparse_output_:
279
+ class_prob = None
280
+ if self._strategy in ("most_frequent", "prior"):
281
+ classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
282
+
283
+ elif self._strategy == "stratified":
284
+ class_prob = class_prior_
285
+
286
+ elif self._strategy == "uniform":
287
+ raise ValueError(
288
+ "Sparse target prediction is not "
289
+ "supported with the uniform strategy"
290
+ )
291
+
292
+ elif self._strategy == "constant":
293
+ classes_ = [np.array([c]) for c in constant]
294
+
295
+ y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
296
+ else:
297
+ if self._strategy in ("most_frequent", "prior"):
298
+ y = np.tile(
299
+ [
300
+ classes_[k][class_prior_[k].argmax()]
301
+ for k in range(self.n_outputs_)
302
+ ],
303
+ [n_samples, 1],
304
+ )
305
+
306
+ elif self._strategy == "stratified":
307
+ y = np.vstack(
308
+ [
309
+ classes_[k][proba[k].argmax(axis=1)]
310
+ for k in range(self.n_outputs_)
311
+ ]
312
+ ).T
313
+
314
+ elif self._strategy == "uniform":
315
+ ret = [
316
+ classes_[k][rs.randint(n_classes_[k], size=n_samples)]
317
+ for k in range(self.n_outputs_)
318
+ ]
319
+ y = np.vstack(ret).T
320
+
321
+ elif self._strategy == "constant":
322
+ y = np.tile(self.constant, (n_samples, 1))
323
+
324
+ if self.n_outputs_ == 1:
325
+ y = np.ravel(y)
326
+
327
+ return y
328
+
329
+ def predict_proba(self, X):
330
+ """
331
+ Return probability estimates for the test vectors X.
332
+
333
+ Parameters
334
+ ----------
335
+ X : array-like of shape (n_samples, n_features)
336
+ Test data.
337
+
338
+ Returns
339
+ -------
340
+ P : ndarray of shape (n_samples, n_classes) or list of such arrays
341
+ Returns the probability of the sample for each class in
342
+ the model, where classes are ordered arithmetically, for each
343
+ output.
344
+ """
345
+ check_is_fitted(self)
346
+
347
+ # numpy random_state expects Python int and not long as size argument
348
+ # under Windows
349
+ n_samples = _num_samples(X)
350
+ rs = check_random_state(self.random_state)
351
+
352
+ n_classes_ = self.n_classes_
353
+ classes_ = self.classes_
354
+ class_prior_ = self.class_prior_
355
+ constant = self.constant
356
+ if self.n_outputs_ == 1:
357
+ # Get same type even for self.n_outputs_ == 1
358
+ n_classes_ = [n_classes_]
359
+ classes_ = [classes_]
360
+ class_prior_ = [class_prior_]
361
+ constant = [constant]
362
+
363
+ P = []
364
+ for k in range(self.n_outputs_):
365
+ if self._strategy == "most_frequent":
366
+ ind = class_prior_[k].argmax()
367
+ out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
368
+ out[:, ind] = 1.0
369
+ elif self._strategy == "prior":
370
+ out = np.ones((n_samples, 1)) * class_prior_[k]
371
+
372
+ elif self._strategy == "stratified":
373
+ out = rs.multinomial(1, class_prior_[k], size=n_samples)
374
+ out = out.astype(np.float64)
375
+
376
+ elif self._strategy == "uniform":
377
+ out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
378
+ out /= n_classes_[k]
379
+
380
+ elif self._strategy == "constant":
381
+ ind = np.where(classes_[k] == constant[k])
382
+ out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
383
+ out[:, ind] = 1.0
384
+
385
+ P.append(out)
386
+
387
+ if self.n_outputs_ == 1:
388
+ P = P[0]
389
+
390
+ return P
391
+
392
+ def predict_log_proba(self, X):
393
+ """
394
+ Return log probability estimates for the test vectors X.
395
+
396
+ Parameters
397
+ ----------
398
+ X : {array-like, object with finite length or shape}
399
+ Test data.
400
+
401
+ Returns
402
+ -------
403
+ P : ndarray of shape (n_samples, n_classes) or list of such arrays
404
+ Returns the log probability of the sample for each class in
405
+ the model, where classes are ordered arithmetically for each
406
+ output.
407
+ """
408
+ proba = self.predict_proba(X)
409
+ if self.n_outputs_ == 1:
410
+ return np.log(proba)
411
+ else:
412
+ return [np.log(p) for p in proba]
413
+
414
+ def _more_tags(self):
415
+ return {
416
+ "poor_score": True,
417
+ "no_validation": True,
418
+ "_xfail_checks": {
419
+ "check_methods_subset_invariance": "fails for the predict method",
420
+ "check_methods_sample_order_invariance": "fails for the predict method",
421
+ },
422
+ }
423
+
424
+ def score(self, X, y, sample_weight=None):
425
+ """Return the mean accuracy on the given test data and labels.
426
+
427
+ In multi-label classification, this is the subset accuracy
428
+ which is a harsh metric since you require for each sample that
429
+ each label set be correctly predicted.
430
+
431
+ Parameters
432
+ ----------
433
+ X : None or array-like of shape (n_samples, n_features)
434
+ Test samples. Passing None as test samples gives the same result
435
+ as passing real test samples, since DummyClassifier
436
+ operates independently of the sampled observations.
437
+
438
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
439
+ True labels for X.
440
+
441
+ sample_weight : array-like of shape (n_samples,), default=None
442
+ Sample weights.
443
+
444
+ Returns
445
+ -------
446
+ score : float
447
+ Mean accuracy of self.predict(X) w.r.t. y.
448
+ """
449
+ if X is None:
450
+ X = np.zeros(shape=(len(y), 1))
451
+ return super().score(X, y, sample_weight)
452
+
453
+
454
+ class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
455
+ """Regressor that makes predictions using simple rules.
456
+
457
+ This regressor is useful as a simple baseline to compare with other
458
+ (real) regressors. Do not use it for real problems.
459
+
460
+ Read more in the :ref:`User Guide <dummy_estimators>`.
461
+
462
+ .. versionadded:: 0.13
463
+
464
+ Parameters
465
+ ----------
466
+ strategy : {"mean", "median", "quantile", "constant"}, default="mean"
467
+ Strategy to use to generate predictions.
468
+
469
+ * "mean": always predicts the mean of the training set
470
+ * "median": always predicts the median of the training set
471
+ * "quantile": always predicts a specified quantile of the training set,
472
+ provided with the quantile parameter.
473
+ * "constant": always predicts a constant value that is provided by
474
+ the user.
475
+
476
+ constant : int or float or array-like of shape (n_outputs,), default=None
477
+ The explicit constant as predicted by the "constant" strategy. This
478
+ parameter is useful only for the "constant" strategy.
479
+
480
+ quantile : float in [0.0, 1.0], default=None
481
+ The quantile to predict using the "quantile" strategy. A quantile of
482
+ 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
483
+ maximum.
484
+
485
+ Attributes
486
+ ----------
487
+ constant_ : ndarray of shape (1, n_outputs)
488
+ Mean or median or quantile of the training targets or constant value
489
+ given by the user.
490
+
491
+ n_outputs_ : int
492
+ Number of outputs.
493
+
494
+ See Also
495
+ --------
496
+ DummyClassifier: Classifier that makes predictions using simple rules.
497
+
498
+ Examples
499
+ --------
500
+ >>> import numpy as np
501
+ >>> from sklearn.dummy import DummyRegressor
502
+ >>> X = np.array([1.0, 2.0, 3.0, 4.0])
503
+ >>> y = np.array([2.0, 3.0, 5.0, 10.0])
504
+ >>> dummy_regr = DummyRegressor(strategy="mean")
505
+ >>> dummy_regr.fit(X, y)
506
+ DummyRegressor()
507
+ >>> dummy_regr.predict(X)
508
+ array([5., 5., 5., 5.])
509
+ >>> dummy_regr.score(X, y)
510
+ 0.0
511
+ """
512
+
513
+ _parameter_constraints: dict = {
514
+ "strategy": [StrOptions({"mean", "median", "quantile", "constant"})],
515
+ "quantile": [Interval(Real, 0.0, 1.0, closed="both"), None],
516
+ "constant": [
517
+ Interval(Real, None, None, closed="neither"),
518
+ "array-like",
519
+ None,
520
+ ],
521
+ }
522
+
523
+ def __init__(self, *, strategy="mean", constant=None, quantile=None):
524
+ self.strategy = strategy
525
+ self.constant = constant
526
+ self.quantile = quantile
527
+
528
+ @_fit_context(prefer_skip_nested_validation=True)
529
+ def fit(self, X, y, sample_weight=None):
530
+ """Fit the random regressor.
531
+
532
+ Parameters
533
+ ----------
534
+ X : array-like of shape (n_samples, n_features)
535
+ Training data.
536
+
537
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
538
+ Target values.
539
+
540
+ sample_weight : array-like of shape (n_samples,), default=None
541
+ Sample weights.
542
+
543
+ Returns
544
+ -------
545
+ self : object
546
+ Fitted estimator.
547
+ """
548
+ y = check_array(y, ensure_2d=False, input_name="y")
549
+ if len(y) == 0:
550
+ raise ValueError("y must not be empty.")
551
+
552
+ if y.ndim == 1:
553
+ y = np.reshape(y, (-1, 1))
554
+ self.n_outputs_ = y.shape[1]
555
+
556
+ check_consistent_length(X, y, sample_weight)
557
+
558
+ if sample_weight is not None:
559
+ sample_weight = _check_sample_weight(sample_weight, X)
560
+
561
+ if self.strategy == "mean":
562
+ self.constant_ = np.average(y, axis=0, weights=sample_weight)
563
+
564
+ elif self.strategy == "median":
565
+ if sample_weight is None:
566
+ self.constant_ = np.median(y, axis=0)
567
+ else:
568
+ self.constant_ = [
569
+ _weighted_percentile(y[:, k], sample_weight, percentile=50.0)
570
+ for k in range(self.n_outputs_)
571
+ ]
572
+
573
+ elif self.strategy == "quantile":
574
+ if self.quantile is None:
575
+ raise ValueError(
576
+ "When using `strategy='quantile', you have to specify the desired "
577
+ "quantile in the range [0, 1]."
578
+ )
579
+ percentile = self.quantile * 100.0
580
+ if sample_weight is None:
581
+ self.constant_ = np.percentile(y, axis=0, q=percentile)
582
+ else:
583
+ self.constant_ = [
584
+ _weighted_percentile(y[:, k], sample_weight, percentile=percentile)
585
+ for k in range(self.n_outputs_)
586
+ ]
587
+
588
+ elif self.strategy == "constant":
589
+ if self.constant is None:
590
+ raise TypeError(
591
+ "Constant target value has to be specified "
592
+ "when the constant strategy is used."
593
+ )
594
+
595
+ self.constant_ = check_array(
596
+ self.constant,
597
+ accept_sparse=["csr", "csc", "coo"],
598
+ ensure_2d=False,
599
+ ensure_min_samples=0,
600
+ )
601
+
602
+ if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]:
603
+ raise ValueError(
604
+ "Constant target value should have shape (%d, 1)." % y.shape[1]
605
+ )
606
+
607
+ self.constant_ = np.reshape(self.constant_, (1, -1))
608
+ return self
609
+
610
+ def predict(self, X, return_std=False):
611
+ """Perform classification on test vectors X.
612
+
613
+ Parameters
614
+ ----------
615
+ X : array-like of shape (n_samples, n_features)
616
+ Test data.
617
+
618
+ return_std : bool, default=False
619
+ Whether to return the standard deviation of posterior prediction.
620
+ All zeros in this case.
621
+
622
+ .. versionadded:: 0.20
623
+
624
+ Returns
625
+ -------
626
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
627
+ Predicted target values for X.
628
+
629
+ y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
630
+ Standard deviation of predictive distribution of query points.
631
+ """
632
+ check_is_fitted(self)
633
+ n_samples = _num_samples(X)
634
+
635
+ y = np.full(
636
+ (n_samples, self.n_outputs_),
637
+ self.constant_,
638
+ dtype=np.array(self.constant_).dtype,
639
+ )
640
+ y_std = np.zeros((n_samples, self.n_outputs_))
641
+
642
+ if self.n_outputs_ == 1:
643
+ y = np.ravel(y)
644
+ y_std = np.ravel(y_std)
645
+
646
+ return (y, y_std) if return_std else y
647
+
648
+ def _more_tags(self):
649
+ return {"poor_score": True, "no_validation": True}
650
+
651
+ def score(self, X, y, sample_weight=None):
652
+ """Return the coefficient of determination R^2 of the prediction.
653
+
654
+ The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the
655
+ residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the
656
+ total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best
657
+ possible score is 1.0 and it can be negative (because the model can be
658
+ arbitrarily worse). A constant model that always predicts the expected
659
+ value of y, disregarding the input features, would get a R^2 score of
660
+ 0.0.
661
+
662
+ Parameters
663
+ ----------
664
+ X : None or array-like of shape (n_samples, n_features)
665
+ Test samples. Passing None as test samples gives the same result
666
+ as passing real test samples, since `DummyRegressor`
667
+ operates independently of the sampled observations.
668
+
669
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
670
+ True values for X.
671
+
672
+ sample_weight : array-like of shape (n_samples,), default=None
673
+ Sample weights.
674
+
675
+ Returns
676
+ -------
677
+ score : float
678
+ R^2 of `self.predict(X)` w.r.t. y.
679
+ """
680
+ if X is None:
681
+ X = np.zeros(shape=(len(y), 1))
682
+ return super().score(X, y, sample_weight)
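
A short baseline sketch for the two estimators in this file (illustrative only; the toy `X`, `y` are hypothetical, and the printed values follow from this particular `y`):

import numpy as np
from sklearn.dummy import DummyClassifier, DummyRegressor

X = np.zeros((6, 1))                    # features are ignored by every strategy
y = np.array([0, 0, 0, 1, 1, 2])

clf = DummyClassifier(strategy="prior").fit(X, y)
print(clf.predict(X[:2]))               # most frequent class: [0 0]
print(clf.predict_proba(X)[0])          # empirical prior: roughly [0.5 0.333 0.167]

reg = DummyRegressor(strategy="quantile", quantile=0.9).fit(X, np.arange(6.0))
print(reg.predict(X[:2]))               # constant 90th percentile: [4.5 4.5]
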
llmeval-env/lib/python3.10/site-packages/sklearn/exceptions.py ADDED
@@ -0,0 +1,191 @@
1
+ """
2
+ The :mod:`sklearn.exceptions` module includes all custom warnings and error
3
+ classes used across scikit-learn.
4
+ """
5
+
6
+ __all__ = [
7
+ "NotFittedError",
8
+ "ConvergenceWarning",
9
+ "DataConversionWarning",
10
+ "DataDimensionalityWarning",
11
+ "EfficiencyWarning",
12
+ "FitFailedWarning",
13
+ "SkipTestWarning",
14
+ "UndefinedMetricWarning",
15
+ "PositiveSpectrumWarning",
16
+ "UnsetMetadataPassedError",
17
+ ]
18
+
19
+
20
+ class UnsetMetadataPassedError(ValueError):
21
+ """Exception class to raise if a metadata is passed which is not explicitly \
22
+ requested (metadata=True) or not requested (metadata=False).
23
+
24
+ .. versionadded:: 1.3
25
+
26
+ Parameters
27
+ ----------
28
+ message : str
29
+ The error message.
30
+
31
+ unrequested_params : dict
32
+ A dictionary of parameters and their values which are provided but not
33
+ requested.
34
+
35
+ routed_params : dict
36
+ A dictionary of routed parameters.
37
+ """
38
+
39
+ def __init__(self, *, message, unrequested_params, routed_params):
40
+ super().__init__(message)
41
+ self.unrequested_params = unrequested_params
42
+ self.routed_params = routed_params
43
+
44
+
45
+ class NotFittedError(ValueError, AttributeError):
46
+ """Exception class to raise if estimator is used before fitting.
47
+
48
+ This class inherits from both ValueError and AttributeError to help with
49
+ exception handling and backward compatibility.
50
+
51
+ Examples
52
+ --------
53
+ >>> from sklearn.svm import LinearSVC
54
+ >>> from sklearn.exceptions import NotFittedError
55
+ >>> try:
56
+ ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
57
+ ... except NotFittedError as e:
58
+ ... print(repr(e))
59
+ NotFittedError("This LinearSVC instance is not fitted yet. Call 'fit' with
60
+ appropriate arguments before using this estimator."...)
61
+
62
+ .. versionchanged:: 0.18
63
+ Moved from sklearn.utils.validation.
64
+ """
65
+
66
+
67
+ class ConvergenceWarning(UserWarning):
68
+ """Custom warning to capture convergence problems
69
+
70
+ .. versionchanged:: 0.18
71
+ Moved from sklearn.utils.
72
+ """
73
+
74
+
75
+ class DataConversionWarning(UserWarning):
76
+ """Warning used to notify implicit data conversions happening in the code.
77
+
78
+ This warning occurs when some input data needs to be converted or
79
+ interpreted in a way that may not match the user's expectations.
80
+
81
+ For example, this warning may occur when the user
82
+ - passes an integer array to a function which expects float input and
83
+ will convert the input;
84
+ - requests a non-copying operation, but a copy is required to meet the
85
+ implementation's data-type expectations;
86
+ - passes an input whose shape can be interpreted ambiguously.
87
+
88
+ .. versionchanged:: 0.18
89
+ Moved from sklearn.utils.validation.
90
+ """
91
+
92
+
93
+ class DataDimensionalityWarning(UserWarning):
94
+ """Custom warning to notify potential issues with data dimensionality.
95
+
96
+ For example, in random projection, this warning is raised when the
97
+ number of components, which quantifies the dimensionality of the target
98
+ projection space, is higher than the number of features, which quantifies
99
+ the dimensionality of the original source space, to imply that the
100
+ dimensionality of the problem will not be reduced.
101
+
102
+ .. versionchanged:: 0.18
103
+ Moved from sklearn.utils.
104
+ """
105
+
106
+
107
+ class EfficiencyWarning(UserWarning):
108
+ """Warning used to notify the user of inefficient computation.
109
+
110
+ This warning notifies the user that the efficiency may not be optimal due
111
+ to some reason which may be included as a part of the warning message.
112
+ This may be subclassed into a more specific Warning class.
113
+
114
+ .. versionadded:: 0.18
115
+ """
116
+
117
+
118
+ class FitFailedWarning(RuntimeWarning):
119
+ """Warning class used if there is an error while fitting the estimator.
120
+
121
+ This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
122
+ and the cross-validation helper function cross_val_score to warn when there
123
+ is an error while fitting the estimator.
124
+
125
+ .. versionchanged:: 0.18
126
+ Moved from sklearn.cross_validation.
127
+ """
128
+
129
+
130
+ class SkipTestWarning(UserWarning):
131
+ """Warning class used to notify the user of a test that was skipped.
132
+
133
+ For example, one of the estimator checks requires a pandas import.
134
+ If the pandas package cannot be imported, the test will be skipped rather
135
+ than register as a failure.
136
+ """
137
+
138
+
139
+ class UndefinedMetricWarning(UserWarning):
140
+ """Warning used when the metric is invalid
141
+
142
+ .. versionchanged:: 0.18
143
+ Moved from sklearn.base.
144
+ """
145
+
146
+
147
+ class PositiveSpectrumWarning(UserWarning):
148
+ """Warning raised when the eigenvalues of a PSD matrix have issues
149
+
150
+ This warning is typically raised by ``_check_psd_eigenvalues`` when the
151
+ eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix
152
+ (kernel) present significant negative eigenvalues, or bad conditioning i.e.
153
+ very small non-zero eigenvalues compared to the largest eigenvalue.
154
+
155
+ .. versionadded:: 0.22
156
+ """
157
+
158
+
159
+ class InconsistentVersionWarning(UserWarning):
160
+ """Warning raised when an estimator is unpickled with a inconsistent version.
161
+
162
+ Parameters
163
+ ----------
164
+ estimator_name : str
165
+ Estimator name.
166
+
167
+ current_sklearn_version : str
168
+ Current scikit-learn version.
169
+
170
+ original_sklearn_version : str
171
+ Original scikit-learn version.
172
+ """
173
+
174
+ def __init__(
175
+ self, *, estimator_name, current_sklearn_version, original_sklearn_version
176
+ ):
177
+ self.estimator_name = estimator_name
178
+ self.current_sklearn_version = current_sklearn_version
179
+ self.original_sklearn_version = original_sklearn_version
180
+
181
+ def __str__(self):
182
+ return (
183
+ f"Trying to unpickle estimator {self.estimator_name} from version"
184
+ f" {self.original_sklearn_version} when "
185
+ f"using version {self.current_sklearn_version}. This might lead to breaking"
186
+ " code or "
187
+ "invalid results. Use at your own risk. "
188
+ "For more info please refer to:\n"
189
+ "https://scikit-learn.org/stable/model_persistence.html"
190
+ "#security-maintainability-limitations"
191
+ )
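
A small sketch of the dual inheritance documented for `NotFittedError` above (illustrative; assumes a standard scikit-learn install):

from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier

try:
    DecisionTreeClassifier().predict([[0.0, 1.0]])
except NotFittedError as exc:
    # NotFittedError subclasses both ValueError and AttributeError, so
    # except clauses written against either base class still catch it.
    print(isinstance(exc, ValueError), isinstance(exc, AttributeError))  # True True
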
llmeval-env/lib/python3.10/site-packages/sklearn/isotonic.py ADDED
@@ -0,0 +1,498 @@
1
+ # Authors: Fabian Pedregosa <[email protected]>
2
+ # Alexandre Gramfort <[email protected]>
3
+ # Nelle Varoquaux <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import math
7
+ import warnings
8
+ from numbers import Real
9
+
10
+ import numpy as np
11
+ from scipy import interpolate
12
+ from scipy.stats import spearmanr
13
+
14
+ from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique
15
+ from .base import BaseEstimator, RegressorMixin, TransformerMixin, _fit_context
16
+ from .utils import check_array, check_consistent_length
17
+ from .utils._param_validation import Interval, StrOptions, validate_params
18
+ from .utils.validation import _check_sample_weight, check_is_fitted
19
+
20
+ __all__ = ["check_increasing", "isotonic_regression", "IsotonicRegression"]
21
+
22
+
23
+ @validate_params(
24
+ {
25
+ "x": ["array-like"],
26
+ "y": ["array-like"],
27
+ },
28
+ prefer_skip_nested_validation=True,
29
+ )
30
+ def check_increasing(x, y):
31
+ """Determine whether y is monotonically correlated with x.
32
+
33
+ y is found increasing or decreasing with respect to x based on a Spearman
34
+ correlation test.
35
+
36
+ Parameters
37
+ ----------
38
+ x : array-like of shape (n_samples,)
39
+ Training data.
40
+
41
+ y : array-like of shape (n_samples,)
42
+ Training target.
43
+
44
+ Returns
45
+ -------
46
+ increasing_bool : boolean
47
+ Whether the relationship is increasing or decreasing.
48
+
49
+ Notes
50
+ -----
51
+ The Spearman correlation coefficient is estimated from the data, and the
52
+ sign of the resulting estimate is used as the result.
53
+
54
+ In the event that the 95% confidence interval based on Fisher transform
55
+ spans zero, a warning is raised.
56
+
57
+ References
58
+ ----------
59
+ Fisher transformation. Wikipedia.
60
+ https://en.wikipedia.org/wiki/Fisher_transformation
61
+
62
+ Examples
63
+ --------
64
+ >>> from sklearn.isotonic import check_increasing
65
+ >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10]
66
+ >>> check_increasing(x, y)
67
+ True
68
+ >>> y = [10, 8, 6, 4, 2]
69
+ >>> check_increasing(x, y)
70
+ False
71
+ """
72
+
73
+ # Calculate Spearman rho estimate and set return accordingly.
74
+ rho, _ = spearmanr(x, y)
75
+ increasing_bool = rho >= 0
76
+
77
+ # Run Fisher transform to get the rho CI, but handle rho=+/-1
78
+ if rho not in [-1.0, 1.0] and len(x) > 3:
79
+ F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))
80
+ F_se = 1 / math.sqrt(len(x) - 3)
81
+
82
+ # Use a 95% CI, i.e., +/-1.96 S.E.
83
+ # https://en.wikipedia.org/wiki/Fisher_transformation
84
+ rho_0 = math.tanh(F - 1.96 * F_se)
85
+ rho_1 = math.tanh(F + 1.96 * F_se)
86
+
87
+ # Warn if the CI spans zero.
88
+ if np.sign(rho_0) != np.sign(rho_1):
89
+ warnings.warn(
90
+ "Confidence interval of the Spearman "
91
+ "correlation coefficient spans zero. "
92
+ "Determination of ``increasing`` may be "
93
+ "suspect."
94
+ )
95
+
96
+ return increasing_bool
97
+
98
+
99
+ @validate_params(
100
+ {
101
+ "y": ["array-like"],
102
+ "sample_weight": ["array-like", None],
103
+ "y_min": [Interval(Real, None, None, closed="both"), None],
104
+ "y_max": [Interval(Real, None, None, closed="both"), None],
105
+ "increasing": ["boolean"],
106
+ },
107
+ prefer_skip_nested_validation=True,
108
+ )
109
+ def isotonic_regression(
110
+ y, *, sample_weight=None, y_min=None, y_max=None, increasing=True
111
+ ):
112
+ """Solve the isotonic regression model.
113
+
114
+ Read more in the :ref:`User Guide <isotonic>`.
115
+
116
+ Parameters
117
+ ----------
118
+ y : array-like of shape (n_samples,)
119
+ The data.
120
+
121
+ sample_weight : array-like of shape (n_samples,), default=None
122
+ Weights on each point of the regression.
123
+ If None, weight is set to 1 (equal weights).
124
+
125
+ y_min : float, default=None
126
+ Lower bound on the lowest predicted value (the minimum value may
127
+ still be higher). If not set, defaults to -inf.
128
+
129
+ y_max : float, default=None
130
+ Upper bound on the highest predicted value (the maximum may still be
131
+ lower). If not set, defaults to +inf.
132
+
133
+ increasing : bool, default=True
134
+ Whether to compute ``y_`` is increasing (if set to True) or decreasing
135
+ (if set to False).
136
+
137
+ Returns
138
+ -------
139
+ y_ : ndarray of shape (n_samples,)
140
+ Isotonic fit of y.
141
+
142
+ References
143
+ ----------
144
+ "Active set algorithms for isotonic regression; A unifying framework"
145
+ by Michael J. Best and Nilotpal Chakravarti, section 3.
146
+
147
+ Examples
148
+ --------
149
+ >>> from sklearn.isotonic import isotonic_regression
150
+ >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4])
151
+ array([2.75 , 2.75 , 2.75 , 2.75 , 7.33...,
152
+ 7.33..., 7.33..., 7.33..., 7.33..., 7.33...])
153
+ """
154
+ order = np.s_[:] if increasing else np.s_[::-1]
155
+ y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32])
156
+ y = np.array(y[order], dtype=y.dtype)
157
+ sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True)
158
+ sample_weight = np.ascontiguousarray(sample_weight[order])
159
+
160
+ _inplace_contiguous_isotonic_regression(y, sample_weight)
161
+ if y_min is not None or y_max is not None:
162
+ # Older versions of np.clip don't accept None as a bound, so use np.inf
163
+ if y_min is None:
164
+ y_min = -np.inf
165
+ if y_max is None:
166
+ y_max = np.inf
167
+ np.clip(y, y_min, y_max, y)
168
+ return y[order]
169
+
170
+
171
+ class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
172
+ """Isotonic regression model.
173
+
174
+ Read more in the :ref:`User Guide <isotonic>`.
175
+
176
+ .. versionadded:: 0.13
177
+
178
+ Parameters
179
+ ----------
180
+ y_min : float, default=None
181
+ Lower bound on the lowest predicted value (the minimum value may
182
+ still be higher). If not set, defaults to -inf.
183
+
184
+ y_max : float, default=None
185
+ Upper bound on the highest predicted value (the maximum may still be
186
+ lower). If not set, defaults to +inf.
187
+
188
+ increasing : bool or 'auto', default=True
189
+ Determines whether the predictions should be constrained to increase
190
+ or decrease with `X`. 'auto' will decide based on the Spearman
191
+ correlation estimate's sign.
192
+
193
+ out_of_bounds : {'nan', 'clip', 'raise'}, default='nan'
194
+ Handles how `X` values outside of the training domain are handled
195
+ during prediction.
196
+
197
+ - 'nan', predictions will be NaN.
198
+ - 'clip', predictions will be set to the value corresponding to
199
+ the nearest train interval endpoint.
200
+ - 'raise', a `ValueError` is raised.
201
+
202
+ Attributes
203
+ ----------
204
+ X_min_ : float
205
+ Minimum value of input array `X_` for left bound.
206
+
207
+ X_max_ : float
208
+ Maximum value of input array `X_` for right bound.
209
+
210
+ X_thresholds_ : ndarray of shape (n_thresholds,)
211
+ Unique ascending `X` values used to interpolate
212
+ the y = f(X) monotonic function.
213
+
214
+ .. versionadded:: 0.24
215
+
216
+ y_thresholds_ : ndarray of shape (n_thresholds,)
217
+ De-duplicated `y` values suitable to interpolate the y = f(X)
218
+ monotonic function.
219
+
220
+ .. versionadded:: 0.24
221
+
222
+ f_ : function
223
+ The stepwise interpolating function that covers the input domain ``X``.
224
+
225
+ increasing_ : bool
226
+ Inferred value for ``increasing``.
227
+
228
+ See Also
229
+ --------
230
+ sklearn.linear_model.LinearRegression : Ordinary least squares Linear
231
+ Regression.
232
+ sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that
233
+ is a non-parametric model accepting monotonicity constraints.
234
+ isotonic_regression : Function to solve the isotonic regression model.
235
+
236
+ Notes
237
+ -----
238
+ Ties are broken using the secondary method from de Leeuw, 1977.
239
+
240
+ References
241
+ ----------
242
+ Isotonic Median Regression: A Linear Programming Approach
243
+ Nilotpal Chakravarti
244
+ Mathematics of Operations Research
245
+ Vol. 14, No. 2 (May, 1989), pp. 303-308
246
+
247
+ Isotone Optimization in R : Pool-Adjacent-Violators
248
+ Algorithm (PAVA) and Active Set Methods
249
+ de Leeuw, Hornik, Mair
250
+ Journal of Statistical Software 2009
251
+
252
+ Correctness of Kruskal's algorithms for monotone regression with ties
253
+ de Leeuw, Psychometrica, 1977
254
+
255
+ Examples
256
+ --------
257
+ >>> from sklearn.datasets import make_regression
258
+ >>> from sklearn.isotonic import IsotonicRegression
259
+ >>> X, y = make_regression(n_samples=10, n_features=1, random_state=41)
260
+ >>> iso_reg = IsotonicRegression().fit(X, y)
261
+ >>> iso_reg.predict([.1, .2])
262
+ array([1.8628..., 3.7256...])
263
+ """
264
+
265
+ _parameter_constraints: dict = {
266
+ "y_min": [Interval(Real, None, None, closed="both"), None],
267
+ "y_max": [Interval(Real, None, None, closed="both"), None],
268
+ "increasing": ["boolean", StrOptions({"auto"})],
269
+ "out_of_bounds": [StrOptions({"nan", "clip", "raise"})],
270
+ }
271
+
272
+ def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"):
273
+ self.y_min = y_min
274
+ self.y_max = y_max
275
+ self.increasing = increasing
276
+ self.out_of_bounds = out_of_bounds
277
+
278
+ def _check_input_data_shape(self, X):
279
+ if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)):
280
+ msg = (
281
+ "Isotonic regression input X should be a 1d array or "
282
+ "2d array with 1 feature"
283
+ )
284
+ raise ValueError(msg)
285
+
286
+ def _build_f(self, X, y):
287
+ """Build the f_ interp1d function."""
288
+
289
+ bounds_error = self.out_of_bounds == "raise"
290
+ if len(y) == 1:
291
+ # single y, constant prediction
292
+ self.f_ = lambda x: y.repeat(x.shape)
293
+ else:
294
+ self.f_ = interpolate.interp1d(
295
+ X, y, kind="linear", bounds_error=bounds_error
296
+ )
297
+
298
+ def _build_y(self, X, y, sample_weight, trim_duplicates=True):
299
+ """Build the y_ IsotonicRegression."""
300
+ self._check_input_data_shape(X)
301
+ X = X.reshape(-1) # use 1d view
302
+
303
+ # Determine increasing if auto-determination requested
304
+ if self.increasing == "auto":
305
+ self.increasing_ = check_increasing(X, y)
306
+ else:
307
+ self.increasing_ = self.increasing
308
+
309
+ # If sample_weights is passed, removed zero-weight values and clean
310
+ # order
311
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
312
+ mask = sample_weight > 0
313
+ X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
314
+
315
+ order = np.lexsort((y, X))
316
+ X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]
317
+ unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)
318
+
319
+ X = unique_X
320
+ y = isotonic_regression(
321
+ unique_y,
322
+ sample_weight=unique_sample_weight,
323
+ y_min=self.y_min,
324
+ y_max=self.y_max,
325
+ increasing=self.increasing_,
326
+ )
327
+
328
+ # Handle the left and right bounds on X
329
+ self.X_min_, self.X_max_ = np.min(X), np.max(X)
330
+
331
+ if trim_duplicates:
332
+ # Remove unnecessary points for faster prediction
333
+ keep_data = np.ones((len(y),), dtype=bool)
334
+ # Aside from the 1st and last point, remove points whose y values
335
+ # are equal to both the point before and the point after it.
336
+ keep_data[1:-1] = np.logical_or(
337
+ np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:])
338
+ )
339
+ return X[keep_data], y[keep_data]
340
+ else:
341
+ # The ability to turn off trim_duplicates is only used to it make
342
+ # easier to unit test that removing duplicates in y does not have
343
+ # any impact the resulting interpolation function (besides
344
+ # prediction speed).
345
+ return X, y
346
+
347
+ @_fit_context(prefer_skip_nested_validation=True)
348
+ def fit(self, X, y, sample_weight=None):
349
+ """Fit the model using X, y as training data.
350
+
351
+ Parameters
352
+ ----------
353
+ X : array-like of shape (n_samples,) or (n_samples, 1)
354
+ Training data.
355
+
356
+ .. versionchanged:: 0.24
357
+ Also accepts 2d array with 1 feature.
358
+
359
+ y : array-like of shape (n_samples,)
360
+ Training target.
361
+
362
+ sample_weight : array-like of shape (n_samples,), default=None
363
+ Weights. If set to None, all weights will be set to 1 (equal
364
+ weights).
365
+
366
+ Returns
367
+ -------
368
+ self : object
369
+ Returns an instance of self.
370
+
371
+ Notes
372
+ -----
373
+ X is stored for future use, as :meth:`transform` needs X to interpolate
374
+ new input data.
375
+ """
376
+ check_params = dict(accept_sparse=False, ensure_2d=False)
377
+ X = check_array(
378
+ X, input_name="X", dtype=[np.float64, np.float32], **check_params
379
+ )
380
+ y = check_array(y, input_name="y", dtype=X.dtype, **check_params)
381
+ check_consistent_length(X, y, sample_weight)
382
+
383
+ # Transform y by running the isotonic regression algorithm and
384
+ # transform X accordingly.
385
+ X, y = self._build_y(X, y, sample_weight)
386
+
387
+ # It is necessary to store the non-redundant part of the training set
388
+ # on the model to make it possible to support model persistence via
389
+ # the pickle module as the object built by scipy.interp1d is not
390
+ # picklable directly.
391
+ self.X_thresholds_, self.y_thresholds_ = X, y
392
+
393
+ # Build the interpolation function
394
+ self._build_f(X, y)
395
+ return self
396
+
397
+ def _transform(self, T):
398
+ """`_transform` is called by both `transform` and `predict` methods.
399
+
400
+ Since `transform` is wrapped to output arrays of specific types (e.g.
401
+ NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform`
402
+ directly.
403
+
404
+ The above behaviour could be changed in the future, if we decide to output
405
+ other type of arrays when calling `predict`.
406
+ """
407
+ if hasattr(self, "X_thresholds_"):
408
+ dtype = self.X_thresholds_.dtype
409
+ else:
410
+ dtype = np.float64
411
+
412
+ T = check_array(T, dtype=dtype, ensure_2d=False)
413
+
414
+ self._check_input_data_shape(T)
415
+ T = T.reshape(-1) # use 1d view
416
+
417
+ if self.out_of_bounds == "clip":
418
+ T = np.clip(T, self.X_min_, self.X_max_)
419
+
420
+ res = self.f_(T)
421
+
422
+ # on scipy 0.17, interp1d up-casts to float64, so we cast back
423
+ res = res.astype(T.dtype)
424
+
425
+ return res
426
+
427
+ def transform(self, T):
428
+ """Transform new data by linear interpolation.
429
+
430
+ Parameters
431
+ ----------
432
+ T : array-like of shape (n_samples,) or (n_samples, 1)
433
+ Data to transform.
434
+
435
+ .. versionchanged:: 0.24
436
+ Also accepts 2d array with 1 feature.
437
+
438
+ Returns
439
+ -------
440
+ y_pred : ndarray of shape (n_samples,)
441
+ The transformed data.
442
+ """
443
+ return self._transform(T)
444
+
445
+ def predict(self, T):
446
+ """Predict new data by linear interpolation.
447
+
448
+ Parameters
449
+ ----------
450
+ T : array-like of shape (n_samples,) or (n_samples, 1)
451
+ Data to transform.
452
+
453
+ Returns
454
+ -------
455
+ y_pred : ndarray of shape (n_samples,)
456
+ Transformed data.
457
+ """
458
+ return self._transform(T)
459
+
460
+ # We implement get_feature_names_out here instead of using
461
+ # `ClassNamePrefixFeaturesOutMixin` because `input_features` are ignored.
462
+ # `input_features` are ignored because `IsotonicRegression` accepts 1d
463
+ # arrays and the semantics of `feature_names_in_` are not clear for 1d arrays.
464
+ def get_feature_names_out(self, input_features=None):
465
+ """Get output feature names for transformation.
466
+
467
+ Parameters
468
+ ----------
469
+ input_features : array-like of str or None, default=None
470
+ Ignored.
471
+
472
+ Returns
473
+ -------
474
+ feature_names_out : ndarray of str objects
475
+ An ndarray with one string i.e. ["isotonicregression0"].
476
+ """
477
+ check_is_fitted(self, "f_")
478
+ class_name = self.__class__.__name__.lower()
479
+ return np.asarray([f"{class_name}0"], dtype=object)
480
+
481
+ def __getstate__(self):
482
+ """Pickle-protocol - return state of the estimator."""
483
+ state = super().__getstate__()
484
+ # remove interpolation method
485
+ state.pop("f_", None)
486
+ return state
487
+
488
+ def __setstate__(self, state):
489
+ """Pickle-protocol - set state of the estimator.
490
+
491
+ We need to rebuild the interpolation function.
492
+ """
493
+ super().__setstate__(state)
494
+ if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"):
495
+ self._build_f(self.X_thresholds_, self.y_thresholds_)
496
+
497
+ def _more_tags(self):
498
+ return {"X_types": ["1darray"]}
llmeval-env/lib/python3.10/site-packages/sklearn/kernel_approximation.py ADDED
@@ -0,0 +1,1137 @@
1
+ """
2
+ The :mod:`sklearn.kernel_approximation` module implements several
3
+ approximate kernel feature maps based on Fourier transforms and Count Sketches.
4
+ """
5
+
6
+ # Author: Andreas Mueller <[email protected]>
7
+ # Daniel Lopez-Sanchez (TensorSketch) <[email protected]>
8
+
9
+ # License: BSD 3 clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ import scipy.sparse as sp
16
+ from scipy.linalg import svd
17
+
18
+ try:
19
+ from scipy.fft import fft, ifft
20
+ except ImportError: # scipy < 1.4
21
+ from scipy.fftpack import fft, ifft
22
+
23
+ from .base import (
24
+ BaseEstimator,
25
+ ClassNamePrefixFeaturesOutMixin,
26
+ TransformerMixin,
27
+ _fit_context,
28
+ )
29
+ from .metrics.pairwise import KERNEL_PARAMS, PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
30
+ from .utils import check_random_state, deprecated
31
+ from .utils._param_validation import Interval, StrOptions
32
+ from .utils.extmath import safe_sparse_dot
33
+ from .utils.validation import (
34
+ _check_feature_names_in,
35
+ check_is_fitted,
36
+ check_non_negative,
37
+ )
38
+
39
+
40
+ class PolynomialCountSketch(
41
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
42
+ ):
43
+ """Polynomial kernel approximation via Tensor Sketch.
44
+
45
+ Implements Tensor Sketch, which approximates the feature map
46
+ of the polynomial kernel::
47
+
48
+ K(X, Y) = (gamma * <X, Y> + coef0)^degree
49
+
50
+ by efficiently computing a Count Sketch of the outer product of a
51
+ vector with itself using Fast Fourier Transforms (FFT). Read more in the
52
+ :ref:`User Guide <polynomial_kernel_approx>`.
53
+
54
+ .. versionadded:: 0.24
55
+
56
+ Parameters
57
+ ----------
58
+ gamma : float, default=1.0
59
+ Parameter of the polynomial kernel whose feature map
60
+ will be approximated.
61
+
62
+ degree : int, default=2
63
+ Degree of the polynomial kernel whose feature map
64
+ will be approximated.
65
+
66
+ coef0 : int, default=0
67
+ Constant term of the polynomial kernel whose feature map
68
+ will be approximated.
69
+
70
+ n_components : int, default=100
71
+ Dimensionality of the output feature space. Usually, `n_components`
72
+ should be greater than the number of features in input samples in
73
+ order to achieve good performance. The optimal score / run time
74
+ balance is typically achieved around `n_components` = 10 * `n_features`,
75
+ but this depends on the specific dataset being used.
76
+
77
+ random_state : int, RandomState instance, default=None
78
+ Determines random number generation for indexHash and bitHash
79
+ initialization. Pass an int for reproducible results across multiple
80
+ function calls. See :term:`Glossary <random_state>`.
81
+
82
+ Attributes
83
+ ----------
84
+ indexHash_ : ndarray of shape (degree, n_features), dtype=int64
85
+ Array of indexes in range [0, n_components) used to represent
86
+ the 2-wise independent hash functions for Count Sketch computation.
87
+
88
+ bitHash_ : ndarray of shape (degree, n_features), dtype=float32
89
+ Array with random entries in {+1, -1}, used to represent
90
+ the 2-wise independent hash functions for Count Sketch computation.
91
+
92
+ n_features_in_ : int
93
+ Number of features seen during :term:`fit`.
94
+
95
+ .. versionadded:: 0.24
96
+
97
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
98
+ Names of features seen during :term:`fit`. Defined only when `X`
99
+ has feature names that are all strings.
100
+
101
+ .. versionadded:: 1.0
102
+
103
+ See Also
104
+ --------
105
+ AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
106
+ Nystroem : Approximate a kernel map using a subset of the training data.
107
+ RBFSampler : Approximate a RBF kernel feature map using random Fourier
108
+ features.
109
+ SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
110
+ sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
111
+
112
+ Examples
113
+ --------
114
+ >>> from sklearn.kernel_approximation import PolynomialCountSketch
115
+ >>> from sklearn.linear_model import SGDClassifier
116
+ >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
117
+ >>> y = [0, 0, 1, 1]
118
+ >>> ps = PolynomialCountSketch(degree=3, random_state=1)
119
+ >>> X_features = ps.fit_transform(X)
120
+ >>> clf = SGDClassifier(max_iter=10, tol=1e-3)
121
+ >>> clf.fit(X_features, y)
122
+ SGDClassifier(max_iter=10)
123
+ >>> clf.score(X_features, y)
124
+ 1.0
125
+
126
+ For a more detailed example of usage, see
127
+ :ref:`sphx_glr_auto_examples_kernel_approximation_plot_scalable_poly_kernels.py`
128
+ """
129
+
130
+ _parameter_constraints: dict = {
131
+ "gamma": [Interval(Real, 0, None, closed="left")],
132
+ "degree": [Interval(Integral, 1, None, closed="left")],
133
+ "coef0": [Interval(Real, None, None, closed="neither")],
134
+ "n_components": [Interval(Integral, 1, None, closed="left")],
135
+ "random_state": ["random_state"],
136
+ }
137
+
138
+ def __init__(
139
+ self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None
140
+ ):
141
+ self.gamma = gamma
142
+ self.degree = degree
143
+ self.coef0 = coef0
144
+ self.n_components = n_components
145
+ self.random_state = random_state
146
+
147
+ @_fit_context(prefer_skip_nested_validation=True)
148
+ def fit(self, X, y=None):
149
+ """Fit the model with X.
150
+
151
+ Initializes the internal variables. The method needs no information
152
+ about the distribution of data, so we only care about n_features in X.
153
+
154
+ Parameters
155
+ ----------
156
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
157
+ Training data, where `n_samples` is the number of samples
158
+ and `n_features` is the number of features.
159
+
160
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
161
+ default=None
162
+ Target values (None for unsupervised transformations).
163
+
164
+ Returns
165
+ -------
166
+ self : object
167
+ Returns the instance itself.
168
+ """
169
+ X = self._validate_data(X, accept_sparse="csc")
170
+ random_state = check_random_state(self.random_state)
171
+
172
+ n_features = X.shape[1]
173
+ if self.coef0 != 0:
174
+ n_features += 1
175
+
176
+ self.indexHash_ = random_state.randint(
177
+ 0, high=self.n_components, size=(self.degree, n_features)
178
+ )
179
+
180
+ self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))
181
+ self._n_features_out = self.n_components
182
+ return self
183
+
184
+ def transform(self, X):
185
+ """Generate the feature map approximation for X.
186
+
187
+ Parameters
188
+ ----------
189
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
190
+ New data, where `n_samples` is the number of samples
191
+ and `n_features` is the number of features.
192
+
193
+ Returns
194
+ -------
195
+ X_new : array-like, shape (n_samples, n_components)
196
+ The approximate feature map of X.
197
+ """
198
+
199
+ check_is_fitted(self)
200
+ X = self._validate_data(X, accept_sparse="csc", reset=False)
201
+
202
+ X_gamma = np.sqrt(self.gamma) * X
203
+
204
+ if sp.issparse(X_gamma) and self.coef0 != 0:
205
+ X_gamma = sp.hstack(
206
+ [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))],
207
+ format="csc",
208
+ )
209
+
210
+ elif not sp.issparse(X_gamma) and self.coef0 != 0:
211
+ X_gamma = np.hstack(
212
+ [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))]
213
+ )
214
+
215
+ if X_gamma.shape[1] != self.indexHash_.shape[1]:
216
+ raise ValueError(
217
+ "Number of features of test samples does not"
218
+ " match that of training samples."
219
+ )
220
+
221
+ count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components))
222
+
223
+ if sp.issparse(X_gamma):
224
+ for j in range(X_gamma.shape[1]):
225
+ for d in range(self.degree):
226
+ iHashIndex = self.indexHash_[d, j]
227
+ iHashBit = self.bitHash_[d, j]
228
+ count_sketches[:, d, iHashIndex] += (
229
+ (iHashBit * X_gamma[:, [j]]).toarray().ravel()
230
+ )
231
+
232
+ else:
233
+ for j in range(X_gamma.shape[1]):
234
+ for d in range(self.degree):
235
+ iHashIndex = self.indexHash_[d, j]
236
+ iHashBit = self.bitHash_[d, j]
237
+ count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j]
238
+
239
+ # For each sample, compute a count sketch of phi(x) using the polynomial
240
+ # multiplication (via FFT) of p count sketches of x.
241
+ count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True)
242
+ count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1)
243
+ data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True))
244
+
245
+ return data_sketch
246
+
247
+
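The transform above relies on the TensorSketch identity: the count sketch of a tensor power is the FFT-domain product of the individual count sketches, so inner products of the sketched features approximate the exact polynomial kernel (gamma * <x, y> + coef0) ** degree. A minimal numerical check (an editor's sketch, not part of the library source; it only assumes NumPy and the public scikit-learn API):

import numpy as np
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.metrics.pairwise import polynomial_kernel

rng = np.random.RandomState(0)
X = rng.rand(20, 5)

# Exact polynomial kernel: (gamma * <x, y> + coef0) ** degree
exact = polynomial_kernel(X, degree=2, gamma=1.0, coef0=0)

# TensorSketch approximation: plain inner products of the sketched features
ps = PolynomialCountSketch(degree=2, gamma=1.0, coef0=0,
                           n_components=5000, random_state=0)
Z = ps.fit_transform(X)
approx = Z @ Z.T

print(np.abs(approx - exact).max())  # shrinks as n_components grows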
248
+ class RBFSampler(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
249
+ """Approximate a RBF kernel feature map using random Fourier features.
250
+
251
+ It implements a variant of Random Kitchen Sinks [1].
252
+
253
+ Read more in the :ref:`User Guide <rbf_kernel_approx>`.
254
+
255
+ Parameters
256
+ ----------
257
+ gamma : 'scale' or float, default=1.0
258
+ Parameter of the RBF kernel: exp(-gamma * ||x - y||^2).
259
+ If ``gamma='scale'`` is passed then it uses
260
+ 1 / (n_features * X.var()) as value of gamma.
261
+
262
+ .. versionadded:: 1.2
263
+ The option `"scale"` was added in 1.2.
264
+
265
+ n_components : int, default=100
266
+ Number of Monte Carlo samples per original feature.
267
+ Equals the dimensionality of the computed feature space.
268
+
269
+ random_state : int, RandomState instance or None, default=None
270
+ Pseudo-random number generator to control the generation of the random
271
+ weights and random offset when fitting the training data.
272
+ Pass an int for reproducible output across multiple function calls.
273
+ See :term:`Glossary <random_state>`.
274
+
275
+ Attributes
276
+ ----------
277
+ random_offset_ : ndarray of shape (n_components,), dtype={np.float64, np.float32}
278
+ Random offset used to compute the projection in the `n_components`
279
+ dimensions of the feature space.
280
+
281
+ random_weights_ : ndarray of shape (n_features, n_components),\
282
+ dtype={np.float64, np.float32}
283
+ Random projection directions drawn from the Fourier transform
284
+ of the RBF kernel.
285
+
286
+ n_features_in_ : int
287
+ Number of features seen during :term:`fit`.
288
+
289
+ .. versionadded:: 0.24
290
+
291
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
292
+ Names of features seen during :term:`fit`. Defined only when `X`
293
+ has feature names that are all strings.
294
+
295
+ .. versionadded:: 1.0
296
+
297
+ See Also
298
+ --------
299
+ AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
300
+ Nystroem : Approximate a kernel map using a subset of the training data.
301
+ PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
302
+ SkewedChi2Sampler : Approximate feature map for
303
+ "skewed chi-squared" kernel.
304
+ sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
305
+
306
+ Notes
307
+ -----
308
+ See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
309
+ Benjamin Recht.
310
+
311
+ [1] "Weighted Sums of Random Kitchen Sinks: Replacing
312
+ minimization with randomization in learning" by A. Rahimi and
313
+ Benjamin Recht.
314
+ (https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
315
+
316
+ Examples
317
+ --------
318
+ >>> from sklearn.kernel_approximation import RBFSampler
319
+ >>> from sklearn.linear_model import SGDClassifier
320
+ >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
321
+ >>> y = [0, 0, 1, 1]
322
+ >>> rbf_feature = RBFSampler(gamma=1, random_state=1)
323
+ >>> X_features = rbf_feature.fit_transform(X)
324
+ >>> clf = SGDClassifier(max_iter=5, tol=1e-3)
325
+ >>> clf.fit(X_features, y)
326
+ SGDClassifier(max_iter=5)
327
+ >>> clf.score(X_features, y)
328
+ 1.0
329
+ """
330
+
331
+ _parameter_constraints: dict = {
332
+ "gamma": [
333
+ StrOptions({"scale"}),
334
+ Interval(Real, 0.0, None, closed="left"),
335
+ ],
336
+ "n_components": [Interval(Integral, 1, None, closed="left")],
337
+ "random_state": ["random_state"],
338
+ }
339
+
340
+ def __init__(self, *, gamma=1.0, n_components=100, random_state=None):
341
+ self.gamma = gamma
342
+ self.n_components = n_components
343
+ self.random_state = random_state
344
+
345
+ @_fit_context(prefer_skip_nested_validation=True)
346
+ def fit(self, X, y=None):
347
+ """Fit the model with X.
348
+
349
+ Samples random projection according to n_features.
350
+
351
+ Parameters
352
+ ----------
353
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
354
+ Training data, where `n_samples` is the number of samples
355
+ and `n_features` is the number of features.
356
+
357
+ y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
358
+ default=None
359
+ Target values (None for unsupervised transformations).
360
+
361
+ Returns
362
+ -------
363
+ self : object
364
+ Returns the instance itself.
365
+ """
366
+ X = self._validate_data(X, accept_sparse="csr")
367
+ random_state = check_random_state(self.random_state)
368
+ n_features = X.shape[1]
369
+ sparse = sp.issparse(X)
370
+ if self.gamma == "scale":
371
+ # var = E[X^2] - E[X]^2 if sparse
372
+ X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
373
+ self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0
374
+ else:
375
+ self._gamma = self.gamma
376
+ self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal(
377
+ size=(n_features, self.n_components)
378
+ )
379
+
380
+ self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
381
+
382
+ if X.dtype == np.float32:
383
+ # Setting the data type of the fitted attribute will ensure the
384
+ # output data type during `transform`.
385
+ self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
386
+ self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
387
+
388
+ self._n_features_out = self.n_components
389
+ return self
390
+
391
+ def transform(self, X):
392
+ """Apply the approximate feature map to X.
393
+
394
+ Parameters
395
+ ----------
396
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
397
+ New data, where `n_samples` is the number of samples
398
+ and `n_features` is the number of features.
399
+
400
+ Returns
401
+ -------
402
+ X_new : array-like, shape (n_samples, n_components)
403
+ The approximate feature map of X.
404
+ """
405
+ check_is_fitted(self)
406
+
407
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
408
+ projection = safe_sparse_dot(X, self.random_weights_)
409
+ projection += self.random_offset_
410
+ np.cos(projection, projection)
411
+ projection *= (2.0 / self.n_components) ** 0.5
412
+ return projection
413
+
414
+ def _more_tags(self):
415
+ return {"preserves_dtype": [np.float64, np.float32]}
416
+
417
+
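In other words, transform computes sqrt(2 / n_components) * cos(X w + b) with w drawn from the Fourier transform of the RBF kernel, so dot products of the features are a Monte Carlo estimate of exp(-gamma * ||x - y||^2). A minimal check (editor's sketch, not library code):

import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(20, 5)

exact = rbf_kernel(X, gamma=1.0)

sampler = RBFSampler(gamma=1.0, n_components=5000, random_state=0)
Z = sampler.fit_transform(X)
approx = Z @ Z.T  # Monte Carlo estimate of the RBF kernel

# Error decays roughly like 1 / sqrt(n_components)
print(np.abs(approx - exact).max())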
418
+ class SkewedChi2Sampler(
419
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
420
+ ):
421
+ """Approximate feature map for "skewed chi-squared" kernel.
422
+
423
+ Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
424
+
425
+ Parameters
426
+ ----------
427
+ skewedness : float, default=1.0
428
+ "skewedness" parameter of the kernel. Needs to be cross-validated.
429
+
430
+ n_components : int, default=100
431
+ Number of Monte Carlo samples per original feature.
432
+ Equals the dimensionality of the computed feature space.
433
+
434
+ random_state : int, RandomState instance or None, default=None
435
+ Pseudo-random number generator to control the generation of the random
436
+ weights and random offset when fitting the training data.
437
+ Pass an int for reproducible output across multiple function calls.
438
+ See :term:`Glossary <random_state>`.
439
+
440
+ Attributes
441
+ ----------
442
+ random_weights_ : ndarray of shape (n_features, n_components)
443
+ Weight array, sampled from a hyperbolic secant distribution, which will
444
+ be used to linearly transform the log of the data.
445
+
446
+ random_offset_ : ndarray of shape (n_components,)
447
+ Bias term, which will be added to the data. It is uniformly distributed
448
+ between 0 and 2*pi.
449
+
450
+ n_features_in_ : int
451
+ Number of features seen during :term:`fit`.
452
+
453
+ .. versionadded:: 0.24
454
+
455
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
456
+ Names of features seen during :term:`fit`. Defined only when `X`
457
+ has feature names that are all strings.
458
+
459
+ .. versionadded:: 1.0
460
+
461
+ See Also
462
+ --------
463
+ AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
464
+ Nystroem : Approximate a kernel map using a subset of the training data.
465
+ RBFSampler : Approximate a RBF kernel feature map using random Fourier
466
+ features.
468
+ sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
469
+ sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
470
+
471
+ References
472
+ ----------
473
+ See "Random Fourier Approximations for Skewed Multiplicative Histogram
474
+ Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
475
+
476
+ Examples
477
+ --------
478
+ >>> from sklearn.kernel_approximation import SkewedChi2Sampler
479
+ >>> from sklearn.linear_model import SGDClassifier
480
+ >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
481
+ >>> y = [0, 0, 1, 1]
482
+ >>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
483
+ ... n_components=10,
484
+ ... random_state=0)
485
+ >>> X_features = chi2_feature.fit_transform(X, y)
486
+ >>> clf = SGDClassifier(max_iter=10, tol=1e-3)
487
+ >>> clf.fit(X_features, y)
488
+ SGDClassifier(max_iter=10)
489
+ >>> clf.score(X_features, y)
490
+ 1.0
491
+ """
492
+
493
+ _parameter_constraints: dict = {
494
+ "skewedness": [Interval(Real, None, None, closed="neither")],
495
+ "n_components": [Interval(Integral, 1, None, closed="left")],
496
+ "random_state": ["random_state"],
497
+ }
498
+
499
+ def __init__(self, *, skewedness=1.0, n_components=100, random_state=None):
500
+ self.skewedness = skewedness
501
+ self.n_components = n_components
502
+ self.random_state = random_state
503
+
504
+ @_fit_context(prefer_skip_nested_validation=True)
505
+ def fit(self, X, y=None):
506
+ """Fit the model with X.
507
+
508
+ Samples random projection according to n_features.
509
+
510
+ Parameters
511
+ ----------
512
+ X : array-like, shape (n_samples, n_features)
513
+ Training data, where `n_samples` is the number of samples
514
+ and `n_features` is the number of features.
515
+
516
+ y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
517
+ default=None
518
+ Target values (None for unsupervised transformations).
519
+
520
+ Returns
521
+ -------
522
+ self : object
523
+ Returns the instance itself.
524
+ """
525
+ X = self._validate_data(X)
526
+ random_state = check_random_state(self.random_state)
527
+ n_features = X.shape[1]
528
+ uniform = random_state.uniform(size=(n_features, self.n_components))
529
+ # transform by inverse CDF of sech
530
+ self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))
531
+ self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
532
+
533
+ if X.dtype == np.float32:
534
+ # Setting the data type of the fitted attribute will ensure the
535
+ # output data type during `transform`.
536
+ self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
537
+ self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
538
+
539
+ self._n_features_out = self.n_components
540
+ return self
541
+
542
+ def transform(self, X):
543
+ """Apply the approximate feature map to X.
544
+
545
+ Parameters
546
+ ----------
547
+ X : array-like, shape (n_samples, n_features)
548
+ New data, where `n_samples` is the number of samples
549
+ and `n_features` is the number of features. All values of X must be
550
+ strictly greater than "-skewedness".
551
+
552
+ Returns
553
+ -------
554
+ X_new : array-like, shape (n_samples, n_components)
555
+ The approximate feature map of X.
556
+ """
557
+ check_is_fitted(self)
558
+ X = self._validate_data(
559
+ X, copy=True, dtype=[np.float64, np.float32], reset=False
560
+ )
561
+ if (X <= -self.skewedness).any():
562
+ raise ValueError("X may not contain entries smaller than -skewedness.")
563
+
564
+ X += self.skewedness
565
+ np.log(X, X)
566
+ projection = safe_sparse_dot(X, self.random_weights_)
567
+ projection += self.random_offset_
568
+ np.cos(projection, projection)
569
+ projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
570
+ return projection
571
+
572
+ def _more_tags(self):
573
+ return {"preserves_dtype": [np.float64, np.float32]}
574
+
575
+
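The weights drawn in fit use the inverse CDF 1/pi * log(tan(pi/2 * u)) of the hyperbolic secant distribution. This can be cross-checked against scipy.stats.hypsecant, whose standard form has pdf sech(x)/pi and hence ppf(q) = log(tan(pi * q / 2)); the scikit-learn weights are that variate scaled by 1/pi. A short verification (editor's sketch, assuming SciPy is available):

import numpy as np
from scipy.stats import hypsecant

rng = np.random.RandomState(0)
u = rng.uniform(size=100_000)

# Inverse-CDF transform used in SkewedChi2Sampler.fit
w = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * u))

# Same transform via scipy's hyperbolic secant distribution, scaled by 1/pi
w_scipy = hypsecant(scale=1.0 / np.pi).ppf(u)

print(np.allclose(w, w_scipy))  # True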
576
+ class AdditiveChi2Sampler(TransformerMixin, BaseEstimator):
577
+ """Approximate feature map for additive chi2 kernel.
578
+
579
+ Samples the Fourier transform of the kernel characteristic function
580
+ at regular intervals.
581
+
582
+ Since the kernel that is to be approximated is additive, the components of
583
+ the input vectors can be treated separately. Each entry in the original
584
+ space is transformed into 2*sample_steps-1 features, where sample_steps is
585
+ a parameter of the method. Typical values of sample_steps include 1, 2 and
586
+ 3.
587
+
588
+ Optimal choices for the sampling interval for certain data ranges can be
589
+ computed (see the reference). The default values should be reasonable.
590
+
591
+ Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
592
+
593
+ Parameters
594
+ ----------
595
+ sample_steps : int, default=2
596
+ Gives the number of (complex) sampling points.
597
+
598
+ sample_interval : float, default=None
599
+ Sampling interval. Must be specified when sample_steps not in {1,2,3}.
600
+
601
+ Attributes
602
+ ----------
603
+ sample_interval_ : float
604
+ Stored sampling interval. Specified as a parameter if `sample_steps`
605
+ not in {1,2,3}.
606
+
607
+ .. deprecated:: 1.3
608
+ `sample_interval_` serves internal purposes only and will be removed in 1.5.
609
+
610
+ n_features_in_ : int
611
+ Number of features seen during :term:`fit`.
612
+
613
+ .. versionadded:: 0.24
614
+
615
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
616
+ Names of features seen during :term:`fit`. Defined only when `X`
617
+ has feature names that are all strings.
618
+
619
+ .. versionadded:: 1.0
620
+
621
+ See Also
622
+ --------
623
+ SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
624
+ the chi squared kernel.
625
+
626
+ sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
627
+
628
+ sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
629
+ squared kernel.
630
+
631
+ Notes
632
+ -----
633
+ This estimator approximates a slightly different version of the additive
634
+ chi squared kernel than ``metric.additive_chi2`` computes.
635
+
636
+ This estimator is stateless and does not need to be fitted. However, we
637
+ recommend calling :meth:`fit_transform` instead of :meth:`transform`, as
638
+ parameter validation is only performed in :meth:`fit`.
639
+
640
+ References
641
+ ----------
642
+ See `"Efficient additive kernels via explicit feature maps"
643
+ <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
644
+ A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
645
+ 2011
646
+
647
+ Examples
648
+ --------
649
+ >>> from sklearn.datasets import load_digits
650
+ >>> from sklearn.linear_model import SGDClassifier
651
+ >>> from sklearn.kernel_approximation import AdditiveChi2Sampler
652
+ >>> X, y = load_digits(return_X_y=True)
653
+ >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
654
+ >>> X_transformed = chi2sampler.fit_transform(X, y)
655
+ >>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3)
656
+ >>> clf.fit(X_transformed, y)
657
+ SGDClassifier(max_iter=5, random_state=0)
658
+ >>> clf.score(X_transformed, y)
659
+ 0.9499...
660
+ """
661
+
662
+ _parameter_constraints: dict = {
663
+ "sample_steps": [Interval(Integral, 1, None, closed="left")],
664
+ "sample_interval": [Interval(Real, 0, None, closed="left"), None],
665
+ }
666
+
667
+ def __init__(self, *, sample_steps=2, sample_interval=None):
668
+ self.sample_steps = sample_steps
669
+ self.sample_interval = sample_interval
670
+
671
+ @_fit_context(prefer_skip_nested_validation=True)
672
+ def fit(self, X, y=None):
673
+ """Only validates estimator's parameters.
674
+
675
+ This method allows one to (i) validate the estimator's parameters and
676
+ (ii) stay consistent with the scikit-learn transformer API.
677
+
678
+ Parameters
679
+ ----------
680
+ X : array-like, shape (n_samples, n_features)
681
+ Training data, where `n_samples` is the number of samples
682
+ and `n_features` is the number of features.
683
+
684
+ y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
685
+ default=None
686
+ Target values (None for unsupervised transformations).
687
+
688
+ Returns
689
+ -------
690
+ self : object
691
+ Returns the transformer.
692
+ """
693
+ X = self._validate_data(X, accept_sparse="csr")
694
+ check_non_negative(X, "X in AdditiveChi2Sampler.fit")
695
+
696
+ # TODO(1.5): remove the setting of _sample_interval from fit
697
+ if self.sample_interval is None:
698
+ # See figure 2 c) of "Efficient additive kernels via explicit feature maps"
699
+ # <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
700
+ # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
701
+ # 2011
702
+ if self.sample_steps == 1:
703
+ self._sample_interval = 0.8
704
+ elif self.sample_steps == 2:
705
+ self._sample_interval = 0.5
706
+ elif self.sample_steps == 3:
707
+ self._sample_interval = 0.4
708
+ else:
709
+ raise ValueError(
710
+ "If sample_steps is not in [1, 2, 3],"
711
+ " you need to provide sample_interval"
712
+ )
713
+ else:
714
+ self._sample_interval = self.sample_interval
715
+
716
+ return self
717
+
718
+ # TODO(1.5): remove
719
+ @deprecated( # type: ignore
720
+ "The ``sample_interval_`` attribute was deprecated in version 1.3 and "
721
+ "will be removed 1.5."
722
+ )
723
+ @property
724
+ def sample_interval_(self):
725
+ return self._sample_interval
726
+
727
+ def transform(self, X):
728
+ """Apply approximate feature map to X.
729
+
730
+ Parameters
731
+ ----------
732
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
733
+ Training data, where `n_samples` is the number of samples
734
+ and `n_features` is the number of features.
735
+
736
+ Returns
737
+ -------
738
+ X_new : {ndarray, sparse matrix}, \
739
+ shape = (n_samples, n_features * (2*sample_steps - 1))
740
+ Whether the return value is an array or sparse matrix depends on
741
+ the type of the input X.
742
+ """
743
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
744
+ check_non_negative(X, "X in AdditiveChi2Sampler.transform")
745
+ sparse = sp.issparse(X)
746
+
747
+ if hasattr(self, "_sample_interval"):
748
+ # TODO(1.5): remove this branch
749
+ sample_interval = self._sample_interval
750
+
751
+ else:
752
+ if self.sample_interval is None:
753
+ # See figure 2 c) of "Efficient additive kernels via explicit feature maps" # noqa
754
+ # <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
755
+ # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, # noqa
756
+ # 2011
757
+ if self.sample_steps == 1:
758
+ sample_interval = 0.8
759
+ elif self.sample_steps == 2:
760
+ sample_interval = 0.5
761
+ elif self.sample_steps == 3:
762
+ sample_interval = 0.4
763
+ else:
764
+ raise ValueError(
765
+ "If sample_steps is not in [1, 2, 3],"
766
+ " you need to provide sample_interval"
767
+ )
768
+ else:
769
+ sample_interval = self.sample_interval
770
+
771
+ # zeroth component
772
+ # 1/cosh = sech
773
+ # cosh(0) = 1.0
774
+ transf = self._transform_sparse if sparse else self._transform_dense
775
+ return transf(X, self.sample_steps, sample_interval)
776
+
777
+ def get_feature_names_out(self, input_features=None):
778
+ """Get output feature names for transformation.
779
+
780
+ Parameters
781
+ ----------
782
+ input_features : array-like of str or None, default=None
783
+ Only used to validate feature names with the names seen in :meth:`fit`.
784
+
785
+ Returns
786
+ -------
787
+ feature_names_out : ndarray of str objects
788
+ Transformed feature names.
789
+ """
790
+ check_is_fitted(self, "n_features_in_")
791
+ input_features = _check_feature_names_in(
792
+ self, input_features, generate_names=True
793
+ )
794
+ est_name = self.__class__.__name__.lower()
795
+
796
+ names_list = [f"{est_name}_{name}_sqrt" for name in input_features]
797
+
798
+ for j in range(1, self.sample_steps):
799
+ cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features]
800
+ sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features]
801
+ names_list.extend(cos_names + sin_names)
802
+
803
+ return np.asarray(names_list, dtype=object)
804
+
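Concretely, for two input features and sample_steps=2 the loop above yields the sqrt names first, then one cos/sin pair per extra sampling step (editor's illustration of the naming scheme, not library code):

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

X = np.array([[1.0, 2.0], [3.0, 4.0]])
sampler = AdditiveChi2Sampler(sample_steps=2).fit(X)
print(sampler.get_feature_names_out(["a", "b"]))
# ['additivechi2sampler_a_sqrt' 'additivechi2sampler_b_sqrt'
#  'additivechi2sampler_a_cos1' 'additivechi2sampler_b_cos1'
#  'additivechi2sampler_a_sin1' 'additivechi2sampler_b_sin1']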
805
+ @staticmethod
806
+ def _transform_dense(X, sample_steps, sample_interval):
807
+ non_zero = X != 0.0
808
+ X_nz = X[non_zero]
809
+
810
+ X_step = np.zeros_like(X)
811
+ X_step[non_zero] = np.sqrt(X_nz * sample_interval)
812
+
813
+ X_new = [X_step]
814
+
815
+ log_step_nz = sample_interval * np.log(X_nz)
816
+ step_nz = 2 * X_nz * sample_interval
817
+
818
+ for j in range(1, sample_steps):
819
+ factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))
820
+
821
+ X_step = np.zeros_like(X)
822
+ X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
823
+ X_new.append(X_step)
824
+
825
+ X_step = np.zeros_like(X)
826
+ X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
827
+ X_new.append(X_step)
828
+
829
+ return np.hstack(X_new)
830
+
831
+ @staticmethod
832
+ def _transform_sparse(X, sample_steps, sample_interval):
833
+ indices = X.indices.copy()
834
+ indptr = X.indptr.copy()
835
+
836
+ data_step = np.sqrt(X.data * sample_interval)
837
+ X_step = sp.csr_matrix(
838
+ (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
839
+ )
840
+ X_new = [X_step]
841
+
842
+ log_step_nz = sample_interval * np.log(X.data)
843
+ step_nz = 2 * X.data * sample_interval
844
+
845
+ for j in range(1, sample_steps):
846
+ factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))
847
+
848
+ data_step = factor_nz * np.cos(j * log_step_nz)
849
+ X_step = sp.csr_matrix(
850
+ (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
851
+ )
852
+ X_new.append(X_step)
853
+
854
+ data_step = factor_nz * np.sin(j * log_step_nz)
855
+ X_step = sp.csr_matrix(
856
+ (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
857
+ )
858
+ X_new.append(X_step)
859
+
860
+ return sp.hstack(X_new)
861
+
862
+ def _more_tags(self):
863
+ return {"stateless": True, "requires_positive_X": True}
864
+
865
+
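Each input entry thus expands into 2 * sample_steps - 1 features, and inner products of the expansion approximate the homogeneous additive chi2 kernel k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i). A minimal check (editor's sketch, not library code; inputs must be non-negative):

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

rng = np.random.RandomState(0)
X = rng.rand(10, 4)

Z = AdditiveChi2Sampler(sample_steps=3).fit_transform(X)
print(Z.shape)  # (10, 20): n_features * (2 * sample_steps - 1)

# Exact additive chi2 kernel, k(x, y) = sum_i 2 x_i y_i / (x_i + y_i)
exact = np.array([[np.sum(2 * x * y / (x + y)) for y in X] for x in X])
print(np.abs(Z @ Z.T - exact).max())  # small approximation error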
866
+ class Nystroem(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
867
+ """Approximate a kernel map using a subset of the training data.
868
+
869
+ Constructs an approximate feature map for an arbitrary kernel
870
+ using a subset of the data as basis.
871
+
872
+ Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
873
+
874
+ .. versionadded:: 0.13
875
+
876
+ Parameters
877
+ ----------
878
+ kernel : str or callable, default='rbf'
879
+ Kernel map to be approximated. A callable should accept two arguments
880
+ and the keyword arguments passed to this object as `kernel_params`, and
881
+ should return a floating point number.
882
+
883
+ gamma : float, default=None
884
+ Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
885
+ and sigmoid kernels. Interpretation of the default value is left to
886
+ the kernel; see the documentation for sklearn.metrics.pairwise.
887
+ Ignored by other kernels.
888
+
889
+ coef0 : float, default=None
890
+ Zero coefficient for polynomial and sigmoid kernels.
891
+ Ignored by other kernels.
892
+
893
+ degree : float, default=None
894
+ Degree of the polynomial kernel. Ignored by other kernels.
895
+
896
+ kernel_params : dict, default=None
897
+ Additional parameters (keyword arguments) for kernel function passed
898
+ as callable object.
899
+
900
+ n_components : int, default=100
901
+ Number of features to construct.
902
+ How many data points will be used to construct the mapping.
903
+
904
+ random_state : int, RandomState instance or None, default=None
905
+ Pseudo-random number generator to control the uniform sampling without
906
+ replacement of `n_components` of the training data to construct the
907
+ basis kernel.
908
+ Pass an int for reproducible output across multiple function calls.
909
+ See :term:`Glossary <random_state>`.
910
+
911
+ n_jobs : int, default=None
912
+ The number of jobs to use for the computation. This works by breaking
913
+ down the kernel matrix into `n_jobs` even slices and computing them in
914
+ parallel.
915
+
916
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
917
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
918
+ for more details.
919
+
920
+ .. versionadded:: 0.24
921
+
922
+ Attributes
923
+ ----------
924
+ components_ : ndarray of shape (n_components, n_features)
925
+ Subset of training points used to construct the feature map.
926
+
927
+ component_indices_ : ndarray of shape (n_components)
928
+ Indices of ``components_`` in the training set.
929
+
930
+ normalization_ : ndarray of shape (n_components, n_components)
931
+ Normalization matrix needed for embedding.
932
+ Square root of the kernel matrix on ``components_``.
933
+
934
+ n_features_in_ : int
935
+ Number of features seen during :term:`fit`.
936
+
937
+ .. versionadded:: 0.24
938
+
939
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
940
+ Names of features seen during :term:`fit`. Defined only when `X`
941
+ has feature names that are all strings.
942
+
943
+ .. versionadded:: 1.0
944
+
945
+ See Also
946
+ --------
947
+ AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
948
+ PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
949
+ RBFSampler : Approximate a RBF kernel feature map using random Fourier
950
+ features.
951
+ SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
952
+ sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
953
+
954
+ References
955
+ ----------
956
+ * Williams, C.K.I. and Seeger, M.
957
+ "Using the Nystroem method to speed up kernel machines",
958
+ Advances in neural information processing systems 2001
959
+
960
+ * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
961
+ "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
962
+ Comparison",
963
+ Advances in Neural Information Processing Systems 2012
964
+
965
+ Examples
966
+ --------
967
+ >>> from sklearn import datasets, svm
968
+ >>> from sklearn.kernel_approximation import Nystroem
969
+ >>> X, y = datasets.load_digits(n_class=9, return_X_y=True)
970
+ >>> data = X / 16.
971
+ >>> clf = svm.LinearSVC(dual="auto")
972
+ >>> feature_map_nystroem = Nystroem(gamma=.2,
973
+ ... random_state=1,
974
+ ... n_components=300)
975
+ >>> data_transformed = feature_map_nystroem.fit_transform(data)
976
+ >>> clf.fit(data_transformed, y)
977
+ LinearSVC(dual='auto')
978
+ >>> clf.score(data_transformed, y)
979
+ 0.9987...
980
+ """
981
+
982
+ _parameter_constraints: dict = {
983
+ "kernel": [
984
+ StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
985
+ callable,
986
+ ],
987
+ "gamma": [Interval(Real, 0, None, closed="left"), None],
988
+ "coef0": [Interval(Real, None, None, closed="neither"), None],
989
+ "degree": [Interval(Real, 1, None, closed="left"), None],
990
+ "kernel_params": [dict, None],
991
+ "n_components": [Interval(Integral, 1, None, closed="left")],
992
+ "random_state": ["random_state"],
993
+ "n_jobs": [Integral, None],
994
+ }
995
+
996
+ def __init__(
997
+ self,
998
+ kernel="rbf",
999
+ *,
1000
+ gamma=None,
1001
+ coef0=None,
1002
+ degree=None,
1003
+ kernel_params=None,
1004
+ n_components=100,
1005
+ random_state=None,
1006
+ n_jobs=None,
1007
+ ):
1008
+ self.kernel = kernel
1009
+ self.gamma = gamma
1010
+ self.coef0 = coef0
1011
+ self.degree = degree
1012
+ self.kernel_params = kernel_params
1013
+ self.n_components = n_components
1014
+ self.random_state = random_state
1015
+ self.n_jobs = n_jobs
1016
+
1017
+ @_fit_context(prefer_skip_nested_validation=True)
1018
+ def fit(self, X, y=None):
1019
+ """Fit estimator to data.
1020
+
1021
+ Samples a subset of training points, computes kernel
1022
+ on these and computes normalization matrix.
1023
+
1024
+ Parameters
1025
+ ----------
1026
+ X : array-like, shape (n_samples, n_features)
1027
+ Training data, where `n_samples` is the number of samples
1028
+ and `n_features` is the number of features.
1029
+
1030
+ y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
1031
+ default=None
1032
+ Target values (None for unsupervised transformations).
1033
+
1034
+ Returns
1035
+ -------
1036
+ self : object
1037
+ Returns the instance itself.
1038
+ """
1039
+ X = self._validate_data(X, accept_sparse="csr")
1040
+ rnd = check_random_state(self.random_state)
1041
+ n_samples = X.shape[0]
1042
+
1043
+ # get basis vectors
1044
+ if self.n_components > n_samples:
1045
+ # XXX should we just bail?
1046
+ n_components = n_samples
1047
+ warnings.warn(
1048
+ "n_components > n_samples. This is not possible.\n"
1049
+ "n_components was set to n_samples, which results"
1050
+ " in inefficient evaluation of the full kernel."
1051
+ )
1052
+
1053
+ else:
1054
+ n_components = self.n_components
1055
+ n_components = min(n_samples, n_components)
1056
+ inds = rnd.permutation(n_samples)
1057
+ basis_inds = inds[:n_components]
1058
+ basis = X[basis_inds]
1059
+
1060
+ basis_kernel = pairwise_kernels(
1061
+ basis,
1062
+ metric=self.kernel,
1063
+ filter_params=True,
1064
+ n_jobs=self.n_jobs,
1065
+ **self._get_kernel_params(),
1066
+ )
1067
+
1068
+ # sqrt of kernel matrix on basis vectors
1069
+ U, S, V = svd(basis_kernel)
1070
+ S = np.maximum(S, 1e-12)
1071
+ self.normalization_ = np.dot(U / np.sqrt(S), V)
1072
+ self.components_ = basis
1073
+ self.component_indices_ = basis_inds
1074
+ self._n_features_out = n_components
1075
+ return self
1076
+
1077
+ def transform(self, X):
1078
+ """Apply feature map to X.
1079
+
1080
+ Computes an approximate feature map using the kernel
1081
+ between some training points and X.
1082
+
1083
+ Parameters
1084
+ ----------
1085
+ X : array-like of shape (n_samples, n_features)
1086
+ Data to transform.
1087
+
1088
+ Returns
1089
+ -------
1090
+ X_transformed : ndarray of shape (n_samples, n_components)
1091
+ Transformed data.
1092
+ """
1093
+ check_is_fitted(self)
1094
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
1095
+
1096
+ kernel_params = self._get_kernel_params()
1097
+ embedded = pairwise_kernels(
1098
+ X,
1099
+ self.components_,
1100
+ metric=self.kernel,
1101
+ filter_params=True,
1102
+ n_jobs=self.n_jobs,
1103
+ **kernel_params,
1104
+ )
1105
+ return np.dot(embedded, self.normalization_.T)
1106
+
1107
+ def _get_kernel_params(self):
1108
+ params = self.kernel_params
1109
+ if params is None:
1110
+ params = {}
1111
+ if not callable(self.kernel) and self.kernel != "precomputed":
1112
+ for param in KERNEL_PARAMS[self.kernel]:
1113
+ if getattr(self, param) is not None:
1114
+ params[param] = getattr(self, param)
1115
+ else:
1116
+ if (
1117
+ self.gamma is not None
1118
+ or self.coef0 is not None
1119
+ or self.degree is not None
1120
+ ):
1121
+ raise ValueError(
1122
+ "Don't pass gamma, coef0 or degree to "
1123
+ "Nystroem if using a callable "
1124
+ "or precomputed kernel"
1125
+ )
1126
+
1127
+ return params
1128
+
1129
+ def _more_tags(self):
1130
+ return {
1131
+ "_xfail_checks": {
1132
+ "check_transformer_preserve_dtypes": (
1133
+ "dtypes are preserved but not at a close enough precision"
1134
+ )
1135
+ },
1136
+ "preserves_dtype": [np.float64, np.float32],
1137
+ }
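In matrix terms, fit factors the basis kernel K_bb and stores normalization_ = K_bb ** -0.5, so transform(X) is K(X, basis) @ normalization_.T and Z @ Z.T reproduces the Nystroem approximation K_nb K_bb^{-1} K_bn. A bare NumPy rendition of the same computation (editor's sketch, not library code):

import numpy as np
from scipy.linalg import svd
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(50, 5)
basis = X[rng.permutation(50)[:10]]  # random subset of the training data

U, S, V = svd(rbf_kernel(basis, basis))
normalization = np.dot(U / np.sqrt(np.maximum(S, 1e-12)), V)  # K_bb ** -0.5

Z = rbf_kernel(X, basis) @ normalization.T  # same as Nystroem.transform

# Z @ Z.T == K_nb @ inv(K_bb) @ K_nb.T, the Nystroem kernel approximation
print(np.abs(Z @ Z.T - rbf_kernel(X, X)).mean())  # small for a good basis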
llmeval-env/lib/python3.10/site-packages/sklearn/kernel_ridge.py ADDED
@@ -0,0 +1,237 @@
1
+ """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
2
+
3
+ # Authors: Mathieu Blondel <[email protected]>
4
+ # Jan Hendrik Metzen <[email protected]>
5
+ # License: BSD 3 clause
6
+ from numbers import Real
7
+
8
+ import numpy as np
9
+
10
+ from .base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context
11
+ from .linear_model._ridge import _solve_cholesky_kernel
12
+ from .metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
13
+ from .utils._param_validation import Interval, StrOptions
14
+ from .utils.validation import _check_sample_weight, check_is_fitted
15
+
16
+
17
+ class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator):
18
+ """Kernel ridge regression.
19
+
20
+ Kernel ridge regression (KRR) combines ridge regression (linear least
21
+ squares with l2-norm regularization) with the kernel trick. It thus
22
+ learns a linear function in the space induced by the respective kernel and
23
+ the data. For non-linear kernels, this corresponds to a non-linear
24
+ function in the original space.
25
+
26
+ The form of the model learned by KRR is identical to support vector
27
+ regression (SVR). However, different loss functions are used: KRR uses
28
+ squared error loss while support vector regression uses epsilon-insensitive
29
+ loss, both combined with l2 regularization. In contrast to SVR, fitting a
30
+ KRR model can be done in closed-form and is typically faster for
31
+ medium-sized datasets. On the other hand, the learned model is non-sparse
32
+ and thus slower than SVR, which learns a sparse model for epsilon > 0, at
33
+ prediction-time.
34
+
35
+ This estimator has built-in support for multi-variate regression
36
+ (i.e., when y is a 2d-array of shape [n_samples, n_targets]).
37
+
38
+ Read more in the :ref:`User Guide <kernel_ridge>`.
39
+
40
+ Parameters
41
+ ----------
42
+ alpha : float or array-like of shape (n_targets,), default=1.0
43
+ Regularization strength; must be a positive float. Regularization
44
+ improves the conditioning of the problem and reduces the variance of
45
+ the estimates. Larger values specify stronger regularization.
46
+ Alpha corresponds to ``1 / (2C)`` in other linear models such as
47
+ :class:`~sklearn.linear_model.LogisticRegression` or
48
+ :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
49
+ assumed to be specific to the targets. Hence they must correspond in
50
+ number. See :ref:`ridge_regression` for formula.
51
+
52
+ kernel : str or callable, default="linear"
53
+ Kernel mapping used internally. This parameter is directly passed to
54
+ :class:`~sklearn.metrics.pairwise.pairwise_kernels`.
55
+ If `kernel` is a string, it must be one of the metrics
56
+ in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed".
57
+ If `kernel` is "precomputed", X is assumed to be a kernel matrix.
58
+ Alternatively, if `kernel` is a callable function, it is called on
59
+ each pair of instances (rows) and the resulting value recorded. The
60
+ callable should take two rows from X as input and return the
61
+ corresponding kernel value as a single number. This means that
62
+ callables from :mod:`sklearn.metrics.pairwise` are not allowed, as
63
+ they operate on matrices, not single samples. Use the string
64
+ identifying the kernel instead.
65
+
66
+ gamma : float, default=None
67
+ Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
68
+ and sigmoid kernels. Interpretation of the default value is left to
69
+ the kernel; see the documentation for sklearn.metrics.pairwise.
70
+ Ignored by other kernels.
71
+
72
+ degree : float, default=3
73
+ Degree of the polynomial kernel. Ignored by other kernels.
74
+
75
+ coef0 : float, default=1
76
+ Zero coefficient for polynomial and sigmoid kernels.
77
+ Ignored by other kernels.
78
+
79
+ kernel_params : dict, default=None
80
+ Additional parameters (keyword arguments) for kernel function passed
81
+ as callable object.
82
+
83
+ Attributes
84
+ ----------
85
+ dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets)
86
+ Representation of weight vector(s) in kernel space.
87
+
88
+ X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features)
89
+ Training data, which is also required for prediction. If
90
+ kernel == "precomputed" this is instead the precomputed
91
+ training matrix, of shape (n_samples, n_samples).
92
+
93
+ n_features_in_ : int
94
+ Number of features seen during :term:`fit`.
95
+
96
+ .. versionadded:: 0.24
97
+
98
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
99
+ Names of features seen during :term:`fit`. Defined only when `X`
100
+ has feature names that are all strings.
101
+
102
+ .. versionadded:: 1.0
103
+
104
+ See Also
105
+ --------
106
+ sklearn.gaussian_process.GaussianProcessRegressor : Gaussian
107
+ Process regressor providing automatic kernel hyperparameters
108
+ tuning and predictions uncertainty.
109
+ sklearn.linear_model.Ridge : Linear ridge regression.
110
+ sklearn.linear_model.RidgeCV : Ridge regression with built-in
111
+ cross-validation.
112
+ sklearn.svm.SVR : Support Vector Regression accepting a large variety
113
+ of kernels.
114
+
115
+ References
116
+ ----------
117
+ * Kevin P. Murphy
118
+ "Machine Learning: A Probabilistic Perspective", The MIT Press
119
+ chapter 14.4.3, pp. 492-493
120
+
121
+ Examples
122
+ --------
123
+ >>> from sklearn.kernel_ridge import KernelRidge
124
+ >>> import numpy as np
125
+ >>> n_samples, n_features = 10, 5
126
+ >>> rng = np.random.RandomState(0)
127
+ >>> y = rng.randn(n_samples)
128
+ >>> X = rng.randn(n_samples, n_features)
129
+ >>> krr = KernelRidge(alpha=1.0)
130
+ >>> krr.fit(X, y)
131
+ KernelRidge(alpha=1.0)
132
+ """
133
+
134
+ _parameter_constraints: dict = {
135
+ "alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
136
+ "kernel": [
137
+ StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
138
+ callable,
139
+ ],
140
+ "gamma": [Interval(Real, 0, None, closed="left"), None],
141
+ "degree": [Interval(Real, 0, None, closed="left")],
142
+ "coef0": [Interval(Real, None, None, closed="neither")],
143
+ "kernel_params": [dict, None],
144
+ }
145
+
146
+ def __init__(
147
+ self,
148
+ alpha=1,
149
+ *,
150
+ kernel="linear",
151
+ gamma=None,
152
+ degree=3,
153
+ coef0=1,
154
+ kernel_params=None,
155
+ ):
156
+ self.alpha = alpha
157
+ self.kernel = kernel
158
+ self.gamma = gamma
159
+ self.degree = degree
160
+ self.coef0 = coef0
161
+ self.kernel_params = kernel_params
162
+
163
+ def _get_kernel(self, X, Y=None):
164
+ if callable(self.kernel):
165
+ params = self.kernel_params or {}
166
+ else:
167
+ params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0}
168
+ return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params)
169
+
170
+ def _more_tags(self):
171
+ return {"pairwise": self.kernel == "precomputed"}
172
+
173
+ @_fit_context(prefer_skip_nested_validation=True)
174
+ def fit(self, X, y, sample_weight=None):
175
+ """Fit Kernel Ridge regression model.
176
+
177
+ Parameters
178
+ ----------
179
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
180
+ Training data. If kernel == "precomputed" this is instead
181
+ a precomputed kernel matrix, of shape (n_samples, n_samples).
182
+
183
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
184
+ Target values.
185
+
186
+ sample_weight : float or array-like of shape (n_samples,), default=None
187
+ Individual weights for each sample, ignored if None is passed.
188
+
189
+ Returns
190
+ -------
191
+ self : object
192
+ Returns the instance itself.
193
+ """
194
+ # Convert data
195
+ X, y = self._validate_data(
196
+ X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
197
+ )
198
+ if sample_weight is not None and not isinstance(sample_weight, float):
199
+ sample_weight = _check_sample_weight(sample_weight, X)
200
+
201
+ K = self._get_kernel(X)
202
+ alpha = np.atleast_1d(self.alpha)
203
+
204
+ ravel = False
205
+ if len(y.shape) == 1:
206
+ y = y.reshape(-1, 1)
207
+ ravel = True
208
+
209
+ copy = self.kernel == "precomputed"
210
+ self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy)
211
+ if ravel:
212
+ self.dual_coef_ = self.dual_coef_.ravel()
213
+
214
+ self.X_fit_ = X
215
+
216
+ return self
217
+
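The Cholesky kernel solver used above amounts to solving the closed-form system (K + alpha * I) dual_coef = y; predict is then K(X_new, X_fit) @ dual_coef. A plain NumPy equivalent (editor's sketch, ignoring sample weights and per-target alphas):

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
y = rng.randn(30)
alpha, gamma = 1.0, 0.5

# Closed form: (K + alpha * I) dual_coef = y
K = rbf_kernel(X, gamma=gamma)
dual_coef = np.linalg.solve(K + alpha * np.eye(len(X)), y)
pred_manual = rbf_kernel(X, X, gamma=gamma) @ dual_coef

krr = KernelRidge(alpha=alpha, kernel="rbf", gamma=gamma).fit(X, y)
print(np.allclose(pred_manual, krr.predict(X)))  # True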
218
+ def predict(self, X):
219
+ """Predict using the kernel ridge model.
220
+
221
+ Parameters
222
+ ----------
223
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
224
+ Samples. If kernel == "precomputed" this is instead a
225
+ precomputed kernel matrix, shape = [n_samples,
226
+ n_samples_fitted], where n_samples_fitted is the number of
227
+ samples used in the fitting for this estimator.
228
+
229
+ Returns
230
+ -------
231
+ C : ndarray of shape (n_samples,) or (n_samples, n_targets)
232
+ Returns predicted values.
233
+ """
234
+ check_is_fitted(self)
235
+ X = self._validate_data(X, accept_sparse=("csr", "csc"), reset=False)
236
+ K = self._get_kernel(X, self.X_fit_)
237
+ return np.dot(K, self.dual_coef_)
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__init__.py ADDED
@@ -0,0 +1,100 @@
1
+ """
2
+ The :mod:`sklearn.linear_model` module implements a variety of linear models.
3
+ """
4
+
5
+ # See http://scikit-learn.sourceforge.net/modules/sgd.html and
6
+ # http://scikit-learn.sourceforge.net/modules/linear_model.html for
7
+ # complete documentation.
8
+
9
+ from ._base import LinearRegression
10
+ from ._bayes import ARDRegression, BayesianRidge
11
+ from ._coordinate_descent import (
12
+ ElasticNet,
13
+ ElasticNetCV,
14
+ Lasso,
15
+ LassoCV,
16
+ MultiTaskElasticNet,
17
+ MultiTaskElasticNetCV,
18
+ MultiTaskLasso,
19
+ MultiTaskLassoCV,
20
+ enet_path,
21
+ lasso_path,
22
+ )
23
+ from ._glm import GammaRegressor, PoissonRegressor, TweedieRegressor
24
+ from ._huber import HuberRegressor
25
+ from ._least_angle import (
26
+ Lars,
27
+ LarsCV,
28
+ LassoLars,
29
+ LassoLarsCV,
30
+ LassoLarsIC,
31
+ lars_path,
32
+ lars_path_gram,
33
+ )
34
+ from ._logistic import LogisticRegression, LogisticRegressionCV
35
+ from ._omp import (
36
+ OrthogonalMatchingPursuit,
37
+ OrthogonalMatchingPursuitCV,
38
+ orthogonal_mp,
39
+ orthogonal_mp_gram,
40
+ )
41
+ from ._passive_aggressive import PassiveAggressiveClassifier, PassiveAggressiveRegressor
42
+ from ._perceptron import Perceptron
43
+ from ._quantile import QuantileRegressor
44
+ from ._ransac import RANSACRegressor
45
+ from ._ridge import Ridge, RidgeClassifier, RidgeClassifierCV, RidgeCV, ridge_regression
46
+ from ._sgd_fast import Hinge, Huber, Log, ModifiedHuber, SquaredLoss
47
+ from ._stochastic_gradient import SGDClassifier, SGDOneClassSVM, SGDRegressor
48
+ from ._theil_sen import TheilSenRegressor
49
+
50
+ __all__ = [
51
+ "ARDRegression",
52
+ "BayesianRidge",
53
+ "ElasticNet",
54
+ "ElasticNetCV",
55
+ "Hinge",
56
+ "Huber",
57
+ "HuberRegressor",
58
+ "Lars",
59
+ "LarsCV",
60
+ "Lasso",
61
+ "LassoCV",
62
+ "LassoLars",
63
+ "LassoLarsCV",
64
+ "LassoLarsIC",
65
+ "LinearRegression",
66
+ "Log",
67
+ "LogisticRegression",
68
+ "LogisticRegressionCV",
69
+ "ModifiedHuber",
70
+ "MultiTaskElasticNet",
71
+ "MultiTaskElasticNetCV",
72
+ "MultiTaskLasso",
73
+ "MultiTaskLassoCV",
74
+ "OrthogonalMatchingPursuit",
75
+ "OrthogonalMatchingPursuitCV",
76
+ "PassiveAggressiveClassifier",
77
+ "PassiveAggressiveRegressor",
78
+ "Perceptron",
79
+ "QuantileRegressor",
80
+ "Ridge",
81
+ "RidgeCV",
82
+ "RidgeClassifier",
83
+ "RidgeClassifierCV",
84
+ "SGDClassifier",
85
+ "SGDRegressor",
86
+ "SGDOneClassSVM",
87
+ "SquaredLoss",
88
+ "TheilSenRegressor",
89
+ "enet_path",
90
+ "lars_path",
91
+ "lars_path_gram",
92
+ "lasso_path",
93
+ "orthogonal_mp",
94
+ "orthogonal_mp_gram",
95
+ "ridge_regression",
96
+ "RANSACRegressor",
97
+ "PoissonRegressor",
98
+ "GammaRegressor",
99
+ "TweedieRegressor",
100
+ ]
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_base.py ADDED
@@ -0,0 +1,814 @@
1
+ """
2
+ Generalized Linear Models.
3
+ """
4
+
5
+ # Author: Alexandre Gramfort <[email protected]>
6
+ # Fabian Pedregosa <[email protected]>
7
+ # Olivier Grisel <[email protected]>
8
+ # Vincent Michel <[email protected]>
9
+ # Peter Prettenhofer <[email protected]>
10
+ # Mathieu Blondel <[email protected]>
11
+ # Lars Buitinck
12
+ # Maryan Morel <[email protected]>
13
+ # Giorgio Patrini <[email protected]>
14
+ # Maria Telenczuk <https://github.com/maikia>
15
+ # License: BSD 3 clause
16
+
17
+ import numbers
18
+ import warnings
19
+ from abc import ABCMeta, abstractmethod
20
+ from numbers import Integral
21
+
22
+ import numpy as np
23
+ import scipy.sparse as sp
24
+ from scipy import linalg, optimize, sparse
25
+ from scipy.sparse.linalg import lsqr
26
+ from scipy.special import expit
27
+
28
+ from ..base import (
29
+ BaseEstimator,
30
+ ClassifierMixin,
31
+ MultiOutputMixin,
32
+ RegressorMixin,
33
+ _fit_context,
34
+ )
35
+ from ..utils import check_array, check_random_state
36
+ from ..utils._array_api import get_namespace
37
+ from ..utils._seq_dataset import (
38
+ ArrayDataset32,
39
+ ArrayDataset64,
40
+ CSRDataset32,
41
+ CSRDataset64,
42
+ )
43
+ from ..utils.extmath import safe_sparse_dot
44
+ from ..utils.parallel import Parallel, delayed
45
+ from ..utils.sparsefuncs import mean_variance_axis
46
+ from ..utils.validation import FLOAT_DTYPES, _check_sample_weight, check_is_fitted
47
+
48
+ # TODO: bayesian_ridge_regression and bayesian_regression_ard
49
+ # should be squashed into its respective objects.
50
+
51
+ SPARSE_INTERCEPT_DECAY = 0.01
52
+ # For sparse data intercept updates are scaled by this decay factor to avoid
53
+ # intercept oscillation.
54
+
55
+
56
+ def make_dataset(X, y, sample_weight, random_state=None):
57
+ """Create ``Dataset`` abstraction for sparse and dense inputs.
58
+
59
+ This also returns the ``intercept_decay`` which is different
60
+ for sparse datasets.
61
+
62
+ Parameters
63
+ ----------
64
+ X : array-like, shape (n_samples, n_features)
65
+ Training data.
66
+
67
+ y : array-like, shape (n_samples, )
68
+ Target values.
69
+
70
+ sample_weight : numpy array of shape (n_samples,)
71
+ The weight of each sample.
72
+
73
+ random_state : int, RandomState instance or None (default)
74
+ Determines random number generation for dataset random sampling. It is not
75
+ used for dataset shuffling.
76
+ Pass an int for reproducible output across multiple function calls.
77
+ See :term:`Glossary <random_state>`.
78
+
79
+ Returns
80
+ -------
81
+ dataset
82
+ The ``Dataset`` abstraction
83
+ intercept_decay
84
+ The intercept decay
85
+ """
86
+
87
+ rng = check_random_state(random_state)
88
+ # seed should never be 0 in SequentialDataset64
89
+ seed = rng.randint(1, np.iinfo(np.int32).max)
90
+
91
+ if X.dtype == np.float32:
92
+ CSRData = CSRDataset32
93
+ ArrayData = ArrayDataset32
94
+ else:
95
+ CSRData = CSRDataset64
96
+ ArrayData = ArrayDataset64
97
+
98
+ if sp.issparse(X):
99
+ dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)
100
+ intercept_decay = SPARSE_INTERCEPT_DECAY
101
+ else:
102
+ X = np.ascontiguousarray(X)
103
+ dataset = ArrayData(X, y, sample_weight, seed=seed)
104
+ intercept_decay = 1.0
105
+
106
+ return dataset, intercept_decay
107
+
108
+
109
+ def _preprocess_data(
110
+ X,
111
+ y,
112
+ *,
113
+ fit_intercept,
114
+ copy=True,
115
+ copy_y=True,
116
+ sample_weight=None,
117
+ check_input=True,
118
+ ):
119
+ """Common data preprocessing for fitting linear models.
120
+
121
+ This helper is in charge of the following steps:
122
+
123
+ - Ensure that `sample_weight` is an array or `None`.
124
+ - If `check_input=True`, perform standard input validation of `X`, `y`.
125
+ - Perform copies if requested to avoid side-effects in case of inplace
126
+ modifications of the input.
127
+
128
+ Then, if `fit_intercept=True` this preprocessing centers both `X` and `y` as
129
+ follows:
130
+ - if `X` is dense, center the data and
131
+ store the mean vector in `X_offset`.
132
+ - if `X` is sparse, store the mean in `X_offset`
133
+ without centering `X`. The centering is expected to be handled by the
134
+ linear solver where appropriate.
135
+ - in either case, always center `y` and store the mean in `y_offset`.
136
+ - both `X_offset` and `y_offset` are always weighted by `sample_weight`
137
+ if not set to `None`.
138
+
139
+ If `fit_intercept=False`, no centering is performed and `X_offset`, `y_offset`
140
+ are set to zero.
141
+
142
+ Returns
143
+ -------
144
+ X_out : {ndarray, sparse matrix} of shape (n_samples, n_features)
145
+ If copy=True a copy of the input X is triggered, otherwise operations are
146
+ inplace.
147
+ If input X is dense, then X_out is centered.
148
+ y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
149
+ Centered version of y. Possibly performed inplace on input y depending
150
+ on the copy_y parameter.
151
+ X_offset : ndarray of shape (n_features,)
152
+ The mean per column of input X.
153
+ y_offset : float or ndarray of shape (n_targets,)
154
+ X_scale : ndarray of shape (n_features,)
155
+ Always an array of ones. TODO: refactor the code base to make it
156
+ possible to remove this unused variable.
157
+ """
158
+ if isinstance(sample_weight, numbers.Number):
159
+ sample_weight = None
160
+ if sample_weight is not None:
161
+ sample_weight = np.asarray(sample_weight)
162
+
163
+ if check_input:
164
+ X = check_array(X, copy=copy, accept_sparse=["csr", "csc"], dtype=FLOAT_DTYPES)
165
+ y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False)
166
+ else:
167
+ y = y.astype(X.dtype, copy=copy_y)
168
+ if copy:
169
+ if sp.issparse(X):
170
+ X = X.copy()
171
+ else:
172
+ X = X.copy(order="K")
173
+
174
+ if fit_intercept:
175
+ if sp.issparse(X):
176
+ X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight)
177
+ else:
178
+ X_offset = np.average(X, axis=0, weights=sample_weight)
179
+
180
+ X_offset = X_offset.astype(X.dtype, copy=False)
181
+ X -= X_offset
182
+
183
+ y_offset = np.average(y, axis=0, weights=sample_weight)
184
+ y -= y_offset
185
+ else:
186
+ X_offset = np.zeros(X.shape[1], dtype=X.dtype)
187
+ if y.ndim == 1:
188
+ y_offset = X.dtype.type(0)
189
+ else:
190
+ y_offset = np.zeros(y.shape[1], dtype=X.dtype)
191
+
192
+ # XXX: X_scale is no longer needed. It is an historic artifact from the
193
+ # time when linear models exposed the normalize parameter.
194
+ X_scale = np.ones(X.shape[1], dtype=X.dtype)
195
+ return X, y, X_offset, y_offset, X_scale
196
+
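The point of this centering is that the intercept never has to enter the solver: fit coefficients on the centered data, then recover the intercept as y_offset - X_offset @ coef (which is what _set_intercept below does). A small demonstration of the identity (editor's sketch, not library code):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 4.0  # true intercept is 4.0

X_offset, y_offset = X.mean(axis=0), y.mean()
Xc, yc = X - X_offset, y - y_offset       # what fit_intercept=True does

coef, *_ = np.linalg.lstsq(Xc, yc, rcond=None)
intercept = y_offset - X_offset @ coef    # recovered intercept
print(coef, intercept)                    # ~[1, -2, 0.5] and ~4.0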
197
+
198
+ # TODO: _rescale_data should be factored into _preprocess_data.
199
+ # Currently, the fact that sag implements its own way to deal with
200
+ # sample_weight makes the refactoring tricky.
201
+
202
+
203
+ def _rescale_data(X, y, sample_weight, inplace=False):
204
+ """Rescale data sample-wise by square root of sample_weight.
205
+
206
+ For many linear models, this enables easy support for sample_weight because
207
+
208
+ (y - X w)' S (y - X w)
209
+
210
+ with S = diag(sample_weight) becomes
211
+
212
+ ||y_rescaled - X_rescaled w||_2^2
213
+
214
+ when setting
215
+
216
+ y_rescaled = sqrt(S) y
217
+ X_rescaled = sqrt(S) X
218
+
219
+ Returns
220
+ -------
221
+ X_rescaled : {array-like, sparse matrix}
222
+
223
+ y_rescaled : {array-like, sparse matrix}
224
+ """
225
+ # Assume that _validate_data and _check_sample_weight have been called by
226
+ # the caller.
227
+ n_samples = X.shape[0]
228
+ sample_weight_sqrt = np.sqrt(sample_weight)
229
+
230
+ if sp.issparse(X) or sp.issparse(y):
231
+ sw_matrix = sparse.dia_matrix(
232
+ (sample_weight_sqrt, 0), shape=(n_samples, n_samples)
233
+ )
234
+
235
+ if sp.issparse(X):
236
+ X = safe_sparse_dot(sw_matrix, X)
237
+ else:
238
+ if inplace:
239
+ X *= sample_weight_sqrt[:, np.newaxis]
240
+ else:
241
+ X = X * sample_weight_sqrt[:, np.newaxis]
242
+
243
+ if sp.issparse(y):
244
+ y = safe_sparse_dot(sw_matrix, y)
245
+ else:
246
+ if inplace:
247
+ if y.ndim == 1:
248
+ y *= sample_weight_sqrt
249
+ else:
250
+ y *= sample_weight_sqrt[:, np.newaxis]
251
+ else:
252
+ if y.ndim == 1:
253
+ y = y * sample_weight_sqrt
254
+ else:
255
+ y = y * sample_weight_sqrt[:, np.newaxis]
256
+ return X, y, sample_weight_sqrt
257
+
258
+
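The identity in the docstring means a weighted least-squares problem can be handed to any unweighted solver after this rescaling. A quick numerical confirmation (editor's sketch):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = rng.randn(50)
sw = rng.rand(50)  # sample weights

# Unweighted solver on rescaled data ...
sqrt_sw = np.sqrt(sw)
w_rescaled, *_ = np.linalg.lstsq(X * sqrt_sw[:, None], y * sqrt_sw,
                                 rcond=None)

# ... matches the weighted normal equations X' S X w = X' S y
w_normal = np.linalg.solve(X.T @ (sw[:, None] * X), X.T @ (sw * y))

print(np.allclose(w_rescaled, w_normal))  # True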
259
+ class LinearModel(BaseEstimator, metaclass=ABCMeta):
260
+ """Base class for Linear Models"""
261
+
262
+ @abstractmethod
263
+ def fit(self, X, y):
264
+ """Fit model."""
265
+
266
+ def _decision_function(self, X):
267
+ check_is_fitted(self)
268
+
269
+ X = self._validate_data(X, accept_sparse=["csr", "csc", "coo"], reset=False)
270
+ return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
271
+
272
+ def predict(self, X):
273
+ """
274
+ Predict using the linear model.
275
+
276
+ Parameters
277
+ ----------
278
+ X : array-like or sparse matrix, shape (n_samples, n_features)
279
+ Samples.
280
+
281
+ Returns
282
+ -------
283
+ C : array, shape (n_samples,)
284
+ Returns predicted values.
285
+ """
286
+ return self._decision_function(X)
287
+
288
+ def _set_intercept(self, X_offset, y_offset, X_scale):
289
+ """Set the intercept_"""
290
+ if self.fit_intercept:
291
+ # We always want coef_.dtype=X.dtype. For instance, X.dtype can differ from
292
+ # coef_.dtype if warm_start=True.
293
+ self.coef_ = np.divide(self.coef_, X_scale, dtype=X_scale.dtype)
294
+ self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
295
+ else:
296
+ self.intercept_ = 0.0
297
+
298
+ def _more_tags(self):
299
+ return {"requires_y": True}
300
+
301
+
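The `_set_intercept` recipe above can be checked in isolation: fit coefficients on centered data, then recover the intercept as `y_offset - X_offset @ coef`. A minimal sketch with illustrative data (here X_scale is all ones, as `_preprocess_data` guarantees):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 2))
y = X @ np.array([3.0, -1.0]) + 7.0        # true intercept is 7

X_offset, y_offset = X.mean(axis=0), y.mean()
coef, *_ = np.linalg.lstsq(X - X_offset, y - y_offset, rcond=None)
intercept = y_offset - X_offset @ coef     # same formula as _set_intercept

assert np.isclose(intercept, 7.0)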
302
+ # XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
303
+ # Maybe the n_features checking can be moved to LinearModel.
304
+ class LinearClassifierMixin(ClassifierMixin):
305
+ """Mixin for linear classifiers.
306
+
307
+ Handles prediction for sparse and dense X.
308
+ """
309
+
310
+ def decision_function(self, X):
311
+ """
312
+ Predict confidence scores for samples.
313
+
314
+ The confidence score for a sample is proportional to the signed
315
+ distance of that sample to the hyperplane.
316
+
317
+ Parameters
318
+ ----------
319
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
320
+ The data matrix for which we want to get the confidence scores.
321
+
322
+ Returns
323
+ -------
324
+ scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
325
+ Confidence scores per `(n_samples, n_classes)` combination. In the
326
+ binary case, confidence score for `self.classes_[1]` where >0 means
327
+ this class would be predicted.
328
+ """
329
+ check_is_fitted(self)
330
+ xp, _ = get_namespace(X)
331
+
332
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
333
+ scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
334
+ return xp.reshape(scores, (-1,)) if scores.shape[1] == 1 else scores
335
+
336
+ def predict(self, X):
337
+ """
338
+ Predict class labels for samples in X.
339
+
340
+ Parameters
341
+ ----------
342
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
343
+ The data matrix for which we want to get the predictions.
344
+
345
+ Returns
346
+ -------
347
+ y_pred : ndarray of shape (n_samples,)
348
+ Vector containing the class labels for each sample.
349
+ """
350
+ xp, _ = get_namespace(X)
351
+ scores = self.decision_function(X)
352
+ if len(scores.shape) == 1:
353
+ indices = xp.astype(scores > 0, int)
354
+ else:
355
+ indices = xp.argmax(scores, axis=1)
356
+
357
+ return xp.take(self.classes_, indices, axis=0)
358
+
359
+ def _predict_proba_lr(self, X):
360
+ """Probability estimation for OvR logistic regression.
361
+
362
+ Positive class probabilities are computed as
363
+ 1. / (1. + np.exp(-self.decision_function(X)));
364
+ multiclass is handled by normalizing that over all classes.
365
+ """
366
+ prob = self.decision_function(X)
367
+ expit(prob, out=prob)
368
+ if prob.ndim == 1:
369
+ return np.vstack([1 - prob, prob]).T
370
+ else:
371
+ # OvR normalization, like LibLinear's predict_probability
372
+ prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
373
+ return prob
374
+
375
+
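A small sketch (toy scores, hypothetical labels) of how the mixin turns decision values into predictions and OvR probabilities, mirroring `predict` and `_predict_proba_lr` above:

import numpy as np
from scipy.special import expit

# Binary case: a positive score predicts classes_[1]
classes = np.array(["neg", "pos"])
scores = np.array([-1.2, 0.3, 2.5])
print(classes[(scores > 0).astype(int)])   # ['neg' 'pos' 'pos']

# Multiclass case: argmax over per-class scores, then OvR normalization
S = np.array([[2.0, 0.5, -1.0],
              [-0.2, 1.5, 0.1]])
print(np.argmax(S, axis=1))                # predicted class index per row
P = expit(S)
P /= P.sum(axis=1, keepdims=True)          # rows now sum to 1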
376
+ class SparseCoefMixin:
377
+ """Mixin for converting coef_ to and from CSR format.
378
+
379
+ L1-regularizing estimators should inherit this.
380
+ """
381
+
382
+ def densify(self):
383
+ """
384
+ Convert coefficient matrix to dense array format.
385
+
386
+ Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
387
+ default format of ``coef_`` and is required for fitting, so calling
388
+ this method is only required on models that have previously been
389
+ sparsified; otherwise, it is a no-op.
390
+
391
+ Returns
392
+ -------
393
+ self
394
+ Fitted estimator.
395
+ """
396
+ msg = "Estimator, %(name)s, must be fitted before densifying."
397
+ check_is_fitted(self, msg=msg)
398
+ if sp.issparse(self.coef_):
399
+ self.coef_ = self.coef_.toarray()
400
+ return self
401
+
402
+ def sparsify(self):
403
+ """
404
+ Convert coefficient matrix to sparse format.
405
+
406
+ Converts the ``coef_`` member to a scipy.sparse matrix, which for
407
+ L1-regularized models can be much more memory- and storage-efficient
408
+ than the usual numpy.ndarray representation.
409
+
410
+ The ``intercept_`` member is not converted.
411
+
412
+ Returns
413
+ -------
414
+ self
415
+ Fitted estimator.
416
+
417
+ Notes
418
+ -----
419
+ For non-sparse models, i.e. when there are not many zeros in ``coef_``,
420
+ this may actually *increase* memory usage, so use this method with
421
+ care. A rule of thumb is that the number of zero elements, which can
422
+ be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
423
+ to provide significant benefits.
424
+
425
+ After calling this method, further fitting with the partial_fit
426
+ method (if any) will not work until you call densify.
427
+ """
428
+ msg = "Estimator, %(name)s, must be fitted before sparsifying."
429
+ check_is_fitted(self, msg=msg)
430
+ self.coef_ = sp.csr_matrix(self.coef_)
431
+ return self
432
+
433
+
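A hedged usage sketch of the round trip on an estimator that inherits this mixin (`SGDClassifier` does); the data is synthetic and the sparsity fraction depends on it:

import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 50))
y = (X[:, 0] > 0).astype(int)

clf = SGDClassifier(penalty="l1", alpha=0.1, random_state=0).fit(X, y)
print((clf.coef_ == 0).mean())   # rule of thumb: sparsify when > 0.5
clf.sparsify()                   # coef_ is now a scipy.sparse CSR matrix
clf.densify()                    # back to a numpy.ndarray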
434
+ class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
435
+ """
436
+ Ordinary least squares Linear Regression.
437
+
438
+ LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
439
+ to minimize the residual sum of squares between the observed targets in
440
+ the dataset, and the targets predicted by the linear approximation.
441
+
442
+ Parameters
443
+ ----------
444
+ fit_intercept : bool, default=True
445
+ Whether to calculate the intercept for this model. If set
446
+ to False, no intercept will be used in calculations
447
+ (i.e. data is expected to be centered).
448
+
449
+ copy_X : bool, default=True
450
+ If True, X will be copied; else, it may be overwritten.
451
+
452
+ n_jobs : int, default=None
453
+ The number of jobs to use for the computation. This will only provide
454
+ speedup in case of sufficiently large problems, that is if firstly
455
+ `n_targets > 1` and secondly `X` is sparse or if `positive` is set
456
+ to `True`. ``None`` means 1 unless in a
457
+ :obj:`joblib.parallel_backend` context. ``-1`` means using all
458
+ processors. See :term:`Glossary <n_jobs>` for more details.
459
+
460
+ positive : bool, default=False
461
+ When set to ``True``, forces the coefficients to be positive. This
462
+ option is only supported for dense arrays.
463
+
464
+ .. versionadded:: 0.24
465
+
466
+ Attributes
467
+ ----------
468
+ coef_ : array of shape (n_features, ) or (n_targets, n_features)
469
+ Estimated coefficients for the linear regression problem.
470
+ If multiple targets are passed during the fit (y 2D), this
471
+ is a 2D array of shape (n_targets, n_features), while if only
472
+ one target is passed, this is a 1D array of length n_features.
473
+
474
+ rank_ : int
475
+ Rank of matrix `X`. Only available when `X` is dense.
476
+
477
+ singular_ : array of shape (min(n_samples, n_features),)
478
+ Singular values of `X`. Only available when `X` is dense.
479
+
480
+ intercept_ : float or array of shape (n_targets,)
481
+ Independent term in the linear model. Set to 0.0 if
482
+ `fit_intercept = False`.
483
+
484
+ n_features_in_ : int
485
+ Number of features seen during :term:`fit`.
486
+
487
+ .. versionadded:: 0.24
488
+
489
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
490
+ Names of features seen during :term:`fit`. Defined only when `X`
491
+ has feature names that are all strings.
492
+
493
+ .. versionadded:: 1.0
494
+
495
+ See Also
496
+ --------
497
+ Ridge : Ridge regression addresses some of the
498
+ problems of Ordinary Least Squares by imposing a penalty on the
499
+ size of the coefficients with l2 regularization.
500
+ Lasso : The Lasso is a linear model that estimates
501
+ sparse coefficients with l1 regularization.
502
+ ElasticNet : Elastic-Net is a linear regression
503
+ model trained with both l1 and l2 -norm regularization of the
504
+ coefficients.
505
+
506
+ Notes
507
+ -----
508
+ From the implementation point of view, this is just plain Ordinary
509
+ Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares
510
+ (scipy.optimize.nnls) wrapped as a predictor object.
511
+
512
+ Examples
513
+ --------
514
+ >>> import numpy as np
515
+ >>> from sklearn.linear_model import LinearRegression
516
+ >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
517
+ >>> # y = 1 * x_0 + 2 * x_1 + 3
518
+ >>> y = np.dot(X, np.array([1, 2])) + 3
519
+ >>> reg = LinearRegression().fit(X, y)
520
+ >>> reg.score(X, y)
521
+ 1.0
522
+ >>> reg.coef_
523
+ array([1., 2.])
524
+ >>> reg.intercept_
525
+ 3.0...
526
+ >>> reg.predict(np.array([[3, 5]]))
527
+ array([16.])
528
+ """
529
+
530
+ _parameter_constraints: dict = {
531
+ "fit_intercept": ["boolean"],
532
+ "copy_X": ["boolean"],
533
+ "n_jobs": [None, Integral],
534
+ "positive": ["boolean"],
535
+ }
536
+
537
+ def __init__(
538
+ self,
539
+ *,
540
+ fit_intercept=True,
541
+ copy_X=True,
542
+ n_jobs=None,
543
+ positive=False,
544
+ ):
545
+ self.fit_intercept = fit_intercept
546
+ self.copy_X = copy_X
547
+ self.n_jobs = n_jobs
548
+ self.positive = positive
549
+
550
+ @_fit_context(prefer_skip_nested_validation=True)
551
+ def fit(self, X, y, sample_weight=None):
552
+ """
553
+ Fit linear model.
554
+
555
+ Parameters
556
+ ----------
557
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
558
+ Training data.
559
+
560
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
561
+ Target values. Will be cast to X's dtype if necessary.
562
+
563
+ sample_weight : array-like of shape (n_samples,), default=None
564
+ Individual weights for each sample.
565
+
566
+ .. versionadded:: 0.17
567
+ parameter *sample_weight* support to LinearRegression.
568
+
569
+ Returns
570
+ -------
571
+ self : object
572
+ Fitted Estimator.
573
+ """
574
+ n_jobs_ = self.n_jobs
575
+
576
+ accept_sparse = False if self.positive else ["csr", "csc", "coo"]
577
+
578
+ X, y = self._validate_data(
579
+ X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True
580
+ )
581
+
582
+ has_sw = sample_weight is not None
583
+ if has_sw:
584
+ sample_weight = _check_sample_weight(
585
+ sample_weight, X, dtype=X.dtype, only_non_negative=True
586
+ )
587
+
588
+ # Note that neither _rescale_data nor the rest of the fit method of
589
+ # LinearRegression can benefit from in-place operations when X is a
590
+ # sparse matrix. Therefore, let's not copy X when it is sparse.
591
+ copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X)
592
+
593
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
594
+ X,
595
+ y,
596
+ fit_intercept=self.fit_intercept,
597
+ copy=copy_X_in_preprocess_data,
598
+ sample_weight=sample_weight,
599
+ )
600
+
601
+ if has_sw:
602
+ # Sample weight can be implemented via a simple rescaling. Note
603
+ # that we safely do inplace rescaling when _preprocess_data has
604
+ # already made a copy if requested.
605
+ X, y, sample_weight_sqrt = _rescale_data(
606
+ X, y, sample_weight, inplace=copy_X_in_preprocess_data
607
+ )
608
+
609
+ if self.positive:
610
+ if y.ndim < 2:
611
+ self.coef_ = optimize.nnls(X, y)[0]
612
+ else:
613
+ # scipy.optimize.nnls cannot handle y with shape (M, K)
614
+ outs = Parallel(n_jobs=n_jobs_)(
615
+ delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])
616
+ )
617
+ self.coef_ = np.vstack([out[0] for out in outs])
618
+ elif sp.issparse(X):
619
+ X_offset_scale = X_offset / X_scale
620
+
621
+ if has_sw:
622
+
623
+ def matvec(b):
624
+ return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)
625
+
626
+ def rmatvec(b):
627
+ return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)
628
+
629
+ else:
630
+
631
+ def matvec(b):
632
+ return X.dot(b) - b.dot(X_offset_scale)
633
+
634
+ def rmatvec(b):
635
+ return X.T.dot(b) - X_offset_scale * b.sum()
636
+
637
+ X_centered = sparse.linalg.LinearOperator(
638
+ shape=X.shape, matvec=matvec, rmatvec=rmatvec
639
+ )
640
+
641
+ if y.ndim < 2:
642
+ self.coef_ = lsqr(X_centered, y)[0]
643
+ else:
644
+ # sparse_lstsq cannot handle y with shape (M, K)
645
+ outs = Parallel(n_jobs=n_jobs_)(
646
+ delayed(lsqr)(X_centered, y[:, j].ravel())
647
+ for j in range(y.shape[1])
648
+ )
649
+ self.coef_ = np.vstack([out[0] for out in outs])
650
+ else:
651
+ self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y)
652
+ self.coef_ = self.coef_.T
653
+
654
+ if y.ndim == 1:
655
+ self.coef_ = np.ravel(self.coef_)
656
+ self._set_intercept(X_offset, y_offset, X_scale)
657
+ return self
658
+
659
+
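The sparse branch above never forms the centered matrix explicitly: a `LinearOperator` applies `(X - 1 X_offset')` through its `matvec`/`rmatvec`. A minimal standalone sketch of the unweighted variant, with illustrative data:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import LinearOperator, lsqr

rng = np.random.default_rng(0)
X = sparse.random(30, 5, density=0.3, random_state=0, format="csr")
offset = np.asarray(X.mean(axis=0)).ravel()

# (X - 1 offset') b, without densifying X
op = LinearOperator(
    shape=X.shape,
    matvec=lambda b: X @ b - b @ offset,
    rmatvec=lambda b: X.T @ b - offset * b.sum(),
    dtype=X.dtype,
)

y = rng.normal(size=30)
coef_implicit = lsqr(op, y - y.mean())[0]
coef_dense = np.linalg.lstsq(X.toarray() - offset, y - y.mean(), rcond=None)[0]
assert np.allclose(coef_implicit, coef_dense, atol=1e-6)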
660
+ def _check_precomputed_gram_matrix(
661
+ X, precompute, X_offset, X_scale, rtol=None, atol=1e-5
662
+ ):
663
+ """Computes a single element of the gram matrix and compares it to
664
+ the corresponding element of the user supplied gram matrix.
665
+
666
+ If the values do not match, a ValueError is raised.
667
+
668
+ Parameters
669
+ ----------
670
+ X : ndarray of shape (n_samples, n_features)
671
+ Data array.
672
+
673
+ precompute : array-like of shape (n_features, n_features)
674
+ User-supplied gram matrix.
675
+
676
+ X_offset : ndarray of shape (n_features,)
677
+ Array of feature means used to center design matrix.
678
+
679
+ X_scale : ndarray of shape (n_features,)
680
+ Array of feature scale factors used to normalize design matrix.
681
+
682
+ rtol : float, default=None
683
+ Relative tolerance; see :func:`numpy.allclose`.
684
+ If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7
685
+ otherwise.
686
+
687
+ atol : float, default=1e-5
688
+ Absolute tolerance; see :func:`numpy.allclose`. Note that the default
689
+ here is more tolerant than the default for
690
+ :func:`numpy.testing.assert_allclose`, where `atol=0`.
691
+
692
+ Raises
693
+ ------
694
+ ValueError
695
+ Raised when the provided Gram matrix is not consistent.
696
+ """
697
+
698
+ n_features = X.shape[1]
699
+ f1 = n_features // 2
700
+ f2 = min(f1 + 1, n_features - 1)
701
+
702
+ v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
703
+ v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
704
+
705
+ expected = np.dot(v1, v2)
706
+ actual = precompute[f1, f2]
707
+
708
+ dtypes = [precompute.dtype, expected.dtype]
709
+ if rtol is None:
710
+ rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
711
+ rtol = max(rtols)
712
+
713
+ if not np.isclose(expected, actual, rtol=rtol, atol=atol):
714
+ raise ValueError(
715
+ "Gram matrix passed in via 'precompute' parameter "
716
+ "did not pass validation when a single element was "
717
+ "checked - please check that it was computed "
718
+ f"properly. For element ({f1},{f2}) we computed "
719
+ f"{expected} but the user-supplied value was "
720
+ f"{actual}."
721
+ )
722
+
723
+
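A standalone sketch of the spot check: one off-diagonal entry of a candidate Gram matrix is compared with a freshly computed inner product (toy data; `gram_bad` is deliberately wrong):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(10, 4))

gram_good = X.T @ X
gram_bad = (X + 1.0).T @ (X + 1.0)    # Gram of the wrong matrix

f1 = X.shape[1] // 2                  # same element the check above uses
f2 = min(f1 + 1, X.shape[1] - 1)
expected = X[:, f1] @ X[:, f2]

assert np.isclose(expected, gram_good[f1, f2])
assert not np.isclose(expected, gram_bad[f1, f2])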
724
+ def _pre_fit(
725
+ X,
726
+ y,
727
+ Xy,
728
+ precompute,
729
+ fit_intercept,
730
+ copy,
731
+ check_input=True,
732
+ sample_weight=None,
733
+ ):
734
+ """Function used at beginning of fit in linear models with L1 or L0 penalty.
735
+
736
+ This function applies _preprocess_data and additionally computes the gram matrix
737
+ `precompute` as needed as well as `Xy`.
738
+ """
739
+ n_samples, n_features = X.shape
740
+
741
+ if sparse.issparse(X):
742
+ # copy is not needed here as X is not modified inplace when X is sparse
743
+ precompute = False
744
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
745
+ X,
746
+ y,
747
+ fit_intercept=fit_intercept,
748
+ copy=False,
749
+ check_input=check_input,
750
+ sample_weight=sample_weight,
751
+ )
752
+ else:
753
+ # copy was done in fit if necessary
754
+ X, y, X_offset, y_offset, X_scale = _preprocess_data(
755
+ X,
756
+ y,
757
+ fit_intercept=fit_intercept,
758
+ copy=copy,
759
+ check_input=check_input,
760
+ sample_weight=sample_weight,
761
+ )
762
+ # Rescale only in dense case. Sparse cd solver directly deals with
763
+ # sample_weight.
764
+ if sample_weight is not None:
765
+ # This triggers copies anyway.
766
+ X, y, _ = _rescale_data(X, y, sample_weight=sample_weight)
767
+
768
+ if hasattr(precompute, "__array__"):
769
+ if fit_intercept and not np.allclose(X_offset, np.zeros(n_features)):
770
+ warnings.warn(
771
+ (
772
+ "Gram matrix was provided but X was centered to fit "
773
+ "intercept: recomputing Gram matrix."
774
+ ),
775
+ UserWarning,
776
+ )
777
+ # TODO: instead of warning and recomputing, we could just center
778
+ # the user provided Gram matrix a-posteriori (after making a copy
779
+ # when `copy=True`).
780
+ # recompute Gram
781
+ precompute = "auto"
782
+ Xy = None
783
+ elif check_input:
784
+ # If we're going to use the user's precomputed gram matrix, we
785
+ # do a quick check to make sure it's not totally bogus.
786
+ _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale)
787
+
788
+ # precompute if n_samples > n_features
789
+ if isinstance(precompute, str) and precompute == "auto":
790
+ precompute = n_samples > n_features
791
+
792
+ if precompute is True:
793
+ # make sure that the 'precompute' array is contiguous.
794
+ precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order="C")
795
+ np.dot(X.T, X, out=precompute)
796
+
797
+ if not hasattr(precompute, "__array__"):
798
+ Xy = None # cannot use Xy if precompute is not Gram
799
+
800
+ if hasattr(precompute, "__array__") and Xy is None:
801
+ common_dtype = np.result_type(X.dtype, y.dtype)
802
+ if y.ndim == 1:
803
+ # Xy is 1d, make sure it is contiguous.
804
+ Xy = np.empty(shape=n_features, dtype=common_dtype, order="C")
805
+ np.dot(X.T, y, out=Xy)
806
+ else:
807
+ # Make sure that Xy is always F contiguous even if X or y are not
808
+ # contiguous: the goal is to make it fast to extract the data for a
809
+ # specific target.
810
+ n_targets = y.shape[1]
811
+ Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order="F")
812
+ np.dot(y.T, X, out=Xy.T)
813
+
814
+ return X, y, X_offset, y_offset, X_scale, precompute, Xy
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (496 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_huber.py ADDED
@@ -0,0 +1,352 @@
1
+ # Authors: Manoj Kumar [email protected]
2
+ # License: BSD 3 clause
3
+
4
+ from numbers import Integral, Real
5
+
6
+ import numpy as np
7
+ from scipy import optimize
8
+
9
+ from ..base import BaseEstimator, RegressorMixin, _fit_context
10
+ from ..utils import axis0_safe_slice
11
+ from ..utils._param_validation import Interval
12
+ from ..utils.extmath import safe_sparse_dot
13
+ from ..utils.optimize import _check_optimize_result
14
+ from ..utils.validation import _check_sample_weight
15
+ from ._base import LinearModel
16
+
17
+
18
+ def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
19
+ """Returns the Huber loss and the gradient.
20
+
21
+ Parameters
22
+ ----------
23
+ w : ndarray, shape (n_features + 1,) or (n_features + 2,)
24
+ Feature vector.
25
+ w[:n_features] gives the coefficients
26
+ w[-1] gives the scale factor and, if the intercept is fit, w[-2]
27
+ gives the intercept.
28
+
29
+ X : ndarray of shape (n_samples, n_features)
30
+ Input data.
31
+
32
+ y : ndarray of shape (n_samples,)
33
+ Target vector.
34
+
35
+ epsilon : float
36
+ Robustness of the Huber estimator.
37
+
38
+ alpha : float
39
+ Regularization parameter.
40
+
41
+ sample_weight : ndarray of shape (n_samples,), default=None
42
+ Weight assigned to each sample.
43
+
44
+ Returns
45
+ -------
46
+ loss : float
47
+ Huber loss.
48
+
49
+ gradient : ndarray, shape (len(w))
50
+ Returns the derivative of the Huber loss with respect to each
51
+ coefficient, intercept and the scale as a vector.
52
+ """
53
+ _, n_features = X.shape
54
+ fit_intercept = n_features + 2 == w.shape[0]
55
+ if fit_intercept:
56
+ intercept = w[-2]
57
+ sigma = w[-1]
58
+ w = w[:n_features]
59
+ n_samples = np.sum(sample_weight)
60
+
61
+ # Calculate the values where |(y - X'w - c) / sigma| > epsilon
62
+ # The values above this threshold are outliers.
63
+ linear_loss = y - safe_sparse_dot(X, w)
64
+ if fit_intercept:
65
+ linear_loss -= intercept
66
+ abs_linear_loss = np.abs(linear_loss)
67
+ outliers_mask = abs_linear_loss > epsilon * sigma
68
+
69
+ # Calculate the linear loss due to the outliers.
70
+ # This is equal to (2 * M * |(y - X'w - c) / sigma| - M**2) * sigma
71
+ outliers = abs_linear_loss[outliers_mask]
72
+ num_outliers = np.count_nonzero(outliers_mask)
73
+ n_non_outliers = X.shape[0] - num_outliers
74
+
75
+ # n_sw_outliers includes the weight given to the outliers while
76
+ # num_outliers is just the number of outliers.
77
+ outliers_sw = sample_weight[outliers_mask]
78
+ n_sw_outliers = np.sum(outliers_sw)
79
+ outlier_loss = (
80
+ 2.0 * epsilon * np.sum(outliers_sw * outliers)
81
+ - sigma * n_sw_outliers * epsilon**2
82
+ )
83
+
84
+ # Calculate the quadratic loss due to the non-outliers.
85
+ # This is equal to |(y - X'w - c)**2 / sigma**2| * sigma
86
+ non_outliers = linear_loss[~outliers_mask]
87
+ weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
88
+ weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
89
+ squared_loss = weighted_loss / sigma
90
+
91
+ if fit_intercept:
92
+ grad = np.zeros(n_features + 2)
93
+ else:
94
+ grad = np.zeros(n_features + 1)
95
+
96
+ # Gradient due to the squared loss.
97
+ X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
98
+ grad[:n_features] = (
99
+ 2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers)
100
+ )
101
+
102
+ # Gradient due to the linear loss.
103
+ signed_outliers = np.ones_like(outliers)
104
+ signed_outliers_mask = linear_loss[outliers_mask] < 0
105
+ signed_outliers[signed_outliers_mask] = -1.0
106
+ X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
107
+ sw_outliers = sample_weight[outliers_mask] * signed_outliers
108
+ grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers))
109
+
110
+ # Gradient due to the penalty.
111
+ grad[:n_features] += alpha * 2.0 * w
112
+
113
+ # Gradient due to sigma.
114
+ grad[-1] = n_samples
115
+ grad[-1] -= n_sw_outliers * epsilon**2
116
+ grad[-1] -= squared_loss / sigma
117
+
118
+ # Gradient due to the intercept.
119
+ if fit_intercept:
120
+ grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma
121
+ grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers)
122
+
123
+ loss = n_samples * sigma + squared_loss + outlier_loss
124
+ loss += alpha * np.dot(w, w)
125
+ return loss, grad
126
+
127
+
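A minimal sketch cross-checking the analytic gradient above against finite differences; the parameter layout is `[coef..., intercept, scale]` and the data is illustrative:

import numpy as np
from scipy.optimize import check_grad

rng = np.random.default_rng(0)
X = rng.normal(size=(40, 3))
y = X @ np.array([1.0, -1.0, 2.0]) + rng.normal(size=40)
args = (X, y, 1.35, 1e-4, np.ones(40))   # epsilon, alpha, sample_weight

w0 = np.r_[np.zeros(3), 0.0, 1.0]        # scale must stay strictly positive
err = check_grad(
    lambda w: _huber_loss_and_gradient(w, *args)[0],
    lambda w: _huber_loss_and_gradient(w, *args)[1],
    w0,
)
print(err)   # small relative to the gradient norm when the two agree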
128
+ class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
129
+ """L2-regularized linear regression model that is robust to outliers.
130
+
131
+ The Huber Regressor optimizes the squared loss for the samples where
132
+ ``|(y - Xw - c) / sigma| < epsilon`` and the absolute loss for the samples
133
+ where ``|(y - Xw - c) / sigma| > epsilon``, where the model coefficients
134
+ ``w``, the intercept ``c`` and the scale ``sigma`` are parameters
135
+ to be optimized. The parameter sigma makes sure that if y is scaled up
136
+ or down by a certain factor, one does not need to rescale epsilon to
137
+ achieve the same robustness. Note that this does not take into account
138
+ the fact that the different features of X may be of different scales.
139
+
140
+ The Huber loss function has the advantage of not being heavily influenced
141
+ by the outliers while not completely ignoring their effect.
142
+
143
+ Read more in the :ref:`User Guide <huber_regression>`
144
+
145
+ .. versionadded:: 0.18
146
+
147
+ Parameters
148
+ ----------
149
+ epsilon : float, default=1.35
150
+ The parameter epsilon controls the number of samples that should be
151
+ classified as outliers. The smaller the epsilon, the more robust it is
152
+ to outliers. Epsilon must be in the range `[1, inf)`.
153
+
154
+ max_iter : int, default=100
155
+ Maximum number of iterations that
156
+ ``scipy.optimize.minimize(method="L-BFGS-B")`` should run for.
157
+
158
+ alpha : float, default=0.0001
159
+ Strength of the squared L2 regularization. Note that the penalty is
160
+ equal to ``alpha * ||w||^2``.
161
+ Must be in the range `[0, inf)`.
162
+
163
+ warm_start : bool, default=False
164
+ This is useful if the stored attributes of a previously used model
165
+ have to be reused. If set to False, the coefficients will
166
+ be rewritten for every call to fit.
167
+ See :term:`the Glossary <warm_start>`.
168
+
169
+ fit_intercept : bool, default=True
170
+ Whether or not to fit the intercept. This can be set to False
171
+ if the data is already centered around the origin.
172
+
173
+ tol : float, default=1e-05
174
+ The iteration will stop when
175
+ ``max{|proj g_i | i = 1, ..., n}`` <= ``tol``
176
+ where pg_i is the i-th component of the projected gradient.
177
+
178
+ Attributes
179
+ ----------
180
+ coef_ : array, shape (n_features,)
181
+ Coefficients obtained by optimizing the L2-regularized Huber loss.
182
+
183
+ intercept_ : float
184
+ Bias.
185
+
186
+ scale_ : float
187
+ The value by which ``|y - Xw - c|`` is scaled down.
188
+
189
+ n_features_in_ : int
190
+ Number of features seen during :term:`fit`.
191
+
192
+ .. versionadded:: 0.24
193
+
194
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
195
+ Names of features seen during :term:`fit`. Defined only when `X`
196
+ has feature names that are all strings.
197
+
198
+ .. versionadded:: 1.0
199
+
200
+ n_iter_ : int
201
+ Number of iterations that
202
+ ``scipy.optimize.minimize(method="L-BFGS-B")`` has run for.
203
+
204
+ .. versionchanged:: 0.20
205
+
206
+ In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
207
+ ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
208
+
209
+ outliers_ : array, shape (n_samples,)
210
+ A boolean mask which is set to True where the samples are identified
211
+ as outliers.
212
+
213
+ See Also
214
+ --------
215
+ RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
216
+ TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
217
+ SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
218
+
219
+ References
220
+ ----------
221
+ .. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics
222
+ Concomitant scale estimates, pg 172
223
+ .. [2] Art B. Owen (2006), A robust hybrid of lasso and ridge regression.
224
+ https://statweb.stanford.edu/~owen/reports/hhu.pdf
225
+
226
+ Examples
227
+ --------
228
+ >>> import numpy as np
229
+ >>> from sklearn.linear_model import HuberRegressor, LinearRegression
230
+ >>> from sklearn.datasets import make_regression
231
+ >>> rng = np.random.RandomState(0)
232
+ >>> X, y, coef = make_regression(
233
+ ... n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0)
234
+ >>> X[:4] = rng.uniform(10, 20, (4, 2))
235
+ >>> y[:4] = rng.uniform(10, 20, 4)
236
+ >>> huber = HuberRegressor().fit(X, y)
237
+ >>> huber.score(X, y)
238
+ -7.284...
239
+ >>> huber.predict(X[:1,])
240
+ array([806.7200...])
241
+ >>> linear = LinearRegression().fit(X, y)
242
+ >>> print("True coefficients:", coef)
243
+ True coefficients: [20.4923... 34.1698...]
244
+ >>> print("Huber coefficients:", huber.coef_)
245
+ Huber coefficients: [17.7906... 31.0106...]
246
+ >>> print("Linear Regression coefficients:", linear.coef_)
247
+ Linear Regression coefficients: [-1.9221... 7.0226...]
248
+ """
249
+
250
+ _parameter_constraints: dict = {
251
+ "epsilon": [Interval(Real, 1.0, None, closed="left")],
252
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
253
+ "alpha": [Interval(Real, 0, None, closed="left")],
254
+ "warm_start": ["boolean"],
255
+ "fit_intercept": ["boolean"],
256
+ "tol": [Interval(Real, 0.0, None, closed="left")],
257
+ }
258
+
259
+ def __init__(
260
+ self,
261
+ *,
262
+ epsilon=1.35,
263
+ max_iter=100,
264
+ alpha=0.0001,
265
+ warm_start=False,
266
+ fit_intercept=True,
267
+ tol=1e-05,
268
+ ):
269
+ self.epsilon = epsilon
270
+ self.max_iter = max_iter
271
+ self.alpha = alpha
272
+ self.warm_start = warm_start
273
+ self.fit_intercept = fit_intercept
274
+ self.tol = tol
275
+
276
+ @_fit_context(prefer_skip_nested_validation=True)
277
+ def fit(self, X, y, sample_weight=None):
278
+ """Fit the model according to the given training data.
279
+
280
+ Parameters
281
+ ----------
282
+ X : array-like, shape (n_samples, n_features)
283
+ Training vector, where `n_samples` is the number of samples and
284
+ `n_features` is the number of features.
285
+
286
+ y : array-like, shape (n_samples,)
287
+ Target vector relative to X.
288
+
289
+ sample_weight : array-like, shape (n_samples,)
290
+ Weight given to each sample.
291
+
292
+ Returns
293
+ -------
294
+ self : object
295
+ Fitted `HuberRegressor` estimator.
296
+ """
297
+ X, y = self._validate_data(
298
+ X,
299
+ y,
300
+ copy=False,
301
+ accept_sparse=["csr"],
302
+ y_numeric=True,
303
+ dtype=[np.float64, np.float32],
304
+ )
305
+
306
+ sample_weight = _check_sample_weight(sample_weight, X)
307
+
308
+ if self.warm_start and hasattr(self, "coef_"):
309
+ parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_]))
310
+ else:
311
+ if self.fit_intercept:
312
+ parameters = np.zeros(X.shape[1] + 2)
313
+ else:
314
+ parameters = np.zeros(X.shape[1] + 1)
315
+ # Make sure to initialize the scale parameter to a strictly
316
+ # positive value:
317
+ parameters[-1] = 1
318
+
319
+ # Sigma or the scale factor should be non-negative.
320
+ # Setting it to be zero might cause undefined bounds hence we set it
321
+ # to a value close to zero.
322
+ bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
323
+ bounds[-1][0] = np.finfo(np.float64).eps * 10
324
+
325
+ opt_res = optimize.minimize(
326
+ _huber_loss_and_gradient,
327
+ parameters,
328
+ method="L-BFGS-B",
329
+ jac=True,
330
+ args=(X, y, self.epsilon, self.alpha, sample_weight),
331
+ options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1},
332
+ bounds=bounds,
333
+ )
334
+
335
+ parameters = opt_res.x
336
+
337
+ if opt_res.status == 2:
338
+ raise ValueError(
339
+ "HuberRegressor convergence failed: l-BFGS-b solver terminated with %s"
340
+ % opt_res.message
341
+ )
342
+ self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
343
+ self.scale_ = parameters[-1]
344
+ if self.fit_intercept:
345
+ self.intercept_ = parameters[-2]
346
+ else:
347
+ self.intercept_ = 0.0
348
+ self.coef_ = parameters[: X.shape[1]]
349
+
350
+ residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_)
351
+ self.outliers_ = residual > self.scale_ * self.epsilon
352
+ return self
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_omp.py ADDED
@@ -0,0 +1,1097 @@
1
+ """Orthogonal matching pursuit algorithms
2
+ """
3
+
4
+ # Author: Vlad Niculae
5
+ #
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from math import sqrt
10
+ from numbers import Integral, Real
11
+
12
+ import numpy as np
13
+ from scipy import linalg
14
+ from scipy.linalg.lapack import get_lapack_funcs
15
+
16
+ from ..base import MultiOutputMixin, RegressorMixin, _fit_context
17
+ from ..model_selection import check_cv
18
+ from ..utils import Bunch, as_float_array, check_array
19
+ from ..utils._param_validation import Interval, StrOptions, validate_params
20
+ from ..utils.metadata_routing import (
21
+ MetadataRouter,
22
+ MethodMapping,
23
+ _raise_for_params,
24
+ _routing_enabled,
25
+ process_routing,
26
+ )
27
+ from ..utils.parallel import Parallel, delayed
28
+ from ._base import LinearModel, _pre_fit
29
+
30
+ premature = (
31
+ "Orthogonal matching pursuit ended prematurely due to linear"
32
+ " dependence in the dictionary. The requested precision might"
33
+ " not have been met."
34
+ )
35
+
36
+
37
+ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False):
38
+ """Orthogonal Matching Pursuit step using the Cholesky decomposition.
39
+
40
+ Parameters
41
+ ----------
42
+ X : ndarray of shape (n_samples, n_features)
43
+ Input dictionary. Columns are assumed to have unit norm.
44
+
45
+ y : ndarray of shape (n_samples,)
46
+ Input targets.
47
+
48
+ n_nonzero_coefs : int
49
+ Targeted number of non-zero elements.
50
+
51
+ tol : float, default=None
52
+ Targeted squared error, if not None overrides n_nonzero_coefs.
53
+
54
+ copy_X : bool, default=True
55
+ Whether the design matrix X must be copied by the algorithm. A false
56
+ value is only helpful if X is already Fortran-ordered, otherwise a
57
+ copy is made anyway.
58
+
59
+ return_path : bool, default=False
60
+ Whether to return every value of the nonzero coefficients along the
61
+ forward path. Useful for cross-validation.
62
+
63
+ Returns
64
+ -------
65
+ gamma : ndarray of shape (n_nonzero_coefs,)
66
+ Non-zero elements of the solution.
67
+
68
+ idx : ndarray of shape (n_nonzero_coefs,)
69
+ Indices of the positions of the elements in gamma within the solution
70
+ vector.
71
+
72
+ coef : ndarray of shape (n_features, n_nonzero_coefs)
73
+ The first k values of column k correspond to the coefficient value
74
+ for the active features at that step. The lower left triangle contains
75
+ garbage. Only returned if ``return_path=True``.
76
+
77
+ n_active : int
78
+ Number of active features at convergence.
79
+ """
80
+ if copy_X:
81
+ X = X.copy("F")
82
+ else: # even if we are allowed to overwrite, still copy it if bad order
83
+ X = np.asfortranarray(X)
84
+
85
+ min_float = np.finfo(X.dtype).eps
86
+ nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,))
87
+ (potrs,) = get_lapack_funcs(("potrs",), (X,))
88
+
89
+ alpha = np.dot(X.T, y)
90
+ residual = y
91
+ gamma = np.empty(0)
92
+ n_active = 0
93
+ indices = np.arange(X.shape[1]) # keeping track of swapping
94
+
95
+ max_features = X.shape[1] if tol is not None else n_nonzero_coefs
96
+
97
+ L = np.empty((max_features, max_features), dtype=X.dtype)
98
+
99
+ if return_path:
100
+ coefs = np.empty_like(L)
101
+
102
+ while True:
103
+ lam = np.argmax(np.abs(np.dot(X.T, residual)))
104
+ if lam < n_active or alpha[lam] ** 2 < min_float:
105
+ # atom already selected or inner product too small
106
+ warnings.warn(premature, RuntimeWarning, stacklevel=2)
107
+ break
108
+
109
+ if n_active > 0:
110
+ # Updates the Cholesky decomposition of X' X
111
+ L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
112
+ linalg.solve_triangular(
113
+ L[:n_active, :n_active],
114
+ L[n_active, :n_active],
115
+ trans=0,
116
+ lower=1,
117
+ overwrite_b=True,
118
+ check_finite=False,
119
+ )
120
+ v = nrm2(L[n_active, :n_active]) ** 2
121
+ Lkk = linalg.norm(X[:, lam]) ** 2 - v
122
+ if Lkk <= min_float: # selected atoms are dependent
123
+ warnings.warn(premature, RuntimeWarning, stacklevel=2)
124
+ break
125
+ L[n_active, n_active] = sqrt(Lkk)
126
+ else:
127
+ L[0, 0] = linalg.norm(X[:, lam])
128
+
129
+ X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
130
+ alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
131
+ indices[n_active], indices[lam] = indices[lam], indices[n_active]
132
+ n_active += 1
133
+
134
+ # solves LL'x = X'y as a composition of two triangular systems
135
+ gamma, _ = potrs(
136
+ L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False
137
+ )
138
+
139
+ if return_path:
140
+ coefs[:n_active, n_active - 1] = gamma
141
+ residual = y - np.dot(X[:, :n_active], gamma)
142
+ if tol is not None and nrm2(residual) ** 2 <= tol:
143
+ break
144
+ elif n_active == max_features:
145
+ break
146
+
147
+ if return_path:
148
+ return gamma, indices[:n_active], coefs[:, :n_active], n_active
149
+ else:
150
+ return gamma, indices[:n_active], n_active
151
+
152
+
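The loop above extends the Cholesky factor of the active-set Gram matrix one atom at a time: a triangular solve gives the new row and the Schur complement the new diagonal (the `Lkk` above). A standalone sketch of that update on toy data:

import numpy as np
from scipy.linalg import solve_triangular

rng = np.random.default_rng(0)
X = rng.normal(size=(30, 4))
X /= np.linalg.norm(X, axis=0)            # unit-norm atoms, as OMP assumes

G = X.T @ X
L = np.linalg.cholesky(G[:3, :3])         # factor of a 3-atom active set

# Grow the factor to include atom 3 without refactorizing from scratch
w = solve_triangular(L, G[3, :3], lower=True)
d = np.sqrt(G[3, 3] - w @ w)              # Schur complement, cf. Lkk
L_new = np.block([[L, np.zeros((3, 1))], [w[None, :], np.array([[d]])]])

assert np.allclose(L_new @ L_new.T, G)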
153
+ def _gram_omp(
154
+ Gram,
155
+ Xy,
156
+ n_nonzero_coefs,
157
+ tol_0=None,
158
+ tol=None,
159
+ copy_Gram=True,
160
+ copy_Xy=True,
161
+ return_path=False,
162
+ ):
163
+ """Orthogonal Matching Pursuit step on a precomputed Gram matrix.
164
+
165
+ This function uses the Cholesky decomposition method.
166
+
167
+ Parameters
168
+ ----------
169
+ Gram : ndarray of shape (n_features, n_features)
170
+ Gram matrix of the input data matrix.
171
+
172
+ Xy : ndarray of shape (n_features,)
173
+ Input targets.
174
+
175
+ n_nonzero_coefs : int
176
+ Targeted number of non-zero elements.
177
+
178
+ tol_0 : float, default=None
179
+ Squared norm of y, required if tol is not None.
180
+
181
+ tol : float, default=None
182
+ Targeted squared error, if not None overrides n_nonzero_coefs.
183
+
184
+ copy_Gram : bool, default=True
185
+ Whether the gram matrix must be copied by the algorithm. A false
186
+ value is only helpful if it is already Fortran-ordered, otherwise a
187
+ copy is made anyway.
188
+
189
+ copy_Xy : bool, default=True
190
+ Whether the covariance vector Xy must be copied by the algorithm.
191
+ If False, it may be overwritten.
192
+
193
+ return_path : bool, default=False
194
+ Whether to return every value of the nonzero coefficients along the
195
+ forward path. Useful for cross-validation.
196
+
197
+ Returns
198
+ -------
199
+ gamma : ndarray of shape (n_nonzero_coefs,)
200
+ Non-zero elements of the solution.
201
+
202
+ idx : ndarray of shape (n_nonzero_coefs,)
203
+ Indices of the positions of the elements in gamma within the solution
204
+ vector.
205
+
206
+ coefs : ndarray of shape (n_features, n_nonzero_coefs)
207
+ The first k values of column k correspond to the coefficient value
208
+ for the active features at that step. The lower left triangle contains
209
+ garbage. Only returned if ``return_path=True``.
210
+
211
+ n_active : int
212
+ Number of active features at convergence.
213
+ """
214
+ Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram)
215
+
216
+ if copy_Xy or not Xy.flags.writeable:
217
+ Xy = Xy.copy()
218
+
219
+ min_float = np.finfo(Gram.dtype).eps
220
+ nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,))
221
+ (potrs,) = get_lapack_funcs(("potrs",), (Gram,))
222
+
223
+ indices = np.arange(len(Gram)) # keeping track of swapping
224
+ alpha = Xy
225
+ tol_curr = tol_0
226
+ delta = 0
227
+ gamma = np.empty(0)
228
+ n_active = 0
229
+
230
+ max_features = len(Gram) if tol is not None else n_nonzero_coefs
231
+
232
+ L = np.empty((max_features, max_features), dtype=Gram.dtype)
233
+
234
+ L[0, 0] = 1.0
235
+ if return_path:
236
+ coefs = np.empty_like(L)
237
+
238
+ while True:
239
+ lam = np.argmax(np.abs(alpha))
240
+ if lam < n_active or alpha[lam] ** 2 < min_float:
241
+ # selected same atom twice, or inner product too small
242
+ warnings.warn(premature, RuntimeWarning, stacklevel=3)
243
+ break
244
+ if n_active > 0:
245
+ L[n_active, :n_active] = Gram[lam, :n_active]
246
+ linalg.solve_triangular(
247
+ L[:n_active, :n_active],
248
+ L[n_active, :n_active],
249
+ trans=0,
250
+ lower=1,
251
+ overwrite_b=True,
252
+ check_finite=False,
253
+ )
254
+ v = nrm2(L[n_active, :n_active]) ** 2
255
+ Lkk = Gram[lam, lam] - v
256
+ if Lkk <= min_float: # selected atoms are dependent
257
+ warnings.warn(premature, RuntimeWarning, stacklevel=3)
258
+ break
259
+ L[n_active, n_active] = sqrt(Lkk)
260
+ else:
261
+ L[0, 0] = sqrt(Gram[lam, lam])
262
+
263
+ Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
264
+ Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
265
+ indices[n_active], indices[lam] = indices[lam], indices[n_active]
266
+ Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
267
+ n_active += 1
268
+ # solves LL'x = X'y as a composition of two triangular systems
269
+ gamma, _ = potrs(
270
+ L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False
271
+ )
272
+ if return_path:
273
+ coefs[:n_active, n_active - 1] = gamma
274
+ beta = np.dot(Gram[:, :n_active], gamma)
275
+ alpha = Xy - beta
276
+ if tol is not None:
277
+ tol_curr += delta
278
+ delta = np.inner(gamma, beta[:n_active])
279
+ tol_curr -= delta
280
+ if abs(tol_curr) <= tol:
281
+ break
282
+ elif n_active == max_features:
283
+ break
284
+
285
+ if return_path:
286
+ return gamma, indices[:n_active], coefs[:, :n_active], n_active
287
+ else:
288
+ return gamma, indices[:n_active], n_active
289
+
290
+
291
+ @validate_params(
292
+ {
293
+ "X": ["array-like"],
294
+ "y": [np.ndarray],
295
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
296
+ "tol": [Interval(Real, 0, None, closed="left"), None],
297
+ "precompute": ["boolean", StrOptions({"auto"})],
298
+ "copy_X": ["boolean"],
299
+ "return_path": ["boolean"],
300
+ "return_n_iter": ["boolean"],
301
+ },
302
+ prefer_skip_nested_validation=True,
303
+ )
304
+ def orthogonal_mp(
305
+ X,
306
+ y,
307
+ *,
308
+ n_nonzero_coefs=None,
309
+ tol=None,
310
+ precompute=False,
311
+ copy_X=True,
312
+ return_path=False,
313
+ return_n_iter=False,
314
+ ):
315
+ r"""Orthogonal Matching Pursuit (OMP).
316
+
317
+ Solves n_targets Orthogonal Matching Pursuit problems.
318
+ An instance of the problem has the form:
319
+
320
+ When parametrized by the number of non-zero coefficients using
321
+ `n_nonzero_coefs`:
322
+ argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
323
+
324
+ When parametrized by error using the parameter `tol`:
325
+ argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
326
+
327
+ Read more in the :ref:`User Guide <omp>`.
328
+
329
+ Parameters
330
+ ----------
331
+ X : array-like of shape (n_samples, n_features)
332
+ Input data. Columns are assumed to have unit norm.
333
+
334
+ y : ndarray of shape (n_samples,) or (n_samples, n_targets)
335
+ Input targets.
336
+
337
+ n_nonzero_coefs : int, default=None
338
+ Desired number of non-zero entries in the solution. If None (by
339
+ default) this value is set to 10% of n_features.
340
+
341
+ tol : float, default=None
342
+ Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.
343
+
344
+ precompute : 'auto' or bool, default=False
345
+ Whether to perform precomputations. Improves performance when n_targets
346
+ or n_samples is very large.
347
+
348
+ copy_X : bool, default=True
349
+ Whether the design matrix X must be copied by the algorithm. A false
350
+ value is only helpful if X is already Fortran-ordered, otherwise a
351
+ copy is made anyway.
352
+
353
+ return_path : bool, default=False
354
+ Whether to return every value of the nonzero coefficients along the
355
+ forward path. Useful for cross-validation.
356
+
357
+ return_n_iter : bool, default=False
358
+ Whether or not to return the number of iterations.
359
+
360
+ Returns
361
+ -------
362
+ coef : ndarray of shape (n_features,) or (n_features, n_targets)
363
+ Coefficients of the OMP solution. If `return_path=True`, this contains
364
+ the whole coefficient path. In this case its shape is
365
+ (n_features, n_features) or (n_features, n_targets, n_features) and
366
+ iterating over the last axis generates coefficients in increasing order
367
+ of active features.
368
+
369
+ n_iters : array-like or int
370
+ Number of active features across every target. Returned only if
371
+ `return_n_iter` is set to True.
372
+
373
+ See Also
374
+ --------
375
+ OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model.
376
+ orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y.
377
+ lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
378
+ sklearn.decomposition.sparse_encode : Sparse coding.
379
+
380
+ Notes
381
+ -----
382
+ Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
383
+ Matching pursuits with time-frequency dictionaries, IEEE Transactions on
384
+ Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
385
+ (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
386
+
387
+ This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
388
+ M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
389
+ Matching Pursuit Technical Report - CS Technion, April 2008.
390
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
391
+ """
392
+ X = check_array(X, order="F", copy=copy_X)
393
+ copy_X = False
394
+ if y.ndim == 1:
395
+ y = y.reshape(-1, 1)
396
+ y = check_array(y)
397
+ if y.shape[1] > 1: # subsequent targets will be affected
398
+ copy_X = True
399
+ if n_nonzero_coefs is None and tol is None:
400
+ # default for n_nonzero_coefs is 0.1 * n_features
401
+ # but at least one.
402
+ n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
403
+ if tol is None and n_nonzero_coefs > X.shape[1]:
404
+ raise ValueError(
405
+ "The number of atoms cannot be more than the number of features"
406
+ )
407
+ if precompute == "auto":
408
+ precompute = X.shape[0] > X.shape[1]
409
+ if precompute:
410
+ G = np.dot(X.T, X)
411
+ G = np.asfortranarray(G)
412
+ Xy = np.dot(X.T, y)
413
+ if tol is not None:
414
+ norms_squared = np.sum((y**2), axis=0)
415
+ else:
416
+ norms_squared = None
417
+ return orthogonal_mp_gram(
418
+ G,
419
+ Xy,
420
+ n_nonzero_coefs=n_nonzero_coefs,
421
+ tol=tol,
422
+ norms_squared=norms_squared,
423
+ copy_Gram=copy_X,
424
+ copy_Xy=False,
425
+ return_path=return_path,
426
+ )
427
+
428
+ if return_path:
429
+ coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
430
+ else:
431
+ coef = np.zeros((X.shape[1], y.shape[1]))
432
+ n_iters = []
433
+
434
+ for k in range(y.shape[1]):
435
+ out = _cholesky_omp(
436
+ X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path
437
+ )
438
+ if return_path:
439
+ _, idx, coefs, n_iter = out
440
+ coef = coef[:, :, : len(idx)]
441
+ for n_active, x in enumerate(coefs.T):
442
+ coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]
443
+ else:
444
+ x, idx, n_iter = out
445
+ coef[idx, k] = x
446
+ n_iters.append(n_iter)
447
+
448
+ if y.shape[1] == 1:
449
+ n_iters = n_iters[0]
450
+
451
+ if return_n_iter:
452
+ return np.squeeze(coef), n_iters
453
+ else:
454
+ return np.squeeze(coef)
455
+
456
+
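A hedged usage sketch: on a noiseless toy problem with a unit-norm dictionary, `orthogonal_mp` should recover a 3-sparse signal exactly:

import numpy as np
from sklearn.linear_model import orthogonal_mp

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 30))
X /= np.linalg.norm(X, axis=0)            # columns assumed to have unit norm

w_true = np.zeros(30)
w_true[[3, 11, 27]] = [1.5, -2.0, 0.8]    # 3-sparse signal
y = X @ w_true

coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
assert np.allclose(coef, w_true)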
457
+ @validate_params(
458
+ {
459
+ "Gram": ["array-like"],
460
+ "Xy": ["array-like"],
461
+ "n_nonzero_coefs": [Interval(Integral, 0, None, closed="neither"), None],
462
+ "tol": [Interval(Real, 0, None, closed="left"), None],
463
+ "norms_squared": ["array-like", None],
464
+ "copy_Gram": ["boolean"],
465
+ "copy_Xy": ["boolean"],
466
+ "return_path": ["boolean"],
467
+ "return_n_iter": ["boolean"],
468
+ },
469
+ prefer_skip_nested_validation=True,
470
+ )
471
+ def orthogonal_mp_gram(
472
+ Gram,
473
+ Xy,
474
+ *,
475
+ n_nonzero_coefs=None,
476
+ tol=None,
477
+ norms_squared=None,
478
+ copy_Gram=True,
479
+ copy_Xy=True,
480
+ return_path=False,
481
+ return_n_iter=False,
482
+ ):
483
+ """Gram Orthogonal Matching Pursuit (OMP).
484
+
485
+ Solves n_targets Orthogonal Matching Pursuit problems using only
486
+ the Gram matrix X.T * X and the product X.T * y.
487
+
488
+ Read more in the :ref:`User Guide <omp>`.
489
+
490
+ Parameters
491
+ ----------
492
+ Gram : array-like of shape (n_features, n_features)
493
+ Gram matrix of the input data: `X.T * X`.
494
+
495
+ Xy : array-like of shape (n_features,) or (n_features, n_targets)
496
+ Input targets multiplied by `X`: `X.T * y`.
497
+
498
+ n_nonzero_coefs : int, default=None
499
+ Desired number of non-zero entries in the solution. If `None` (by
500
+ default) this value is set to 10% of n_features.
501
+
502
+ tol : float, default=None
503
+ Maximum squared norm of the residual. If not `None`,
504
+ overrides `n_nonzero_coefs`.
505
+
506
+ norms_squared : array-like of shape (n_targets,), default=None
507
+ Squared L2 norms of the lines of `y`. Required if `tol` is not None.
508
+
509
+ copy_Gram : bool, default=True
510
+ Whether the gram matrix must be copied by the algorithm. A `False`
511
+ value is only helpful if it is already Fortran-ordered, otherwise a
512
+ copy is made anyway.
513
+
514
+ copy_Xy : bool, default=True
515
+ Whether the covariance vector `Xy` must be copied by the algorithm.
516
+ If `False`, it may be overwritten.
517
+
518
+ return_path : bool, default=False
519
+ Whether to return every value of the nonzero coefficients along the
520
+ forward path. Useful for cross-validation.
521
+
522
+ return_n_iter : bool, default=False
523
+ Whether or not to return the number of iterations.
524
+
525
+ Returns
526
+ -------
527
+ coef : ndarray of shape (n_features,) or (n_features, n_targets)
528
+ Coefficients of the OMP solution. If `return_path=True`, this contains
529
+ the whole coefficient path. In this case its shape is
530
+ `(n_features, n_features)` or `(n_features, n_targets, n_features)` and
531
+ iterating over the last axis yields coefficients in increasing order
532
+ of active features.
533
+
534
+ n_iters : list or int
535
+ Number of active features across every target. Returned only if
536
+ `return_n_iter` is set to True.
537
+
538
+ See Also
539
+ --------
540
+ OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
541
+ orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
542
+ lars_path : Compute Least Angle Regression or Lasso path using
543
+ LARS algorithm.
544
+ sklearn.decomposition.sparse_encode : Generic sparse coding.
545
+ Each column of the result is the solution to a Lasso problem.
546
+
547
+ Notes
548
+ -----
549
+ Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
550
+ Matching pursuits with time-frequency dictionaries, IEEE Transactions on
551
+ Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
552
+ (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
553
+
554
+ This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
555
+ M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
556
+ Matching Pursuit Technical Report - CS Technion, April 2008.
557
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
558
+ """
559
+ Gram = check_array(Gram, order="F", copy=copy_Gram)
560
+ Xy = np.asarray(Xy)
561
+ if Xy.ndim > 1 and Xy.shape[1] > 1:
562
+ # force a copy, otherwise subsequent targets would be affected
563
+ copy_Gram = True
564
+ if Xy.ndim == 1:
565
+ Xy = Xy[:, np.newaxis]
566
+ if tol is not None:
567
+ norms_squared = [norms_squared]
568
+ if copy_Xy or not Xy.flags.writeable:
569
+ # Make the copy once instead of many times in _gram_omp itself.
570
+ Xy = Xy.copy()
571
+
572
+ if n_nonzero_coefs is None and tol is None:
573
+ n_nonzero_coefs = int(0.1 * len(Gram))
574
+ if tol is not None and norms_squared is None:
575
+ raise ValueError(
576
+ "Gram OMP needs the precomputed norms in order "
577
+ "to evaluate the error sum of squares."
578
+ )
579
+ if tol is not None and tol < 0:
580
+ raise ValueError("Epsilon cannot be negative")
581
+ if tol is None and n_nonzero_coefs <= 0:
582
+ raise ValueError("The number of atoms must be positive")
583
+ if tol is None and n_nonzero_coefs > len(Gram):
584
+ raise ValueError(
585
+ "The number of atoms cannot be more than the number of features"
586
+ )
587
+
588
+ if return_path:
589
+ coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
590
+ else:
591
+ coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
592
+
593
+ n_iters = []
594
+ for k in range(Xy.shape[1]):
595
+ out = _gram_omp(
596
+ Gram,
597
+ Xy[:, k],
598
+ n_nonzero_coefs,
599
+ norms_squared[k] if tol is not None else None,
600
+ tol,
601
+ copy_Gram=copy_Gram,
602
+ copy_Xy=False,
603
+ return_path=return_path,
604
+ )
605
+ if return_path:
606
+ _, idx, coefs, n_iter = out
607
+ coef = coef[:, :, : len(idx)]
608
+ for n_active, x in enumerate(coefs.T):
609
+ coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]
610
+ else:
611
+ x, idx, n_iter = out
612
+ coef[idx, k] = x
613
+ n_iters.append(n_iter)
614
+
615
+ if Xy.shape[1] == 1:
616
+ n_iters = n_iters[0]
617
+
618
+ if return_n_iter:
619
+ return np.squeeze(coef), n_iters
620
+ else:
621
+ return np.squeeze(coef)
622
+
623
+
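A quick sketch checking that the Gram formulation agrees with the direct solver on the same kind of toy problem (synthetic data):

import numpy as np
from sklearn.linear_model import orthogonal_mp, orthogonal_mp_gram

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 30))
X /= np.linalg.norm(X, axis=0)
w_true = np.zeros(30)
w_true[[5, 9]] = [2.0, -1.0]
y = X @ w_true

direct = orthogonal_mp(X, y, n_nonzero_coefs=2)
via_gram = orthogonal_mp_gram(X.T @ X, X.T @ y, n_nonzero_coefs=2)
assert np.allclose(direct, via_gram)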
624
+ class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel):
625
+ """Orthogonal Matching Pursuit model (OMP).
626
+
627
+ Read more in the :ref:`User Guide <omp>`.
628
+
629
+ Parameters
630
+ ----------
631
+ n_nonzero_coefs : int, default=None
632
+ Desired number of non-zero entries in the solution. If None (by
633
+ default) this value is set to 10% of n_features.
634
+
635
+ tol : float, default=None
636
+ Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.
637
+
638
+ fit_intercept : bool, default=True
639
+ Whether to calculate the intercept for this model. If set
640
+ to false, no intercept will be used in calculations
641
+ (i.e. data is expected to be centered).
642
+
643
+ precompute : 'auto' or bool, default='auto'
644
+ Whether to use a precomputed Gram and Xy matrix to speed up
645
+ calculations. Improves performance when :term:`n_targets` or
646
+ :term:`n_samples` is very large. Note that if you already have such
647
+ matrices, you can pass them directly to the fit method.
648
+
649
+ Attributes
650
+ ----------
651
+ coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
652
+ Parameter vector (w in the formula).
653
+
654
+ intercept_ : float or ndarray of shape (n_targets,)
655
+ Independent term in decision function.
656
+
657
+ n_iter_ : int or array-like
658
+ Number of active features across every target.
659
+
660
+ n_nonzero_coefs_ : int
661
+ The number of non-zero coefficients in the solution. If
662
+ `n_nonzero_coefs` is None and `tol` is None this value is either set
663
+ to 10% of `n_features` or 1, whichever is greater.
664
+
665
+ n_features_in_ : int
666
+ Number of features seen during :term:`fit`.
667
+
668
+ .. versionadded:: 0.24
669
+
670
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
671
+ Names of features seen during :term:`fit`. Defined only when `X`
672
+ has feature names that are all strings.
673
+
674
+ .. versionadded:: 1.0
675
+
676
+ See Also
677
+ --------
678
+ orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
679
+ orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
680
+ problems using only the Gram matrix X.T * X and the product X.T * y.
681
+ lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
682
+ Lars : Least Angle Regression model a.k.a. LAR.
683
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
684
+ sklearn.decomposition.sparse_encode : Generic sparse coding.
685
+ Each column of the result is the solution to a Lasso problem.
686
+ OrthogonalMatchingPursuitCV : Cross-validated
687
+ Orthogonal Matching Pursuit model (OMP).
688
+
689
+ Notes
690
+ -----
691
+ Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
692
+ Matching pursuits with time-frequency dictionaries, IEEE Transactions on
693
+ Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
694
+ (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
695
+
696
+ This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
697
+ M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
698
+ Matching Pursuit Technical Report - CS Technion, April 2008.
699
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
700
+
701
+ Examples
702
+ --------
703
+ >>> from sklearn.linear_model import OrthogonalMatchingPursuit
704
+ >>> from sklearn.datasets import make_regression
705
+ >>> X, y = make_regression(noise=4, random_state=0)
706
+ >>> reg = OrthogonalMatchingPursuit().fit(X, y)
707
+ >>> reg.score(X, y)
708
+ 0.9991...
709
+ >>> reg.predict(X[:1,])
710
+ array([-78.3854...])
711
+ """
712
+
713
+ _parameter_constraints: dict = {
714
+ "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
715
+ "tol": [Interval(Real, 0, None, closed="left"), None],
716
+ "fit_intercept": ["boolean"],
717
+ "precompute": [StrOptions({"auto"}), "boolean"],
718
+ }
719
+
720
+ def __init__(
721
+ self,
722
+ *,
723
+ n_nonzero_coefs=None,
724
+ tol=None,
725
+ fit_intercept=True,
726
+ precompute="auto",
727
+ ):
728
+ self.n_nonzero_coefs = n_nonzero_coefs
729
+ self.tol = tol
730
+ self.fit_intercept = fit_intercept
731
+ self.precompute = precompute
732
+
733
+ @_fit_context(prefer_skip_nested_validation=True)
734
+ def fit(self, X, y):
735
+ """Fit the model using X, y as training data.
736
+
737
+ Parameters
738
+ ----------
739
+ X : array-like of shape (n_samples, n_features)
740
+ Training data.
741
+
742
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
743
+ Target values. Will be cast to X's dtype if necessary.
744
+
745
+ Returns
746
+ -------
747
+ self : object
748
+ Returns an instance of self.
749
+ """
750
+ X, y = self._validate_data(X, y, multi_output=True, y_numeric=True)
751
+ n_features = X.shape[1]
752
+
753
+ X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(
754
+ X, y, None, self.precompute, self.fit_intercept, copy=True
755
+ )
756
+
757
+ if y.ndim == 1:
758
+ y = y[:, np.newaxis]
759
+
760
+ if self.n_nonzero_coefs is None and self.tol is None:
761
+ # default for n_nonzero_coefs is 0.1 * n_features
762
+ # but at least one.
763
+ self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
764
+ else:
765
+ self.n_nonzero_coefs_ = self.n_nonzero_coefs
766
+
767
+ if Gram is False:
768
+ coef_, self.n_iter_ = orthogonal_mp(
769
+ X,
770
+ y,
771
+ n_nonzero_coefs=self.n_nonzero_coefs_,
772
+ tol=self.tol,
773
+ precompute=False,
774
+ copy_X=True,
775
+ return_n_iter=True,
776
+ )
777
+ else:
778
+ norms_sq = np.sum(y**2, axis=0) if self.tol is not None else None
779
+
780
+ coef_, self.n_iter_ = orthogonal_mp_gram(
781
+ Gram,
782
+ Xy=Xy,
783
+ n_nonzero_coefs=self.n_nonzero_coefs_,
784
+ tol=self.tol,
785
+ norms_squared=norms_sq,
786
+ copy_Gram=True,
787
+ copy_Xy=True,
788
+ return_n_iter=True,
789
+ )
790
+ self.coef_ = coef_.T
791
+ self._set_intercept(X_offset, y_offset, X_scale)
792
+ return self
793
+
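A short sketch of the `tol` branch handled by `fit` above (editorial addition; the dataset is an assumption): when `tol` is set it overrides `n_nonzero_coefs`, and atoms are added until the squared residual norm falls below the budget.

from sklearn.datasets import make_regression
from sklearn.linear_model import OrthogonalMatchingPursuit

X, y = make_regression(n_features=40, n_informative=5, noise=0.1,
                       random_state=0)
reg = OrthogonalMatchingPursuit(tol=1.0).fit(X, y)
# n_iter_ reports how many atoms were needed to reach the tolerance.
print(reg.n_iter_, (reg.coef_ != 0).sum())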
794
+
795
+ def _omp_path_residues(
796
+ X_train,
797
+ y_train,
798
+ X_test,
799
+ y_test,
800
+ copy=True,
801
+ fit_intercept=True,
802
+ max_iter=100,
803
+ ):
804
+ """Compute the residues on left-out data for a full LARS path.
805
+
806
+ Parameters
807
+ ----------
808
+ X_train : ndarray of shape (n_samples, n_features)
809
+ The data to fit OMP on.
810
+
811
+ y_train : ndarray of shape (n_samples)
812
+ The target variable to fit OMP on.
813
+
814
+ X_test : ndarray of shape (n_samples, n_features)
815
+ The data to compute the residues on.
816
+
817
+ y_test : ndarray of shape (n_samples)
818
+ The target variable to compute the residues on.
819
+
820
+ copy : bool, default=True
821
+ Whether X_train, X_test, y_train and y_test should be copied. If
822
+ False, they may be overwritten.
823
+
824
+ fit_intercept : bool, default=True
825
+ Whether to calculate the intercept for this model. If set
826
+ to false, no intercept will be used in calculations
827
+ (i.e. data is expected to be centered).
828
+
829
+ max_iter : int, default=100
830
+ Maximum number of iterations to perform, and therefore the maximum
831
+ number of features to include. 100 by default.
832
+
833
+ Returns
834
+ -------
835
+ residues : ndarray of shape (max_features, n_samples)
836
+ Residues of the prediction on the test data.
837
+ """
838
+
839
+ if copy:
840
+ X_train = X_train.copy()
841
+ y_train = y_train.copy()
842
+ X_test = X_test.copy()
843
+ y_test = y_test.copy()
844
+
845
+ if fit_intercept:
846
+ X_mean = X_train.mean(axis=0)
847
+ X_train -= X_mean
848
+ X_test -= X_mean
849
+ y_mean = y_train.mean(axis=0)
850
+ y_train = as_float_array(y_train, copy=False)
851
+ y_train -= y_mean
852
+ y_test = as_float_array(y_test, copy=False)
853
+ y_test -= y_mean
854
+
855
+ coefs = orthogonal_mp(
856
+ X_train,
857
+ y_train,
858
+ n_nonzero_coefs=max_iter,
859
+ tol=None,
860
+ precompute=False,
861
+ copy_X=False,
862
+ return_path=True,
863
+ )
864
+ if coefs.ndim == 1:
865
+ coefs = coefs[:, np.newaxis]
866
+
867
+ return np.dot(coefs.T, X_test.T) - y_test
868
+
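To make the returned shape concrete, a toy check (editorial sketch with made-up arrays):

import numpy as np

coefs = np.zeros((4, 3))       # (n_features, n_steps), as from return_path
X_test = np.ones((5, 4))       # (n_test_samples, n_features)
y_test = np.zeros(5)
residues = np.dot(coefs.T, X_test.T) - y_test
print(residues.shape)          # (3, 5), i.e. (max_features, n_samples)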
869
+
870
+ class OrthogonalMatchingPursuitCV(RegressorMixin, LinearModel):
871
+ """Cross-validated Orthogonal Matching Pursuit model (OMP).
872
+
873
+ See glossary entry for :term:`cross-validation estimator`.
874
+
875
+ Read more in the :ref:`User Guide <omp>`.
876
+
877
+ Parameters
878
+ ----------
879
+ copy : bool, default=True
880
+ Whether the design matrix X must be copied by the algorithm. A false
881
+ value is only helpful if X is already Fortran-ordered, otherwise a
882
+ copy is made anyway.
883
+
884
+ fit_intercept : bool, default=True
885
+ Whether to calculate the intercept for this model. If set
886
+ to false, no intercept will be used in calculations
887
+ (i.e. data is expected to be centered).
888
+
889
+ max_iter : int, default=None
890
+ Maximum number of iterations to perform, and therefore the maximum
891
+ number of features to include. Defaults to 10% of ``n_features``, but at least 5 and at most ``n_features``.
892
+
893
+ cv : int, cross-validation generator or iterable, default=None
894
+ Determines the cross-validation splitting strategy.
895
+ Possible inputs for cv are:
896
+
897
+ - None, to use the default 5-fold cross-validation,
898
+ - integer, to specify the number of folds.
899
+ - :term:`CV splitter`,
900
+ - An iterable yielding (train, test) splits as arrays of indices.
901
+
902
+ For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
903
+
904
+ Refer to the :ref:`User Guide <cross_validation>` for the various
905
+ cross-validation strategies that can be used here.
906
+
907
+ .. versionchanged:: 0.22
908
+ ``cv`` default value if None changed from 3-fold to 5-fold.
909
+
910
+ n_jobs : int, default=None
911
+ Number of CPUs to use during the cross validation.
912
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
913
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
914
+ for more details.
915
+
916
+ verbose : bool or int, default=False
917
+ Sets the verbosity amount.
918
+
919
+ Attributes
920
+ ----------
921
+ intercept_ : float or ndarray of shape (n_targets,)
922
+ Independent term in decision function.
923
+
924
+ coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
925
+ Parameter vector (w in the problem formulation).
926
+
927
+ n_nonzero_coefs_ : int
928
+ Estimated number of non-zero coefficients giving the best mean squared
929
+ error over the cross-validation folds.
930
+
931
+ n_iter_ : int or array-like
932
+ Number of active features across every target for the model refit with
933
+ the best hyperparameters obtained by cross-validating across all folds.
934
+
935
+ n_features_in_ : int
936
+ Number of features seen during :term:`fit`.
937
+
938
+ .. versionadded:: 0.24
939
+
940
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
941
+ Names of features seen during :term:`fit`. Defined only when `X`
942
+ has feature names that are all strings.
943
+
944
+ .. versionadded:: 1.0
945
+
946
+ See Also
947
+ --------
948
+ orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
949
+ orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
950
+ problems using only the Gram matrix X.T * X and the product X.T * y.
951
+ lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
952
+ Lars : Least Angle Regression model a.k.a. LAR.
953
+ LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
954
+ OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
955
+ LarsCV : Cross-validated Least Angle Regression model.
956
+ LassoLarsCV : Cross-validated Lasso model fit with Least Angle Regression.
957
+ sklearn.decomposition.sparse_encode : Generic sparse coding.
958
+ Each column of the result is the solution to a Lasso problem.
959
+
960
+ Notes
961
+ -----
962
+ In `fit`, once the optimal number of non-zero coefficients is found through
963
+ cross-validation, the model is fit again using the entire training set.
964
+
965
+ Examples
966
+ --------
967
+ >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
968
+ >>> from sklearn.datasets import make_regression
969
+ >>> X, y = make_regression(n_features=100, n_informative=10,
970
+ ... noise=4, random_state=0)
971
+ >>> reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
972
+ >>> reg.score(X, y)
973
+ 0.9991...
974
+ >>> reg.n_nonzero_coefs_
975
+ 10
976
+ >>> reg.predict(X[:1,])
977
+ array([-78.3854...])
978
+ """
979
+
980
+ _parameter_constraints: dict = {
981
+ "copy": ["boolean"],
982
+ "fit_intercept": ["boolean"],
983
+ "max_iter": [Interval(Integral, 0, None, closed="left"), None],
984
+ "cv": ["cv_object"],
985
+ "n_jobs": [Integral, None],
986
+ "verbose": ["verbose"],
987
+ }
988
+
989
+ def __init__(
990
+ self,
991
+ *,
992
+ copy=True,
993
+ fit_intercept=True,
994
+ max_iter=None,
995
+ cv=None,
996
+ n_jobs=None,
997
+ verbose=False,
998
+ ):
999
+ self.copy = copy
1000
+ self.fit_intercept = fit_intercept
1001
+ self.max_iter = max_iter
1002
+ self.cv = cv
1003
+ self.n_jobs = n_jobs
1004
+ self.verbose = verbose
1005
+
1006
+ @_fit_context(prefer_skip_nested_validation=True)
1007
+ def fit(self, X, y, **fit_params):
1008
+ """Fit the model using X, y as training data.
1009
+
1010
+ Parameters
1011
+ ----------
1012
+ X : array-like of shape (n_samples, n_features)
1013
+ Training data.
1014
+
1015
+ y : array-like of shape (n_samples,)
1016
+ Target values. Will be cast to X's dtype if necessary.
1017
+
1018
+ **fit_params : dict
1019
+ Parameters to pass to the underlying splitter.
1020
+
1021
+ .. versionadded:: 1.4
1022
+ Only available if `enable_metadata_routing=True`,
1023
+ which can be set by using
1024
+ ``sklearn.set_config(enable_metadata_routing=True)``.
1025
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
1026
+ more details.
1027
+
1028
+ Returns
1029
+ -------
1030
+ self : object
1031
+ Returns an instance of self.
1032
+ """
1033
+ _raise_for_params(fit_params, self, "fit")
1034
+
1035
+ X, y = self._validate_data(X, y, y_numeric=True, ensure_min_features=2)
1036
+ X = as_float_array(X, copy=False, force_all_finite=False)
1037
+ cv = check_cv(self.cv, classifier=False)
1038
+ if _routing_enabled():
1039
+ routed_params = process_routing(self, "fit", **fit_params)
1040
+ else:
1041
+ # TODO(SLEP6): remove when metadata routing cannot be disabled.
1042
+ routed_params = Bunch()
1043
+ routed_params.splitter = Bunch(split={})
1044
+ max_iter = (
1045
+ min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
1046
+ if not self.max_iter
1047
+ else self.max_iter
1048
+ )
1049
+ cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
1050
+ delayed(_omp_path_residues)(
1051
+ X[train],
1052
+ y[train],
1053
+ X[test],
1054
+ y[test],
1055
+ self.copy,
1056
+ self.fit_intercept,
1057
+ max_iter,
1058
+ )
1059
+ for train, test in cv.split(X, **routed_params.splitter.split)
1060
+ )
1061
+
1062
+ min_early_stop = min(fold.shape[0] for fold in cv_paths)
1063
+ mse_folds = np.array(
1064
+ [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]
1065
+ )
1066
+ best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
1067
+ self.n_nonzero_coefs_ = best_n_nonzero_coefs
1068
+ omp = OrthogonalMatchingPursuit(
1069
+ n_nonzero_coefs=best_n_nonzero_coefs,
1070
+ fit_intercept=self.fit_intercept,
1071
+ ).fit(X, y)
1072
+
1073
+ self.coef_ = omp.coef_
1074
+ self.intercept_ = omp.intercept_
1075
+ self.n_iter_ = omp.n_iter_
1076
+ return self
1077
+
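To make the selection rule in `fit` concrete, a tiny hand-worked sketch (values are made up; shapes mirror the residue matrices produced by `_omp_path_residues`):

import numpy as np

cv_paths = [np.array([[2.0, 2.0], [1.0, 1.2], [1.1, 1.3]]),
            np.array([[2.1, 1.9], [1.1, 0.9], [1.4, 1.2]])]
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                      for fold in cv_paths])
# Average the per-fold MSE curves, pick argmin + 1 as the sparsity level.
print(np.argmin(mse_folds.mean(axis=0)) + 1)  # 2: second sparsity level wins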
1078
+ def get_metadata_routing(self):
1079
+ """Get metadata routing of this object.
1080
+
1081
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1082
+ mechanism works.
1083
+
1084
+ .. versionadded:: 1.4
1085
+
1086
+ Returns
1087
+ -------
1088
+ routing : MetadataRouter
1089
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
1090
+ routing information.
1091
+ """
1092
+
1093
+ router = MetadataRouter(owner=self.__class__.__name__).add(
1094
+ splitter=self.cv,
1095
+ method_mapping=MethodMapping().add(callee="split", caller="fit"),
1096
+ )
1097
+ return router
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py ADDED
@@ -0,0 +1,575 @@
1
+ # Authors: Rob Zinkov, Mathieu Blondel
2
+ # License: BSD 3 clause
3
+ from numbers import Real
4
+
5
+ from ..base import _fit_context
6
+ from ..utils._param_validation import Interval, StrOptions
7
+ from ._stochastic_gradient import DEFAULT_EPSILON, BaseSGDClassifier, BaseSGDRegressor
8
+
9
+
10
+ class PassiveAggressiveClassifier(BaseSGDClassifier):
11
+ """Passive Aggressive Classifier.
12
+
13
+ Read more in the :ref:`User Guide <passive_aggressive>`.
14
+
15
+ Parameters
16
+ ----------
17
+ C : float, default=1.0
18
+ Maximum step size (regularization). Defaults to 1.0.
19
+
20
+ fit_intercept : bool, default=True
21
+ Whether the intercept should be estimated or not. If False, the
22
+ data is assumed to be already centered.
23
+
24
+ max_iter : int, default=1000
25
+ The maximum number of passes over the training data (aka epochs).
26
+ It only impacts the behavior in the ``fit`` method, and not the
27
+ :meth:`~sklearn.linear_model.PassiveAggressiveClassifier.partial_fit` method.
28
+
29
+ .. versionadded:: 0.19
30
+
31
+ tol : float or None, default=1e-3
32
+ The stopping criterion. If it is not None, the iterations will stop
33
+ when (loss > previous_loss - tol).
34
+
35
+ .. versionadded:: 0.19
36
+
37
+ early_stopping : bool, default=False
38
+ Whether to use early stopping to terminate training when validation
39
+ score is not improving. If set to True, it will automatically set aside
40
+ a stratified fraction of training data as validation and terminate
41
+ training when validation score is not improving by at least `tol` for
42
+ `n_iter_no_change` consecutive epochs.
43
+
44
+ .. versionadded:: 0.20
45
+
46
+ validation_fraction : float, default=0.1
47
+ The proportion of training data to set aside as validation set for
48
+ early stopping. Must be between 0 and 1.
49
+ Only used if early_stopping is True.
50
+
51
+ .. versionadded:: 0.20
52
+
53
+ n_iter_no_change : int, default=5
54
+ Number of iterations with no improvement to wait before early stopping.
55
+
56
+ .. versionadded:: 0.20
57
+
58
+ shuffle : bool, default=True
59
+ Whether or not the training data should be shuffled after each epoch.
60
+
61
+ verbose : int, default=0
62
+ The verbosity level.
63
+
64
+ loss : str, default="hinge"
65
+ The loss function to be used:
66
+ hinge: equivalent to PA-I in the reference paper.
67
+ squared_hinge: equivalent to PA-II in the reference paper.
68
+
69
+ n_jobs : int or None, default=None
70
+ The number of CPUs to use to do the OVA (One Versus All, for
71
+ multi-class problems) computation.
72
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
73
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
74
+ for more details.
75
+
76
+ random_state : int, RandomState instance, default=None
77
+ Used to shuffle the training data, when ``shuffle`` is set to
78
+ ``True``. Pass an int for reproducible output across multiple
79
+ function calls.
80
+ See :term:`Glossary <random_state>`.
81
+
82
+ warm_start : bool, default=False
83
+ When set to True, reuse the solution of the previous call to fit as
84
+ initialization, otherwise, just erase the previous solution.
85
+ See :term:`the Glossary <warm_start>`.
86
+
87
+ Repeatedly calling fit or partial_fit when warm_start is True can
88
+ result in a different solution than when calling fit a single time
89
+ because of the way the data is shuffled.
90
+
91
+ class_weight : dict, {class_label: weight} or "balanced" or None, \
92
+ default=None
93
+ Preset for the class_weight fit parameter.
94
+
95
+ Weights associated with classes. If not given, all classes
96
+ are supposed to have weight one.
97
+
98
+ The "balanced" mode uses the values of y to automatically adjust
99
+ weights inversely proportional to class frequencies in the input data
100
+ as ``n_samples / (n_classes * np.bincount(y))``.
101
+
102
+ .. versionadded:: 0.17
103
+ parameter *class_weight* to automatically weight samples.
104
+
105
+ average : bool or int, default=False
106
+ When set to True, computes the averaged SGD weights and stores the
107
+ result in the ``coef_`` attribute. If set to an int greater than 1,
108
+ averaging will begin once the total number of samples seen reaches
109
+ average. So average=10 will begin averaging after seeing 10 samples.
110
+
111
+ .. versionadded:: 0.19
112
+ parameter *average* to use weights averaging in SGD.
113
+
114
+ Attributes
115
+ ----------
116
+ coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
117
+ (n_classes, n_features)
118
+ Weights assigned to the features.
119
+
120
+ intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
121
+ Constants in decision function.
122
+
123
+ n_features_in_ : int
124
+ Number of features seen during :term:`fit`.
125
+
126
+ .. versionadded:: 0.24
127
+
128
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
129
+ Names of features seen during :term:`fit`. Defined only when `X`
130
+ has feature names that are all strings.
131
+
132
+ .. versionadded:: 1.0
133
+
134
+ n_iter_ : int
135
+ The actual number of iterations to reach the stopping criterion.
136
+ For multiclass fits, it is the maximum over every binary fit.
137
+
138
+ classes_ : ndarray of shape (n_classes,)
139
+ The unique classes labels.
140
+
141
+ t_ : int
142
+ Number of weight updates performed during training.
143
+ Same as ``(n_iter_ * n_samples + 1)``.
144
+
145
+ loss_function_ : callable
146
+ Loss function used by the algorithm.
147
+
148
+ See Also
149
+ --------
150
+ SGDClassifier : Incrementally trained logistic regression.
151
+ Perceptron : Linear perceptron classifier.
152
+
153
+ References
154
+ ----------
155
+ Online Passive-Aggressive Algorithms
156
+ <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
157
+ K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
158
+
159
+ Examples
160
+ --------
161
+ >>> from sklearn.linear_model import PassiveAggressiveClassifier
162
+ >>> from sklearn.datasets import make_classification
163
+ >>> X, y = make_classification(n_features=4, random_state=0)
164
+ >>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
165
+ ... tol=1e-3)
166
+ >>> clf.fit(X, y)
167
+ PassiveAggressiveClassifier(random_state=0)
168
+ >>> print(clf.coef_)
169
+ [[0.26642044 0.45070924 0.67251877 0.64185414]]
170
+ >>> print(clf.intercept_)
171
+ [1.84127814]
172
+ >>> print(clf.predict([[0, 0, 0, 0]]))
173
+ [1]
174
+ """
175
+
176
+ _parameter_constraints: dict = {
177
+ **BaseSGDClassifier._parameter_constraints,
178
+ "loss": [StrOptions({"hinge", "squared_hinge"})],
179
+ "C": [Interval(Real, 0, None, closed="right")],
180
+ }
181
+
182
+ def __init__(
183
+ self,
184
+ *,
185
+ C=1.0,
186
+ fit_intercept=True,
187
+ max_iter=1000,
188
+ tol=1e-3,
189
+ early_stopping=False,
190
+ validation_fraction=0.1,
191
+ n_iter_no_change=5,
192
+ shuffle=True,
193
+ verbose=0,
194
+ loss="hinge",
195
+ n_jobs=None,
196
+ random_state=None,
197
+ warm_start=False,
198
+ class_weight=None,
199
+ average=False,
200
+ ):
201
+ super().__init__(
202
+ penalty=None,
203
+ fit_intercept=fit_intercept,
204
+ max_iter=max_iter,
205
+ tol=tol,
206
+ early_stopping=early_stopping,
207
+ validation_fraction=validation_fraction,
208
+ n_iter_no_change=n_iter_no_change,
209
+ shuffle=shuffle,
210
+ verbose=verbose,
211
+ random_state=random_state,
212
+ eta0=1.0,
213
+ warm_start=warm_start,
214
+ class_weight=class_weight,
215
+ average=average,
216
+ n_jobs=n_jobs,
217
+ )
218
+
219
+ self.C = C
220
+ self.loss = loss
221
+
222
+ @_fit_context(prefer_skip_nested_validation=True)
223
+ def partial_fit(self, X, y, classes=None):
224
+ """Fit linear model with Passive Aggressive algorithm.
225
+
226
+ Parameters
227
+ ----------
228
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
229
+ Subset of the training data.
230
+
231
+ y : array-like of shape (n_samples,)
232
+ Subset of the target values.
233
+
234
+ classes : ndarray of shape (n_classes,)
235
+ Classes across all calls to partial_fit.
236
+ Can be obtained via `np.unique(y_all)`, where y_all is the
237
+ target vector of the entire dataset.
238
+ This argument is required for the first call to partial_fit
239
+ and can be omitted in the subsequent calls.
240
+ Note that y doesn't need to contain all labels in `classes`.
241
+
242
+ Returns
243
+ -------
244
+ self : object
245
+ Fitted estimator.
246
+ """
247
+ if not hasattr(self, "classes_"):
248
+ self._more_validate_params(for_partial_fit=True)
249
+
250
+ if self.class_weight == "balanced":
251
+ raise ValueError(
252
+ "class_weight 'balanced' is not supported for "
253
+ "partial_fit. For 'balanced' weights, use "
254
+ "`sklearn.utils.compute_class_weight` with "
255
+ "`class_weight='balanced'`. In place of y you "
256
+ "can use a large enough subset of the full "
257
+ "training set target to properly estimate the "
258
+ "class frequency distributions. Pass the "
259
+ "resulting weights as the class_weight "
260
+ "parameter."
261
+ )
262
+
263
+ lr = "pa1" if self.loss == "hinge" else "pa2"
264
+ return self._partial_fit(
265
+ X,
266
+ y,
267
+ alpha=1.0,
268
+ C=self.C,
269
+ loss="hinge",
270
+ learning_rate=lr,
271
+ max_iter=1,
272
+ classes=classes,
273
+ sample_weight=None,
274
+ coef_init=None,
275
+ intercept_init=None,
276
+ )
277
+
278
+ @_fit_context(prefer_skip_nested_validation=True)
279
+ def fit(self, X, y, coef_init=None, intercept_init=None):
280
+ """Fit linear model with Passive Aggressive algorithm.
281
+
282
+ Parameters
283
+ ----------
284
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
285
+ Training data.
286
+
287
+ y : array-like of shape (n_samples,)
288
+ Target values.
289
+
290
+ coef_init : ndarray of shape (n_classes, n_features)
291
+ The initial coefficients to warm-start the optimization.
292
+
293
+ intercept_init : ndarray of shape (n_classes,)
294
+ The initial intercept to warm-start the optimization.
295
+
296
+ Returns
297
+ -------
298
+ self : object
299
+ Fitted estimator.
300
+ """
301
+ self._more_validate_params()
302
+
303
+ lr = "pa1" if self.loss == "hinge" else "pa2"
304
+ return self._fit(
305
+ X,
306
+ y,
307
+ alpha=1.0,
308
+ C=self.C,
309
+ loss="hinge",
310
+ learning_rate=lr,
311
+ coef_init=coef_init,
312
+ intercept_init=intercept_init,
313
+ )
314
+
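A brief sketch of the incremental API implemented above (toy data is an assumption): the first `partial_fit` call must receive `classes`; later calls may omit it.

import numpy as np
from sklearn.linear_model import PassiveAggressiveClassifier

X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
y = np.array([1, 0, 1, 0])
clf = PassiveAggressiveClassifier(random_state=0)
clf.partial_fit(X[:2], y[:2], classes=np.array([0, 1]))
clf.partial_fit(X[2:], y[2:])   # classes can be omitted from now on
print(clf.predict([[0.0, 1.0]]))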
315
+
316
+ class PassiveAggressiveRegressor(BaseSGDRegressor):
317
+ """Passive Aggressive Regressor.
318
+
319
+ Read more in the :ref:`User Guide <passive_aggressive>`.
320
+
321
+ Parameters
322
+ ----------
323
+
324
+ C : float, default=1.0
325
+ Maximum step size (regularization). Defaults to 1.0.
326
+
327
+ fit_intercept : bool, default=True
328
+ Whether the intercept should be estimated or not. If False, the
329
+ data is assumed to be already centered. Defaults to True.
330
+
331
+ max_iter : int, default=1000
332
+ The maximum number of passes over the training data (aka epochs).
333
+ It only impacts the behavior in the ``fit`` method, and not the
334
+ :meth:`~sklearn.linear_model.PassiveAggressiveRegressor.partial_fit` method.
335
+
336
+ .. versionadded:: 0.19
337
+
338
+ tol : float or None, default=1e-3
339
+ The stopping criterion. If it is not None, the iterations will stop
340
+ when (loss > previous_loss - tol).
341
+
342
+ .. versionadded:: 0.19
343
+
344
+ early_stopping : bool, default=False
345
+ Whether to use early stopping to terminate training when validation
346
+ score is not improving. If set to True, it will automatically set aside
347
+ a fraction of training data as validation and terminate
348
+ training when validation score is not improving by at least tol for
349
+ n_iter_no_change consecutive epochs.
350
+
351
+ .. versionadded:: 0.20
352
+
353
+ validation_fraction : float, default=0.1
354
+ The proportion of training data to set aside as validation set for
355
+ early stopping. Must be between 0 and 1.
356
+ Only used if early_stopping is True.
357
+
358
+ .. versionadded:: 0.20
359
+
360
+ n_iter_no_change : int, default=5
361
+ Number of iterations with no improvement to wait before early stopping.
362
+
363
+ .. versionadded:: 0.20
364
+
365
+ shuffle : bool, default=True
366
+ Whether or not the training data should be shuffled after each epoch.
367
+
368
+ verbose : int, default=0
369
+ The verbosity level.
370
+
371
+ loss : str, default="epsilon_insensitive"
372
+ The loss function to be used:
373
+ epsilon_insensitive: equivalent to PA-I in the reference paper.
374
+ squared_epsilon_insensitive: equivalent to PA-II in the reference
375
+ paper.
376
+
377
+ epsilon : float, default=0.1
378
+ If the difference between the current prediction and the correct label
379
+ is below this threshold, the model is not updated.
380
+
381
+ random_state : int, RandomState instance, default=None
382
+ Used to shuffle the training data, when ``shuffle`` is set to
383
+ ``True``. Pass an int for reproducible output across multiple
384
+ function calls.
385
+ See :term:`Glossary <random_state>`.
386
+
387
+ warm_start : bool, default=False
388
+ When set to True, reuse the solution of the previous call to fit as
389
+ initialization, otherwise, just erase the previous solution.
390
+ See :term:`the Glossary <warm_start>`.
391
+
392
+ Repeatedly calling fit or partial_fit when warm_start is True can
393
+ result in a different solution than when calling fit a single time
394
+ because of the way the data is shuffled.
395
+
396
+ average : bool or int, default=False
397
+ When set to True, computes the averaged SGD weights and stores the
398
+ result in the ``coef_`` attribute. If set to an int greater than 1,
399
+ averaging will begin once the total number of samples seen reaches
400
+ average. So average=10 will begin averaging after seeing 10 samples.
401
+
402
+ .. versionadded:: 0.19
403
+ parameter *average* to use weights averaging in SGD.
404
+
405
+ Attributes
406
+ ----------
407
+ coef_ : ndarray of shape (n_features,)
409
+ Weights assigned to the features.
410
+
411
+ intercept_ : ndarray of shape (1,)
412
+ Constants in decision function.
413
+
414
+ n_features_in_ : int
415
+ Number of features seen during :term:`fit`.
416
+
417
+ .. versionadded:: 0.24
418
+
419
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
420
+ Names of features seen during :term:`fit`. Defined only when `X`
421
+ has feature names that are all strings.
422
+
423
+ .. versionadded:: 1.0
424
+
425
+ n_iter_ : int
426
+ The actual number of iterations to reach the stopping criterion.
427
+
428
+ t_ : int
429
+ Number of weight updates performed during training.
430
+ Same as ``(n_iter_ * n_samples + 1)``.
431
+
432
+ See Also
433
+ --------
434
+ SGDRegressor : Linear model fitted by minimizing a regularized
435
+ empirical loss with SGD.
436
+
437
+ References
438
+ ----------
439
+ Online Passive-Aggressive Algorithms
440
+ <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
441
+ K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006).
442
+
443
+ Examples
444
+ --------
445
+ >>> from sklearn.linear_model import PassiveAggressiveRegressor
446
+ >>> from sklearn.datasets import make_regression
447
+
448
+ >>> X, y = make_regression(n_features=4, random_state=0)
449
+ >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0,
450
+ ... tol=1e-3)
451
+ >>> regr.fit(X, y)
452
+ PassiveAggressiveRegressor(max_iter=100, random_state=0)
453
+ >>> print(regr.coef_)
454
+ [20.48736655 34.18818427 67.59122734 87.94731329]
455
+ >>> print(regr.intercept_)
456
+ [-0.02306214]
457
+ >>> print(regr.predict([[0, 0, 0, 0]]))
458
+ [-0.02306214]
459
+ """
460
+
461
+ _parameter_constraints: dict = {
462
+ **BaseSGDRegressor._parameter_constraints,
463
+ "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})],
464
+ "C": [Interval(Real, 0, None, closed="right")],
465
+ "epsilon": [Interval(Real, 0, None, closed="left")],
466
+ }
467
+
468
+ def __init__(
469
+ self,
470
+ *,
471
+ C=1.0,
472
+ fit_intercept=True,
473
+ max_iter=1000,
474
+ tol=1e-3,
475
+ early_stopping=False,
476
+ validation_fraction=0.1,
477
+ n_iter_no_change=5,
478
+ shuffle=True,
479
+ verbose=0,
480
+ loss="epsilon_insensitive",
481
+ epsilon=DEFAULT_EPSILON,
482
+ random_state=None,
483
+ warm_start=False,
484
+ average=False,
485
+ ):
486
+ super().__init__(
487
+ penalty=None,
488
+ l1_ratio=0,
489
+ epsilon=epsilon,
490
+ eta0=1.0,
491
+ fit_intercept=fit_intercept,
492
+ max_iter=max_iter,
493
+ tol=tol,
494
+ early_stopping=early_stopping,
495
+ validation_fraction=validation_fraction,
496
+ n_iter_no_change=n_iter_no_change,
497
+ shuffle=shuffle,
498
+ verbose=verbose,
499
+ random_state=random_state,
500
+ warm_start=warm_start,
501
+ average=average,
502
+ )
503
+ self.C = C
504
+ self.loss = loss
505
+
506
+ @_fit_context(prefer_skip_nested_validation=True)
507
+ def partial_fit(self, X, y):
508
+ """Fit linear model with Passive Aggressive algorithm.
509
+
510
+ Parameters
511
+ ----------
512
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
513
+ Subset of training data.
514
+
515
+ y : ndarray of shape (n_samples,)
516
+ Subset of target values.
517
+
518
+ Returns
519
+ -------
520
+ self : object
521
+ Fitted estimator.
522
+ """
523
+ if not hasattr(self, "coef_"):
524
+ self._more_validate_params(for_partial_fit=True)
525
+
526
+ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
527
+ return self._partial_fit(
528
+ X,
529
+ y,
530
+ alpha=1.0,
531
+ C=self.C,
532
+ loss="epsilon_insensitive",
533
+ learning_rate=lr,
534
+ max_iter=1,
535
+ sample_weight=None,
536
+ coef_init=None,
537
+ intercept_init=None,
538
+ )
539
+
540
+ @_fit_context(prefer_skip_nested_validation=True)
541
+ def fit(self, X, y, coef_init=None, intercept_init=None):
542
+ """Fit linear model with Passive Aggressive algorithm.
543
+
544
+ Parameters
545
+ ----------
546
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
547
+ Training data.
548
+
549
+ y : ndarray of shape (n_samples,)
550
+ Target values.
551
+
552
+ coef_init : ndarray of shape (n_features,)
553
+ The initial coefficients to warm-start the optimization.
554
+
555
+ intercept_init : ndarray of shape (1,)
556
+ The initial intercept to warm-start the optimization.
557
+
558
+ Returns
559
+ -------
560
+ self : object
561
+ Fitted estimator.
562
+ """
563
+ self._more_validate_params()
564
+
565
+ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
566
+ return self._fit(
567
+ X,
568
+ y,
569
+ alpha=1.0,
570
+ C=self.C,
571
+ loss="epsilon_insensitive",
572
+ learning_rate=lr,
573
+ coef_init=coef_init,
574
+ intercept_init=intercept_init,
575
+ )
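A quick sketch of `epsilon` for the regressor above (synthetic, noise-free data assumed): residuals smaller than `epsilon` trigger no update, so a tight threshold still lets the weights converge.

import numpy as np
from sklearn.linear_model import PassiveAggressiveRegressor

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
y = X @ np.array([1.0, -2.0, 0.5])
reg = PassiveAggressiveRegressor(epsilon=0.01, random_state=0,
                                 tol=1e-3, max_iter=1000)
reg.fit(X, y)
print(np.round(reg.coef_, 2))   # expected close to [ 1. -2.  0.5]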
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py ADDED
@@ -0,0 +1,229 @@
1
+ # Author: Mathieu Blondel
2
+ # License: BSD 3 clause
3
+ from numbers import Real
4
+
5
+ from ..utils._param_validation import Interval, StrOptions
6
+ from ._stochastic_gradient import BaseSGDClassifier
7
+
8
+
9
+ class Perceptron(BaseSGDClassifier):
10
+ """Linear perceptron classifier.
11
+
12
+ The implementation is a wrapper around :class:`~sklearn.linear_model.SGDClassifier`
13
+ by fixing the `loss` and `learning_rate` parameters as::
14
+
15
+ SGDClassifier(loss="perceptron", learning_rate="constant")
16
+
17
+ Other available parameters are described below and are forwarded to
18
+ :class:`~sklearn.linear_model.SGDClassifier`.
19
+
20
+ Read more in the :ref:`User Guide <perceptron>`.
21
+
22
+ Parameters
23
+ ----------
24
+
25
+ penalty : {'l2','l1','elasticnet'}, default=None
26
+ The penalty (aka regularization term) to be used.
27
+
28
+ alpha : float, default=0.0001
29
+ Constant that multiplies the regularization term if regularization is
30
+ used.
31
+
32
+ l1_ratio : float, default=0.15
33
+ The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`.
34
+ `l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1.
35
+ Only used if `penalty='elasticnet'`.
36
+
37
+ .. versionadded:: 0.24
38
+
39
+ fit_intercept : bool, default=True
40
+ Whether the intercept should be estimated or not. If False, the
41
+ data is assumed to be already centered.
42
+
43
+ max_iter : int, default=1000
44
+ The maximum number of passes over the training data (aka epochs).
45
+ It only impacts the behavior in the ``fit`` method, and not the
46
+ :meth:`partial_fit` method.
47
+
48
+ .. versionadded:: 0.19
49
+
50
+ tol : float or None, default=1e-3
51
+ The stopping criterion. If it is not None, the iterations will stop
52
+ when (loss > previous_loss - tol).
53
+
54
+ .. versionadded:: 0.19
55
+
56
+ shuffle : bool, default=True
57
+ Whether or not the training data should be shuffled after each epoch.
58
+
59
+ verbose : int, default=0
60
+ The verbosity level.
61
+
62
+ eta0 : float, default=1
63
+ Constant by which the updates are multiplied.
64
+
65
+ n_jobs : int, default=None
66
+ The number of CPUs to use to do the OVA (One Versus All, for
67
+ multi-class problems) computation.
68
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
69
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
70
+ for more details.
71
+
72
+ random_state : int, RandomState instance or None, default=0
73
+ Used to shuffle the training data, when ``shuffle`` is set to
74
+ ``True``. Pass an int for reproducible output across multiple
75
+ function calls.
76
+ See :term:`Glossary <random_state>`.
77
+
78
+ early_stopping : bool, default=False
79
+ Whether to use early stopping to terminate training when validation
80
+ score is not improving. If set to True, it will automatically set aside
81
+ a stratified fraction of training data as validation and terminate
82
+ training when validation score is not improving by at least `tol` for
83
+ `n_iter_no_change` consecutive epochs.
84
+
85
+ .. versionadded:: 0.20
86
+
87
+ validation_fraction : float, default=0.1
88
+ The proportion of training data to set aside as validation set for
89
+ early stopping. Must be between 0 and 1.
90
+ Only used if early_stopping is True.
91
+
92
+ .. versionadded:: 0.20
93
+
94
+ n_iter_no_change : int, default=5
95
+ Number of iterations with no improvement to wait before early stopping.
96
+
97
+ .. versionadded:: 0.20
98
+
99
+ class_weight : dict, {class_label: weight} or "balanced", default=None
100
+ Preset for the class_weight fit parameter.
101
+
102
+ Weights associated with classes. If not given, all classes
103
+ are supposed to have weight one.
104
+
105
+ The "balanced" mode uses the values of y to automatically adjust
106
+ weights inversely proportional to class frequencies in the input data
107
+ as ``n_samples / (n_classes * np.bincount(y))``.
108
+
109
+ warm_start : bool, default=False
110
+ When set to True, reuse the solution of the previous call to fit as
111
+ initialization, otherwise, just erase the previous solution. See
112
+ :term:`the Glossary <warm_start>`.
113
+
114
+ Attributes
115
+ ----------
116
+ classes_ : ndarray of shape (n_classes,)
117
+ The unique classes labels.
118
+
119
+ coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
120
+ (n_classes, n_features)
121
+ Weights assigned to the features.
122
+
123
+ intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
124
+ Constants in decision function.
125
+
126
+ loss_function_ : concrete LossFunction
127
+ The function that determines the loss, or difference between the
128
+ output of the algorithm and the target values.
129
+
130
+ n_features_in_ : int
131
+ Number of features seen during :term:`fit`.
132
+
133
+ .. versionadded:: 0.24
134
+
135
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
136
+ Names of features seen during :term:`fit`. Defined only when `X`
137
+ has feature names that are all strings.
138
+
139
+ .. versionadded:: 1.0
140
+
141
+ n_iter_ : int
142
+ The actual number of iterations to reach the stopping criterion.
143
+ For multiclass fits, it is the maximum over every binary fit.
144
+
145
+ t_ : int
146
+ Number of weight updates performed during training.
147
+ Same as ``(n_iter_ * n_samples + 1)``.
148
+
149
+ See Also
150
+ --------
151
+ sklearn.linear_model.SGDClassifier : Linear classifiers
152
+ (SVM, logistic regression, etc.) with SGD training.
153
+
154
+ Notes
155
+ -----
156
+ ``Perceptron`` is a classification algorithm which shares the same
157
+ underlying implementation with ``SGDClassifier``. In fact,
158
+ ``Perceptron()`` is equivalent to `SGDClassifier(loss="perceptron",
159
+ eta0=1, learning_rate="constant", penalty=None)`.
160
+
161
+ References
162
+ ----------
163
+ https://en.wikipedia.org/wiki/Perceptron and references therein.
164
+
165
+ Examples
166
+ --------
167
+ >>> from sklearn.datasets import load_digits
168
+ >>> from sklearn.linear_model import Perceptron
169
+ >>> X, y = load_digits(return_X_y=True)
170
+ >>> clf = Perceptron(tol=1e-3, random_state=0)
171
+ >>> clf.fit(X, y)
172
+ Perceptron()
173
+ >>> clf.score(X, y)
174
+ 0.939...
175
+ """
176
+
177
+ _parameter_constraints: dict = {**BaseSGDClassifier._parameter_constraints}
178
+ _parameter_constraints.pop("loss")
179
+ _parameter_constraints.pop("average")
180
+ _parameter_constraints.update(
181
+ {
182
+ "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None],
183
+ "alpha": [Interval(Real, 0, None, closed="left")],
184
+ "l1_ratio": [Interval(Real, 0, 1, closed="both")],
185
+ "eta0": [Interval(Real, 0, None, closed="left")],
186
+ }
187
+ )
188
+
189
+ def __init__(
190
+ self,
191
+ *,
192
+ penalty=None,
193
+ alpha=0.0001,
194
+ l1_ratio=0.15,
195
+ fit_intercept=True,
196
+ max_iter=1000,
197
+ tol=1e-3,
198
+ shuffle=True,
199
+ verbose=0,
200
+ eta0=1.0,
201
+ n_jobs=None,
202
+ random_state=0,
203
+ early_stopping=False,
204
+ validation_fraction=0.1,
205
+ n_iter_no_change=5,
206
+ class_weight=None,
207
+ warm_start=False,
208
+ ):
209
+ super().__init__(
210
+ loss="perceptron",
211
+ penalty=penalty,
212
+ alpha=alpha,
213
+ l1_ratio=l1_ratio,
214
+ fit_intercept=fit_intercept,
215
+ max_iter=max_iter,
216
+ tol=tol,
217
+ shuffle=shuffle,
218
+ verbose=verbose,
219
+ random_state=random_state,
220
+ learning_rate="constant",
221
+ eta0=eta0,
222
+ early_stopping=early_stopping,
223
+ validation_fraction=validation_fraction,
224
+ n_iter_no_change=n_iter_no_change,
225
+ power_t=0.5,
226
+ warm_start=warm_start,
227
+ class_weight=class_weight,
228
+ n_jobs=n_jobs,
229
+ )
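A sketch checking the equivalence stated in the Notes section of the class docstring (assuming the remaining defaults line up, which they should when `random_state` matches):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import Perceptron, SGDClassifier

X, y = make_classification(random_state=0)
a = Perceptron(random_state=0).fit(X, y)
b = SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
                  penalty=None, random_state=0).fit(X, y)
print(np.allclose(a.coef_, b.coef_))   # expected: True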
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py ADDED
@@ -0,0 +1,308 @@
1
+ # Authors: David Dale <[email protected]>
2
+ # Christian Lorentzen <[email protected]>
3
+ # License: BSD 3 clause
4
+ import warnings
5
+ from numbers import Real
6
+
7
+ import numpy as np
8
+ from scipy import sparse
9
+ from scipy.optimize import linprog
10
+
11
+ from ..base import BaseEstimator, RegressorMixin, _fit_context
12
+ from ..exceptions import ConvergenceWarning
13
+ from ..utils import _safe_indexing
14
+ from ..utils._param_validation import Interval, StrOptions
15
+ from ..utils.fixes import parse_version, sp_version
16
+ from ..utils.validation import _check_sample_weight
17
+ from ._base import LinearModel
18
+
19
+
20
+ class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
21
+ """Linear regression model that predicts conditional quantiles.
22
+
23
+ The linear :class:`QuantileRegressor` optimizes the pinball loss for a
24
+ desired `quantile` and is robust to outliers.
25
+
26
+ This model uses an L1 regularization like
27
+ :class:`~sklearn.linear_model.Lasso`.
28
+
29
+ Read more in the :ref:`User Guide <quantile_regression>`.
30
+
31
+ .. versionadded:: 1.0
32
+
33
+ Parameters
34
+ ----------
35
+ quantile : float, default=0.5
36
+ The quantile that the model tries to predict. It must be strictly
37
+ between 0 and 1. If 0.5 (default), the model predicts the 50%
38
+ quantile, i.e. the median.
39
+
40
+ alpha : float, default=1.0
41
+ Regularization constant that multiplies the L1 penalty term.
42
+
43
+ fit_intercept : bool, default=True
44
+ Whether or not to fit the intercept.
45
+
46
+ solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \
47
+ 'revised simplex'}, default='highs'
48
+ Method used by :func:`scipy.optimize.linprog` to solve the linear
49
+ programming formulation.
50
+
51
+ From `scipy>=1.6.0`, it is recommended to use the highs methods because
52
+ they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs"
53
+ support sparse input data and, in fact, always convert to sparse csc.
54
+
55
+ From `scipy>=1.11.0`, "interior-point" is not available anymore.
56
+
57
+ .. versionchanged:: 1.4
58
+ The default of `solver` changed to `"highs"` in version 1.4.
59
+
60
+ solver_options : dict, default=None
61
+ Additional parameters passed to :func:`scipy.optimize.linprog` as
62
+ options. If `None` and if `solver='interior-point'`, then
63
+ `{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the
64
+ sake of stability.
65
+
66
+ Attributes
67
+ ----------
68
+ coef_ : array of shape (n_features,)
69
+ Estimated coefficients for the features.
70
+
71
+ intercept_ : float
72
+ The intercept of the model, aka bias term.
73
+
74
+ n_features_in_ : int
75
+ Number of features seen during :term:`fit`.
76
+
77
+ .. versionadded:: 0.24
78
+
79
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
80
+ Names of features seen during :term:`fit`. Defined only when `X`
81
+ has feature names that are all strings.
82
+
83
+ .. versionadded:: 1.0
84
+
85
+ n_iter_ : int
86
+ The actual number of iterations performed by the solver.
87
+
88
+ See Also
89
+ --------
90
+ Lasso : The Lasso is a linear model that estimates sparse coefficients
91
+ with l1 regularization.
92
+ HuberRegressor : Linear regression model that is robust to outliers.
93
+
94
+ Examples
95
+ --------
96
+ >>> from sklearn.linear_model import QuantileRegressor
97
+ >>> import numpy as np
98
+ >>> n_samples, n_features = 10, 2
99
+ >>> rng = np.random.RandomState(0)
100
+ >>> y = rng.randn(n_samples)
101
+ >>> X = rng.randn(n_samples, n_features)
102
+ >>> # the two following lines are optional in practice
103
+ >>> from sklearn.utils.fixes import sp_version, parse_version
104
+ >>> solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point"
105
+ >>> reg = QuantileRegressor(quantile=0.8, solver=solver).fit(X, y)
106
+ >>> np.mean(y <= reg.predict(X))
107
+ 0.8
108
+ """
109
+
110
+ _parameter_constraints: dict = {
111
+ "quantile": [Interval(Real, 0, 1, closed="neither")],
112
+ "alpha": [Interval(Real, 0, None, closed="left")],
113
+ "fit_intercept": ["boolean"],
114
+ "solver": [
115
+ StrOptions(
116
+ {
117
+ "highs-ds",
118
+ "highs-ipm",
119
+ "highs",
120
+ "interior-point",
121
+ "revised simplex",
122
+ }
123
+ ),
124
+ ],
125
+ "solver_options": [dict, None],
126
+ }
127
+
128
+ def __init__(
129
+ self,
130
+ *,
131
+ quantile=0.5,
132
+ alpha=1.0,
133
+ fit_intercept=True,
134
+ solver="highs",
135
+ solver_options=None,
136
+ ):
137
+ self.quantile = quantile
138
+ self.alpha = alpha
139
+ self.fit_intercept = fit_intercept
140
+ self.solver = solver
141
+ self.solver_options = solver_options
142
+
143
+ @_fit_context(prefer_skip_nested_validation=True)
144
+ def fit(self, X, y, sample_weight=None):
145
+ """Fit the model according to the given training data.
146
+
147
+ Parameters
148
+ ----------
149
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
150
+ Training data.
151
+
152
+ y : array-like of shape (n_samples,)
153
+ Target values.
154
+
155
+ sample_weight : array-like of shape (n_samples,), default=None
156
+ Sample weights.
157
+
158
+ Returns
159
+ -------
160
+ self : object
161
+ Returns self.
162
+ """
163
+ X, y = self._validate_data(
164
+ X,
165
+ y,
166
+ accept_sparse=["csc", "csr", "coo"],
167
+ y_numeric=True,
168
+ multi_output=False,
169
+ )
170
+ sample_weight = _check_sample_weight(sample_weight, X)
171
+
172
+ n_features = X.shape[1]
173
+ n_params = n_features
174
+
175
+ if self.fit_intercept:
176
+ n_params += 1
177
+ # Note that centering y and X with _preprocess_data does not work
178
+ # for quantile regression.
179
+
180
+ # The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
181
+ # So we rescale the penalty term, which is equivalent.
182
+ alpha = np.sum(sample_weight) * self.alpha
183
+
184
+ if self.solver in (
185
+ "highs-ds",
186
+ "highs-ipm",
187
+ "highs",
188
+ ) and sp_version < parse_version("1.6.0"):
189
+ raise ValueError(
190
+ f"Solver {self.solver} is only available "
191
+ f"with scipy>=1.6.0, got {sp_version}"
192
+ )
193
+ else:
194
+ solver = self.solver
195
+
196
+ if solver == "interior-point" and sp_version >= parse_version("1.11.0"):
197
+ raise ValueError(
198
+ f"Solver {solver} is not anymore available in SciPy >= 1.11.0."
199
+ )
200
+
201
+ if sparse.issparse(X) and solver not in ["highs", "highs-ds", "highs-ipm"]:
202
+ raise ValueError(
203
+ f"Solver {self.solver} does not support sparse X. "
204
+ "Use solver 'highs' for example."
205
+ )
206
+ # make default solver more stable
207
+ if self.solver_options is None and solver == "interior-point":
208
+ solver_options = {"lstsq": True}
209
+ else:
210
+ solver_options = self.solver_options
211
+
212
+ # After rescaling alpha, the minimization problem is
213
+ # min sum(pinball loss) + alpha * L1
214
+ # Use linear programming formulation of quantile regression
215
+ # min_x c x
216
+ # A_eq x = b_eq
217
+ # 0 <= x
218
+ # x = (s0, s, t0, t, u, v) = slack variables >= 0
219
+ # intercept = s0 - t0
220
+ # coef = s - t
221
+ # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
222
+ # residual = y - X@coef - intercept = u - v
223
+ # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
224
+ # b_eq = y
225
+ # p = n_features
226
+ # n = n_samples
227
+ # 1_n = vector of length n with entries equal one
228
+ # see https://stats.stackexchange.com/questions/384909/
229
+ #
230
+ # Filtering out zero sample weights from the beginning makes life
231
+ # easier for the linprog solver.
232
+ indices = np.nonzero(sample_weight)[0]
233
+ n_indices = len(indices)  # number of samples with non-zero weight; used instead of n_samples
234
+ if n_indices < len(sample_weight):
235
+ sample_weight = sample_weight[indices]
236
+ X = _safe_indexing(X, indices)
237
+ y = _safe_indexing(y, indices)
238
+ c = np.concatenate(
239
+ [
240
+ np.full(2 * n_params, fill_value=alpha),
241
+ sample_weight * self.quantile,
242
+ sample_weight * (1 - self.quantile),
243
+ ]
244
+ )
245
+ if self.fit_intercept:
246
+ # do not penalize the intercept
247
+ c[0] = 0
248
+ c[n_params] = 0
249
+
250
+ if solver in ["highs", "highs-ds", "highs-ipm"]:
251
+ # Note that highs methods always use a sparse CSC memory layout internally,
252
+ # even for optimization problems parametrized using dense numpy arrays.
253
+ # Therefore, we work with CSC matrices as early as possible to limit
254
+ # unnecessary repeated memory copies.
255
+ eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
256
+ if self.fit_intercept:
257
+ ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
258
+ A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
259
+ else:
260
+ A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
261
+ else:
262
+ eye = np.eye(n_indices)
263
+ if self.fit_intercept:
264
+ ones = np.ones((n_indices, 1))
265
+ A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
266
+ else:
267
+ A_eq = np.concatenate([X, -X, eye, -eye], axis=1)
268
+
269
+ b_eq = y
270
+
271
+ result = linprog(
272
+ c=c,
273
+ A_eq=A_eq,
274
+ b_eq=b_eq,
275
+ method=solver,
276
+ options=solver_options,
277
+ )
278
+ solution = result.x
279
+ if not result.success:
280
+ failure = {
281
+ 1: "Iteration limit reached.",
282
+ 2: "Problem appears to be infeasible.",
283
+ 3: "Problem appears to be unbounded.",
284
+ 4: "Numerical difficulties encountered.",
285
+ }
286
+ warnings.warn(
287
+ "Linear programming for QuantileRegressor did not succeed.\n"
288
+ f"Status is {result.status}: "
289
+ + failure.get(result.status, "unknown reason")
290
+ + "\n"
291
+ + "Result message of linprog:\n"
292
+ + result.message,
293
+ ConvergenceWarning,
294
+ )
295
+
296
+ # positive slack - negative slack
297
+ # solution is an array with (params_pos, params_neg, u, v)
298
+ params = solution[:n_params] - solution[n_params : 2 * n_params]
299
+
300
+ self.n_iter_ = result.nit
301
+
302
+ if self.fit_intercept:
303
+ self.coef_ = params[1:]
304
+ self.intercept_ = params[0]
305
+ else:
306
+ self.coef_ = params
307
+ self.intercept_ = 0.0
308
+ return self
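A short sketch of the coverage property the pinball loss delivers, exercising the `fit` above (editorial addition; data is synthetic and `solver="highs"` assumes scipy>=1.6.0):

import numpy as np
from sklearn.linear_model import QuantileRegressor

rng = np.random.RandomState(42)
X = rng.randn(200, 3)
y = X @ np.array([1.0, 2.0, -1.0]) + rng.standard_t(df=3, size=200)
reg = QuantileRegressor(quantile=0.9, alpha=0.0, solver="highs").fit(X, y)
# Roughly 90% of training targets should fall at or below the prediction.
print(np.mean(y <= reg.predict(X)))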
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag.py ADDED
@@ -0,0 +1,372 @@
1
+ """Solvers for Ridge and LogisticRegression using SAG algorithm"""
2
+
3
+ # Authors: Tom Dupre la Tour <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ import warnings
8
+
9
+ import numpy as np
10
+
11
+ from ..exceptions import ConvergenceWarning
12
+ from ..utils import check_array
13
+ from ..utils.extmath import row_norms
14
+ from ..utils.validation import _check_sample_weight
15
+ from ._base import make_dataset
16
+ from ._sag_fast import sag32, sag64
17
+
18
+
19
+ def get_auto_step_size(
20
+ max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
21
+ ):
22
+ """Compute automatic step size for SAG solver.
23
+
24
+ The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
25
+ the maximum squared sum of X over all samples.
26
+
27
+ Parameters
28
+ ----------
29
+ max_squared_sum : float
30
+ Maximum squared sum of X over samples.
31
+
32
+ alpha_scaled : float
33
+ Constant that multiplies the regularization term, scaled by
34
+ 1. / n_samples, the number of samples.
35
+
36
+ loss : {'log', 'squared', 'multinomial'}
37
+ The loss function used in SAG solver.
38
+
39
+ fit_intercept : bool
40
+ Specifies if a constant (a.k.a. bias or intercept) will be
41
+ added to the decision function.
42
+
43
+ n_samples : int, default=None
44
+ Number of rows in X. Useful if is_saga=True.
45
+
46
+ is_saga : bool, default=False
47
+ Whether to return step size for the SAGA algorithm or the SAG
48
+ algorithm.
49
+
50
+ Returns
51
+ -------
52
+ step_size : float
53
+ Step size used in SAG solver.
54
+
55
+ References
56
+ ----------
57
+ Schmidt, M., Roux, N. L., & Bach, F. (2013).
58
+ Minimizing finite sums with the stochastic average gradient
59
+ https://hal.inria.fr/hal-00860051/document
60
+
61
+ :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
62
+ "SAGA: A Fast Incremental Gradient Method With Support
63
+ for Non-Strongly Convex Composite Objectives" <1407.0202>`
64
+ """
65
+ if loss in ("log", "multinomial"):
66
+ L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
67
+ elif loss == "squared":
68
+ # inverse Lipschitz constant for squared loss
69
+ L = max_squared_sum + int(fit_intercept) + alpha_scaled
70
+ else:
71
+ raise ValueError(
72
+ "Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'"
73
+ % loss
74
+ )
75
+ if is_saga:
76
+ # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
77
+ # See Defazio et al. 2014
78
+ mun = min(2 * n_samples * alpha_scaled, L)
79
+ step = 1.0 / (2 * L + mun)
80
+ else:
81
+ # SAG theoretical step size is 1/16L but it is recommended to use 1 / L
82
+ # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
83
+ # slide 65
84
+ step = 1.0 / L
85
+ return step
86
+
87
+
88
+ def sag_solver(
89
+ X,
90
+ y,
91
+ sample_weight=None,
92
+ loss="log",
93
+ alpha=1.0,
94
+ beta=0.0,
95
+ max_iter=1000,
96
+ tol=0.001,
97
+ verbose=0,
98
+ random_state=None,
99
+ check_input=True,
100
+ max_squared_sum=None,
101
+ warm_start_mem=None,
102
+ is_saga=False,
103
+ ):
104
+ """SAG solver for Ridge and LogisticRegression.
105
+
106
+ SAG stands for Stochastic Average Gradient: the gradient of the loss is
107
+ estimated each sample at a time and the model is updated along the way with
108
+ a constant learning rate.
109
+
110
+ IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
111
+ same scale. You can normalize the data by using
112
+ sklearn.preprocessing.StandardScaler on your data before passing it to the
113
+ fit method.
114
+
115
+ This implementation works with data represented as dense numpy arrays or
116
+ sparse scipy arrays of floating point values for the features. It will
117
+ fit the data according to squared loss or log loss.
118
+
119
+ The regularizer is a penalty added to the loss function that shrinks model
120
+ parameters towards the zero vector using the squared euclidean norm L2.
121
+
122
+ .. versionadded:: 0.17
123
+
124
+ Parameters
125
+ ----------
126
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
127
+ Training data.
128
+
129
+ y : ndarray of shape (n_samples,)
130
+ Target values. With loss='multinomial', y must be label encoded
131
+ (see preprocessing.LabelEncoder).
132
+
133
+ sample_weight : array-like of shape (n_samples,), default=None
134
+ Weights applied to individual samples (1. for unweighted).
135
+
136
+ loss : {'log', 'squared', 'multinomial'}, default='log'
137
+ Loss function that will be optimized:
138
+ -'log' is the binary logistic loss, as used in LogisticRegression.
139
+ -'squared' is the squared loss, as used in Ridge.
140
+ -'multinomial' is the multinomial logistic loss, as used in
141
+ LogisticRegression.
142
+
143
+ .. versionadded:: 0.18
144
+ *loss='multinomial'*
145
+
146
+ alpha : float, default=1.
147
+ L2 regularization term in the objective function
148
+ ``(0.5 * alpha * || W ||_F^2)``.
149
+
150
+ beta : float, default=0.
151
+ L1 regularization term in the objective function
152
+ ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
153
+
154
+ max_iter : int, default=1000
155
+ The max number of passes over the training data if the stopping
156
+ criteria is not reached.
157
+
158
+ tol : float, default=0.001
159
+ The stopping criteria for the weights. The iterations will stop when
160
+ max(change in weights) / max(weights) < tol.
161
+
162
+ verbose : int, default=0
163
+ The verbosity level.
164
+
165
+ random_state : int, RandomState instance or None, default=None
166
+ Used when shuffling the data. Pass an int for reproducible output
167
+ across multiple function calls.
168
+ See :term:`Glossary <random_state>`.
169
+
170
+ check_input : bool, default=True
171
+ If False, the input arrays X and y will not be checked.
172
+
173
+ max_squared_sum : float, default=None
174
+ Maximum squared sum of X over samples. If None, it will be computed,
175
+ going through all the samples. The value should be precomputed
176
+ to speed up cross validation.
177
+
178
+ warm_start_mem : dict, default=None
179
+ The initialization parameters used for warm starting. Warm starting is
180
+ currently used in LogisticRegression but not in Ridge.
181
+ It contains:
182
+ - 'coef': the weight vector, with the intercept in last line
183
+ if the intercept is fitted.
184
+ - 'gradient_memory': the scalar gradient for all seen samples.
185
+ - 'sum_gradient': the sum of gradient over all seen samples,
186
+ for each feature.
187
+ - 'intercept_sum_gradient': the sum of gradient over all seen
188
+ samples, for the intercept.
189
+ - 'seen': array of boolean describing the seen samples.
190
+ - 'num_seen': the number of seen samples.
191
+
192
+ is_saga : bool, default=False
193
+ Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
194
+ better in the first epochs, and allow for l1 regularisation.
195
+
196
+ Returns
197
+ -------
198
+ coef_ : ndarray of shape (n_features,)
199
+ Weight vector.
200
+
201
+ n_iter_ : int
202
+ The number of full pass on all samples.
203
+
204
+ warm_start_mem : dict
205
+ Contains a 'coef' key with the fitted result, and possibly the
206
+ fitted intercept at the end of the array. Contains also other keys
207
+ used for warm starting.
208
+
209
+ Examples
210
+ --------
211
+ >>> import numpy as np
212
+ >>> from sklearn import linear_model
213
+ >>> n_samples, n_features = 10, 5
214
+ >>> rng = np.random.RandomState(0)
215
+ >>> X = rng.randn(n_samples, n_features)
216
+ >>> y = rng.randn(n_samples)
217
+ >>> clf = linear_model.Ridge(solver='sag')
218
+ >>> clf.fit(X, y)
219
+ Ridge(solver='sag')
220
+
221
+ >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
222
+ >>> y = np.array([1, 1, 2, 2])
223
+ >>> clf = linear_model.LogisticRegression(
224
+ ... solver='sag', multi_class='multinomial')
225
+ >>> clf.fit(X, y)
226
+ LogisticRegression(multi_class='multinomial', solver='sag')
227
+
228
+ References
229
+ ----------
230
+ Schmidt, M., Roux, N. L., & Bach, F. (2013).
231
+ Minimizing finite sums with the stochastic average gradient
232
+ https://hal.inria.fr/hal-00860051/document
233
+
234
+ :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
235
+ "SAGA: A Fast Incremental Gradient Method With Support
236
+ for Non-Strongly Convex Composite Objectives" <1407.0202>`
237
+
238
+ See Also
239
+ --------
240
+ Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
241
+ LogisticRegression, SGDClassifier, LinearSVC, Perceptron
242
+ """
243
+ if warm_start_mem is None:
244
+ warm_start_mem = {}
245
+ # Ridge default max_iter is None
246
+ if max_iter is None:
247
+ max_iter = 1000
248
+
249
+ if check_input:
250
+ _dtype = [np.float64, np.float32]
251
+ X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C")
252
+ y = check_array(y, dtype=_dtype, ensure_2d=False, order="C")
253
+
254
+ n_samples, n_features = X.shape[0], X.shape[1]
255
+ # As in SGD, the alpha is scaled by n_samples.
256
+ alpha_scaled = float(alpha) / n_samples
257
+ beta_scaled = float(beta) / n_samples
258
+
259
+ # if loss == 'multinomial', y should be label encoded.
260
+ n_classes = int(y.max()) + 1 if loss == "multinomial" else 1
261
+
262
+ # initialization
263
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
264
+
265
+ if "coef" in warm_start_mem.keys():
266
+ coef_init = warm_start_mem["coef"]
267
+ else:
268
+ # assume fit_intercept is False
269
+ coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")
270
+
271
+ # coef_init contains possibly the intercept_init at the end.
272
+ # Note that Ridge centers the data before fitting, so fit_intercept=False.
273
+ fit_intercept = coef_init.shape[0] == (n_features + 1)
274
+ if fit_intercept:
275
+ intercept_init = coef_init[-1, :]
276
+ coef_init = coef_init[:-1, :]
277
+ else:
278
+ intercept_init = np.zeros(n_classes, dtype=X.dtype)
279
+
280
+ if "intercept_sum_gradient" in warm_start_mem.keys():
281
+ intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"]
282
+ else:
283
+ intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)
284
+
285
+ if "gradient_memory" in warm_start_mem.keys():
286
+ gradient_memory_init = warm_start_mem["gradient_memory"]
287
+ else:
288
+ gradient_memory_init = np.zeros(
289
+ (n_samples, n_classes), dtype=X.dtype, order="C"
290
+ )
291
+ if "sum_gradient" in warm_start_mem.keys():
292
+ sum_gradient_init = warm_start_mem["sum_gradient"]
293
+ else:
294
+ sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")
295
+
296
+ if "seen" in warm_start_mem.keys():
297
+ seen_init = warm_start_mem["seen"]
298
+ else:
299
+ seen_init = np.zeros(n_samples, dtype=np.int32, order="C")
300
+
301
+ if "num_seen" in warm_start_mem.keys():
302
+ num_seen_init = warm_start_mem["num_seen"]
303
+ else:
304
+ num_seen_init = 0
305
+
306
+ dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
307
+
308
+ if max_squared_sum is None:
309
+ max_squared_sum = row_norms(X, squared=True).max()
310
+ step_size = get_auto_step_size(
311
+ max_squared_sum,
312
+ alpha_scaled,
313
+ loss,
314
+ fit_intercept,
315
+ n_samples=n_samples,
316
+ is_saga=is_saga,
317
+ )
318
+ if step_size * alpha_scaled == 1:
319
+ raise ZeroDivisionError(
320
+ "Current sag implementation does not handle "
321
+ "the case step_size * alpha_scaled == 1"
322
+ )
323
+
324
+ sag = sag64 if X.dtype == np.float64 else sag32
325
+ num_seen, n_iter_ = sag(
326
+ dataset,
327
+ coef_init,
328
+ intercept_init,
329
+ n_samples,
330
+ n_features,
331
+ n_classes,
332
+ tol,
333
+ max_iter,
334
+ loss,
335
+ step_size,
336
+ alpha_scaled,
337
+ beta_scaled,
338
+ sum_gradient_init,
339
+ gradient_memory_init,
340
+ seen_init,
341
+ num_seen_init,
342
+ fit_intercept,
343
+ intercept_sum_gradient,
344
+ intercept_decay,
345
+ is_saga,
346
+ verbose,
347
+ )
348
+
349
+ if n_iter_ == max_iter:
350
+ warnings.warn(
351
+ "The max_iter was reached which means the coef_ did not converge",
352
+ ConvergenceWarning,
353
+ )
354
+
355
+ if fit_intercept:
356
+ coef_init = np.vstack((coef_init, intercept_init))
357
+
358
+ warm_start_mem = {
359
+ "coef": coef_init,
360
+ "sum_gradient": sum_gradient_init,
361
+ "intercept_sum_gradient": intercept_sum_gradient,
362
+ "gradient_memory": gradient_memory_init,
363
+ "seen": seen_init,
364
+ "num_seen": num_seen,
365
+ }
366
+
367
+ if loss == "multinomial":
368
+ coef_ = coef_init.T
369
+ else:
370
+ coef_ = coef_init[:, 0]
371
+
372
+ return coef_, n_iter_, warm_start_mem
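Both helpers in _sag.py 上 are private and are normally reached through `Ridge(solver="sag")` or `LogisticRegression(solver="sag"/"saga")`, but they are callable directly. A minimal sketch of the squared-loss path on hypothetical data (internal API, subject to change between releases):

import numpy as np
from sklearn.linear_model._sag import get_auto_step_size, sag_solver
from sklearn.utils.extmath import row_norms

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
y = X @ np.array([1.0, -2.0, 0.5])

# The step size the solver derives internally: 1 / L, with L the
# Lipschitz constant computed in get_auto_step_size above. Note the
# same alpha / n_samples scaling the solver applies itself.
max_squared_sum = row_norms(X, squared=True).max()
step = get_auto_step_size(
    max_squared_sum, alpha_scaled=1.0 / 20, loss="squared", fit_intercept=False
)

coef, n_iter, warm_start_mem = sag_solver(X, y, loss="squared", alpha=1.0, tol=1e-6)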
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (385 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pxd ADDED
@@ -0,0 +1,26 @@
+ # License: BSD 3 clause
+ """Helper to load LossFunction from sgd_fast.pyx to sag_fast.pyx"""
+ 
+ cdef class LossFunction:
+     cdef double loss(self, double p, double y) noexcept nogil
+     cdef double dloss(self, double p, double y) noexcept nogil
+ 
+ 
+ cdef class Regression(LossFunction):
+     cdef double loss(self, double p, double y) noexcept nogil
+     cdef double dloss(self, double p, double y) noexcept nogil
+ 
+ 
+ cdef class Classification(LossFunction):
+     cdef double loss(self, double p, double y) noexcept nogil
+     cdef double dloss(self, double p, double y) noexcept nogil
+ 
+ 
+ cdef class Log(Classification):
+     cdef double loss(self, double p, double y) noexcept nogil
+     cdef double dloss(self, double p, double y) noexcept nogil
+ 
+ 
+ cdef class SquaredLoss(Regression):
+     cdef double loss(self, double p, double y) noexcept nogil
+     cdef double dloss(self, double p, double y) noexcept nogil
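The .pxd only declares the interface; the bodies live in _sgd_fast.pyx, which is not part of this diff. A plain-Python mirror of the two concrete losses declared above, as commonly defined (half squared error for regression, logistic loss over labels in {-1, +1} for classification; no overflow guards, unlike the Cython version), useful for checking gradients by hand:

import math

def squared_loss(p, y):
    # SquaredLoss.loss: 0.5 * (p - y)^2
    return 0.5 * (p - y) ** 2

def squared_dloss(p, y):
    # SquaredLoss.dloss: derivative w.r.t. the prediction p
    return p - y

def log_loss(p, y):
    # Log.loss: log(1 + exp(-y * p))
    return math.log(1.0 + math.exp(-y * p))

def log_dloss(p, y):
    # Log.dloss: -y / (1 + exp(y * p))
    return -y / (1.0 + math.exp(y * p))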
llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py ADDED
@@ -0,0 +1,2605 @@
1
+ # Authors: Peter Prettenhofer <[email protected]> (main author)
2
+ # Mathieu Blondel (partial_fit support)
3
+ #
4
+ # License: BSD 3 clause
5
+ """Classification, regression and One-Class SVM using Stochastic Gradient
6
+ Descent (SGD).
7
+ """
8
+
9
+ import warnings
10
+ from abc import ABCMeta, abstractmethod
11
+ from numbers import Integral, Real
12
+
13
+ import numpy as np
14
+
15
+ from ..base import (
16
+ BaseEstimator,
17
+ OutlierMixin,
18
+ RegressorMixin,
19
+ _fit_context,
20
+ clone,
21
+ is_classifier,
22
+ )
23
+ from ..exceptions import ConvergenceWarning
24
+ from ..model_selection import ShuffleSplit, StratifiedShuffleSplit
25
+ from ..utils import check_random_state, compute_class_weight, deprecated
26
+ from ..utils._param_validation import Hidden, Interval, StrOptions
27
+ from ..utils.extmath import safe_sparse_dot
28
+ from ..utils.metaestimators import available_if
29
+ from ..utils.multiclass import _check_partial_fit_first_call
30
+ from ..utils.parallel import Parallel, delayed
31
+ from ..utils.validation import _check_sample_weight, check_is_fitted
32
+ from ._base import LinearClassifierMixin, SparseCoefMixin, make_dataset
33
+ from ._sgd_fast import (
34
+ EpsilonInsensitive,
35
+ Hinge,
36
+ Huber,
37
+ Log,
38
+ ModifiedHuber,
39
+ SquaredEpsilonInsensitive,
40
+ SquaredHinge,
41
+ SquaredLoss,
42
+ _plain_sgd32,
43
+ _plain_sgd64,
44
+ )
45
+
46
+ LEARNING_RATE_TYPES = {
47
+ "constant": 1,
48
+ "optimal": 2,
49
+ "invscaling": 3,
50
+ "adaptive": 4,
51
+ "pa1": 5,
52
+ "pa2": 6,
53
+ }
54
+
55
+ PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
56
+
57
+ DEFAULT_EPSILON = 0.1
58
+ # Default value of ``epsilon`` parameter.
59
+
60
+ MAX_INT = np.iinfo(np.int32).max
61
+
62
+
63
+ class _ValidationScoreCallback:
64
+ """Callback for early stopping based on validation score"""
65
+
66
+ def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None):
67
+ self.estimator = clone(estimator)
68
+ self.estimator.t_ = 1 # to pass check_is_fitted
69
+ if classes is not None:
70
+ self.estimator.classes_ = classes
71
+ self.X_val = X_val
72
+ self.y_val = y_val
73
+ self.sample_weight_val = sample_weight_val
74
+
75
+ def __call__(self, coef, intercept):
76
+ est = self.estimator
77
+ est.coef_ = coef.reshape(1, -1)
78
+ est.intercept_ = np.atleast_1d(intercept)
79
+ return est.score(self.X_val, self.y_val, self.sample_weight_val)
80
+
81
+
82
+ class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
83
+ """Base class for SGD classification and regression."""
84
+
85
+ _parameter_constraints: dict = {
86
+ "fit_intercept": ["boolean"],
87
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
88
+ "tol": [Interval(Real, 0, None, closed="left"), None],
89
+ "shuffle": ["boolean"],
90
+ "verbose": ["verbose"],
91
+ "random_state": ["random_state"],
92
+ "warm_start": ["boolean"],
93
+ "average": [Interval(Integral, 0, None, closed="left"), bool, np.bool_],
94
+ }
95
+
96
+ def __init__(
97
+ self,
98
+ loss,
99
+ *,
100
+ penalty="l2",
101
+ alpha=0.0001,
102
+ C=1.0,
103
+ l1_ratio=0.15,
104
+ fit_intercept=True,
105
+ max_iter=1000,
106
+ tol=1e-3,
107
+ shuffle=True,
108
+ verbose=0,
109
+ epsilon=0.1,
110
+ random_state=None,
111
+ learning_rate="optimal",
112
+ eta0=0.0,
113
+ power_t=0.5,
114
+ early_stopping=False,
115
+ validation_fraction=0.1,
116
+ n_iter_no_change=5,
117
+ warm_start=False,
118
+ average=False,
119
+ ):
120
+ self.loss = loss
121
+ self.penalty = penalty
122
+ self.learning_rate = learning_rate
123
+ self.epsilon = epsilon
124
+ self.alpha = alpha
125
+ self.C = C
126
+ self.l1_ratio = l1_ratio
127
+ self.fit_intercept = fit_intercept
128
+ self.shuffle = shuffle
129
+ self.random_state = random_state
130
+ self.verbose = verbose
131
+ self.eta0 = eta0
132
+ self.power_t = power_t
133
+ self.early_stopping = early_stopping
134
+ self.validation_fraction = validation_fraction
135
+ self.n_iter_no_change = n_iter_no_change
136
+ self.warm_start = warm_start
137
+ self.average = average
138
+ self.max_iter = max_iter
139
+ self.tol = tol
140
+
141
+ @abstractmethod
142
+ def fit(self, X, y):
143
+ """Fit model."""
144
+
145
+ def _more_validate_params(self, for_partial_fit=False):
146
+ """Validate input params."""
147
+ if self.early_stopping and for_partial_fit:
148
+ raise ValueError("early_stopping should be False with partial_fit")
149
+ if (
150
+ self.learning_rate in ("constant", "invscaling", "adaptive")
151
+ and self.eta0 <= 0.0
152
+ ):
153
+ raise ValueError("eta0 must be > 0")
154
+ if self.learning_rate == "optimal" and self.alpha == 0:
155
+ raise ValueError(
156
+ "alpha must be > 0 since "
157
+ "learning_rate is 'optimal'. alpha is used "
158
+ "to compute the optimal learning rate."
159
+ )
160
+
161
+ # raises ValueError if not registered
162
+ self._get_penalty_type(self.penalty)
163
+ self._get_learning_rate_type(self.learning_rate)
164
+
165
+ def _get_loss_function(self, loss):
166
+ """Get concrete ``LossFunction`` object for str ``loss``."""
167
+ loss_ = self.loss_functions[loss]
168
+ loss_class, args = loss_[0], loss_[1:]
169
+ if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"):
170
+ args = (self.epsilon,)
171
+ return loss_class(*args)
172
+
173
+ def _get_learning_rate_type(self, learning_rate):
174
+ return LEARNING_RATE_TYPES[learning_rate]
175
+
176
+ def _get_penalty_type(self, penalty):
177
+ penalty = str(penalty).lower()
178
+ return PENALTY_TYPES[penalty]
179
+
180
+ def _allocate_parameter_mem(
181
+ self,
182
+ n_classes,
183
+ n_features,
184
+ input_dtype,
185
+ coef_init=None,
186
+ intercept_init=None,
187
+ one_class=0,
188
+ ):
189
+ """Allocate mem for parameters; initialize if provided."""
190
+ if n_classes > 2:
191
+ # allocate coef_ for multi-class
192
+ if coef_init is not None:
193
+ coef_init = np.asarray(coef_init, dtype=input_dtype, order="C")
194
+ if coef_init.shape != (n_classes, n_features):
195
+ raise ValueError("Provided ``coef_`` does not match dataset. ")
196
+ self.coef_ = coef_init
197
+ else:
198
+ self.coef_ = np.zeros(
199
+ (n_classes, n_features), dtype=input_dtype, order="C"
200
+ )
201
+
202
+ # allocate intercept_ for multi-class
203
+ if intercept_init is not None:
204
+ intercept_init = np.asarray(
205
+ intercept_init, order="C", dtype=input_dtype
206
+ )
207
+ if intercept_init.shape != (n_classes,):
208
+ raise ValueError("Provided intercept_init does not match dataset.")
209
+ self.intercept_ = intercept_init
210
+ else:
211
+ self.intercept_ = np.zeros(n_classes, dtype=input_dtype, order="C")
212
+ else:
213
+ # allocate coef_
214
+ if coef_init is not None:
215
+ coef_init = np.asarray(coef_init, dtype=input_dtype, order="C")
216
+ coef_init = coef_init.ravel()
217
+ if coef_init.shape != (n_features,):
218
+ raise ValueError("Provided coef_init does not match dataset.")
219
+ self.coef_ = coef_init
220
+ else:
221
+ self.coef_ = np.zeros(n_features, dtype=input_dtype, order="C")
222
+
223
+ # allocate intercept_
224
+ if intercept_init is not None:
225
+ intercept_init = np.asarray(intercept_init, dtype=input_dtype)
226
+ if intercept_init.shape != (1,) and intercept_init.shape != ():
227
+ raise ValueError("Provided intercept_init does not match dataset.")
228
+ if one_class:
229
+ self.offset_ = intercept_init.reshape(
230
+ 1,
231
+ )
232
+ else:
233
+ self.intercept_ = intercept_init.reshape(
234
+ 1,
235
+ )
236
+ else:
237
+ if one_class:
238
+ self.offset_ = np.zeros(1, dtype=input_dtype, order="C")
239
+ else:
240
+ self.intercept_ = np.zeros(1, dtype=input_dtype, order="C")
241
+
242
+ # initialize average parameters
243
+ if self.average > 0:
244
+ self._standard_coef = self.coef_
245
+ self._average_coef = np.zeros(
246
+ self.coef_.shape, dtype=input_dtype, order="C"
247
+ )
248
+ if one_class:
249
+ self._standard_intercept = 1 - self.offset_
250
+ else:
251
+ self._standard_intercept = self.intercept_
252
+
253
+ self._average_intercept = np.zeros(
254
+ self._standard_intercept.shape, dtype=input_dtype, order="C"
255
+ )
256
+
257
+ def _make_validation_split(self, y, sample_mask):
258
+ """Split the dataset between training set and validation set.
259
+
260
+ Parameters
261
+ ----------
262
+ y : ndarray of shape (n_samples, )
263
+ Target values.
264
+
265
+ sample_mask : ndarray of shape (n_samples, )
266
+ A boolean array indicating whether each sample should be included
267
+ for validation set.
268
+
269
+ Returns
270
+ -------
271
+ validation_mask : ndarray of shape (n_samples, )
272
+ Equal to True on the validation set, False on the training set.
273
+ """
274
+ n_samples = y.shape[0]
275
+ validation_mask = np.zeros(n_samples, dtype=np.bool_)
276
+ if not self.early_stopping:
277
+ # use the full set for training, with an empty validation set
278
+ return validation_mask
279
+
280
+ if is_classifier(self):
281
+ splitter_type = StratifiedShuffleSplit
282
+ else:
283
+ splitter_type = ShuffleSplit
284
+ cv = splitter_type(
285
+ test_size=self.validation_fraction, random_state=self.random_state
286
+ )
287
+ idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
288
+
289
+ if not np.any(sample_mask[idx_val]):
290
+ raise ValueError(
291
+ "The sample weights for validation set are all zero, consider using a"
292
+ " different random state."
293
+ )
294
+
295
+ if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
296
+ raise ValueError(
297
+ "Splitting %d samples into a train set and a validation set "
298
+ "with validation_fraction=%r led to an empty set (%d and %d "
299
+ "samples). Please either change validation_fraction, increase "
300
+ "number of samples, or disable early_stopping."
301
+ % (
302
+ n_samples,
303
+ self.validation_fraction,
304
+ idx_train.shape[0],
305
+ idx_val.shape[0],
306
+ )
307
+ )
308
+
309
+ validation_mask[idx_val] = True
310
+ return validation_mask
311
+
312
+ def _make_validation_score_cb(
313
+ self, validation_mask, X, y, sample_weight, classes=None
314
+ ):
315
+ if not self.early_stopping:
316
+ return None
317
+
318
+ return _ValidationScoreCallback(
319
+ self,
320
+ X[validation_mask],
321
+ y[validation_mask],
322
+ sample_weight[validation_mask],
323
+ classes=classes,
324
+ )
325
+
326
+ # TODO(1.6): Remove
327
+ # mypy error: Decorated property not supported
328
+ @deprecated( # type: ignore
329
+ "Attribute `loss_function_` was deprecated in version 1.4 and will be removed "
330
+ "in 1.6."
331
+ )
332
+ @property
333
+ def loss_function_(self):
334
+ return self._loss_function_
335
+
336
+
337
+ def _prepare_fit_binary(est, y, i, input_dtye):
338
+ """Initialization for fit_binary.
339
+
340
+ Returns y, coef, intercept, average_coef, average_intercept.
341
+ """
342
+ y_i = np.ones(y.shape, dtype=input_dtye, order="C")
343
+ y_i[y != est.classes_[i]] = -1.0
344
+ average_intercept = 0
345
+ average_coef = None
346
+
347
+ if len(est.classes_) == 2:
348
+ if not est.average:
349
+ coef = est.coef_.ravel()
350
+ intercept = est.intercept_[0]
351
+ else:
352
+ coef = est._standard_coef.ravel()
353
+ intercept = est._standard_intercept[0]
354
+ average_coef = est._average_coef.ravel()
355
+ average_intercept = est._average_intercept[0]
356
+ else:
357
+ if not est.average:
358
+ coef = est.coef_[i]
359
+ intercept = est.intercept_[i]
360
+ else:
361
+ coef = est._standard_coef[i]
362
+ intercept = est._standard_intercept[i]
363
+ average_coef = est._average_coef[i]
364
+ average_intercept = est._average_intercept[i]
365
+
366
+ return y_i, coef, intercept, average_coef, average_intercept
367
+
368
+
369
+ def fit_binary(
370
+ est,
371
+ i,
372
+ X,
373
+ y,
374
+ alpha,
375
+ C,
376
+ learning_rate,
377
+ max_iter,
378
+ pos_weight,
379
+ neg_weight,
380
+ sample_weight,
381
+ validation_mask=None,
382
+ random_state=None,
383
+ ):
384
+ """Fit a single binary classifier.
385
+
386
+ The i'th class is considered the "positive" class.
387
+
388
+ Parameters
389
+ ----------
390
+ est : Estimator object
391
+ The estimator to fit
392
+
393
+ i : int
394
+ Index of the positive class
395
+
396
+ X : numpy array or sparse matrix of shape [n_samples,n_features]
397
+ Training data
398
+
399
+ y : numpy array of shape [n_samples, ]
400
+ Target values
401
+
402
+ alpha : float
403
+ The regularization parameter
404
+
405
+ C : float
406
+ Maximum step size for passive aggressive
407
+
408
+ learning_rate : str
409
+ The learning rate. Accepted values are 'constant', 'optimal',
410
+ 'invscaling', 'pa1' and 'pa2'.
411
+
412
+ max_iter : int
413
+ The maximum number of iterations (epochs)
414
+
415
+ pos_weight : float
416
+ The weight of the positive class
417
+
418
+ neg_weight : float
419
+ The weight of the negative class
420
+
421
+ sample_weight : numpy array of shape [n_samples, ]
422
+ The weight of each sample
423
+
424
+ validation_mask : numpy array of shape [n_samples, ], default=None
425
+ Precomputed validation mask in case _fit_binary is called in the
426
+ context of a one-vs-rest reduction.
427
+
428
+ random_state : int, RandomState instance, default=None
429
+ If int, random_state is the seed used by the random number generator;
430
+ If RandomState instance, random_state is the random number generator;
431
+ If None, the random number generator is the RandomState instance used
432
+ by `np.random`.
433
+ """
434
+ # if average is not true, average_coef, and average_intercept will be
435
+ # unused
436
+ y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary(
437
+ est, y, i, input_dtye=X.dtype
438
+ )
439
+ assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
440
+
441
+ random_state = check_random_state(random_state)
442
+ dataset, intercept_decay = make_dataset(
443
+ X, y_i, sample_weight, random_state=random_state
444
+ )
445
+
446
+ penalty_type = est._get_penalty_type(est.penalty)
447
+ learning_rate_type = est._get_learning_rate_type(learning_rate)
448
+
449
+ if validation_mask is None:
450
+ validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0)
451
+ classes = np.array([-1, 1], dtype=y_i.dtype)
452
+ validation_score_cb = est._make_validation_score_cb(
453
+ validation_mask, X, y_i, sample_weight, classes=classes
454
+ )
455
+
456
+ # numpy mtrand expects a C long which is a signed 32 bit integer under
457
+ # Windows
458
+ seed = random_state.randint(MAX_INT)
459
+
460
+ tol = est.tol if est.tol is not None else -np.inf
461
+
462
+ _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
463
+ coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(
464
+ coef,
465
+ intercept,
466
+ average_coef,
467
+ average_intercept,
468
+ est._loss_function_,
469
+ penalty_type,
470
+ alpha,
471
+ C,
472
+ est.l1_ratio,
473
+ dataset,
474
+ validation_mask,
475
+ est.early_stopping,
476
+ validation_score_cb,
477
+ int(est.n_iter_no_change),
478
+ max_iter,
479
+ tol,
480
+ int(est.fit_intercept),
481
+ int(est.verbose),
482
+ int(est.shuffle),
483
+ seed,
484
+ pos_weight,
485
+ neg_weight,
486
+ learning_rate_type,
487
+ est.eta0,
488
+ est.power_t,
489
+ 0,
490
+ est.t_,
491
+ intercept_decay,
492
+ est.average,
493
+ )
494
+
495
+ if est.average:
496
+ if len(est.classes_) == 2:
497
+ est._average_intercept[0] = average_intercept
498
+ else:
499
+ est._average_intercept[i] = average_intercept
500
+
501
+ return coef, intercept, n_iter_
502
+
503
+
504
+ def _get_plain_sgd_function(input_dtype):
505
+ return _plain_sgd32 if input_dtype == np.float32 else _plain_sgd64
506
+
507
+
508
+ class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
509
+ loss_functions = {
510
+ "hinge": (Hinge, 1.0),
511
+ "squared_hinge": (SquaredHinge, 1.0),
512
+ "perceptron": (Hinge, 0.0),
513
+ "log_loss": (Log,),
514
+ "modified_huber": (ModifiedHuber,),
515
+ "squared_error": (SquaredLoss,),
516
+ "huber": (Huber, DEFAULT_EPSILON),
517
+ "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
518
+ "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON),
519
+ }
520
+
521
+ _parameter_constraints: dict = {
522
+ **BaseSGD._parameter_constraints,
523
+ "loss": [StrOptions(set(loss_functions))],
524
+ "early_stopping": ["boolean"],
525
+ "validation_fraction": [Interval(Real, 0, 1, closed="neither")],
526
+ "n_iter_no_change": [Interval(Integral, 1, None, closed="left")],
527
+ "n_jobs": [Integral, None],
528
+ "class_weight": [StrOptions({"balanced"}), dict, None],
529
+ }
530
+
531
+ @abstractmethod
532
+ def __init__(
533
+ self,
534
+ loss="hinge",
535
+ *,
536
+ penalty="l2",
537
+ alpha=0.0001,
538
+ l1_ratio=0.15,
539
+ fit_intercept=True,
540
+ max_iter=1000,
541
+ tol=1e-3,
542
+ shuffle=True,
543
+ verbose=0,
544
+ epsilon=DEFAULT_EPSILON,
545
+ n_jobs=None,
546
+ random_state=None,
547
+ learning_rate="optimal",
548
+ eta0=0.0,
549
+ power_t=0.5,
550
+ early_stopping=False,
551
+ validation_fraction=0.1,
552
+ n_iter_no_change=5,
553
+ class_weight=None,
554
+ warm_start=False,
555
+ average=False,
556
+ ):
557
+ super().__init__(
558
+ loss=loss,
559
+ penalty=penalty,
560
+ alpha=alpha,
561
+ l1_ratio=l1_ratio,
562
+ fit_intercept=fit_intercept,
563
+ max_iter=max_iter,
564
+ tol=tol,
565
+ shuffle=shuffle,
566
+ verbose=verbose,
567
+ epsilon=epsilon,
568
+ random_state=random_state,
569
+ learning_rate=learning_rate,
570
+ eta0=eta0,
571
+ power_t=power_t,
572
+ early_stopping=early_stopping,
573
+ validation_fraction=validation_fraction,
574
+ n_iter_no_change=n_iter_no_change,
575
+ warm_start=warm_start,
576
+ average=average,
577
+ )
578
+ self.class_weight = class_weight
579
+ self.n_jobs = n_jobs
580
+
581
+ def _partial_fit(
582
+ self,
583
+ X,
584
+ y,
585
+ alpha,
586
+ C,
587
+ loss,
588
+ learning_rate,
589
+ max_iter,
590
+ classes,
591
+ sample_weight,
592
+ coef_init,
593
+ intercept_init,
594
+ ):
595
+ first_call = not hasattr(self, "classes_")
596
+ X, y = self._validate_data(
597
+ X,
598
+ y,
599
+ accept_sparse="csr",
600
+ dtype=[np.float64, np.float32],
601
+ order="C",
602
+ accept_large_sparse=False,
603
+ reset=first_call,
604
+ )
605
+
606
+ n_samples, n_features = X.shape
607
+
608
+ _check_partial_fit_first_call(self, classes)
609
+
610
+ n_classes = self.classes_.shape[0]
611
+
612
+ # Allocate datastructures from input arguments
613
+ self._expanded_class_weight = compute_class_weight(
614
+ self.class_weight, classes=self.classes_, y=y
615
+ )
616
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
617
+
618
+ if getattr(self, "coef_", None) is None or coef_init is not None:
619
+ self._allocate_parameter_mem(
620
+ n_classes=n_classes,
621
+ n_features=n_features,
622
+ input_dtype=X.dtype,
623
+ coef_init=coef_init,
624
+ intercept_init=intercept_init,
625
+ )
626
+ elif n_features != self.coef_.shape[-1]:
627
+ raise ValueError(
628
+ "Number of features %d does not match previous data %d."
629
+ % (n_features, self.coef_.shape[-1])
630
+ )
631
+
632
+ self._loss_function_ = self._get_loss_function(loss)
633
+ if not hasattr(self, "t_"):
634
+ self.t_ = 1.0
635
+
636
+ # delegate to concrete training procedure
637
+ if n_classes > 2:
638
+ self._fit_multiclass(
639
+ X,
640
+ y,
641
+ alpha=alpha,
642
+ C=C,
643
+ learning_rate=learning_rate,
644
+ sample_weight=sample_weight,
645
+ max_iter=max_iter,
646
+ )
647
+ elif n_classes == 2:
648
+ self._fit_binary(
649
+ X,
650
+ y,
651
+ alpha=alpha,
652
+ C=C,
653
+ learning_rate=learning_rate,
654
+ sample_weight=sample_weight,
655
+ max_iter=max_iter,
656
+ )
657
+ else:
658
+ raise ValueError(
659
+ "The number of classes has to be greater than one; got %d class"
660
+ % n_classes
661
+ )
662
+
663
+ return self
664
+
665
+ def _fit(
666
+ self,
667
+ X,
668
+ y,
669
+ alpha,
670
+ C,
671
+ loss,
672
+ learning_rate,
673
+ coef_init=None,
674
+ intercept_init=None,
675
+ sample_weight=None,
676
+ ):
677
+ if hasattr(self, "classes_"):
678
+ # delete the attribute otherwise _partial_fit thinks it's not the first call
679
+ delattr(self, "classes_")
680
+
681
+ # labels can be encoded as float, int, or string literals
682
+ # np.unique sorts in asc order; largest class id is positive class
683
+ y = self._validate_data(y=y)
684
+ classes = np.unique(y)
685
+
686
+ if self.warm_start and hasattr(self, "coef_"):
687
+ if coef_init is None:
688
+ coef_init = self.coef_
689
+ if intercept_init is None:
690
+ intercept_init = self.intercept_
691
+ else:
692
+ self.coef_ = None
693
+ self.intercept_ = None
694
+
695
+ if self.average > 0:
696
+ self._standard_coef = self.coef_
697
+ self._standard_intercept = self.intercept_
698
+ self._average_coef = None
699
+ self._average_intercept = None
700
+
701
+ # Clear iteration count for multiple call to fit.
702
+ self.t_ = 1.0
703
+
704
+ self._partial_fit(
705
+ X,
706
+ y,
707
+ alpha,
708
+ C,
709
+ loss,
710
+ learning_rate,
711
+ self.max_iter,
712
+ classes,
713
+ sample_weight,
714
+ coef_init,
715
+ intercept_init,
716
+ )
717
+
718
+ if (
719
+ self.tol is not None
720
+ and self.tol > -np.inf
721
+ and self.n_iter_ == self.max_iter
722
+ ):
723
+ warnings.warn(
724
+ (
725
+ "Maximum number of iteration reached before "
726
+ "convergence. Consider increasing max_iter to "
727
+ "improve the fit."
728
+ ),
729
+ ConvergenceWarning,
730
+ )
731
+ return self
732
+
733
+ def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter):
734
+ """Fit a binary classifier on X and y."""
735
+ coef, intercept, n_iter_ = fit_binary(
736
+ self,
737
+ 1,
738
+ X,
739
+ y,
740
+ alpha,
741
+ C,
742
+ learning_rate,
743
+ max_iter,
744
+ self._expanded_class_weight[1],
745
+ self._expanded_class_weight[0],
746
+ sample_weight,
747
+ random_state=self.random_state,
748
+ )
749
+
750
+ self.t_ += n_iter_ * X.shape[0]
751
+ self.n_iter_ = n_iter_
752
+
753
+ # need to be 2d
754
+ if self.average > 0:
755
+ if self.average <= self.t_ - 1:
756
+ self.coef_ = self._average_coef.reshape(1, -1)
757
+ self.intercept_ = self._average_intercept
758
+ else:
759
+ self.coef_ = self._standard_coef.reshape(1, -1)
760
+ self._standard_intercept = np.atleast_1d(intercept)
761
+ self.intercept_ = self._standard_intercept
762
+ else:
763
+ self.coef_ = coef.reshape(1, -1)
764
+ # intercept is a float, need to convert it to an array of length 1
765
+ self.intercept_ = np.atleast_1d(intercept)
766
+
767
+ def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter):
768
+ """Fit a multi-class classifier by combining binary classifiers
769
+
770
+ Each binary classifier predicts one class versus all others. This
771
+ strategy is called OvA (One versus All) or OvR (One versus Rest).
772
+ """
773
+ # Precompute the validation split using the multiclass labels
774
+ # to ensure proper balancing of the classes.
775
+ validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
776
+
777
+ # Use joblib to fit OvA in parallel.
778
+ # Pick the random seed for each job outside of fit_binary to avoid
779
+ # sharing the estimator random state between threads which could lead
780
+ # to non-deterministic behavior
781
+ random_state = check_random_state(self.random_state)
782
+ seeds = random_state.randint(MAX_INT, size=len(self.classes_))
783
+ result = Parallel(
784
+ n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem"
785
+ )(
786
+ delayed(fit_binary)(
787
+ self,
788
+ i,
789
+ X,
790
+ y,
791
+ alpha,
792
+ C,
793
+ learning_rate,
794
+ max_iter,
795
+ self._expanded_class_weight[i],
796
+ 1.0,
797
+ sample_weight,
798
+ validation_mask=validation_mask,
799
+ random_state=seed,
800
+ )
801
+ for i, seed in enumerate(seeds)
802
+ )
803
+
804
+ # take the maximum of n_iter_ over every binary fit
805
+ n_iter_ = 0.0
806
+ for i, (_, intercept, n_iter_i) in enumerate(result):
807
+ self.intercept_[i] = intercept
808
+ n_iter_ = max(n_iter_, n_iter_i)
809
+
810
+ self.t_ += n_iter_ * X.shape[0]
811
+ self.n_iter_ = n_iter_
812
+
813
+ if self.average > 0:
814
+ if self.average <= self.t_ - 1.0:
815
+ self.coef_ = self._average_coef
816
+ self.intercept_ = self._average_intercept
817
+ else:
818
+ self.coef_ = self._standard_coef
819
+ self._standard_intercept = np.atleast_1d(self.intercept_)
820
+ self.intercept_ = self._standard_intercept
821
+
822
+ @_fit_context(prefer_skip_nested_validation=True)
823
+ def partial_fit(self, X, y, classes=None, sample_weight=None):
824
+ """Perform one epoch of stochastic gradient descent on given samples.
825
+
826
+ Internally, this method uses ``max_iter = 1``. Therefore, it is not
827
+ guaranteed that a minimum of the cost function is reached after calling
828
+ it once. Matters such as objective convergence, early stopping, and
829
+ learning rate adjustments should be handled by the user.
830
+
831
+ Parameters
832
+ ----------
833
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
834
+ Subset of the training data.
835
+
836
+ y : ndarray of shape (n_samples,)
837
+ Subset of the target values.
838
+
839
+ classes : ndarray of shape (n_classes,), default=None
840
+ Classes across all calls to partial_fit.
841
+ Can be obtained by via `np.unique(y_all)`, where y_all is the
842
+ target vector of the entire dataset.
843
+ This argument is required for the first call to partial_fit
844
+ and can be omitted in the subsequent calls.
845
+ Note that y doesn't need to contain all labels in `classes`.
846
+
847
+ sample_weight : array-like, shape (n_samples,), default=None
848
+ Weights applied to individual samples.
849
+ If not provided, uniform weights are assumed.
850
+
851
+ Returns
852
+ -------
853
+ self : object
854
+ Returns an instance of self.
855
+ """
856
+ if not hasattr(self, "classes_"):
857
+ self._more_validate_params(for_partial_fit=True)
858
+
859
+ if self.class_weight == "balanced":
860
+ raise ValueError(
861
+ "class_weight '{0}' is not supported for "
862
+ "partial_fit. In order to use 'balanced' weights,"
863
+ " use compute_class_weight('{0}', "
864
+ "classes=classes, y=y). "
865
+ "In place of y you can use a large enough sample "
866
+ "of the full training set target to properly "
867
+ "estimate the class frequency distributions. "
868
+ "Pass the resulting weights as the class_weight "
869
+ "parameter.".format(self.class_weight)
870
+ )
871
+
872
+ return self._partial_fit(
873
+ X,
874
+ y,
875
+ alpha=self.alpha,
876
+ C=1.0,
877
+ loss=self.loss,
878
+ learning_rate=self.learning_rate,
879
+ max_iter=1,
880
+ classes=classes,
881
+ sample_weight=sample_weight,
882
+ coef_init=None,
883
+ intercept_init=None,
884
+ )
885
+
886
+ @_fit_context(prefer_skip_nested_validation=True)
887
+ def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
888
+ """Fit linear model with Stochastic Gradient Descent.
889
+
890
+ Parameters
891
+ ----------
892
+ X : {array-like, sparse matrix}, shape (n_samples, n_features)
893
+ Training data.
894
+
895
+ y : ndarray of shape (n_samples,)
896
+ Target values.
897
+
898
+ coef_init : ndarray of shape (n_classes, n_features), default=None
899
+ The initial coefficients to warm-start the optimization.
900
+
901
+ intercept_init : ndarray of shape (n_classes,), default=None
902
+ The initial intercept to warm-start the optimization.
903
+
904
+ sample_weight : array-like, shape (n_samples,), default=None
905
+ Weights applied to individual samples.
906
+ If not provided, uniform weights are assumed. These weights will
907
+ be multiplied with class_weight (passed through the
908
+ constructor) if class_weight is specified.
909
+
910
+ Returns
911
+ -------
912
+ self : object
913
+ Returns an instance of self.
914
+ """
915
+ self._more_validate_params()
916
+
917
+ return self._fit(
918
+ X,
919
+ y,
920
+ alpha=self.alpha,
921
+ C=1.0,
922
+ loss=self.loss,
923
+ learning_rate=self.learning_rate,
924
+ coef_init=coef_init,
925
+ intercept_init=intercept_init,
926
+ sample_weight=sample_weight,
927
+ )
928
+
929
+
930
+ class SGDClassifier(BaseSGDClassifier):
931
+ """Linear classifiers (SVM, logistic regression, etc.) with SGD training.
932
+
933
+ This estimator implements regularized linear models with stochastic
934
+ gradient descent (SGD) learning: the gradient of the loss is estimated
935
+ each sample at a time and the model is updated along the way with a
936
+ decreasing strength schedule (aka learning rate). SGD allows minibatch
937
+ (online/out-of-core) learning via the `partial_fit` method.
938
+ For best results using the default learning rate schedule, the data should
939
+ have zero mean and unit variance.
940
+
941
+ This implementation works with data represented as dense or sparse arrays
942
+ of floating point values for the features. The model it fits can be
943
+ controlled with the loss parameter; by default, it fits a linear support
944
+ vector machine (SVM).
945
+
946
+ The regularizer is a penalty added to the loss function that shrinks model
947
+ parameters towards the zero vector using either the squared euclidean norm
948
+ L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
949
+ parameter update crosses the 0.0 value because of the regularizer, the
950
+ update is truncated to 0.0 to allow for learning sparse models and achieve
951
+ online feature selection.
952
+
953
+ Read more in the :ref:`User Guide <sgd>`.
954
+
955
+ Parameters
956
+ ----------
957
+ loss : {'hinge', 'log_loss', 'modified_huber', 'squared_hinge',\
958
+ 'perceptron', 'squared_error', 'huber', 'epsilon_insensitive',\
959
+ 'squared_epsilon_insensitive'}, default='hinge'
960
+ The loss function to be used.
961
+
962
+ - 'hinge' gives a linear SVM.
963
+ - 'log_loss' gives logistic regression, a probabilistic classifier.
964
+ - 'modified_huber' is another smooth loss that brings tolerance to
965
+ outliers as well as probability estimates.
966
+ - 'squared_hinge' is like hinge but is quadratically penalized.
967
+ - 'perceptron' is the linear loss used by the perceptron algorithm.
968
+ - The other losses, 'squared_error', 'huber', 'epsilon_insensitive' and
969
+ 'squared_epsilon_insensitive' are designed for regression but can be useful
970
+ in classification as well; see
971
+ :class:`~sklearn.linear_model.SGDRegressor` for a description.
972
+
973
+ More details about the losses formulas can be found in the
974
+ :ref:`User Guide <sgd_mathematical_formulation>`.
975
+
976
+ penalty : {'l2', 'l1', 'elasticnet', None}, default='l2'
977
+ The penalty (aka regularization term) to be used. Defaults to 'l2'
978
+ which is the standard regularizer for linear SVM models. 'l1' and
979
+ 'elasticnet' might bring sparsity to the model (feature selection)
980
+ not achievable with 'l2'. No penalty is added when set to `None`.
981
+
982
+ alpha : float, default=0.0001
983
+ Constant that multiplies the regularization term. The higher the
984
+ value, the stronger the regularization. Also used to compute the
985
+ learning rate when `learning_rate` is set to 'optimal'.
986
+ Values must be in the range `[0.0, inf)`.
987
+
988
+ l1_ratio : float, default=0.15
989
+ The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
990
+ l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
991
+ Only used if `penalty` is 'elasticnet'.
992
+ Values must be in the range `[0.0, 1.0]`.
993
+
994
+ fit_intercept : bool, default=True
995
+ Whether the intercept should be estimated or not. If False, the
996
+ data is assumed to be already centered.
997
+
998
+ max_iter : int, default=1000
999
+ The maximum number of passes over the training data (aka epochs).
1000
+ It only impacts the behavior in the ``fit`` method, and not the
1001
+ :meth:`partial_fit` method.
1002
+ Values must be in the range `[1, inf)`.
1003
+
1004
+ .. versionadded:: 0.19
1005
+
1006
+ tol : float or None, default=1e-3
1007
+ The stopping criterion. If it is not None, training will stop
1008
+ when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
1009
+ epochs.
1010
+ Convergence is checked against the training loss or the
1011
+ validation loss depending on the `early_stopping` parameter.
1012
+ Values must be in the range `[0.0, inf)`.
1013
+
1014
+ .. versionadded:: 0.19
1015
+
1016
+ shuffle : bool, default=True
1017
+ Whether or not the training data should be shuffled after each epoch.
1018
+
1019
+ verbose : int, default=0
1020
+ The verbosity level.
1021
+ Values must be in the range `[0, inf)`.
1022
+
1023
+ epsilon : float, default=0.1
1024
+ Epsilon in the epsilon-insensitive loss functions; only if `loss` is
1025
+ 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
1026
+ For 'huber', determines the threshold at which it becomes less
1027
+ important to get the prediction exactly right.
1028
+ For epsilon-insensitive, any differences between the current prediction
1029
+ and the correct label are ignored if they are less than this threshold.
1030
+ Values must be in the range `[0.0, inf)`.
1031
+
1032
+ n_jobs : int, default=None
1033
+ The number of CPUs to use to do the OVA (One Versus All, for
1034
+ multi-class problems) computation.
1035
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1036
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1037
+ for more details.
1038
+
1039
+ random_state : int, RandomState instance, default=None
1040
+ Used for shuffling the data, when ``shuffle`` is set to ``True``.
1041
+ Pass an int for reproducible output across multiple function calls.
1042
+ See :term:`Glossary <random_state>`.
1043
+ Integer values must be in the range `[0, 2**32 - 1]`.
1044
+
1045
+ learning_rate : str, default='optimal'
1046
+ The learning rate schedule:
1047
+
1048
+ - 'constant': `eta = eta0`
1049
+ - 'optimal': `eta = 1.0 / (alpha * (t + t0))`
1050
+ where `t0` is chosen by a heuristic proposed by Leon Bottou.
1051
+ - 'invscaling': `eta = eta0 / pow(t, power_t)`
1052
+ - 'adaptive': `eta = eta0`, as long as the training keeps decreasing.
1053
+ Each time n_iter_no_change consecutive epochs fail to decrease the
1054
+ training loss by tol or fail to increase validation score by tol if
1055
+ `early_stopping` is `True`, the current learning rate is divided by 5.
1056
+
1057
+ .. versionadded:: 0.20
1058
+ Added 'adaptive' option
1059
+
1060
+ eta0 : float, default=0.0
1061
+ The initial learning rate for the 'constant', 'invscaling' or
1062
+ 'adaptive' schedules. The default value is 0.0 as eta0 is not used by
1063
+ the default schedule 'optimal'.
1064
+ Values must be in the range `[0.0, inf)`.
1065
+
1066
+ power_t : float, default=0.5
1067
+ The exponent for inverse scaling learning rate.
1068
+ Values must be in the range `(-inf, inf)`.
1069
+
1070
+ early_stopping : bool, default=False
1071
+ Whether to use early stopping to terminate training when validation
1072
+ score is not improving. If set to `True`, it will automatically set aside
1073
+ a stratified fraction of training data as validation and terminate
1074
+ training when validation score returned by the `score` method is not
1075
+ improving by at least tol for n_iter_no_change consecutive epochs.
1076
+
1077
+ .. versionadded:: 0.20
1078
+ Added 'early_stopping' option
1079
+
1080
+ validation_fraction : float, default=0.1
1081
+ The proportion of training data to set aside as validation set for
1082
+ early stopping. Must be between 0 and 1.
1083
+ Only used if `early_stopping` is True.
1084
+ Values must be in the range `(0.0, 1.0)`.
1085
+
1086
+ .. versionadded:: 0.20
1087
+ Added 'validation_fraction' option
1088
+
1089
+ n_iter_no_change : int, default=5
1090
+ Number of iterations with no improvement to wait before stopping
1091
+ fitting.
1092
+ Convergence is checked against the training loss or the
1093
+ validation loss depending on the `early_stopping` parameter.
1094
+ Integer values must be in the range `[1, max_iter)`.
1095
+
1096
+ .. versionadded:: 0.20
1097
+ Added 'n_iter_no_change' option
1098
+
1099
+ class_weight : dict, {class_label: weight} or "balanced", default=None
1100
+ Preset for the class_weight fit parameter.
1101
+
1102
+ Weights associated with classes. If not given, all classes
1103
+ are supposed to have weight one.
1104
+
1105
+ The "balanced" mode uses the values of y to automatically adjust
1106
+ weights inversely proportional to class frequencies in the input data
1107
+ as ``n_samples / (n_classes * np.bincount(y))``.
1108
+
1109
+ warm_start : bool, default=False
1110
+ When set to True, reuse the solution of the previous call to fit as
1111
+ initialization, otherwise, just erase the previous solution.
1112
+ See :term:`the Glossary <warm_start>`.
1113
+
1114
+ Repeatedly calling fit or partial_fit when warm_start is True can
1115
+ result in a different solution than when calling fit a single time
1116
+ because of the way the data is shuffled.
1117
+ If a dynamic learning rate is used, the learning rate is adapted
1118
+ depending on the number of samples already seen. Calling ``fit`` resets
1119
+ this counter, while ``partial_fit`` will result in increasing the
1120
+ existing counter.
1121
+
1122
+ average : bool or int, default=False
1123
+ When set to `True`, computes the averaged SGD weights across all
1124
+ updates and stores the result in the ``coef_`` attribute. If set to
1125
+ an int greater than 1, averaging will begin once the total number of
1126
+ samples seen reaches `average`. So ``average=10`` will begin
1127
+ averaging after seeing 10 samples.
1128
+ Integer values must be in the range `[1, n_samples]`.
1129
+
1130
+ Attributes
1131
+ ----------
1132
+ coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
1133
+ (n_classes, n_features)
1134
+ Weights assigned to the features.
1135
+
1136
+ intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
1137
+ Constants in decision function.
1138
+
1139
+ n_iter_ : int
1140
+ The actual number of iterations before reaching the stopping criterion.
1141
+ For multiclass fits, it is the maximum over every binary fit.
1142
+
1143
+ loss_function_ : concrete ``LossFunction``
1144
+
1145
+ .. deprecated:: 1.4
1146
+ Attribute `loss_function_` was deprecated in version 1.4 and will be
1147
+ removed in 1.6.
1148
+
1149
+ classes_ : array of shape (n_classes,)
1150
+
1151
+ t_ : int
1152
+ Number of weight updates performed during training.
1153
+ Same as ``(n_iter_ * n_samples + 1)``.
1154
+
1155
+ n_features_in_ : int
1156
+ Number of features seen during :term:`fit`.
1157
+
1158
+ .. versionadded:: 0.24
1159
+
1160
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1161
+ Names of features seen during :term:`fit`. Defined only when `X`
1162
+ has feature names that are all strings.
1163
+
1164
+ .. versionadded:: 1.0
1165
+
1166
+ See Also
1167
+ --------
1168
+ sklearn.svm.LinearSVC : Linear support vector classification.
1169
+ LogisticRegression : Logistic regression.
1170
+ Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to
1171
+ ``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
1172
+ penalty=None)``.
1173
+
1174
+ Examples
1175
+ --------
1176
+ >>> import numpy as np
1177
+ >>> from sklearn.linear_model import SGDClassifier
1178
+ >>> from sklearn.preprocessing import StandardScaler
1179
+ >>> from sklearn.pipeline import make_pipeline
1180
+ >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
1181
+ >>> Y = np.array([1, 1, 2, 2])
1182
+ >>> # Always scale the input. The most convenient way is to use a pipeline.
1183
+ >>> clf = make_pipeline(StandardScaler(),
1184
+ ... SGDClassifier(max_iter=1000, tol=1e-3))
1185
+ >>> clf.fit(X, Y)
1186
+ Pipeline(steps=[('standardscaler', StandardScaler()),
1187
+ ('sgdclassifier', SGDClassifier())])
1188
+ >>> print(clf.predict([[-0.8, -1]]))
1189
+ [1]
1190
+ """
1191
+
1192
+    _parameter_constraints: dict = {
+        **BaseSGDClassifier._parameter_constraints,
+        "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None],
+        "alpha": [Interval(Real, 0, None, closed="left")],
+        "l1_ratio": [Interval(Real, 0, 1, closed="both")],
+        "power_t": [Interval(Real, None, None, closed="neither")],
+        "epsilon": [Interval(Real, 0, None, closed="left")],
+        "learning_rate": [
+            StrOptions({"constant", "optimal", "invscaling", "adaptive"}),
+            Hidden(StrOptions({"pa1", "pa2"})),
+        ],
+        "eta0": [Interval(Real, 0, None, closed="left")],
+    }
+
+    def __init__(
+        self,
+        loss="hinge",
+        *,
+        penalty="l2",
+        alpha=0.0001,
+        l1_ratio=0.15,
+        fit_intercept=True,
+        max_iter=1000,
+        tol=1e-3,
+        shuffle=True,
+        verbose=0,
+        epsilon=DEFAULT_EPSILON,
+        n_jobs=None,
+        random_state=None,
+        learning_rate="optimal",
+        eta0=0.0,
+        power_t=0.5,
+        early_stopping=False,
+        validation_fraction=0.1,
+        n_iter_no_change=5,
+        class_weight=None,
+        warm_start=False,
+        average=False,
+    ):
+        super().__init__(
+            loss=loss,
+            penalty=penalty,
+            alpha=alpha,
+            l1_ratio=l1_ratio,
+            fit_intercept=fit_intercept,
+            max_iter=max_iter,
+            tol=tol,
+            shuffle=shuffle,
+            verbose=verbose,
+            epsilon=epsilon,
+            n_jobs=n_jobs,
+            random_state=random_state,
+            learning_rate=learning_rate,
+            eta0=eta0,
+            power_t=power_t,
+            early_stopping=early_stopping,
+            validation_fraction=validation_fraction,
+            n_iter_no_change=n_iter_no_change,
+            class_weight=class_weight,
+            warm_start=warm_start,
+            average=average,
+        )
+
+    def _check_proba(self):
+        if self.loss not in ("log_loss", "modified_huber"):
+            raise AttributeError(
+                "probability estimates are not available for loss=%r" % self.loss
+            )
+        return True
+
+    @available_if(_check_proba)
+    def predict_proba(self, X):
+        """Probability estimates.
+
+        This method is only available for log loss and modified Huber loss.
+
+        Multiclass probability estimates are derived from binary (one-vs.-rest)
+        estimates by simple normalization, as recommended by Zadrozny and
+        Elkan.
+
+        Binary probability estimates for loss="modified_huber" are given by
+        (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
+        it is necessary to perform proper probability calibration by wrapping
+        the classifier with
+        :class:`~sklearn.calibration.CalibratedClassifierCV` instead.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Input data for prediction.
+
+        Returns
+        -------
+        ndarray of shape (n_samples, n_classes)
+            Returns the probability of the sample for each class in the model,
+            where classes are ordered as they are in `self.classes_`.
+
+        References
+        ----------
+        Zadrozny and Elkan, "Transforming classifier scores into multiclass
+        probability estimates", SIGKDD'02,
+        https://dl.acm.org/doi/pdf/10.1145/775047.775151
+
+        The justification for the formula in the loss="modified_huber"
+        case is in the appendix B in:
+        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
+        """
+        check_is_fitted(self)
+
+        if self.loss == "log_loss":
+            return self._predict_proba_lr(X)
+
+        elif self.loss == "modified_huber":
+            binary = len(self.classes_) == 2
+            scores = self.decision_function(X)
+
+            if binary:
+                prob2 = np.ones((scores.shape[0], 2))
+                prob = prob2[:, 1]
+            else:
+                prob = scores
+
+            np.clip(scores, -1, 1, prob)
+            prob += 1.0
+            prob /= 2.0
+
+            if binary:
+                prob2[:, 0] -= prob
+                prob = prob2
+            else:
+                # the above might assign zero to all classes, which doesn't
+                # normalize neatly; work around this to produce uniform
+                # probabilities
+                prob_sum = prob.sum(axis=1)
+                all_zero = prob_sum == 0
+                if np.any(all_zero):
+                    prob[all_zero, :] = 1
+                    prob_sum[all_zero] = len(self.classes_)
+
+                # normalize
+                prob /= prob_sum.reshape((prob.shape[0], -1))
+
+            return prob
+
+        else:
+            raise NotImplementedError(
+                "predict_(log_)proba only supported when"
+                " loss='log_loss' or loss='modified_huber' "
+                "(%r given)"
+                % self.loss
+            )
+
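For the binary `loss="modified_huber"` case, the formula quoted in the docstring, `(clip(decision_function(X), -1, 1) + 1) / 2`, can be checked directly against `predict_proba`; a small sketch on toy data (the data itself is illustrative):

```python
import numpy as np
from sklearn.linear_model import SGDClassifier

X = np.array([[-2.0], [-1.0], [1.0], [2.0]])
y = np.array([0, 0, 1, 1])
clf = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)

# Probability of the positive class from the documented formula ...
p_pos = (np.clip(clf.decision_function(X), -1, 1) + 1) / 2
# ... matches the second column returned by predict_proba.
assert np.allclose(p_pos, clf.predict_proba(X)[:, 1])
```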
1344
+    @available_if(_check_proba)
+    def predict_log_proba(self, X):
+        """Log of probability estimates.
+
+        This method is only available for log loss and modified Huber loss.
+
+        When loss="modified_huber", probability estimates may be hard zeros
+        and ones, so taking the logarithm is not possible.
+
+        See ``predict_proba`` for details.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Input data for prediction.
+
+        Returns
+        -------
+        T : array-like, shape (n_samples, n_classes)
+            Returns the log-probability of the sample for each class in the
+            model, where classes are ordered as they are in
+            `self.classes_`.
+        """
+        return np.log(self.predict_proba(X))
+
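As the docstring warns, `modified_huber` can yield hard 0/1 probabilities, in which case the logarithm produces `-inf` entries; a quick illustration reusing the toy data above:

```python
import numpy as np
from sklearn.linear_model import SGDClassifier

X = np.array([[-2.0], [-1.0], [1.0], [2.0]])
y = np.array([0, 0, 1, 1])
clf = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)

with np.errstate(divide="ignore"):  # silence numpy's log(0) warning
    log_proba = clf.predict_log_proba(X)
print(np.isneginf(log_proba).any())  # likely True for well-separated points
```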
1369
+    def _more_tags(self):
+        return {
+            "_xfail_checks": {
+                "check_sample_weights_invariance": (
+                    "zero sample_weight is not equivalent to removing samples"
+                ),
+            },
+            "preserves_dtype": [np.float64, np.float32],
+        }
+
+
+class BaseSGDRegressor(RegressorMixin, BaseSGD):
+    loss_functions = {
+        "squared_error": (SquaredLoss,),
+        "huber": (Huber, DEFAULT_EPSILON),
+        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
+        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON),
+    }
+
+    _parameter_constraints: dict = {
+        **BaseSGD._parameter_constraints,
+        "loss": [StrOptions(set(loss_functions))],
+        "early_stopping": ["boolean"],
+        "validation_fraction": [Interval(Real, 0, 1, closed="neither")],
+        "n_iter_no_change": [Interval(Integral, 1, None, closed="left")],
+    }
+
+    @abstractmethod
+    def __init__(
+        self,
+        loss="squared_error",
+        *,
+        penalty="l2",
+        alpha=0.0001,
+        l1_ratio=0.15,
+        fit_intercept=True,
+        max_iter=1000,
+        tol=1e-3,
+        shuffle=True,
+        verbose=0,
+        epsilon=DEFAULT_EPSILON,
+        random_state=None,
+        learning_rate="invscaling",
+        eta0=0.01,
+        power_t=0.25,
+        early_stopping=False,
+        validation_fraction=0.1,
+        n_iter_no_change=5,
+        warm_start=False,
+        average=False,
+    ):
+        super().__init__(
+            loss=loss,
+            penalty=penalty,
+            alpha=alpha,
+            l1_ratio=l1_ratio,
+            fit_intercept=fit_intercept,
+            max_iter=max_iter,
+            tol=tol,
+            shuffle=shuffle,
+            verbose=verbose,
+            epsilon=epsilon,
+            random_state=random_state,
+            learning_rate=learning_rate,
+            eta0=eta0,
+            power_t=power_t,
+            early_stopping=early_stopping,
+            validation_fraction=validation_fraction,
+            n_iter_no_change=n_iter_no_change,
+            warm_start=warm_start,
+            average=average,
+        )
+
+    def _partial_fit(
+        self,
+        X,
+        y,
+        alpha,
+        C,
+        loss,
+        learning_rate,
+        max_iter,
+        sample_weight,
+        coef_init,
+        intercept_init,
+    ):
+        first_call = getattr(self, "coef_", None) is None
+        X, y = self._validate_data(
+            X,
+            y,
+            accept_sparse="csr",
+            copy=False,
+            order="C",
+            dtype=[np.float64, np.float32],
+            accept_large_sparse=False,
+            reset=first_call,
+        )
+        y = y.astype(X.dtype, copy=False)
+
+        n_samples, n_features = X.shape
+
+        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
+
+        # Allocate data structures from input arguments
+        if first_call:
+            self._allocate_parameter_mem(
+                n_classes=1,
+                n_features=n_features,
+                input_dtype=X.dtype,
+                coef_init=coef_init,
+                intercept_init=intercept_init,
+            )
+        if self.average > 0 and getattr(self, "_average_coef", None) is None:
+            self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C")
+            self._average_intercept = np.zeros(1, dtype=X.dtype, order="C")
+
+        self._fit_regressor(
+            X, y, alpha, C, loss, learning_rate, sample_weight, max_iter
+        )
+
+        return self
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def partial_fit(self, X, y, sample_weight=None):
+        """Perform one epoch of stochastic gradient descent on given samples.
+
+        Internally, this method uses ``max_iter = 1``. Therefore, it is not
+        guaranteed that a minimum of the cost function is reached after calling
+        it once. Matters such as objective convergence and early stopping
+        should be handled by the user.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Subset of training data.
+
+        y : numpy array of shape (n_samples,)
+            Subset of target values.
+
+        sample_weight : array-like, shape (n_samples,), default=None
+            Weights applied to individual samples.
+            If not provided, uniform weights are assumed.
+
+        Returns
+        -------
+        self : object
+            Returns an instance of self.
+        """
+        if not hasattr(self, "coef_"):
+            self._more_validate_params(for_partial_fit=True)
+
+        return self._partial_fit(
+            X,
+            y,
+            self.alpha,
+            C=1.0,
+            loss=self.loss,
+            learning_rate=self.learning_rate,
+            max_iter=1,
+            sample_weight=sample_weight,
+            coef_init=None,
+            intercept_init=None,
+        )
+
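Because `partial_fit` runs exactly one epoch per call, out-of-core training is simply a loop over incoming batches; a minimal sketch with synthetic batches (all names and values here are illustrative):

```python
import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(0)
reg = SGDRegressor(learning_rate="constant", eta0=0.01, random_state=0)

for _ in range(20):  # pretend each iteration receives a freshly streamed batch
    X_batch = rng.randn(32, 3)
    y_batch = X_batch @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.randn(32)
    reg.partial_fit(X_batch, y_batch)  # one SGD epoch over this batch

print(reg.coef_)  # should approach [1.0, -2.0, 0.5]
```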
1533
+    def _fit(
+        self,
+        X,
+        y,
+        alpha,
+        C,
+        loss,
+        learning_rate,
+        coef_init=None,
+        intercept_init=None,
+        sample_weight=None,
+    ):
+        if self.warm_start and getattr(self, "coef_", None) is not None:
+            if coef_init is None:
+                coef_init = self.coef_
+            if intercept_init is None:
+                intercept_init = self.intercept_
+        else:
+            self.coef_ = None
+            self.intercept_ = None
+
+        # Clear iteration count for multiple calls to fit.
+        self.t_ = 1.0
+
+        self._partial_fit(
+            X,
+            y,
+            alpha,
+            C,
+            loss,
+            learning_rate,
+            self.max_iter,
+            sample_weight,
+            coef_init,
+            intercept_init,
+        )
+
+        if (
+            self.tol is not None
+            and self.tol > -np.inf
+            and self.n_iter_ == self.max_iter
+        ):
+            warnings.warn(
+                (
+                    "Maximum number of iterations reached before "
+                    "convergence. Consider increasing max_iter to "
+                    "improve the fit."
+                ),
+                ConvergenceWarning,
+            )
+
+        return self
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
+        """Fit linear model with Stochastic Gradient Descent.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Training data.
+
+        y : ndarray of shape (n_samples,)
+            Target values.
+
+        coef_init : ndarray of shape (n_features,), default=None
+            The initial coefficients to warm-start the optimization.
+
+        intercept_init : ndarray of shape (1,), default=None
+            The initial intercept to warm-start the optimization.
+
+        sample_weight : array-like, shape (n_samples,), default=None
+            Weights applied to individual samples (1. for unweighted).
+
+        Returns
+        -------
+        self : object
+            Fitted `SGDRegressor` estimator.
+        """
+        self._more_validate_params()
+
+        return self._fit(
+            X,
+            y,
+            alpha=self.alpha,
+            C=1.0,
+            loss=self.loss,
+            learning_rate=self.learning_rate,
+            coef_init=coef_init,
+            intercept_init=intercept_init,
+            sample_weight=sample_weight,
+        )
+
+    def _decision_function(self, X):
+        """Predict using the linear model.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+
+        Returns
+        -------
+        ndarray of shape (n_samples,)
+            Predicted target values per element in X.
+        """
+        check_is_fitted(self)
+
+        X = self._validate_data(X, accept_sparse="csr", reset=False)
+
+        scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
+        return scores.ravel()
+
+    def predict(self, X):
+        """Predict using the linear model.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Input data.
+
+        Returns
+        -------
+        ndarray of shape (n_samples,)
+            Predicted target values per element in X.
+        """
+        return self._decision_function(X)
+
+    def _fit_regressor(
+        self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter
+    ):
+        loss_function = self._get_loss_function(loss)
+        penalty_type = self._get_penalty_type(self.penalty)
+        learning_rate_type = self._get_learning_rate_type(learning_rate)
+
+        if not hasattr(self, "t_"):
+            self.t_ = 1.0
+
+        validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
+        validation_score_cb = self._make_validation_score_cb(
+            validation_mask, X, y, sample_weight
+        )
+
+        random_state = check_random_state(self.random_state)
+        # numpy mtrand expects a C long which is a signed 32 bit integer under
+        # Windows
+        seed = random_state.randint(0, MAX_INT)
+
+        dataset, intercept_decay = make_dataset(
+            X, y, sample_weight, random_state=random_state
+        )
+
+        tol = self.tol if self.tol is not None else -np.inf
+
+        if self.average:
+            coef = self._standard_coef
+            intercept = self._standard_intercept
+            average_coef = self._average_coef
+            average_intercept = self._average_intercept
+        else:
+            coef = self.coef_
+            intercept = self.intercept_
+            average_coef = None  # Not used
+            average_intercept = [0]  # Not used
+
+        _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
+        coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(
+            coef,
+            intercept[0],
+            average_coef,
+            average_intercept[0],
+            loss_function,
+            penalty_type,
+            alpha,
+            C,
+            self.l1_ratio,
+            dataset,
+            validation_mask,
+            self.early_stopping,
+            validation_score_cb,
+            int(self.n_iter_no_change),
+            max_iter,
+            tol,
+            int(self.fit_intercept),
+            int(self.verbose),
+            int(self.shuffle),
+            seed,
+            1.0,
+            1.0,
+            learning_rate_type,
+            self.eta0,
+            self.power_t,
+            0,
+            self.t_,
+            intercept_decay,
+            self.average,
+        )
+
+        self.t_ += self.n_iter_ * X.shape[0]
+
+        if self.average > 0:
+            self._average_intercept = np.atleast_1d(average_intercept)
+            self._standard_intercept = np.atleast_1d(intercept)
+
+            if self.average <= self.t_ - 1.0:
+                # made enough updates for averaging to be taken into account
+                self.coef_ = average_coef
+                self.intercept_ = np.atleast_1d(average_intercept)
+            else:
+                self.coef_ = coef
+                self.intercept_ = np.atleast_1d(intercept)
+
+        else:
+            self.intercept_ = np.atleast_1d(intercept)
+
+
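The averaging branch at the end of `_fit_regressor` is what the public `average` parameter drives: once enough updates have been made, the running average of the weight vector replaces the plain SGD solution in `coef_`. A hedged sketch (toy data, illustrative values):

```python
import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(0)
X = rng.randn(200, 4)
y = X @ np.array([2.0, -1.0, 0.0, 0.5]) + 0.1 * rng.randn(200)

# average=10: plain SGD for the first 10 samples seen, after which the
# averaged weights are what end up in coef_.
reg = SGDRegressor(average=10, max_iter=100, tol=None, random_state=0).fit(X, y)
print(reg.coef_)  # averaged weights, typically less noisy than plain SGD's
```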
1748
+class SGDRegressor(BaseSGDRegressor):
+    """Linear model fitted by minimizing a regularized empirical loss with SGD.
+
+    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
+    estimated one sample at a time and the model is updated along the way with
+    a decreasing strength schedule (aka learning rate).
+
+    The regularizer is a penalty added to the loss function that shrinks model
+    parameters towards the zero vector using either the squared euclidean norm
+    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
+    parameter update crosses the 0.0 value because of the regularizer, the
+    update is truncated to 0.0 to allow for learning sparse models and achieve
+    online feature selection.
+
+    This implementation works with data represented as dense numpy arrays of
+    floating point values for the features.
+
+    Read more in the :ref:`User Guide <sgd>`.
+
+    Parameters
+    ----------
+    loss : str, default='squared_error'
+        The loss function to be used. The possible values are 'squared_error',
+        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
+
+        The 'squared_error' refers to the ordinary least squares fit.
+        'huber' modifies 'squared_error' to focus less on getting outliers
+        correct by switching from squared to linear loss past a distance of
+        epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
+        linear past that; this is the loss function used in SVR.
+        'squared_epsilon_insensitive' is the same but becomes squared loss past
+        a tolerance of epsilon.
+
+        More details about the loss formulas can be found in the
+        :ref:`User Guide <sgd_mathematical_formulation>`.
+
+    penalty : {'l2', 'l1', 'elasticnet', None}, default='l2'
+        The penalty (aka regularization term) to be used. Defaults to 'l2'
+        which is the standard regularizer for linear SVM models. 'l1' and
+        'elasticnet' might bring sparsity to the model (feature selection)
+        not achievable with 'l2'. No penalty is added when set to `None`.
+
+    alpha : float, default=0.0001
+        Constant that multiplies the regularization term. The higher the
+        value, the stronger the regularization. Also used to compute the
+        learning rate when `learning_rate` is set to 'optimal'.
+        Values must be in the range `[0.0, inf)`.
+
+    l1_ratio : float, default=0.15
+        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
+        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
+        Only used if `penalty` is 'elasticnet'.
+        Values must be in the range `[0.0, 1.0]`.
+
+    fit_intercept : bool, default=True
+        Whether the intercept should be estimated or not. If False, the
+        data is assumed to be already centered.
+
+    max_iter : int, default=1000
+        The maximum number of passes over the training data (aka epochs).
+        It only impacts the behavior in the ``fit`` method, and not the
+        :meth:`partial_fit` method.
+        Values must be in the range `[1, inf)`.
+
+        .. versionadded:: 0.19
+
+    tol : float or None, default=1e-3
+        The stopping criterion. If it is not None, training will stop
+        when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
+        epochs.
+        Convergence is checked against the training loss or the
+        validation loss depending on the `early_stopping` parameter.
+        Values must be in the range `[0.0, inf)`.
+
+        .. versionadded:: 0.19
+
+    shuffle : bool, default=True
+        Whether or not the training data should be shuffled after each epoch.
+
+    verbose : int, default=0
+        The verbosity level.
+        Values must be in the range `[0, inf)`.
+
+    epsilon : float, default=0.1
+        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
+        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
+        For 'huber', determines the threshold at which it becomes less
+        important to get the prediction exactly right.
+        For epsilon-insensitive, any differences between the current prediction
+        and the correct label are ignored if they are less than this threshold.
+        Values must be in the range `[0.0, inf)`.
+
+    random_state : int, RandomState instance, default=None
+        Used for shuffling the data, when ``shuffle`` is set to ``True``.
+        Pass an int for reproducible output across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    learning_rate : str, default='invscaling'
+        The learning rate schedule:
+
+        - 'constant': `eta = eta0`
+        - 'optimal': `eta = 1.0 / (alpha * (t + t0))`
+          where t0 is chosen by a heuristic proposed by Leon Bottou.
+        - 'invscaling': `eta = eta0 / pow(t, power_t)`
+        - 'adaptive': eta = eta0, as long as the training keeps decreasing.
+          Each time n_iter_no_change consecutive epochs fail to decrease the
+          training loss by tol or fail to increase validation score by tol if
+          early_stopping is True, the current learning rate is divided by 5.
+
+        .. versionadded:: 0.20
+            Added 'adaptive' option
+
+    eta0 : float, default=0.01
+        The initial learning rate for the 'constant', 'invscaling' or
+        'adaptive' schedules. The default value is 0.01.
+        Values must be in the range `[0.0, inf)`.
+
+    power_t : float, default=0.25
+        The exponent for inverse scaling learning rate.
+        Values must be in the range `(-inf, inf)`.
+
+    early_stopping : bool, default=False
+        Whether to use early stopping to terminate training when validation
+        score is not improving. If set to True, it will automatically set aside
+        a fraction of training data as validation and terminate
+        training when validation score returned by the `score` method is not
+        improving by at least `tol` for `n_iter_no_change` consecutive
+        epochs.
+
+        .. versionadded:: 0.20
+            Added 'early_stopping' option
+
+    validation_fraction : float, default=0.1
+        The proportion of training data to set aside as validation set for
+        early stopping. Must be between 0 and 1.
+        Only used if `early_stopping` is True.
+        Values must be in the range `(0.0, 1.0)`.
+
+        .. versionadded:: 0.20
+            Added 'validation_fraction' option
+
+    n_iter_no_change : int, default=5
+        Number of iterations with no improvement to wait before stopping
+        fitting.
+        Convergence is checked against the training loss or the
+        validation loss depending on the `early_stopping` parameter.
+        Integer values must be in the range `[1, max_iter)`.
+
+        .. versionadded:: 0.20
+            Added 'n_iter_no_change' option
+
+    warm_start : bool, default=False
+        When set to True, reuse the solution of the previous call to fit as
+        initialization, otherwise, just erase the previous solution.
+        See :term:`the Glossary <warm_start>`.
+
+        Repeatedly calling fit or partial_fit when warm_start is True can
+        result in a different solution than when calling fit a single time
+        because of the way the data is shuffled.
+        If a dynamic learning rate is used, the learning rate is adapted
+        depending on the number of samples already seen. Calling ``fit`` resets
+        this counter, while ``partial_fit`` will result in increasing the
+        existing counter.
+
+    average : bool or int, default=False
+        When set to True, computes the averaged SGD weights across all
+        updates and stores the result in the ``coef_`` attribute. If set to
+        an int greater than 1, averaging will begin once the total number of
+        samples seen reaches `average`. So ``average=10`` will begin
+        averaging after seeing 10 samples.
+
+    Attributes
+    ----------
+    coef_ : ndarray of shape (n_features,)
+        Weights assigned to the features.
+
+    intercept_ : ndarray of shape (1,)
+        The intercept term.
+
+    n_iter_ : int
+        The actual number of iterations before reaching the stopping criterion.
+
+    t_ : int
+        Number of weight updates performed during training.
+        Same as ``(n_iter_ * n_samples + 1)``.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    HuberRegressor : Linear regression model that is robust to outliers.
+    Lars : Least Angle Regression model.
+    Lasso : Linear Model trained with L1 prior as regularizer.
+    RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
+    Ridge : Linear least squares with l2 regularization.
+    sklearn.svm.SVR : Epsilon-Support Vector Regression.
+    TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.linear_model import SGDRegressor
+    >>> from sklearn.pipeline import make_pipeline
+    >>> from sklearn.preprocessing import StandardScaler
+    >>> n_samples, n_features = 10, 5
+    >>> rng = np.random.RandomState(0)
+    >>> y = rng.randn(n_samples)
+    >>> X = rng.randn(n_samples, n_features)
+    >>> # Always scale the input. The most convenient way is to use a pipeline.
+    >>> reg = make_pipeline(StandardScaler(),
+    ...                     SGDRegressor(max_iter=1000, tol=1e-3))
+    >>> reg.fit(X, y)
+    Pipeline(steps=[('standardscaler', StandardScaler()),
+                    ('sgdregressor', SGDRegressor())])
+    """
+
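Continuing the docstring example, prediction and scoring go through the same pipeline object; a short usage sketch:

```python
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X, y = rng.randn(10, 5), rng.randn(10)

reg = make_pipeline(StandardScaler(), SGDRegressor(max_iter=1000, tol=1e-3))
reg.fit(X, y)
print(reg.predict(X[:2]))  # predictions for the first two samples
print(reg.score(X, y))     # R^2 on the training data
```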
1973
+    _parameter_constraints: dict = {
+        **BaseSGDRegressor._parameter_constraints,
+        "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None],
+        "alpha": [Interval(Real, 0, None, closed="left")],
+        "l1_ratio": [Interval(Real, 0, 1, closed="both")],
+        "power_t": [Interval(Real, None, None, closed="neither")],
+        "learning_rate": [
+            StrOptions({"constant", "optimal", "invscaling", "adaptive"}),
+            Hidden(StrOptions({"pa1", "pa2"})),
+        ],
+        "epsilon": [Interval(Real, 0, None, closed="left")],
+        "eta0": [Interval(Real, 0, None, closed="left")],
+    }
+
+    def __init__(
+        self,
+        loss="squared_error",
+        *,
+        penalty="l2",
+        alpha=0.0001,
+        l1_ratio=0.15,
+        fit_intercept=True,
+        max_iter=1000,
+        tol=1e-3,
+        shuffle=True,
+        verbose=0,
+        epsilon=DEFAULT_EPSILON,
+        random_state=None,
+        learning_rate="invscaling",
+        eta0=0.01,
+        power_t=0.25,
+        early_stopping=False,
+        validation_fraction=0.1,
+        n_iter_no_change=5,
+        warm_start=False,
+        average=False,
+    ):
+        super().__init__(
+            loss=loss,
+            penalty=penalty,
+            alpha=alpha,
+            l1_ratio=l1_ratio,
+            fit_intercept=fit_intercept,
+            max_iter=max_iter,
+            tol=tol,
+            shuffle=shuffle,
+            verbose=verbose,
+            epsilon=epsilon,
+            random_state=random_state,
+            learning_rate=learning_rate,
+            eta0=eta0,
+            power_t=power_t,
+            early_stopping=early_stopping,
+            validation_fraction=validation_fraction,
+            n_iter_no_change=n_iter_no_change,
+            warm_start=warm_start,
+            average=average,
+        )
+
+    def _more_tags(self):
+        return {
+            "_xfail_checks": {
+                "check_sample_weights_invariance": (
+                    "zero sample_weight is not equivalent to removing samples"
+                ),
+            },
+            "preserves_dtype": [np.float64, np.float32],
+        }
+
+
+class SGDOneClassSVM(BaseSGD, OutlierMixin):
+    """Solves linear One-Class SVM using Stochastic Gradient Descent.
+
+    This implementation is meant to be used with a kernel approximation
+    technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results
+    similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by
+    default.
+
+    Read more in the :ref:`User Guide <sgd_online_one_class_svm>`.
+
+    .. versionadded:: 1.0
+
+    Parameters
+    ----------
+    nu : float, default=0.5
+        The nu parameter of the One Class SVM: an upper bound on the
+        fraction of training errors and a lower bound of the fraction of
+        support vectors. Should be in the interval (0, 1]. By default 0.5
+        will be taken.
+
+    fit_intercept : bool, default=True
+        Whether the intercept should be estimated or not. Defaults to True.
+
+    max_iter : int, default=1000
+        The maximum number of passes over the training data (aka epochs).
+        It only impacts the behavior in the ``fit`` method, and not the
+        `partial_fit`. Defaults to 1000.
+        Values must be in the range `[1, inf)`.
+
+    tol : float or None, default=1e-3
+        The stopping criterion. If it is not None, the iterations will stop
+        when (loss > previous_loss - tol). Defaults to 1e-3.
+        Values must be in the range `[0.0, inf)`.
+
+    shuffle : bool, default=True
+        Whether or not the training data should be shuffled after each epoch.
+        Defaults to True.
+
+    verbose : int, default=0
+        The verbosity level.
+
+    random_state : int, RandomState instance or None, default=None
+        The seed of the pseudo random number generator to use when shuffling
+        the data. If int, random_state is the seed used by the random number
+        generator; If RandomState instance, random_state is the random number
+        generator; If None, the random number generator is the RandomState
+        instance used by `np.random`.
+
+    learning_rate : {'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal'
+        The learning rate schedule to use with `fit`. (If using `partial_fit`,
+        learning rate must be controlled directly).
+
+        - 'constant': `eta = eta0`
+        - 'optimal': `eta = 1.0 / (alpha * (t + t0))`
+          where t0 is chosen by a heuristic proposed by Leon Bottou.
+        - 'invscaling': `eta = eta0 / pow(t, power_t)`
+        - 'adaptive': eta = eta0, as long as the training keeps decreasing.
+          Each time n_iter_no_change consecutive epochs fail to decrease the
+          training loss by tol or fail to increase validation score by tol if
+          early_stopping is True, the current learning rate is divided by 5.
+
+    eta0 : float, default=0.0
+        The initial learning rate for the 'constant', 'invscaling' or
+        'adaptive' schedules. The default value is 0.0 as eta0 is not used by
+        the default schedule 'optimal'.
+        Values must be in the range `[0.0, inf)`.
+
+    power_t : float, default=0.5
+        The exponent for inverse scaling learning rate.
+        Values must be in the range `(-inf, inf)`.
+
+    warm_start : bool, default=False
+        When set to True, reuse the solution of the previous call to fit as
+        initialization, otherwise, just erase the previous solution.
+        See :term:`the Glossary <warm_start>`.
+
+        Repeatedly calling fit or partial_fit when warm_start is True can
+        result in a different solution than when calling fit a single time
+        because of the way the data is shuffled.
+        If a dynamic learning rate is used, the learning rate is adapted
+        depending on the number of samples already seen. Calling ``fit`` resets
+        this counter, while ``partial_fit`` will result in increasing the
+        existing counter.
+
+    average : bool or int, default=False
+        When set to True, computes the averaged SGD weights and stores the
+        result in the ``coef_`` attribute. If set to an int greater than 1,
+        averaging will begin once the total number of samples seen reaches
+        average. So ``average=10`` will begin averaging after seeing 10
+        samples.
+
+    Attributes
+    ----------
+    coef_ : ndarray of shape (1, n_features)
+        Weights assigned to the features.
+
+    offset_ : ndarray of shape (1,)
+        Offset used to define the decision function from the raw scores.
+        We have the relation: decision_function = score_samples - offset.
+
+    n_iter_ : int
+        The actual number of iterations to reach the stopping criterion.
+
+    t_ : int
+        Number of weight updates performed during training.
+        Same as ``(n_iter_ * n_samples + 1)``.
+
+    loss_function_ : concrete ``LossFunction``
+
+        .. deprecated:: 1.4
+            ``loss_function_`` was deprecated in version 1.4 and will be removed in
+            1.6.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
+
+    Notes
+    -----
+    This estimator has a linear complexity in the number of training samples
+    and is thus better suited than the `sklearn.svm.OneClassSVM`
+    implementation for datasets with a large number of training samples (say
+    > 10,000).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn import linear_model
+    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
+    >>> clf = linear_model.SGDOneClassSVM(random_state=42)
+    >>> clf.fit(X)
+    SGDOneClassSVM(random_state=42)
+
+    >>> print(clf.predict([[4, 4]]))
+    [1]
+    """
+
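As the docstring suggests, pairing this estimator with a kernel approximation such as `Nystroem` recovers behaviour close to the kernelized `sklearn.svm.OneClassSVM`; a minimal sketch (the `gamma` and `nu` values are illustrative):

```python
import numpy as np
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import SGDOneClassSVM
from sklearn.pipeline import make_pipeline

rng = np.random.RandomState(42)
X = 0.3 * rng.randn(100, 2)              # inliers clustered near the origin
X_outliers = rng.uniform(-4, 4, (5, 2))  # points far from the training data

# Approximate an RBF kernel, then fit the linear One-Class SVM with SGD.
clf = make_pipeline(
    Nystroem(gamma=0.1, random_state=42),
    SGDOneClassSVM(nu=0.05, random_state=42),
)
clf.fit(X)
print(clf.predict(X_outliers))  # mostly -1: flagged as outliers
```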
2191
+    loss_functions = {"hinge": (Hinge, 1.0)}
+
+    _parameter_constraints: dict = {
+        **BaseSGD._parameter_constraints,
+        "nu": [Interval(Real, 0.0, 1.0, closed="right")],
+        "learning_rate": [
+            StrOptions({"constant", "optimal", "invscaling", "adaptive"}),
+            Hidden(StrOptions({"pa1", "pa2"})),
+        ],
+        "eta0": [Interval(Real, 0, None, closed="left")],
+        "power_t": [Interval(Real, None, None, closed="neither")],
+    }
+
+    def __init__(
+        self,
+        nu=0.5,
+        fit_intercept=True,
+        max_iter=1000,
+        tol=1e-3,
+        shuffle=True,
+        verbose=0,
+        random_state=None,
+        learning_rate="optimal",
+        eta0=0.0,
+        power_t=0.5,
+        warm_start=False,
+        average=False,
+    ):
+        self.nu = nu
+        super(SGDOneClassSVM, self).__init__(
+            loss="hinge",
+            penalty="l2",
+            C=1.0,
+            l1_ratio=0,
+            fit_intercept=fit_intercept,
+            max_iter=max_iter,
+            tol=tol,
+            shuffle=shuffle,
+            verbose=verbose,
+            epsilon=DEFAULT_EPSILON,
+            random_state=random_state,
+            learning_rate=learning_rate,
+            eta0=eta0,
+            power_t=power_t,
+            early_stopping=False,
+            validation_fraction=0.1,
+            n_iter_no_change=5,
+            warm_start=warm_start,
+            average=average,
+        )
+
+    def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter):
+        """Uses SGD implementation with X and y=np.ones(n_samples)."""
+
+        # The One-Class SVM uses the SGD implementation with
+        # y=np.ones(n_samples).
+        n_samples = X.shape[0]
+        y = np.ones(n_samples, dtype=X.dtype, order="C")
+
+        dataset, offset_decay = make_dataset(X, y, sample_weight)
+
+        penalty_type = self._get_penalty_type(self.penalty)
+        learning_rate_type = self._get_learning_rate_type(learning_rate)
+
+        # early stopping is set to False for the One-Class SVM. Thus
+        # validation_mask and validation_score_cb will be set to values
+        # associated with early_stopping=False in _make_validation_split and
+        # _make_validation_score_cb respectively.
+        validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
+        validation_score_cb = self._make_validation_score_cb(
+            validation_mask, X, y, sample_weight
+        )
+
+        random_state = check_random_state(self.random_state)
+        # numpy mtrand expects a C long which is a signed 32 bit integer under
+        # Windows
+        seed = random_state.randint(0, np.iinfo(np.int32).max)
+
+        tol = self.tol if self.tol is not None else -np.inf
+
+        one_class = 1
+        # There are no class weights for the One-Class SVM and they are
+        # therefore set to 1.
+        pos_weight = 1
+        neg_weight = 1
+
+        if self.average:
+            coef = self._standard_coef
+            intercept = self._standard_intercept
+            average_coef = self._average_coef
+            average_intercept = self._average_intercept
+        else:
+            coef = self.coef_
+            intercept = 1 - self.offset_
+            average_coef = None  # Not used
+            average_intercept = [0]  # Not used
+
+        _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
+        coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(
+            coef,
+            intercept[0],
+            average_coef,
+            average_intercept[0],
+            self._loss_function_,
+            penalty_type,
+            alpha,
+            C,
+            self.l1_ratio,
+            dataset,
+            validation_mask,
+            self.early_stopping,
+            validation_score_cb,
+            int(self.n_iter_no_change),
+            max_iter,
+            tol,
+            int(self.fit_intercept),
+            int(self.verbose),
+            int(self.shuffle),
+            seed,
+            neg_weight,
+            pos_weight,
+            learning_rate_type,
+            self.eta0,
+            self.power_t,
+            one_class,
+            self.t_,
+            offset_decay,
+            self.average,
+        )
+
+        self.t_ += self.n_iter_ * n_samples
+
+        if self.average > 0:
+            self._average_intercept = np.atleast_1d(average_intercept)
+            self._standard_intercept = np.atleast_1d(intercept)
+
+            if self.average <= self.t_ - 1.0:
+                # made enough updates for averaging to be taken into account
+                self.coef_ = average_coef
+                self.offset_ = 1 - np.atleast_1d(average_intercept)
+            else:
+                self.coef_ = coef
+                self.offset_ = 1 - np.atleast_1d(intercept)
+
+        else:
+            self.offset_ = 1 - np.atleast_1d(intercept)
+
+    def _partial_fit(
+        self,
+        X,
+        alpha,
+        C,
+        loss,
+        learning_rate,
+        max_iter,
+        sample_weight,
+        coef_init,
+        offset_init,
+    ):
+        first_call = getattr(self, "coef_", None) is None
+        X = self._validate_data(
+            X,
+            None,
+            accept_sparse="csr",
+            dtype=[np.float64, np.float32],
+            order="C",
+            accept_large_sparse=False,
+            reset=first_call,
+        )
+
+        n_features = X.shape[1]
+
+        # Allocate data structures from input arguments
+        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
+
+        # We use intercept = 1 - offset where intercept is the intercept of
+        # the SGD implementation and offset is the offset of the One-Class SVM
+        # optimization problem.
+        if getattr(self, "coef_", None) is None or coef_init is not None:
+            self._allocate_parameter_mem(
+                n_classes=1,
+                n_features=n_features,
+                input_dtype=X.dtype,
+                coef_init=coef_init,
+                intercept_init=offset_init,
+                one_class=1,
+            )
+        elif n_features != self.coef_.shape[-1]:
+            raise ValueError(
+                "Number of features %d does not match previous data %d."
+                % (n_features, self.coef_.shape[-1])
+            )
+
+        if self.average and getattr(self, "_average_coef", None) is None:
+            self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C")
+            self._average_intercept = np.zeros(1, dtype=X.dtype, order="C")
+
+        self._loss_function_ = self._get_loss_function(loss)
+        if not hasattr(self, "t_"):
+            self.t_ = 1.0
+
+        # delegate to concrete training procedure
+        self._fit_one_class(
+            X,
+            alpha=alpha,
+            C=C,
+            learning_rate=learning_rate,
+            sample_weight=sample_weight,
+            max_iter=max_iter,
+        )
+
+        return self
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def partial_fit(self, X, y=None, sample_weight=None):
+        """Fit linear One-Class SVM with Stochastic Gradient Descent.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Subset of the training data.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        sample_weight : array-like, shape (n_samples,), optional
+            Weights applied to individual samples.
+            If not provided, uniform weights are assumed.
+
+        Returns
+        -------
+        self : object
+            Returns a fitted instance of self.
+        """
+        if not hasattr(self, "coef_"):
+            self._more_validate_params(for_partial_fit=True)
+
+        alpha = self.nu / 2
+        return self._partial_fit(
+            X,
+            alpha,
+            C=1.0,
+            loss=self.loss,
+            learning_rate=self.learning_rate,
+            max_iter=1,
+            sample_weight=sample_weight,
+            coef_init=None,
+            offset_init=None,
+        )
+
+    def _fit(
+        self,
+        X,
+        alpha,
+        C,
+        loss,
+        learning_rate,
+        coef_init=None,
+        offset_init=None,
+        sample_weight=None,
+    ):
+        if self.warm_start and hasattr(self, "coef_"):
+            if coef_init is None:
+                coef_init = self.coef_
+            if offset_init is None:
+                offset_init = self.offset_
+        else:
+            self.coef_ = None
+            self.offset_ = None
+
+        # Clear iteration count for multiple calls to fit.
+        self.t_ = 1.0
+
+        self._partial_fit(
+            X,
+            alpha,
+            C,
+            loss,
+            learning_rate,
+            self.max_iter,
+            sample_weight,
+            coef_init,
+            offset_init,
+        )
+
+        if (
+            self.tol is not None
+            and self.tol > -np.inf
+            and self.n_iter_ == self.max_iter
+        ):
+            warnings.warn(
+                (
+                    "Maximum number of iterations reached before "
+                    "convergence. Consider increasing max_iter to "
+                    "improve the fit."
+                ),
+                ConvergenceWarning,
+            )
+
+        return self
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
+        """Fit linear One-Class SVM with Stochastic Gradient Descent.
+
+        This solves an optimization problem equivalent to the One-Class SVM
+        primal optimization problem and returns a weight vector w and an
+        offset rho such that the decision function is given by <w, x> - rho.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Training data.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        coef_init : array, shape (n_classes, n_features)
+            The initial coefficients to warm-start the optimization.
+
+        offset_init : array, shape (n_classes,)
+            The initial offset to warm-start the optimization.
+
+        sample_weight : array-like, shape (n_samples,), optional
+            Weights applied to individual samples.
+            If not provided, uniform weights are assumed.
+
+        Returns
+        -------
+        self : object
+            Returns a fitted instance of self.
+        """
+        self._more_validate_params()
+
+        alpha = self.nu / 2
+        self._fit(
+            X,
+            alpha=alpha,
+            C=1.0,
+            loss=self.loss,
+            learning_rate=self.learning_rate,
+            coef_init=coef_init,
+            offset_init=offset_init,
+            sample_weight=sample_weight,
+        )
+
+        return self
+
+    def decision_function(self, X):
+        """Signed distance to the separating hyperplane.
+
+        Signed distance is positive for an inlier and negative for an
+        outlier.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Testing data.
+
+        Returns
+        -------
+        dec : array-like, shape (n_samples,)
+            Decision function values of the samples.
+        """
+
+        check_is_fitted(self, "coef_")
+
+        X = self._validate_data(X, accept_sparse="csr", reset=False)
+        decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_
+
+        return decisions.ravel()
+
+    def score_samples(self, X):
+        """Raw scoring function of the samples.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Testing data.
+
+        Returns
+        -------
+        score_samples : array-like, shape (n_samples,)
+            Unshifted scoring function values of the samples.
+        """
+        score_samples = self.decision_function(X) + self.offset_
+        return score_samples
+
+    def predict(self, X):
+        """Return labels (1 inlier, -1 outlier) of the samples.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Testing data.
+
+        Returns
+        -------
+        y : array, shape (n_samples,)
+            Labels of the samples.
+        """
+        y = (self.decision_function(X) >= 0).astype(np.int32)
+        y[y == 0] = -1  # for consistency with outlier detectors
+        return y
+
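The three prediction-time methods are tied together by the relation stated in the `offset_` attribute description; a short sanity check on toy data:

```python
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
clf = SGDOneClassSVM(random_state=42).fit(X)

# decision_function = score_samples - offset_, and predict is its sign.
assert np.allclose(clf.decision_function(X), clf.score_samples(X) - clf.offset_)
assert np.array_equal(
    clf.predict(X), np.where(clf.decision_function(X) >= 0, 1, -1)
)
```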
2597
+    def _more_tags(self):
+        return {
+            "_xfail_checks": {
+                "check_sample_weights_invariance": (
+                    "zero sample_weight is not equivalent to removing samples"
+                )
+            },
+            "preserves_dtype": [np.float64, np.float32],
+        }